25 Commits

SHA1 Message Date
6053d1b4ec Update Dockerfile with runtime dependencies, copy only necessary files 2024-03-15 21:02:59 -05:00
f43f9cda2f Catch RuntimeError when GS not installed 2024-03-15 20:41:01 -05:00
992040e812 Add basic label generator app, add return values to parsing 2024-03-15 20:31:37 -05:00
5502a5069d All real samples ordered have working spec lookup 2024-03-14 22:09:12 -05:00
fc2af34450 Add Alphawire datasheet fallback 2024-03-14 22:06:13 -05:00
39723ec442 Update Alphawire table parsing 2024-03-14 21:35:28 -05:00
25ceb6c133 Merge branch 'main' of https://git.myitr.org/Jukebox/jukebox-software 2024-03-14 01:49:19 -05:00
56451d3e5c Inverse kinematic update to account for base rotation 2024-03-14 01:49:15 -05:00
53638f72e1 Merge branch 'dthomas_meilisearch' 2024-03-12 16:15:13 -05:00
5ef8795eb4 Merge branch 'main' into dthomas_meilisearch (conflicts: .gitignore, read_datasheet.py) 2024-03-12 16:13:41 -05:00
a63faba2aa Add checks to updating filterable attributes to avoid hitting weird edge cases 2024-03-12 16:08:47 -05:00
0f2c19e811 Merge remote-tracking branch 'origin/dthomas_meilisearch' into dthomas_meilisearch 2024-03-08 19:13:03 -06:00
b18355fc14 nuke database.py 2024-03-08 19:12:41 -06:00
aadb6ba24d add search functions to JukeboxSearch 2024-03-01 21:24:37 -06:00
4561b1c1a3 fix error when index does not exist 2024-03-01 20:37:22 -06:00
6edd0b4ef0 fix map datatype 2024-03-01 20:37:02 -06:00
2c242aac29 Merge branch 'main' into dthomas_meilisearch 2024-03-01 19:26:57 -06:00
b585f8cdb7 Merge branch 'main' into dthomas_meilisearch 2024-03-01 19:25:30 -06:00
f12d8a8062 add print statement 2024-03-01 19:24:47 -06:00
fc9ff4c8b2 split lists if they contain more than 2 commas 2024-03-01 19:13:28 -06:00
e903150fd4 Add functions for connecting to Meilisearch and adding documents 2024-02-20 10:33:01 -06:00
d0ea696274 reorganize gitignore and add comments 2024-02-20 10:15:56 -06:00
eea8c9f5fa Merge branch 'main' into dthomas_meilisearch 2024-02-20 10:04:33 -06:00
68b95bfe17 add a module for using meilisearch 2024-02-17 22:46:11 -06:00
e3e9b855f9 add compose file with meilisearch image 2024-02-17 22:45:30 -06:00
15 changed files with 965 additions and 303 deletions

.gitignore (vendored): 16 lines changed

@ -1,10 +1,22 @@
# python
venv
__pycache__
# cable data folder(s)
cables
cables-sample.zip
# meilisearch (mainly where I've put the data volume for the container)
meili_data
# IDE things
.vscode
output.log
.idea
# videos
*.webm
output.mp4
# log files
output.log
cables-sample.zip
# images
*.png
# Built app
build
# Generated label images
labels

Dockerfile

@ -1,7 +1,9 @@
FROM python:latest
FROM python:3.11-slim
RUN apt-get update && apt-get install -y libgl1-mesa-glx ghostscript && apt-get clean && rm -rf /var/lib/apt/lists
COPY . .
# Get runtime dependencies
# glx for OpenCV, ghostscript for datasheet PDF rendering, zbar for barcode scanning, git for cloning repos
RUN apt-get update && apt-get install -y libgl1-mesa-glx ghostscript libzbar0 git && apt-get clean && rm -rf /var/lib/apt/lists
COPY *.py *.yml *.sh *.txt *.html static templates ./
#COPY config-server.yml config.yml
RUN pip3 install -r requirements.txt

compose.yml (new file): 13 lines

@ -0,0 +1,13 @@
services:
meilisearch:
image: "getmeili/meilisearch:v1.6.2"
ports:
- "7700:7700"
environment:
MEILI_MASTER_KEY: fluffybunnyrabbit
MEILI_NO_ANALYTICS: "true"
volumes:
- "meili_data:/meili_data"
volumes:
meili_data:
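A quick way to sanity-check this service from Python, as a minimal sketch: the URL, master key, and index name below match the compose file above and the search.py module added later in this diff.

from meilisearch import Client

# Master key and port come from compose.yml above.
client = Client("http://localhost:7700", "fluffybunnyrabbit")
client.health()  # raises if the server is unreachable
print(client.index("cables").search("10GXS13"))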

database.py (deleted)

@ -1,140 +0,0 @@
"""This module contains functionality for interacting with a PostgreSQL database. It will automatically handle error
conditions (i.e. missing columns) without terminating the entire program. Use the :py:class:`DBConnector` class to
handle database interactions, either as a standalone object or in a context manager."""
from __future__ import annotations
import os
import psycopg2
from psycopg2 import DatabaseError, OperationalError
from psycopg2.errors import UndefinedColumn
DB_ADDRESS = os.getenv('DB_ADDRESS', 'localhost')
DB_PORT = os.getenv('DB_PORT', 5432)
DB_USER = os.getenv('DB_USER', 'postgres')
DB_PASSWORD = os.getenv('DB_PASSWORD', '')
DB_NAME = os.getenv('DB_NAME', 'postgres')
DB_TABLE = os.getenv('DB_TABLE', 'cables')
class DBConnector:
"""Context managed database class. Use with statements to automatically open and close the database connection, like
so:
.. code-block:: python
with DBConnector() as db:
db.read()
"""
def _db_start(self):
"""Setup the database connection and cursor."""
try:
self.conn = psycopg2.connect(
f"host={DB_ADDRESS} port={DB_PORT} dbname={DB_NAME} user={DB_USER} password={DB_PASSWORD}")
self.cur = self.conn.cursor()
except OperationalError as e:
raise e
def _db_stop(self):
"""Close the cursor and connection."""
self.cur.close()
self.conn.close()
def __init__(self):
self._db_start()
def __del__(self):
self._db_stop()
def __enter__(self):
self._db_start()
def __exit__(self):
self._db_stop()
def _get_cols(self) -> set[str]:
"""Get the list of columns in the database.
:return: A list of column names."""
query = f"select COLUMN_NAME from information_schema.columns where table_name={DB_TABLE}"
rows = {x["COLUMN_NAME"] for x in self._query(query)}
return rows
def _column_parity(self, columns: list[str] | set[str]) -> set[str]:
"""If the listed columns are not in the database, add them.
:param columns: The columns we expect are in the database.
:return: The list of columns in the database after querying."""
cols = set(columns)
existing = self._get_cols()
needs = cols.difference(existing.intersection(cols))
if len(needs) > 0:
query = f"ALTER TABLE {DB_TABLE} {', '.join([f'ADD COLUMN {c}' for c in needs])}"
self._query(query)
existing = self._get_cols()
return existing
def _query(self, sql) -> list[dict]:
"""Basic function for running queries.
:param sql: SQL query as plaintext.
:return: Results of the query, or an empty list if none."""
result = []
try:
self.cur.execute(sql)
result = self._read_dict()
except DatabaseError as e:
print(f"ERROR {e.pgcode}: {e.pgerror}\n"
f"Caused by query: {sql}")
finally:
return result
def _read_dict(self) -> list[dict]:
"""Read the cursor as a list of dictionaries. psycopg2 defaults to using a list of tuples, so we want to convert
each row into a dictionary before we return it."""
cols = [i.name for i in self.cur.description]
results = []
for row in self.cur:
row_dict = {}
for i in range(0, len(row)):
if row[i]:
row_dict = {**row_dict, cols[i]: row[i]}
results.append(row_dict)
return results
def read(self, **kwargs) -> list[dict]:
"""Read rows from a database that match the specified filters.
:param kwargs: Column constraints; i.e. what value to filter by in what column.
:returns: A list of dictionaries of all matching rows, or an empty list if no match."""
args = []
for kw in kwargs.keys():
args.append(f"{kw} ILIKE {kwargs['kw']}")
query = f"SELECT * FROM {DB_TABLE}"
if len(args) > 0:
query += f" WHERE {' AND '.join(args)}"
return self._query(query)
def write(self, **kwargs) -> dict:
"""Write a row to the database.
:param kwargs: Values to write for each database; specify each column separately!
:returns: The row you just added."""
self._column_parity(set(kwargs.keys()))
values = []
for val in kwargs.keys():
values.append(kwargs[val])
query = f"INSERT INTO {DB_TABLE} ({', '.join(kwargs.keys())}) VALUES ({', '.join(values)})"
self._query(query)
return kwargs
def write_all(self, items: list[dict]) -> list[dict]:
"""Write multiple rows to the database.
:param items: Rows to write, as a list of dictionaries.
:returns: The rows that were added successfully."""
successes = []
for i in items:
res0 = self.write(**i)
if res0:
successes.append(res0)
return successes
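Worth noting while deleting it: the context-manager protocol in DBConnector was incomplete. __enter__ never returned self, so the documented `with DBConnector() as db` bound db to None, and __exit__ lacked the exception arguments Python passes. A conforming sketch, for reference:

class DBConnector:
    ...
    def __enter__(self):
        self._db_start()
        return self  # bind the instance in `with DBConnector() as db`

    def __exit__(self, exc_type, exc_value, traceback):
        self._db_stop()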

get_specs.py

@ -27,6 +27,7 @@ def check_internet(url='https://belden.com', timeout=5):
def query_search(partnum, source):
fprint("Searching for " + partnum)
if source == "Belden":
token_url = "https://www.belden.com/coveo/rest/token?t=" + str(int(time.time()))
with requests.get(token_url) as r:
@ -52,15 +53,15 @@ def query_search(partnum, source):
a = json.loads(a)
idx = -1
name = ""
for partid in range(len(a["results"])):
for partid in range(len(a["results"])-1, -1, -1):
name = a["results"][partid]["title"]
if name != partnum:
if name.find(partnum) >= 0:
idx = partid
break
#break
elif partnum.find(name) >= 0:
idx = partid
break
#break
else:
idx = partid
@ -69,7 +70,7 @@ def query_search(partnum, source):
if idx < 0:
fprint("Could not find part in API: " + partnum)
return False
fprint("Search result found: result " + str(idx) + ", for ID " + name)
#fprint("Search result found: result " + str(idx) + ", for ID " + name)
#urlname = a["results"][0]["raw"]["catalogitemurlname"]
img = a["results"][idx]["raw"]["catalogitemimageurl"]
img = img[0:img.index("?")]
@ -92,7 +93,7 @@ def query_search(partnum, source):
#print(out)
return out
except:
print("falied to search with API. Falling back to datasheet lookup.")
print("Failed to search with API. Falling back to datasheet lookup.")
return False
@ -112,12 +113,14 @@ def query_search(partnum, source):
r = requests.get(url=alphaurl)
data = r.json()
output = dict()
#print(data)
#print(data["Results"])
try:
if data["Count"] > 0:
#print(data["Results"][0]["Url"])
for result in data["Results"]:
if result["Url"].split("/")[-1] == partnum:
#print(result["Url"])
if result["Url"].split("/")[-1] == partnum.replace("-", "").replace("/", "_"):
#print(partnum)
#print(result["Html"])
try:
@ -133,14 +136,18 @@ def query_search(partnum, source):
dsidx = result["Html"].index("<a href=\"/disteAPI/") + 9
dsidx2 = result["Html"].index(partnum, dsidx) + len(partnum)
output["datasheet"] = "https://www.alphawire.com" + result["Html"][dsidx:dsidx2]
output["partnum"] = partnum
#"test".index()
output["partnum"] = partnum.replace("/", "_") #.replace("-", "").replace("/", "_")
#
# "test".index()
#print(output)
return output
except:
print("Failed to search with API. Falling back to datasheet lookup.")
return False
print("Failed to search with API. Falling back to datasheet lookup.")
return False
@ -150,14 +157,19 @@ def touch(path):
def get_multi(partnums, delay=0.25):
def get_multi(partnums, delay=0.25, dir="cables/", cache=True):
with alive_bar(len(partnums) * 2, dual_line=True, calibrate=30, bar="classic2", spinner="classic") as bar:
def _try_download_datasheet(partnum, output_dir): # Guess datasheet URL
failed = list()
actualpartnums = list()
def _try_download_datasheet(partnum, output_dir, dstype): # Guess datasheet URL
global bartext
sanitized_name = partnum.replace(" ", "")
url = "https://catalog.belden.com/techdata/EN/" + sanitized_name + "_techdata.pdf"
if dstype == "Belden":
sanitized_name = partnum.replace(" ", "")
url = "https://catalog.belden.com/techdata/EN/" + sanitized_name + "_techdata.pdf"
elif dstype == "Alphawire":
# Alphawire Datasheet URLs do not use a sanitized part number (but product pages do)
url = "https://www.alphawire.com/disteAPI/SpecPDF/DownloadProductSpecPdf?productPartNumber=" + partnum
#fprint(url)
try:
with requests.get(url, stream=True) as r:
@ -167,8 +179,9 @@ def get_multi(partnums, delay=0.25):
if r.status_code == 404:
return False
os.makedirs(output_dir, exist_ok=True)
bartext = ""
with open(output_dir + "/datasheet.pdf", 'wb') as f:
for chunk in r.iter_content(chunk_size=131072):
for chunk in r.iter_content(chunk_size=65536):
# If you have chunk encoded response uncomment if
# and set chunk_size parameter to None.
#if chunk:
@ -195,8 +208,9 @@ def get_multi(partnums, delay=0.25):
if r.status_code == 404:
return False
os.makedirs(output_dir, exist_ok=True)
bartext = ""
with open(output_dir + "/datasheet.pdf", 'wb') as f:
for chunk in r.iter_content(chunk_size=131072):
for chunk in r.iter_content(chunk_size=65536):
# If you have chunk encoded response uncomment if
# and set chunk_size parameter to None.
#if chunk:
@ -221,8 +235,9 @@ def get_multi(partnums, delay=0.25):
if r.status_code == 404:
return False
os.makedirs(output_dir, exist_ok=True)
bartext = ""
with open(output_dir + "/part-hires." + url.split(".")[-1], 'wb') as f:
for chunk in r.iter_content(chunk_size=131072):
for chunk in r.iter_content(chunk_size=65536):
# If you have chunk encoded response uncomment if
# and set chunk_size parameter to None.
#if chunk:
@ -245,8 +260,9 @@ def get_multi(partnums, delay=0.25):
fprint("Parsing Datasheet contents of " + partnum)
bar.text = "Parsing Datasheet contents of " + partnum + ".pdf..."
read_datasheet.parse(path, output_dir, partnum, dstype)
out = read_datasheet.parse(path, output_dir, partnum, dstype)
bar(skipped=False)
return out
else:
fprint("Datasheet already parsed for " + partnum)
bar.text = "Datasheet already parsed for " + partnum + ".pdf"
@ -258,57 +274,73 @@ def get_multi(partnums, delay=0.25):
bar(skipped=False)
fprint("Parsing Datasheet contents of " + partnum)
bar.text = "Parsing Datasheet contents of " + partnum + ".pdf..."
read_datasheet.parse(path, output_dir, partnum, dstype)
out = read_datasheet.parse(path, output_dir, partnum, dstype)
bar(skipped=False)
return out
def run_search(partnum):
output_dir = "cables/" + partnum
oldpartnum = partnum
if dstype == "Alphawire":
# For Alphawire, sanitize the part number for only the final result check, because their API is very weird
# For the actual search, it must be un-sanitized
partnum = partnum.replace("/","_")
output_dir = dir + partnum
path = output_dir + "/datasheet.pdf"
bartext = "Downloading files for part " + partnum
bar.text = bartext
#
if (not os.path.exists(output_dir + "/found_part_hires")) or not (os.path.exists(path) and os.path.getsize(path) > 1):
partnum = oldpartnum.replace("_","/")
returnval = [partnum, dstype, False, False]
if (not os.path.exists(output_dir + "/found_part_hires")) or not (os.path.exists(path) and os.path.getsize(path) > 1) or not cache:
# Use query
search_result = query_search(partnum, dstype)
# Try to use belden.com search
if search_result is not False:
# Download high resolution part image if available and needed
#oldpartnum = partnum
partnum = search_result["partnum"]
output_dir = "cables/" + partnum
returnval = [partnum, dstype, False, False]
output_dir = dir + partnum
path = output_dir + "/datasheet.pdf"
bartext = "Downloading files for part " + partnum
bar.text = bartext
if not os.path.exists(output_dir + "/found_part_hires"):
if not os.path.exists(output_dir + "/found_part_hires") or not cache:
if _download_image(search_result["image"], output_dir):
fprint("Downloaded hi-res part image for " + partnum)
returnval = [partnum, dstype, True, False]
touch(output_dir + "/found_part_hires")
else:
fprint("Using cached hi-res part image for " + partnum)
# Download datasheet from provided URL if needed
if os.path.exists(path) and os.path.getsize(path) > 1:
__use_cached_datasheet(partnum, path, output_dir, dstype)
if os.path.exists(path) and os.path.getsize(path) > 1 and cache:
out = __use_cached_datasheet(partnum, path, output_dir, dstype)
returnval = [partnum, dstype, True, out]
elif _download_datasheet(search_result["datasheet"], output_dir) is not False:
__downloaded_datasheet(partnum, path, output_dir, dstype)
out = __downloaded_datasheet(partnum, path, output_dir, dstype)
returnval = [partnum, dstype, True, out]
elif os.path.exists(path) and os.path.getsize(path) > 1:
__use_cached_datasheet(partnum, path, output_dir, dstype)
elif os.path.exists(path) and os.path.getsize(path) > 1 and cache:
out = __use_cached_datasheet(partnum, path, output_dir, dstype)
returnval = [partnum, dstype, True, out]
# If search fails, and we don't already have the datasheet, guess datasheet URL and skip the hires image download
elif _try_download_datasheet(partnum, output_dir) is not False:
__downloaded_datasheet(partnum, path, output_dir, dstype)
elif _try_download_datasheet(partnum, output_dir, dstype) is not False:
out = __downloaded_datasheet(partnum, path, output_dir, dstype)
returnval = [partnum, dstype, False, out]
# Failed to download with search or guess :(
else:
return False
return True
actualpartnums.append(returnval)
return returnval
# We already have a hi-res image and the datasheet - perfect!
else:
fprint("Using cached hi-res part image for " + partnum)
__use_cached_datasheet(partnum, path, output_dir, dstype)
out = __use_cached_datasheet(partnum, path, output_dir, dstype)
returnval = [partnum, dstype, False, out]
return True
for fullpartnum in partnums:
@ -339,7 +371,7 @@ def get_multi(partnums, delay=0.25):
if not success:
fprint("Failed to download datasheet for part " + partnum)
bar.text = "Failed to download datasheet for part " + partnum
failed.append(partnum)
failed.append((partnum, dstype))
bar(skipped=True)
bar(skipped=True)
time.sleep(delay)
@ -347,10 +379,10 @@ def get_multi(partnums, delay=0.25):
if len(failed) > 0:
fprint("Failed to download:")
for partnum in failed:
fprint(partnum)
return False # Go to manual review upload page
fprint(partnum[1] + " " + partnum[0])
return False, actualpartnums # Go to manual review upload page
else:
return True # All cables downloaded; we are good to go
return True, actualpartnums # All cables downloaded; we are good to go
@ -377,7 +409,7 @@ if __name__ == "__main__":
"AW3050",
"AW6714",
"AW1172C",
"AW2211/4",
"AWFIT-221-1/4",
"BLTF-1LF-006-RS5N",
"BLTF-SD9-006-RI5N",
@ -421,8 +453,8 @@ if __name__ == "__main__":
"BLC6D1100007"
]
#query_search("86104CY", "Alphawire")
get_multi(partnums, 0.25)
print(query_search("74002", "Belden"))
#get_multi(partnums, 0.25)
#query_search("10GXS13", "Belden")

gs10030w64.exe (new binary file; not shown)

File diff suppressed because one or more lines are too long

label_generator.py (new executable file): 100 lines

@ -0,0 +1,100 @@
#!/usr/bin/env python3
from get_specs import get_multi
import sys
import uuid
import os
import signal
from PIL import Image
from label_image import generate_code
def input_cable():
print("")
print("Use the full part number. Spaces, special characters are allowed. Do not specify the brand.")
print("")
print("Please enter a part number and press enter:")
inputnum = input("").strip()
if len(inputnum) < 2:
killall_signal(0, 0)
print("Input part number:", inputnum)
print("Searching databases for cables...")
# Search both AW and BL sites
status, output = get_multi(["BL"+inputnum, "AW"+inputnum], delay=0.1, dir="temp/" + str(uuid.uuid4()) + "/", cache=False)
print("")
if len(output) > 1:
for i in output:
print(i[1], i[0])
print("Multiple brands with the same part number! Please type \"b\" for the Belden part number or \"a\" for the Alphawire cable")
inputbrand = input()
if inputbrand == "b":
output = [output[0]]
elif inputbrand == "a":
output = [output[1]]
elif len(output) == 0:
print("No results found for part number", inputnum + ". Please try again with a different part number.")
return
output = output[0]
print("")
if output[2] and output[3]:
print("Cable result found -",output[1], output[0], "with high-quality image and full specs")
elif output[2]:
print("Cable result found -",output[1], output[0], "with high-quality image and no specs")
elif output[3]:
print("Cable result found -",output[1], output[0], "with no/low quality image and full specs")
else:
print("Cable result found -",output[1], output[0], "with no/low quality image and no specs")
print("")
if not output[3]:
print("Unable to decode cable specs. Please try again with a different part number.")
return False
else:
print("")
print("*** Cable details confirmed. Creating label...")
print("")
img = None
imgstr = ""
if output[1] == "Belden":
imgstr = "BL"
elif output[1] == "Alphawire":
imgstr = "AW"
img = generate_code(imgstr + output[0])
os.makedirs("labels", exist_ok=True)
img.save("labels/" + imgstr + output[0] + ".png")
def delete_folder(path):
# Check if the path is a directory
if not os.path.isdir(path):
return
# List all files and directories in the path
for filename in os.listdir(path):
file_path = os.path.join(path, filename)
# If it's a directory, recursively call this function
if os.path.isdir(file_path):
delete_folder(file_path)
else:
# If it's a file, remove it
os.remove(file_path)
# After removing all contents, remove the directory itself
os.rmdir(path)
def killall_signal(a,b):
delete_folder("temp")
os.kill(os.getpid(), 9) # dirty kill of self
if __name__ == "__main__":
signal.signal(signal.SIGINT, killall_signal)
signal.signal(signal.SIGTERM, killall_signal)
print("Welcome to the Jukebox cable utility. This tool will allow you to verify Belden & Alphawire cable part numbers and create labels for samples in the Jukebox.")
print("This tool requires internet access to download cable specifications and verify part numbers.")
#print("Use Ctrl+C to exit.")
while True:
delete_folder("temp")
input_cable()
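delete_folder() above reimplements recursive removal by hand; the standard library already provides this, so an equivalent sketch is a one-liner:

import shutil

# Remove the temp tree; ignore_errors avoids raising if it is already gone.
shutil.rmtree("temp", ignore_errors=True)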

label_image.py (new executable file): 238 lines

@ -0,0 +1,238 @@
#!/usr/bin/env python3
from util import fprint
from PIL import Image
from PIL import ImageDraw
#import cv2
import numpy as np
#import math
# Copied from http://en.wikipedia.org/wiki/Code_128
# Value Weights 128A 128B 128C
CODE128_CHART = """
0 212222 space space 00
1 222122 ! ! 01
2 222221 " " 02
3 121223 # # 03
4 121322 $ $ 04
5 131222 % % 05
6 122213 & & 06
7 122312 ' ' 07
8 132212 ( ( 08
9 221213 ) ) 09
10 221312 * * 10
11 231212 + + 11
12 112232 , , 12
13 122132 - - 13
14 122231 . . 14
15 113222 / / 15
16 123122 0 0 16
17 123221 1 1 17
18 223211 2 2 18
19 221132 3 3 19
20 221231 4 4 20
21 213212 5 5 21
22 223112 6 6 22
23 312131 7 7 23
24 311222 8 8 24
25 321122 9 9 25
26 321221 : : 26
27 312212 ; ; 27
28 322112 < < 28
29 322211 = = 29
30 212123 > > 30
31 212321 ? ? 31
32 232121 @ @ 32
33 111323 A A 33
34 131123 B B 34
35 131321 C C 35
36 112313 D D 36
37 132113 E E 37
38 132311 F F 38
39 211313 G G 39
40 231113 H H 40
41 231311 I I 41
42 112133 J J 42
43 112331 K K 43
44 132131 L L 44
45 113123 M M 45
46 113321 N N 46
47 133121 O O 47
48 313121 P P 48
49 211331 Q Q 49
50 231131 R R 50
51 213113 S S 51
52 213311 T T 52
53 213131 U U 53
54 311123 V V 54
55 311321 W W 55
56 331121 X X 56
57 312113 Y Y 57
58 312311 Z Z 58
59 332111 [ [ 59
60 314111 \ \ 60
61 221411 ] ] 61
62 431111 ^ ^ 62
63 111224 _ _ 63
64 111422 NUL ` 64
65 121124 SOH a 65
66 121421 STX b 66
67 141122 ETX c 67
68 141221 EOT d 68
69 112214 ENQ e 69
70 112412 ACK f 70
71 122114 BEL g 71
72 122411 BS h 72
73 142112 HT i 73
74 142211 LF j 74
75 241211 VT k 75
76 221114 FF l 76
77 413111 CR m 77
78 241112 SO n 78
79 134111 SI o 79
80 111242 DLE p 80
81 121142 DC1 q 81
82 121241 DC2 r 82
83 114212 DC3 s 83
84 124112 DC4 t 84
85 124211 NAK u 85
86 411212 SYN v 86
87 421112 ETB w 87
88 421211 CAN x 88
89 212141 EM y 89
90 214121 SUB z 90
91 412121 ESC { 91
92 111143 FS | 92
93 111341 GS } 93
94 131141 RS ~ 94
95 114113 US DEL 95
96 114311 FNC3 FNC3 96
97 411113 FNC2 FNC2 97
98 411311 ShiftB ShiftA 98
99 113141 CodeC CodeC 99
100 114131 CodeB FNC4 CodeB
101 311141 FNC4 CodeA CodeA
102 411131 FNC1 FNC1 FNC1
103 211412 StartA StartA StartA
104 211214 StartB StartB StartB
105 211232 StartC StartC StartC
106 2331112 Stop Stop Stop
""".split()
VALUES = [int(value) for value in CODE128_CHART[0::5]]
WEIGHTS = dict(zip(VALUES, CODE128_CHART[1::5]))
CODE128A = dict(zip(CODE128_CHART[2::5], VALUES))
CODE128B = dict(zip(CODE128_CHART[3::5], VALUES))
CODE128C = dict(zip(CODE128_CHART[4::5], VALUES))
for charset in (CODE128A, CODE128B):
charset[' '] = charset.pop('space')
def generate_code(data, show=False, check=False):
img = code128_image(data)
if show:
img.show()
#img.show()
#print(data)
if(check):
from pyzbar.pyzbar import decode
from pyzbar.pyzbar import ZBarSymbol
print(decode(img, symbols=[ZBarSymbol.CODE128])[0].data.decode('ascii'))
#if(decode(img, symbols=[ZBarSymbol.CODE128])[0].data.decode('ascii') == data):
# return True
#else:
# return False
return img
def code128_format(data):
"""
Generate an optimal barcode from ASCII text
"""
text = str(data)
pos = 0
length = len(text)
# Start Code
if text[:2].isdigit():
charset = CODE128C
codes = [charset['StartC']]
else:
charset = CODE128B
codes = [charset['StartB']]
# Data
while pos < length:
if charset is CODE128C:
if text[pos:pos+2].isdigit() and length - pos > 1:
# Encode Code C two characters at a time
codes.append(int(text[pos:pos+2]))
pos += 2
else:
# Switch to Code B
codes.append(charset['CodeB'])
charset = CODE128B
elif text[pos:pos+4].isdigit() and length - pos >= 4:
# Switch to Code C
codes.append(charset['CodeC'])
charset = CODE128C
else:
# Encode Code B one character at a time
codes.append(charset[text[pos]])
pos += 1
# Checksum
checksum = 0
for weight, code in enumerate(codes):
checksum += max(weight, 1) * code
codes.append(checksum % 103)
# Stop Code
codes.append(charset['Stop'])
return codes
def code128_image(data, height=100, thickness=3, quiet_zone=False):
if not data[-1] == CODE128B['Stop']:
data = code128_format(data)
barcode_widths = []
for code in data:
for weight in WEIGHTS[code]:
barcode_widths.append(int(weight) * thickness)
width = sum(barcode_widths)
x = 0
if quiet_zone:
width += 20 * thickness
x = 10 * thickness
# Barcode image (RGB canvas so the blue locator ring below can be drawn)
img = Image.new('RGB', (int(width * 10), int(width * 10)), 'white')
draw = ImageDraw.Draw(img)
draw_bar = True
for bwidth in barcode_widths:
bwidth *= 4
if draw_bar:
draw.rectangle(((x + int(width * 3), width*6.25), (x + int(width * 3) + bwidth - 1, width*7)), fill='black')
draw_bar = not draw_bar
x += bwidth
#draw.arc(((width - width/5, width - width/5), (width*9 + width/5, width*9 + width/5)),0,360,fill='blue', width = int(width/8))
draw.arc(((width+int(width / 1.4), width+int(width / 1.4)), (width*9-int(width / 1.4), width*9-int(width / 1.4))),0,360,fill='blue', width = int(width/8))
return img
if __name__ == "__main__":
#print(generate_code("BL10GXS13"))
#print(generate_code("BL10GXgd35j35S13"))
#print(generate_code("BL10GX54hS13"))
print(generate_code("BL10Gj34qXS13", False, False))
#print(generate_code("BL104w5545dp7bfwp43643534/4563G-XS13"))
#adjust_image(cv2.imread('test_skew.jpg'))
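As a worked example of the checksum step in code128_format(): encoding "AB" starts in Code B, giving symbol values StartB(104), A(33), B(34); the weighted sum is 1*104 + 1*33 + 2*34 = 205, and 205 % 103 = 102. A quick self-check, assuming label_image.py is importable:

from label_image import code128_format

# [StartB, A, B, checksum, Stop]
assert code128_format("AB") == [104, 33, 34, 102, 106]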

read_datasheet.py

@ -1,9 +1,8 @@
#!/usr/bin/env python3
# Parse Belden catalog techdata datasheets
# Parse Belden (100%) & Alphawire (75%) catalog techdata datasheets
import pandas as pd
pd.set_option('future.no_silent_downcasting', True)
from PyPDF2 import PdfReader
import camelot
import numpy as np
@ -13,55 +12,94 @@ import json
from util import fprint
import uuid
from util import run_cmd
from util import win32
import os
import glob
import sys
def touch(path):
with open(path, 'a'):
os.utime(path, None)
def find_data_file(filename):
if getattr(sys, "frozen", False):
# The application is frozen
datadir = os.path.dirname(sys.executable)
else:
# The application is not frozen
# Change this bit to match where you store your data files:
datadir = os.path.dirname(__file__)
return os.path.join(datadir, filename)
def extract_table_name(table_start, searchpage, reader, dstype, fallbackname):
if dstype == "Belden":
ymin = table_start
ymax = table_start + 10
elif dstype == "Alphawire":
ymin = table_start - 5
ymax = table_start + 10
page = reader.pages[searchpage - 1]
parts = []
def visitor_body(text, cm, tm, fontDict, fontSize):
y = tm[5]
if y > ymin and y < ymax:
parts.append(text)
page.extract_text(visitor_text=visitor_body)
text_body = "".join(parts).strip('\n')
if len(text_body) == 0:
text_body = str(fallbackname)
return text_body
#fprint(text_body)
def parse(filename, output_dir, partnum, dstype):
tables = []
# Extract table data
tables = camelot.read_pdf(filename, pages="1-end", flavor='lattice', backend="poppler", split_text=False, line_scale=100, process_background=True, resolution=600, interations=1, layout_kwargs={'detect_vertical': False, 'char_margin': 0.5}, shift_text=['r', 't'])
try:
if dstype == "Belden":
tables = camelot.read_pdf(filename, pages="1-end", flavor='lattice', backend="ghostscript", split_text=False, line_scale=100, process_background=True, resolution=600, iterations=1, layout_kwargs={'detect_vertical': False, 'char_margin': 0.5}, shift_text=['r', 't'])
elif dstype == "Alphawire":
tables = camelot.read_pdf(filename, pages="1-end", flavor='lattice', backend="ghostscript", split_text=False, line_scale=50, process_background=True, resolution=600, iterations=1, layout_kwargs={'detect_vertical': True, 'char_margin': 0.5}, shift_text=['l', 't'])
except (OSError, RuntimeError) as e:
print(e)
if win32:
print("Ghostscript is not installed! Launching installer...")
#subprocess.run([r".\\gs10030w64.exe"])
os.system(r'''Powershell -Command "& { Start-Process \"''' + find_data_file("gs10030w64.exe") + r'''\" -Verb RunAs } " ''')
# Will return once file launched...
print("Once the install is completed, try again.")
return False
else:
print("Ghostscript is not installed. You can install it with e.g. apt install ghostscript for Debian-based systems.")
return False
#fprint("Total tables extracted:", tables.n)
n = 0
pagenum = 0
#pagenum = 0
reader = PdfReader(filename)
page = reader.pages[0]
table_list = {}
table_list_raw = {}
pd.set_option('future.no_silent_downcasting', True)
for table in tables:
#with pd.options.context("future.no_silent_downcasting", True):
table.df.infer_objects(copy=False)
table.df.replace('', np.nan, inplace=True)
table.df = table.df.replace('', np.nan).infer_objects(copy=False)
table.df.dropna(inplace=True, how="all")
table.df.dropna(inplace=True, axis="columns", how="all")
table.df.replace(np.nan, '', inplace=True)
table.df = table.df.replace(np.nan, '').infer_objects(copy=False)
if not table.df.empty:
#fprint("\nTable " + str(n))
# Extract table names
table_start = table.cells[0][0].lt[1] # Read top-left cell's top-left coordinate
#fprint(table_start)
ymin = table_start
ymax = table_start + 10
if pagenum != table.page - 1:
pagenum = table.page - 1
page = reader.pages[table.page - 1]
parts = []
def visitor_body(text, cm, tm, fontDict, fontSize):
y = tm[5]
if y > ymin and y < ymax:
parts.append(text)
page.extract_text(visitor_text=visitor_body)
text_body = "".join(parts).strip('\n')
if len(text_body) == 0:
text_body = str(n)
#fprint(text_body)
text_body = extract_table_name(table_start, table.page, reader, dstype, n)
table_list[text_body] = table.df
if dstype == "Alphawire":
table_list_raw[text_body] = table
#table.to_html("table" + str(n) + ".html")
#fprint(table.df)
@ -71,7 +109,7 @@ def parse(filename, output_dir, partnum, dstype):
#tables.export(output_dir + '/techdata.json', f='json')
# fprint(table_list)
#fprint(table_list)
# Extract Basic details - part name & description, image, etc
reader = PdfReader(filename)
@ -100,24 +138,32 @@ def parse(filename, output_dir, partnum, dstype):
tables = dict()
torename = dict()
previous_table = ""
#print(table_list.keys())
for table_name in table_list.keys():
# determine shape: horizontal or vertical
table = table_list[table_name]
rows = table.shape[0]
cols = table.shape[1]
vertical = None
#print(rows, cols, table_name)
if rows > 2 and cols == 2:
vertical = True
elif cols == 1:
elif cols == 1 and rows > 1:
vertical = False
elif rows == 1:
vertical = True
elif cols == 2: # and rows <= 2
# inconsistent
if table.iloc[0, 0].find(":") == len(table.iloc[0, 0]) - 1: # check if last character is ":" indicating a vertical table
vertical = True
else:
vertical = False
if dstype == "Belden":
if table.iloc[0, 0].find(":") == len(table.iloc[0, 0]) - 1: # check if last character is ":" indicating a vertical table
vertical = True
else:
vertical = False
elif dstype == "Alphawire":
if table.iloc[0, 0].find(")") == 1 or table.iloc[0, 0].find(")") == 2 or table.iloc[0, 0].find(":") == len(table.iloc[0, 0]) - 1: # check if last character is ":" indicating a vertical table
vertical = True
else:
vertical = False
elif cols > 2: # and rows <= 2
vertical = False
@ -125,9 +171,12 @@ def parse(filename, output_dir, partnum, dstype):
vertical = False
else: # 1 column, <= 2 rows
vertical = False
#print(vertical)
# missing name check
for table_name_2 in table_list.keys():
if dstype == "Alphawire" and table_name_2.find("\n") >= 0:
torename[table_name_2] = table_name_2[0:table_name_2.find("\n")]
if table_name_2.find(table.iloc[-1, 0]) >= 0:
# Name taken from table directly above - this table does not have a name
torename[table_name_2] = "Specs " + str(len(tables))
@ -136,8 +185,12 @@ def parse(filename, output_dir, partnum, dstype):
if vertical:
out = dict()
for row in table.itertuples(index=False, name=None):
out[row[0].replace("\n", " ").replace(":", "")] = row[1]
if rows > 1:
for row in table.itertuples(index=False, name=None):
out[row[0].replace("\n", " ").replace(":", "")] = row[1]
else:
for row in table.itertuples(index=False, name=None):
out[row[0].replace("\n", " ").replace(":", "")] = ""
else: # horizontal
out = dict()
@ -147,9 +200,52 @@ def parse(filename, output_dir, partnum, dstype):
tables[table_name] = out
# multi-page table check, Alphawire
if dstype == "Alphawire" and table_name.isdigit():
# table continues from previous page or has name on previous page
thistbl = table_list_raw[table_name]
prevtbl = table_list_raw[previous_table]
if prevtbl.cells[-1][0].lb[1] < 50 and thistbl.cells[0][0].lt[1] > 600:
# wraparound
#print("WRAP")
#print("PREV TABLE", prevtbl.df)
#print("THIS TABLE", thistbl.df)
#print("PREV TABLE CORNER", prevtbl.cells[-1][0].lb[1])
#print("THIS TABLE CORNER", thistbl.cells[0][0].lt[1])
main_key = previous_table
cont_key = table_name
#print(vertical)
if vertical == False:
main_keys = list(tables[main_key].keys())
for i, (cont_key, cont_values) in enumerate(tables[cont_key].items()):
if i < len(main_keys):
#print(tables[main_key][main_keys[i]])
tables[main_key][main_keys[i]] = (tuple(tables[main_key][main_keys[i]]) + (cont_key,) + cont_values)
del tables[table_name]
else:
#print(tables[cont_key].keys())
for key in tables[cont_key].keys():
#print(main_key, key, cont_key, key)
tables[main_key][key] = tables[cont_key][key]
del tables[table_name]
elif thistbl.cells[0][0].lt[1] > 600:
# name on previous page (grrrr)
#print("NAMEABOVE")
#print("PREV TABLE", prevtbl.df)
#print("THIS TABLE", thistbl.df)
#print("PREV TABLE CORNER", prevtbl.cells[-1][0].lb[1])
#print("THIS TABLE CORNER", thistbl.cells[0][0].lt[1])
name = extract_table_name(50, prevtbl.page,reader,dstype,table_name).strip("\n").strip()
#print("FOUND NAME:", name)
torename[table_name] = name
# multi-page table check
# multi-page table check, Belden
if dstype == "Belden":
if table_name.isdigit() and len(tables) > 1:
#fprint(table_name)
@ -177,7 +273,7 @@ def parse(filename, output_dir, partnum, dstype):
previous_table = table_name
# remove renamed tables
# remove & rename tables
for table_name in torename.keys():
tables[torename[table_name]] = tables[table_name]
del tables[table_name]
@ -211,11 +307,20 @@ def parse(filename, output_dir, partnum, dstype):
#print(output_table)
run_cmd("rm \"" + output_dir + "\"/*.json") # not reliable!
with open(output_dir + "/" + output_table["searchspecs"]["id"] + ".json", 'w') as json_file:
#run_cmd("rm \"" + output_dir + "\"/*.json") # not reliable!
pattern = os.path.join(output_dir, '*.json')
json_files = glob.glob(pattern)
for file_path in json_files:
os.remove(file_path)
#print(f"Deleted {file_path}")
with open(output_dir + "/search_" + output_table["searchspecs"]["id"] + ".json", 'w') as json_file:
json.dump(output_table["searchspecs"], json_file)
touch(output_dir + "/parsed")
return output_table
with open(output_dir + "/specs_" + output_table["partnum"] + ".json", 'w') as json_file:
json.dump(output_table["fullspecs"], json_file)
#print(json.dumps(output_table, indent=2))
touch(output_dir + "/parsed") # mark as parsed
return True
def flatten(tables):
@ -245,7 +350,11 @@ def flatten(tables):
#print("\"" + keyname + "\":", "\"" + str(out[fullkeyname]) + "\",")
elif len(tables[table][key]) == 1:
out[fullkeyname] = convert_to_number(tables[table][key][0])
#print("\"" + keyname + "\":", "\"" + str(out[fullkeyname]) + "\",")
# if the item has at least two commas in it, split it
if tables[table][key].count(',') > 0:
out[fullkeyname] = list(map(lambda x: x.strip(), tables[table][key].split(",")))
#print("\"" + keyname + "\":", "\"" + str(out[fullkeyname]) + "\",")
# if the item has at least two commas in it, split it
@ -260,4 +369,4 @@ def flatten(tables):
if __name__ == "__main__":
parse("test2.pdf", "cables/10GXS13", "10GXS13")
print(parse("cables/3050/datasheet.pdf", "cables/3050", "3050", "Alphawire"))
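After this change, parse() writes search_<uuid>.json and specs_<partnum>.json next to the datasheet and returns a truthy result on success. A hypothetical invocation (paths illustrative; Ghostscript must be installed):

import glob
import read_datasheet

out = read_datasheet.parse("cables/10GXS13/datasheet.pdf", "cables/10GXS13", "10GXS13", "Belden")
if out:
    # parse() leaves search_*.json and specs_*.json beside the PDF
    for path in glob.glob("cables/10GXS13/*.json"):
        print(path)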

requirements.txt

@ -1,11 +1,11 @@
# Runtime
camelot-py[base]
camelot-py
opencv-python
pypdf2==2.12.1
alive-progress
requests
git+https://github.com/Byeongdulee/python-urx.git
psycopg2-binary
meilisearch
pyyaml
Flask
selenium
@ -15,6 +15,13 @@ websockets
numpy
scipy
ipywidgets
pandas
pyarrow
ghostscript
pyzbar
# Development
matplotlib
#cx_Freeze # uncomment if building label generator app
# requires windows 10 SDK, visual C++, etc

search.py (new file): 117 lines

@ -0,0 +1,117 @@
"""Interactions with the Meilisearch API for adding and searching cables."""
from meilisearch import Client
from meilisearch.task import TaskInfo
from meilisearch.errors import MeilisearchApiError
import json
DEFAULT_URL = "http://localhost:7700"
DEFAULT_APIKEY = "fluffybunnyrabbit" # I WOULD RECOMMEND SOMETHING MORE SECURE
DEFAULT_INDEX = "cables"
DEFAULT_FILTERABLE_ATTRS = ["partnum", "uuid", "position"] # default filterable attributes
class JukeboxSearch:
"""Class for interacting with the Meilisearch API."""
def __init__(self,
url: str = None,
api_key: str = None,
index: str = None,
filterable_attrs: list = None):
"""Connect to Meilisearch and perform first-run tasks as necessary.
:param url: Address of the Meilisearch server. Defaults to ``http://localhost:7700`` if unspecified.
:param api_key: API key used to authenticate with Meilisearch. Set this to something secure if the endpoint is
publicly reachable; otherwise you can leave Meilisearch's master key as the default ``fluffybunnyrabbit``.
:param index: The name of the index to configure. Defaults to ``cables`` if unspecified.
:param filterable_attrs: List of all the attributes we want to filter by."""
# connect to Meilisearch
url = url or DEFAULT_URL
api_key = api_key or DEFAULT_APIKEY
filterable_attrs = filterable_attrs or DEFAULT_FILTERABLE_ATTRS
self.index = index or DEFAULT_INDEX
self.client = Client(url, api_key)
# create the index if it does not exist already
try:
self.client.get_index(self.index)
except MeilisearchApiError as _:
self.client.create_index(self.index)
# make a variable to easily reference the index
self.idxref = self.client.index(self.index)
# update filterable attributes if needed
self.update_filterables(filterable_attrs)
def add_document(self, document: dict) -> TaskInfo:
"""Add a cable to the Meilisearch index.
:param document: Dictionary containing all the cable data.
:returns: A TaskInfo object for the addition of the new document."""
return self.idxref.add_documents(document)
def add_documents(self, documents: list):
"""Add a list of cables to the Meilisearch index.
:param documents: List of dictionaries containing all the cable data.
:returns: A TaskInfo object for the last new document."""
taskinfo = None
for i in documents:
taskinfo = self.add_document(i)
return taskinfo
def update_filterables(self, filterables: list):
"""Update filterable attributes and wait for database to fully index. If the filterable attributes matches the
current attributes in the database, don't update (saves reindexing).
:param filterables: List of all filterable attributes"""
existing_filterables = self.idxref.get_filterable_attributes()
if len(set(existing_filterables).difference(set(filterables))) > 0:
taskref = self.idxref.update_filterable_attributes(filterables)
self.client.wait_for_task(taskref.task_uid)
def search(self, query: str, filters: str = None):
"""Execute a search query on the Meilisearch index.
:param query: Search query
:param filters: A meilisearch compatible filter statement.
:returns: The search results dict. Actual results are in a list under "hits", but there are other nice values that are useful in the root element."""
if filters:
q = self.idxref.search(query, {"filter": filters})
else:
q = self.idxref.search(query)
return q
def _filter_one(self, filter: str):
"""Get the first item to match a filter.
:param filter: A meilisearch compatible filter statement.
:returns: A dict containing the results; If no results found, an empty dict."""
q = self.search("", filter)
if q["estimatedTotalHits"] != 0:
return ["hits"][0]
else:
return dict()
def get_position(self, position: str):
"""Get a part by position.
:param position: The position to search for.
return self._filter_one(f"position = {position}")
def get_uuid(self, uuid: str):
"""Get a specific UUID.
:param uuid: The UUID to search for."""
return self._filter_one(f"uuid = {uuid}")
def get_partnum(self, partnum: str):
"""Get a specific part number.
:param partnum: The part number to search for."""
return self._filter_one(f"partnum = {partnum}")
# entrypoint
if __name__ == "__main__":
jbs = JukeboxSearch()
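A short usage sketch (field values are illustrative; a running Meilisearch instance from compose.yml is assumed):

jbs = JukeboxSearch()
jbs.add_document({"partnum": "10GXS13",
                  "uuid": "00000000-0000-0000-0000-000000000000",
                  "position": "1A"})
print(jbs.get_partnum("10GXS13"))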

setup-label-generator.py (new file): 31 lines

@ -0,0 +1,31 @@
import sys
from cx_Freeze import setup, Executable
debug = True
debug = not debug
# Dependencies are automatically detected, but it might need fine tuning.
# "packages": ["os"] is used as example only
import opcode
import os
import distutils
#distutils_path = os.path.join(os.path.dirname(opcode.__file__), 'distutils')
build_exe_options = {"include_msvcr": True, "packages": ["camelot", "setuptools"], "optimize": 0, "silent": True, "include_files": ["gs10030w64.exe"], "excludes": ["scipy", "torch"]}
# base="Win32GUI" should be used only for Windows GUI app
base = "console"
#if sys.platform == "win32" and not debug:
# base = "Win32GUI"
if sys.platform == "linux" or sys.platform == "linux2" or sys.platform == "darwin":
name = "jukebox-labelgen"
else:
name = "jukebox-labelgen.exe"
setup(
name="IP Pigeon",
version="0.2.4",
description="IP Pigeon client application",
options={"build_exe": build_exe_options},
executables=[Executable("label_generator.py", base=base, uac_admin=False, target_name=name)],
)
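The build itself follows the usual cx_Freeze flow: uncomment cx_Freeze in requirements.txt (per the note there), then run `python setup-label-generator.py build`; on Windows the options above bundle the Ghostscript installer alongside the frozen executable.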

(robot arm kinematics module)

@ -216,35 +216,65 @@ def normalize_degree(theta):
# Return angle
return normalized_theta
def get_joints_from_xyz_rel(x, y, z, initial_guess = (math.pi/2, math.pi/2, 0), limbs=(.422864, .359041, .092124)):
# Get polar coordinates of x,y pair
r, theta = cartesian_to_polar(x, y)
# Get length of each limb
l1, l2, l3 = limbs
def get_joints_from_xyz_rel(x, y, z, rx=0, ry=-math.pi/2, rz=0, initial_guess = (math.pi/2, math.pi/2, 0)):
# Get limbs and offsets
offset_x, offset_y, offset_z = (0, 0, 0.14) # Tool offset
l_bs, l1, l2, l3, l_wt = (0.1333, .425, .39225, .1267, .0997) # Limb lengths
#l3=0.15
# Calculate base angle and r relative to shoulder joint
def calculate_theta(x, y, a):
# Calculate if we need the + or - in our equations
if (x>-a and y>=0) or (x>a and y<0):
flip = 1
elif (x<-a and y>=0) or (x<a and y<0):
flip = -1
else:
# Critical section (x=a, or x=-a). Infinite slope
# Return 0 or 180 depending on sign
return math.atan2(y, 0)
# Calculate tangent line y = mx + b
m = (x*y - math.sqrt(x*x*y*y-(x*x-a*a)*(y*y-a*a)))/(x*x-a*a)
b = flip * a * math.sqrt(1+m*m)
# Calculate equivalent tangent point on circle
cx = (-flip*m*b)/(1+m*m)
cy = m*cx + flip*b
# Calculate base angle, make angle negative if flip=1
theta = math.atan2(cy, cx) + (-math.pi if flip==1 else 0)
return theta
base_theta = calculate_theta(x, y, l_bs)
cx, cy = l_bs*math.cos(base_theta), l_bs*math.sin(base_theta)
r = math.sqrt((x-cx)**2 + (y-cy)**2)
# Formulas to find out joint positions for (r, z)
def inv_kin_r_z(p):
a, b, c = p
return (l1*math.cos(a) + l2*math.cos(a-b) + l3*math.cos(a-b-c) - r, # r
l1*math.sin(a) + l2*math.sin(a-b) - l3*math.sin(a-b-c) - z, # z
l1*math.sin(a) + l2*math.sin(a-b) - l3*math.sin(a-b-c) - (l3*math.sin(a-b-c)) - (z + offset_z), # z
a-b-c) # wrist angle
# Normalize angles
base, shoulder, elbow, wrist = [normalize_degree(deg) for deg in [theta, *fsolve(inv_kin_r_z, initial_guess)]]
base, shoulder, elbow, wrist1 = [normalize_degree(deg) for deg in [base_theta, *fsolve(inv_kin_r_z, initial_guess)]]
# Return result
return base, shoulder, elbow, wrist
return base, shoulder, elbow, wrist1, ry, rz
def get_joints_from_xyz_abs(x, y, z):
joints = get_joints_from_xyz_rel(x, y, z)
def get_joints_from_xyz_abs(x, y, z, rx=0, ry=-math.pi/2, rz=math.pi/2):
joints = get_joints_from_xyz_rel(x, y, z, rx, ry, rz)
# Joint offsets
# Base, Shoulder, Elbow, Wrist
inverse = [1, -1, 1, 1]
offsets = [0, 0, 0, -math.pi/2]
inverse = [1, -1, 1, 1, 1, 1]
offsets = [-math.pi/2, 0, 0, -math.pi/2, 0, 0]
# Return adjusted joint positions
return [o+j*i for j, o, i in zip(joints, offsets, inverse)]
@ -262,7 +292,7 @@ if __name__ == "__main__":
0.40002172976662786,
0,
-3.14152741295329,
0]
math.radians(62)]
# time.sleep(.5)
@ -290,14 +320,21 @@ if __name__ == "__main__":
# set_pos_abs(*home_pose)
angles = get_joints_from_xyz_abs(0.3, 0.3, 0.3)
rob.movej([*angles, *rob.getj()[4:]], acc=1, vel=1)
angles = get_joints_from_xyz_abs(-0.3, -0.3, 0.7)
rob.movej([*angles, *rob.getj()[4:]], acc=1, vel=1)
angles = get_joints_from_xyz_abs(-0.7, 0, 0)
rob.movej(angles, acc=2, vel=2)
# joints = []
# for i in np.linspace(-0.3, -0.7, 50):
# joints.append(get_joints_from_xyz_abs(i, 0, 0))
# rob.movejs(joints, acc=2, vel=2)
# time.sleep(5)
# angles = get_joints_from_xyz_abs(0, -0.6, 0)
# rob.movej(angles, acc=2, vel=2)
angles = get_joints_from_xyz_abs(-0.3, 0.4, 0.2)
rob.movej([*angles, *rob.getj()[4:]], acc=1, vel=1)
# set_pos_abs(*p1)
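The r-z portion of the solver finds the shoulder/elbow/wrist angles by root-finding rather than in closed form. A standalone numeric sanity check, with the limb lengths and residual equations copied from get_joints_from_xyz_rel above (target r, z values are illustrative):

import math
from scipy.optimize import fsolve

l1, l2, l3 = 0.425, 0.39225, 0.1267  # limb lengths from the diff above
offset_z = 0.14                      # tool z offset
r, z = 0.5, 0.2                      # sample reachable target

def inv_kin_r_z(p):
    a, b, c = p
    return (l1*math.cos(a) + l2*math.cos(a-b) + l3*math.cos(a-b-c) - r,
            l1*math.sin(a) + l2*math.sin(a-b) - l3*math.sin(a-b-c) - (l3*math.sin(a-b-c)) - (z + offset_z),
            a-b-c)  # constrain the wrist angle to zero

sol = fsolve(inv_kin_r_z, (math.pi/2, math.pi/2, 0))
print([round(float(v), 9) for v in inv_kin_r_z(sol)])  # ~[0, 0, 0] if converged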

util.py

@ -70,7 +70,7 @@ def fprint(msg, settings = None, sendqueue = None):
except Exception as e:
try:
print('[????:' + frm.function + ']:', str(msg))
print('[util:fprint]: ' + str(e))
#print('[util:fprint]: ' + str(e))
except:
print('[????]:', str(msg))