27 Commits

Author SHA1 Message Date
9b1b92e21d Re-add https:// format, needed for most devices 2024-03-21 19:48:46 -05:00
ee0f8f4250 Remove cairosvg VERSION patch 2024-03-21 19:41:46 -05:00
9c9435570b Remove cairosvg as it doesn't have pip-only lib. Use png instead. 2024-03-21 19:39:43 -05:00
1bf10f7349 Update label generator to include belden logo, use QR code, URL matching 2024-03-21 18:57:59 -05:00
3c8d6c7ad3 Remove debug print 2024-03-20 16:31:29 -05:00
cbe7225fc9 Fix bug with part name in query_search 2024-03-20 16:27:43 -05:00
44efc4006e Correct flip rz orientation 2024-03-19 17:48:32 -05:00
218303e92b Try different moves 2024-03-17 22:01:34 -05:00
e5d3f87b5c Go to home position first 2024-03-17 20:49:39 -05:00
2ab1d0dbb3 Add flip around mode to fit edge slots 2024-03-17 20:48:26 -05:00
1338c3f440 Added tool z rotation for angled gripper 2024-03-17 20:07:25 -05:00
83b077b4df Make limb lengths and offsets global 2024-03-17 19:59:17 -05:00
f16242f5be Re-merge calculate_theta into get_joints_from_xyz_rel 2024-03-17 19:53:49 -05:00
2f28a01b7c Add basic kinematics for gripper angle 2024-03-17 19:50:04 -05:00
fb85a56d47 Use movejs to go to all cable positions 2024-03-17 16:21:57 -05:00
bec0c63763 Fix ur5_control bugs, fully working IK!! Thanks Nadeem 2024-03-17 16:13:53 -05:00
4ae30b82a0 Cleaned up notebook 2024-03-17 15:43:06 -05:00
4bc3e30116 Fixed edge cases when calculating base angle 2024-03-17 01:01:39 -05:00
6053d1b4ec Update Dockerfile with runtime dependencies, copy only necessary files 2024-03-15 21:02:59 -05:00
f43f9cda2f Catch RuntimeError when GS not installed 2024-03-15 20:41:01 -05:00
992040e812 Add basic label generator app, add return values to parsing 2024-03-15 20:31:37 -05:00
5502a5069d All real samples ordered have working spec lookup 2024-03-14 22:09:12 -05:00
fc2af34450 Add Alphawire datasheet fallback 2024-03-14 22:06:13 -05:00
39723ec442 Update Alphawire table parsing 2024-03-14 21:35:28 -05:00
25ceb6c133 Merge branch 'main' of https://git.myitr.org/Jukebox/jukebox-software 2024-03-14 01:49:19 -05:00
56451d3e5c Inverse kinematic update to account for base rotation 2024-03-14 01:49:15 -05:00
53638f72e1 Merge branch 'dthomas_meilisearch' 2024-03-12 16:15:13 -05:00
20 changed files with 1152 additions and 307 deletions

6
.gitignore vendored

@ -15,4 +15,8 @@ output.mp4
# log files # log files
output.log output.log
# images # images
*.png map*.png
# Built app
build
# Generated label images
labels

Dockerfile

@ -1,7 +1,9 @@
FROM python:latest FROM python:3.11-slim
RUN apt-get update && apt-get install -y libgl1-mesa-glx ghostscript && apt-get clean && rm -rf /var/lib/apt/lists # Get runtime dependencies
COPY . . # glx for OpenCV, ghostscript for datasheet PDF rendering, zbar for barcode scanning, git for cloning repos
RUN apt-get update && apt-get install -y libgl1-mesa-glx ghostscript libzbar0 git && apt-get clean && rm -rf /var/lib/apt/lists
COPY *.py *.yml *.sh *.txt *.html static templates ./
#COPY config-server.yml config.yml #COPY config-server.yml config.yml
RUN pip3 install -r requirements.txt RUN pip3 install -r requirements.txt

BIN
GothamCond-Medium.otf Normal file

Binary file not shown.

BIN
belden-logo-superhires.png Normal file

Binary file not shown.

After: Size: 448 KiB

41
belden-logo.svg Normal file

@ -0,0 +1,41 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 25.2.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 446.6 151.4" style="enable-background:new 0 0 446.6 151.4;" xml:space="preserve">
<style type="text/css">
.st0{fill:#004990;}
</style>
<g>
<g>
<path class="st0" d="M21.2,32.1h-1.4v87.2H55c20.3,0,32-9.1,32-24.9c0-12.3-5.6-19.7-15.8-22.1c5.1-3.6,7.8-9.1,7.8-16.9
c0-15.5-8.9-23.3-26.5-23.3H21.2z M44.7,51.6c7.4,0,11.4,1.1,11.4,6.8c0,4.8-3.1,6.8-10.5,6.8c0,0-0.6,0-1.1,0
c0-2.1,0-11.5,0-13.6C44.5,51.6,44.7,51.6,44.7,51.6z M45.4,84.4l1.8,0c4.6,0,10.3-0.1,13,2.6c1.2,1.2,1.8,2.9,1.8,5.2
c0,2-0.6,3.5-1.7,4.7c-3,3-9.2,2.9-13.8,2.9c0,0-1.4,0-2.1,0c0-2.1,0-13.3,0-15.4C44.9,84.4,45.4,84.4,45.4,84.4z"/>
<g>
<path class="st0" d="M139.8,32.1H90.4v87.2h50.8V98c0,0-23.7,0-26.1,0c0-2,0-9.8,0-11.8c2.4,0,24.8,0,24.8,0V64.8
c0,0-22.3,0-24.8,0c0-2,0-9.4,0-11.4c2.5,0,26.1,0,26.1,0V32.1H139.8z"/>
</g>
<g>
<path class="st0" d="M169.3,32.1H146v87.2h51V98c0,0-23.9,0-26.3,0c0-2.6,0-65.9,0-65.9H169.3z"/>
</g>
<g>
<path class="st0" d="M332.4,32.1H283v87.2h50.8V98c0,0-23.7,0-26.1,0c0-2,0-9.8,0-11.8c2.4,0,24.8,0,24.8,0V64.8
c0,0-22.3,0-24.8,0c0-2,0-9.4,0-11.4c2.5,0,26.1,0,26.1,0V32.1H332.4z"/>
</g>
<g>
<path class="st0" d="M424.6,32.1h-23.3c0,0,0,43,0,49.3c-4-5.1-38.4-49.3-38.4-49.3h-24v87.2h24.7c0,0,0-43.1,0-49.5
c4,5.1,38.4,49.5,38.4,49.5h24V32.1H424.6z"/>
</g>
<g>
<g>
<path class="st0" d="M233.8,32.1h-32.5v87.2h32.5c24.4,0,44.3-19.6,44.3-43.6C278.1,51.7,258.2,32.1,233.8,32.1z M226,53.5
c13.6,0.3,22,8.7,22,22.2c0,13.6-8.2,21.9-22,22.2V53.5z M231.5,101.3c12.5-3,20.7-10.7,20.8-25.7c-0.2-15-8.3-22.7-20.8-25.7
c14,2.1,25,9.5,25,25.6v0.1C256.5,91.8,245.5,99.2,231.5,101.3z M260.2,75.6c-0.2-18-10-29.8-24.9-33.3
c16.7,2.5,29.6,14,29.6,33.3c0,0,0,0,0,0.1h0c0,19.3-13,30.7-29.7,33.2C250.1,105.3,260,93.6,260.2,75.6z M240.3,115.7
c16.7-4.7,28.3-19.2,28.5-39.9v-0.3c-0.2-20.7-11.9-35.1-28.5-39.8c19,3.9,33.5,17.7,33.6,39.7v0.4
C273.9,97.9,259.4,111.8,240.3,115.7z"/>
</g>
</g>
</g>
</g>
</svg>

After: Size: 2.2 KiB

get_specs.py

@ -27,6 +27,7 @@ def check_internet(url='https://belden.com', timeout=5):
def query_search(partnum, source): def query_search(partnum, source):
fprint("Searching for " + partnum)
if source == "Belden": if source == "Belden":
token_url = "https://www.belden.com/coveo/rest/token?t=" + str(int(time.time())) token_url = "https://www.belden.com/coveo/rest/token?t=" + str(int(time.time()))
with requests.get(token_url) as r: with requests.get(token_url) as r:
@ -52,15 +53,15 @@ def query_search(partnum, source):
a = json.loads(a) a = json.loads(a)
idx = -1 idx = -1
name = "" name = ""
for partid in range(len(a["results"])): for partid in range(len(a["results"])-1, -1, -1):
name = a["results"][partid]["title"] name = a["results"][partid]["title"]
if name != partnum: if name != partnum:
if name.find(partnum) >= 0: if name.find(partnum) >= 0:
idx = partid idx = partid
break #break
elif partnum.find(name) >= 0: elif partnum.find(name) >= 0:
idx = partid idx = partid
break #break
else: else:
idx = partid idx = partid
@ -69,7 +70,9 @@ def query_search(partnum, source):
if idx < 0: if idx < 0:
fprint("Could not find part in API: " + partnum) fprint("Could not find part in API: " + partnum)
return False return False
fprint("Search result found: result " + str(idx) + ", for ID " + name)
name = a["results"][idx]["title"]
#fprint("Search result found: result " + str(idx) + ", for ID " + name)
#urlname = a["results"][0]["raw"]["catalogitemurlname"] #urlname = a["results"][0]["raw"]["catalogitemurlname"]
img = a["results"][idx]["raw"]["catalogitemimageurl"] img = a["results"][idx]["raw"]["catalogitemimageurl"]
img = img[0:img.index("?")] img = img[0:img.index("?")]
@ -92,7 +95,7 @@ def query_search(partnum, source):
#print(out) #print(out)
return out return out
except: except:
print("falied to search with API. Falling back to datasheet lookup.") print("Failed to search with API. Falling back to datasheet lookup.")
return False return False
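
The result-matching change above reads more clearly in isolation: scan the results from last to first without breaking, so the lowest-numbered match wins, then re-read the title from that index afterwards. A minimal sketch with a made-up results list (the real data comes from the Coveo search response):

results = [{"title": "10GXS13"}, {"title": "10GXS13 500FT"}, {"title": "10GXS12"}]
partnum = "10GXS13"
idx = -1
for partid in range(len(results) - 1, -1, -1):
    name = results[partid]["title"]
    if name != partnum:
        # partial match (either direction); keep scanning toward index 0
        if name.find(partnum) >= 0 or partnum.find(name) >= 0:
            idx = partid
    else:
        # exact match
        idx = partid
name = results[idx]["title"] if idx >= 0 else None
print(idx, name)  # -> 0 10GXS13
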
@ -112,12 +115,14 @@ def query_search(partnum, source):
r = requests.get(url=alphaurl) r = requests.get(url=alphaurl)
data = r.json() data = r.json()
output = dict() output = dict()
#print(data) #print(data["Results"])
try: try:
if data["Count"] > 0: if data["Count"] > 0:
#print(data["Results"][0]["Url"]) #print(data["Results"][0]["Url"])
for result in data["Results"]: for result in data["Results"]:
if result["Url"].split("/")[-1] == partnum: #print(result["Url"])
if result["Url"].split("/")[-1] == partnum.replace("-", "").replace("/", "_"):
#print(partnum) #print(partnum)
#print(result["Html"]) #print(result["Html"])
try: try:
@ -133,14 +138,18 @@ def query_search(partnum, source):
dsidx = result["Html"].index("<a href=\"/disteAPI/") + 9 dsidx = result["Html"].index("<a href=\"/disteAPI/") + 9
dsidx2 = result["Html"].index(partnum, dsidx) + len(partnum) dsidx2 = result["Html"].index(partnum, dsidx) + len(partnum)
output["datasheet"] = "https://www.alphawire.com" + result["Html"][dsidx:dsidx2] output["datasheet"] = "https://www.alphawire.com" + result["Html"][dsidx:dsidx2]
output["partnum"] = partnum output["partnum"] = partnum.replace("/", "_") #.replace("-", "").replace("/", "_")
#"test".index() #
# "test".index()
#print(output) #print(output)
return output return output
except: except:
print("Failed to search with API. Falling back to datasheet lookup.")
return False return False
print("Failed to search with API. Falling back to datasheet lookup.")
return False return False
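
A small sketch of the Alphawire matching rule introduced above: product-page URLs use a sanitized slug (dashes stripped, "/" mapped to "_"), while the search itself runs on the raw part number, and the stored part number only has "/" replaced. The URL shape below is an assumption for illustration, not taken from the live API:

partnum = "FIT-221-1/4"
url = "https://www.alphawire.com/en/products/FIT2211_4"  # hypothetical product-page URL
slug = url.split("/")[-1]
print(slug == partnum.replace("-", "").replace("/", "_"))  # -> True, so this result matches
print(partnum.replace("/", "_"))  # stored as "FIT-221-1_4" for the output directory and label
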
@ -150,14 +159,19 @@ def touch(path):
def get_multi(partnums, delay=0.25): def get_multi(partnums, delay=0.25, dir="cables/", cache=True):
with alive_bar(len(partnums) * 2, dual_line=True, calibrate=30, bar="classic2", spinner="classic") as bar: with alive_bar(len(partnums) * 2, dual_line=True, calibrate=30, bar="classic2", spinner="classic") as bar:
failed = list()
def _try_download_datasheet(partnum, output_dir): # Guess datasheet URL actualpartnums = list()
def _try_download_datasheet(partnum, output_dir, dstype): # Guess datasheet URL
global bartext global bartext
sanitized_name = partnum.replace(" ", "") if dstype == "Belden":
url = "https://catalog.belden.com/techdata/EN/" + sanitized_name + "_techdata.pdf" sanitized_name = partnum.replace(" ", "")
url = "https://catalog.belden.com/techdata/EN/" + sanitized_name + "_techdata.pdf"
elif dstype == "Alphawire":
# Alphawire Datasheet URLs do not use a sanitized part number (but product pages do)
url = "https://www.alphawire.com/disteAPI/SpecPDF/DownloadProductSpecPdf?productPartNumber=" + partnum
#fprint(url) #fprint(url)
try: try:
with requests.get(url, stream=True) as r: with requests.get(url, stream=True) as r:
@ -167,8 +181,9 @@ def get_multi(partnums, delay=0.25):
if r.status_code == 404: if r.status_code == 404:
return False return False
os.makedirs(output_dir, exist_ok=True) os.makedirs(output_dir, exist_ok=True)
bartext = ""
with open(output_dir + "/datasheet.pdf", 'wb') as f: with open(output_dir + "/datasheet.pdf", 'wb') as f:
for chunk in r.iter_content(chunk_size=131072): for chunk in r.iter_content(chunk_size=65536):
# If you have chunk encoded response uncomment if # If you have chunk encoded response uncomment if
# and set chunk_size parameter to None. # and set chunk_size parameter to None.
#if chunk: #if chunk:
@ -195,8 +210,9 @@ def get_multi(partnums, delay=0.25):
if r.status_code == 404: if r.status_code == 404:
return False return False
os.makedirs(output_dir, exist_ok=True) os.makedirs(output_dir, exist_ok=True)
bartext = ""
with open(output_dir + "/datasheet.pdf", 'wb') as f: with open(output_dir + "/datasheet.pdf", 'wb') as f:
for chunk in r.iter_content(chunk_size=131072): for chunk in r.iter_content(chunk_size=65536):
# If you have chunk encoded response uncomment if # If you have chunk encoded response uncomment if
# and set chunk_size parameter to None. # and set chunk_size parameter to None.
#if chunk: #if chunk:
@ -221,8 +237,9 @@ def get_multi(partnums, delay=0.25):
if r.status_code == 404: if r.status_code == 404:
return False return False
os.makedirs(output_dir, exist_ok=True) os.makedirs(output_dir, exist_ok=True)
bartext = ""
with open(output_dir + "/part-hires." + url.split(".")[-1], 'wb') as f: with open(output_dir + "/part-hires." + url.split(".")[-1], 'wb') as f:
for chunk in r.iter_content(chunk_size=131072): for chunk in r.iter_content(chunk_size=65536):
# If you have chunk encoded response uncomment if # If you have chunk encoded response uncomment if
# and set chunk_size parameter to None. # and set chunk_size parameter to None.
#if chunk: #if chunk:
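
The download helpers above share the same streaming pattern; condensed into one standalone function it looks like the sketch below (chunk size reduced from 128 KiB to 64 KiB in this change). The URL and output directory in the usage comment are placeholders.

import os
import requests

def download_pdf(url, output_dir):
    # Stream the response and write it out in 64 KiB chunks; treat 404 as "not found".
    with requests.get(url, stream=True) as r:
        if r.status_code == 404:
            return False
        os.makedirs(output_dir, exist_ok=True)
        with open(os.path.join(output_dir, "datasheet.pdf"), "wb") as f:
            for chunk in r.iter_content(chunk_size=65536):
                f.write(chunk)
    return True

# e.g. the guessed Belden URL form used by _try_download_datasheet:
# download_pdf("https://catalog.belden.com/techdata/EN/10GXS13_techdata.pdf", "cables/10GXS13")
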
@ -245,8 +262,9 @@ def get_multi(partnums, delay=0.25):
fprint("Parsing Datasheet contents of " + partnum) fprint("Parsing Datasheet contents of " + partnum)
bar.text = "Parsing Datasheet contents of " + partnum + ".pdf..." bar.text = "Parsing Datasheet contents of " + partnum + ".pdf..."
read_datasheet.parse(path, output_dir, partnum, dstype) out = read_datasheet.parse(path, output_dir, partnum, dstype)
bar(skipped=False) bar(skipped=False)
return out
else: else:
fprint("Datasheet already parsed for " + partnum) fprint("Datasheet already parsed for " + partnum)
bar.text = "Datasheet already parsed for " + partnum + ".pdf" bar.text = "Datasheet already parsed for " + partnum + ".pdf"
@ -258,57 +276,74 @@ def get_multi(partnums, delay=0.25):
bar(skipped=False) bar(skipped=False)
fprint("Parsing Datasheet contents of " + partnum) fprint("Parsing Datasheet contents of " + partnum)
bar.text = "Parsing Datasheet contents of " + partnum + ".pdf..." bar.text = "Parsing Datasheet contents of " + partnum + ".pdf..."
read_datasheet.parse(path, output_dir, partnum, dstype) out = read_datasheet.parse(path, output_dir, partnum, dstype)
bar(skipped=False) bar(skipped=False)
return out
def run_search(partnum): def run_search(partnum):
output_dir = "cables/" + partnum partnum = partnum.replace("%20", " ") # undo URL encoding
oldpartnum = partnum
if dstype == "Alphawire":
# For alphawire, sanitize the part number for only the final result check, because their API is very wierd
# For the actual search, it must be un-sanitized
partnum = partnum.replace("/","_")
output_dir = dir + partnum
path = output_dir + "/datasheet.pdf" path = output_dir + "/datasheet.pdf"
bartext = "Downloading files for part " + partnum bartext = "Downloading files for part " + partnum
bar.text = bartext bar.text = bartext
# partnum = oldpartnum.replace("_","/")
if (not os.path.exists(output_dir + "/found_part_hires")) or not (os.path.exists(path) and os.path.getsize(path) > 1): returnval = [partnum, dstype, False, False]
if (not os.path.exists(output_dir + "/found_part_hires")) or not (os.path.exists(path) and os.path.getsize(path) > 1) or not cache:
# Use query # Use query
search_result = query_search(partnum, dstype) search_result = query_search(partnum, dstype)
# Try to use belden.com search # Try to use belden.com search
if search_result is not False: if search_result is not False:
# Download high resolution part image if available and needed # Download high resolution part image if available and needed
#oldpartnum = partnum
partnum = search_result["partnum"] partnum = search_result["partnum"]
output_dir = "cables/" + partnum returnval = [partnum, dstype, False, False]
output_dir = dir + partnum
path = output_dir + "/datasheet.pdf" path = output_dir + "/datasheet.pdf"
bartext = "Downloading files for part " + partnum bartext = "Downloading files for part " + partnum
bar.text = bartext bar.text = bartext
if not os.path.exists(output_dir + "/found_part_hires"): if not os.path.exists(output_dir + "/found_part_hires") or not cache:
if _download_image(search_result["image"], output_dir): if _download_image(search_result["image"], output_dir):
fprint("Downloaded hi-res part image for " + partnum) fprint("Downloaded hi-res part image for " + partnum)
returnval = [partnum, dstype, True, False]
touch(output_dir + "/found_part_hires") touch(output_dir + "/found_part_hires")
else: else:
fprint("Using cached hi-res part image for " + partnum) fprint("Using cached hi-res part image for " + partnum)
# Download datasheet from provided URL if needed # Download datasheet from provided URL if needed
if os.path.exists(path) and os.path.getsize(path) > 1: if os.path.exists(path) and os.path.getsize(path) > 1 and cache:
__use_cached_datasheet(partnum, path, output_dir, dstype) out = __use_cached_datasheet(partnum, path, output_dir, dstype)
returnval = [partnum, dstype, True, out]
elif _download_datasheet(search_result["datasheet"], output_dir) is not False: elif _download_datasheet(search_result["datasheet"], output_dir) is not False:
__downloaded_datasheet(partnum, path, output_dir, dstype) out = __downloaded_datasheet(partnum, path, output_dir, dstype)
returnval = [partnum, dstype, True, out]
elif os.path.exists(path) and os.path.getsize(path) > 1: elif os.path.exists(path) and os.path.getsize(path) > 1 and cache:
__use_cached_datasheet(partnum, path, output_dir, dstype) out = __use_cached_datasheet(partnum, path, output_dir, dstype)
returnval = [partnum, dstype, True, out]
# If search fails, and we don't already have the datasheet, guess datasheet URL and skip the hires image download # If search fails, and we don't already have the datasheet, guess datasheet URL and skip the hires image download
elif _try_download_datasheet(partnum, output_dir) is not False: elif _try_download_datasheet(partnum, output_dir, dstype) is not False:
__downloaded_datasheet(partnum, path, output_dir, dstype) out = __downloaded_datasheet(partnum, path, output_dir, dstype)
returnval = [partnum, dstype, False, out]
# Failed to download with search or guess :( # Failed to download with search or guess :(
else: else:
return False return False
return True actualpartnums.append(returnval)
return returnval
# We already have a hi-res image and the datasheet - perfect! # We already have a hi-res image and the datasheet - perfect!
else: else:
fprint("Using cached hi-res part image for " + partnum) fprint("Using cached hi-res part image for " + partnum)
__use_cached_datasheet(partnum, path, output_dir, dstype) out = __use_cached_datasheet(partnum, path, output_dir, dstype)
returnval = [partnum, dstype, False, out]
return True return True
for fullpartnum in partnums: for fullpartnum in partnums:
@ -339,7 +374,7 @@ def get_multi(partnums, delay=0.25):
if not success: if not success:
fprint("Failed to download datasheet for part " + partnum) fprint("Failed to download datasheet for part " + partnum)
bar.text = "Failed to download datasheet for part " + partnum bar.text = "Failed to download datasheet for part " + partnum
failed.append(partnum) failed.append((partnum, dstype))
bar(skipped=True) bar(skipped=True)
bar(skipped=True) bar(skipped=True)
time.sleep(delay) time.sleep(delay)
@ -347,10 +382,10 @@ def get_multi(partnums, delay=0.25):
if len(failed) > 0: if len(failed) > 0:
fprint("Failed to download:") fprint("Failed to download:")
for partnum in failed: for partnum in failed:
fprint(partnum) fprint(partnum[1] + " " + partnum[0])
return False # Go to manual review upload page return False, actualpartnums # Go to manual review upload page
else: else:
return True # All cables downloaded; we are good to go return True, actualpartnums # All cables downloaded; we are good to go
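
get_multi now returns a (status, results) pair instead of a bare boolean, where each result is [partnum, dstype, have_hires_image, parsed_specs]. A usage sketch mirroring how label_generator.py consumes it; the part numbers and temp directory are illustrative:

from get_specs import get_multi

status, results = get_multi(["BL10GXS13", "AW3050"], delay=0.1, dir="temp/demo/", cache=False)
for partnum, dstype, have_hires, specs in results:
    print(dstype, partnum,
          "hi-res image" if have_hires else "no hi-res image",
          "specs parsed" if specs else "no specs")
if not status:
    print("Some parts failed to download; manual review needed.")
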
@ -373,11 +408,12 @@ if __name__ == "__main__":
# ] # ]
partnums = [ partnums = [
# Actual cables in Jukebox # Actual cables in Jukebox
"AW86104CY", "AW86104CY",
"AW3050", "AW3050",
"AW6714", "AW6714",
"AW1172C", "AW1172C",
"AW2211/4", "AWFIT-221-1/4",
"BLTF-1LF-006-RS5N", "BLTF-1LF-006-RS5N",
"BLTF-SD9-006-RI5N", "BLTF-SD9-006-RI5N",
@ -405,7 +441,7 @@ if __name__ == "__main__":
# Some ones I picked, including some invalid ones # Some ones I picked, including some invalid ones
"BL10GXS12", "BL10GXS12",
"BLRST 5L-RKT 5L-949", "BLRST%205L-RKT%205L-949",
"BL10GXS13", "BL10GXS13",
"BL10GXW12", "BL10GXW12",
"BL10GXW13", "BL10GXW13",
@ -416,12 +452,17 @@ if __name__ == "__main__":
"BLFISD012R9", "BLFISD012R9",
"BLFDSD012A9", "BLFDSD012A9",
"BLFSSL024NG", "BLFSSL024NG",
"BLFISX006W0", "BLFISX006W0", # datasheet only
"BLFISX00103", "BLFISX00103", # invalid
"BLC6D1100007" "BLC6D1100007" # invalid
] ]
#query_search("86104CY", "Alphawire") #print(query_search("TT-SLG-024-HTNN", "Belden"))
from label_generator import gen_label
gen_label("BLTF-SD9-006-RI5")
gen_label("BLRA500P")
gen_label("AWFIT-221-1_4")
gen_label("BLRST 5L-RKT 5L-949")
get_multi(partnums, 0.25) get_multi(partnums, 0.25)
#query_search("10GXS13", "Belden") #query_search("10GXS13", "Belden")
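
The part numbers in the lists above carry a two-letter brand prefix ("BL" for Belden, "AW" for Alphawire), the same mapping label_generator.py uses for imgstr. A minimal sketch of that convention (assumed helper, not code taken from get_multi):

PREFIXES = {"BL": "Belden", "AW": "Alphawire"}

def split_prefix(fullpartnum):
    # e.g. "BL10GXS13" -> ("Belden", "10GXS13")
    return PREFIXES.get(fullpartnum[:2]), fullpartnum[2:]

print(split_prefix("BL10GXS13"))      # -> ('Belden', '10GXS13')
print(split_prefix("AWFIT-221-1/4"))  # -> ('Alphawire', 'FIT-221-1/4')
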

BIN
gs10030w64.exe Normal file

Binary file not shown.

View File

@ -1 +0,0 @@
<html> <head> <title>RGB Controller Configuration</title> <style> body { background-color: #cccccc; font-family: Arial, Helvetica, Sans-Serif; Color: #000088; } </style> </head> <body> <h1>RGB Controller Configuration</h1><br> <h2>Set IP address</h2> Needs reboot to apply<br> Set to 0.0.0.0 for DHCP <form method="post" enctype="application/x-www-form-urlencoded" action="/postform/"> <input type="text" name="ipa" value="0" size="3">. <input type="text" name="ipb" value="0" size="3">. <input type="text" name="ipc" value="0" size="3">. <input type="text" name="ipd" value="0" size="3"> <input type="submit" value="Set"> </form><br> <h2>Set Hostname</h2> Needs reboot to apply<br> Max 64 characters <form method="post" enctype="application/x-www-form-urlencoded" action="/postform/"> <input type="text" name="hostname" value="RGBController" size="20"> <input type="submit" value="Set"> </form><br> <h2>DMX512 Start Universe</h2> Applies immediately<br> Between (inclusive) 1-65000 <form method="post" enctype="application/x-www-form-urlencoded" action="/postform/"> <input type="text" name="universe" value="1" size="5"> <input type="submit" value="Set"> </form><br> <form method="post" enctype="application/x-www-form-urlencoded" action="/postform/"> <input type="submit" name="reboot" value="Reboot"> </form><br> </body></html>

File diff suppressed because one or more lines are too long

106
label_generator.py Executable file

@ -0,0 +1,106 @@
#!/usr/bin/env python3
from get_specs import get_multi
import sys
import uuid
import os
import signal
from PIL import Image
from label_image import generate_code
def input_cable():
print("")
print("Use the full part number. Spaces, special characters are allowed. Do not specify the brand.")
print("")
print("Please enter a part number and press enter:")
inputnum = input("").strip()
if len(inputnum) < 2:
killall_signal(0, 0)
print("Input part number:", inputnum)
print("Searching databases for cables...")
# Search both AW and BL sites
status, output = get_multi(["BL"+inputnum, "AW"+inputnum], delay=0.1, dir="temp/" + str(uuid.uuid4()) + "/", cache=False)
print("")
if len(output) > 1:
for i in output:
print(i[1], i[0])
print("Multiple brands with the same part number! Please type \"b\" for the Belden part number or \"a\" for the Alphawire cable")
inputbrand = input()
if inputbrand == "b":
output = [output[0]]
elif inputbrand == "a":
output = [output[1]]
elif len(output) == 0:
print("No results found for part number", inputnum + ". Please try again with a different part number.")
return
output = output[0]
print("")
if output[2] and output[3]:
print("Cable result found -",output[1], output[0], "with high-quality image and full specs")
elif output[2]:
print("Cable result found -",output[1], output[0], "with high-quality image and no specs")
elif output[3]:
print("Cable result found -",output[1], output[0], "with no/low quality image and full specs")
else:
print("Cable result found -",output[1], output[0], "with no/low quality image and no specs")
print("")
if not output[3]:
print("Unable to decode cable specs. Please try again with a different part number.")
return False
else:
print("")
print("*** Cable details confirmed. Creating label...")
print("")
img = None
imgstr = ""
if output[1] == "Belden":
imgstr = "BL"
elif output[1] == "Alphawire":
imgstr = "AW"
gen_label(imgstr + output[0])
#img = generate_code(imgstr + output[0])
#os.makedirs("labels", exist_ok=True)
#img.save("labels/" + imgstr + output[0] + ".png")
def gen_label(partnum, path="labels"):
img = generate_code(partnum)
os.makedirs(path, exist_ok=True)
img.save(path + "/" + partnum + ".png")
def delete_folder(path):
# Check if the path is a directory
if not os.path.isdir(path):
return
# List all files and directories in the path
for filename in os.listdir(path):
file_path = os.path.join(path, filename)
# If it's a directory, recursively call this function
if os.path.isdir(file_path):
delete_folder(file_path)
else:
# If it's a file, remove it
os.remove(file_path)
# After removing all contents, remove the directory itself
os.rmdir(path)
def killall_signal(a,b):
delete_folder("temp")
os.kill(os.getpid(), 9) # dirty kill of self
if __name__ == "__main__":
signal.signal(signal.SIGINT, killall_signal)
signal.signal(signal.SIGTERM, killall_signal)
print("Welcome to the Jukebox cable utility. This tool will allow you to verify Belden & Alphawire cable part numbers and create labels for samples in the Jukebox.")
print("This tool requires internet access to download cable specifications and verify part numbers.")
#print("Use Ctrl+C to exit.")
while True:
delete_folder("temp")
input_cable()
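
A short usage sketch for the new gen_label helper: it renders the label via label_image.generate_code and writes <path>/<partnum>.png (default path "labels"). Callers pass the sanitized part number (e.g. "_" instead of "/"), since it is used directly as the file name.

from label_generator import gen_label

gen_label("BL10GXS13")                    # -> labels/BL10GXS13.png
gen_label("AWFIT-221-1_4", path="labels") # "/" already replaced with "_" by the caller
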

319
label_image.py Executable file

@ -0,0 +1,319 @@
#!/usr/bin/env python3
from util import fprint
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
#import cv2
import numpy as np
from util import find_data_file
import segno
import io
#import cairosvg
#import math
# Copied from http://en.wikipedia.org/wiki/Code_128
# Value Weights 128A 128B 128C
CODE128_CHART = """
0 212222 space space 00
1 222122 ! ! 01
2 222221 " " 02
3 121223 # # 03
4 121322 $ $ 04
5 131222 % % 05
6 122213 & & 06
7 122312 ' ' 07
8 132212 ( ( 08
9 221213 ) ) 09
10 221312 * * 10
11 231212 + + 11
12 112232 , , 12
13 122132 - - 13
14 122231 . . 14
15 113222 / / 15
16 123122 0 0 16
17 123221 1 1 17
18 223211 2 2 18
19 221132 3 3 19
20 221231 4 4 20
21 213212 5 5 21
22 223112 6 6 22
23 312131 7 7 23
24 311222 8 8 24
25 321122 9 9 25
26 321221 : : 26
27 312212 ; ; 27
28 322112 < < 28
29 322211 = = 29
30 212123 > > 30
31 212321 ? ? 31
32 232121 @ @ 32
33 111323 A A 33
34 131123 B B 34
35 131321 C C 35
36 112313 D D 36
37 132113 E E 37
38 132311 F F 38
39 211313 G G 39
40 231113 H H 40
41 231311 I I 41
42 112133 J J 42
43 112331 K K 43
44 132131 L L 44
45 113123 M M 45
46 113321 N N 46
47 133121 O O 47
48 313121 P P 48
49 211331 Q Q 49
50 231131 R R 50
51 213113 S S 51
52 213311 T T 52
53 213131 U U 53
54 311123 V V 54
55 311321 W W 55
56 331121 X X 56
57 312113 Y Y 57
58 312311 Z Z 58
59 332111 [ [ 59
60 314111 \ \ 60
61 221411 ] ] 61
62 431111 ^ ^ 62
63 111224 _ _ 63
64 111422 NUL ` 64
65 121124 SOH a 65
66 121421 STX b 66
67 141122 ETX c 67
68 141221 EOT d 68
69 112214 ENQ e 69
70 112412 ACK f 70
71 122114 BEL g 71
72 122411 BS h 72
73 142112 HT i 73
74 142211 LF j 74
75 241211 VT k 75
76 221114 FF l 76
77 413111 CR m 77
78 241112 SO n 78
79 134111 SI o 79
80 111242 DLE p 80
81 121142 DC1 q 81
82 121241 DC2 r 82
83 114212 DC3 s 83
84 124112 DC4 t 84
85 124211 NAK u 85
86 411212 SYN v 86
87 421112 ETB w 87
88 421211 CAN x 88
89 212141 EM y 89
90 214121 SUB z 90
91 412121 ESC { 91
92 111143 FS | 92
93 111341 GS } 93
94 131141 RS ~ 94
95 114113 US DEL 95
96 114311 FNC3 FNC3 96
97 411113 FNC2 FNC2 97
98 411311 ShiftB ShiftA 98
99 113141 CodeC CodeC 99
100 114131 CodeB FNC4 CodeB
101 311141 FNC4 CodeA CodeA
102 411131 FNC1 FNC1 FNC1
103 211412 StartA StartA StartA
104 211214 StartB StartB StartB
105 211232 StartC StartC StartC
106 2331112 Stop Stop Stop
""".split()
VALUES = [int(value) for value in CODE128_CHART[0::5]]
WEIGHTS = dict(zip(VALUES, CODE128_CHART[1::5]))
CODE128A = dict(zip(CODE128_CHART[2::5], VALUES))
CODE128B = dict(zip(CODE128_CHART[3::5], VALUES))
CODE128C = dict(zip(CODE128_CHART[4::5], VALUES))
for charset in (CODE128A, CODE128B):
charset[' '] = charset.pop('space')
def generate_code(data, show=False, check=False):
#img = code128_image(data)
img = qr_image(data)
if show:
img.show()
#img.show()
#print(data)
if(check):
from pyzbar.pyzbar import decode
from pyzbar.pyzbar import ZBarSymbol
print(decode(img, symbols=[ZBarSymbol.CODE128])[0].data.decode('ascii'))
#if(decode(img, symbols=[ZBarSymbol.CODE128])[0].data.decode('ascii') == data):
# return True
#else:
# return False
return img
def code128_format(data):
"""
Generate an optimal barcode from ASCII text
"""
text = str(data)
pos = 0
length = len(text)
# Start Code
if text[:2].isdigit():
charset = CODE128C
codes = [charset['StartC']]
else:
charset = CODE128B
codes = [charset['StartB']]
# Data
while pos < length:
if charset is CODE128C:
if text[pos:pos+2].isdigit() and length - pos > 1:
# Encode Code C two characters at a time
codes.append(int(text[pos:pos+2]))
pos += 2
else:
# Switch to Code B
codes.append(charset['CodeB'])
charset = CODE128B
elif text[pos:pos+4].isdigit() and length - pos >= 4:
# Switch to Code C
codes.append(charset['CodeC'])
charset = CODE128C
else:
# Encode Code B one character at a time
codes.append(charset[text[pos]])
pos += 1
# Checksum
checksum = 0
for weight, code in enumerate(codes):
checksum += max(weight, 1) * code
codes.append(checksum % 103)
# Stop Code
codes.append(charset['Stop'])
return codes
def code128_image(data, height=100, thickness=3, quiet_zone=False):
partnum = data
if not data[-1] == CODE128B['Stop']:
data = code128_format(data)
barcode_widths = []
for code in data:
for weight in WEIGHTS[code]:
barcode_widths.append(int(weight) * thickness)
width = sum(barcode_widths)
x = 0
if quiet_zone:
width += 20 * thickness
x = 10 * thickness
# Monochrome Image
img = Image.new('RGB', (int(width * 10), int(width * 10)), 'white')
draw = ImageDraw.Draw(img)
draw_bar = True
for bwidth in barcode_widths:
bwidth *= 4
if draw_bar:
draw.rectangle(((x + int(width * 3), width*6.25), (x + int(width * 3) + bwidth - 1, width*7)), fill='black')
draw_bar = not draw_bar
x += bwidth
#draw.arc(((width - width/5, width - width/5), (width*9 + width/5, width*9 + width/5)),0,360,fill='blue', width = int(width/8))
draw.arc(((width+int(width / 1.4), width+int(width / 1.4)), (width*9-int(width / 1.4), width*9-int(width / 1.4))),0,360,fill='blue', width = int(width/8))
font_path = find_data_file("OCRAEXT.TTF")
font_size = width/2
font = ImageFont.truetype(font_path, font_size)
text_width = font.getlength(partnum)
while text_width > width*4:
font_size -= 1
font = ImageFont.truetype(font_path, font_size)
text_width = font.getlength(partnum)
txtx = (int(width * 10) - text_width) / 2
txty = (int(width * 10)) / 2 + width / 2
draw.text((txtx,txty),partnum, "black", font)
return img
def qr_image(data, width=600):
partnum = data
# Monochrome Image
img = Image.new('RGB', (int(width * 10), int(width * 10)), 'white')
draw = ImageDraw.Draw(img)
#svg_path = find_data_file("belden-logo.svg")
#with open(svg_path, 'rb') as svg_file:
# png_image = cairosvg.svg2png(file_obj=svg_file,dpi=width*30, scale=30, background_color="white")
#with open("output.png", 'wb') as file:
# file.write(png_image)
png_image_io = "belden-logo-superhires.png"
png_image_pillow = Image.open(png_image_io)
png_width, png_height = png_image_pillow.size
png_image_pillow = png_image_pillow.resize((int(width*5.2), int(width*5.2/png_width*png_height)))
png_width, png_height = png_image_pillow.size
# paste belden logo first because it has a big border that would cover stuff up
img.paste(png_image_pillow, (int(width*5-png_width/2), int(width*4.25 - png_height/2)))
# draw circle border
#draw.arc(((width - width/5, width - width/5), (width*9 + width/5, width*9 + width/5)),0,360,fill='blue', width = int(width/8))
draw.arc(((width+int(width / 1.4), width+int(width / 1.4)), (width*9-int(width / 1.4), width*9-int(width / 1.4))),0,360,fill=(0, 73,144), width = int(width/8))
font_path = find_data_file("GothamCond-Medium.otf")
font_size = width/2
font = ImageFont.truetype(font_path, font_size)
text_width = font.getlength(partnum[2:])
# shrink font dynamically if it's too long of a name
while text_width > width*4:
font_size -= 1
font = ImageFont.truetype(font_path, font_size)
text_width = font.getlength(partnum[2:])
txtx = (int(width * 10) - text_width) / 2
txty = (int(width * 10)) / 2
# draw part number text
draw.text((txtx,txty),partnum[2:], "black", font)
# Draw QR code
partnum = partnum.replace(" ", "%20")
qrcode = segno.make('HTTPS://BLDN.APP/' + partnum,micro=False,boost_error=False,error="L",mask=3)
out = io.BytesIO()
qrx, _ = qrcode.symbol_size(1,0)
qrcode.save(out, scale=width*2/qrx, kind="PNG", border=0)
qrimg = Image.open(out)
img.paste(qrimg, box=(int(width*4),int(width*5.75)))
return img
if __name__ == "__main__":
#print(generate_code("BL10GXS13"))
#print(generate_code("BL10GXgd35j35S13"))
#print(generate_code("BL10GX54hS13"))
#print(generate_code("BL10Gj34qXS13", False, False))
#print(generate_code("BL104w5545dp7bfwp43643534/4563G-XS13"))
#adjust_image(cv2.imread('test_skew.jpg'))
path = "labels"
img = generate_code("BL10GXS13")
import os
os.makedirs(path, exist_ok=True)
img.save(path + "/" + "BL10GXS13" + ".png")
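
Although generate_code now emits QR labels, the Code 128 encoder is kept in this module. A worked check of code128_format, assuming label_image.py and its dependencies (PIL, segno, util) are importable: encoding "AB" starts in Code B (104), appends A=33 and B=34, the weighted checksum is (1*104 + 1*33 + 2*34) mod 103 = 102, and the Stop code is 106.

from label_image import code128_format

assert code128_format("AB") == [104, 33, 34, 102, 106]
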

View File

@ -655,7 +655,7 @@ if __name__ == "__main__":
if not ret: if not ret:
break break
mapimage(frame, fps=30)""" mapimage(frame, fps=30)"""
show = True show = False
ring = 1 ring = 1
startup_animation(show) startup_animation(show)
for x in range(54): for x in range(54):

BIN
map3.png

Binary file not shown.

Before: Size: 44 KiB
After: Size: 41 KiB

read_datasheet.py

@ -1,9 +1,8 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
# Parse Belden catalog techdata datasheets # Parse Belden (100%) & Alphawire (75%) catalog techdata datasheets
import pandas as pd import pandas as pd
pd.set_option('future.no_silent_downcasting', True)
from PyPDF2 import PdfReader from PyPDF2 import PdfReader
import camelot import camelot
import numpy as np import numpy as np
@ -13,55 +12,94 @@ import json
from util import fprint from util import fprint
import uuid import uuid
from util import run_cmd from util import run_cmd
from util import win32
import os import os
import glob
import sys
def touch(path): def touch(path):
with open(path, 'a'): with open(path, 'a'):
os.utime(path, None) os.utime(path, None)
def find_data_file(filename):
if getattr(sys, "frozen", False):
# The application is frozen
datadir = os.path.dirname(sys.executable)
else:
# The application is not frozen
# Change this bit to match where you store your data files:
datadir = os.path.dirname(__file__)
return os.path.join(datadir, filename)
def extract_table_name(table_start, searchpage, reader, dstype, fallbackname):
if dstype == "Belden":
ymin = table_start
ymax = table_start + 10
elif dstype == "Alphawire":
ymin = table_start - 5
ymax = table_start + 10
page = reader.pages[searchpage - 1]
parts = []
def visitor_body(text, cm, tm, fontDict, fontSize):
y = tm[5]
if y > ymin and y < ymax:
parts.append(text)
page.extract_text(visitor_text=visitor_body)
text_body = "".join(parts).strip('\n')
if len(text_body) == 0:
text_body = str(fallbackname)
return text_body
#fprint(text_body)
def parse(filename, output_dir, partnum, dstype): def parse(filename, output_dir, partnum, dstype):
tables = []
# Extract table data # Extract table data
try:
tables = camelot.read_pdf(filename, pages="1-end", flavor='lattice', backend="poppler", split_text=False, line_scale=100, process_background=True, resolution=600, interations=1, layout_kwargs={'detect_vertical': False, 'char_margin': 0.5}, shift_text=['r', 't']) if dstype == "Belden":
tables = camelot.read_pdf(filename, pages="1-end", flavor='lattice', backend="ghostscript", split_text=False, line_scale=100, process_background=True, resolution=600, interations=1, layout_kwargs={'detect_vertical': False, 'char_margin': 0.5}, shift_text=['r', 't'])
elif dstype == "Alphawire":
tables = camelot.read_pdf(filename, pages="1-end", flavor='lattice', backend="ghostscript", split_text=False, line_scale=50, process_background=True, resolution=600, interations=1, layout_kwargs={'detect_vertical': True, 'char_margin': 0.5}, shift_text=['l', 't'])
except (OSError, RuntimeError) as e:
print(e)
if win32:
print("Ghostscript is not installed! Launching installer...")
#subprocess.run([r".\\gs10030w64.exe"])
os.system(r'''Powershell -Command "& { Start-Process \"''' + find_data_file("gs10030w64.exe") + r'''\" -Verb RunAs } " ''')
# Will return once file launched...
print("Once the install is completed, try again.")
return False
else:
print("Ghostscript is not installed. You can install it with e.g. apt install ghostscript for Debian-based systems.")
return False
#fprint("Total tables extracted:", tables.n) #fprint("Total tables extracted:", tables.n)
n = 0 n = 0
pagenum = 0 #pagenum = 0
reader = PdfReader(filename) reader = PdfReader(filename)
page = reader.pages[0] page = reader.pages[0]
table_list = {} table_list = {}
table_list_raw = {}
pd.set_option('future.no_silent_downcasting', True)
for table in tables: for table in tables:
#with pd.options.context("future.no_silent_downcasting", True):
table.df.infer_objects(copy=False) table.df.infer_objects(copy=False)
table.df.replace('', np.nan, inplace=True) table.df = table.df.replace('', np.nan).infer_objects(copy=False)
table.df.dropna(inplace=True, how="all") table.df.dropna(inplace=True, how="all")
table.df.dropna(inplace=True, axis="columns", how="all") table.df.dropna(inplace=True, axis="columns", how="all")
table.df.replace(np.nan, '', inplace=True) table.df = table.df.replace(np.nan, '').infer_objects(copy=False)
if not table.df.empty: if not table.df.empty:
#fprint("\nTable " + str(n)) #fprint("\nTable " + str(n))
# Extract table names # Extract table names
table_start = table.cells[0][0].lt[1] # Read top-left cell's top-left coordinate table_start = table.cells[0][0].lt[1] # Read top-left cell's top-left coordinate
#fprint(table_start) #fprint(table_start)
ymin = table_start
ymax = table_start + 10
if pagenum != table.page - 1:
pagenum = table.page - 1
page = reader.pages[table.page - 1]
parts = []
def visitor_body(text, cm, tm, fontDict, fontSize):
y = tm[5]
if y > ymin and y < ymax:
parts.append(text)
page.extract_text(visitor_text=visitor_body)
text_body = "".join(parts).strip('\n')
if len(text_body) == 0:
text_body = str(n)
#fprint(text_body)
text_body = extract_table_name(table_start, table.page, reader, dstype, n)
table_list[text_body] = table.df table_list[text_body] = table.df
if dstype == "Alphawire":
table_list_raw[text_body] = table
#table.to_html("table" + str(n) + ".html") #table.to_html("table" + str(n) + ".html")
#fprint(table.df) #fprint(table.df)
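
The new extract_table_name helper relies on PyPDF2's visitor_text hook, where tm[5] is the y position of each text fragment; collecting fragments in a narrow band just above a table's top edge yields its title. A standalone sketch of that pattern (the path and coordinates in the usage comment are placeholders):

from PyPDF2 import PdfReader

def text_in_band(pdf_path, page_index, ymin, ymax):
    # Collect every text fragment whose y position falls inside (ymin, ymax).
    parts = []
    def visitor_body(text, cm, tm, fontDict, fontSize):
        if ymin < tm[5] < ymax:
            parts.append(text)
    PdfReader(pdf_path).pages[page_index].extract_text(visitor_text=visitor_body)
    return "".join(parts).strip("\n")

# e.g. the 10-point band above a table whose top edge sits at y = 500:
# print(text_in_band("cables/10GXS13/datasheet.pdf", 0, 500, 510))
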
@ -71,7 +109,7 @@ def parse(filename, output_dir, partnum, dstype):
#tables.export(output_dir + '/techdata.json', f='json') #tables.export(output_dir + '/techdata.json', f='json')
# fprint(table_list) #fprint(table_list)
# Extract Basic details - part name & description, image, etc # Extract Basic details - part name & description, image, etc
reader = PdfReader(filename) reader = PdfReader(filename)
@ -100,24 +138,32 @@ def parse(filename, output_dir, partnum, dstype):
tables = dict() tables = dict()
torename = dict() torename = dict()
previous_table = "" previous_table = ""
#print(table_list.keys())
for table_name in table_list.keys(): for table_name in table_list.keys():
# determine shape: horizontal or vertical # determine shape: horizontal or vertical
table = table_list[table_name] table = table_list[table_name]
rows = table.shape[0] rows = table.shape[0]
cols = table.shape[1] cols = table.shape[1]
vertical = None vertical = None
#print(rows, cols, table_name)
if rows > 2 and cols == 2: if rows > 2 and cols == 2:
vertical = True vertical = True
elif cols == 1: elif cols == 1 and rows > 1:
vertical = False vertical = False
elif rows == 1: elif rows == 1:
vertical = True vertical = True
elif cols == 2: # and rows <= 2 elif cols == 2: # and rows <= 2
# inconsistent # inconsistent
if table.iloc[0, 0].find(":") == len(table.iloc[0, 0]) - 1: # check if last character is ":" indicating a vertical table if dstype == "Belden":
vertical = True if table.iloc[0, 0].find(":") == len(table.iloc[0, 0]) - 1: # check if last character is ":" indicating a vertical table
else: vertical = True
vertical = False else:
vertical = False
elif dstype == "Alphawire":
if table.iloc[0, 0].find(")") == 1 or table.iloc[0, 0].find(")") == 2 or table.iloc[0, 0].find(":") == len(table.iloc[0, 0]) - 1: # check if last character is ":" indicating a vertical table
vertical = True
else:
vertical = False
elif cols > 2: # and rows <= 2 elif cols > 2: # and rows <= 2
vertical = False vertical = False
@ -125,9 +171,12 @@ def parse(filename, output_dir, partnum, dstype):
vertical = False vertical = False
else: # 1 column, <= 2 rows else: # 1 column, <= 2 rows
vertical = False vertical = False
#print(vertical)
# missing name check # missing name check
for table_name_2 in table_list.keys(): for table_name_2 in table_list.keys():
if dstype == "Alphawire" and table_name_2.find("\n") >= 0:
torename[table_name_2] = table_name_2[0:table_name_2.find("\n")]
if table_name_2.find(table.iloc[-1, 0]) >= 0: if table_name_2.find(table.iloc[-1, 0]) >= 0:
# Name taken from table directly above - this table does not have a name # Name taken from table directly above - this table does not have a name
torename[table_name_2] = "Specs " + str(len(tables)) torename[table_name_2] = "Specs " + str(len(tables))
@ -136,8 +185,12 @@ def parse(filename, output_dir, partnum, dstype):
if vertical: if vertical:
out = dict() out = dict()
for row in table.itertuples(index=False, name=None): if rows > 1:
out[row[0].replace("\n", " ").replace(":", "")] = row[1] for row in table.itertuples(index=False, name=None):
out[row[0].replace("\n", " ").replace(":", "")] = row[1]
else:
for row in table.itertuples(index=False, name=None):
out[row[0].replace("\n", " ").replace(":", "")] = ""
else: # horizontal else: # horizontal
out = dict() out = dict()
@ -147,9 +200,52 @@ def parse(filename, output_dir, partnum, dstype):
tables[table_name] = out tables[table_name] = out
# multi-page table check, Alphawire
if dstype == "Alphawire" and table_name.isdigit():
# table continues from previous page or has name on previous page
thistbl = table_list_raw[table_name]
prevtbl = table_list_raw[previous_table]
if prevtbl.cells[-1][0].lb[1] < 50 and thistbl.cells[0][0].lt[1] > 600:
# wraparound
#print("WRAP")
#print("PREV TABLE", prevtbl.df)
#print("THIS TABLE", thistbl.df)
#print("PREV TABLE CORNER", prevtbl.cells[-1][0].lb[1])
#print("THIS TABLE CORNER", thistbl.cells[0][0].lt[1])
main_key = previous_table
cont_key = table_name
#print(vertical)
if vertical == False:
main_keys = list(tables[main_key].keys())
for i, (cont_key, cont_values) in enumerate(tables[cont_key].items()):
if i < len(main_keys):
#print(tables[main_key][main_keys[i]])
tables[main_key][main_keys[i]] = (tuple(tables[main_key][main_keys[i]]) + (cont_key,) + cont_values)
del tables[table_name]
else:
#print(tables[cont_key].keys())
for key in tables[cont_key].keys():
#print(main_key, key, cont_key, key)
tables[main_key][key] = tables[cont_key][key]
del tables[table_name]
elif thistbl.cells[0][0].lt[1] > 600:
# name on previous page (grrrr)
#print("NAMEABOVE")
#print("PREV TABLE", prevtbl.df)
#print("THIS TABLE", thistbl.df)
#print("PREV TABLE CORNER", prevtbl.cells[-1][0].lb[1])
#print("THIS TABLE CORNER", thistbl.cells[0][0].lt[1])
name = extract_table_name(50, prevtbl.page,reader,dstype,table_name).strip("\n").strip()
#print("FOUND NAME:", name)
torename[table_name] = name
# multi-page table check
# multi-page table check, Belden
if dstype == "Belden": if dstype == "Belden":
if table_name.isdigit() and len(tables) > 1: if table_name.isdigit() and len(tables) > 1:
#fprint(table_name) #fprint(table_name)
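
The Alphawire wrap-around branch above stitches a continuation table back onto the table it spilled out of; because the continuation was parsed with its first data row as column headers, that row is re-inserted as data during the merge. A sketch with made-up values:

main = {"Conductor": ("1", "2"), "AWG": ("24", "24")}
cont = {"3": ("4", "5"), "24": ("24", "24")}  # first data row became the keys

main_keys = list(main.keys())
for i, (cont_key, cont_values) in enumerate(cont.items()):
    if i < len(main_keys):
        main[main_keys[i]] = tuple(main[main_keys[i]]) + (cont_key,) + cont_values

print(main)  # {'Conductor': ('1', '2', '3', '4', '5'), 'AWG': ('24', '24', '24', '24', '24')}
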
@ -177,7 +273,7 @@ def parse(filename, output_dir, partnum, dstype):
previous_table = table_name previous_table = table_name
# remove renamed tables # remove & rename tables
for table_name in torename.keys(): for table_name in torename.keys():
tables[torename[table_name]] = tables[table_name] tables[torename[table_name]] = tables[table_name]
del tables[table_name] del tables[table_name]
@ -211,11 +307,20 @@ def parse(filename, output_dir, partnum, dstype):
#print(output_table) #print(output_table)
run_cmd("rm \"" + output_dir + "\"/*.json") # not reliable! #run_cmd("rm \"" + output_dir + "\"/*.json") # not reliable!
with open(output_dir + "/" + output_table["searchspecs"]["id"] + ".json", 'w') as json_file: pattern = os.path.join(output_dir, '*.json')
json_files = glob.glob(pattern)
for file_path in json_files:
os.remove(file_path)
#print(f"Deleted {file_path}")
with open(output_dir + "/search_" + output_table["searchspecs"]["id"] + ".json", 'w') as json_file:
json.dump(output_table["searchspecs"], json_file) json.dump(output_table["searchspecs"], json_file)
touch(output_dir + "/parsed") with open(output_dir + "/specs_" + output_table["partnum"] + ".json", 'w') as json_file:
return output_table json.dump(output_table["fullspecs"], json_file)
#print(json.dumps(output_table, indent=2))
touch(output_dir + "/parsed") # mark as parsed
return True
def flatten(tables): def flatten(tables):
@ -245,7 +350,6 @@ def flatten(tables):
#print("\"" + keyname + "\":", "\"" + str(out[fullkeyname]) + "\",") #print("\"" + keyname + "\":", "\"" + str(out[fullkeyname]) + "\",")
elif len(tables[table][key]) == 1: elif len(tables[table][key]) == 1:
out[fullkeyname] = convert_to_number(tables[table][key][0]) out[fullkeyname] = convert_to_number(tables[table][key][0])
#print("\"" + keyname + "\":", "\"" + str(out[fullkeyname]) + "\",") #print("\"" + keyname + "\":", "\"" + str(out[fullkeyname]) + "\",")
# if the item has at least two commas in it, split it # if the item has at least two commas in it, split it
@ -256,7 +360,7 @@ def flatten(tables):
# if the item has at least two commas in it, split it # if the item has at least two commas in it, split it
if tables[table][key].count(',') > 0: if tables[table][key].count(',') > 0:
out[fullkeyname] = list(map(lambda x: x.strip(), tables[table][key].split(","))) out[fullkeyname] = list(map(lambda x: x.strip(), tables[table][key].split(",")))
print("\"" + keyname + "\":", "\"" + str(out[fullkeyname]) + "\",") #print("\"" + keyname + "\":", "\"" + str(out[fullkeyname]) + "\",")
#print("}") #print("}")
@ -265,4 +369,4 @@ def flatten(tables):
if __name__ == "__main__": if __name__ == "__main__":
parse("test2.pdf", "cables/10GXS13", "10GXS13") print(parse("cables/3050/datasheet.pdf", "cables/3050", "3050", "Alphawire"))

requirements.txt

@ -1,5 +1,5 @@
# Runtime # Runtime
camelot-py[base] camelot-py
opencv-python opencv-python
pypdf2==2.12.1 pypdf2==2.12.1
alive-progress alive-progress
@ -15,6 +15,13 @@ websockets
numpy numpy
scipy scipy
ipywidgets ipywidgets
pandas
pyarrow
ghostscript
pyzbar
segno
# Development # Development
matplotlib matplotlib
#cx_Freeze # uncomment if building label generator app
# requires windows 10 SDK, visual C++, etc

31
setup-label-generator.py Normal file

@ -0,0 +1,31 @@
import sys
from cx_Freeze import setup, Executable
debug = True
debug = not debug
# Dependencies are automatically detected, but it might need fine tuning.
# "packages": ["os"] is used as example only
import opcode
import os
import distutils
#distutils_path = os.path.join(os.path.dirname(opcode.__file__), 'distutils')
build_exe_options = {"include_msvcr": True, "packages": ["camelot", "setuptools", "segno"], "optimize": 0, "silent": True, "include_files": ["gs10030w64.exe", "GothamCond-Medium.otf", "belden-logo-superhires.png"], "excludes": ["scipy", "torch"]}
# base="Win32GUI" should be used only for Windows GUI app
base = "console"
#if sys.platform == "win32" and not debug:
# base = "Win32GUI"
if sys.platform == "linux" or sys.platform == "linux2" or sys.platform == "darwin":
name = "jukebox-labelgen"
else:
name = "jukebox-labelgen.exe"
setup(
name="IP Pigeon",
version="0.2.4",
description="IP Pigeon client application",
options={"build_exe": build_exe_options},
executables=[Executable("label_generator.py", base=base, uac_admin=False, target_name=name)],
)

BIN
test.pdf

Binary file not shown.

BIN
test2.pdf

Binary file not shown.

ur5_control.py

@ -14,7 +14,8 @@ from util import fprint
rob = None rob = None
offset_x, offset_y, offset_z = (0, 0, 0.14) # Tool offset
limb_base, limb1, limb2, limb3, limb_wrist = (0.105, .425, .39225, .1, .0997) # Limb lengths
def init(ip): def init(ip):
global rob global rob
@ -190,6 +191,11 @@ def move_to_polar(start_pos, end_pos):
return rx_intermediate return rx_intermediate
def degtorad(angle):
return angle/180.0 * math.pi
def radtodeg(angle):
return angle*180.0 / math.pi
def move_to_home(): def move_to_home():
global rob global rob
@ -216,39 +222,107 @@ def normalize_degree(theta):
# Return angle # Return angle
return normalized_theta return normalized_theta
def get_joints_from_xyz_rel(x, y, z, initial_guess = (math.pi/2, math.pi/2, 0), limbs=(.422864, .359041, .092124)):
# Get polar coordinates of x,y pair
r, theta = cartesian_to_polar(x, y) def get_joints_from_xyz_rel(x, y, z, rx=0, ry=-math.pi/2, rz=0, initial_guess = (math.pi/2, math.pi/2, 0), l3offset=0):
# Get limbs and offsets
# Get length of each limb #l3=0.15
l1, l2, l3 = limbs l_bs, l1, l2, l3, l_wt = (limb_base, limb1, limb2, limb3, limb_wrist) # Limb lengths
l3 += l3offset # add wrist offset, used for gripper angle calculations
# Calculate base angle and r relative to shoulder joint
def calculate_theta(x, y, a):
# Calculate if we need the + or - in our equations
if (x>-a and y>=0) or (x>a and y<0):
flip = 1
elif (x<-a and y>=0) or (x<a and y<0):
flip = -1
else:
# Critical section (x=a, or x=-a). Infinite slope
# Return 0 or 180 depending on sign
return math.atan2(y, 0)
# Calculate tangent line y = mx + b
m = (x*y - math.sqrt(x*x*y*y-(x*x-a*a)*(y*y-a*a)))/(x*x-a*a)
b = flip * a * math.sqrt(1+m*m)
# Calculate equivalent tangent point on circle
cx = (-flip*m*b)/(1+m*m)
cy = m*cx + flip*b
# Calculate base angle, make angle negative if flip=1
theta = math.atan2(cy, cx) + (-math.pi if flip==1 else 0)
return theta
base_theta = calculate_theta(x, y, l_bs)
cx, cy = l_bs*math.cos(base_theta), l_bs*math.sin(base_theta)
r = math.sqrt((x+offset_x+cx)**2 + (y+offset_y+cy)**2)
# Formulas to find out joint positions for (r, z) # Formulas to find out joint positions for (r, z)
def inv_kin_r_z(p): def inv_kin_r_z(p):
a, b, c = p a, b, c = p
return (l1*math.cos(a) + l2*math.cos(a-b) + l3*math.cos(a-b-c) - r, # r return (l1*math.cos(a) + l2*math.cos(a-b) + l3*math.cos(a-b-c) - r, # r
l1*math.sin(a) + l2*math.sin(a-b) - l3*math.sin(a-b-c) - z, # z l1*math.sin(a) + l2*math.sin(a-b) - l3*math.sin(a-b-c) - (l3*math.sin(a-b-c)) - (z + offset_z), # z
a-b-c) # wrist angle a-b-c) # wrist angle
# Normalize angles # Normalize angles
base, shoulder, elbow, wrist = [normalize_degree(deg) for deg in [theta, *fsolve(inv_kin_r_z, initial_guess)]] base, shoulder, elbow, wrist1 = [normalize_degree(deg) for deg in [base_theta, *fsolve(inv_kin_r_z, initial_guess)]]
wrist1 += rx
# Return result # Return result
return base, shoulder, elbow, wrist return base, shoulder, elbow, wrist1, ry, rz
def get_joints_from_xyz_abs(x, y, z): def get_joints_from_xyz_abs(x, y, z, rx=0, ry=-math.pi/2, rz=math.pi/2, l3offset=0):
joints = get_joints_from_xyz_rel(x, y, z) joints = get_joints_from_xyz_rel(x, y, z, rx, ry, rz, l3offset=l3offset)
# Joint offsets # Joint offsets
# Base, Shoulder, Elbow, Wrist # Base, Shoulder, Elbow, Wrist
inverse = [1, -1, 1, 1] inverse = [1, -1, 1, 1, 1, 1]
offsets = [0, 0, 0, -math.pi/2] offsets = [-math.pi/2, 0, 0, -math.pi/2, 0, 0]
if radtodeg(joints[1]) > 137:
print("CRASH! Shoulder at", joints[1] * 180/math.pi)
#else:
#print("Shoulder at", joints[1] * 180/math.pi)
# Return adjusted joint positions # Return adjusted joint positions
return [o+j*i for j, o, i in zip(joints, offsets, inverse)] return [o+j*i for j, o, i in zip(joints, offsets, inverse)]
# gripper angle: from vertical
# gripper length: from joint to start of grip
# to flip, you can use flip=True or make gripper angle negative
def offset_gripper_angle(x, y, z, gripperangle=35, gripperlength=0.20+0.018, flip=False):
if gripperangle < 0:
rz = - math.pi / 2
else:
rz = math.pi / 2
if flip:
gripperangle = -degtorad(gripperangle)
grippery = gripperlength - math.cos(gripperangle) * gripperlength
grippery += math.sin(gripperangle) * limb3
gripperx = math.sin(gripperangle) * gripperlength + limb3 * 2
gripperx -= (1-math.cos(gripperangle)) * limb3
rz = math.pi / 2
# flip the whole wrist
return get_joints_from_xyz_abs(x, y, z-grippery, rx=gripperangle + degtorad(180), l3offset=-gripperx, ry=math.pi/2, rz=rz)
else:
gripperangle = degtorad(gripperangle)
grippery = gripperlength - math.cos(gripperangle) * gripperlength
grippery -= math.sin(gripperangle) * limb3
gripperx = math.sin(gripperangle) * gripperlength
gripperx += (1-math.cos(gripperangle)) * limb3
return get_joints_from_xyz_abs(x, y, z-grippery, rx=gripperangle, l3offset=-gripperx, rz=rz)
def goto_holder_index(idx, z=0.05, gripperangle=35, flip=False):
joint = config["position_map"][idx]
print("Going to cable holder index", joint["index"], "at position", joint["pos"])
angles = offset_gripper_angle(joint["pos"][1]/1000, joint["pos"][0]/1000, z, gripperangle=gripperangle, flip=flip)
#rob.movej(angles, acc=2, vel=2)
return angles
#angles = get_joints_from_xyz_abs(joint["pos"][1]/1000, joint["pos"][0]/1000, 0.05, )
if __name__ == "__main__": if __name__ == "__main__":
#rob.movej((0, 0, 0, 0, 0, 0), 0.1, 0.2) #rob.movej((0, 0, 0, 0, 0, 0), 0.1, 0.2)
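
The planar core of the kinematics above is a three-variable root find with scipy's fsolve: reach r, height z, and a wrist-angle constraint. The sketch below is a simplified, self-contained version using the original residuals (the updated code additionally folds in the tool z offset, an extra wrist term, and the base-rotation/tangent-line correction); the limb values mirror the globals defined at the top of the file and the target point is illustrative.

import math
from scipy.optimize import fsolve

l1, l2, l3 = 0.425, 0.39225, 0.1  # limb1, limb2, limb3

def inv_kin_r_z(p, r, z):
    a, b, c = p
    return (l1*math.cos(a) + l2*math.cos(a-b) + l3*math.cos(a-b-c) - r,  # reach
            l1*math.sin(a) + l2*math.sin(a-b) - l3*math.sin(a-b-c) - z,  # height
            a - b - c)                                                   # keep wrist level

shoulder, elbow, wrist = fsolve(inv_kin_r_z, (math.pi/2, math.pi/2, 0), args=(0.5, 0.2))
print([round(math.degrees(v), 2) for v in (shoulder, elbow, wrist)])
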
@ -262,7 +336,7 @@ if __name__ == "__main__":
0.40002172976662786, 0.40002172976662786,
0, 0,
-3.14152741295329, -3.14152741295329,
0] math.radians(62)]
# time.sleep(.5) # time.sleep(.5)
@ -290,14 +364,88 @@ if __name__ == "__main__":
# set_pos_abs(*home_pose) # set_pos_abs(*home_pose)
angles = get_joints_from_xyz_abs(0.3, 0.3, 0.3)
rob.movej([*angles, *rob.getj()[4:]], acc=1, vel=1)
angles = get_joints_from_xyz_abs(-0.3, -0.3, 0.7) # angles = get_joints_from_xyz_abs(-0.2, 0, 0)
rob.movej([*angles, *rob.getj()[4:]], acc=1, vel=1) # rob.movej(angles, acc=2, vel=2)
# angles = get_joints_from_xyz_abs(-0.2, -0.2, 0)
# rob.movej(angles, acc=2, vel=2)
# angles = get_joints_from_xyz_abs(0, -0.6, 0)
# rob.movej(angles, acc=2, vel=2)
# angles = get_joints_from_xyz_abs(0, -0.5, 0)
# rob.movej(angles, acc=2, vel=2)
# angles = get_joints_from_xyz_abs(0, -0.4, 0)
# rob.movej(angles, acc=2, vel=2)
# angles = get_joints_from_xyz_abs(0, -0.3, 0)
# rob.movej(angles, acc=2, vel=2)
# angles = get_joints_from_xyz_abs(0, -0.2, 0)
# rob.movej(angles, acc=2, vel=2)
# angles = get_joints_from_xyz_abs(0, -0.13, 0)
# rob.movej(angles, acc=2, vel=2)
config = None
joints = []
for i in np.linspace(-0.2, -0.7, 50):
joints.append(get_joints_from_xyz_abs(i, 0, 0))
#rob.movejs(joints, acc=2, vel=2)
import yaml
with open('config.yml', 'r') as fileread:
#global config
config = yaml.safe_load(fileread)
#rob.movej(goto_holder_index(24, 0.2, 0), acc=2, vel=2)
#joints = []
#for i in np.linspace(0, 340, 340):
# joints.append(goto_holder_index(24, 0.5, i))
#rob.movejs(joints, acc=1, vel=3)
angle = 30
rob.movej(goto_holder_index(26, 0.1, angle), acc=2, vel=2)
time.sleep(1)
rob.movej(goto_holder_index(25, 0.1, angle), acc=2, vel=2)
time.sleep(1)
rob.movej(goto_holder_index(24, 0.1, angle, flip=True), acc=2, vel=2)
#rob.movej(goto_holder_index(32, 0.2, angle), acc=2, vel=2)
#rob.movej(goto_holder_index(38, 0.2, angle), acc=2, vel=2)
#rob.movej(goto_holder_index(25, 0.1, angle, flip=True), acc=2, vel=2)
#rob.movej(goto_holder_index(25, 0.2, angle, flip=True), acc=2, vel=2)
#rob.movej(goto_holder_index(24, 0.1, angle, flip=True), acc=2, vel=2)
#time.sleep(1)
#rob.movej(goto_holder_index(25, 0.1, angle, flip=True), acc=2, vel=2)
#rob.movej(goto_holder_index(49, 0.1, angle), acc=2, vel=2)
#rob.movej(goto_holder_index(49, 0.1, angle, flip=True), acc=2, vel=2)
# rob.movej(goto_holder_index(50, 0.1, angle, flip=True), acc=2, vel=2)
# rob.movej(goto_holder_index(51, 0.1, angle, flip=True), acc=2, vel=2)
# rob.movej(goto_holder_index(52, 0.1, angle, flip=True), acc=2, vel=2)
# rob.movej(goto_holder_index(53, 0.1, angle, flip=True), acc=2, vel=2)
#time.sleep(2)
#rob.movej(goto_holder_index(24, 0.15, 35, flip=True), acc=2, vel=2)
#time.sleep(10)
# time.sleep(4)
# goto_holder_index(26, 0.1, 20)
# time.sleep(4)
# goto_holder_index(26, 0.1, 30)
# time.sleep(4)
# goto_holder_index(26, 0.1, 40)
# for joint in config["position_map"]:
#joint = config["position_map"][26]
#print("Going to cable holder index", joint["index"], "at position", joint["pos"])
#angles = get_joints_from_xyz_abs(joint["pos"][1]/1000, joint["pos"][0]/1000, 0.05, )# rx=math.pi / 5)
#joints.append(angles)
#rob.movej(angles, acc=2, vel=2)
#time.sleep(10)
#rob.movejs(joints, acc=2, vel=2)
# joints = []
# for i in np.linspace(-0.3, -0.7, 50):
# joints.append(get_joints_from_xyz_abs(i, 0, 0))
# rob.movejs(joints, acc=2, vel=2)
# time.sleep(5)
# angles = get_joints_from_xyz_abs(0, -0.6, 0)
# rob.movej(angles, acc=2, vel=2)
angles = get_joints_from_xyz_abs(-0.3, 0.4, 0.2)
rob.movej([*angles, *rob.getj()[4:]], acc=1, vel=1)
# set_pos_abs(*p1) # set_pos_abs(*p1)

util.py

@ -70,7 +70,7 @@ def fprint(msg, settings = None, sendqueue = None):
except Exception as e: except Exception as e:
try: try:
print('[????:' + frm.function + ']:', str(msg)) print('[????:' + frm.function + ']:', str(msg))
print('[util:fprint]: ' + str(e)) #print('[util:fprint]: ' + str(e))
except: except:
print('[????]:', str(msg)) print('[????]:', str(msg))