34 Commits

SHA1 Message Date
0f2c19e811 Merge remote-tracking branch 'origin/dthomas_meilisearch' into dthomas_meilisearch 2024-03-08 19:13:03 -06:00
b18355fc14 nuke database.py 2024-03-08 19:12:41 -06:00
aadb6ba24d add search functions to JukeboxSearch 2024-03-01 21:24:37 -06:00
4561b1c1a3 fix error when index does not exist 2024-03-01 20:37:22 -06:00
6edd0b4ef0 fix map datatype 2024-03-01 20:37:02 -06:00
2c242aac29 Merge branch 'main' into dthomas_meilisearch 2024-03-01 19:26:57 -06:00
af6ffe451d Fix get_specs 2024-03-01 19:26:47 -06:00
b585f8cdb7 Merge branch 'main' into dthomas_meilisearch 2024-03-01 19:25:30 -06:00
50bf835d13 Update parsing and stuff 2024-03-01 19:25:01 -06:00
f12d8a8062 add print statement 2024-03-01 19:24:47 -06:00
fc9ff4c8b2 split lists if they contain more than 2 commas 2024-03-01 19:13:28 -06:00
e903150fd4 Add functions for connecting to Meilisearch and adding documents 2024-02-20 10:33:01 -06:00
d0ea696274 reorganize gitignore and add comments 2024-02-20 10:15:56 -06:00
eea8c9f5fa Merge branch 'main' into dthomas_meilisearch 2024-02-20 10:04:33 -06:00
fe5de4e54c Adjust datasheet parsing for meilisearch, add dockerfile 2024-02-17 23:08:21 -06:00
68b95bfe17 add a module for using meilisearch 2024-02-17 22:46:11 -06:00
e3e9b855f9 add compose file with meilisearch image 2024-02-17 22:45:30 -06:00
523915feb0 add partnum to parsing 2024-02-17 20:31:43 -06:00
b5b2a936c1 Switch to binary dependency 2024-02-17 20:25:51 -06:00
afd144bd32 Fix requirements.txt 2024-02-17 20:22:54 -06:00
eb221a5206 Create main runner app, with async multithreading 2024-02-17 20:21:42 -06:00
db7c8c4577 Add video abstraction class 2024-02-17 20:06:37 -06:00
21b1bf7992 Added system exit 2024-02-16 20:36:43 -06:00
d376dba67c Add keyboard control scripts, add websocket server 2024-02-08 12:35:30 -06:00
95631dbdbe Add LED array mapping, load base config for tabletop rings, and image/video player mode 2024-01-25 20:06:10 -06:00
9aef296763 More logging 2024-01-18 16:45:37 -06:00
2b287492de Make firefox start faster, verbose start 2024-01-18 16:41:57 -06:00
d2a4d93590 Working network test 2024-01-18 16:35:02 -06:00
58605dbe85 test server/client setup 2024-01-18 16:00:12 -06:00
7bf3276ce9 Add basic windows control system. Needs VM functional to continue 2024-01-17 20:14:20 -06:00
818688452b Add local loading page 2024-01-17 16:46:20 -06:00
01526524d4 Create main runner app, with async multithreading 2024-01-17 16:06:15 -06:00
33671683ea test rendering json in HTML table format 2024-01-17 09:23:06 -06:00
fad885c610 Add UR5 control test, datasheet JSON output 2024-01-16 17:27:55 -06:00
12 changed files with 316 additions and 214 deletions

.gitignore (vendored, 11 changes)

@@ -1,9 +1,16 @@
# python
venv
__pycache__
# cable data folder(s)
cables
cables-sample.zip
# meilisearch (mainly where I've put the data volume for the container)
meili_data
# IDE things
.vscode
output.log
.idea
# videos
*.webm
output.mp4
# log files
output.log
cables-sample.zip

Dockerfile (new file, 11 changes)

@@ -0,0 +1,11 @@
FROM python:latest
RUN apt-get update && apt-get install -y libgl1-mesa-glx ghostscript && apt-get clean && rm -rf /var/lib/apt/lists
COPY . .
#COPY config-server.yml config.yml
RUN pip3 install -r requirements.txt
CMD ["python3", "run.py"]
EXPOSE 5000
EXPOSE 8000
EXPOSE 9000

compose.yml (new file, 13 changes)

@@ -0,0 +1,13 @@
services:
meilisearch:
image: "getmeili/meilisearch:v1.6.2"
ports:
- "7700:7700"
environment:
MEILI_MASTER_KEY: fluffybunnyrabbit
MEILI_NO_ANALYTICS: true
volumes:
- "meili_data:/meili_data"
volumes:
meili_data:
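
A quick way to confirm the container defined above is reachable once it is running: a minimal sketch using the meilisearch Python client (the same client search.py uses below), assuming the master key from this compose file.

# Sketch: confirm the Meilisearch container from compose.yml is up.
from meilisearch import Client

client = Client("http://localhost:7700", "fluffybunnyrabbit")
print(client.health())  # expect {'status': 'available'} when the service is ready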

(file name not shown)

@@ -32,13 +32,13 @@ led:
ledstart: 288
ledend: 431
mode: rgb
- universe: 1
ip: 192.168.68.130
- universe: 4
ip: 192.168.5.40
ledstart: 432
ledend: 575
mode: rgb
- universe: 4
ip: 192.168.68.131
- universe: 1
ip: 192.168.5.4
ledstart: 576
ledend: 719
mode: rgb

database.py (deleted)

@@ -1,140 +0,0 @@
"""This module contains functionality for interacting with a PostgreSQL database. It will automatically handle error
conditions (i.e. missing columns) without terminating the entire program. Use the :py:class:`DBConnector` class to
handle database interactions, either as a standalone object or in a context manager."""
from __future__ import annotations
import os
import psycopg2
from psycopg2 import DatabaseError, OperationalError
from psycopg2.errors import UndefinedColumn
DB_ADDRESS = os.getenv('DB_ADDRESS', 'localhost')
DB_PORT = os.getenv('DB_PORT', 5432)
DB_USER = os.getenv('DB_USER', 'postgres')
DB_PASSWORD = os.getenv('DB_PASSWORD', '')
DB_NAME = os.getenv('DB_NAME', 'postgres')
DB_TABLE = os.getenv('DB_TABLE', 'cables')
class DBConnector:
"""Context managed database class. Use with statements to automatically open and close the database connection, like
so:
.. code-block:: python
with DBConnector() as db:
db.read()
"""
def _db_start(self):
"""Setup the database connection and cursor."""
try:
self.conn = psycopg2.connect(
f"host={DB_ADDRESS} port={DB_PORT} dbname={DB_NAME} user={DB_USER} password={DB_PASSWORD}")
self.cur = self.conn.cursor()
except OperationalError as e:
raise e
def _db_stop(self):
"""Close the cursor and connection."""
self.cur.close()
self.conn.close()
def __init__(self):
self._db_start()
def __del__(self):
self._db_stop()
def __enter__(self):
self._db_start()
def __exit__(self):
self._db_stop()
def _get_cols(self) -> set[str]:
"""Get the list of columns in the database.
:return: A list of column names."""
query = f"select COLUMN_NAME from information_schema.columns where table_name={DB_TABLE}"
rows = {x["COLUMN_NAME"] for x in self._query(query)}
return rows
def _column_parity(self, columns: list[str] | set[str]) -> set[str]:
"""If the listed columns are not in the database, add them.
:param columns: The columns we expect are in the database.
:return: The list of columns in the database after querying."""
cols = set(columns)
existing = self._get_cols()
needs = cols.difference(existing.intersection(cols))
if len(needs) > 0:
query = f"ALTER TABLE {DB_TABLE} {', '.join([f'ADD COLUMN {c}' for c in needs])}"
self._query(query)
existing = self._get_cols()
return existing
def _query(self, sql) -> list[dict]:
"""Basic function for running queries.
:param sql: SQL query as plaintext.
:return: Results of the query, or an empty list if none."""
result = []
try:
self.cur.execute(sql)
result = self._read_dict()
except DatabaseError as e:
print(f"ERROR {e.pgcode}: {e.pgerror}\n"
f"Caused by query: {sql}")
finally:
return result
def _read_dict(self) -> list[dict]:
"""Read the cursor as a list of dictionaries. psycopg2 defaults to using a list of tuples, so we want to convert
each row into a dictionary before we return it."""
cols = [i.name for i in self.cur.description]
results = []
for row in self.cur:
row_dict = {}
for i in range(0, len(row)):
if row[i]:
row_dict = {**row_dict, cols[i]: row[i]}
results.append(row_dict)
return results
def read(self, **kwargs) -> list[dict]:
"""Read rows from a database that match the specified filters.
:param kwargs: Column constraints; i.e. what value to filter by in what column.
:returns: A list of dictionaries of all matching rows, or an empty list if no match."""
args = []
for kw in kwargs.keys():
args.append(f"{kw} ILIKE {kwargs['kw']}")
query = f"SELECT * FROM {DB_TABLE}"
if len(args) > 0:
query += f" WHERE {' AND '.join(args)}"
return self._query(query)
def write(self, **kwargs) -> dict:
"""Write a row to the database.
:param kwargs: Values to write for each database; specify each column separately!
:returns: The row you just added."""
self._column_parity(set(kwargs.keys()))
values = []
for val in kwargs.keys():
values.append(kwargs[val])
query = f"INSERT INTO {DB_TABLE} ({', '.join(kwargs.keys())}) VALUES ({', '.join(values)})"
self._query(query)
return kwargs
def write_all(self, items: list[dict]) -> list[dict]:
"""Write multiple rows to the database.
:param items: Rows to write, as a list of dictionaries.
:returns: The rows that were added successfully."""
successes = []
for i in items:
res0 = self.write(**i)
if res0:
successes.append(res0)
return successes
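
With database.py removed, the Postgres read/write path above is superseded by Meilisearch (see search.py added below). A rough equivalence sketch, with hypothetical document values:

# Hypothetical replacement for DBConnector.read()/write() using search.py below.
from search import JukeboxSearch

jbs = JukeboxSearch()  # defaults to http://localhost:7700 and the "cables" index
jbs.add_document({"id": "example-1", "partnum": "10GXS12"})  # roughly db.write(...)
print(jbs.get_partnum("10GXS12"))  # roughly db.read(partnum=...)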

get_specs.py

@@ -26,7 +26,7 @@ def check_internet(url='https://belden.com', timeout=5):
def query_search(partnum):
def query_search(partnum, source):
"""token_url = "https://www.belden.com/coveo/rest/token?t=" + str(int(time.time()))
with requests.get(token_url) as r:
out = json.loads(r.content)
@@ -49,15 +49,50 @@ def query_search(partnum):
# Bash script uses some crazy json formatting that I could not figure out
# Despite the fact that I wrote it
# So I'll just leave it, because it works.
if source == "Belden":
command = ["./query-search.sh", partnum]
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
if result.returncode != 0: # error
fprint("No results found in search database for " + partnum + ". No hi-res part image available.", result.stderr)
return False
else:
data_out = json.loads(result.stdout)
return data_out
elif source == "Alphawire":
alphaurl = "https://www.alphawire.com//sxa/search/results/?l=en&s={4A774076-6068-460C-9CC6-A2D8E85E407F}&itemid={BF82F58C-EFD9-4D8B-AE3E-097DD12CF7DA}&sig=&autoFireSearch=true&productpartnumber=*" + partnum + "*&v={B22CD56D-AB95-4048-8AA1-5BBDF2F2D17F}&p=10&e=0&o=ProductPartNumber%2CAscending"
r = requests.get(url=alphaurl)
data = r.json()
output = dict()
#print(data)
try:
if data["Count"] > 0:
print(data["Results"][0]["Url"])
result = data["Results"][0]
if result["Url"].split("/")[-1] == partnum:
#print(partnum)
print(result["Html"])
try:
imgidx = result["Html"].index("<img src=") + 10
imgidx2 = result["Html"].index("?", imgidx)
output["image"] = result["Html"][imgidx:imgidx2]
if output["image"].index("http") != 0:
output["image"] = ""
print("No cable image found.")
except:
print("No cable image found.")
command = ["./query-search.sh", partnum]
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
if result.returncode != 0: # error
fprint("No results found in search database for " + partnum + ". No hi-res part image available.", result.stderr)
dsidx = result["Html"].index("<a href=\"/disteAPI/") + 9
dsidx2 = result["Html"].index(partnum, dsidx) + len(partnum)
output["datasheet"] = "https://www.alphawire.com" + result["Html"][dsidx:dsidx2]
#"test".index()
print(output)
return output
except:
return False
return False
else:
data_out = json.loads(result.stdout)
return data_out
def touch(path):
with open(path, 'a'):
@@ -126,7 +161,7 @@ def get_multi(partnums):
sys.exit()
def _download_image(url, output_dir): # Download datasheet with known URL
def _download_image(url, output_dir): # Download image with known URL
global bartext
#fprint(url)
@@ -151,25 +186,31 @@ def get_multi(partnums):
os.remove(partnum + "/datasheet.pdf")
sys.exit()
def __use_cached_datasheet(partnum, path, output_dir):
def __use_cached_datasheet(partnum, path, output_dir, dstype):
fprint("Using cached datasheet for " + partnum)
bar.text = "Using cached datasheet for " + partnum
bar(skipped=True)
fprint("Parsing Datasheet contents of " + partnum)
bar.text = "Parsing Datasheet contents of " + partnum + ".pdf..."
read_datasheet.parse(path, output_dir)
read_datasheet.parse(path, output_dir, partnum, dstype)
bar(skipped=False)
def __downloaded_datasheet(partnum, path, output_dir):
def __downloaded_datasheet(partnum, path, output_dir, dstype):
fprint("Downloaded " + path)
bar.text = "Downloaded " + path
bar(skipped=False)
fprint("Parsing Datasheet contents of " + partnum)
bar.text = "Parsing Datasheet contents of " + partnum + ".pdf..."
read_datasheet.parse(path, output_dir)
read_datasheet.parse(path, output_dir, partnum, dstype)
bar(skipped=False)
for partnum in partnums:
for fullpartnum in partnums:
if fullpartnum[0:2] == "BL": # catalog.belden.com entry\
partnum = fullpartnum[2:]
dstype = "Belden"
elif fullpartnum[0:2] == "AW":
partnum = fullpartnum[2:]
dstype = "Alphawire"
output_dir = "cables/" + partnum
path = output_dir + "/datasheet.pdf"
bartext = "Downloading files for part " + partnum
@@ -177,7 +218,7 @@ def get_multi(partnums):
#
if (not os.path.exists(output_dir + "/found_part_hires")) or not (os.path.exists(path) and os.path.getsize(path) > 1):
# Use query
search_result = query_search(partnum.replace(" ", ""))
search_result = query_search(partnum.replace(" ", ""), dstype)
# Try to use belden.com search
if search_result is not False:
# Download high resolution part image if available and needed
@@ -190,17 +231,17 @@ def get_multi(partnums):
# Download datasheet from provided URL if needed
if os.path.exists(path) and os.path.getsize(path) > 1:
__use_cached_datasheet(partnum, path, output_dir)
__use_cached_datasheet(partnum, path, output_dir, dstype)
elif _download_datasheet(search_result["datasheet"], output_dir) is not False:
__downloaded_datasheet(partnum, path, output_dir)
__downloaded_datasheet(partnum, path, output_dir, dstype)
elif os.path.exists(path) and os.path.getsize(path) > 1:
__use_cached_datasheet(partnum, path, output_dir)
__use_cached_datasheet(partnum, path, output_dir, dstype)
# If search fails, and we don't already have the datasheet, guess datasheet URL and skip the hires image download
elif _try_download_datasheet(partnum, output_dir) is not False:
__downloaded_datasheet(partnum, path, output_dir)
__downloaded_datasheet(partnum, path, output_dir, dstype)
# Failed to download with search or guess :(
else:
@@ -213,7 +254,7 @@ def get_multi(partnums):
# We already have a hi-res image and the datasheet - perfect!
else:
fprint("Using cached hi-res part image for " + partnum)
__use_cached_datasheet(partnum, path, output_dir)
__use_cached_datasheet(partnum, path, output_dir, dstype)
if len(failed) > 0:
fprint("Failed to download:")
@@ -227,21 +268,22 @@ def get_multi(partnums):
if __name__ == "__main__":
partnums = ["10GXS12", "RST 5L-RKT 5L-949",
"10GXS13",
"10GXW12",
"10GXW13",
"2412",
"2413",
"OSP6AU",
"FI4D024P9",
"FISD012R9",
"FDSD012A9",
"FSSL024NG",
"FISX006W0",
"FISX00103",
"C6D1100007"
partnums = ["BL7958A", "BL10GXS12", "BLRST 5L-RKT 5L-949",
"BL10GXS13",
"BL10GXW12",
"BL10GXW13",
"BL2412",
"BL2413",
"BLOSP6AU",
"BLFI4D024P9",
"BLFISD012R9",
"BLFDSD012A9",
"BLFSSL024NG",
"BLFISX006W0",
"BLFISX00103",
"BLC6D1100007"
]
get_multi(partnums)
#query_search("3248", "Alphawire")

(file name not shown)

@@ -178,9 +178,9 @@ def init():
sendall(data)
#time.sleep(50000)
fprint("Running start-up test sequence...")
for y in range(1):
for y in range(100):
for x in range(len(leds)):
setpixel(5,5,5,x)
setpixel(0,0,150,x)
sendall(data)
#time.sleep(2)
#alloffsmooth()
@@ -290,7 +290,7 @@ def close():
time.sleep(0.5)
sender.stop()
def mapimage(image, fps=60):
def mapimage(image, fps=90):
global start
while uptime() - start < 1/fps:
time.sleep(0.00001)

map.png (binary file changed, not shown; 381 KiB before and after)

read_datasheet.py

@@ -9,8 +9,10 @@ from PIL import Image
import io
import json
from util import fprint
import uuid
from util import run_cmd
def parse(filename, output_dir):
def parse(filename, output_dir, partnum, dstype):
# Extract table data
@@ -22,6 +24,7 @@ def parse(filename, output_dir):
page = reader.pages[0]
table_list = {}
for table in tables:
table.df.infer_objects(copy=False)
table.df.replace('', np.nan, inplace=True)
table.df.dropna(inplace=True, how="all")
table.df.dropna(inplace=True, axis="columns", how="all")
@@ -137,44 +140,110 @@ def parse(filename, output_dir):
# multi-page table check
if table_name.isdigit() and len(tables) > 1:
fprint(table_name)
fprint(previous_table)
main_key = previous_table
cont_key = table_name
fprint(tables)
if vertical == False:
main_keys = list(tables[main_key].keys())
for i, (cont_key, cont_values) in enumerate(tables[cont_key].items()):
if i < len(main_keys):
fprint(tables[main_key][main_keys[i]])
tables[main_key][main_keys[i]] = (tables[main_key][main_keys[i]] + (cont_key,) + cont_values)
del tables[table_name]
else:
for key in tables[cont_key].keys():
tables[main_key][key] = tables[cont_key][key]
del tables[table_name]
if dstype == "Belden":
if table_name.isdigit() and len(tables) > 1:
fprint(table_name)
fprint(previous_table)
main_key = previous_table
cont_key = table_name
fprint(tables)
if vertical == False:
main_keys = list(tables[main_key].keys())
for i, (cont_key, cont_values) in enumerate(tables[cont_key].items()):
if i < len(main_keys):
fprint(tables[main_key][main_keys[i]])
tables[main_key][main_keys[i]] = (tables[main_key][main_keys[i]] + (cont_key,) + cont_values)
del tables[table_name]
else:
for key in tables[cont_key].keys():
tables[main_key][key] = tables[cont_key][key]
del tables[table_name]
previous_table = table_name
# remove multi-line values that occasionally squeak through
def replace_newlines_in_dict(d):
for key, value in d.items():
if isinstance(value, str):
# Replace \n with " " if the value is a string
d[key] = value.replace('\n', ' ')
elif isinstance(value, dict):
# Recursively call the function if the value is another dictionary
replace_newlines_in_dict(value)
return d
tables = replace_newlines_in_dict(tables)
fprint(tables)
with open(output_dir + "/tables.json", 'w') as json_file:
json.dump(tables, json_file)
# summary
output_table = dict()
output_table["partnum"] = partnum
id = str(uuid.uuid4())
output_table["id"] = id
#output_table["position"] = id
#output_table["brand"] = brand
output_table["fullspecs"] = tables
output_table["searchspecs"] = {"partnum": partnum, **flatten(tables)}
output_table["searchspecs"]["id"] = id
print(output_table)
run_cmd("rm " + output_dir + "/*.json") # not reliable!
with open(output_dir + "/" + output_table["searchspecs"]["id"] + ".json", 'w') as json_file:
json.dump(output_table["searchspecs"], json_file)
return output_table
def flatten(tables):
def convert_to_number(s):
try:
# First, try converting to an integer.
return int(s)
except ValueError:
# If that fails, try converting to a float.
try:
return float(s)
except ValueError:
# If it fails again, return the original string.
return s
out = dict()
print("{")
for table in tables.keys():
for key in tables[table].keys():
if len(key) < 64:
keyname = key
else:
keyname = key[0:64]
return tables
fullkeyname = (table + ": " + keyname).replace(".","")
if type(tables[table][key]) is not tuple:
out[fullkeyname] = convert_to_number(tables[table][key])
print("\"" + keyname + "\":", "\"" + str(out[fullkeyname]) + "\",")
elif len(tables[table][key]) == 1:
out[fullkeyname] = convert_to_number(tables[table][key][0])
print("\"" + keyname + "\":", "\"" + str(out[fullkeyname]) + "\",")
# if the item contains a comma, split it into a list
if tables[table][key].count(',') > 0:
out[fullkeyname] = list(map(lambda x: x.strip(), tables[table][key].split(",")))
print("\"" + keyname + "\":", "\"" + str(out[fullkeyname]) + "\",")
print("}")
return out
if __name__ == "__main__":
parse("test2.pdf", "10GXS13")
parse("test2.pdf", "cables/10GXS13", "10GXS13")

requirements.txt

@@ -5,7 +5,7 @@ pypdf2==2.12.1
alive-progress
requests
git+https://github.com/Byeongdulee/python-urx.git
psycopg2
meilisearch
pyyaml
Flask
selenium

search.py (new file, 100 changes)

@@ -0,0 +1,100 @@
"""Interactions with the Meilisearch API for adding and searching cables."""
from meilisearch import Client
from meilisearch.task import TaskInfo
from meilisearch.errors import MeilisearchApiError
import json
DEFAULT_URL = "http://localhost:7700"
DEFAULT_APIKEY = "fluffybunnyrabbit" # I WOULD RECOMMEND SOMETHING MORE SECURE
DEFAULT_INDEX = "cables"
DEFAULT_FILTERABLE_ATTRS = ["partnum", "uuid", "position"] # default filterable attributes
class JukeboxSearch:
"""Class for interacting with the Meilisearch API."""
def __init__(self,
url: str = None,
api_key: str = None,
index: str = None,
filterable_attrs: list = None):
"""Connect to Meilisearch and perform first-run tasks as necessary.
:param url: Address of the Meilisearch server. Defaults to ``http://localhost:7700`` if unspecified.
:param api_key: API key used to authenticate with Meilisearch. It is highly recommended to set this to something
secure if the endpoint is publicly accessible; if unspecified, it falls back to the default ``fluffybunnyrabbit``.
:param index: The name of the index to configure. Defaults to ``cables`` if unspecified.
:param filterable_attrs: List of all the attributes we want to filter by."""
# connect to Meilisearch
url = url or DEFAULT_URL
api_key = api_key or DEFAULT_APIKEY
filterable_attrs = filterable_attrs or DEFAULT_FILTERABLE_ATTRS
self.index = index or DEFAULT_INDEX
self.client = Client(url, api_key)
# create the index if it does not exist already
try:
self.client.get_index(self.index)
except MeilisearchApiError as _:
self.client.create_index(self.index)
# make a variable to easily reference the index
self.idxref = self.client.index(self.index)
self.idxref.update_filterable_attributes(filterable_attrs)
def add_document(self, document: dict) -> TaskInfo:
"""Add a cable to the Meilisearch index.
:param document: Dictionary containing all the cable data.
:returns: A TaskInfo object for the addition of the new document."""
return self.idxref.add_documents([document])  # the documents endpoint expects a list, so wrap the single dict
def add_documents(self, documents: list):
"""Add a list of cables to the Meilisearch index.
:param documents: List of dictionaries containing all the cable data.
:returns: A TaskInfo object for the last new document."""
taskinfo = None
for i in documents:
taskinfo = self.add_document(i)
return taskinfo
def search(self, query: str, filters: str = None):
"""Execute a search query on the Meilisearch index.
:param query: Search query.
:param filters: A meilisearch compatible filter statement.
:returns: The search results dict. Actual results are in a list under "hits", but there are other nice values that are useful in the root element."""
if filters:
q = self.idxref.search(query, {"filter": filters})
else:
q = self.idxref.search(query)
return q
def _filter_one(self, filter: str):
"""Get the first item to match a filter.
:param filter: A meilisearch compatible filter statement.
:returns: A dict containing the results; If no results found, an empty dict."""
q = self.search("", filter)
if q["estimatedTotalHits"] != 0:
return ["hits"][0]
else:
return dict()
def get_position(self, position: str):
"""Get a part by position.
:param position: The position to search for."""
return self._filter_one(f"position = {position}")
def get_uuid(self, uuid: str):
"""Get a specific UUID.
:param uuid: The UUID to search for."""
return self._filter_one(f"uuid = {uuid}")
def get_partnum(self, partnum: str):
"""Get a specific part number.
:param partnum: The part number to search for."""
return self._filter_one(f"partnum = {partnum}")

(file name not shown)

@@ -123,7 +123,7 @@ class Logger(object):
self.terminal = sys.stdout
def write(self, message):
self.log.write(message)
#self.log.write(message)
#close(filename)
#self.log = open(filename, "a")
try: