alfraser committed on
Commit 2d7adb6
1 Parent(s): e6ad26b

Migrated from using print statements in the application code to using the logging module (prints left in files intended to be run as scripts)
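The pattern behind the change: src/common.py configures the root logger once at import time, the application modules then call logging.info / logging.error / logging.debug instead of print, and files run directly as scripts keep plain print output. A minimal standalone sketch of that split (the function and messages below are illustrative, not taken from the repo):

import logging

# One-time root-logger configuration, mirroring the call added to src/common.py
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s: %(message)s',
                    datefmt='%Y-%m-%d %I:%M:%S')


def process_query(name: str, query: str) -> None:
    # Library-style code now logs instead of printing, so each record carries a timestamp and level
    logging.info(f'{name} processing query "{query}"')
    try:
        raise RuntimeError("simulated failure")  # stand-in for a real error path
    except Exception as err:
        logging.error(f"Request / trace save failed {err}")


if __name__ == "__main__":
    # Script entry points keep plain console output, as the commit message notes
    print("Running as a script - print is still fine here")
    process_query("demo_architecture", "example query")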

pages/040_Test_Reporter.py CHANGED
@@ -70,7 +70,7 @@ def show_stats(for_test_group: str):
  questions = list(stats[0]['q_and_a'].keys())
  num_archs = len(stats)
  with st.expander("**Request/Response Details**"):
- print(f'Displaying {len(questions)} questions and {num_archs} architectures')
+ st.write(f'Displaying {len(questions)} questions and {num_archs} architectures')
  for q in questions:
  with st.expander(f"**{q}**"):
  for i in range(num_archs):
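Worth noting: in the Streamlit page the print was swapped for st.write rather than a logging call, so the count is rendered inside the expander for the user instead of disappearing into the server log. A small self-contained sketch of the same idea (the data values are made up):

import streamlit as st

questions = ["Question 1", "Question 2"]   # illustrative placeholder data
num_archs = 3

with st.expander("**Request/Response Details**"):
    # st.write renders in the page itself rather than on the server console
    st.write(f'Displaying {len(questions)} questions and {num_archs} architectures')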
src/architectures.py CHANGED
@@ -7,6 +7,7 @@ config/architectures.json

  import chromadb
  import json
+ import logging
  import os
  import regex as re
  import requests
@@ -227,11 +228,11 @@ class LogWorker(Thread):
  if request is None:
  # There was a period of inactivity so run the timeout functions
  for func in LogWorker.timeout_functions:
- print(f"LogWorker commit running {func.__name__}")
+ logging.info(f"LogWorker commit running {func.__name__}")
  try:
  func()
  except Exception as e:
- print(f"Timeout func {func.__name__} had error {e}")
+ logging.error(f"Timeout func {func.__name__} had error {e}")
  else:
  if LogWorker.commit_timer is not None and LogWorker.commit_timer.is_alive():
  # Cancel the inactivity timer
@@ -250,7 +251,7 @@ class LogWorker(Thread):
  if LogWorker.commit_count >= LogWorker.commit_after:
  LogWorker.commit_repo()
  except Exception as err:
- print(f"Request / trace save failed {err}")
+ logging.error(f"Request / trace save failed {err}")

  # Restart the inactivity timer
  LogWorker.commit_timer = Timer(LogWorker.commit_time, LogWorker.signal_commit)
@@ -262,14 +263,14 @@ class LogWorker(Thread):
  If the working log file is not download, then get a local copy.
  Add the new record to the local file.
  """
- print(f"LogWorker logging open record {LogWorker.commit_count + 1}")
+ logging.debug(f"LogWorker logging open record {LogWorker.commit_count + 1}")
  if cls.save_repo is None and not cls.save_repo_load_error:
  try:
  hf_write_token = hf_api_token(write=True)
  cls.save_repo = Repository(local_dir=cls.trace_dir, clone_from=cls.save_repo_url, token=hf_write_token)
  except Exception as err:
  cls.save_repo_load_error = True
- print(f"Error connecting to the save repo {err} - persistence now disabled")
+ logging.error(f"Error connecting to the save repo {err} - persistence now disabled")

  if cls.save_repo is not None:
  with open(cls.trace_file, 'r') as f:
@@ -284,7 +285,7 @@ class LogWorker(Thread):
  If there are any changes in the local file which are not committed to the repo then commit them.
  """
  if cls.commit_count > 0:
- print(f"LogWorker committing {LogWorker.commit_count} open records")
+ logging.info(f"LogWorker committing {LogWorker.commit_count} open records")
  cls.save_repo.push_to_hub()
  LogWorker.commit_count = 0

@@ -293,7 +294,7 @@ class LogWorker(Thread):
  # Signalling this back via the queue and not doing the work here as it would
  # be executed on the Timer thread and may conflict with resources if the main
  # LogWorker starts doing work concurrently.
- print("LogWorker signalling commit based on time elapsed")
+ logging.debug("LogWorker signalling commit based on time elapsed")
  cls.queue.put((None, None, None, None, None))

  @classmethod
@@ -350,7 +351,7 @@ class Architecture:
  cls.save_repo.push_to_hub()
  except Exception as err:
  cls.save_repo_load_error = True
- print(f"Error connecting to the save repo {err} - persistence now disabled")
+ logging.error(f"Error connecting to the save repo {err} - persistence now disabled")

  @classmethod
  def get_trace_records(cls) -> List[Dict]:
@@ -363,7 +364,7 @@ class Architecture:
  cls.save_repo = Repository(local_dir=cls.trace_dir, clone_from=cls.save_repo_url, token=hf_write_token)
  except Exception as err:
  cls.save_repo_load_error = True
- print(f"Error connecting to the save repo {err} - persistence now disabled")
+ logging.error(f"Error connecting to the save repo {err} - persistence now disabled")
  return []
  with open(cls.trace_file, 'r') as f:
  test_json = json.load(f)
@@ -436,7 +437,7 @@ class Architecture:
  :param request: The architecture request to pass down the pipeline
  :return: The trace record for this invocation of the architecture
  """
- print(f'{self.name} processing query "{request.request}"')
+ logging.info(f'{self.name} processing query "{request.request}"')
  trace = ArchitectureTrace()
  for component in self.steps:
  trace.start_trace(name=component.__class__.__name__)
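The replacements above map the old prints onto levels by intent: debug for per-record chatter, info for lifecycle events, error inside except blocks. One optional refinement, not part of this commit, is logging.exception, which records the traceback along with the message when called from a handler. A small sketch under that assumption (push_trace is a hypothetical stand-in):

import logging

logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s: %(message)s')


def push_trace() -> None:
    # Hypothetical stand-in for the repo push that LogWorker performs
    raise ConnectionError("could not reach the save repo")


try:
    push_trace()
except Exception as err:
    # What the commit does: log the message only
    logging.error(f"Error connecting to the save repo {err} - persistence now disabled")
    # Alternative: same idea, but the full traceback is appended to the record
    logging.exception("Error connecting to the save repo - persistence now disabled")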
src/common.py CHANGED
@@ -1,3 +1,4 @@
+ import logging
  import os
  import requests
  import streamlit as st
@@ -7,6 +8,8 @@ from time import time
  from typing import List, Tuple


+ logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %I:%M:%S')
+
  data_dir = os.path.join(os.path.dirname(__file__), '..', 'data')
  img_dir = os.path.join(os.path.dirname(__file__), '..', 'img')
  config_dir = os.path.join(os.path.dirname(__file__), '..', 'config')
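Because logging.basicConfig runs at module import time, any module that imports src/common.py configures the root logger for the whole process; the other modules only call logging.info(...) and inherit the level and format. A two-file sketch of that behaviour (both file names are hypothetical stand-ins):

# settings.py - hypothetical stand-in for src/common.py
import logging

logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s: %(message)s',
                    datefmt='%Y-%m-%d %I:%M:%S')

# worker.py - hypothetical stand-in for a module such as src/architectures.py
import logging
import settings  # importing it is enough to configure the root logger

logging.info("LogWorker committing 2 open records")
# Illustrative output: 2024-01-05 09:15:02 INFO: LogWorker committing 2 open records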
src/testing.py CHANGED
@@ -1,6 +1,7 @@
  from __future__ import annotations # For self-referencing annotations

  import json
+ import logging
  import os
  import shutil
  import sqlite3
@@ -37,7 +38,7 @@ class ArchitectureTestWorker(Thread):
  if arch is None: # None passed to signal end of test requests
  running = False
  else:
- print(f'{self.worker_name} running "{request.request}" through {arch}')
+ logging.info(f'{self.worker_name} running "{request.request}" through {arch}')
  architecture = Architecture.get_architecture(arch)
  architecture(request, trace_tags=self.trace_tags, trace_comment=self.trace_comment)
  finally:
@@ -338,7 +339,7 @@ class TestGroup:
  into the class variable - for efficiency do not reload unless requested
  """
  if cls.force_load_all not in LogWorker.timeout_functions:
- print("TestGroup adding forced refresh to LogWorker timeout")
+ logging.info("TestGroup adding forced refresh to LogWorker timeout")
  LogWorker.timeout_functions.append(TestGroup.force_load_all)

  if cls.all is None or reload:
@@ -429,7 +430,7 @@ def move_test_records_to_db(hf_hub_token: str) -> None:
  con.execute(sql)
  con.commit()
  else:
- print(f"Warning TestGroup {test_group.test_group} was not added to the DB as it already existed there")
+ logging.warning(f"TestGroup {test_group.test_group} was not added to the DB as it already existed there")

  def load_all_test_groups_to_db(con: sqlite3.Connection) -> None:
  """
@@ -446,7 +447,7 @@ def move_test_records_to_db(hf_hub_token: str) -> None:
  conn = get_local_db()
  load_all_test_groups_to_db(conn)
  Architecture.wipe_trace(hf_hub_token)
- print("REMINDER: need to commit the local sqlite file to make it available to the server")
+ logging.info("REMINDER: need to commit the local sqlite file to make it available to the server")


  if __name__ == "__main__":