Commit 34f1382 by Martín Santillán Cooper
Parent: baad5d9

improve logs
Files changed (2):
  1. src/app.py (+0, -1)
  2. src/model.py (+3, -2)
src/app.py CHANGED
@@ -72,7 +72,6 @@ def on_show_prompt_click(criteria, context, user_message, assistant_message, sta
 
     messages = get_messages(test_case=test_case, sub_catalog_name=state['selected_sub_catalog'])
     prompt = get_prompt(messages, criteria_name)
-    prompt = json.dumps(prompt, indent=4)
     prompt = prompt.replace('<', '&lt;').replace('>', '&gt;').replace('\\n', '<br>')
     return gr.Markdown(prompt)
 
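With the json.dumps(prompt, indent=4) line gone, the string returned by get_prompt flows straight into the HTML-escaping step. For reference, a runnable sketch of that step; the helper name is hypothetical. Note that '\\n' in Python source is a literal backslash-plus-n pair (the form json.dumps emits for newlines), not an actual newline, so whether it still matches depends on what get_prompt returns.

# Hypothetical helper mirroring the escaping kept by this hunk.
def escape_for_markdown(prompt: str) -> str:
    # Escape angle brackets so tags render as text, then turn literal
    # "\n" sequences (backslash + n) into HTML line breaks.
    return prompt.replace('<', '&lt;').replace('>', '&gt;').replace('\\n', '<br>')

# Usage: escaped newlines become <br>; real newlines are left untouched.
print(escape_for_markdown('<system>\\nBe concise.'))
# -> &lt;system&gt;<br>Be concise.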
src/model.py CHANGED
@@ -63,7 +63,7 @@ def get_prompt(messages, criteria_name):
 
 
 def generate_text(messages, criteria_name):
-    logger.debug(f'Prompts content is: \n{messages}')
+    logger.debug(f'Messages are: \n{messages}')
 
     mock_model_call = os.getenv('MOCK_MODEL_CALL') == 'true'
     if mock_model_call:
@@ -73,7 +73,8 @@ def generate_text(messages, criteria_name):
 
     start = time()
     chat = get_prompt(messages, criteria_name)
-
+    logger.debug(f'Prompt is \n{chat}')
+
     with torch.no_grad():
         output = model.generate(chat, sampling_params, use_tqdm=False)
 
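Read together, the two model.py hunks add a debug log of the incoming chat messages at the top of generate_text and a second log of the rendered prompt right before generation. A sketch stitching both hunks into one view; logger is set up here for completeness, while get_prompt, model, and sampling_params are assumed from the rest of src/model.py:

import logging
import os
from time import time

import torch

logger = logging.getLogger(__name__)

def generate_text(messages, criteria_name):
    logger.debug(f'Messages are: \n{messages}')  # log raw chat messages on entry

    mock_model_call = os.getenv('MOCK_MODEL_CALL') == 'true'
    if mock_model_call:
        ...  # mocked path, elided in this commit's hunks

    start = time()
    chat = get_prompt(messages, criteria_name)   # render messages into a prompt
    logger.debug(f'Prompt is \n{chat}')          # log the final prompt pre-generation

    with torch.no_grad():
        output = model.generate(chat, sampling_params, use_tqdm=False)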