5to9 committed on
Commit
ee6ec78
·
1 Parent(s): b6c4ccb

0.13 catch more exceptions

Browse files
Files changed (1) hide show
  1. app.py +7 -2
app.py CHANGED
@@ -24,6 +24,8 @@ models_available = [
24
  "mistralai/Mistral-7B-Instruct-v0.3",
25
  ]
26
 
 
 
27
  tokenizer_a, model_a = None, None
28
  tokenizer_b, model_b = None, None
29
  torch_dtype = torch.bfloat16
@@ -75,7 +77,6 @@ def load_model_b(model_id):
75
  try:
76
  model_id_b = model_id
77
  tokenizer_b = AutoTokenizer.from_pretrained(model_id)
78
- logging.debug(f"***** model B eos_token: {tokenizer_b.eos_token}")
79
  model_b = AutoModelForCausalLM.from_pretrained(
80
  model_id,
81
  torch_dtype=torch_dtype,
@@ -133,7 +134,7 @@ def generate_both(system_prompt, input_text, chatbot_a, chatbot_b, max_new_token
133
  top_p=top_p,
134
  repetition_penalty=repetition_penalty,
135
  )
136
-
137
  generation_kwargs_b = dict(
138
  input_ids=input_ids_b,
139
  streamer=text_streamer_b,
@@ -171,6 +172,8 @@ def generate_both(system_prompt, input_text, chatbot_a, chatbot_b, max_new_token
171
  yield chatbot_a, chatbot_b
172
  except StopIteration:
173
  finished_a = True
 
 
174
 
175
  if not finished_b:
176
  try:
@@ -183,6 +186,8 @@ def generate_both(system_prompt, input_text, chatbot_a, chatbot_b, max_new_token
183
  yield chatbot_a, chatbot_b
184
  except StopIteration:
185
  finished_b = True
 
 
186
 
187
  return chatbot_a, chatbot_b
188
 
 
24
  "mistralai/Mistral-7B-Instruct-v0.3",
25
  ]
26
 
27
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
28
+
29
  tokenizer_a, model_a = None, None
30
  tokenizer_b, model_b = None, None
31
  torch_dtype = torch.bfloat16
 
77
  try:
78
  model_id_b = model_id
79
  tokenizer_b = AutoTokenizer.from_pretrained(model_id)
 
80
  model_b = AutoModelForCausalLM.from_pretrained(
81
  model_id,
82
  torch_dtype=torch_dtype,
 
134
  top_p=top_p,
135
  repetition_penalty=repetition_penalty,
136
  )
137
+
138
  generation_kwargs_b = dict(
139
  input_ids=input_ids_b,
140
  streamer=text_streamer_b,
 
172
  yield chatbot_a, chatbot_b
173
  except StopIteration:
174
  finished_a = True
175
+ except Exception as e:
176
+ logging.error(f'{SPACER} Error: {e}, Traceback {traceback.format_exc()}')
177
 
178
  if not finished_b:
179
  try:
 
186
  yield chatbot_a, chatbot_b
187
  except StopIteration:
188
  finished_b = True
189
+ except Exception as e:
190
+ logging.error(f'{SPACER} Error: {e}, Traceback {traceback.format_exc()}')
191
 
192
  return chatbot_a, chatbot_b
193