divyahansg committed
Commit
b1e7c26
1 Parent(s): fa62343
Files changed (1)
  1. modules/text_generation.py +4 -0
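Summary of the change: four debug print() calls are added to generate_reply. On entry, the incoming question, eos_token, and stopping_string are printed; in the finally block, the raw output is printed just before the existing timing message.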
modules/text_generation.py CHANGED
@@ -91,6 +91,9 @@ def clear_torch_cache():
 
 def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=None, stopping_string=None):
     clear_torch_cache()
+    print(question)
+    print(eos_token)
+    print(stopping_string)
     t0 = time.time()
 
     # These models are not part of Hugging Face, so we handle them
@@ -234,5 +237,6 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
 
     finally:
         t1 = time.time()
+        print(output)
         print(f"Output generated in {(t1-t0):.2f} seconds ({(len(output)-len(original_input_ids[0]))/(t1-t0):.2f} tokens/s, {len(output)-len(original_input_ids[0])} tokens)")
         return
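For reference, the throughput figure in the timing message is (output length minus prompt length) divided by elapsed wall-clock time. Below is a minimal sketch of that computation; fake_generate and the toy 10-token prompt are hypothetical stand-ins for the real model call, where output holds the prompt plus generated token IDs and original_input_ids[0] is the prompt's token sequence.

import time

# A minimal sketch (not the module's code) of the timing/throughput logic in
# the final print statement above. fake_generate and the 10-token prompt are
# hypothetical stand-ins for the real model inference.
def fake_generate(input_ids, max_new_tokens):
    time.sleep(0.1)                          # stand-in for model inference
    return input_ids + [0] * max_new_tokens  # prompt IDs + new token IDs

input_ids = list(range(10))                  # hypothetical 10-token prompt

t0 = time.time()
output = fake_generate(input_ids, max_new_tokens=32)
t1 = time.time()

# New-token count = total output length minus prompt length, matching the
# diff's f-string: len(output) - len(original_input_ids[0])
new_tokens = len(output) - len(input_ids)
print(f"Output generated in {(t1-t0):.2f} seconds "
      f"({new_tokens/(t1-t0):.2f} tokens/s, {new_tokens} tokens)")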