xu3kev commited on
Commit
c626985
1 Parent(s): 0ad98a6

Decrease number of generated samples (num_return_sequences) from 64 to 32

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -223,7 +223,7 @@ def llm_call(question_prompt, model_name,
223
  top_p=1, n_samples=64, stop=None):
224
  if HUGGINGFACE:
225
  model_inputs = hug_tokenizer([question_prompt], return_tensors="pt").to('cuda')
226
- generated_ids = hug_model.generate(**model_inputs, max_length=1400, temperature=1, num_return_sequences=64, do_sample=True)
227
  responses = hug_tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
228
  codes = []
229
  for response in responses:
 
223
  top_p=1, n_samples=64, stop=None):
224
  if HUGGINGFACE:
225
  model_inputs = hug_tokenizer([question_prompt], return_tensors="pt").to('cuda')
226
+ generated_ids = hug_model.generate(**model_inputs, max_length=1400, temperature=1, num_return_sequences=32, do_sample=True)
227
  responses = hug_tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
228
  codes = []
229
  for response in responses: