UraniaLi committed on
Commit b90f374 · 1 Parent(s): 4092a7e

Update app.py

Files changed (1)
  app.py +4 -3
app.py CHANGED
@@ -23,13 +23,14 @@ def extract_longer_answers_from_paragraphs(paragraphs, query, tokenizer, model):
     context += question
     inputs = tokenizer(context, return_tensors="pt", add_special_tokens=False).to(device)
     top_p = 0.9 # Adjust as needed
-    max_len = 50 # Adjust as needed
+    max_len = 300 # Adjust as needed
     outputs = model.generate(
        **inputs,
        top_p=top_p,
        max_length=max_len,
-       num_beams=1, # Adjust as needed
-       no_repeat_ngram_size=2 # Adjust as needed
+       num_beams=1,
+       do_sample= True,
+       no_repeat_ngram_size=2
     )
     answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
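
For reference, below is a minimal, self-contained sketch of what the generation call looks like after this commit. It is not the repository's full app.py: the model name ("gpt2") and the context/question strings are placeholders, since the actual model and prompt construction are not shown in this diff. The sketch illustrates why do_sample=True was added alongside top_p (nucleus sampling only takes effect when sampling is enabled) and why max_len was raised from 50 to 300 (so longer answers are not truncated).

# Sketch only: model name and prompt strings are placeholders, not the ones used in app.py.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2").to(device)

context = "Placeholder paragraph text.\n"
question = "Question: What does the paragraph say?\nAnswer:"
context += question

inputs = tokenizer(context, return_tensors="pt", add_special_tokens=False).to(device)

top_p = 0.9    # nucleus sampling threshold
max_len = 300  # raised from 50 in this commit so answers are not cut off early

outputs = model.generate(
    **inputs,
    top_p=top_p,
    max_length=max_len,
    num_beams=1,
    do_sample=True,          # required for top_p to have any effect; without it decoding is greedy
    no_repeat_ngram_size=2,  # blocks repeated bigrams in the generated answer
)
answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(answer)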