yilunzhao commited on
Commit
34cb0d4
1 Parent(s): bf2f5f2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -19,14 +19,14 @@ else:
19
@spaces.GPU
def generate_response(passage: str, question: str) -> str:
    """Answer *question* using *passage* as context.

    The passage and question are combined into a single prompt, run
    through the causal LM on the GPU, and only the first line of the
    generated continuation is returned.
    """
    # The tokenizer expects a batch, hence the single-element list.
    prompt_batch = [f"Passage: {passage}\nQuestion: {question}"]
    encoded = tokenizer(
        prompt_batch, return_tensors='pt', return_token_type_ids=False
    ).to('cuda')

    # Cap the continuation at 100 newly generated tokens.
    generated = model.generate(**encoded, max_new_tokens=100)

    decoded = tokenizer.batch_decode(generated, skip_special_tokens=True)[0]

    # Drop the echoed prompt, then keep only the first generated line.
    # NOTE(review): assumes decoding round-trips the prompt verbatim —
    # confirm for this tokenizer.
    answer = decoded[len(prompt_batch[0]):].strip().split('\n')[0]

    return answer
 
 
19
@spaces.GPU
def generate_response(passage: str, question: str) -> str:
    """Answer *question* using *passage* as context.

    The passage and question are combined into a single prompt ending in
    an explicit ``Answer: `` cue, run through the causal LM on the GPU,
    and the generated continuation (minus the echoed prompt) is returned.
    """
    # The tokenizer expects a batch, hence the single-element list.
    prompt_batch = [f"Passage: {passage}\nQuestion: {question}\nAnswer: "]
    encoded = tokenizer(
        prompt_batch, return_tensors='pt', return_token_type_ids=False
    ).to('cuda')

    # Cap the continuation at 100 newly generated tokens.
    generated = model.generate(**encoded, max_new_tokens=100)

    decoded = tokenizer.batch_decode(generated, skip_special_tokens=True)[0]

    # Drop the echoed prompt; keep the full (whitespace-trimmed) answer.
    # NOTE(review): assumes decoding round-trips the prompt verbatim —
    # confirm for this tokenizer.
    answer = decoded[len(prompt_batch[0]):].strip()

    return answer