paavansundar committed
Commit 6151565
1 Parent(s): 5a1884d

Update app.py

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -11,15 +11,15 @@ data_collator = DataCollatorForLanguageModeling(tokenizer=__tokenizer, mlm=False
 def queryGPT(question):
     return generate_response(__model, __tokenizer, question)
 
-def generate_response(__model, __tokenizer, prompt, max_length=200):
+def generate_response(model, tokenizer, prompt, max_length=200):
 
     input_ids = tokenizer.encode(prompt, return_tensors="pt") # 'pt' for returning pytorch tensor
 
     # Create the attention mask and pad token id
     attention_mask = torch.ones_like(input_ids)
-    pad_token_id = __tokenizer.eos_token_id
+    pad_token_id = tokenizer.eos_token_id
 
-    output = __model.generate(
+    output = model.generate(
         input_ids,
         max_length=max_length,
         num_return_sequences=1,
@@ -27,7 +27,7 @@ def generate_response(__model, __tokenizer, prompt, max_length=200):
         pad_token_id=pad_token_id
     )
 
-    return __tokenizer.decode(output[0], skip_special_tokens=True)
+    return tokenizer.decode(output[0], skip_special_tokens=True)
 
 with gr.Blocks() as demo:
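The rename matters because the old body already referenced the unprefixed names: it called tokenizer.encode(...) while the parameter was named __tokenizer, so calling the old function would raise a NameError unless a global tokenizer happened to exist. Below is a minimal, self-contained sketch of the function after this fix; it is an illustration, not the app's actual code. It assumes a GPT-2 checkpoint loaded through the transformers Auto classes (the app's real model/tokenizer setup is outside this diff), and it assumes the context line elided between the two hunks is attention_mask=attention_mask, so that the computed mask is actually passed to generate.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical setup for this sketch; the app's own loading code is not shown in the diff.
model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")

def generate_response(model, tokenizer, prompt, max_length=200):
    # Encode the prompt as a PyTorch tensor of token ids ('pt' = PyTorch)
    input_ids = tokenizer.encode(prompt, return_tensors="pt")

    # Attend to every prompt token; GPT-2 has no pad token, so reuse EOS for padding
    attention_mask = torch.ones_like(input_ids)
    pad_token_id = tokenizer.eos_token_id

    output = model.generate(
        input_ids,
        max_length=max_length,
        num_return_sequences=1,
        attention_mask=attention_mask,  # assumed: this line is elided between the diff's two hunks
        pad_token_id=pad_token_id,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)

print(generate_response(model, tokenizer, "Hello!"))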