king007 committed on
Commit
39598d0
β€’
1 Parent(s): 817d6d5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -9,13 +9,13 @@ model2 = AutoModelForSeq2SeqLM.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart
9
 
10
def generate(prompt, max_new_tokens):
    """Generate a completion for *prompt* with the primary model.

    Args:
        prompt: Input text to tokenize and feed to the model.
        max_new_tokens: Upper bound on newly generated tokens. Coerced to
            int because UI widgets (e.g. Gradio sliders/number inputs) may
            deliver this value as a float or string, which
            ``model.generate`` rejects.

    Returns:
        The first decoded sequence, with special tokens stripped.
    """
    batch = tokenizer(prompt, return_tensors="pt")
    # int(...) guards against non-integer widget values reaching generate().
    generated_ids = model.generate(batch["input_ids"], max_new_tokens=int(max_new_tokens))
    output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return output[0]
15
 
16
def generate2(prompt, max_new_tokens):
    """Generate a completion for *prompt* with the secondary model.

    Args:
        prompt: Input text to tokenize and feed to the model.
        max_new_tokens: Upper bound on newly generated tokens. Coerced to
            int because UI widgets (e.g. Gradio sliders/number inputs) may
            deliver this value as a float or string, which
            ``model2.generate`` rejects.

    Returns:
        The first decoded sequence, with special tokens stripped.
    """
    batch = tokenizer2(prompt, return_tensors="pt")
    # int(...) guards against non-integer widget values reaching generate().
    generated_ids = model2.generate(batch["input_ids"], max_new_tokens=int(max_new_tokens))
    output = tokenizer2.batch_decode(generated_ids, skip_special_tokens=True)
    return output[0]
21
 
 
9
 
10
def generate(prompt, max_new_tokens):
    """Decode one completion for *prompt*, capped at *max_new_tokens* tokens.

    Uses the module-level ``tokenizer``/``model`` pair; returns the first
    decoded sequence with special tokens removed.
    """
    encoded = tokenizer(prompt, return_tensors="pt")
    token_limit = int(max_new_tokens)
    generated = model.generate(encoded["input_ids"], max_new_tokens=token_limit)
    decoded = tokenizer.batch_decode(generated, skip_special_tokens=True)
    return decoded[0]
15
 
16
def generate2(prompt, max_new_tokens):
    """Decode one completion for *prompt*, capped at *max_new_tokens* tokens.

    Uses the module-level ``tokenizer2``/``model2`` pair; returns the first
    decoded sequence with special tokens removed.
    """
    encoded = tokenizer2(prompt, return_tensors="pt")
    token_limit = int(max_new_tokens)
    generated = model2.generate(encoded["input_ids"], max_new_tokens=token_limit)
    decoded = tokenizer2.batch_decode(generated, skip_special_tokens=True)
    return decoded[0]
21