DaniRIU committed on
Commit
802136d
1 Parent(s): 9c4fc30

Update app.py

Files changed (1)
  app.py  +5 -3
app.py CHANGED
@@ -2,9 +2,11 @@ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 import gradio as gr
 import time
 
-godel = gr.Interface.load("huggingface/microsoft/GODEL-v1_1-large-seq2seq")
+#godel = gr.load("huggingface/microsoft/GODEL-v1_1-large-seq2seq")
 tokenizer = AutoTokenizer.from_pretrained("microsoft/GODEL-v1_1-large-seq2seq")
-#model = AutoModelForSeq2SeqLM.from_pretrained(godel)
+print("Tokenizer loaded")
+model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/GODEL-v1_1-large-seq2seq")
+print("Loaded")
 
 def generate(dialog):
     dialog = [dialog]
@@ -22,7 +24,7 @@ def generate(dialog):
     dialog = ' EOS '.join(dialog)
     query = f"{instruction} [CONTEXT] {dialog} {knowledge}"
     input_ids = tokenizer(f"{query}", return_tensors="pt").input_ids
-    outputs = godel.generate(input_ids, max_length=128, min_length=8, top_p=0.9, do_sample=True)
+    outputs = model.generate(input_ids, max_length=128, min_length=8, top_p=0.9, do_sample=True)
     output = tokenizer.decode(outputs[0], skip_special_tokens=True)
     return output
 
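For context, a minimal standalone sketch of the code path this commit switches to: loading GODEL locally with AutoModelForSeq2SeqLM and calling model.generate on a [CONTEXT]-formatted query, instead of relying on gr.Interface.load. The instruction/knowledge handling and the example instruction string below follow the GODEL model card rather than the parts of app.py that are outside this diff's hunks, so treat them as assumptions.

# Minimal sketch of the updated generation path (assumptions noted in comments).
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("microsoft/GODEL-v1_1-large-seq2seq")
model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/GODEL-v1_1-large-seq2seq")

def generate(instruction, knowledge, dialog):
    # GODEL query format: "{instruction} [CONTEXT] {turn1 EOS turn2 ...} [KNOWLEDGE] {knowledge}"
    # The [KNOWLEDGE] prefixing is taken from the GODEL model card, not from this diff.
    if knowledge != "":
        knowledge = "[KNOWLEDGE] " + knowledge
    dialog = " EOS ".join(dialog)
    query = f"{instruction} [CONTEXT] {dialog} {knowledge}"
    input_ids = tokenizer(query, return_tensors="pt").input_ids
    # Same sampling settings as app.py's generate()
    outputs = model.generate(input_ids, max_length=128, min_length=8, top_p=0.9, do_sample=True)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

if __name__ == "__main__":
    # Example instruction and dialog turn are illustrative (based on the model card),
    # not values taken from app.py.
    instruction = "Instruction: given a dialog context, you need to response empathically."
    print(generate(instruction, "", ["Does money buy happiness?"]))

The practical effect of the change is that inference now runs inside the Space itself (the model weights are downloaded and executed locally) rather than being proxied through the hosted Inference API wrapper that gr.Interface.load returns, which is why generate() can call model.generate directly on tokenized input_ids.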