Runtime error
Commit · a5056e5
Parent(s):
54bc641
Update app.py
app.py CHANGED
@@ -3,20 +3,20 @@ import gradio as gr
 import tensorflow as tf
 from transformers import GPT2LMHeadModel, GPT2Tokenizer
 
-global tokenizer, model, script_speaker_name, script_responder_name
+global tokenizer, model, script_speaker_name, script_responder_name, convo
 tokenizer = GPT2Tokenizer.from_pretrained("ethzanalytics/ai-msgbot-gpt2-XL-dialogue")
 model = GPT2LMHeadModel.from_pretrained("ethzanalytics/ai-msgbot-gpt2-XL-dialogue", pad_token_id=tokenizer.eos_token_id)
 
 script_speaker_name = "person alpha"
 
 script_responder_name = "person beta"
-
+convo = ''
 def output(prompt):
-    sentence = script_speaker_name + ': ' + prompt + '\n' + script_responder_name + ': '
+    sentence = convo + '\n' + script_speaker_name + ': ' + prompt + '\n' + script_responder_name + ': '
     input_ids = tokenizer.encode(sentence, return_tensors='pt')
     # generate text until the output length (which includes the context length) reaches 50
     output = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2, early_stopping=True)
-
+    convo = tokenizer.decode(output[0], skip_special_tokens=True))
 
 iface = gr.Interface(fn=output, inputs="text", outputs="text")
 iface.launch()
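
Even after this change, the new version as committed would still fail, which matches the Space's "Runtime error" status: the added line 19 has an unbalanced closing parenthesis (a SyntaxError), output() assigns convo without a global convo declaration inside the function (the module-level global statement does not carry into it), and the function returns nothing for the Gradio text output to display. The tensorflow import also appears unused, since the tensors are created with return_tensors='pt'. Below is a minimal sketch of what the update seems to be aiming for (a running transcript kept in convo and prepended to each new prompt); it is an assumption about the intent, not the committed code itself.

    import gradio as gr
    from transformers import GPT2LMHeadModel, GPT2Tokenizer

    tokenizer = GPT2Tokenizer.from_pretrained("ethzanalytics/ai-msgbot-gpt2-XL-dialogue")
    model = GPT2LMHeadModel.from_pretrained(
        "ethzanalytics/ai-msgbot-gpt2-XL-dialogue",
        pad_token_id=tokenizer.eos_token_id,
    )

    script_speaker_name = "person alpha"
    script_responder_name = "person beta"
    convo = ""

    def output(prompt):
        # global must be declared here, inside the function, so the assignment
        # below updates the shared transcript instead of creating a local variable
        global convo
        sentence = convo + "\n" + script_speaker_name + ": " + prompt + "\n" + script_responder_name + ": "
        input_ids = tokenizer.encode(sentence, return_tensors="pt")
        # generate until the total length (context + new tokens) reaches 50
        generated = model.generate(
            input_ids,
            max_length=50,
            num_beams=5,
            no_repeat_ngram_size=2,
            early_stopping=True,
        )
        convo = tokenizer.decode(generated[0], skip_special_tokens=True)
        # return the decoded text so the Gradio text output has something to show
        return convo

    iface = gr.Interface(fn=output, inputs="text", outputs="text")
    iface.launch()

Two caveats on this sketch: max_length=50 counts the prompt tokens as well, so once the transcript grows past roughly 50 tokens there is no budget left for a reply (recent transformers versions accept max_new_tokens, which suits a growing-transcript design better), and the decoded string is the whole transcript with speaker tags, not just person beta's latest reply.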