Spaces:
Runtime error
Runtime error
Ahsen Khaliq
committed on
Commit
•
f05ef3d
1
Parent(s):
67e9336
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# app.py — Gradio demo for Microsoft DialoGPT (conversational response generation).
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import gradio as gr

# Load the pretrained DialoGPT-large tokenizer and model once at import time.
# NOTE(review): this downloads weights from the Hugging Face Hub on first run —
# startup is slow and requires network access.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
def dialogpt(text):
    """Run one chat turn: generate a DialoGPT reply to *text* and return the
    whole conversation rendered as HTML.

    Parameters
    ----------
    text : str
        The user's new message.

    Returns
    -------
    str
        HTML with alternating ``user_msg`` / ``resp_msg`` divs for every turn
        in the session's history.
    """
    # Per-session chat history: list of (user_msg, response) pairs.
    history = gr.get_state() or []

    # BUG FIX: the original wrapped this body in `for step in range(50000)`
    # with no break, so one call generated up to 50000 replies to the same
    # input and appended them all to history. It also never carried
    # `chat_history_ids` across calls, making the `step > 0` concatenation
    # dead. Instead, do a single generation step and rebuild the prior
    # context from the stored history.
    context = "".join(
        user_msg + tokenizer.eos_token + resp_msg + tokenizer.eos_token
        for user_msg, resp_msg in history
    )

    # Encode the conversation so far plus the new user input, EOS-terminated,
    # as a PyTorch tensor.
    bot_input_ids = tokenizer.encode(
        context + text + tokenizer.eos_token, return_tensors='pt'
    )

    # Generate a response while limiting the total chat history to 1000 tokens.
    chat_history_ids = model.generate(
        bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id
    )

    # Decode only the newly generated tokens (everything after the prompt).
    response = tokenizer.decode(
        chat_history_ids[:, bot_input_ids.shape[-1]:][0],
        skip_special_tokens=True,
    )

    history.append((text, response))
    gr.set_state(history)

    # Render the full conversation as simple HTML for the "html" output.
    html = "<div class='chatbot'>"
    for user_msg, resp_msg in history:
        html += f"<div class='user_msg'>{user_msg}</div>"
        html += f"<div class='resp_msg'>{resp_msg}</div>"
    html += "</div>"
    return html
# --- Gradio UI configuration -------------------------------------------------
inputs = gr.inputs.Textbox(lines=1, label="Input Text")

title = "DialoGPT"
description = "demo for Microsoft DialoGPT with Hugging Face transformers. To use it, simply input text or click one of the examples text to load them. Read more at the links below. *This is not a Microsoft product and is developed for Gradio*"
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1911.00536'>DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation</a> | <a href='https://github.com/microsoft/DialoGPT'>Github Repo</a> | <a href='https://huggingface.co/microsoft/DialoGPT-large'>Hugging Face DialoGPT-large</a></p>"
examples = [
    ["Hi, how are you?"],
    ["How far away is the moon?"],
]

# The function returns rendered HTML, so the output type is the literal
# "html". FIX: the original also defined
# `outputs = gr.outputs.Textbox(label="DialoGPT")`, which was never used —
# dead code removed. `debug=True` keeps the process attached and prints
# errors to the console.
gr.Interface(dialogpt, inputs, "html", title=title, description=description, article=article, examples=examples, css="""
.chatbox {display:flex;flex-direction:column}
.user_msg, .resp_msg {padding:4px;margin-bottom:4px;border-radius:4px;width:80%}
.user_msg {background-color:cornflowerblue;color:white;align-self:start}
.resp_msg {background-color:lightgray;align-self:self-end}
""").launch(debug=True)
|