File size: 1,619 Bytes
63b45ac |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 |
# BlenderBot chatbot served through a Gradio web UI.
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
import torch
import gradio as gr
# Prefer GPU when available; inference tensors must be moved to this device.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Local checkpoint directory — assumes the distilled BlenderBot model was
# downloaded here beforehand (TODO confirm the path exists at startup).
model_name = "./blenderbot-1B-distill"
tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
model = BlenderbotForConditionalGeneration.from_pretrained(model_name)
model.to(device)
def get_reply(response, history=None):
    """Generate one chatbot turn and render the whole conversation as HTML.

    Args:
        response: The user's latest message (from the Gradio textbox).
        history: Alternating user/bot messages held in Gradio "state".

    Returns:
        A ``(html, history)`` pair: the rendered transcript and the
        updated conversation state.
    """
    # Fix: the original used a mutable default ([]), which is shared across
    # calls and would leak conversation state between sessions.
    history = [] if history is None else history
    # Fix: the original called input("You: ") here, which blocks on the
    # console and discards the Gradio textbox value. The `response`
    # parameter already carries the user's message.
    history.append(response)
    # A farewell message resets the conversation state.
    if response.endswith(("bye", "Bye", "bye.", "Bye.")):
        return "<div class='chatbot'>Chatbot restarted</div>", []
    # Keep only the most recent messages to bound the model's context.
    if len(history) > 4:
        history = history[-4:]
    inputs = tokenizer(" ".join(history), return_tensors="pt").to(device)
    outputs = model.generate(**inputs)
    # skip_special_tokens strips BOS/EOS robustly; the original sliced
    # outputs[0][1:-1], which assumes exactly one special token at each end.
    reply = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
    history.append(reply)
    # Render alternating bubbles; even indices are user turns, odd are bot.
    html = "<div class='chatbot'>"
    for i, msg in enumerate(history):
        cls = "user" if i % 2 == 0 else "bot"
        html += "<div class='msg {}'> {}</div>".format(cls, msg)
    html += "</div>"
    return html, history
# Styling for the chat transcript rendered by get_reply.
# Fix: the container div emitted by get_reply has class 'chatbot', but the
# flex rule here said '.chatbox', so the column layout never applied;
# renamed the selector to match the generated markup.
css = """
.chatbot {display:flex;flex-direction:column}
.msg {padding:4px;margin-bottom:4px;border-radius:4px;width:80%}
.msg.user {background-color:cornflowerblue;color:white}
.msg.bot {background-color:lightgray;align-self:self-end}
.footer {display:none !important}
"""
# Wire the chat function into a Gradio UI: a textbox plus hidden session
# state in, rendered HTML plus the updated state out. Queueing is enabled
# so long generations don't time out the HTTP request.
chat_ui = gr.Interface(
    fn=get_reply,
    theme="default",
    inputs=[gr.inputs.Textbox(placeholder="How are you?"), "state"],
    outputs=["html", "state"],
    enable_queue=True,
    css=css,
)
chat_ui.launch(debug=True)