Spaces:
Sleeping
Sleeping
File size: 1,170 Bytes
61c335c 463cbdb 63ccdfb 32b12c5 63ccdfb 32b12c5 63ccdfb 32b12c5 63ccdfb 32b12c5 63ccdfb 32b12c5 63ccdfb 32b12c5 63ccdfb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 |
# Load the tokenizer and fine-tuned chat model once at import time so the
# Gradio handler can reuse them across requests.
# NOTE: AutoModelWithLMHead is deprecated in transformers; AutoModelForCausalLM
# is the correct auto-class for decoder-only generative models like DialoGPT.
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-small', padding_side='right')
model = AutoModelForCausalLM.from_pretrained('tomkr000/scottbotai')
def chat(message, history=None):
    """Generate one bot reply to *message* and append the turn to *history*.

    Parameters
    ----------
    message : str
        The user's input text.
    history : list[tuple[str, str]] | None
        Accumulated (message, response) pairs. Gradio passes None for the
        initial "state" value, so a fresh list is created here. Using None
        instead of a mutable ``[]`` default also prevents conversation
        state from leaking between independent calls.

    Returns
    -------
    tuple[list, list]
        The same history list twice: once for the chatbot display output
        and once for the Gradio state component.
    """
    # Handles both Gradio's initial None state and the classic
    # mutable-default-argument bug (history=[] shared across calls).
    history = history or []

    # DialoGPT-style models expect the user turn terminated by the EOS token.
    inputs = tokenizer.encode(message + tokenizer.eos_token, return_tensors="pt")
    reply_ids = model.generate(
        inputs,
        max_length=1000,                       # cap on prompt + reply tokens combined
        pad_token_id=tokenizer.eos_token_id,   # explicit pad avoids a generate() warning
        no_repeat_ngram_size=3,
        do_sample=True,
        top_k=100,
        top_p=0.7,
        temperature=0.8,
    )
    # Slice off the prompt tokens so only the newly generated reply is decoded.
    response = tokenizer.decode(reply_ids[:, inputs.shape[1]:][0], skip_special_tokens=True)
    history.append((message, response))
    return history, history
# Build the UI: a text box + hidden state in, a chatbot display + state out.
# `demo` is bound to the Interface itself (the Hugging Face Spaces convention)
# rather than to the return value of .launch(), which the original code stored.
demo = gr.Interface(
    fn=chat,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    allow_flagging="never",  # hide the flagging button; no flagged-data dir needed
)
demo.launch()
|