| import gradio as gr |
| from transformers import AutoModelForCausalLM, AutoTokenizer |
| import torch |
|
|
| |
# Load the small DialoGPT conversational model (~117M params) and its tokenizer.
# Weights are downloaded from the HuggingFace Hub on first run and cached locally.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")
|
|
| |
def chatbot(msg):
    """Generate a single-turn DialoGPT reply for *msg*.

    Args:
        msg: The user's question as plain text.

    Returns:
        The model's reply prefixed with the "🐱 Dr. Ask:" persona tag.
    """
    # Guard against empty/whitespace input instead of feeding it to the model.
    if not msg or not msg.strip():
        return "🐱 Dr. Ask: ..."
    input_ids = tokenizer.encode(msg + tokenizer.eos_token, return_tensors='pt')
    # max_new_tokens bounds only the generated continuation; the original
    # max_length=100 counted the prompt too, truncating replies to long questions.
    # inference_mode() skips autograd bookkeeping — this is pure inference.
    with torch.inference_mode():
        output_ids = model.generate(
            input_ids,
            max_new_tokens=100,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Slice off the echoed prompt tokens so only the new reply is decoded.
    reply = tokenizer.decode(output_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
    return f"🐱 Dr. Ask: {reply}"
|
|
# Wire the chatbot function into a minimal question/answer web UI.
demo = gr.Interface(
    fn=chatbot,
    inputs=gr.Textbox(label="Bạn hỏi gì nè?"),
    outputs=gr.Textbox(label="Dr. Ask trả lời"),
    title="🐱 Dr. Ask AI",
    theme="default",
)

# Launch only when run as a script; importing this module (e.g. for tests or
# `gradio` hot-reload) no longer starts the server as an import side effect.
if __name__ == "__main__":
    demo.launch()
|
|