# ConvoAI / app.py
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch
import time
# Load model directly
model = AutoModelForCausalLM.from_pretrained("Wonder-Griffin/ZeusForCausalLM", trust_remote_code=True, torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained("Wonder-Griffin/ZeusForCausalLM")
device = torch.device("cpu")
model = model.to(device)
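# Optional sketch (assumption, not part of the original app): the model could be placed on a
# GPU automatically when one is available instead of the fixed CPU device above.
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model = model.to(device)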
def respond(message, history):
    # Build a minimal two-speaker prompt and let the model complete speaker B's turn.
    context = f"A: {message}\nB:"
    inputs = tokenizer(context, return_tensors="pt", truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}

    start_time = time.time()
    outputs = model.generate(
        **inputs,
        max_new_tokens=16,
        do_sample=False,
        pad_token_id=tokenizer.eos_token_id
    )
    duration = time.time() - start_time
    print(f"⏱️ Response time: {duration:.2f} seconds")

    # Strip the prompt from the decoded output and keep only the first generated line.
    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
    reply = decoded[len(context):].strip().split("\n")[0]

    history.append((message, reply))
    return history
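# Quick local check (hypothetical usage, not wired into the Gradio UI): uncomment to verify
# that generation works before launching the Space.
# if __name__ == "__main__":
#     print(respond("Hello, how are you?", []))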
with gr.Blocks() as demo:
    gr.Markdown("<h1 style='text-align: center; color: #2e8b57;'>🤖 ConvoAI</h1>")
    gr.Markdown("<p style='text-align: center; font-size: 18px;'>Conversational AI trained using the DailyDialog dataset. Engage in meaningful and natural conversations!</p>")
    gr.Markdown('<p style="text-align: center; color: #2e8b57;">Download the Model: <a href="https://huggingface.co/GBhaveshKumar/ConvoAI">Link</a></p>')

    chatbot = gr.Chatbot()
    textbox = gr.Textbox(placeholder="Type your message and press Enter.")
    state = gr.State([])

    def user_input(message, history):
        history = history or []
        updated_history = respond(message, history)
        # Clear the textbox and push the updated history to both the chatbot and the state.
        return "", updated_history, updated_history

    textbox.submit(user_input, [textbox, state], [textbox, chatbot, state])

demo.launch()