from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch
import time

# Load model directly

model = AutoModelForCausalLM.from_pretrained("Wonder-Griffin/ZeusForCausalLM", trust_remote_code=True, torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained("Wonder-Griffin/ZeusForCausalLM")

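# Run inference on CPU (no GPU is assumed to be available)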
device = torch.device("cpu") 
model = model.to(device)

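# Build a two-speaker prompt ("A: <message>\nB:"), generate a short greedy
# completion, extract the first line of the reply, and append it to history.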
def respond(message, history):
    context = f"A: {message}\nB:"
    inputs = tokenizer(context, return_tensors="pt", truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}

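    # Greedy decoding (do_sample=False) with a 16-token cap keeps replies short
    # and fast; time the call for simple latency logging.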
    start_time = time.time()
    outputs = model.generate(
        **inputs,
        max_new_tokens=16,
        do_sample=False,
        pad_token_id=tokenizer.eos_token_id
    )
    duration = time.time() - start_time
    print(f"⏱️ Response time: {duration:.2f} seconds")

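    # Drop the prompt prefix and keep only the first generated line as the reply.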
    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
    reply = decoded[len(context):].strip().split("\n")[0]

    history.append((message, reply))
    return history

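# Gradio UI: centered markdown header, chatbot display, a textbox for input,
# and session state holding the conversation history.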
with gr.Blocks() as demo:
    gr.Markdown("<h1 style='text-align: center; color: #2e8b57;'>🤖 ConvoAI</h1>")
    gr.Markdown("<p style='text-align: center; font-size: 18px;'>Conversational AI trained using the DailyDialog dataset. Engage in meaningful and natural conversations!</p>")
    gr.Markdown('<p style="text-align: center; color: #2e8b57;">Download the Model: <a href="https://huggingface.co/GBhaveshKumar/ConvoAI">Link</a></p>')

    chatbot = gr.Chatbot()
    textbox = gr.Textbox(placeholder="Type your message and press Enter.")
    state = gr.State([])

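    # Clear the textbox and push the updated history to both the chatbot and the state.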
    def user_input(message, history):
        history = history or []
        updated_history = respond(message, history)
        return "", updated_history, updated_history

    textbox.submit(user_input, [textbox, state], [textbox, chatbot, state])

demo.launch()