File size: 1,111 Bytes
c78dbbc
0357984
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c78dbbc
 
0357984
c78dbbc
0357984
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Hugging Face Hub repository that holds the fine-tuned PHI model weights.
model_path = "rajj0/autotrain-phi3-midium-4k-godsent-orpo-6"

# Fetch the tokenizer and the causal-LM weights; `device_map="auto"` lets
# transformers place parameters on the available device(s) and
# `torch_dtype='auto'` keeps the checkpoint's native precision.
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    device_map="auto",
    torch_dtype='auto',
)
# Inference mode: disables dropout and other train-time behavior.
model.eval()

# Function to generate a response from the model
def generate_response(user_input, max_new_tokens=256):
    """Generate a chat reply for a single user message.

    Args:
        user_input: The user's message text.
        max_new_tokens: Upper bound on generated tokens. The original code
            passed no limit, so `generate` fell back to the transformers
            default (~20 tokens) and replies were silently truncated.

    Returns:
        The model's reply as a string, with the prompt tokens and any
        special tokens stripped.
    """
    messages = [{"role": "user", "content": user_input}]
    input_ids = tokenizer.apply_chat_template(
        conversation=messages,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors='pt',
    )
    # BUG FIX: the original did `.to('cuda')` unconditionally, which crashes
    # on CPU-only hosts even though the model was loaded with
    # device_map="auto". Send the inputs to wherever the model actually is.
    input_ids = input_ids.to(model.device)
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        output_ids = model.generate(input_ids, max_new_tokens=max_new_tokens)
    # Slice off the prompt so only the newly generated continuation is decoded.
    response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
    return response

# Create the Gradio interface
# Single-turn UI: one text box in, one text box out. The "text" shorthand
# strings are Gradio's built-in aliases for Textbox components.
iface = gr.Interface(
    fn=generate_response,   # called with the input box's string on each submit
    inputs="text",
    outputs="text",
    title="PHI Model Chatbot",
    description="A chatbot powered by the PHI model."
)

# Launch the Gradio interface
# Guarded so importing this module (e.g. from tests) does not start the
# web server; launch() blocks and serves on Gradio's default host/port.
if __name__ == "__main__":
    iface.launch()