# AbstractAI_PHI / app.py — Gradio chatbot for the fine-tuned Phi-3 model
# (uploaded by rajj0, commit 0357984; "Update app.py")
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Hugging Face Hub repo id of the fine-tuned Phi-3 checkpoint to serve.
model_path = "rajj0/autotrain-phi3-midium-4k-godsent-orpo-6"

# Load tokenizer and weights once at import time. device_map="auto" lets
# accelerate place the model on whatever hardware is available, and
# torch_dtype="auto" keeps the dtype stored in the checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path, device_map="auto", torch_dtype="auto"
)
# Inference only: switch off dropout and other train-mode behaviour.
model.eval()
# Function to generate a response from the model
def generate_response(user_input, max_new_tokens=256):
    """Generate a chat reply for *user_input* with the loaded PHI model.

    Args:
        user_input: The user's message text.
        max_new_tokens: Upper bound on generated tokens (default 256).
            The original call passed no limit, relying on the model's
            default generation config.

    Returns:
        The decoded model reply, with the prompt tokens and special
        tokens stripped.
    """
    messages = [{"role": "user", "content": user_input}]
    input_ids = tokenizer.apply_chat_template(
        conversation=messages,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors='pt',
    )
    # Bug fix: the original hard-coded .to('cuda'), which crashes on
    # CPU-only hosts even though the model is loaded with device_map="auto".
    # Send the inputs to wherever the model actually landed.
    input_ids = input_ids.to(model.device)
    # no_grad: inference only — don't build an autograd graph.
    with torch.no_grad():
        output_ids = model.generate(input_ids, max_new_tokens=max_new_tokens)
    # Slice off the prompt so only the newly generated tokens are decoded.
    response = tokenizer.decode(
        output_ids[0][input_ids.shape[1]:], skip_special_tokens=True
    )
    return response
# Wire the generator into a simple text-in / text-out Gradio UI.
interface_config = dict(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="PHI Model Chatbot",
    description="A chatbot powered by the PHI model.",
)
iface = gr.Interface(**interface_config)

# Start the web server only when executed as a script, not on import.
if __name__ == "__main__":
    iface.launch()