import gradio as gr
from fpl_client import FPLClient
from nlp_utils import process_query
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-1.3b-instruct", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-1.3b-instruct", trust_remote_code=True, torch_dtype=torch.bfloat16)

# Use the GPU when available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
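# bfloat16 matmuls can be slow on CPU builds of PyTorch; as a precaution
# (an addition, not part of the original app), cast back to float32 when
# no GPU is present.
if device.type == "cpu":
    model = model.float()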
# Theme builder (uncomment to explore themes interactively)
# gr.themes.builder()
theme = gr.themes.Soft(
    primary_hue="sky",
    neutral_hue="slate",
)
# Initialize the FPL client
fpl_client = FPLClient()
# Function to handle user input and generate a response
def chatbot_response(query):
    response = process_query(query, fpl_client)
    # If the response is a JSON object, flatten it into "key: value" lines
    if isinstance(response, dict):
        response = "\n".join(f"{key}: {value}" for key, value in response.items())

    # Generate a follow-up response with the language model
    messages = [{"role": "user", "content": query}]
    inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
    # Greedy decoding: top_k/top_p only apply when do_sample=True, so they are omitted here
    outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
    # Slice off the prompt tokens so only the newly generated text is decoded
    model_response = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)
    return response + "\n\n" + model_response
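# Assumed contract (inferred from the isinstance check above): process_query
# returns either a dict of FPL stats or a plain string answer. A minimal
# hypothetical stub for local testing without the real nlp_utils module
# could look like:
#
#     def process_query(query, client):
#         return {"query": query, "answer": "not implemented"}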
# Set up the Gradio interface
iface = gr.Interface(
    fn=chatbot_response,
    inputs=gr.Textbox(label="Ask our FPL Expert"),
    outputs=gr.Textbox(label="Hope it helps!"),
    theme=theme,
    title="FPL Chatbot",
)
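# Optional note (not in the original): 512-token generations can be slow on
# Spaces hardware; enabling Gradio's request queue before launching, e.g.
# iface.queue(), can help avoid request timeouts.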
if __name__ == "__main__":
    iface.launch()