import gradio as gr
import torch
from transformers import pipeline
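# Pick the best available accelerator: Apple MPS, then CUDA, then CPU.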
if torch.backends.mps.is_available():
    device = torch.device("mps")
elif torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print(device)
model_name = "Qwen/Qwen2.5-1.5B-Instruct"
chatbot_model = pipeline("text-generation", model=model_name, device=device, batch_size=16)
# Pin the sentiment model explicitly; leaving it unset falls back to the pipeline
# default (distilbert/distilbert-base-uncased-finetuned-sst-2-english) with a warning.
sentiment_model = pipeline(
    "sentiment-analysis",
    model="distilbert/distilbert-base-uncased-finetuned-sst-2-english",
    device=device,
)
def echo(message, history, system_prompt, tokens):
    # Start from the system prompt so the UI's "System prompt" box actually
    # takes effect, then replay the prior turns.
    conversation_history = [{"role": "system", "content": system_prompt}]
    for user_msg, assistant_msg in history:
        conversation_history.append({"role": "user", "content": user_msg})
        conversation_history.append({"role": "assistant", "content": assistant_msg})

    # Classify the incoming message and steer the prompt: negative messages
    # get an explicitly empathetic framing.
    sentiment = sentiment_model(message)[0]
    if sentiment["label"] == "NEGATIVE":
        prompt = f"The user is upset, respond with empathy and support: {message}"
    else:
        prompt = f"Respond to the following query: {message}"
    conversation_history.append({"role": "user", "content": prompt})

    result = chatbot_model(conversation_history, num_return_sequences=1, max_new_tokens=tokens)
    # The pipeline echoes the whole conversation back; the final entry is the
    # newly generated assistant reply.
    return result[0]["generated_text"][-1]["content"]
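# A quick direct call for local testing (hypothetical inputs; history is empty here):
#   echo("I'm having a terrible day", [], "You are a friendly Chatbot.", 64)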
demo = gr.ChatInterface(
    echo,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System prompt"),
        gr.Slider(minimum=1, maximum=500, value=225, step=1, label="Max new tokens"),
    ],
    title="Scott's Chatbot",
    description="A friendly, AI-powered chatbot designed to respond to user queries based on the Qwen model.",
    # css="footer {visibility: hidden}"
)
if __name__ == "__main__":
    demo.launch()
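# To call the running app programmatically, Gradio's client can hit the
# ChatInterface endpoint. A minimal sketch, assuming the default local URL and
# the standard "/chat" api_name (adjust both to your deployment):
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860/")
#   reply = client.predict(
#       "I lost all my files!",          # message
#       "You are a friendly Chatbot.",   # system prompt
#       225,                             # max new tokens
#       api_name="/chat",
#   )
#   print(reply)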