import gradio as gr
from textblob import TextBlob
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load the BlenderBot model and tokenizer once at startup.
pvm_model_adapt = "facebook/blenderbot-400M-distill"
tokenizer = AutoTokenizer.from_pretrained(pvm_model_adapt)
model = AutoModelForSeq2SeqLM.from_pretrained(pvm_model_adapt)


def analyze_sentiment(text):
    """Classify the message as positive, negative, or neutral via TextBlob polarity."""
    sentiment_score = TextBlob(text).sentiment.polarity
    if sentiment_score > 0.3:
        return "positive"
    elif sentiment_score < -0.3:
        return "negative"
    else:
        return "neutral"


def chatbot_response(user_input):
    """Generate a reply and prefix it with a banner that adapts to the user's sentiment."""
    sentiment = analyze_sentiment(user_input)

    # Generate the model's reply to the user message.
    inputs = tokenizer(user_input, return_tensors="pt")
    output = model.generate(**inputs)
    response = tokenizer.decode(output[0], skip_special_tokens=True)

    # Pick a UI banner that matches the detected sentiment.
    if sentiment == "positive":
        ui_style = "😊 Friendly mode activated!"
    elif sentiment == "negative":
        ui_style = "😟 Supportive mode activated. I'm here to help."
    else:
        ui_style = "😐 Neutral mode."

    return f"{ui_style}\n\nChatbot: {response}"


# Build and launch the Gradio interface.
iface = gr.Interface(
    fn=chatbot_response,
    inputs=gr.Textbox(lines=2, placeholder="Start with your message..."),
    outputs="text",
    title="PVM Adaptive Chat",
    description="This chatbot adapts to the user's sentiment.",
)

iface.launch()
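
# Quick sanity check (hypothetical inputs; the actual model output will vary):
# chatbot_response() can be called directly before launching the UI, e.g.
#   print(chatbot_response("I had a great day today!"))    # expect the friendly banner
#   print(chatbot_response("Nothing is working for me."))  # expect the supportive banner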