import streamlit as st
from transformers import pipeline
import google.generativeai as genai
import json
import random
# Load language configurations from JSON
with open('languages_config.json', 'r', encoding='utf-8') as f:
    LANGUAGES = json.load(f)['LANGUAGES']
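# Note: languages_config.json is not shown here. Based on the keys accessed further down
# ('title', 'input_placeholder', 'emotions_header', 'response_header'), each language entry
# is assumed to look roughly like this (values are illustrative only):
# {
#   "LANGUAGES": {
#     "English": {
#       "title": "Emotion-Aware Chatbot",
#       "input_placeholder": "Type your message...",
#       "emotions_header": "Detected Emotions",
#       "response_header": "AI Response"
#     }
#   }
# }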
# Load the JSON data for emotion templates
with open('emotion_templates.json', 'r') as f:
    data = json.load(f)
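# Note: emotion_templates.json is not shown here. Based on how it is used in create_prompt
# below (data["emotion_templates"][emotion] is a list of template strings containing
# placeholders such as "[topic]"), an entry is assumed to look roughly like:
# {
#   "emotion_templates": {
#     "joy": ["I'm so happy about [topic]!"],
#     "sadness": ["I'm sorry to hear about [topic]."]
#   }
# }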
# Configure Gemini (replace with your API key)
genai.configure(api_key="YOUR_GEMINI_API_KEY")  # never commit a real key; load it from an environment variable or Streamlit secrets
model = genai.GenerativeModel('gemini-2.0-flash')
def generate_text(prompt, context=""):
    """
    Generates text using the Gemini model.
    """
    try:
        response = model.generate_content(prompt)
        return response.text
    except Exception as e:
        print(f"Error generating text: {e}")
        return "I am sorry, I encountered an error while generating the text."
def create_prompt(emotion, topic=None):
    """
    Chooses a random prompt template for the detected emotion and fills it in.
    """
    templates = data["emotion_templates"][emotion]
    prompt = random.choice(templates)
    if topic:
        # Replace the various placeholders in the template with the topic text
        placeholders = ["[topic/person]", "[topic]", "[person]", "[object]", "[outcome]"]
        for placeholder in placeholders:
            prompt = prompt.replace(placeholder, topic)
        # Prepend the generation instructions and append the topic section
        instruction_prompt = "Make the generated text in the same language as the topic.\n"
        instruction_prompt += "Make the generated text short.\n"
        topic_section = "## topic\n" + topic
        prompt = instruction_prompt + prompt + topic_section
    return prompt
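# Illustrative example (assuming a "joy" template "I'm so happy about [topic]!"):
# create_prompt("joy", "my new job") would return the concatenation
#   "Make the generated text in the same language as the topic.\n"
#   "Make the generated text short.\n"
#   "I'm so happy about my new job!## topic\nmy new job"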
# 1. Emotion Detection Model (Using Hugging Face's transformer)
emotion_classifier = pipeline("text-classification", model="AnasAlokla/multilingual_go_emotions")
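# The text-classification pipeline returns a list of {'label': ..., 'score': ...} dicts; with
# the default settings only the top-scoring emotion is returned. If the app should list every
# emotion score, the classifier could instead be called with top_k=None, e.g.:
# emotion_predictions = emotion_classifier(user_input, top_k=None)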
# 2. Conversational Agent Logic
def get_ai_response(user_input, emotion_predictions):
    """Generates an AI response based on the user input and detected emotions."""
    dominant_emotion = None
    max_score = 0
    # Pick the emotion with the highest confidence score
    for prediction in emotion_predictions:
        if prediction['score'] > max_score:
            max_score = prediction['score']
            dominant_emotion = prediction['label']
    # Handle cases where no dominant emotion could be determined
    if dominant_emotion is None:
        return "Sorry, I could not detect an emotion to respond to."
    # Build an emotion-specific prompt around the user's text and ask Gemini for a reply
    prompt_text = create_prompt(dominant_emotion, user_input)
    return generate_text(prompt_text)
# 3. Streamlit Frontend
def main():
    # Language Selection
    selected_language = st.sidebar.selectbox(
        "Select Interface Language",
        list(LANGUAGES.keys()),
        index=0  # Default to English
    )
    # Display Image
    st.image('chatBot_image.jpg', channels='RGB')
    # Set page title and header based on the selected language
    st.title(LANGUAGES[selected_language]['title'])
    # Input Text Box
    user_input = st.text_input(
        LANGUAGES[selected_language]['input_placeholder'],
        ""
    )
    if user_input:
        # Emotion Detection
        emotion_predictions = emotion_classifier(user_input)
        # Display Emotions
        st.subheader(LANGUAGES[selected_language]['emotions_header'])
        for prediction in emotion_predictions:
            st.write(f"- {prediction['label']}: {prediction['score']:.2f}")
        # Get AI Response
        ai_response = get_ai_response(user_input, emotion_predictions)
        # Display AI Response
        st.subheader(LANGUAGES[selected_language]['response_header'])
        st.write(ai_response)

# Run the main function
if __name__ == "__main__":
    main()
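# Usage note (assumption): on Hugging Face Spaces this file is typically named app.py and is
# launched automatically. To run it locally, install streamlit, transformers, and
# google-generativeai, provide a valid Gemini API key, and start the app with:
#   streamlit run app.py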