# AI Sentiment Essay Generator — Hugging Face Space (author: dimoZ)
# Revision: 25d4b1a (verified)
import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification, AutoModelForCausalLM
import torch
# ----------------------------------
# Page Configuration
# ----------------------------------
st.set_page_config(
    page_title="AI Sentiment Essay Generator",
    page_icon="🧠",
    layout="wide",
)

# Dark-theme styling: gradient banners for each sentiment class plus the
# bordered card used to display the generated essay.
_CUSTOM_CSS = """
<style>
body {
background-color: #0e1117;
color: white;
}
.positive {background: linear-gradient(90deg, #3fc1c9, #4caf50); border-radius: 10px; padding: 1rem; margin-bottom:1rem;}
.negative {background: linear-gradient(90deg, #f05454, #b33939); border-radius: 10px; padding: 1rem; margin-bottom:1rem;}
.neutral {background: linear-gradient(90deg, #607d8b, #455a64); border-radius: 10px; padding: 1rem; margin-bottom:1rem;}
.essay-box {
background-color: #1e1e2e;
border: 1px solid #333;
border-radius: 10px;
padding: 1.5rem;
margin-top: 1rem;
color: #f1f1f1;
box-shadow: 0 0 10px rgba(0,0,0,0.3);
}
</style>
"""
st.markdown(_CUSTOM_CSS, unsafe_allow_html=True)

st.title("🧠 AI Sentiment Essay Generator")
st.caption("Analyze your topic’s sentiment and let AI write an expressive essay aligned with it 🎭")
# ----------------------------------
# Model Initialization (cached)
# ----------------------------------
@st.cache_resource
def load_models():
    """Load and cache the two models used by the app.

    Returns:
        tuple: (sentiment-analysis pipeline, generation tokenizer,
        causal-LM generation model).
    """
    # Sentiment classifier: RoBERTa fine-tuned for 3-way Twitter polarity.
    classifier = pipeline(
        "sentiment-analysis",
        model="cardiffnlp/twitter-roberta-base-sentiment-latest",
    )

    # Essay generator: small Qwen causal language model.
    gen_checkpoint = "Qwen/Qwen1.5-1.8B"
    gen_tokenizer = AutoTokenizer.from_pretrained(gen_checkpoint)
    gen_model = AutoModelForCausalLM.from_pretrained(gen_checkpoint)

    return classifier, gen_tokenizer, gen_model
# Load the (cached) models once at startup.
sentiment_analyzer, tokenizer, text_model = load_models()

# ----------------------------------
# User Input
# ----------------------------------
# FIX: the original widget labels contained mojibake ("πŸ’¬", "✨" — UTF-8
# emoji bytes decoded as Latin-1); restored to the intended emoji.
user_prompt = st.text_area(
    "💬 Enter a topic or sentence:",
    placeholder="e.g., The future of Artificial Intelligence...",
)
generate_btn = st.button("✨ Generate Essay")
# ----------------------------------
# Processing
# ----------------------------------
if generate_btn and user_prompt.strip():
    # --- Sentiment analysis ---
    with st.spinner("Analyzing sentiment..."):
        sentiment_result = sentiment_analyzer(user_prompt)[0]

    # FIX: recent versions of this checkpoint emit human-readable labels
    # ("negative"/"neutral"/"positive"), not "LABEL_n" ids, so the original
    # lookup fell through with a lowercase label and the emoji/CSS maps
    # missed. Keep the LABEL_n mapping for older configs and normalize any
    # other label to Title case.
    label_map = {
        "LABEL_0": "Negative",
        "LABEL_1": "Neutral",
        "LABEL_2": "Positive",
    }
    raw_label = sentiment_result["label"]
    sentiment_label = label_map.get(raw_label, raw_label.capitalize())
    confidence = round(sentiment_result["score"] * 100, 2)

    # --- Sentiment banner ---
    # FIX: emoji values were mojibake in the original; restored.
    sentiment_emoji = {"Positive": "😊", "Negative": "😠", "Neutral": "😐"}
    sentiment_class = sentiment_label.lower()
    # FIX: emit the banner as ONE markdown call — Streamlit renders each
    # st.markdown() independently, so an opening <div> in one call does not
    # wrap the heading emitted by the next call.
    st.markdown(
        f"<div class='{sentiment_class}'>"
        f"<h3 style='text-align:center;'>{sentiment_emoji.get(sentiment_label, '😐')} "
        f"Detected Sentiment: <b>{sentiment_label}</b> ({confidence}%)</h3>"
        f"</div>",
        unsafe_allow_html=True,
    )

    # ----------------------------------
    # Generate Essay
    # ----------------------------------
    with st.spinner("Generating essay with AI..."):
        gen_prompt = f"Write a {sentiment_label.lower()} and expressive essay with a short title about: {user_prompt}"
        input_ids = tokenizer(gen_prompt, return_tensors="pt").input_ids
        # Inference only: skip autograd graph construction to save memory.
        with torch.no_grad():
            output_ids = text_model.generate(
                input_ids,
                max_new_tokens=500,
                temperature=0.9,
                top_p=0.95,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
            )
        generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)

    # Clean up output: drop the echoed prompt and bold a "Title:" marker.
    cleaned_text = generated_text.replace(gen_prompt, "").strip()
    cleaned_text = cleaned_text.replace("Title:", "**Title:**").strip()

    # ----------------------------------
    # Display Essay in Styled Box
    # ----------------------------------
    # FIX: heading/caption/warning emoji were mojibake in the original;
    # restored to the intended characters.
    st.markdown("### 📝 AI-Generated Essay")
    st.markdown(f"<div class='essay-box'>{cleaned_text}</div>", unsafe_allow_html=True)
    st.markdown("---")
    st.caption("✨ Powered by RoBERTa + Qwen | Streamlit Frontend")
elif generate_btn:
    st.warning("⚠️ Please enter a topic or sentence first.")