# NLP + Emotion model
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForSeq2SeqLM
import lyricsgenius
import torch
import numpy as np
import pandas as pd
import joblib
import gradio as gr
import matplotlib.pyplot as plt
import warnings
from langdetect import detect
import os
import requests

warnings.filterwarnings("ignore")
# 🔑 Replace these with your real tokens
#GENIUS_API_TOKEN = os.getenv("GENIUS_API_TOKEN")
HF_TOKEN = os.getenv("HF_TOKEN")

# Initialize Genius API (optional, currently disabled)
#genius = lyricsgenius.Genius(GENIUS_API_TOKEN)
# Load emotion model from Hugging Face
# (labels: sadness, joy, love, anger, fear, surprise)
model_name = "bhadresh-savani/bert-base-uncased-emotion"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=HF_TOKEN)  # "token" replaces the deprecated use_auth_token
model = AutoModelForSequenceClassification.from_pretrained(model_name, token=HF_TOKEN)
model.eval()
# Load Meta's NLLB translation model
nllb_tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-1.3B")
nllb_model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-1.3B")
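# Note: nllb-200-1.3B is a multi-gigabyte checkpoint; this keeps the CPU
# default for portability, but on a GPU Space you could move both models
# over, e.g. nllb_model.to("cuda").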
# Load ML-based model (optional, currently disabled)
#emotion_model = joblib.load("emotion_model.pkl")
#song_encoder = joblib.load("song_encoder.pkl")
#emotion_decoder = joblib.load("emotion_decoder.pkl")
# Map langdetect codes to NLLB (FLORES-200) codes; extend as needed.
LANGDETECT_TO_NLLB = {"en": "eng_Latn", "tl": "tgl_Latn", "fil": "tgl_Latn"}

def translate_to_english(text):
    language = None
    try:
        language = detect(text)
        if language == 'en':
            print("✅ Detected English → no translation needed.")
            return text
        elif language in ('tl', 'fil'):
            print("🌐 Detected Tagalog → translating to English...")
        else:
            print(f"🌐 Detected '{language}' → attempting translation anyway.")
    except Exception as e:
        print(f"⚠️ Language detection failed: {e}. Proceeding with translation.")
    # Translate using NLLB; telling the tokenizer the source language helps,
    # since it otherwise assumes English input.
    nllb_tokenizer.src_lang = LANGDETECT_TO_NLLB.get(language, "eng_Latn")
    inputs = nllb_tokenizer(
        text, return_tensors="pt", truncation=True, padding=True, max_length=512
    )
    # convert_tokens_to_ids works on both slow and fast NLLB tokenizers,
    # unlike lang_code_to_id, which is not available on all tokenizer versions.
    translated_tokens = nllb_model.generate(
        **inputs,
        forced_bos_token_id=nllb_tokenizer.convert_tokens_to_ids("eng_Latn"),
        max_length=512,
    )
    return nllb_tokenizer.decode(translated_tokens[0], skip_special_tokens=True)
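# Illustrative usage (hypothetical inputs, outputs paraphrased):
#   translate_to_english("Mahal kita")   -> roughly "I love you"
#   translate_to_english("Hello world")  -> "Hello world" (returned unchanged)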
def get_translated_lyrics(title, artist):
    try:
        print(f"🎵 Searching for \"{title}\" by {artist} using Lyrics.ovh...")
        url = f"https://api.lyrics.ovh/v1/{artist}/{title}"
        response = requests.get(url, timeout=10)  # timeout so a slow API can't hang the app
        if response.status_code == 200:
            lyrics = response.json().get("lyrics", "")
            if lyrics.strip():
                print("📝 Original Lyrics Snippet:\n", lyrics[:300], "\n")
                return translate_to_english(lyrics)
            else:
                print(f"⚠️ Lyrics found but empty for: \"{title}\" by {artist}")
        else:
            print(f"⚠️ Lyrics.ovh returned status code {response.status_code} for: \"{title}\" by {artist}")
    except Exception as e:
        print(f"⚠️ Lyrics.ovh error: {e}")
    return None
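# Lyrics.ovh responds with JSON shaped like {"lyrics": "..."} on success, e.g.
#   GET https://api.lyrics.ovh/v1/Coldplay/Yellow   (illustrative lookup)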
def get_emotion_distribution(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)[0]
    labels = model.config.id2label
    return {labels[i]: float(probs[i]) for i in range(len(probs))}
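# Returns one probability per label, e.g. (values illustrative):
#   {"sadness": 0.02, "joy": 0.91, "love": 0.03,
#    "anger": 0.01, "fear": 0.01, "surprise": 0.02}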
def average_emotions(song_emotions):
    emotion_keys = list(song_emotions[0].keys())
    vectors = [[song.get(k, 0) for k in emotion_keys] for song in song_emotions]
    avg_vector = np.mean(vectors, axis=0)
    return dict(zip(emotion_keys, avg_vector))
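# Example (illustrative): element-wise mean across songs
#   average_emotions([{"joy": 0.8, "sadness": 0.2},
#                     {"joy": 0.4, "sadness": 0.6}])
#   -> {"joy": 0.6, "sadness": 0.4}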
def display_emotion_pie_chart(emotion_scores, top_n=6, min_threshold=0.01):
    sorted_emotions = sorted(emotion_scores.items(), key=lambda x: x[1], reverse=True)
    filtered = [(e, s) for e, s in sorted_emotions if s > min_threshold][:top_n]
    if not filtered:
        return None
    labels = [e.title() for e, _ in filtered]
    sizes = [s for _, s in filtered]
    explode = [0.05 if i == 0 else 0 for i in range(len(sizes))]
    colors = plt.cm.Pastel1.colors[:len(labels)]
    fig, ax = plt.subplots(figsize=(6, 6))
    ax.pie(sizes, labels=labels, autopct=lambda p: f"{p:.1f}%" if p > 3 else "",
           startangle=140, explode=explode, colors=colors,
           textprops=dict(color="black", fontsize=12),
           wedgeprops=dict(width=0.5, edgecolor='white'))
    ax.set_title("📊 Emotion Composition", fontsize=14, fontweight='bold')
    plt.tight_layout()
    return fig
# Single-song analysis (standalone helper; the UI below uses the 3-song flow)
def analyze_lyrics(song_title, artist_name):
    if not song_title or not song_title.strip():
        return "Please enter a song title.", None
    translated = get_translated_lyrics(song_title, artist_name)
    if not translated:
        return "❌ Lyrics not found or translation failed.", None
    emotions = get_emotion_distribution(translated)
    summary = f"🧠 Dominant Emotion: {max(emotions, key=emotions.get).upper()}"
    pie = display_emotion_pie_chart(emotions)
    return summary, pie
def analyze_multiple_songs(song1, artist1, song2, artist2, song3, artist3):
    all_emotions = []
    for title, artist in [(song1, artist1), (song2, artist2), (song3, artist3)]:
        if not title or not title.strip():
            print(f"⚠️ Skipping empty input: {title}")
            continue
        lyrics = get_translated_lyrics(title, artist)
        if lyrics:
            emotions = get_emotion_distribution(lyrics)
            all_emotions.append(emotions)
    if not all_emotions:
        return "❌ No valid songs found.", None
    avg_emotions = average_emotions(all_emotions)
    dominant_emotion = max(avg_emotions, key=avg_emotions.get).lower()
    # 🧠 Store mapped mood for recommendation
    general_mood = goemotion_to_general.get(dominant_emotion, "calm")
    detected_mood["mood"] = general_mood
    summary = f"🧠 Dominant Emotion: {dominant_emotion.upper()} → Mood: {general_mood.upper()}"
    pie = display_emotion_pie_chart(avg_emotions)
    return summary, pie
# Load pseudo-labeled songs
labeled_df = pd.read_csv("labeled_songs.csv")
label_map = {0: "sad", 1: "happy", 2: "energetic", 3: "calm"}
inverse_map = {v: k for k, v in label_map.items()}
labeled_df["predicted_emotion_name"] = labeled_df["predicted_emotion"].map(label_map)
# Mood-shift rules
mood_shift_map = {
    "sad": "happy",
    "happy": "calm",
    "calm": "energetic",
    "energetic": "calm"
}

# Track last detected mood
detected_mood = {"mood": None}
# Mapping from fine-grained emotion labels (GoEmotions-style; also covers the
# six labels of the BERT model above) to general moods
goemotion_to_general = {
    "admiration": "happy", "amusement": "happy", "anger": "sad", "annoyance": "energetic",
    "approval": "happy", "caring": "calm", "confusion": "sad", "curiosity": "energetic",
    "desire": "energetic", "disappointment": "sad", "disapproval": "energetic", "disgust": "energetic",
    "embarrassment": "sad", "excitement": "energetic", "fear": "sad", "gratitude": "happy",
    "grief": "sad", "joy": "happy", "love": "happy", "nervousness": "energetic", "optimism": "happy",
    "pride": "happy", "realization": "calm", "relief": "calm", "remorse": "sad", "sadness": "sad",
    "surprise": "energetic", "neutral": "calm"
}
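# Optional sanity check: every label the classifier can emit should have a
# mood mapping; unmapped labels would silently fall back to "calm".
_unmapped = [lbl for lbl in model.config.id2label.values()
             if lbl.lower() not in goemotion_to_general]
if _unmapped:
    print(f"⚠️ Emotion labels without a mood mapping: {_unmapped}")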
# Analyze 3 songs and surface the stored mood. analyze_multiple_songs already
# maps the dominant emotion to a general mood and saves it in detected_mood,
# so this wrapper only appends that mood to the summary.
def analyze_and_store_lyrics(song1, artist1, song2, artist2, song3, artist3):
    summary, fig = analyze_multiple_songs(song1, artist1, song2, artist2, song3, artist3)
    if detected_mood["mood"]:
        summary += f"\n🎚️ Mapped Mood: {detected_mood['mood'].upper()}"
    return summary, fig
# Recommend songs with the same mood as detected
def recommend_similar_mood_songs():
    mood = detected_mood["mood"]
    if not mood:
        return pd.DataFrame([{"⚠️": "Analyze songs first"}])
    filtered = labeled_df[labeled_df["predicted_emotion_name"] == mood]
    if filtered.empty:
        return pd.DataFrame([{"⚠️": f"No songs found for mood '{mood}'"}])
    return filtered.sample(n=min(5, len(filtered)))[["name", "artists", "predicted_emotion_name"]]
# Recommend songs with a shifted mood
def recommend_mood_shift_songs():
    mood = detected_mood["mood"]
    if not mood:
        return pd.DataFrame([{"⚠️": "Analyze songs first"}])
    target = mood_shift_map.get(mood)
    if not target:
        return pd.DataFrame([{"⚠️": f"No shift rule for '{mood}'"}])
    filtered = labeled_df[labeled_df["predicted_emotion_name"] == target]
    if filtered.empty:
        return pd.DataFrame([{"⚠️": f"No songs found for mood shift to '{target}'"}])
    return filtered.sample(n=min(5, len(filtered)))[["name", "artists", "predicted_emotion_name"]]
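# Both recommenders return up to five randomly sampled rows with the columns
# name / artists / predicted_emotion_name, so labeled_songs.csv is assumed to
# contain (at least) those columns.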
css = """ | |
@import url('https://fonts.googleapis.com/css2?family=Poppins:wght@300;500&display=swap'); | |
body, h1, h2, h3, h4, h5, h6, p, label, button, input, textarea { | |
font-family: 'Poppins', sans-serif !important; | |
color: #222 !important; | |
font-size: 16px !important; | |
} | |
body { | |
background: linear-gradient(to right, #e0f7fa, #f3e5f5); | |
margin: 0; | |
padding: 0; | |
} | |
.gradio-container, .gradio-interface, .gradio-box { | |
background-color: rgba(255, 255, 255, 0.9) !important; | |
border-radius: 16px !important; | |
padding: 24px !important; | |
box-shadow: 0 8px 24px rgba(0, 0, 0, 0.08); | |
} | |
.gr-textbox, .gr-input, input, textarea { | |
background-color: #fff !important; | |
color: #222 !important; | |
border: 1px solid #ccc !important; | |
border-radius: 12px !important; | |
padding: 12px !important; | |
font-size: 16px !important; | |
} | |
.gr-textbox:focus, .gr-input:focus { | |
border-color: #a78bfa !important; | |
} | |
.track-btn, .gr-button { | |
background: linear-gradient(135deg, #cfd9df, #e2ebf0) !important; | |
border: none !important; | |
color: #333 !important; | |
font-weight: 600 !important; | |
border-radius: 10px !important; | |
padding: 12px 24px !important; | |
font-size: 16px !important; | |
transition: background 0.4s ease, transform 0.2s ease !important; | |
margin-top: 16px !important; | |
} | |
.track-btn:hover, .gr-button:hover { | |
background: linear-gradient(135deg, #e2ebf0, #cfd9df) !important; | |
transform: scale(1.02); | |
} | |
label { | |
font-weight: 500 !important; | |
font-size: 14px !important; | |
color: #444 !important; | |
} | |
""" | |
# Define the Gradio interface
with gr.Blocks(css=css, title="Lyric Mood Tracker") as app:
    gr.Markdown(
        "<h1 style='text-align: center; font-weight: 500; color: #4A4A4A;'>🌿 Lyric Mood Tracker</h1>"
        "<p style='text-align: center; color: #666; font-size: 16px;'>Understand the emotional landscape of your favorite songs</p>"
    )
    with gr.Row():
        with gr.Column():
            song1 = gr.Textbox(label="🎵 Song 1 Title")
            artist1 = gr.Textbox(label="🎤 Artist 1")
        with gr.Column():
            song2 = gr.Textbox(label="🎵 Song 2 Title")
            artist2 = gr.Textbox(label="🎤 Artist 2")
        with gr.Column():
            song3 = gr.Textbox(label="🎵 Song 3 Title")
            artist3 = gr.Textbox(label="🎤 Artist 3")
    run_btn = gr.Button("🔍 Analyze Mood", elem_classes=["track-btn"])
    output_text = gr.Textbox(label="🧠 Mood Summary")
    output_plot = gr.Plot(label="📊 Emotion Pie Chart")
    with gr.Row():
        rec_similar_btn = gr.Button("🎯 Recommend Similar Mood Songs", elem_classes=["track-btn"])
        rec_shift_btn = gr.Button("🔁 Recommend Mood-Shift Songs", elem_classes=["track-btn"])
    similar_output = gr.Dataframe(label="🎧 Similar Mood Recommendations")
    shift_output = gr.Dataframe(label="💪 Mood Shift Recommendations")
    # One click handler analyzes all 3 songs, stores the mapped mood, and
    # updates both outputs. (analyze_lyrics takes a single song/artist pair,
    # so it cannot be wired to these six inputs.)
    run_btn.click(
        analyze_and_store_lyrics,
        inputs=[song1, artist1, song2, artist2, song3, artist3],
        outputs=[output_text, output_plot]
    )
    rec_similar_btn.click(recommend_similar_mood_songs, outputs=similar_output)
    rec_shift_btn.click(recommend_mood_shift_songs, outputs=shift_output)

app.launch(share=True)