prajyot2003 committed
Commit 67ccb45 · verified · 1 Parent(s): fffcabf

Create app.py

Files changed (1)
app.py  +174 -0
app.py ADDED
@@ -0,0 +1,174 @@
+ import os
+ import streamlit as st
+ import streamlit.components.v1 as components  # explicit import; st.components isn't always exposed without it
+ from transformers import pipeline
+ from youtubesearchpython import VideosSearch
+
+ # -----------------------------
+ # App Setup / Config
+ # -----------------------------
+ os.environ.setdefault("PYTORCH_ENABLE_MPS_FALLBACK", "1")  # quiet Mac MPS warnings
+ st.set_page_config(page_title="Moodify 🎶", page_icon="🎵", layout="centered")
+
+ st.title("🎵 Moodify – AI Music Recommender")
+ st.caption("Type how you feel (or speak locally), I'll detect your emotion and play matching music 🎶")
+
+ # Detect if we're on Hugging Face Space
+ RUNNING_IN_SPACE = "SPACE_ID" in os.environ
+
+ # -----------------------------
+ # Optional voice input (only local)
+ # -----------------------------
+ use_voice = False
+ if not RUNNING_IN_SPACE:
+     try:
+         import speech_recognition as sr  # optional local dependency
+         use_voice = True
+     except Exception:
+         # Keep running; voice is optional
+         pass
+
+ # -----------------------------
+ # Load emotion model (cached)
+ # -----------------------------
+ @st.cache_resource(show_spinner=False)
+ def load_emotion_model():
+     # DistilRoBERTa emotion model (lightweight & accurate)
+     # Returns top label by default (pipeline default top_k=1)
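+     # Note: "sentiment-analysis" is the transformers alias for the text-classification task; device=-1 keeps inference on CPU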
+     return pipeline("sentiment-analysis", model="j-hartmann/emotion-english-distilroberta-base", device=-1)
+
+ emotion_model = load_emotion_model()
+
+ # -----------------------------
+ # Utilities
+ # -----------------------------
+ def detect_emotion(text: str) -> str:
+     """Return lowercase emotion label from model."""
+     try:
+         res = emotion_model(text)[0]
+         label = res["label"].lower()
+         return label
+     except Exception:
+         return "neutral"
+
+ # Map detected emotions → search moods/genres
+ EMOTION_TO_MOOD = {
+     "joy": "happy",
+     "love": "romantic",
+     "anger": "calm",  # steer to calming music
+     "sadness": "sad",
+     "fear": "relaxing",
+     "surprise": "energetic",
+     "disgust": "moody",
+     "neutral": "chill"
+ }
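+ # This model emits anger, disgust, fear, joy, neutral, sadness and surprise; "love" is kept as a harmless extra in case the model is ever swapped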
+
+ # Extra moods you can pick manually
+ MOOD_OPTIONS = sorted(list(set(EMOTION_TO_MOOD.values()) | {"lofi", "focus", "party", "workout", "sleep"}))
+
+ @st.cache_data(show_spinner=False)
+ def yt_search(query: str, limit: int = 6):
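+     """Search YouTube via youtube-search-python (scrapes public search results; no API key needed)."""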
+     vs = VideosSearch(query, limit=limit)
+     data = vs.result()
+     items = data.get("result", [])
+     # Return (title, url, id)
+     parsed = []
+     for v in items:
+         title = v.get("title", "Untitled")
+         link = v.get("link")
+         vid_id = None
+         # Most links are standard watch?v=..; if not, just pass link to st.video()
+         if link and "watch?v=" in link:
+             vid_id = link.split("watch?v=")[-1].split("&")[0]
+         parsed.append((title, link, vid_id))
+     return parsed
+
+ def get_voice_input() -> str | None:
+     """Record a short phrase and transcribe with Google (local only)."""
+     try:
+         recognizer = sr.Recognizer()
+         with sr.Microphone() as source:
+             st.info("🎙️ Speak now…")
+             audio = recognizer.listen(source, timeout=5, phrase_time_limit=7)
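+         # recognize_google() sends the recorded audio to Google's free Web Speech API, so it needs an internet connection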
+         text = recognizer.recognize_google(audio)
+         st.success(f"You said: **{text}**")
+         return text
+     except sr.UnknownValueError:
+         st.error("I couldn't understand—try again.")
+     except sr.WaitTimeoutError:
+         st.error("No voice detected—try again.")
+     except Exception as e:
+         st.error(f"Voice error: {e}")
+     return None
+
+ # -----------------------------
+ # UI
+ # -----------------------------
+ st.write("### How do you want to share your mood?")
+ if use_voice:
+     input_mode = st.radio("Input method", ["Type", "Speak"], horizontal=True)
+ else:
+     input_mode = "Type"
+     if RUNNING_IN_SPACE:
+         st.info("🎙️ Voice input is disabled on Hugging Face Spaces. Use text input below.")
+
+ user_text = ""
+ if input_mode == "Type":
+     user_text = st.text_input("Tell me how you feel (e.g., “I feel lonely”, “I’m excited for tonight”)")
+ else:
+     if st.button("🎤 Tap to Speak"):
+         said = get_voice_input()
+         if said:
+             user_text = said
+
+ # If user gave any text, detect emotion → propose mood; else let them manually pick
+ detected_emotion = None
+ detected_mood = None
+
+ if user_text.strip():
+     with st.spinner("Analyzing your feelings…"):
+         detected_emotion = detect_emotion(user_text)
+     st.write(f"**Detected emotion:** `{detected_emotion}`")
+     detected_mood = EMOTION_TO_MOOD.get(detected_emotion, "chill")
+
+ # Manual override dropdown (preselect detected mood if we have it)
+ st.write("### Choose/adjust mood")
+ default_index = MOOD_OPTIONS.index(detected_mood) if detected_mood in MOOD_OPTIONS else MOOD_OPTIONS.index("chill")
+ chosen_mood = st.selectbox(
+     "Adjust if needed (auto set from your feelings):",
+     MOOD_OPTIONS,
+     index=default_index
+ )
+
+ # Generate playlist
+ if st.button("🎵 Generate Playlist"):
+     if not user_text.strip() and not detected_mood:
+         st.warning("Type how you feel or use the dropdown to pick a mood.")
+     else:
+         query = f"{chosen_mood} songs playlist"
+         with st.spinner(f"Finding {chosen_mood} tracks on YouTube…"):
+             songs = yt_search(query, limit=6)
+
+         if not songs:
+             st.error("No songs found. Try a different mood.")
+         else:
+             st.subheader("🎶 Your Moodify Playlist")
+             # Autoplay first track if possible
+             first = songs[0]
+             if first[2]:  # have video id
+                 # Embed playlist style (first as main, others as queue)
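+                 # YouTube's embed player accepts a "playlist" query param of comma-separated video IDs queued after the first; browsers may still block autoplay with sound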
+                 ids = [s[2] for s in songs if s[2]]
+                 if len(ids) > 1:
+                     embed_url = f"https://www.youtube.com/embed/{ids[0]}?autoplay=1&playlist={','.join(ids[1:])}"
+                 else:
+                     embed_url = f"https://www.youtube.com/embed/{ids[0]}?autoplay=1"
+                 components.iframe(embed_url, height=420, width=720)
+             else:
+                 # Fallback to direct video link
+                 st.video(first[1])
+
+             with st.expander("Show track list"):
+                 for title, link, _ in songs:
+                     st.markdown(f"- [{title}]({link})")
+
+             st.success("Enjoy your music! 🎧")
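Dependency note (assumed, not part of this commit): the app imports streamlit, transformers (with a PyTorch backend), and youtube-search-python; SpeechRecognition plus PyAudio are only needed for the optional local voice input. With those installed, running "streamlit run app.py" starts the app locally.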