|
import gradio as gr |
|
|
|
from SER import live_emotion_recognition |
|
from MT import search_song_by_emotion |
|
|
|
def audio_interface(audio):
    """Detect the speaker's emotion from a voice clip and recommend a matching song.

    Args:
        audio: Path to the recorded clip (a str — the input component is
            configured with type='filepath').

    Returns:
        tuple: (detected emotion label, gr.Audio component). The audio
        component holds the recommended track's preview URL, or no value
        with an error label when the song lookup returned nothing.
    """
    emotion = live_emotion_recognition(audio)
    preview_url = search_song_by_emotion(emotion)

    # Guard clause: no preview found (or lookup failed) — still return both
    # outputs so the UI renders. Original label had a typo ("Occured") and
    # mojibake ("β οΈ" = UTF-8 ⚠️ bytes mis-decoded); both fixed.
    if not preview_url:
        return emotion, gr.Audio(value=None, label="Unexpected Error Occurred ⚠️")

    # Use a distinct name instead of clobbering the `audio` parameter.
    # Label aligned with the static output component ("Recommended Music");
    # "π·" was mis-decoded UTF-8 for 🎷.
    recommendation = gr.Audio(value=preview_url, label="Recommended Music 🎷")
    return emotion, recommendation
|
|
|
# User-facing blurb shown under the interface title.
# NOTE(review): the original ended with mojibake ("π" — mis-decoded UTF-8
# emoji bytes); restored to 😊 to match the "make you happy" wording.
# Confirm the intended emoji with the author.
description = (
    "This is an emotion-based music recommendation system. "
    "How are you feeling today ? Hope I will help you to make you happy 😊."
)

# Center the app and cap its width.
# NOTE(review): these rules target id "container", but no component below
# sets elem_id="container" — add one for the styling to take effect.
css = """
#container{
margin: 0 auto;
max-width: 80rem;
}
"""

iface = gr.Interface(
    title='Emotion Based Recommendation System',
    description=description,
    fn=audio_interface,
    # Bug fix: `css` was defined but never passed to the Interface, so the
    # custom styling was silently dropped.
    css=css,
    inputs=[
        gr.Audio(sources='microphone', type='filepath',
                 label="How are you feeling today ?"),
    ],
    outputs=[
        gr.Textbox(label="Detected Emotion "),
        # "π·" was mis-decoded UTF-8 for 🎷; restored.
        gr.Audio(label="Recommended Music 🎷"),
    ],
)

iface.launch()