Update app.py
app.py CHANGED
@@ -709,6 +709,16 @@ css = """
 }
 """
 
+def analyze_emotions(audio_path, threshold):
+    if audio_path is None:
+        return "❌ Please upload a valid audio file.", None, None
+    try:
+        model_output = music2emo.predict(audio_path, threshold)
+        return format_prediction(model_output)
+    except Exception as e:
+        return f"⚠️ Error during prediction: {e}", None, None
+
+
 with gr.Blocks(css=css) as demo:
     gr.HTML(f"<h1 style='text-align: center;'>{title}</h1>")
     gr.Markdown(description_text)
@@ -750,14 +760,24 @@ with gr.Blocks(css=css) as demo:
         mood_chart = gr.Plot(label="Mood Probabilities", scale=2, elem_classes=["gr-box"])
         va_chart = gr.Plot(label="Valence-Arousal Space", scale=1, elem_classes=["gr-box"])
 
+    # predict_btn.click(
+    #     fn=lambda audio, thresh: format_prediction(music2emo.predict(audio, thresh)),
+    #     inputs=[input_audio, threshold],
+    #     outputs=[output_text, va_chart, mood_chart]
+    # )
     predict_btn.click(
-        fn=lambda audio, thresh: format_prediction(music2emo.predict(audio, thresh)),
+        fn=analyze_emotions,
         inputs=[input_audio, threshold],
         outputs=[output_text, va_chart, mood_chart]
     )
 
+
+
 # Launch the App
 demo.queue().launch()
 
 
+
+
+
 
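
Below is a minimal, self-contained sketch of the Blocks wiring pattern used in this file: a named handler that validates its input, traps exceptions, and returns (text, figure, figure), registered on a button via click(fn=..., inputs=..., outputs=...). The real music2emo model and format_prediction helper are defined earlier in app.py; here they are replaced by stand-ins, and the component labels and stub values are illustrative assumptions, not the app's actual ones.

import gradio as gr

# Stand-ins for objects defined earlier in app.py (assumed interface:
# music2emo.predict(audio_path, threshold) -> model output, and
# format_prediction(model_output) -> (text, valence_arousal_fig, mood_fig)).
def _predict_stub(audio_path, threshold):
    return {"moods": {"happy": 0.91, "calm": 0.64}, "valence": 6.5, "arousal": 5.8}

def _format_prediction_stub(model_output):
    # Returning None for a gr.Plot output simply leaves that plot empty.
    return str(model_output), None, None

def analyze_emotions(audio_path, threshold):
    # Named handler: validates the upload and converts failures into a
    # readable message instead of letting the exception surface in the UI.
    if audio_path is None:
        return "Please upload a valid audio file.", None, None
    try:
        return _format_prediction_stub(_predict_stub(audio_path, threshold))
    except Exception as e:
        return f"Error during prediction: {e}", None, None

with gr.Blocks() as demo:
    input_audio = gr.Audio(type="filepath", label="Upload audio")
    threshold = gr.Slider(0.0, 1.0, value=0.5, label="Mood threshold")
    predict_btn = gr.Button("Analyze")
    output_text = gr.Textbox(label="Result")
    va_chart = gr.Plot(label="Valence-Arousal Space")
    mood_chart = gr.Plot(label="Mood Probabilities")

    # The handler's three return values map positionally onto the three outputs.
    predict_btn.click(
        fn=analyze_emotions,
        inputs=[input_audio, threshold],
        outputs=[output_text, va_chart, mood_chart],
    )

if __name__ == "__main__":
    demo.queue().launch()

A named handler like this is also callable outside the UI (for example, analyze_emotions(None, 0.5) returns the validation message), which an inline lambda does not allow as easily.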