Spaces:
Runtime error
Runtime error
immelstorun
committed on
Commit
·
e7fc258
1
Parent(s):
401d010
Update app.py
Browse files
app.py
CHANGED
@@ -22,46 +22,24 @@ emotion_dict = {
|
|
22 |
'neu': 'Neutral'
|
23 |
}
|
24 |
|
25 |
-
def predict_emotion(audio):
    """Run the pretrained SpeechBrain classifier on an uploaded clip.

    `audio` is the Gradio file-like object for the uploaded recording; its
    `.name` attribute is the temp-file path on disk. Returns the
    human-readable emotion name looked up in `emotion_dict`.
    """
    classification = learner.classify_file(audio.name)
    _, _, _, predicted_labels = classification
    return emotion_dict[predicted_labels[0]]
|
28 |
|
29 |
-
def predict_emotion_from_file(file_path):
    """Classify the emotion of an audio file already present on disk.

    Takes the filesystem path of a preloaded recording (e.g. one chosen
    from the "rec" folder dropdown) and returns the human-readable emotion
    label from `emotion_dict`.

    Fix: the previous version constructed a `gr.Audio(file_path=...)`
    component just to reach a `.name` attribute; `gr.Audio` takes no
    `file_path` keyword, so this raised a TypeError at call time. The path
    is now classified directly.
    """
    out_prob, score, index, text_lab = learner.classify_file(file_path)
    return emotion_dict[text_lab[0]]
|
32 |
|
33 |
-
#
|
34 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
35 |
# --- Gradio UI wiring -------------------------------------------------------
outputs = "text"
title = "ML Speech Emotion Detection"
description = "Speechbrain powered wav2vec 2.0 pretrained model on IEMOCAP dataset using Gradio."

# Preloaded clips shipped with the Space in the local "rec" folder.
rec_folder = "rec"
audio_files = [
    os.path.join(rec_folder, f)
    for f in os.listdir(rec_folder)
    if f.endswith((".wav", ".mp3"))
]
file_dropdown = gr.inputs.Dropdown(label="Select Preloaded Audio File", choices=audio_files)

# Fix: gr.Interface accepts no `tab_name` kwarg and has no `add_tab` method,
# so the previous wiring crashed on startup. Build one Interface per tab and
# combine them with gr.TabbedInterface instead.
upload_tab = gr.Interface(
    fn=predict_emotion,
    inputs=inputs,
    outputs=outputs,
    title=title,
    description=description,
)
preloaded_tab = gr.Interface(
    fn=predict_emotion_from_file,
    inputs=file_dropdown,
    outputs=outputs,
)
iface = gr.TabbedInterface(
    [upload_tab, preloaded_tab],
    ["Upload Audio", "Select Preloaded Audio"],
)

# Launch the Gradio app
iface.launch()
|
|
|
22 |
'neu': 'Neutral'
|
23 |
}
|
24 |
|
|
|
|
|
|
|
25 |
|
|
|
|
|
|
|
26 |
|
27 |
+
# Assuming emotion_dict and learner are defined elsewhere in your code
|
28 |
+
# and learner.classify_file is a method that classifies the audio file
|
29 |
+
|
30 |
+
def predict_emotion(audio, rec_file):
    """Classify the emotion in an audio clip.

    Parameters
    ----------
    audio : Gradio Audio file object or None
        Direct upload; used as a fallback when no `rec_file` is chosen.
    rec_file : Gradio File object or None
        File chosen from the "rec" directory picker.

    Returns the human-readable emotion name from `emotion_dict`.

    Fix: the previous version ignored `audio` entirely and crashed with an
    AttributeError when `rec_file` was None; the uploaded clip is now used
    as a fallback.
    """
    if rec_file is not None:
        # NOTE(review): os.path.join discards "rec" when rec_file.name is an
        # absolute path (as Gradio temp paths are), so the temp copy is
        # classified directly — preserved from the original behavior.
        target = os.path.join("rec", rec_file.name)
    else:
        # Fall back to the directly uploaded clip.
        target = audio.name if hasattr(audio, "name") else audio
    out_prob, score, index, text_lab = learner.classify_file(target)
    return emotion_dict[text_lab[0]]
|
35 |
+
|
36 |
+
# Loading gradio interface
|
37 |
+
# Gradio UI wiring: two inputs (direct upload + picker for the "rec" folder)
# both feed predict_emotion(audio, rec_file).
inputs = [
    gr.inputs.Audio(label="Input Audio", type="file"),
    # Fix: gr.inputs.File takes no `default` kwarg; passing default="rec/"
    # raised a TypeError at startup.
    gr.inputs.File(label="Choose file from rec directory", type="file"),
]
outputs = "text"
title = "ML Speech Emotion Detection"
description = "Speechbrain powered wav2vec 2.0 pretrained model on IEMOCAP dataset using Gradio."

# Launch the Gradio app
gr.Interface(fn=predict_emotion, inputs=inputs, outputs=outputs, title=title, description=description).launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|