Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -49,20 +49,21 @@ def classify_emotion(audio):
     return emo_dict[text_lab[0]]
 
 def slider_logic(slider):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    threshold = 0
+    if slider == 1:
+        threshold = .98
+    elif slider == 2:
+        threshold = .88
+    elif slider == 3:
+        threshold = .77
+    elif slider == 4:
+        threshold = .66
+    elif slider == 5:
+        threshold = .55
+    else:
+        threshold = []
+    return threshold
+
 # Create a Gradio interface with audio file and text inputs
 def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, explitive_selection, slider):
     # Transcribe the audio file using Whisper ASR
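For reference, a minimal standalone sketch of what this hunk adds: the new slider_logic maps the 1-5 sensitivity slider to a toxicity threshold (higher slider positions lower the threshold). The function body below is copied from the commit; the demo loop around it is illustrative only and is not part of app.py.

# Sketch: reproduces the slider_logic added in this commit and prints the
# slider-to-threshold mapping. The rest of the Gradio app is omitted.

def slider_logic(slider):
    threshold = 0
    if slider == 1:
        threshold = .98
    elif slider == 2:
        threshold = .88
    elif slider == 3:
        threshold = .77
    elif slider == 4:
        threshold = .66
    elif slider == 5:
        threshold = .55
    else:
        threshold = []  # fallback for out-of-range values, as written in the commit
    return threshold

if __name__ == "__main__":
    for position in range(1, 6):
        print(f"slider={position} -> threshold={slider_logic(position)}")
    # Any other value falls through to the else branch and returns an empty list
    # rather than a number, which callers such as classify_toxicity would need to handle.
    print(f"slider=0 -> threshold={slider_logic(0)}")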