Update app.py
app.py
CHANGED
@@ -5,31 +5,48 @@ import time
 from tensorflow.keras.models import load_model

 # Load the emotion prediction model
-
+def load_emotion_model(model_path):
+    try:
+        model = load_model(model_path)
+        return model
+    except Exception as e:
+        print("Error loading emotion prediction model:", e)
+        return None
+
+model_path = 'mymodel_SER_LSTM_RAVDESS.h5'
+model = load_emotion_model(model_path)

 # Function to extract MFCC features from audio
 def extract_mfcc(wav_file_name):
-
-
-
+    try:
+        y, sr = librosa.load(wav_file_name)
+        mfccs = np.mean(librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40).T, axis=0)
+        return mfccs
+    except Exception as e:
+        print("Error extracting MFCC features:", e)
+        return None

 # Emotions dictionary
 emotions = {1: 'neutral', 2: 'calm', 3: 'happy', 4: 'sad', 5: 'angry', 6: 'fearful', 7: 'disgust', 8: 'surprised'}

 # Function to predict emotion from audio
 def predict_emotion_from_audio(wav_filepath):
-
-
-
-
-
-
-
+    try:
+        test_point = extract_mfcc(wav_filepath)
+        if test_point is not None:
+            test_point = np.reshape(test_point, newshape=(1, 40, 1))
+            predictions = model.predict(test_point)
+            predicted_emotion_label = np.argmax(predictions[0]) + 1
+            return emotions[predicted_emotion_label]
+        else:
+            return "Error: Unable to extract features"
+    except Exception as e:
+        print("Error predicting emotion:", e)
+        return None

 # Create a combined function that calls both models
 def get_predictions(audio_input):
     emotion_prediction = predict_emotion_from_audio(audio_input)
-
     return [emotion_prediction]

 # Create the Gradio interface
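The hunk ends at the comment introducing the Gradio interface, so the interface definition itself is not part of this diff. Below is a minimal sketch of how the updated get_predictions function could be exposed through Gradio; the component choices, label, and launch call are assumptions for illustration, not taken from app.py.

import gradio as gr

# Hypothetical wiring, not from this commit: one audio input handed to
# librosa as a file path, one text output showing the predicted label.
demo = gr.Interface(
    fn=get_predictions,                # combined function defined in the hunk above
    inputs=gr.Audio(type="filepath"),  # extract_mfcc expects a .wav file path
    outputs=gr.Textbox(label="Predicted emotion"),
)

if __name__ == "__main__":
    demo.launch()

Note that get_predictions returns a one-element list, so a single-output interface will typically display that list as-is; returning emotion_prediction directly would show the bare label.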
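For a quick check of the prediction path added in this hunk without launching the UI, the functions can be called directly. The snippet is illustrative and not part of the commit; 'sample.wav' is a placeholder path, and the reshape to (1, 40, 1) assumes the LSTM was trained on the 40 mean MFCC coefficients as a 40-step sequence with one feature per step.

# Illustrative smoke test (not part of this commit); 'sample.wav' is a placeholder.
if model is not None:
    print(predict_emotion_from_audio("sample.wav"))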