alvi123 committed on
Commit
f45be4e
•
1 Parent(s): 4cab2b2

initial commit

Files changed (4)
  1. .gitattributes +1 -0
  2. app.py +103 -0
  3. extract_features.py +33 -0
  4. finalized_rf.sav +3 -0
.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ finalized_rf.sav filter=lfs diff=lfs merge=lfs -text
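For reference, this added rule is the kind of line that `git lfs track "finalized_rf.sav"` appends to .gitattributes (assuming Git LFS is set up in the repository); it makes Git store the roughly 3 MB pickled model as an LFS object instead of a regular blob.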
app.py ADDED
@@ -0,0 +1,103 @@
+ import gradio as gr
+ import wave
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from extract_features import *
+ import pickle
+ import soundfile
+ import librosa
+ 
+ # Pre-trained random forest classifier, stored in this repo via Git LFS.
+ classifier = pickle.load(open('finalized_rf.sav', 'rb'))
+ 
+ def emotion_predict(input):
+     # Extract the acoustic features and classify them with the random forest.
+     input_features = extract_feature(input, mfcc=True, chroma=True, mel=True, contrast=True, tonnetz=True)
+     rf_prediction = classifier.predict(input_features.reshape(1, -1))[0]
+     if rf_prediction == 'happy':
+         return 'Happy 😎'
+     elif rf_prediction == 'neutral':
+         return 'Neutral 😐'
+     elif rf_prediction == 'sad':
+         return 'Sad 😒'
+     else:
+         return 'Angry 😀'
+ 
+ 
+ def plot_fig(input):
+     # Read the raw 16-bit PCM samples and plot amplitude against time.
+     wav = wave.open(input, 'r')
+     raw = wav.readframes(-1)
+     raw = np.frombuffer(raw, "int16")
+     sampleRate = wav.getframerate()
+ 
+     Time = np.linspace(0, len(raw) / sampleRate, num=len(raw))
+ 
+     # Set the figure size before creating the figure so it takes effect.
+     plt.rcParams["figure.figsize"] = (50, 15)
+     fig = plt.figure()
+     plt.title("Waveform Of the Audio", fontsize=25)
+     plt.xticks(fontsize=15)
+     plt.yticks(fontsize=15)
+     plt.ylabel("Amplitude", fontsize=25)
+     plt.plot(Time, raw, color='red')
+ 
+     return fig
+ 
+ 
+ with gr.Blocks() as app:
+     gr.Markdown(
+         """
+         # Speech Emotion Detector 🎵😍
+         This application classifies the submitted audio 🔊 by the emotion in the speech into four categories:
+         1. Happy 😎
+         2. Neutral 😐
+         3. Sad 😒
+         4. Angry 😀
+         """
+     )
+     with gr.Tab("Record Audio"):
+         record_input = gr.Audio(source="microphone", type="filepath")
+ 
+         with gr.Accordion("Audio Visualization", open=False):
+             gr.Markdown(
+                 """
+                 ### Visualization will work only after audio has been submitted
+                 """
+             )
+             plot_record = gr.Button("Display Audio Signal")
+             plot_record_c = gr.Plot(label='Waveform Of the Audio')
+ 
+         record_button = gr.Button("Detect Emotion")
+         record_output = gr.Text(label='Emotion Detected')
+ 
+     with gr.Tab("Upload Audio File"):
+         gr.Markdown(
+             """
+             ## Uploaded audio should be in .wav format
+             """
+         )
+ 
+         upload_input = gr.Audio(type="filepath")
+ 
+         with gr.Accordion("Audio Visualization", open=False):
+             gr.Markdown(
+                 """
+                 ### Visualization will work only after audio has been submitted
+                 """
+             )
+             plot_upload = gr.Button("Display Audio Signal")
+             plot_upload_c = gr.Plot(label='Waveform Of the Audio')
+ 
+         upload_button = gr.Button("Detect Emotion")
+         upload_output = gr.Text(label='Emotion Detected')
+ 
+     # Wire the buttons in both tabs to the prediction and plotting functions.
+     record_button.click(emotion_predict, inputs=record_input, outputs=record_output)
+     upload_button.click(emotion_predict, inputs=upload_input, outputs=upload_output)
+     plot_record.click(plot_fig, inputs=record_input, outputs=plot_record_c)
+     plot_upload.click(plot_fig, inputs=upload_input, outputs=plot_upload_c)
+ 
+ app.launch()
extract_features.py ADDED
@@ -0,0 +1,33 @@
+ import numpy as np
+ import soundfile
+ import librosa
+ 
+ def extract_feature(file_name, **kwargs):
+     # Flags selecting which feature groups to compute and concatenate.
+     chroma = kwargs.get("chroma")
+     contrast = kwargs.get("contrast")
+     mfcc = kwargs.get("mfcc")
+     mel = kwargs.get("mel")
+     tonnetz = kwargs.get("tonnetz")
+ 
+     with soundfile.SoundFile(file_name) as audio_clip:
+         X = audio_clip.read(dtype="float32")
+         sound_fourier = np.abs(librosa.stft(X))  # short-time Fourier transform of the audio clip
+         result = np.array([])
+ 
+         if mfcc:
+             mfccs = np.mean(librosa.feature.mfcc(y=X, sr=audio_clip.samplerate, n_mfcc=40).T, axis=0)
+             result = np.hstack((result, mfccs))
+         if chroma:
+             chroma = np.mean(librosa.feature.chroma_stft(S=sound_fourier, sr=audio_clip.samplerate).T, axis=0)
+             result = np.hstack((result, chroma))
+         if mel:
+             mel = np.mean(librosa.feature.melspectrogram(y=X, sr=audio_clip.samplerate).T, axis=0)
+             result = np.hstack((result, mel))
+         if contrast:
+             contrast = np.mean(librosa.feature.spectral_contrast(S=sound_fourier, sr=audio_clip.samplerate).T, axis=0)
+             result = np.hstack((result, contrast))
+         if tonnetz:
+             tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=audio_clip.samplerate).T, axis=0)
+             result = np.hstack((result, tonnetz))
+     return result
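A quick usage sketch for `extract_feature` (not part of the commit): with all five flags enabled and librosa's default settings, the concatenated vector contains 40 MFCCs + 12 chroma bins + 128 mel bands + 7 spectral-contrast values + 6 tonnetz dimensions = 193 values per clip. `sample.wav` below is a placeholder path for any mono .wav file.

from extract_features import extract_feature

# "sample.wav" is a placeholder; substitute any mono .wav readable by soundfile.
vec = extract_feature("sample.wav", mfcc=True, chroma=True, mel=True,
                      contrast=True, tonnetz=True)
print(vec.shape)  # expected: (193,) with librosa defaults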
finalized_rf.sav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:daf37e379f462a4f0e936c39a69aee28e4941c4de46f2e3308711f27042fb514
+ size 3096321
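The training script that produced `finalized_rf.sav` is not included in this commit. The snippet below is only a minimal sketch of how such a model could be built and pickled, assuming a hypothetical `data/<emotion>/*.wav` folder layout, the four labels used in app.py, and guessed hyperparameters; it is not the author's actual training code.

import glob
import os
import pickle

import numpy as np
from sklearn.ensemble import RandomForestClassifier

from extract_features import extract_feature

features, labels = [], []
for emotion in ["happy", "neutral", "sad", "angry"]:  # label set taken from app.py
    for path in glob.glob(os.path.join("data", emotion, "*.wav")):  # hypothetical layout
        features.append(extract_feature(path, mfcc=True, chroma=True,
                                        mel=True, contrast=True, tonnetz=True))
        labels.append(emotion)

rf = RandomForestClassifier(n_estimators=100, random_state=42)  # assumed settings
rf.fit(np.array(features), labels)

# Save under the same filename that app.py loads.
with open("finalized_rf.sav", "wb") as f:
    pickle.dump(rf, f)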