pragnakalp committed on
Commit c9c2be1 (1 parent: 552dbf1)

Update app.py

Files changed (1)
  1. app.py +37 -68
app.py CHANGED
@@ -1,30 +1,23 @@
  from __future__ import absolute_import, division, print_function, unicode_literals

  from flask import Flask, make_response, render_template, request, jsonify, redirect, url_for, send_from_directory
- from flask_cors import CORS
- import sys
  import os
- import librosa
- import librosa.display
- import numpy as np
- from datetime import date
- import re
- import json
- import email
- import csv
- import datetime
- import smtplib
- import ssl
- from email.mime.text import MIMEText
- import time
+ import sys
  import pytz
- import requests
- # import pyaudio
- import wave
+ import librosa
  import shutil
+ import random
+ import string
  import warnings
+ import datetime
+ import librosa.display
+ import numpy as np
  import tensorflow as tf
  import gradio as gr
+
+ # import pyaudio
+ # import wave
+ from tqdm import tqdm
  from keras.models import Sequential
  from keras.layers import Dense
  from keras.utils import to_categorical
@@ -32,7 +25,8 @@ from keras.layers import Flatten, Dropout, Activation
  from keras.layers import Conv2D, MaxPooling2D
  from keras.layers import BatchNormalization
  from sklearn.model_selection import train_test_split
- from tqdm import tqdm
+
+ from save_data import flag

  warnings.filterwarnings("ignore")

@@ -81,41 +75,33 @@ model.load_weights('speech_emotion_detection_ravdess_savee.h5')


  def selected_audio(audio):
-     if audio and audio != 'Please select any of the following options':
-         post_file_name = audio.lower() + '.wav'
-
-         filepath = os.path.join("pre_recoreded",post_file_name)
-         if os.path.exists(filepath):
-             print("SELECT file name => ",filepath)
-             result = predict_speech_emotion(filepath)
-             print("result = ",result)
-
+     try:
+         if audio and audio != 'Please select any of the following options':
+             post_file_name = audio.lower() + '.wav'
+
+             filepath = os.path.join("pre_recoreded",post_file_name)
+             if os.path.exists(filepath):
+                 print("SELECT file name => ",filepath)
+                 result = predict_speech_emotion(filepath)
+                 print("result = ",result)
+
              return result
+     except Exception as e:
+         print(e)
+         return "ERROR"

  def recorded_audio(audio):
+
+     get_audio_name = ''
      try:
-         fileList = os.listdir('recorded_audio')
-         new_wav_file = ""
-
-         if(fileList):
-             filename_list = []
-
-             for i in fileList:
-                 filename = i.split('.')[0]
-                 filename_list.append(int(filename))
+         if audio:
+             get_audio_name = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(5)])
+             audio_file_path = audio.name
+             final_output = predict_speech_emotion(audio_file_path)

-             max_file = max(filename_list)
-             new_wav_file = int(max_file) + 1
-         else:
-             new_wav_file="1"
-
-         new_wav_file = str(new_wav_file) + ".wav"
-
-         # filepath = os.path.join('recorded_audio', new_wav_file)
-         # shutil.move(recorded_audio, filepath)
-         filepath = 'recorded_audio/22.wav'
-         result = predict_speech_emotion(audio.name)
-         return result
+             flag(audio_file_path,get_audio_name,final_output)
+
+             return final_output
      except Exception as e:
          print(e)
          return "ERROR"
@@ -137,31 +123,14 @@ def predict_speech_emotion(filepath):
      return result


- # demo = gr.Interface(
- # fn=send_audio,
- # inputs=gr.Audio(source="microphone", type="filepath"),
- # outputs="text")
-
- # demo.launch()
-
- # selected_audio = gr.Dropdown(["Angry", "Happy", "Sad", "Disgust","Fear", "Surprise", "Neutral"],
- # lable = "Input Audio")
- # audio_ui=gr.Audio()
- # text = gr.Textbox()
- # demo = gr.Interface(
- # fn=send_audio,
- # inputs=selected_audio,
- # outputs=[audio_ui,text])
-
- # demo.launch()
-
  def return_audio_clip(audio_text):
      post_file_name = audio_text.lower() + '.wav'
      filepath = os.path.join("pre_recoreded",post_file_name)
      return filepath

  with gr.Blocks(css=".gradio-container {background-color: lightgray;}") as demo:
-     gr.Markdown("Select audio or record audio")
+     gr.Markdown("""<h1 style='text-align: center;>Audio Emotion Detection</h1>""")
+
      with gr.Row():
          with gr.Column():
              input_audio_text = gr.Dropdown(lable="Input Audio",choices=["Please select any of the following options","Angry", "Happy", "Sad", "Disgust","Fear", "Surprise", "Neutral"],interactive=True)
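Note: the rewritten recorded_audio() hands each recording to flag() from save_data, passing the temporary file path, a random 5-character name, and the predicted emotion. save_data.py itself is not part of this commit, so the following is a hypothetical sketch only of a helper with that call signature (the directory and CSV file names are made up for illustration); it copies the clip under the generated name and appends the prediction to a log.

# save_data.py - hypothetical sketch; the real module is not included in this diff
import os
import csv
import shutil
import datetime

def flag(audio_file_path, audio_name, predicted_emotion):
    # Copy the recorded clip under the generated name and log the prediction.
    # Signature matches the call flag(audio_file_path, get_audio_name, final_output) in app.py.
    os.makedirs("flagged_audio", exist_ok=True)
    saved_path = os.path.join("flagged_audio", audio_name + ".wav")
    shutil.copy(audio_file_path, saved_path)

    with open(os.path.join("flagged_audio", "log.csv"), "a", newline="") as log_file:
        writer = csv.writer(log_file)
        writer.writerow([datetime.datetime.now().isoformat(), saved_path, predicted_emotion])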