khalida1wwin committed
Commit f27ba88 • 1 Parent(s): 8d6521c
update app.py
app.py CHANGED
@@ -189,8 +189,8 @@ audio_output_prediction, sample_rate, min_duration, frame_length, hop_length_fra
     print(audio_output_prediction)
     sf.write(audio_output_prediction , denoise_long[0, :], sample_rate)

-def denoise_audio(audioName,mic):
-
+def denoise_audio(audioName):
+
     sr, data = audioName
     sf.write("temp.wav",data, sr)
     testNo = "temp"
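Dropping the `mic` parameter means the Gradio callback now receives a single audio value. As a minimal sketch (not part of the commit), assuming Gradio's default numpy audio format, that value arrives as a `(sample_rate, numpy_array)` tuple, which is why the new body can unpack it and stage it as a temporary WAV with soundfile. The helper name `stage_audio_input` below is illustrative, not from app.py:

```python
import numpy as np
import soundfile as sf

def stage_audio_input(audioName):
    """Mirror of the first lines of the new denoise_audio(): unpack the
    (sample_rate, ndarray) tuple Gradio passes for an 'audio' input and
    stage it as temp.wav for the downstream prediction pipeline."""
    sr, data = audioName
    sf.write("temp.wav", data, sr)
    return "temp.wav"

# One second of silence at 8 kHz, shaped the way Gradio would deliver it.
print(stage_audio_input((8000, np.zeros(8000, dtype=np.float32))))
```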
@@ -200,36 +200,29 @@ def denoise_audio(audioName,mic):



-    else:
-        sr, data = mic
-        sf.write("temp.wav",data, sr)
-        testNo = "temp"
-    audio_dir_prediction = os.path.abspath("/")+ str(testNo) +".wav"
-    sample_rate, data = mic[0], mic[1]
-    len_data = len(data)


-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    t = len_data / sample_rate # returns duration but in floats
+    print("t:",t)
+    weights_path = os.path.abspath("./")
+    name_model = "model_unet"
+    audio_dir_prediction = os.path.abspath("./")
+    dir_save_prediction = os.path.abspath("./")
+    audio_output_prediction = "test.wav"
+    audio_input_prediction = ["temp.wav"]
+    sample_rate = 8000
+    min_duration = t
+    frame_length = 8064
+    hop_length_frame = 8064
+    n_fft = 255
+    hop_length_fft = 63
+
+    dim_square_spec = int(n_fft / 2) + 1
+
+    prediction(weights_path, name_model, audio_dir_prediction, dir_save_prediction, audio_input_prediction,
+               audio_output_prediction, sample_rate, min_duration, frame_length, hop_length_frame, n_fft, hop_length_fft)
+    print(audio_output_prediction)
+    return audio_output_prediction


 examples = [
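The added body hard-codes the framing and STFT constants passed to the `prediction()` helper (which is not shown in this diff, so what it does with them is an assumption): 8 kHz audio cut into non-overlapping windows of 8064 samples, and a 255-point FFT with hop 63, so that `dim_square_spec = int(n_fft / 2) + 1` comes out to 128 frequency bins. A minimal numpy-only sketch of the geometry these constants imply:

```python
import numpy as np

# Constants copied from the new denoise_audio() body.
sample_rate = 8000
frame_length = 8064        # samples per window (~1.008 s at 8 kHz)
hop_length_frame = 8064    # hop == window length -> non-overlapping frames
n_fft = 255
hop_length_fft = 63

dim_square_spec = int(n_fft / 2) + 1
print(dim_square_spec)     # 128 one-sided frequency bins for a 255-point FFT

# Cut a dummy 3-second clip into fixed-size frames; any trailing remainder
# shorter than frame_length is dropped, as the constants suggest.
audio = np.zeros(3 * sample_rate, dtype=np.float32)
n_frames = (len(audio) - frame_length) // hop_length_frame + 1
frames = np.stack([audio[i * hop_length_frame:i * hop_length_frame + frame_length]
                   for i in range(n_frames)])
print(frames.shape)        # (2, 8064) for a 3 s clip
```

Choosing n_fft = 255 rather than 256 is presumably what keeps the spectrogram at 128 bins per side, matching the square input the UNet model expects.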
@@ -241,7 +234,7 @@ examples = [


 iface = gr.Interface(fn = denoise_audio,
-                     inputs =
+                     inputs = 'audio',
                      outputs = 'audio',
                      title = 'audio to denoised Audio Application',
                      description = 'A simple application to denoise audio speech usinf UNet deep learning model. Upload your own audio, or click one of the examples to load them.',
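With `denoise_audio` now taking one argument, the interface only needs the single `'audio'` input. A minimal, self-contained sketch of the same wiring: the callback below is a hypothetical passthrough stand-in, not the commit's model call (the real handler returns the path of the written WAV, which a Gradio `'audio'` output also accepts), and the examples/title/description arguments are abbreviated here:

```python
import gradio as gr

def denoise_audio_stub(audio):
    # Stand-in for app.py's denoise_audio: a Gradio 'audio' input arrives as
    # (sample_rate, ndarray); returning the tuple (or a WAV path) plays back.
    sr, data = audio
    return (sr, data)

iface = gr.Interface(fn=denoise_audio_stub,
                     inputs='audio',
                     outputs='audio',
                     title='audio to denoised Audio Application',
                     description='Upload an audio clip and play back the result.')

if __name__ == "__main__":
    iface.launch()
```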