Hendrik Schroeter committed
Commit b64ea7b
1 Parent(s): 9b336fb

Update to df3, improve cleanup

Files changed (1)
  1. app.py +21 -14
app.py CHANGED
@@ -3,7 +3,7 @@ import math
 import os
 import tempfile
 import time
-from typing import Optional, Tuple, Union
+from typing import List, Optional, Tuple, Union
 
 import gradio as gr
 import matplotlib.pyplot as plt
@@ -19,7 +19,7 @@ from df.enhance import enhance, init_df, load_audio, save_audio
 from df.io import resample
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model, df, _ = init_df("./DeepFilterNet2", config_allow_defaults=True)
+model, df, _ = init_df(config_allow_defaults=True)
 model = model.to(device=device).eval()
 
 fig_noisy: plt.Figure
@@ -100,7 +100,7 @@ def load_audio_gradio(
     return audio, meta
 
 
-def demo_fn(speech_upl: str, noise_type: str, snr: int, mic_input: str):
+def demo_fn(speech_upl: str, noise_type: str, snr: int, mic_input: Optional[str] = None):
     if mic_input:
         speech_upl = mic_input
     sr = config("sr", 48000, int, section="df")
@@ -147,16 +147,10 @@ def demo_fn(speech_upl: str, noise_type: str, snr: int, mic_input: str):
     ax_enh.clear()
     noisy_im = spec_im(sample, sr=sr, figure=fig_noisy, ax=ax_noisy)
     enh_im = spec_im(enhanced, sr=sr, figure=fig_enh, ax=ax_enh)
-    # Cleanup some old wav files
-    if os.path.exists("/tmp"):
-        days = 1
-        for f in glob.glob("/tmp/*wav"):
-            is_old = (time.time() - os.path.getmtime(f)) / 3600 > 24 * days
-            if is_old and f not in (enhanced_wav, noisy_wav):
-                try:
-                    os.remove(f)
-                except Exception as e:
-                    print(f"failed to remove file {f}: {e}")
+    filter = [speech_upl, noisy_wav, enhanced_wav]
+    if mic_input is not None and mic_input != "":
+        filter.append(mic_input)
+    cleanup_tmp(filter)
     return noisy_wav, noisy_im, enhanced_wav, enh_im
 
 
@@ -257,6 +251,19 @@ def spec_im(
     return Image.frombytes("RGB", figure.canvas.get_width_height(), figure.canvas.tostring_rgb())
 
 
+def cleanup_tmp(filter: List[str] = [], hours_keep=2):
+    # Cleanup some old wav files
+    if os.path.exists("/tmp"):
+        for f in glob.glob("/tmp/*"):
+            is_old = (time.time() - os.path.getmtime(f)) / 3600 > hours_keep
+            if is_old and f not in filter:
+                try:
+                    os.remove(f)
+                    logger.info(f"Removed file {f}")
+                except Exception as e:
+                    logger.warning(f"failed to remove file {f}: {e}")
+
+
 def toggle(choice):
     if choice == "mic":
         return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
@@ -320,5 +327,5 @@ with gr.Blocks() as demo:
     ),
     gr.Markdown(open("usage.md").read())
 
-
+cleanup_tmp()
  demo.launch(enable_queue=True)
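
Note on the model update: dropping the pinned "./DeepFilterNet2" checkpoint path lets init_df() load the library's packaged default model, which per the commit subject is DeepFilterNet3. As a rough standalone sketch of the same df.enhance API this app drives (assuming the upstream README-style interface; "noisy.wav" and "enhanced.wav" are placeholder file names):

from df.enhance import enhance, init_df, load_audio, save_audio

# No checkpoint path: fall back to the packaged default model (DeepFilterNet3).
model, df_state, _ = init_df(config_allow_defaults=True)

# load_audio resamples the placeholder input file to the model's sample rate.
audio, _ = load_audio("noisy.wav", sr=df_state.sr())
enhanced = enhance(model, df_state, audio)
save_audio("enhanced.wav", enhanced, df_state.sr())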
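
Note on the cleanup change: the sweep that used to run inline in demo_fn (only /tmp/*wav files older than 24 h) is now a reusable cleanup_tmp(filter, hours_keep=2) that scans all of /tmp, skips the files still referenced by the current request, and also runs once at startup before demo.launch(). A minimal standalone sketch of that pattern, with hypothetical names (cleanup_tmp_sketch, keep, max_age_hours) and an immutable default in place of the mutable [] default:

import glob
import os
import time
from typing import Iterable


def cleanup_tmp_sketch(keep: Iterable[str] = (), max_age_hours: float = 2.0, tmp_dir: str = "/tmp") -> None:
    """Delete files in tmp_dir older than max_age_hours, except those listed in keep."""
    if not os.path.isdir(tmp_dir):
        return
    keep_set = set(keep)
    now = time.time()
    for path in glob.glob(os.path.join(tmp_dir, "*")):
        if path in keep_set:
            continue
        try:
            if (now - os.path.getmtime(path)) / 3600 > max_age_hours:
                os.remove(path)
        except OSError as exc:  # file may disappear between glob and remove
            print(f"failed to remove {path}: {exc}")

As in the updated demo_fn, the caller collects the paths it still needs (the uploaded speech, the freshly written noisy/enhanced wavs, and the mic recording if present) and passes them as keep, so a request cannot have its own outputs deleted underneath it.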