BHW committed on
Commit
8aed886
1 Parent(s): 5363ed3

Upload run.py

Files changed (1)
  run.py +105 -0
run.py ADDED
@@ -0,0 +1,105 @@
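# run.py: Gradio demo that classifies audio with the AST model
# "MIT/ast-finetuned-audioset-10-10-0.4593", via three tabs: file upload,
# one-shot microphone recording, and live microphone streaming.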
import os
import time

import gradio as gr
import librosa
import numpy as np
import soundfile as sf
from transformers import pipeline

TARGET_SAMPLE_RATE = 16_000
AUDIO_SECONDS_THRESHOLD = 5

# Make sure the directory for saved recordings exists; sf.write() will not create it.
os.makedirs("./audio", exist_ok=True)

pipe = pipeline("audio-classification", model="MIT/ast-finetuned-audioset-10-10-0.4593")
prediction = [{"score": 1, "label": "recording..."}]  # placeholder shown until the first classification

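# Gradio delivers audio as int16 PCM; convert to float in [-1, 1) for librosa and the model.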
def normalize_waveform(waveform, datatype=np.float32):
    waveform = waveform.astype(dtype=datatype)  # source datatype: np.int16
    waveform /= 32768.0  # map the int16 range onto [-1.0, 1.0)
    return waveform

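# Streaming mode: called once per microphone chunk. Chunks accumulate in the
# `stream` buffer (held in Gradio session state); once the buffer reaches
# AUDIO_SECONDS_THRESHOLD seconds it is classified and saved, then cleared.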
def streaming_recording_fn(stream, new_chunk):
    global prediction
    sr, y = new_chunk
    y = normalize_waveform(y)
    y = librosa.resample(y, orig_sr=sr, target_sr=TARGET_SAMPLE_RATE)
    if stream is not None:
        if (stream.shape[-1] / TARGET_SAMPLE_RATE) >= AUDIO_SECONDS_THRESHOLD:
            # Buffer is long enough: classify it, save it, and start a new buffer.
            prediction = pipe(stream)
            file_name = f'./audio/{time.strftime("%Y%m%d_%H%M%S", time.localtime())}.wav'
            sf.write(file_name, stream, TARGET_SAMPLE_RATE)
            print(f"SAVE AUDIO: {file_name}")
            print(f">>>>>>1\t{y.shape=}, {stream.shape=}\n\t{prediction[0]=}")
            stream = None
        else:
            # Keep accumulating chunks until the threshold is reached.
            stream = np.concatenate([stream, y], axis=-1)
            print(f">>>>>>2\t{y.shape=}, {stream.shape=}")
    else:
        # First chunk: start the buffer.
        stream = y
        print(f">>>>>>3\t{y.shape=}, {stream.shape=}")

    return stream, {i['label']: i['score'] for i in prediction}

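# One-shot microphone mode: classify a complete recording and save it.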
def microphone_fn(waveform):
    print('-' * 120)
    print(f"{waveform=}")
    sr, y = waveform
    y = normalize_waveform(y)
    y = librosa.resample(y, orig_sr=sr, target_sr=TARGET_SAMPLE_RATE)
    result = pipe(y)
    file_name = f'./audio/{time.strftime("%Y%m%d_%H%M%S", time.localtime())}.wav'
    sf.write(file_name, y, TARGET_SAMPLE_RATE)
    return {i['label']: i['score'] for i in result}

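# File-upload mode: same processing as microphone_fn, applied to an uploaded file.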
def file_fn(waveform):
    print('-' * 120)
    print(f"{waveform=}")
    sr, y = waveform
    y = normalize_waveform(y)
    y = librosa.resample(y, orig_sr=sr, target_sr=TARGET_SAMPLE_RATE)
    result = pipe(y)
    file_name = f'./audio/{time.strftime("%Y%m%d_%H%M%S", time.localtime())}.wav'
    sf.write(file_name, y, TARGET_SAMPLE_RATE)
    return {i['label']: i['score'] for i in result}

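# One gr.Interface per mode. In the streaming interface, "state" is shorthand for
# session state that carries the buffer between calls, and live=True makes Gradio
# invoke the handler as chunks arrive.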
streaming_demo = gr.Interface(
    fn=streaming_recording_fn,
    inputs=["state", gr.Audio(sources=["microphone"], streaming=True)],
    outputs=["state", "label"],
    live=True,
)

microphone_demo = gr.Interface(
    fn=microphone_fn,
    inputs=[gr.Audio(sources=["microphone"], type="numpy")],
    outputs=["label"],
)

file_demo = gr.Interface(
    fn=file_fn,
    inputs=[gr.Audio(sources=["upload"], type="numpy")],
    outputs=["label"],
)

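# Examples tab: with run_on_click=True, clicking a bundled sample runs file_fn on it.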
with gr.Blocks() as example:
    inputs = [gr.Audio(sources=["upload"], type="numpy")]
    output = gr.Label()

    examples = [
        ["audio/cantina.wav"],
        ["audio/cat.mp3"],
    ]
    ex = gr.Examples(examples,
                     fn=file_fn, inputs=inputs, outputs=output,
                     run_on_click=True)

with gr.Blocks() as demo:
    gr.TabbedInterface([file_demo, streaming_demo, microphone_demo, example],
                       ["Audio file", "Streaming", "Microphone", "Example"])

if __name__ == "__main__":
    demo.launch(share=True)