Commit 3d7e2e4 by ahmedghani
1 Parent(s): 3a7d000
added samples
Files changed:
- app.py (+14, -4)
- samples/mixture1.wav (+0, -0)
- samples/mixture2.wav (+0, -0)
- samples/mixture3.wav (+0, -0)
- svoice/separate.py (+1, -1)
app.py CHANGED

@@ -34,14 +34,18 @@ else:
     )
     print("Whisper ASR model loaded.")
 
-def separator(audio, rec_audio):
+def separator(audio, rec_audio, example):
     outputs = {}
-
+    for f in glob('input/*'):
+        os.remove(f)
+    for f in glob('separated/*'):
+        os.remove(f)
     if audio:
         write('input/original.wav', audio[0], audio[1])
     elif rec_audio:
         write('input/original.wav', rec_audio[0], rec_audio[1])
-
+    else:
+        os.system(f'cp {example} input/original.wav')
     separate_demo(mix_dir="./input")
     separated_files = glob(os.path.join('separated', "*.wav"))
     separated_files = [f for f in separated_files if "original.wav" not in f]

@@ -95,6 +99,12 @@ with demo:
     outputs_audio = [output_audio1, output_audio2, output_audio3, output_audio4, output_audio5, output_audio6, output_audio7]
     outputs_text = [output_text1, output_text2, output_text3, output_text4, output_text5, output_text6, output_text7]
     button = gr.Button("Separate")
-
+    examples = [
+        "samples/mixture1.wav",
+        "samples/mixture2.wav",
+        "samples/mixture3.wav"
+    ]
+    example_selector = gr.inputs.Dropdown(examples, label="Example Audio", default="samples/mixture1.wav")
+    button.click(separator, inputs=[input_audio, rec_audio, example_selector], outputs=outputs_audio + outputs_text)
 
 demo.launch()
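The app.py hunk above adds two behaviors to separator(): it clears the input/ and separated/ work directories before each run, and it falls back to a bundled sample when neither an uploaded nor a recorded clip is provided. The following is a minimal standalone sketch of that pre-processing, not the commit's code verbatim: the helper name prepare_input is hypothetical, the os.makedirs calls are an added assumption so the sketch runs from a clean directory, and shutil.copy stands in for the commit's os.system(f'cp ...') shell-out.

import os
import shutil
from glob import glob

from scipy.io.wavfile import write  # gr.Audio yields a (sample_rate, numpy_data) tuple

def prepare_input(audio=None, rec_audio=None, example='samples/mixture1.wav'):
    """Sketch of the pre-processing that separator() performs after this commit."""
    os.makedirs('input', exist_ok=True)      # assumption: ensure work dirs exist
    os.makedirs('separated', exist_ok=True)
    # Remove leftovers from a previous run so stale files never surface as results.
    for f in glob('input/*') + glob('separated/*'):
        os.remove(f)
    if audio:
        write('input/original.wav', audio[0], audio[1])
    elif rec_audio:
        write('input/original.wav', rec_audio[0], rec_audio[1])
    else:
        # The commit shells out with `cp`; shutil.copy is a platform-independent
        # equivalent used only in this sketch.
        shutil.copy(example, 'input/original.wav')

Using shutil.copy instead of a cp subprocess avoids shell quoting issues and works on non-POSIX hosts; the commit itself keeps the cp call.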
samples/mixture1.wav ADDED
Binary file (108 kB).

samples/mixture2.wav ADDED
Binary file (119 kB).

samples/mixture3.wav ADDED
Binary file (99.2 kB).
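These three samples are the choices behind the new "Example Audio" dropdown in app.py. Note that gr.inputs.Dropdown is the deprecated pre-3.x Gradio input alias; on recent Gradio releases it has been removed and gr.Dropdown is the corresponding component. A sketch of the same wiring under the assumption that the Space runs Gradio 3 or later (this is not part of the commit):

import gradio as gr

with gr.Blocks() as demo:
    # Same example list as in app.py; gr.Dropdown replaces the legacy
    # gr.inputs.Dropdown, with `value=` taking over the old `default=` argument.
    example_selector = gr.Dropdown(
        choices=["samples/mixture1.wav", "samples/mixture2.wav", "samples/mixture3.wav"],
        value="samples/mixture1.wav",
        label="Example Audio",
    )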
svoice/separate.py CHANGED

@@ -20,7 +20,7 @@ def load_model():
     global pkg
     print("Loading svoice model if available...")
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    pkg = torch.load('checkpoint.th')
+    pkg = torch.load('checkpoint.th', map_location=device)
     if 'model' in pkg:
         model = pkg['model']
     else:
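The map_location=device argument is what allows a checkpoint written on a GPU machine to load on CPU-only Space hardware: without it, torch.load tries to restore CUDA tensors onto a device that is not available. A minimal sketch of the pattern, using the same checkpoint path as the hunk above:

import torch

# Pick the best available device; on a CPU-only Space this resolves to "cpu".
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# map_location remaps saved tensors onto the current device. Without it, a
# checkpoint saved from a GPU raises an error like "Attempting to deserialize
# object on a CUDA device but torch.cuda.is_available() is False" on CPU hosts.
pkg = torch.load('checkpoint.th', map_location=device)

# As in load_model(), the loaded package may wrap the model under a 'model' key.
print('model' in pkg if isinstance(pkg, dict) else type(pkg))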