import gradio as gr
from scipy.io.wavfile import write
import librosa
import os
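# Gradio app for collecting speech recordings: each tab lists 400 prompt
# sentences read from ./Data Record/*.txt, the speaker records the sentence
# with the microphone, and the audio is saved as <prompt id>.wav under ./Data.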
# One example row per prompt file: [prompt id (file name without extension), prompt text].
examples = []
folder_path = "./Data Record"
for file_name in os.listdir(folder_path):
    if file_name.endswith(".txt"):
        file_path = os.path.join(folder_path, file_name)
        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read()
        examples.append([file_name.split(".")[0], content])
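# Note: os.listdir() returns entries in arbitrary order, so the prompt order
# follows the filesystem rather than the file names.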
# Split the examples into chunks of 400 rows, one chunk per tab.
CHUNK_SIZE = 400
example_chunks = [examples[i:i + CHUNK_SIZE] for i in range(0, 35 * CHUNK_SIZE, CHUNK_SIZE)]
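# Up to 35 x 400 = 14,000 prompts are distributed across the tabs.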
# Default output folder for saved recordings.
os.makedirs("Data", exist_ok=True)
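# Recording handlers. Each one receives the two prompt text boxes plus the
# microphone recording (passed as a temporary file path) and saves the audio
# under its target folder.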
def save_recording(text1, text2, path, folder):
    """Save the microphone recording as <folder>/<text1>.wav.

    text1 is the prompt id, text2 is the prompt text (shown to the speaker,
    not used here), and path is the path of the recorded audio file.
    """
    if path is None:
        return "ERROR: You have to either use the microphone or upload an audio file"
    audio, sr = librosa.load(path)  # librosa resamples to 22050 Hz by default
    os.makedirs(folder, exist_ok=True)
    write(os.path.join(folder, text1 + ".wav"), sr, audio)
    return "Done"

def speak1(text1, text2, path):
    return save_recording(text1, text2, path, "./Data1")

def speak2(text1, text2, path):
    return save_recording(text1, text2, path, "./Data2")

def speak3(text1, text2, path):
    return save_recording(text1, text2, path, "./Data3")

def speak4(text1, text2, path):
    return save_recording(text1, text2, path, "./Data")
def speak(text1, text2, path):
    # Handler wired to every tab below; recordings are written to ./Data.
    return save_recording(text1, text2, path, "./Data")
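# Example (hypothetical values):
#   speak("0001", "A sentence to read aloud.", "/tmp/recording.wav")
# loads and resamples the recording, writes ./Data/0001.wav, and returns "Done".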
title = "Data"
# Build one Interface per chunk of examples.
interfaces = []
for chunk in example_chunks:
    interfaces.append(
        gr.Interface(
            fn=speak,
            inputs=[
                gr.inputs.Textbox(label="Number 1"),
                gr.inputs.Textbox(label="Number 2"),
                gr.inputs.Audio(source="microphone", type="filepath", optional=True),
            ],
            outputs="text",
            title=title,
            examples=chunk,
            theme="default",
            allow_screenshot=False,
            allow_flagging="never",
        )
    )
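# Splitting the prompts across 35 identical interfaces keeps each tab's example
# table to at most 400 rows; every tab records through the same speak handler
# into ./Data.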
# Present the interfaces as tabs "Data 1" ... "Data 35".
demo = gr.Blocks()
with demo:
    gr.TabbedInterface(
        interfaces,
        [f"Data {i + 1}" for i in range(len(interfaces))],
    )
demo.launch(share=True)