import os
import shlex
import uuid

import gdown
import gradio as gr
import torch

# Fall back to CPU inference when no GPU is available.
cpu_param = "--cpu" if not torch.cuda.is_available() else ""

# Download the pretrained synthesizer, encoder, and vocoder checkpoints once.
if not os.path.exists("synpretrained.pt"):
    gdown.download(
        "https://drive.google.com/u/0/uc?id=1EqFMIbvxffxtjiVrtykroF6_mUh-5Z3s&export=download&confirm=t",
        "synpretrained.pt", quiet=False)
    gdown.download(
        "https://drive.google.com/uc?export=download&id=1q8mEGwCkFy23KZsinbuvdKAQLqNKbYf1",
        "encpretrained.pt", quiet=False)
    gdown.download(
        "https://drive.google.com/uc?export=download&id=1cf2NO6FtI0jDuy8AV3Xgn6leO6dHjIgu",
        "vocpretrained.pt", quiet=False)


def inference(audio_path, text, mic_path=None):
    # Prefer the microphone recording when one was provided.
    if mic_path:
        audio_path = mic_path
    output_path = f"/tmp/output_{uuid.uuid4()}.wav"
    # Quote user-controlled arguments before handing them to the shell.
    os.system(
        f"python demo_cli.py --no_sound {cpu_param} "
        f"--audio_path {shlex.quote(audio_path)} "
        f"--text {shlex.quote(text.strip())} "
        f"--output_path {shlex.quote(output_path)}"
    )
    return output_path


title = "Real-Time-Voice-Cloning"
description = (
    "Gradio demo for Real-Time-Voice-Cloning: clone a voice in 5 seconds to "
    "generate arbitrary speech in real time. To use it, simply upload your "
    "audio, or click one of the examples to load them. Read more at the link below."
)
article = (
    "<p style='text-align: center'>"
    "<a href='https://github.com/CorentinJ/Real-Time-Voice-Cloning' target='_blank'>"
    "Real-Time Voice Cloning | Github Repo</a></p>"
" examples = [['test.wav', "This is real time voice cloning on huggingface spaces"]] def toggle(choice): if choice == "mic": return gr.update(visible=True, value=None), gr.update(visible=False, value=None) else: return gr.update(visible=False, value=None), gr.update(visible=True, value=None) with gr.Blocks() as demo: with gr.Row(): with gr.Column(): radio = gr.Radio(["mic", "file"], value="mic", label="How would you like to upload your audio?") mic_input = gr.Mic(label="Input", type="filepath", visible=False) audio_file = gr.Audio( type="filepath", label="Input", visible=True) text_input = gr.Textbox(label="Text") with gr.Column(): audio_output = gr.Audio(label="Output") gr.Examples(examples, fn=inference, inputs=[audio_file, text_input], outputs=audio_output, cache_examples=True) btn = gr.Button("Generate") btn.click(inference, inputs=[audio_file, text_input, mic_input], outputs=audio_output) radio.change(toggle, radio, [mic_input, audio_file]) demo.launch(enable_queue=True)