import torch
import gradio as gr
import torchaudio
from transformers import AutoModel
import spaces

# Load the VoiceRestore model; trust_remote_code is required because the
# checkpoint ships its own modeling code.
checkpoint_path = "/content/VoiceRestore"
model = AutoModel.from_pretrained(checkpoint_path, trust_remote_code=True)

@spaces.GPU()
def restore_audio(input_audio):
    # Run restoration: the model reads the degraded input file and writes the
    # restored audio to output_path.
    output_path = "restored_output.wav"
    model(input_audio, output_path)
    return output_path

with gr.Blocks() as demo:
    gr.Markdown(
        """

# 🔊 Voice Restoration with Transformer-based Model

") gr.Markdown( """

Upload a degraded audio file or select one of the examples below, and the Space will restore it using the VoiceRestore model!
Based on the original VoiceRestore repo by @StanKirdey,
and the HF Transformers model by @jadechoghari.

""" ) with gr.Row(): with gr.Column(): gr.Markdown("### 🎧 Select an Example or Upload Your Audio:") input_audio = gr.Audio(label="Upload Degraded Audio", type="filepath") gr.Examples( examples=["example1.wav", "example2.wav", "example3.wav"], inputs=input_audio, label="Sample Degraded Audios" ) with gr.Column(): gr.Markdown("### 🎶 Restored Audio Output:") output_audio = gr.Audio(label="Restored Audio", type="filepath") with gr.Row(): restore_btn = gr.Button("✨ Restore Audio") # Connect the button to the function restore_btn.click(restore_audio, inputs=input_audio, outputs=output_audio) # Launch the demo demo.launch(debug=True)