# VoiceRestore — app.py
# Gradio demo for the VoiceRestore speech-restoration model.
# Hugging Face Space by @jadechoghari (file size ~1.8 kB at time of capture).
import torch
import gradio as gr
import torchaudio
from transformers import AutoModel
import spaces
# Location of the VoiceRestore checkpoint.
# NOTE(review): "/content/..." is a Colab-style local path — confirm it exists
# in this deployment; a Hub repo id (e.g. "jadechoghari/VoiceRestore") may be
# what was intended for a Space.
checkpoint_path = "/content/VoiceRestore"
# trust_remote_code=True — presumably the checkpoint ships custom model code
# that AutoModel must execute to build the architecture; verify the source is
# trusted before enabling this.
model = AutoModel.from_pretrained(checkpoint_path, trust_remote_code=True)
@spaces.GPU()
def restore_audio(input_audio):
    """Restore a degraded audio file with the VoiceRestore model.

    Args:
        input_audio: Filesystem path to the uploaded/selected audio file
            (Gradio ``Audio`` with ``type="filepath"``), or ``None`` when
            nothing was provided.

    Returns:
        Path to the restored WAV file, or ``None`` when there was no input.
    """
    # Gradio passes None when the user clicks the button with no audio
    # selected; returning None keeps the output component empty instead of
    # crashing inside the model call.
    if input_audio is None:
        return None
    # Local import: tempfile is only needed here, and this keeps the edit
    # self-contained in a flat script.
    import tempfile
    # Write to a unique temp file rather than a fixed "restored_output.wav":
    # with a shared filename, concurrent requests would overwrite each
    # other's results before Gradio could serve them.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
        output_path = tmp.name
    # The model callable takes (input_path, output_path) and writes the
    # restored audio to output_path.
    model(input_audio, output_path)
    return output_path
# Build the Gradio UI. Statement order inside the Blocks context defines the
# on-page layout, so the structure below is intentional and order-sensitive.
with gr.Blocks() as demo:
    # Page title.
    gr.Markdown("<h1 style='text-align: center;'>๐Ÿ”Š Voice Restoration with Transformer-based Model</h1>")
    # Intro text with credits/links to the original repo and model author.
    gr.Markdown(
        """
        <p style='text-align: center;'>Upload a degraded audio file or select an example, and the space will restore it using the <b>VoiceRestore</b> model!<br>
        Based on this <a href='https://github.com/skirdey/voicerestore' target='_blank'>repo</a> by @StanKirdey,<br>
        and the HF Transformers model by <a href='https://github.com/jadechoghari' target='_blank'>@jadechoghari</a>.
        </p>
        """
    )
    # Two side-by-side columns: input (left) and restored output (right).
    with gr.Row():
        with gr.Column():
            gr.Markdown("### ๐ŸŽง Select an Example or Upload Your Audio:")
            # type="filepath" means restore_audio receives a path string.
            input_audio = gr.Audio(label="Upload Degraded Audio", type="filepath")
            # Clickable sample files that populate the input component.
            # NOTE(review): assumes example1-3.wav exist next to app.py —
            # confirm they are checked into the Space repo.
            gr.Examples(
                examples=["example1.wav", "example2.wav", "example3.wav"],
                inputs=input_audio,
                label="Sample Degraded Audios"
            )
        with gr.Column():
            gr.Markdown("### ๐ŸŽถ Restored Audio Output:")
            # Output also uses filepath type: restore_audio returns a path.
            output_audio = gr.Audio(label="Restored Audio", type="filepath")
    # Action button row, below the columns.
    with gr.Row():
        restore_btn = gr.Button("โœจ Restore Audio")
    # Connect the button to the function
    restore_btn.click(restore_audio, inputs=input_audio, outputs=output_audio)
# Launch the demo
demo.launch(debug=True)