# Tonic's picture
# Update app.py
# a848fc8
# raw
# history blame
# 2.09 kB
import gradio as gr
import torchaudio
import torch
# Define the list of target languages.
# Maps the display name shown in the UI dropdown to the 3-letter language
# code expected by the TorchScript models (via the `tgt_lang` argument).
languages = {
    "English": "eng",
    "Hindi": "hin",
    "Portuguese": "por",
    "Russian": "rus",
    "Spanish": "spa"
}
def speech_to_text(audio_data, tgt_lang):
    """Transcribe speech audio to text in the target language.

    Args:
        audio_data: Path to an audio file loadable by ``torchaudio.load``.
        tgt_lang: Display name of the target language (a key of ``languages``).

    Returns:
        The text output produced by the TorchScript S2T model.
    """
    audio_input, _ = torchaudio.load(audio_data)
    # Load the TorchScript model once and cache it on the function object,
    # so repeated UI calls do not re-read the weights from disk every time.
    if not hasattr(speech_to_text, "_model"):
        speech_to_text._model = torch.jit.load("unity_on_device_s2t.ptl")
    with torch.no_grad():
        text = speech_to_text._model(audio_input, tgt_lang=languages[tgt_lang])
    return text
def speech_to_speech_translation(audio_data, tgt_lang):
    """Translate speech audio into speech (and text) in the target language.

    Args:
        audio_data: Path to an audio file loadable by ``torchaudio.load``.
        tgt_lang: Display name of the target language (a key of ``languages``).

    Returns:
        A ``(text, output_file)`` tuple: the translated text and the path of
        the synthesized 16 kHz WAV file written to ``/tmp``.
    """
    audio_input, _ = torchaudio.load(audio_data)
    # Load the TorchScript model once and cache it on the function object,
    # so repeated UI calls do not re-read the weights from disk every time.
    if not hasattr(speech_to_speech_translation, "_model"):
        speech_to_speech_translation._model = torch.jit.load("unity_on_device.ptl")
    with torch.no_grad():
        text, units, waveform = speech_to_speech_translation._model(
            audio_input, tgt_lang=languages[tgt_lang]
        )
    # NOTE(review): fixed path means concurrent requests overwrite each other's
    # output; a tempfile per call would be safer — confirm against deployment.
    output_file = "/tmp/result.wav"
    # unsqueeze(0) adds the channel dimension torchaudio.save expects.
    torchaudio.save(output_file, waveform.unsqueeze(0), sample_rate=16000)
    return text, output_file
def create_interface():
    """Assemble and return the Gradio Blocks UI.

    The layout is a shared target-language dropdown followed by two
    collapsible sections: speech-to-text and speech-to-speech translation.
    """
    with gr.Blocks(theme='ParityError/Anime') as demo:
        # Shared target-language selector used by both sections below.
        tgt_lang_dd = gr.Dropdown(
            list(languages.keys()),
            label="Select Target Language",
            value="English",
        )

        # --- Speech to Text section ---
        with gr.Accordion("Speech to Text", open=False):
            stt_audio = gr.Audio(label="Upload or Record Audio")
            stt_text = gr.Text(label="Transcribed Text")
            transcribe_btn = gr.Button("Transcribe")
            transcribe_btn.click(
                speech_to_text,
                inputs=[stt_audio, tgt_lang_dd],
                outputs=stt_text,
            )

        # --- Speech to Speech Translation section ---
        with gr.Accordion("Speech to Speech Translation", open=False):
            s2st_audio_in = gr.Audio(label="Upload or Record Audio")
            s2st_text = gr.Text(label="Translated Text")
            s2st_audio_out = gr.Audio(label="Translated Audio", type="filepath")
            translate_btn = gr.Button("Translate")
            translate_btn.click(
                speech_to_speech_translation,
                inputs=[s2st_audio_in, tgt_lang_dd],
                outputs=[s2st_text, s2st_audio_out],
            )
    return demo
# Build the app at import time so hosting platforms that import this module
# (e.g. Hugging Face Spaces) can find and serve `app`.
app = create_interface()

if __name__ == "__main__":
    # Only start the dev server when run directly (`python app.py`).
    app.launch(show_error=True, debug=True)