import gradio as gr
import soundfile
import torch
import scipy.io.wavfile  # used by the commented-out TTS blocks below
from espnet2.bin.tts_inference import Text2Speech  # commented-out TTS blocks
from espnet2.utils.types import str_or_none  # commented-out TTS blocks
from espnet2.bin.asr_inference import Speech2Text

# tagen = 'kan-bayashi/ljspeech_vits'
# vocoder_tagen = "none"
speech2text = Speech2Text.from_pretrained(
    asr_train_config="slurp/config.yaml",
    asr_model_file="slurp/valid.acc.ave_10best.pth",
    # Decoding parameters are not included in the model file
    nbest=1,
)
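# Other decoding knobs accepted by Speech2Text; the values below are
# illustrative defaults, not tuned for SLURP:
# speech2text = Speech2Text.from_pretrained(
#     asr_train_config="slurp/config.yaml",
#     asr_model_file="slurp/valid.acc.ave_10best.pth",
#     beam_size=10,     # beam width for the beam-search decoder
#     ctc_weight=0.3,   # joint CTC/attention decoding weight
#     lm_weight=0.0,    # language-model shallow-fusion weight
#     nbest=1,
# )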
# Confirm that the sampling rate is equal to that of the training corpus.
# If not, resample the audio data before feeding it to speech2text.
# speech, rate = soundfile.read("audio--1504190171-headset.flac")
# nbests = speech2text(speech)
# text, *_ = nbests[0]
# print(text)
# exit()
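# Minimal resampling sketch (assumes the model expects 16 kHz input, which is
# typical for SLURP; verify against the training config before relying on it):
# import scipy.signal
# from math import gcd
# def resample(speech, rate, target_rate=16000):
#     if rate == target_rate:
#         return speech
#     g = gcd(rate, target_rate)
#     return scipy.signal.resample_poly(speech, target_rate // g, rate // g)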
# text2speechen = Text2Speech.from_pretrained(
#     model_tag=str_or_none(tagen),
#     vocoder_tag=str_or_none(vocoder_tagen),
#     device="cpu",
#     # Only for Tacotron 2 & Transformer
#     threshold=0.5,
#     # Only for Tacotron 2
#     minlenratio=0.0,
#     maxlenratio=10.0,
#     use_att_constraint=False,
#     backward_window=1,
#     forward_window=3,
#     # Only for FastSpeech & FastSpeech2 & VITS
#     speed_control_alpha=1.0,
#     # Only for VITS
#     noise_scale=0.333,
#     noise_scale_dur=0.333,
# )
# tagjp = 'kan-bayashi/jsut_full_band_vits_prosody'
# vocoder_tagjp = 'none'
# text2speechjp = Text2Speech.from_pretrained(
#     model_tag=str_or_none(tagjp),
#     vocoder_tag=str_or_none(vocoder_tagjp),
#     device="cpu",
#     # Only for Tacotron 2 & Transformer
#     threshold=0.5,
#     # Only for Tacotron 2
#     minlenratio=0.0,
#     maxlenratio=10.0,
#     use_att_constraint=False,
#     backward_window=1,
#     forward_window=3,
#     # Only for FastSpeech & FastSpeech2 & VITS
#     speed_control_alpha=1.0,
#     # Only for VITS
#     noise_scale=0.333,
#     noise_scale_dur=0.333,
# )
# tagch = 'kan-bayashi/csmsc_full_band_vits'
# vocoder_tagch = "none"
# text2speechch = Text2Speech.from_pretrained(
#     model_tag=str_or_none(tagch),
#     vocoder_tag=str_or_none(vocoder_tagch),
#     device="cpu",
#     # Only for Tacotron 2 & Transformer
#     threshold=0.5,
#     # Only for Tacotron 2
#     minlenratio=0.0,
#     maxlenratio=10.0,
#     use_att_constraint=False,
#     backward_window=1,
#     forward_window=3,
#     # Only for FastSpeech & FastSpeech2 & VITS
#     speed_control_alpha=1.0,
#     # Only for VITS
#     noise_scale=0.333,
#     noise_scale_dur=0.333,
# )
def inference(wav, lang):
    with torch.no_grad():
        if lang == "english":
            # "wav" is a filepath (see the Audio input below), so read the
            # user's recording rather than a hardcoded test file.
            speech, rate = soundfile.read(wav)
            nbests = speech2text(speech)
            text, *_ = nbests[0]
        else:
            text = "Only English is supported in this demo."
        # if lang == "chinese":
        #     wav = text2speechch(text)["wav"]
        #     scipy.io.wavfile.write("out.wav", text2speechch.fs, wav.view(-1).cpu().numpy())
        # if lang == "japanese":
        #     wav = text2speechjp(text)["wav"]
        #     scipy.io.wavfile.write("out.wav", text2speechjp.fs, wav.view(-1).cpu().numpy())
    return text
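# Quick local smoke test using the bundled example clip:
# print(inference("audio-_slurp.flac", "english"))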
title = "ESPnet2-SLU"
description = "Gradio demo for ESPnet2-SLU: Extending the Edge of SLU Research. To use it, simply record your audio. Read more at the links below."
article = "<p style='text-align: center'><a href='https://github.com/espnet/espnet' target='_blank'>Github Repo</a></p>"
examples = [["audio-_slurp.flac", "english"]]

# gr.inputs.Textbox(label="input text", lines=10), gr.inputs.Radio(choices=["english"], type="value", default="english", label="language")
gr.Interface(
    inference,
    [
        # type="filepath" hands inference() a path string, matching soundfile.read above
        gr.inputs.Audio(label="input audio", source="microphone", type="filepath"),
        gr.inputs.Radio(choices=["english"], type="value", default="english", label="language"),
    ],
    gr.outputs.Textbox(type="str", label="Output"),
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples,
).launch(debug=True)