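# Prolove: an English speech-to-text Space built on the microsoft/speecht5_asr
# checkpoint, with a Gradio interface for uploaded or recorded audio.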
import gradio as gr
import librosa
import torch
from transformers import SpeechT5Processor, SpeechT5ForSpeechToText

checkpoint = "microsoft/speecht5_asr"
processor = SpeechT5Processor.from_pretrained(checkpoint)
model = SpeechT5ForSpeechToText.from_pretrained(checkpoint)
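
# SpeechT5 ASR expects mono floating-point audio at 16 kHz; process_audio
# normalizes the raw numpy (sample_rate, waveform) input that Gradio provides.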
def process_audio(sampling_rate, waveform):
    # convert from int16 to floating point in [-1, 1]
    waveform = waveform / 32768.0
    # convert to mono if stereo
    if len(waveform.shape) > 1:
        waveform = librosa.to_mono(waveform.T)
    # resample to 16 kHz if necessary
    if sampling_rate != 16000:
        waveform = librosa.resample(waveform, orig_sr=sampling_rate, target_sr=16000)
    # limit to 30 seconds
    waveform = waveform[:16000 * 30]
    # make PyTorch tensor
    waveform = torch.tensor(waveform)
    return waveform

def predict(audio, mic_audio=None):
    # audio = tuple (sample_rate, frames) or (sample_rate, (frames, channels))
    if mic_audio is not None:
        sampling_rate, waveform = mic_audio
    elif audio is not None:
        sampling_rate, waveform = audio
    else:
        return "(please provide audio)"

    waveform = process_audio(sampling_rate, waveform)
    inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
    predicted_ids = model.generate(**inputs, max_length=400)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
    return transcription[0]

title = "😍🥰 Prolove 🧑‍🎤 👨‍🎤"
description = """Prolove is an app that helps turn words spoken by the user in English into their correct spelling."""
article = """
<div style='margin:20px auto;'>
<pre>
@article{Ao2021SpeechRecog,
  title={PROLOVE},
  author={M_ALVI_ADNAN},
  archivePrefix={arXiv},
  primaryClass={eess.AS},
  year={2021}
}
</pre>

<p>Example sound credits:</p>
<ul>
  <li>"i wanna tell u smth" by <a href="https://freesound.org/people/InspectorJ/sounds/519189/">InspectorJ</a> (CC BY 4.0 license)</li>
  <li>"let me know" by <a href="https://freesound.org/people/acclivity/sounds/24096/">acclivity</a> (CC BY-NC 4.0 license)</li>
  <li>"lets do it" by <a href="https://freesound.org/people/JoyOhJoy/sounds/165348/">JoyOhJoy</a> (CC0 license)</li>
  <li>"listen to me" by <a href="https://freesound.org/people/Sample_Me/sounds/610529/">Sample_Me</a> (CC0 license)</li>
</ul>
</div>
"""

examples = [
    ["examples/I wanna tell you something_alvi.wav", None],
    ["examples/Let me know_fazrin.wav", None],
    ["examples/Lets do it_arka.wav", None],
    ["examples/Listen to me_shifa.wav", None],
]
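
# Two audio inputs are exposed (file upload and microphone); predict() gives
# priority to the microphone recording when both are provided.
# Note: the `source=` keyword targets the Gradio 3.x Audio API; Gradio 4
# renamed it to `sources=[...]`, so pin the Gradio version accordingly.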
gr.Interface(
    fn=predict,
    inputs=[
        gr.Audio(label="Upload Speech", source="upload", type="numpy"),
        gr.Audio(label="Record Speech", source="microphone", type="numpy"),
    ],
    outputs=[
        gr.Text(label="Transcription"),
    ],
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch()