#!/usr/bin/env python3
#
# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# References:
# https://gradio.app/docs/#dropdown

import os
import time
from datetime import datetime

import gradio as gr
import torchaudio

from model import get_pretrained_model, language_to_models, sample_rate

languages = sorted(language_to_models.keys())


def convert_to_wav(in_filename: str) -> str:
    """Convert the input audio file to a wave file using ffmpeg.

    Note: ffmpeg must be installed and on PATH; its exit status is ignored.
    """
    out_filename = in_filename + ".wav"
    print(f"Converting '{in_filename}' to '{out_filename}'")
    _ = os.system(f"ffmpeg -hide_banner -i '{in_filename}' '{out_filename}'")
    return out_filename


def process(
    in_filename: str,
    language: str,
    repo_id: str,
    decoding_method: str,
    num_active_paths: int,
) -> str:
    print("in_filename", in_filename)
    print("language", language)
    print("repo_id", repo_id)
    print("decoding_method", decoding_method)
    print("num_active_paths", num_active_paths)

    filename = convert_to_wav(in_filename)

    now = datetime.now()
    date_time = now.strftime("%Y-%m-%d %H:%M:%S.%f")
    print(f"Started at {date_time}")

    start = time.time()

    wave, wave_sample_rate = torchaudio.load(filename)

    if wave_sample_rate != sample_rate:
        print(
            f"Expected sample rate: {sample_rate}. Given: {wave_sample_rate}. "
            f"Resampling to {sample_rate}."
        )

        wave = torchaudio.functional.resample(
            wave,
            orig_freq=wave_sample_rate,
            new_freq=sample_rate,
        )
    wave = wave[0]  # use only the first channel

    hyp = get_pretrained_model(repo_id).decode_waves(
        [wave],
        decoding_method=decoding_method,
        num_active_paths=num_active_paths,
    )[0]

    # Take a fresh timestamp here; reusing `now` from above would report
    # the start time as the finish time.
    date_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    end = time.time()

    duration = wave.shape[0] / sample_rate
    # Real-time factor: processing time divided by audio duration.
    rtf = (end - start) / duration

    print(f"Finished at {date_time}. Elapsed: {end - start:.3f} s")
    print(f"Duration: {duration:.3f} s")
    print(f"RTF: {rtf:.3f}")
    print("hyp")
    print(hyp)

    # The CSS classes below are defined in the gr.Blocks css further down.
    html_output = f"""
    <div class='result'>
        <div class='result_item result_item_success'>
            {hyp}
""" return html_output title = "# Automatic Speech Recognition with Next-gen Kaldi" description = """ This space shows how to do automatic speech recognition with Next-gen Kaldi. See more information by visiting the following links: - - - - """ def update_model_dropdown(language: str): if language in language_to_models: choices = language_to_models[language] return gr.Dropdown.update(choices=choices, value=choices[0]) raise ValueError(f"Unsupported language: {language}") # The css style is copied from # https://huggingface.co/spaces/alphacep/asr/blob/main/app.py#L112 demo = gr.Blocks( css=""" .result {display:flex;flex-direction:column} .result_item {padding:15px;margin-bottom:8px;border-radius:15px;width:100%} .result_item_success {background-color:mediumaquamarine;color:white;align-self:start} .result_item_error {background-color:#ff7070;color:white;align-self:start} """, ) with demo: gr.Markdown(title) language_choices = list(language_to_models.keys()) language_radio = gr.Radio( label="Language", choices=language_choices, value=language_choices[0], ) model_dropdown = gr.Dropdown( choices=language_to_models[language_choices[0]], label="Select a model", value=language_to_models[language_choices[0]][0], ) language_radio.change( update_model_dropdown, inputs=language_radio, outputs=model_dropdown, ) decoding_method_radio = gr.Radio( label="Decoding method", choices=["greedy_search", "modified_beam_search"], value="greedy_search", ) num_active_paths_slider = gr.Slider( minimum=1, value=4, step=1, label="Number of active paths for modified_beam_search", ) with gr.Tabs(): with gr.TabItem("Upload from disk"): uploaded_file = gr.Audio( source="upload", # Choose between "microphone", "upload" type="filepath", optional=False, label="Upload from disk", ) uploaded_output = gr.HTML(label="Recognized speech from uploaded file") upload_button = gr.Button("Submit for recognition") with gr.TabItem("Record from microphone"): microphone = gr.Audio( source="microphone", # Choose between "microphone", "upload" type="filepath", optional=False, label="Record from microphone", ) record_button = gr.Button("Submit for recognition") recorded_output = gr.HTML(label="Recognized speech from recordings") upload_button.click( process, inputs=[ uploaded_file, language_radio, model_dropdown, decoding_method_radio, num_active_paths_slider, ], outputs=uploaded_output, ) record_button.click( process, inputs=[ microphone, language_radio, model_dropdown, decoding_method_radio, num_active_paths_slider, ], outputs=recorded_output, ) gr.Markdown(description) if __name__ == "__main__": demo.launch()