"""Gradio demo: transcribe English speech and translate it to Simplified Chinese.

Flow: microphone audio -> wav2vec2 ASR (English text) -> NLLB-200 translation.
"""
import gradio as gr
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

# ASR pipeline: English speech -> English text.
asr = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")

# Translation model; this model was loaded from https://hf.co/models
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")

# Use the first CUDA device when available, otherwise CPU (transformers convention).
device = 0 if torch.cuda.is_available() else -1

# FLORES-200 language codes, see
# https://github.com/facebookresearch/flores/tree/main/flores200#languages-in-flores-200
# Yue Chinese - yue_Hant, Chinese (Simplified) - zho_Hans, Chinese (Traditional) - zho_Hant
LANGS = ["ace_Arab", "eng_Latn", "fra_Latn", "spa_Latn", "yue_Hant", "zho_Hans", "zho_Hant"]
LANGS_source = ["eng_Latn"]


def translate(text, src_lang, tgt_lang):
    """Translate *text* from src_lang to tgt_lang (FLORES-200 codes).

    A translation pipeline is built per call because src/tgt languages are
    pipeline-construction arguments for NLLB.
    """
    translation_pipeline = pipeline(
        "translation",
        model=model,
        tokenizer=tokenizer,
        src_lang=src_lang,
        tgt_lang=tgt_lang,
        max_length=400,
        device=device,
    )
    result = translation_pipeline(text)
    return result[0]["translation_text"]


def transcribe(audio):
    """Transcribe an audio file (path) to English, then translate to zho_Hans."""
    text = asr(audio)["text"]
    return translate(text, "eng_Latn", "zho_Hans")


gr.Interface(
    fn=transcribe,
    # NOTE(review): `source=` is the Gradio 3.x keyword; Gradio 4+ renamed it
    # to `sources=["microphone"]` — confirm the installed Gradio version.
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs="text",
).launch()