from transformers import pipeline
import gradio as gr

# Three pipelines: Spanish speech-to-text, Spanish-to-English translation,
# and biomedical named-entity recognition on the translated English text.
trans = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-xlsr-53-spanish")
traductor = pipeline("translation", model="Helsinki-NLP/opus-mt-es-en")
# Token-level output ("entity" keys) is accepted directly by gr.HighlightedText;
# adding aggregation_strategy="simple" would merge subword tokens into whole
# words if your Gradio version also accepts "entity_group" keys.
ner = pipeline("ner", model="d4data/biomedical-ner-all")


def audio2text(audio):
    # Gradio passes the recorded clip as a file path; transcribe the Spanish audio.
    return trans(audio)["text"]


def text2eng(text):
    # Translate the Spanish transcription into English.
    return traductor(text)[0]["translation_text"]


def eng2ner(text):
    # Tag biomedical entities; this dict format is what HighlightedText expects.
    output = ner(text)
    return {"text": text, "entities": output}


demo = gr.Blocks()
with demo:
    gr.Markdown("Demo: from a clinical history in Spanish to entities in English")
    audio = gr.Audio(sources=["microphone"], type="filepath")
    b_text = gr.Button("Transcribe")
    texto = gr.Textbox()
    b_text.click(audio2text, inputs=audio, outputs=texto)

    b_trans = gr.Button("Translate history")
    transcripcion = gr.Textbox()
    b_trans.click(text2eng, inputs=texto, outputs=transcripcion)

    b_ner = gr.Button("Search entities")
    entidades = gr.HighlightedText()
    b_ner.click(eng2ner, inputs=transcripcion, outputs=entidades)

demo.launch()
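
# Environment note (an assumption, not pinned by the original script): running
# this demo requires the packages below, plus ffmpeg on the system PATH for
# audio decoding in the ASR pipeline. sentencepiece is needed by the
# Helsinki-NLP Marian tokenizer.
#
#   pip install transformers torch gradio sentencepiece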