import gradio as gr
from transformers import pipeline

# Spanish speech-to-text and sentiment-analysis pipelines from the Hugging Face Hub.
transcripcion = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-xlsr-53-spanish")
clasificador = pipeline("text-classification", model="pysentimiento/robertuito-sentiment-analysis")


def audio_a_texto(audio):
    # Transcribe the recorded audio and return the text twice: once for the
    # transcription output box, once to pre-fill the classifier's input box.
    texto = transcripcion(audio)["text"]
    return texto, texto


def texto_a_sentimiento(texto):
    # Return the top predicted sentiment label (POS / NEG / NEU) for the text.
    sentimiento = clasificador(texto)[0]["label"]
    return sentimiento


demo = gr.Blocks()

with demo:
    gr.Markdown("## Audio-to-text transcription and sentiment classification - Spanish")
    with gr.Tabs():
        with gr.TabItem("Transcribe"):
            with gr.Row():
                audio_input = gr.Audio(sources=["microphone"], type="filepath")
                texto_output = gr.Textbox(label="Audio to text")
            with gr.Row():
                b1 = gr.Button("Transcribe 🎙️✍🏻")
        with gr.TabItem("Sentiment Classification"):
            with gr.Row():
                texto_input = gr.Textbox(label="Text to classify")
                sentimiento_output = gr.Label()
            with gr.Row():
                b2 = gr.Button("Sentiment Classification 🤖")

    # Keep the two text boxes in sync: the transcription also feeds the classifier input.
    b1.click(audio_a_texto, inputs=audio_input, outputs=[texto_output, texto_input])
    b2.click(texto_a_sentimiento, inputs=texto_input, outputs=sentimiento_output)

demo.launch()