import gradio as gr
from transformers import pipeline
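
# Spanish speech-to-text pipeline (wav2vec2 fine-tuned on Spanish).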
trans = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-xlsr-53-spanish")
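
# Spanish sentiment-analysis pipeline (RoBERTuito).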
clasificador = pipeline("text-classification", model="pysentimiento/robertuito-sentiment-analysis")


def audio_to_text(audio):
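    # Transcribe the recorded audio and return the recognized text.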
    text = trans(audio)["text"]
    return text


def text_to_feel(text):
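    # Return the predicted sentiment label (e.g. POS, NEG, NEU) for the text.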
    return clasificador(text)[0]["label"]


demo = gr.Blocks()

with demo:
    gr.Markdown("Blocks demo")
    with gr.Tabs():
        with gr.TabItem("Transcription"):
            with gr.Row():
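                # Record audio from the microphone; type="filepath" passes the recording to the function as a file path.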
                audio = gr.Audio(source="microphone", type="filepath")
                transcription = gr.Textbox()
            b1 = gr.Button("Transcribe")
        with gr.TabItem("Sentiment"):
            with gr.Row():
                texto = gr.Textbox()
                label = gr.Label()
            b2 = gr.Button("Classify sentiment")
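
    # Connect each button to its function: the input component is passed to the
    # function and the output component displays the returned value.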
    b1.click(audio_to_text, inputs=audio, outputs=transcription)
    b2.click(text_to_feel, inputs=texto, outputs=label)

demo.launch()