import gradio as gr
from transformers import pipeline
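# Gradio demo: run a Hugging Face audio-classification pipeline on a recorded or
# uploaded clip and display the predicted sentiment labels with their scores.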
def classify_sentiment(audio, model):
    """Classify the sentiment of an audio clip with the selected model and
    return a {label: score} dict that the Gradio Label output can render."""
    pipe = pipeline("audio-classification", model=model)
    pred = pipe(audio)
    return {dic["label"]: dic["score"] for dic in pred}
input_audio = [
    gr.inputs.Audio(source="microphone", type="filepath", label="Record / Drop audio"),
    gr.inputs.Dropdown(
        [
            "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-classification-MESD",
            "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-mesd",
        ],
        label="Model Name",
    ),
]
label = gr.outputs.Label(num_top_classes=5)
################### Gradio Web APP ################################
title = "Audio Sentiment Classifier"
description = """
This application classifies the sentiment of the audio input provided by the user.
"""
gr.Interface(
    fn=classify_sentiment,
    inputs=input_audio,
    outputs=label,
    title=title,
    description=description,
    examples=[["Examples/basta_neutral.wav", "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-classification-MESD"],
              ["Examples/detras_disgust.wav", "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-classification-MESD"],
              ["Examples/mortal_sadness.wav", "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-classification-MESD"],
              ["Examples/respiracion_happiness.wav", "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-classification-MESD"],
              ["Examples/robo_fear.wav", "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-classification-MESD"]],
    theme="grass").launch()
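# Example of calling the classifier directly, outside the web UI (assumes the bundled
# example audio file is present); it returns a dict mapping labels to confidence scores:
#   classify_sentiment("Examples/basta_neutral.wav",
#                      "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-classification-MESD")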