import gradio as gr
import librosa
from transformers import AutoFeatureExtractor, pipeline

def load_and_fix_data(input_file, model_sampling_rate):
    # librosa.load returns a float32 waveform; with the default mono=True it is 1-D.
    speech, sample_rate = librosa.load(input_file)
    # Defensive downmix in case a multi-channel array slips through.
    if len(speech.shape) > 1:
        speech = speech[:, 0] + speech[:, 1]
    # Resample to the model's expected rate (recent librosa requires keyword arguments here).
    if sample_rate != model_sampling_rate:
        speech = librosa.resample(speech, orig_sr=sample_rate, target_sr=model_sampling_rate)
    return speech
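
# Example usage (illustrative only; "some_clip.wav" is a hypothetical local file):
#   speech = load_and_fix_data("some_clip.wav", 16_000)
#   # -> 1-D numpy array resampled to 16 kHz, ready for the ASR pipeline below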

# The checkpoint's feature extractor defines the sampling rate the model expects
# (16 kHz for this wav2vec2 XLSR checkpoint).
feature_extractor = AutoFeatureExtractor.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-spanish")
sampling_rate = feature_extractor.sampling_rate

asr = pipeline("automatic-speech-recognition", model="jonatasgrosman/wav2vec2-large-xlsr-53-spanish")

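# Quick check of the ASR step alone (sketch): the pipeline also accepts a file path
# directly, e.g. the example clip bundled with this demo.
#   print(asr("respiracion_happiness.wav")["text"])
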
def predict_and_ctc_lm_decode(input_file):
    # Load the recording and resample it to the rate the ASR model expects.
    speech = load_and_fix_data(input_file, sampling_rate)
    # Chunked inference keeps memory bounded for longer clips.
    transcribed_text = asr(speech, chunk_length_s=5, stride_length_s=1)["text"]

    # Sentiment analysis of the Spanish transcription.
    pipe1 = pipeline("sentiment-analysis", model="finiteautomata/beto-sentiment-analysis")
    sentiment = pipe1(transcribed_text)
    sentiment = {dic["label"]: dic["score"] for dic in sentiment}

    # Sexism detection on the same transcription (computed but not shown by the
    # interface below, which only displays the sentiment label).
    pipe2 = pipeline("text-classification", model="hackathon-pln-es/twitter_sexismo-finetuned-robertuito-exist2021")
    sexism_detection = pipe2(transcribed_text)
    sexism_detection = {dic["label"]: dic["score"] for dic in sexism_detection}

    return sentiment
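
# End-to-end check (sketch): run the whole function on the bundled example clip
# referenced in the Interface below.
#   print(predict_and_ctc_lm_decode("respiracion_happiness.wav"))
#   # -> a {label: score} dict for the transcribed text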

# Note: gr.inputs / gr.outputs and the string "layout" / "theme" arguments below are the
# legacy Gradio 2.x interface API.
gr.Interface(
    predict_and_ctc_lm_decode,
    inputs=[
        gr.inputs.Audio(source="microphone", type="filepath", label="Record your audio")
    ],
    outputs=[gr.outputs.Label(num_top_classes=2)],
    examples=[["respiracion_happiness.wav"]],
    title="Sentiment Analysis of Spanish Transcribed Audio",
    description="A Gradio demo that transcribes spoken Spanish and reports the sentiment of the transcription.",
    layout="horizontal",
    theme="huggingface",
).launch(enable_queue=True, cache_examples=True)