import gradio as gr
import librosa
from transformers import AutoFeatureExtractor, pipeline


def load_and_fix_data(input_file, model_sampling_rate):
    """Load an audio file, downmix to mono if needed, and resample to the model's rate.

    Parameters
    ----------
    input_file : str
        Path to the audio file to load.
    model_sampling_rate : int
        Sampling rate expected by the ASR model.

    Returns
    -------
    numpy.ndarray
        1-D waveform at ``model_sampling_rate``.
    """
    speech, sample_rate = librosa.load(input_file)
    if len(speech.shape) > 1:
        # Multi-channel audio: downmix by summing the two channels
        # (preserves the original behavior of this script).
        speech = speech[:, 0] + speech[:, 1]
    if sample_rate != model_sampling_rate:
        # orig_sr/target_sr are keyword-only in librosa >= 0.10; passing them
        # positionally raises TypeError there. Keywords also work on older versions.
        speech = librosa.resample(speech, orig_sr=sample_rate, target_sr=model_sampling_rate)
    return speech


# Load all models once at import time. The sexism classifier was previously
# constructed inside predict_and_ctc_lm_decode, which reloaded the full model
# on every request — hoisting it here makes each prediction much faster.
feature_extractor = AutoFeatureExtractor.from_pretrained(
    "jonatasgrosman/wav2vec2-large-xlsr-53-spanish"
)
sampling_rate = feature_extractor.sampling_rate
asr = pipeline(
    "automatic-speech-recognition",
    model="jonatasgrosman/wav2vec2-large-xlsr-53-spanish",
)
sexism_classifier = pipeline(
    "text-classification",
    model="hackathon-pln-es/twitter_sexismo-finetuned-robertuito-exist2021",
)


def predict_and_ctc_lm_decode(input_file):
    """Transcribe Spanish audio and classify the transcription for sexism.

    Parameters
    ----------
    input_file : str
        Path to the recorded audio file (Gradio passes a filepath).

    Returns
    -------
    str
        The label string produced by the sexism classifier for the transcript.
    """
    speech = load_and_fix_data(input_file, sampling_rate)
    # Chunked inference with a small stride keeps memory bounded on long clips.
    transcribed_text = asr(speech, chunk_length_s=5, stride_length_s=1)["text"]
    sexism_detection = sexism_classifier(transcribed_text)[0]["label"]
    return sexism_detection


gr.Interface(
    predict_and_ctc_lm_decode,
    inputs=[gr.inputs.Audio(source="microphone", type="filepath", label="Record your audio")],
    outputs=[gr.outputs.Textbox(label="Predicción")],
    examples=[
        ["audio1.wav"],
        ["audio2.wav"],
        ["audio3.wav"],
        ["audio4.wav"],
        ["sample_audio.wav"],
    ],
    title="Spanish-Audio-Transcription-based-Sexism-Detection",
    # Fixed: the previous description said "Sentiment Analysis", but this app
    # performs sexism detection on the transcribed audio.
    description="This is a Gradio demo for Sexism Detection of Transcribed Spanish Audio",
    layout="horizontal",
    theme="huggingface",
).launch(enable_queue=True, cache_examples=True)