import torchaudio
from transformers import pipeline

# Load the Hugging Face ASR model (wav2vec2-base-960h expects 16 kHz mono audio)
asr_pipeline = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")

def inference(audio, sentiment_option):
    # Load the audio file; torchaudio.load returns (waveform, sample_rate)
    waveform, sample_rate = torchaudio.load(audio)

    # Resample to 16 kHz if needed, since the model was trained on 16 kHz audio
    if sample_rate != 16000:
        waveform = torchaudio.functional.resample(waveform, sample_rate, 16000)
        sample_rate = 16000

    # Perform ASR; the pipeline accepts a dict with the raw array and its sampling rate
    result = asr_pipeline({"raw": waveform[0].numpy(), "sampling_rate": sample_rate})
    transcription = result["text"]

    # Perform sentiment analysis on the transcribed text
    sentiment_results = analyze_sentiment(transcription)
    sentiment_output = display_sentiment_results(sentiment_results, sentiment_option)

    return "N/A", transcription, sentiment_output
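The inference function above depends on two helpers, analyze_sentiment and display_sentiment_results, which are assumed to be defined elsewhere in the app. As a minimal sketch only, assuming a Hugging Face sentiment-analysis pipeline and an illustrative sentiment_option value, they might look like this (the actual definitions may differ):

from transformers import pipeline

# Hypothetical helpers for illustration; the real implementations may differ.
sentiment_pipeline = pipeline("sentiment-analysis")

def analyze_sentiment(text):
    # Returns a list like [{"label": "POSITIVE", "score": 0.99}]
    return sentiment_pipeline(text)

def display_sentiment_results(sentiment_results, sentiment_option):
    # sentiment_option is assumed here to control whether scores are shown;
    # "Sentiment + Score" is an illustrative value, not taken from the source.
    lines = []
    for result in sentiment_results:
        if sentiment_option == "Sentiment + Score":
            lines.append(f"{result['label']} ({result['score']:.2f})")
        else:
            lines.append(result["label"])
    return "\n".join(lines)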