import os
from app.common.custom import CustomResponse
import torchaudio
import numpy as np
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification
import torch


class StutteringRecognition:
    """Multi-label stuttering-type classifier built on a Wav2Vec2 model.

    Loads a pretrained ``Wav2Vec2ForSequenceClassification`` checkpoint and
    scores raw audio for each stuttering label via independent sigmoid
    probabilities (multi-label, not softmax).
    """

    # Label order must match the output head of the trained checkpoint.
    STUTTERING_TYPES = (
        "Prolongation",
        "Block",
        "SoundRep",
        "WordRep",
        "NaturalPause",
        "NoStutteredWords",
        "Interjection",
    )

    def __init__(self, model_path, threshold):
        """Load model and feature extractor from *model_path*.

        Args:
            model_path: Path or hub id of the pretrained checkpoint.
            threshold: Probability cutoff for calling a label "detected".
                NOTE(review): currently stored but not applied in
                ``predict_stuttering`` (raw probabilities are returned);
                kept for interface compatibility — confirm intended use.
        """
        self.model = Wav2Vec2ForSequenceClassification.from_pretrained(model_path)
        self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_path)
        self.model.eval()  # inference mode: disable dropout etc.
        self.threshold = threshold

    def predict_stuttering(self, audio, sample_rate):
        """Score *audio* for each stuttering type.

        Args:
            audio: Raw waveform samples (array-like) — assumed mono;
                format must match what the feature extractor expects.
            sample_rate: Sampling rate of *audio* in Hz.

        Returns:
            dict mapping each label in ``STUTTERING_TYPES`` to its
            sigmoid probability (float in [0, 1]).
        """
        inputs = self.feature_extractor(
            audio, sampling_rate=sample_rate, return_tensors="pt", padding=True
        )

        # Forward pass without gradient tracking — inference only.
        with torch.no_grad():
            logits = self.model(**inputs).logits

        # Sigmoid (not softmax): labels are independent in multi-label setup.
        probabilities = torch.sigmoid(logits).squeeze(0).tolist()

        # A batch dimension may survive squeeze (e.g. batch size > 1);
        # flatten one level of nesting if so.
        if probabilities and isinstance(probabilities[0], list):
            probabilities = [p for row in probabilities for p in row]

        return dict(zip(self.STUTTERING_TYPES, probabilities))