import os

# Route Hugging Face hub downloads through the mirror endpoint.
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

# Pick the best available accelerator: Ascend NPU > CUDA GPU > CPU.
# BUG FIX: a later unconditional `device = 'npu' if ... else 'cpu'` used to
# clobber this chain and silently discard the CUDA selection; it is removed.
try:
    import torch
    import torch_npu  # registers the 'npu' device type with torch

    if torch.npu.is_available():
        device = "npu:0"
    elif torch.cuda.is_available():
        device = "cuda"
    else:
        device = 'cpu'
except Exception:
    # torch / torch_npu missing or broken — fall back to CPU.
    device = 'cpu'

try:
    import soundfile as sf
except ImportError:
    # soundfile unavailable — fall back to scipy's WAV reader.
    # `sf = None` is the sentinel load_wav_to_numpy checks to pick the path.
    from scipy.io import wavfile
    sf = None

import numpy as np
import torch.nn as nn
from transformers import Wav2Vec2Processor
from transformers.models.wav2vec2.modeling_wav2vec2 import (
    Wav2Vec2Model,
    Wav2Vec2PreTrainedModel,
)

class RegressionHead(nn.Module):
    r"""Regression head mapping pooled features to ``num_labels`` outputs.

    Architecture: dropout -> linear -> tanh -> dropout -> linear.
    ``config`` must provide ``hidden_size``, ``final_dropout`` and
    ``num_labels``.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.final_dropout)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        r"""Project ``features`` of shape (batch, hidden_size) to (batch, num_labels)."""
        hidden = self.dropout(features)
        hidden = torch.tanh(self.dense(hidden))
        return self.out_proj(self.dropout(hidden))


class EmotionModel(Wav2Vec2PreTrainedModel):
    r"""wav2vec2 backbone with a regression head for dimensional emotion scores."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.wav2vec2 = Wav2Vec2Model(config)
        self.classifier = RegressionHead(config)
        self.init_weights()

    def forward(self, input_values):
        r"""Encode raw waveform ``input_values``.

        Returns ``(pooled_hidden_states, logits)`` where the hidden states
        are mean-pooled over the time axis before classification.
        """
        encoded = self.wav2vec2(input_values)
        pooled = torch.mean(encoded[0], dim=1)
        return pooled, self.classifier(pooled)

class wavvecModel():
    """Singleton holder for the wav2vec2 emotion model and its processor.

    The model path is read from the ``WAV2VEC2_MODEL_PATH`` environment
    variable; the model is loaded once and moved to the detected device.
    """

    _instance = None        # the single shared instance
    _initialized = False    # guards against re-running the heavy __init__

    def __new__(cls):
        # Singleton: every call site shares the same instance.
        if cls._instance is None:
            cls._instance = super(wavvecModel, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        # Skip the expensive model load when already initialized.
        if wavvecModel._initialized:
            return

        model_name = os.getenv('WAV2VEC2_MODEL_PATH')
        if not model_name:
            # Fail fast with a clear message instead of letting
            # from_pretrained(None) raise a confusing error deep inside
            # transformers.
            raise ValueError('WAV2VEC2_MODEL_PATH environment variable is not set')

        self._processor = Wav2Vec2Processor.from_pretrained(model_name)
        self._model = EmotionModel.from_pretrained(model_name).to(device)
        wavvecModel._initialized = True

async def load_wav_to_numpy(wav_path: str, target_sr: int = None):
    r"""Load a WAV file into a mono float32 numpy array.

    Args:
        wav_path: path to the WAV file.
        target_sr: resample to this rate when given; otherwise keep the
            file's native sampling rate.

    Returns:
        (audio_data, sampling_rate): audio samples as a numpy array and
        the (possibly resampled) sampling rate.
    """
    if sf is not None:
        # Preferred path: soundfile decodes straight to float32.
        samples, rate = sf.read(wav_path, dtype='float32')
        # Stereo arrives as (frames, channels); average channels to mono.
        if samples.ndim > 1:
            samples = np.mean(samples, axis=1)
    else:
        # Fallback path: scipy returns raw PCM, so normalize to [-1, 1].
        rate, samples = wavfile.read(wav_path)
        if samples.dtype == np.int16:
            samples = samples.astype(np.float32) / 32768.0
        elif samples.dtype == np.int32:
            samples = samples.astype(np.float32) / 2147483648.0
        elif samples.dtype == np.uint8:
            # 8-bit WAV is unsigned; re-center around zero before scaling.
            samples = (samples.astype(np.float32) - 128.0) / 128.0
        else:
            samples = samples.astype(np.float32)

    # Optional resampling to the caller's target rate.
    if target_sr is not None and rate != target_sr:
        from scipy import signal as scipy_signal
        new_length = int(len(samples) * target_sr / rate)
        samples = scipy_signal.resample(samples, new_length)
        rate = target_sr

    return samples, rate

# Eagerly build the singleton at import time so the model is loaded once,
# up front, rather than on the first request.
wavvecModel()

async def process_func(
    x: np.ndarray,
    sampling_rate: int,
    embeddings: bool = False,
) -> np.ndarray:
    '''Run the wav2vec2 emotion model on a raw waveform.

    Returns a pair:
      - the processor's normalized input values as a plain list, and
      - a dict with three dimensional emotion scores:
        Arousal   — physiological activation level (calm .. excited),
        Dominance — perceived sense of control (submissive .. dominant),
        Valence   — positivity of the emotion (negative .. positive).

    When ``embeddings`` is True the model's pooled hidden states are used
    as the score tensor instead of the regression logits.
    '''
    service = wavvecModel()
    processed = service._processor(x, sampling_rate=sampling_rate)
    inputs = processed['input_values'][0]
    tensor = torch.from_numpy(inputs.reshape(1, -1)).to(device)

    # Model returns (pooled_embeddings, logits); pick by flag.
    with torch.no_grad():
        scores = service._model(tensor)[0 if embeddings else 1]

    scores = scores.detach().cpu().numpy().tolist()

    classmap = {
        'Arousal': scores[0][0],
        'Dominance': scores[0][1],
        'Valence': scores[0][2],
    }
    return processed['input_values'][0].tolist(), classmap

async def wav2vecService(wav_path:str , target_sr: int):
    """Load ``wav_path`` at ``target_sr`` and run emotion inference on it.

    Returns ``(vector, classmap)`` as produced by ``process_func``.
    """
    audio, rate = await load_wav_to_numpy(wav_path, target_sr)
    return await process_func(audio, rate)