import os
import audonnx
import torchaudio
import numpy as np
from pathlib import Path
from collections import deque
from src.utils.other_to_wav import AudioConverter

# Resolve the absolute path of the project root directory
def get_project_root():
    """Return the project root directory, portable across OSes and deployments.

    Layout assumption: <root>/src/service/<this file>, hence three parents up.
    """
    return Path(__file__).resolve().parent.parent.parent

# Unified helper for building model paths
def get_model_path(model_name):
    """Return the filesystem path of ``model/<model_name>`` as a string.

    Uses pathlib joining so separators adapt to the running OS.
    """
    return str(get_project_root() / "model" / model_name)

class RobustAudioEmotionAnalyzer:
    """
    Robust audio emotion analysis pipeline.

    Processing flow:
    1. Convert input to WAV when needed (temporary file, removed afterwards)
    2. Load audio, downmix to mono, resample to 16 kHz, boost quiet signals
    3. Score fixed-length chunks with the VAD (valence/arousal/dominance) model
    4. Median-filter scores over a sliding window of chunks, then aggregate
       window medians with a mean
    5. Emit a "//"-separated emotion-label sequence (one label per full window)
    """

    def __init__(self, model_path, window_size=3, chunk_size=1.8):
        """
        :param model_path: directory of the audonnx model to load
        :param window_size: sliding-window length in chunks (an odd number is
                            recommended so the median is an actual sample)
        :param chunk_size: analysis chunk length in seconds
        """
        # Model and audio-format conversion helpers
        self.model = audonnx.load(model_path)
        self.converter = AudioConverter()
        self.temp_wav_path = None  # temp WAV produced by format conversion, if any

        # Analysis parameters
        self.window_size = window_size
        self.chunk_size = chunk_size
        self.sample_rate = 16000
        self.frame_duration = 0.03  # 30 ms VAD frame (currently unused; reserved)

        # Per-dimension sliding windows holding the most recent chunk scores
        self.vad_windows = {
            'valence': deque(maxlen=window_size),
            'arousal': deque(maxlen=window_size),
            'dominance': deque(maxlen=window_size)
        }
        self.median_results = []    # one {'arousal','valence','dominance'} median per full window
        self.emotion_sequence = []  # emotion label per full window
        self.emotion_labels = []    # raw label strings (may carry a Chinese suffix)

    def _analyze_audio_chunk(self, chunk):
        """Run the model on one chunk; return its raw VAD logits as a dict."""
        # The ONNX model requires float32 input.
        if chunk.dtype != np.float32:
            chunk = chunk.astype(np.float32)
        outputs = self.model(chunk, self.sample_rate)
        logits = outputs['logits'][0]

        print(f"Logits 原始值: {logits}")

        # NOTE(review): audEERING's published w2v2-L-robust head outputs
        # [arousal, dominance, valence] — confirm this index mapping matches
        # the model actually deployed here.
        return {
            'arousal': logits[0],
            'valence': logits[1],
            'dominance': logits[2]
        }

    def _update_sliding_window(self, vad_result):
        """Push one chunk result into the sliding windows.

        Once the windows are full (deques have maxlen == window_size, so they
        stay full from then on), record the per-dimension medians and the
        emotion label derived from them.
        """
        for dim in self.vad_windows:
            self.vad_windows[dim].append(vad_result[dim])

        if len(self.vad_windows['valence']) == self.window_size:
            # Median of the current window, per dimension
            median_result = {
                dim: np.median(self.vad_windows[dim])
                for dim in self.vad_windows
            }
            self.median_results.append(median_result)

            # Label the median point of the current window
            emotion_label, _ = self.classify_emotion(
                valence=median_result['valence'],
                arousal=median_result['arousal'],
                dominance=median_result['dominance']
            )
            self.emotion_sequence.append(emotion_label)
            self.emotion_labels.append(emotion_label)

    def _robust_aggregate(self):
        """Aggregate window medians into a final VAD triple (mean of medians).

        Returns neutral defaults (0.5 each) instead of raising when no full
        window was ever produced (e.g. very short audio).
        """
        if not self.median_results:
            print("警告：未检测到足够的有效语音段，使用默认情感值")
            return {
                'arousal': 0.5,  # neutral activation
                'valence': 0.5,  # neutral pleasantness
                'dominance': 0.5  # neutral control
            }

        return {
            'arousal': np.mean([r['arousal'] for r in self.median_results]),
            'valence': np.mean([r['valence'] for r in self.median_results]),
            'dominance': np.mean([r['dominance'] for r in self.median_results])
        }

    def _format_emotion_sequence(self):
        """Join recorded labels with "//", keeping only the English part.

        The English part is whatever precedes the Chinese full-width
        parenthesis "（"; labels without one are kept verbatim.
        """
        english_labels = []
        for label in self.emotion_labels:
            if "（" in label:
                english_labels.append(label.split("（")[0].strip())
            else:
                english_labels.append(label)
        return "//".join(english_labels)

    def _cleanup_temp_files(self):
        """Remove the temporary WAV created during format conversion, if any."""
        if self.temp_wav_path and os.path.exists(self.temp_wav_path):
            try:
                os.remove(self.temp_wav_path)
                print(f"已清理临时文件: {self.temp_wav_path}")
            except OSError as e:  # narrowed: os.remove raises OSError
                print(f"清理临时文件失败: {e}")
        # Fix: forget the path so a later run cannot act on a stale entry.
        self.temp_wav_path = None

    def process_audio(self, audio_path, min_volume_threshold=0.01):
        """Full processing pipeline.

        :param audio_path: audio file path (any format AudioConverter handles)
        :param min_volume_threshold: kept for interface compatibility; quiet
            audio is boosted toward a fixed target RMS below
        :return: dict with final emotion, description, per-window label
            sequence, window medians, aggregated VAD values and stats
        """
        # Reset all per-run state so repeated calls don't leak results
        self.median_results = []
        self.emotion_sequence = []
        self.emotion_labels = []
        self.temp_wav_path = None
        for dim in self.vad_windows:
            self.vad_windows[dim].clear()

        try:
            # 1. Format conversion (only when the input is not already WAV)
            audio_path = Path(audio_path)
            if audio_path.suffix.lower() != '.wav':
                converted_path = self.converter.convert(str(audio_path))

                self.temp_wav_path = converted_path  # remember for cleanup
                audio_path = Path(converted_path)

            print(audio_path)

            # 2. Load and preprocess
            waveform, sr = torchaudio.load(str(audio_path))
            if waveform.shape[0] > 1:
                # Fix: downmix multi-channel audio so later chunking iterates
                # over samples, not channels.
                waveform = waveform.mean(dim=0, keepdim=True)
            if sr != self.sample_rate:
                waveform = torchaudio.functional.resample(waveform, orig_freq=sr, new_freq=self.sample_rate)
            audio = waveform.numpy().squeeze()
            original_samples = max(len(audio), 1)  # guard divide-by-zero for empty audio

            # Volume boost: raise very quiet audio toward a target RMS so the
            # model sees a usable signal level
            rms = np.sqrt(np.mean(audio**2))
            print(f"原始音频RMS值: {rms}")

            target_rms = 0.05
            if rms < target_rms and rms > 0:
                gain_factor = target_rms / rms
                audio = np.clip(audio * gain_factor, -1.0, 1.0)  # keep samples in [-1, 1]
                print(f"应用音量增益: {gain_factor}倍")
                print(f"增强后RMS值: {np.sqrt(np.mean(audio**2))}")

            speech_audio = audio

            # 3. Chunking
            samples_per_chunk = int(self.chunk_size * self.sample_rate)

            # Guarantee at least one full chunk for analysis
            if len(speech_audio) < samples_per_chunk:
                print("警告：音频长度不足，进行零填充")
                # Zero-pad short audio up to one full chunk
                speech_audio = np.pad(speech_audio, (0, samples_per_chunk - len(speech_audio)), 'constant')

            for i in range(0, len(speech_audio), samples_per_chunk):
                chunk = speech_audio[i:i + samples_per_chunk]
                if len(chunk) < samples_per_chunk * 0.5:  # drop a tail shorter than half a chunk
                    continue

                # 4. Score the chunk and update the sliding windows
                vad_result = self._analyze_audio_chunk(chunk)
                self._update_sliding_window(vad_result)

            # 5. Robust aggregation
            print(f"\n当前所有窗口的中位数值：")
            for i, result in enumerate(self.median_results):
                print(f"窗口 {i + 1}: {result}")

            final_vad = self._robust_aggregate()
            emotion, description = self.classify_emotion(**final_vad)

            # Guarantee at least one label so the sequence is never empty
            # (fix: reuse `emotion` instead of re-running classify_emotion)
            if not self.emotion_labels:
                self.emotion_labels.append(emotion)

            formatted_sequence = self._format_emotion_sequence()
        finally:
            self._cleanup_temp_files()

        # Fix: zero-padding could push the retention ratio above 1.0; clamp it
        vad_ratio = min(1.0, len(speech_audio) / original_samples)

        # Final report
        print(f"\n最终分析结果：")
        print(f"- 情感状态: {emotion}")
        print(f"- 特征描述: {description}")
        print(f"- VAD保留比例: {vad_ratio}")
        print(f"- 处理块数量: {len(self.median_results)}")
        print(f"- 情感序列: {formatted_sequence}")
        print(f"- 激活度: {final_vad['arousal']:.3f} (平静→兴奋)")
        print(f"- 愉悦度: {final_vad['valence']:.3f} (消极→积极)")
        print(f"- 控制度: {final_vad['dominance']:.3f} (被动→掌控)")

        return {
            "emotion": emotion,
            "description": description,
            "emotion_sequence": formatted_sequence,  # "//"-separated labels
            "vad_values": self.median_results,
            **final_vad,
            "vad_ratio": vad_ratio,
            "processed_chunks": len(self.median_results)
        }

    @staticmethod
    def classify_emotion(valence, arousal, dominance):
        """Map a VAD triple to an interview-oriented emotion label.

        Value range assumption: roughly [0.1, 0.9] (model output mapping).

        Label rationale (interview-scenario VAD characteristics):
        - Confident: high valence + high dominance (positive, in control)
        - Nervous:   high arousal + low valence (agitated, uneasy)
        - Calm:      everything else (stable, even delivery)

        :return: (label, description) tuple; the label carries the English
                 prefix consumed by _format_emotion_sequence.
        """
        # 1. Confident (high pleasantness + high control)
        if valence > 0.6 and dominance > 0.6:
            return "Confident（自信）", "语调平稳有力，逻辑清晰，表达流畅"

        # 2. Nervous (high activation + low pleasantness)
        elif arousal > 0.6 and valence < 0.5:
            return "Nervous（紧张）", "语速加快，语句重复，表达不连贯"

        # 3. Calm (everything else)
        else:
            return "Calm（冷静）", "语速适中，表达平稳，情绪稳定"


# Usage example
if __name__ == "__main__":
    import requests
    import tempfile
    import uuid

    try:
        analyzer = RobustAudioEmotionAnalyzer(
            model_path=get_model_path("w2v2-L-robust"),
            window_size=3,
            chunk_size=1.8
        )
        print("情感分析模型加载成功")
    except Exception as e:
        print(f"情感分析模型加载失败: {str(e)}")
        exit(1)

    # Test audio URL - replace with your own COS audio link
    audio_url = "https://zx-1343343346.cos.ap-chongqing.myqcloud.com/ai_interview/user_audio/1916016714226561026/1754304843005.webm"
    
    # Local test file path (fallback)
    project_root = get_project_root()
    local_audio_path = project_root / "src" / "temp_audio_wav" / "tts_output_20250510_143101.wav"
    
    temp_file_path = None
    try:
        # Decide whether the source is a remote URL or a local path
        if audio_url.startswith(('http://', 'https://')):
            print(f"正在下载网络音频文件: {audio_url}")
            
            # Download the remote audio file
            response = requests.get(audio_url, timeout=30)
            response.raise_for_status()
            
            # Derive the file extension from the URL
            from urllib.parse import urlparse
            parsed_url = urlparse(audio_url)
            original_filename = os.path.basename(parsed_url.path)
            file_extension = os.path.splitext(original_filename)[1] or '.webm'  # default to .webm
            
            # Create a temp file that keeps the original extension
            temp_dir = tempfile.gettempdir()
            temp_filename = f"test_audio_{uuid.uuid4().hex}{file_extension}"
            temp_file_path = os.path.join(temp_dir, temp_filename)
            
            # Persist the downloaded bytes
            with open(temp_file_path, 'wb') as temp_file:
                temp_file.write(response.content)
            
            print(f"音频文件已下载到: {temp_file_path}")
            
            # Analyze the downloaded audio
            result = analyzer.process_audio(temp_file_path)
        else:
            # Fall back to the local file path
            if local_audio_path.exists():
                print(f"分析本地音频文件: {local_audio_path}")
                result = analyzer.process_audio(str(local_audio_path))
            else:
                print(f"本地测试文件不存在: {local_audio_path}")
                print("请确保本地测试文件存在，或使用网络音频URL")

    except requests.exceptions.RequestException as e:
        print(f"下载音频文件失败: {e}")
    except Exception as e:
        print(f"分析过程中出错: {e}")
    finally:
        # Remove the downloaded temp file regardless of outcome
        if temp_file_path and os.path.exists(temp_file_path):
            try:
                os.remove(temp_file_path)
                print(f"已清理临时文件: {temp_file_path}")
            except Exception as e:
                print(f"清理临时文件失败: {e}")