import torch
import torch.nn as nn
import numpy as np
import re
from sentence_transformers import SentenceTransformer

# Multimodal data vectorization class
class MultimodalVectorizer:
    """Fuse text, audio, and video interview signals into one fixed-size vector.

    Each modality is encoded, projected to ``modality_dim`` via a linear layer,
    and the three projections are concatenated, so ``vectorize`` returns a list
    of length ``3 * modality_dim``.

    NOTE(review): the projection layers are randomly initialized and never
    trained here, so the output is a fixed random linear embedding of the raw
    features — confirm this is intended.
    """

    # Default location of the local SBERT model. Kept identical to the original
    # hard-coded path for backward compatibility; override via ``model_path``.
    DEFAULT_MODEL_PATH = r'E:\backend-project\AI_Interview\python_AI_Interview_backend\model\sbert_all_MiniLM_L6_v2'

    def __init__(self, modality_dim=128, max_emotion_seq_len=10, model_path=None):
        """
        Args:
            modality_dim: output dimension of each per-modality projection.
            max_emotion_seq_len: emotion sequences are padded/truncated to this
                many steps before flattening.
            model_path: path to a local SentenceTransformer model directory;
                defaults to ``DEFAULT_MODEL_PATH`` (backward compatible).
        """
        self.modality_dim = modality_dim
        self.max_emotion_seq_len = max_emotion_seq_len

        self.model = SentenceTransformer(model_path or self.DEFAULT_MODEL_PATH)

        # Audio emotion one-hot mapping (capitalized labels).
        self.audio_emotion_map = {
            "Calm": [1, 0, 0],
            "Nervous": [0, 1, 0],
            "Confident": [0, 0, 1]
        }

        # Micro-expression one-hot mapping (lowercase labels).
        self.expression_emotion_map = {
            "calm": [1, 0, 0],
            "nervous": [0, 1, 0],
            "confident": [0, 0, 1]
        }

        self.face_keys = ["eyeBlink", "mouthPuff", "eyeUpLeft", "eyeUpRight"]

        # Raw input dimension of the text modality.
        # 384 matches the all-MiniLM-L6-v2 sentence-embedding size.
        text_input_dim = 384

        # Unified feature size each emotion sequence is projected down to.
        emotion_feature_dim = 16

        # Raw input dimensions of the audio and video modalities.
        audio_input_dim = emotion_feature_dim + 2  # emotion features + stress + speech_rate
        video_input_dim = emotion_feature_dim + len(self.face_keys) + 2  # emotion features + face_actions + body_actions

        # Projection layers for the three modalities.
        self.projection_text = nn.Linear(text_input_dim, self.modality_dim)
        self.projection_audio = nn.Linear(audio_input_dim, self.modality_dim)
        self.projection_video = nn.Linear(video_input_dim, self.modality_dim)

        # Emotion-sequence projection layers (map a flattened, fixed-length
        # one-hot sequence to emotion_feature_dim). Audio and expression
        # emotions both have 3 states, so the raw dimension is shared.
        emotion_raw_dim = 3 * self.max_emotion_seq_len
        self.projection_audio_emotion = nn.Linear(emotion_raw_dim, emotion_feature_dim)
        self.projection_expression_emotion = nn.Linear(emotion_raw_dim, emotion_feature_dim)

    def _safe_float(self, value, default=0.0):
        """Best-effort float conversion.

        Accepts numbers directly; for strings, extracts the first numeric token
        (e.g. "3.1字/秒" -> 3.1). Returns ``default`` for None, non-numeric
        strings, and any other type.
        """
        if value is None:
            return default
        if isinstance(value, (int, float)):
            return float(value)
        if isinstance(value, str):
            matches = re.findall(r'[-+]?\d*\.\d+|\d+', value)
            return float(matches[0]) if matches else default
        return default

    def _sequence_to_onehot(self, seq_str, emotion_map):
        """Encode a '//'-separated label sequence as a flat one-hot vector.

        Unknown labels become [0, 0, 0]; the sequence is truncated or
        zero-padded to ``max_emotion_seq_len`` steps, then flattened to a
        1-D numpy array of length 3 * max_emotion_seq_len.
        """
        if not isinstance(seq_str, str):
            seq_str = ""
        labels = [part.strip() for part in seq_str.split('//') if part.strip()]
        vectors = [emotion_map.get(label, [0, 0, 0]) for label in labels]
        vectors = vectors[:self.max_emotion_seq_len]
        vectors += [[0, 0, 0]] * (self.max_emotion_seq_len - len(vectors))
        return np.array(vectors).flatten()

    def _process_audio_emotion_sequence(self, seq_str: str):
        """Flatten an audio emotion sequence (capitalized labels)."""
        return self._sequence_to_onehot(seq_str, self.audio_emotion_map)

    def _process_expression_sequence(self, seq_str: str):
        """Flatten a micro-expression sequence (lowercase labels)."""
        return self._sequence_to_onehot(seq_str, self.expression_emotion_map)

    def _project_emotion(self, flat_vector, projection):
        """Project a flattened one-hot sequence to the fixed emotion feature size."""
        tensor_vector = torch.from_numpy(flat_vector).float()
        with torch.no_grad():
            return projection(tensor_vector)

    def _extract_audio_emotion_features(self, seq_str: str):
        """Fixed-dimension feature vector for an audio emotion sequence."""
        return self._project_emotion(
            self._process_audio_emotion_sequence(seq_str),
            self.projection_audio_emotion,
        )

    def _extract_expression_features(self, seq_str: str):
        """Fixed-dimension feature vector for a micro-expression sequence."""
        return self._project_emotion(
            self._process_expression_sequence(seq_str),
            self.projection_expression_emotion,
        )

    def _gather_floats(self, data, nested_key, keys):
        """Read ``keys`` as floats from data[nested_key] if that dict is
        non-empty, otherwise fall back to the same keys at the top level."""
        source = data.get(nested_key, {}) or data
        return np.array([self._safe_float(source.get(key, 0.0)) for key in keys])

    def vectorize(self, data):
        """Encode a payload dict into ``{"feature_vector": [...]}``.

        Recognized keys (all optional): text, audio_emotion, stress,
        speech_rate, expression_seq, and either face_actions / body_actions
        dicts or their individual keys at the top level. The returned list has
        length 3 * modality_dim.
        """
        # 1. Text modality.
        v_text_raw = self.model.encode([data.get("text", "")])[0]
        t_text = torch.from_numpy(v_text_raw).float()

        # 2. Audio modality: projected emotion features (capitalized labels)
        # + stress clamped to [0, 1] + speech rate clamped to [0, 10] and
        # normalized to [0, 1].
        audio_emotion_features = self._extract_audio_emotion_features(data.get("audio_emotion", ""))
        v_stress = np.array([min(max(self._safe_float(data.get("stress", 0.0)), 0.0), 1.0)])
        speech_rate = self._safe_float(data.get("speech_rate", 0.0))
        v_rate = np.array([min(max(speech_rate, 0.0), 10.0) / 10.0])  # normalize
        t_audio_additional = torch.from_numpy(np.concatenate([v_stress, v_rate])).float()
        t_audio_raw = torch.cat([audio_emotion_features, t_audio_additional])

        # 3. Video modality: projected expression features (lowercase labels)
        # + face actions + body actions.
        expr_features = self._extract_expression_features(data.get("expression_seq", ""))
        v_face = self._gather_floats(data, "face_actions", self.face_keys)
        v_body = self._gather_floats(data, "body_actions", ("earTouch", "noseTouch"))
        t_video_additional = torch.from_numpy(np.concatenate([v_face, v_body])).float()
        t_video_raw = torch.cat([expr_features, t_video_additional])

        # Project every modality to the shared dimension and concatenate.
        with torch.no_grad():
            t_text_proj = self.projection_text(t_text)
            t_audio_proj = self.projection_audio(t_audio_raw)
            t_video_proj = self.projection_video(t_video_raw)

        feature_vector = torch.cat([t_text_proj, t_audio_proj, t_video_proj]).tolist()
        return {"feature_vector": feature_vector}


# Demo / smoke test
if __name__ == "__main__":
    sample = {
        "text": "我对这个问题的理解是，它主要涉及几个方面。",
        "audio_emotion": "Confident//Calm//Confident",  # capitalized labels
        "stress": 0.25,
        "expression_seq": "calm//confident//calm",  # lowercase labels
        "eyeBlink": "0.8",
        "mouthPuff": "0",
        "eyeUpLeft": "1",
        "eyeUpRight": "0",
        "earTouch": "0",
        "noseTouch": "1",
        "speech_rate": "3.1字/秒",
    }

    vec = MultimodalVectorizer(modality_dim=128)
    result = vec.vectorize(sample)
    # Expected length: 128 * 3 = 384
    print(f"Feature vector length: {len(result['feature_vector'])}")
    print(f"Expected length: {128 * 3}")

    # Emotion sequences of varying length: empty, single, short, long.
    audio_seqs = [
        "",
        "Calm",
        "Confident//Calm",
        "Nervous//Calm//Confident//Nervous//Calm//Confident",
    ]
    expr_seqs = [
        "",
        "calm",
        "confident//calm",
        "nervous//calm//confident//nervous//calm//confident",
    ]

    print("\n测试不同长度的音频情感序列(大写):")
    for sequence in audio_seqs:
        feats = vec._extract_audio_emotion_features(sequence)
        print(f"序列 '{sequence}' -> 特征维度: {feats.shape}")

    print("\n测试不同长度的表情情感序列(小写):")
    for sequence in expr_seqs:
        feats = vec._extract_expression_features(sequence)
        print(f"序列 '{sequence}' -> 特征维度: {feats.shape}")

    # End-to-end payload mixing capitalized and lowercase emotion labels.
    print("\n测试完整数据处理:")
    mixed = {
        "text": "这是一个测试文本",
        "audio_emotion": "Confident//Nervous",
        "expression_seq": "calm//confident",
        "eyeBlink": 1.2,
        "mouthPuff": 0,
        "eyeUpLeft": 1,
        "eyeUpRight": 0,
        "earTouch": 1,
        "noseTouch": 0,
        "stress": 0.5,
        "speech_rate": "2.5字/秒",
    }

    result = vec.vectorize(mixed)
    print(f"处理成功，特征向量长度: {len(result['feature_vector'])}")
