import torch
import torch.nn as nn
import torch.nn.functional as F

# --------------------
# Step 1: Multimodal preprocessing module
# --------------------
class MultiModalPreprocessor:
    """Preprocessing utilities for the three input modalities.

    Stateless helpers: each method maps a raw input tensor to a
    normalized / filtered tensor of the same layout.
    """

    def __init__(self):
        pass

    def process_face(self, img_tensor):  # [B,1,112,112]
        """Per-sample standardization of face images.

        Args:
            img_tensor: float tensor, expected shape [B, 1, 112, 112].
        Returns:
            Tensor of the same shape with zero mean / unit std per sample.
        """
        mean = img_tensor.mean(dim=[1, 2, 3], keepdim=True)
        # Epsilon guards against division by zero on constant images.
        std = img_tensor.std(dim=[1, 2, 3], keepdim=True) + 1e-5
        return (img_tensor - mean) / std

    def process_bio(self, signal_tensor):  # [B,T,2]
        """Low-pass filter physiological signals along the time axis (dim 1).

        Args:
            signal_tensor: tensor of shape [B, T, 2].
        Returns:
            Filtered float32 tensor on the same device as the input.
        """
        from scipy.signal import butter, lfilter

        def butter_lowpass_filter(data, cutoff=50, fs=200, order=5):
            # Normalized cutoff = cutoff / Nyquist frequency (fs / 2).
            b, a = butter(order, cutoff / (0.5 * fs), btype='low', analog=False)
            return lfilter(b, a, data, axis=1)

        np_signal = signal_tensor.detach().cpu().numpy()
        filtered = torch.tensor(butter_lowpass_filter(np_signal), dtype=torch.float32)
        # BUGFIX: previously always returned a CPU tensor; keep the caller's device.
        return filtered.to(signal_tensor.device)

    def process_env(self, env_tensor):  # [B,2]
        """Min-max scale environment readings into [0, 1].

        Args:
            env_tensor: tensor of shape [B, 2]; the two columns are clamped to
                the fixed ranges [10, 35] and [20, 80] (presumably temperature
                and humidity — TODO confirm against the sensor pipeline).
        """
        # BUGFIX: build the bounds on the input's device/dtype so this also
        # works for CUDA or double inputs (plain torch.tensor() was CPU-only).
        env_min = env_tensor.new_tensor([10.0, 20.0])
        env_max = env_tensor.new_tensor([35.0, 80.0])
        return torch.clamp((env_tensor - env_min) / (env_max - env_min), 0, 1)

    def temporal_align(self, face, bio, env):  # 1:10:1
        """Align sampling rates (face : bio : env = 1 : 10 : 1) by keeping
        every 10th bio time step."""
        return face, bio[:, ::10, :], env


# --------------------
# Step 2: Dynamic weighted fusion module + modality attention mechanism
# --------------------
class DynamicFeatureFusion(nn.Module):
    """Dynamically weight and fuse the three modality features.

    Each modality gets a learned scalar relevance score; the softmaxed
    scores scale the features before a linear projection to 512-d.

    Inputs: f_face [B, 256], f_bio [B, 256], f_env [B, 32].
    Outputs: fused features [B, 512] and the modality weights [B, 3].
    """

    def __init__(self):
        super().__init__()
        # One scalar relevance score per modality.
        self.score_face = nn.Linear(256, 1)
        self.score_bio = nn.Linear(256, 1)
        self.score_env = nn.Linear(32, 1)

        # NOTE(review): self.cos is never used in forward(); kept so that
        # existing checkpoints / callers remain compatible.
        self.cos = nn.CosineSimilarity(dim=1)
        self.final_fuse = nn.Linear(256 + 256 + 32, 512)

    def forward(self, f_face, f_bio, f_env, env_condition="normal"):
        """Fuse the modality features.

        Args:
            f_face, f_bio, f_env: per-modality feature tensors.
            env_condition: "normal" or "abnormal"; the latter boosts the
                environment modality's weight by 0.1 before renormalizing.
        Returns:
            (fused [B, 512], weights [B, 3]).
        """
        # Score each modality.
        w_face = self.score_face(f_face)
        w_bio = self.score_bio(f_bio)
        w_env = self.score_env(f_env)

        raw_weights = torch.cat([w_face, w_bio, w_env], dim=1)  # [B, 3]
        weights = F.softmax(raw_weights, dim=1)

        # BUGFIX: the original mutated the softmax output in place
        # (weights[:, 2] += 0.1), which invalidates tensors needed for
        # gradient computation and makes backward() raise. Use an
        # out-of-place broadcast add, then renormalize.
        if env_condition == "abnormal":
            weights = weights + weights.new_tensor([0.0, 0.0, 0.1])
            weights = weights / weights.sum(dim=1, keepdim=True)

        # Scale each feature by its weight, concatenate, project to 512-d.
        fused_input = torch.cat([f_face * weights[:, 0:1],
                                 f_bio * weights[:, 1:2],
                                 f_env * weights[:, 2:3]], dim=1)
        return self.final_fuse(fused_input), weights


# --------------------
# Step 3: Decision-level fusion output module
# --------------------
class FinalEmotionHead(nn.Module):
    """Decision head: 8-way emotion logits plus an intensity score in [0, 3]."""

    def __init__(self):
        super().__init__()
        # Regression branch: scalar emotion intensity.
        self.intensity = nn.Sequential(
            nn.Linear(512, 64),
            nn.ReLU(),
            nn.Linear(64, 1),
        )
        # Classification branch: logits over 8 emotion classes.
        self.classifier = nn.Sequential(
            nn.Linear(512, 128),
            nn.ReLU(),
            nn.Linear(128, 8),
        )

    def forward(self, fused_feat):
        """Map fused features [B, 512] to (logits [B, 8], intensity [B, 1]).

        The intensity output is clamped to the range [0, 3].
        """
        raw_strength = self.intensity(fused_feat)
        strength = raw_strength.clamp(0, 3)
        class_logits = self.classifier(fused_feat)
        return class_logits, strength


# --------------------
# Full model: integrates preprocessing, dynamic weighting, and decision output
# --------------------
class XiaoZhishiEmotionModel(nn.Module):
    """End-to-end emotion model: per-modality encoders -> dynamic fusion -> head."""

    def __init__(self):
        super().__init__()
        # Encoders live in an earlier project module; imported lazily here
        # so this file can be imported without pulling them in at load time.
        from multi_modal_emotion_model import FaceCNN, AttnBiGRU, EnvironmentMLP
        self.face_net = FaceCNN()
        self.bio_net = AttnBiGRU(input_dim=2)
        self.env_net = EnvironmentMLP()
        self.fusion = DynamicFeatureFusion()
        self.head = FinalEmotionHead()

    def forward(self, face_img, bio_seq, env_data, env_cond="normal"):
        """Run the full pipeline.

        Returns:
            (logits, intensity, modality weights) for the batch.
        """
        face_feat = self.face_net(face_img)
        bio_feat = self.bio_net(bio_seq)
        env_feat = self.env_net(env_data)
        fused, modality_weights = self.fusion(
            face_feat, bio_feat, env_feat, env_condition=env_cond)
        class_logits, strength = self.head(fused)
        return class_logits, strength, modality_weights
