import torch
import torch.nn as nn
import torch.nn.functional as F


# ------------------------
# Micro-expression CNN feature extractor
# ------------------------
class FaceCNN(nn.Module):
    """Extract a 256-d feature vector from a single-channel face image.

    Expects input of shape [B, 1, H, W]; the final adaptive average pool
    makes the output [B, 256] regardless of spatial size (H and W must be
    large enough to survive the ~8x downsampling).
    """

    def __init__(self):
        super().__init__()
        stages = [
            # Stage 1: stride-2 conv + pool -> spatial size H/4 x W/4
            nn.Conv2d(1, 32, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2),
            # Stage 2: -> H/8 x W/8
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2),
            # Stage 3: deepen to 128 channels at the same resolution
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            # Stage 4: widen to 256 channels, then global average pool to 1x1
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.AdaptiveAvgPool2d((1, 1)),
        ]
        self.conv_layers = nn.Sequential(*stages)

    def forward(self, x):
        """x: [B, 1, H, W] -> feature vector [B, 256]."""
        pooled = self.conv_layers(x)  # [B, 256, 1, 1]
        return pooled.flatten(1)


# ------------------------
# Physiological-signal Attn-BiGRU module
# ------------------------
class AttnBiGRU(nn.Module):
    """Bidirectional GRU followed by multi-head self-attention.

    Encodes a physiological-signal sequence into a single pooled vector of
    width 2 * hidden_dim (256 with the defaults).
    """

    def __init__(self, input_dim, hidden_dim=128, num_heads=4):
        super().__init__()
        feature_dim = 2 * hidden_dim  # bidirectional doubles the width
        self.bigru = nn.GRU(
            input_dim,
            hidden_dim,
            bidirectional=True,
            batch_first=True,
        )
        self.attn = nn.MultiheadAttention(
            embed_dim=feature_dim,
            num_heads=num_heads,
            batch_first=True,
        )

    def forward(self, x):
        """x: [B, T, F] -> pooled sequence features [B, 2 * hidden_dim]."""
        hidden, _ = self.bigru(x)                        # [B, T, 2*H]
        attended, _ = self.attn(hidden, hidden, hidden)  # self-attention
        return attended.mean(dim=1)                      # mean-pool over time


# ------------------------
# Temperature/humidity (environment) feature extractor
# ------------------------
class EnvironmentMLP(nn.Module):
    """Small MLP mapping environment readings (temperature, humidity) to features.

    Maps [B, input_dim] -> [B, output_dim]; dropout is active only in
    training mode.
    """

    def __init__(self, input_dim=2, hidden_dim=64, output_dim=32):
        super().__init__()
        layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(hidden_dim, output_dim),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """x: [B, input_dim] -> [B, output_dim]."""
        return self.net(x)

# Cross-modal attention fusion module
class CrossModalAttention(nn.Module):
    """Fuse face, physiological, and environment features into one vector.

    Each modality is scaled by an adaptive weight (driven by the volatility
    of the physiological signal), concatenated, and projected to `dim`.

    Args:
        dim: width of the fused output vector.
        face_dim / bio_dim / env_dim: widths of the incoming modality
            features (defaults match FaceCNN / AttnBiGRU / EnvironmentMLP).
    """

    def __init__(self, dim=512, face_dim=256, bio_dim=256, env_dim=32):
        super().__init__()
        # BUG FIX: the original used nn.Linear(dim * 3, dim) = 1536 inputs,
        # but the concatenated features are 256 + 256 + 32 = 544 wide, so
        # forward() raised a shape-mismatch error at runtime.
        self.cross_attn = nn.Linear(face_dim + bio_dim + env_dim, dim)
        # NOTE(review): kept only for checkpoint compatibility — Softmax over
        # a size-1 dimension always outputs 1.0, so this head is unused.
        self.modality_weighting = nn.Sequential(
            nn.Linear(3, 1),
            nn.Softmax(dim=-1)
        )

    def forward(self, face_feat, bio_feat, env_feat, bio_var_ratio):
        """face_feat: [B, face_dim]; bio_feat: [B, bio_dim];
        env_feat: [B, env_dim]; bio_var_ratio: scalar volatility measure.
        Returns the fused feature [B, dim].
        """
        # Adaptive weighting: emphasize physiological features when they are
        # volatile, otherwise emphasize the face features.
        if bio_var_ratio > 0.15:
            w_face, w_bio, w_env = 0.2, 0.6, 0.2
        else:
            w_face, w_bio, w_env = 0.6, 0.2, 0.2

        # BUG FIX: the original multiplied the fused vector by
        # weights.sum(dim=1) == 1.0 — a no-op. The weights now scale each
        # modality before fusion so they actually influence the output.
        combined = torch.cat(
            [w_face * face_feat, w_bio * bio_feat, w_env * env_feat], dim=1
        )
        return self.cross_attn(combined)  # [B, dim]

# Emotion decision network
class EmotionDecisionNet(nn.Module):
    """Two heads over the fused 512-d feature: 7-way emotion logits and a
    scalar intensity clamped to [0, 3]."""

    def __init__(self):
        super().__init__()

        def head(width, out_dim):
            # Shared two-layer head pattern: 512 -> width -> out_dim.
            return nn.Sequential(
                nn.Linear(512, width),
                nn.ReLU(),
                nn.Linear(width, out_dim),
            )

        self.classifier = head(128, 7)           # 7 emotion classes
        self.intensity_regressor = head(64, 1)   # intensity in [0, 3]

    def forward(self, fused_feat):
        """fused_feat: [B, 512] -> (logits [B, 7], intensity [B, 1])."""
        logits = self.classifier(fused_feat)
        raw_intensity = self.intensity_regressor(fused_feat)
        return logits, raw_intensity.clamp(0, 3)


# Full model: integrates all submodules above
class MultiModalEmotionModel(nn.Module):
    """End-to-end model: per-modality encoders, cross-modal fusion, and the
    emotion decision heads."""

    def __init__(self):
        super().__init__()
        self.face_net = FaceCNN()              # grayscale face images
        self.bio_net = AttnBiGRU(input_dim=2)  # heart rate + blood oxygen
        self.env_net = EnvironmentMLP()        # temperature / humidity
        self.fusion_net = CrossModalAttention()
        self.decision_net = EmotionDecisionNet()

    def forward(self, face_img, bio_seq, env_data, bio_var_ratio=0.1):
        """Encode each modality, fuse, and return (emotion logits, intensity)."""
        fused = self.fusion_net(
            self.face_net(face_img),
            self.bio_net(bio_seq),
            self.env_net(env_data),
            bio_var_ratio,
        )
        return self.decision_net(fused)
