import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np


# ---------------------------
# Block_T: temporal feature extraction module
#     in_channels is now the number of EEG channels (64)
# ---------------------------
class Block_T(nn.Module):
    """Temporal feature extractor.

    Runs three parallel temporal convolutions with kernel lengths t//2,
    t//8 and t//4 over the last axis, pools each branch to a fixed
    temporal length of 5, and concatenates them channel-wise
    (32 * 3 = 96 output maps) before batch-norm + ReLU.

    Args:
        in_channels: number of input channels (the EEG channel count, 64
            in this project).
        t: temporal length of the input, used to derive the kernel sizes.
    """

    def __init__(self, in_channels, t):
        super().__init__()
        # One conv per temporal scale; padding roughly centres each kernel
        # so the time axis keeps approximately its original length.
        self.conv1 = nn.Conv3d(in_channels, 32, kernel_size=(1, 1, t // 2),
                               padding=(0, 0, (t // 2) // 2))
        self.conv2 = nn.Conv3d(in_channels, 32, kernel_size=(1, 1, t // 8),
                               padding=(0, 0, (t // 8) // 2))
        self.conv3 = nn.Conv3d(in_channels, 32, kernel_size=(1, 1, t // 4),
                               padding=(0, 0, (t // 4) // 2))
        # Adaptive pool equalizes the (possibly different) per-branch time
        # lengths to 5 so the branches can be concatenated.
        self.pool = nn.AdaptiveAvgPool3d((None, None, 5))
        self.bn = nn.BatchNorm3d(32 * 3)

    def forward(self, x):
        # x: [N, channels, D=1, F, t]
        branches = [
            self.pool(F.relu(conv(x)))
            for conv in (self.conv1, self.conv2, self.conv3)
        ]
        merged = torch.cat(branches, dim=1)  # -> [N, 96, D, F, 5]
        return F.relu(self.bn(merged))

# ---------------------------
# Block_S: spectral (frequency-domain) feature extraction module
#     in_channels is fixed at 96 (the Block_T output)
# ---------------------------
class Block_S(nn.Module):
    """Spectral feature extractor: two stacked multi-scale conv stages.

    Stage 1 applies three parallel convolutions along the frequency axis
    (the middle branch also spans the time axis with a length-11 kernel)
    and concatenates them into 192 channels. Stage 2 repeats the pattern
    at 128 channels per branch, pools time to length 5, and concatenates
    into 384 channels, followed by batch-norm, ReLU and spatial dropout.

    Args:
        in_channels: input channel count (96, the Block_T output).
        s1, s2, s: per-branch frequency kernel-size parameters.
    """

    def __init__(self, in_channels, s1, s2, s):
        super().__init__()
        # Stage 1: multi-scale frequency convolutions.
        self.conv1a = nn.Conv3d(in_channels, 64, kernel_size=(1, s1 // 2, 1),
                                padding=(0, s1 // 4, 0))
        self.conv1b = nn.Conv3d(in_channels, 64, kernel_size=(1, s2 // 2, 11),
                                padding=(0, s2 // 4, 5))
        self.conv1c = nn.Conv3d(in_channels, 64, kernel_size=(1, s // 2, 1),
                                padding=(0, s // 4, 0))
        # Stage 2: same multi-scale pattern on the fused 192-channel map.
        self.conv2a = nn.Conv3d(192, 128, kernel_size=(1, s1 // 2, 1),
                                padding=(0, s1 // 4, 0))
        self.conv2b = nn.Conv3d(192, 128, kernel_size=(1, s2 // 2, 11),
                                padding=(0, s2 // 4, 5))
        self.conv2c = nn.Conv3d(192, 128, kernel_size=(1, s // 2, 1),
                                padding=(0, s // 4, 0))
        self.pool = nn.AdaptiveAvgPool3d((None, None, 5))
        self.bn = nn.BatchNorm3d(128 * 3)
        self.dropout = nn.Dropout3d(0.25)

    def forward(self, x):
        stage1 = torch.cat(
            [F.relu(conv(x)) for conv in (self.conv1a, self.conv1b, self.conv1c)],
            dim=1,
        )  # (N, 192, ...)
        stage2 = [
            self.pool(F.relu(conv(stage1)))
            for conv in (self.conv2a, self.conv2b, self.conv2c)
        ]
        fused = torch.cat(stage2, dim=1)  # (N, 384, ...)
        return self.dropout(F.relu(self.bn(fused)))


# ---------------------------
# Block_C: spatial (channel) feature extraction module
#     in_channels is fixed at 384 (the Block_S output);
#     the first kernel dimension is 1 because the channel axis
#     was already convolved in Block_T
# ---------------------------
class Block_C(nn.Module):
    """Spatial (channel) feature block.

    Collapses the depth and frequency axes by global average pooling,
    then convolves along the time axis only (kernel (1, 1, 3)) — the
    channel axis was already convolved in Block_T. Output is batch-norm
    of the ReLU-activated conv (no trailing ReLU here).
    """

    def __init__(self, in_channels):
        super().__init__()
        # Average D and F down to 1 each; keep the time axis untouched.
        self.pool = nn.AdaptiveAvgPool3d((1, 1, None))
        self.conv = nn.Conv3d(in_channels, 128, kernel_size=(1, 1, 3),
                              padding=(0, 0, 1))
        self.bn = nn.BatchNorm3d(128)

    def forward(self, x):
        # x: (N, C, D=1, F, t) -> (N, C, 1, 1, t) after pooling
        squeezed = self.pool(x)
        activated = F.relu(self.conv(squeezed))
        return self.bn(activated)

class Backbone(nn.Module):
    """Dual-stream feature backbone.

    Runs two independent Block_T -> Block_S -> Block_C pipelines over the
    same input: one producing a 128-dim identity embedding (fID), the
    other a 128-dim task embedding (fT).

    Note: psd_channels is accepted but not referenced in this class.
    """

    def __init__(self, psd_channels, t, s1, s2, s):
        super().__init__()
        # Identity stream.
        self.block_t_id = Block_T(64, t)
        self.block_s_id = Block_S(96, s1, s2, s)
        self.block_c_id = Block_C(384)
        # Task stream: same architecture, separate weights.
        self.block_t_task = Block_T(64, t)
        self.block_s_task = Block_S(96, s1, s2, s)
        self.block_c_task = Block_C(384)
        # Collapses the remaining length-5 time axis to a single value.
        self.temporal_pool = nn.AdaptiveAvgPool1d(1)

    def _run_stream(self, x, block_t, block_s, block_c):
        # One T -> S -> C stream followed by temporal average pooling.
        h = block_c(block_s(block_t(x)))           # [N, 128, 1, 1, 5]
        h = h.squeeze(2).squeeze(2)                # [N, 128, 5]
        return self.temporal_pool(h).squeeze(-1)   # [N, 128]

    def forward(self, x):
        x = x.unsqueeze(2)  # [N, 64, 1, 33, 10]
        fID = self._run_stream(
            x, self.block_t_id, self.block_s_id, self.block_c_id)
        fT = self._run_stream(
            x, self.block_t_task, self.block_s_task, self.block_c_task)
        return fID, fT

# ---------------------------
# Overall DAGN model (unchanged)
# ---------------------------
class DAGN(nn.Module):
    """Full model: dual-stream backbone, adversarial attention on the
    identity feature, and three linear heads (main ID, auxiliary ID, task).

    Returns from forward:
        (id_logits, aux_id_logits, task_logits, fID, fT, alpha)
    """

    def __init__(self, psd_channels, t, s1, s2, s, num_id_classes, num_task_classes, att_hidden_dim, att_temp=0.5):
        super().__init__()
        self.backbone = Backbone(psd_channels, t, s1, s2, s)
        self.attention = AdversarialAttention(128, att_hidden_dim, att_temp)
        self.id_classifier = IdentityClassifier(128, num_id_classes)
        self.aux_id_clf = IdentityClassifier(128, num_id_classes)
        self.task_classifier = TaskClassifier(128, num_task_classes)

    def forward(self, x):
        fID, fT = self.backbone(x)
        # Attention splits fID into dominant (fD) and residual (fI) parts.
        fD, fI, alpha = self.attention(fID)
        return (
            self.id_classifier(fD),
            self.aux_id_clf(fI),
            self.task_classifier(fT),
            fID,
            fT,
            alpha,
        )


# ---------------------------
# Feature decorrelation loss
# ---------------------------
def feature_decorrelation_loss(f):
    """Penalize cross-feature correlation.

    Computes the unbiased covariance of the batch and returns the
    Frobenius norm of its off-diagonal entries.

    Args:
        f: concatenated features [fID, fT], shape (N, 2 * feature_dim).

    Returns:
        Scalar tensor: ||offdiag(cov(f))||_F. For a batch of size <= 1
        the covariance is undefined, so a zero loss is returned (the
        original code divided by ``batch - 1 == 0`` here, yielding
        nan/inf gradients).
    """
    batch = f.shape[0]
    if batch < 2:
        # Covariance needs at least two samples; avoid division by zero.
        return f.new_zeros(())
    f_centered = f - f.mean(dim=0, keepdim=True)
    cov = torch.mm(f_centered.t(), f_centered) / (batch - 1)
    # Zero out the diagonal so only cross-feature terms are penalized.
    off_diag = cov - torch.diag(torch.diag(cov))
    return torch.norm(off_diag, p='fro')

# ---------------------------
# Classifier head modules
# ---------------------------
class IdentityClassifier(nn.Module):
    """Linear head mapping a feature vector to identity-class logits."""

    def __init__(self, in_features, num_classes):
        super().__init__()
        self.fc = nn.Linear(in_features, num_classes)

    def forward(self, x):
        logits = self.fc(x)
        return logits


class TaskClassifier(nn.Module):
    """Linear head mapping a feature vector to task logits."""

    def __init__(self, in_features, num_tasks):
        super().__init__()
        self.fc = nn.Linear(in_features, num_tasks)

    def forward(self, x):
        logits = self.fc(x)
        return logits


# ---------------------------
# Adversarial attention module
# ---------------------------
class AdversarialAttention(nn.Module):
    """Learned soft attention that splits the identity feature in two.

    A 3-layer MLP produces per-dimension logits; soft Gumbel-Softmax turns
    them into weights alpha in (0, 1) that sum to 1 over the feature axis.
    The input is decomposed into a dominant part fD = alpha * fID and a
    complementary part fI = (1 - alpha) * fID, so fD + fI == fID.

    Args:
        in_features: dimensionality of the identity feature.
        hidden_dim: width of the MLP hidden layers.
        temperature: Gumbel-Softmax temperature (lower -> sharper weights).
    """

    def __init__(self, in_features, hidden_dim, temperature=0.5):
        super().__init__()
        self.fc1 = nn.Linear(in_features, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, in_features)
        self.temperature = temperature

    def forward(self, fID):
        hidden = F.relu(self.fc2(F.relu(self.fc1(fID))))
        logits = self.fc3(hidden)
        # Soft (hard=False) Gumbel-Softmax: stochastic, differentiable
        # attention weights over the feature dimension.
        alpha = F.gumbel_softmax(logits, tau=self.temperature, hard=False, dim=-1)
        fD = alpha * fID        # dominant component
        fI = (1 - alpha) * fID  # complementary component
        return fD, fI, alpha

