"""
GFA (Gated Feature Aggregation) 模块
用于融合时空特征和通道交互特征

网络结构：
1. 输入：AFTM+DSTM输出 (E_ST) 和 CWM输出 (E_C)
2. Scale层：分别对两种特征进行归一化
3. Concat层：拼接两种特征
4. Gating机制：通过Linear+Sigmoid生成门控信号
5. 按元素相乘：门控信号与拼接特征相乘
6. 分类层：全连接层 + Softmax
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from typing import Tuple, Optional

class ScaleLayer(nn.Module):
    """Scale layer: normalizes each sample by its maximum absolute value."""

    def __init__(self):
        super(ScaleLayer, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass.

        Args:
            x (torch.Tensor): input features,
                shape (batch_size, feature_dim)
        Returns:
            torch.Tensor: normalized features,
                shape (batch_size, feature_dim)
        """
        # Per-sample peak absolute value; the clamp floor (1e-8) guards
        # against division by zero for all-zero rows.
        peak = x.abs().amax(dim=1, keepdim=True).clamp(min=1e-8)
        return x / peak

class GFA(nn.Module):
    """
    GFA (Gated Feature Aggregation) module.

    Fuses spatio-temporal features with channel-wise interaction features:
    1. Scale layers: normalize each feature stream independently
    2. Concat layer: concatenate the two streams
    3. Gating mechanism: Linear + Sigmoid generates a gating signal
    4. Element-wise multiplication: gated fusion
    5. Classification head: fully connected layers (raw logits; use
       `get_probabilities` for softmax probabilities)
    """

    def __init__(self,
                 spatio_temporal_dim: int = 256,  # ASTM(128) + DSTM(128) = 256
                 channel_wise_dim: int = 64,
                 hidden_dim: int = 256,
                 num_classes: int = 4):
        """
        Initialize the GFA module.

        Args:
            spatio_temporal_dim (int): dimension of the spatio-temporal
                features (ASTM+DSTM output)
            channel_wise_dim (int): dimension of the channel-wise interaction
                features (CWM output)
            hidden_dim (int): hidden layer dimension of the classifier
            num_classes (int): number of output classes, default 4
                (DEAP dataset)
        """
        super(GFA, self).__init__()

        self.spatio_temporal_dim = spatio_temporal_dim
        self.channel_wise_dim = channel_wise_dim
        self.hidden_dim = hidden_dim
        self.num_classes = num_classes

        # Dimension after concatenating the two feature streams
        self.concat_dim = spatio_temporal_dim + channel_wise_dim

        # Scale layers: normalize each stream by its per-sample max |value|
        self.scale_st = ScaleLayer()  # spatio-temporal feature scaling
        self.scale_cw = ScaleLayer()  # channel-wise feature scaling

        # Gating mechanism: linear projection followed by a sigmoid
        self.gating_linear = nn.Linear(self.concat_dim, self.concat_dim)
        self.sigmoid = nn.Sigmoid()

        # Classification head (emits raw logits)
        self.classifier = nn.Sequential(
            nn.Linear(self.concat_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ELU(inplace=True),
            nn.Dropout(0.3),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.BatchNorm1d(hidden_dim // 2),
            nn.ELU(inplace=True),
            nn.Dropout(0.2),
            nn.Linear(hidden_dim // 2, num_classes)
        )

    def forward(self,
                spatio_temporal_features: torch.Tensor,
                channel_wise_features: torch.Tensor) -> torch.Tensor:
        """
        Forward pass.

        Args:
            spatio_temporal_features (torch.Tensor): spatio-temporal features
                (ASTM+DSTM output), shape (batch_size, spatio_temporal_dim)
            channel_wise_features (torch.Tensor): channel-wise interaction
                features (CWM output), shape (batch_size, channel_wise_dim)
        Returns:
            torch.Tensor: classification logits (NOT probabilities),
                shape (batch_size, num_classes)
        """
        # Step 1: Scale layers - normalize each feature stream
        scaled_st = self.scale_st(spatio_temporal_features)
        scaled_cw = self.scale_cw(channel_wise_features)

        # Step 2: Concat layer - join both streams along the feature axis
        concat_features = torch.cat([scaled_st, scaled_cw], dim=1)

        # Step 3: Gating mechanism - produce a per-element gate in (0, 1)
        gating_signal = self.gating_linear(concat_features)
        gating_signal = self.sigmoid(gating_signal)

        # Step 4: Element-wise multiplication - gated fusion
        gated_features = concat_features * gating_signal

        # Step 5: Classification head
        logits = self.classifier(gated_features)

        return logits

    def get_probabilities(self,
                          spatio_temporal_features: torch.Tensor,
                          channel_wise_features: torch.Tensor) -> torch.Tensor:
        """
        Run the forward pass and convert logits to class probabilities.

        Args:
            spatio_temporal_features (torch.Tensor): spatio-temporal features
            channel_wise_features (torch.Tensor): channel-wise features
        Returns:
            torch.Tensor: classification probabilities (rows sum to 1),
                shape (batch_size, num_classes)
        """
        logits = self.forward(spatio_temporal_features, channel_wise_features)
        probabilities = F.softmax(logits, dim=1)
        return probabilities

def demo_with_models():
    """Demonstrate integration with the ASTM, DSTM, and CWM models."""
    banner = "=" * 60
    print("\n" + banner)
    print("与AFTM、DSTM、CWM模型集成演示")
    print(banner)

    # Project-local modules
    from ASTM_main import ASTM
    from DSTM_main import DSTM
    from CWF_main import CWFCalculator

    # Instantiate every model in the pipeline
    astm = ASTM()
    dstm = DSTM()
    cwf_calculator = CWFCalculator()
    gfa = GFA()

    # Put the networks into evaluation mode
    astm.eval()
    dstm.eval()
    gfa.eval()

    # Synthetic test inputs
    n_samples = 2
    matrix_input = torch.randn(n_samples, 128, 9, 9)  # 128 EEG matrices = 1 second
    raw_input = np.random.randn(n_samples, 32, 128)  # raw 32-channel recordings

    print(f"测试数据:")
    print(f"EEG矩阵数据形状: {matrix_input.shape}")
    print(f"原始EEG数据形状: {raw_input.shape}")

    # Spatio-temporal features from ASTM
    print(f"\n--- ASTM特征提取 ---")
    with torch.no_grad():
        feats_astm = astm(matrix_input)
    print(f"ASTM输出形状: {feats_astm.shape}")

    # Spatio-temporal features from DSTM
    print(f"\n--- DSTM特征提取 ---")
    with torch.no_grad():
        feats_dstm = dstm(matrix_input)
    print(f"DSTM输出形状: {feats_dstm.shape}")

    # Join the ASTM and DSTM streams along the feature axis
    st_features = torch.cat([feats_astm, feats_dstm], dim=1)
    print(f"拼接后时空特征形状: {st_features.shape}")

    # Channel-wise interaction features via CWM
    print(f"\n--- CWM特征提取 ---")
    per_sample = []
    for idx in range(n_samples):
        cwf_calculator.calculate_cwf_matrix(raw_input[idx])
        # Simplified for the demo: a real pipeline would feed the CWF matrix
        # through a channel-interaction feature extractor. Here we substitute
        # random 64-dimensional features.
        per_sample.append(torch.randn(64))

    cw_features = torch.stack(per_sample, dim=0)
    print(f"CWM输出形状: {cw_features.shape}")

    # Fuse the two streams and classify with GFA
    print(f"\n--- GFA特征融合和分类 ---")
    with torch.no_grad():
        logits = gfa(st_features, cw_features)
        probabilities = gfa.get_probabilities(st_features, cw_features)

    print(f"最终分类logits形状: {logits.shape}")
    print(f"最终分类概率形状: {probabilities.shape}")

    # Report the per-sample prediction and its confidence
    print(f"\n分类结果:")
    for idx in range(n_samples):
        pred_class = torch.argmax(probabilities[idx]).item()
        confidence = probabilities[idx][pred_class].item()
        print(f"样本 {idx+1}: 预测类别 {pred_class}, 置信度 {confidence:.4f}")

if __name__ == "__main__":
    
    # 演示与AFTM、DSTM、CWM模型集成
    demo_with_models()
