import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class SpatialConvNetA(nn.Module):
    """Spatial convNet-A module.

    Extracts spatial features from neighbouring EEG channels with three
    depthwise-separable convolution blocks.

    Input:  a single EEG matrix, shape (batch_size, 1, 9, 9)
            (or (batch_size, input_channels, 9, 9)).
    Output: spatial feature map, shape (batch_size, 128, 9, 9).

    Architecture (channel progression):
        1. depthwise 5x5 (input ch) -> pointwise 1x1 to 32 channels
        2. depthwise 5x5 (32 ch)    -> pointwise 1x1 to 64 channels
        3. depthwise 5x5 (64 ch)    -> pointwise 1x1 to 128 channels
    Every convolution is followed by BatchNorm and an ELU activation;
    padding=2 with the 5x5 kernels preserves the 9x9 spatial size.
    """

    def __init__(self, input_channels=1):
        """Build the three depthwise-separable blocks.

        Args:
            input_channels (int): number of input channels, default 1
                (a single EEG matrix).
        """
        super(SpatialConvNetA, self).__init__()

        # Block 1: depthwise 5x5 over the input channel(s), then 1x1 up to 32.
        self.depthwise_conv1 = nn.Conv2d(
            input_channels, input_channels,
            kernel_size=5, padding=2, groups=input_channels,
        )
        self.pointwise_conv1 = nn.Conv2d(input_channels, 32, kernel_size=1)
        self.bn1_depth = nn.BatchNorm2d(input_channels)
        self.bn1_point = nn.BatchNorm2d(32)

        # Block 2: depthwise 5x5 over 32 channels, then 1x1 up to 64.
        self.depthwise_conv2 = nn.Conv2d(
            32, 32, kernel_size=5, padding=2, groups=32,
        )
        self.pointwise_conv2 = nn.Conv2d(32, 64, kernel_size=1)
        self.bn2_depth = nn.BatchNorm2d(32)
        self.bn2_point = nn.BatchNorm2d(64)

        # Block 3: depthwise 5x5 over 64 channels, then 1x1 up to 128.
        self.depthwise_conv3 = nn.Conv2d(
            64, 64, kernel_size=5, padding=2, groups=64,
        )
        self.pointwise_conv3 = nn.Conv2d(64, 128, kernel_size=1)
        self.bn3_depth = nn.BatchNorm2d(64)
        self.bn3_point = nn.BatchNorm2d(128)

        # Single ELU applied after every BatchNorm.
        self.elu = nn.ELU(inplace=True)

    def forward(self, x):
        """Run the six conv -> BatchNorm -> ELU stages in order.

        Args:
            x (torch.Tensor): input of shape
                (batch_size, input_channels, 9, 9).

        Returns:
            torch.Tensor: features of shape (batch_size, 128, 9, 9).
        """
        stages = (
            (self.depthwise_conv1, self.bn1_depth),
            (self.pointwise_conv1, self.bn1_point),
            (self.depthwise_conv2, self.bn2_depth),
            (self.pointwise_conv2, self.bn2_point),
            (self.depthwise_conv3, self.bn3_depth),
            (self.pointwise_conv3, self.bn3_point),
        )
        for conv, bn in stages:
            x = self.elu(bn(conv(x)))
        return x

    def get_output_shape(self, input_shape):
        """Compute the output shape for a given input shape.

        Args:
            input_shape (tuple): (batch_size, channels, height, width).

        Returns:
            tuple: (batch_size, 128, height, width) — the spatial size
                is preserved by the same-padded convolutions.
        """
        batch_size, _, height, width = input_shape
        return (batch_size, 128, height, width)

class TemporalConvNet(nn.Module):
    """Temporal convolution network.

    Models the temporal structure of the flattened spatial features with
    three stacked 1-D convolutions followed by global average pooling:
        1. 32 filters,  kernel 16 (low time scale)
        2. 64 filters,  kernel 32 (mid time scale)
        3. 128 filters, kernel 64 (high time scale) + average pooling

    NOTE(review): with these even kernel sizes, padding = k/2 - 1 shrinks
    the sequence by one sample per layer (it does not preserve length);
    the adaptive pooling at the end collapses the time axis to length 1
    regardless, so the output shape is unaffected.
    """

    def __init__(self, input_features=1152):
        """Build the three 1-D convolution layers.

        Args:
            input_features (int): channel dimension of the incoming
                sequence (ASTM passes 128*9*9 = 10368).
        """
        super(TemporalConvNet, self).__init__()

        # Layer 1: input_features -> 32 channels, kernel 16.
        self.conv1d_1 = nn.Conv1d(input_features, 32, kernel_size=16, padding=7)
        self.bn1 = nn.BatchNorm1d(32)

        # Layer 2: 32 -> 64 channels, kernel 32.
        self.conv1d_2 = nn.Conv1d(32, 64, kernel_size=32, padding=15)
        self.bn2 = nn.BatchNorm1d(64)

        # Layer 3: 64 -> 128 channels, kernel 64.
        self.conv1d_3 = nn.Conv1d(64, 128, kernel_size=64, padding=31)
        self.bn3 = nn.BatchNorm1d(128)

        # Global average pooling collapses the time axis to length 1.
        self.avg_pool = nn.AdaptiveAvgPool1d(1)

        # Single ELU applied after every BatchNorm.
        self.elu = nn.ELU(inplace=True)

    def forward(self, x):
        """Extract temporal features.

        Args:
            x (torch.Tensor): input of shape
                (batch_size, features, sequence_length).

        Returns:
            torch.Tensor: pooled features of shape (batch_size, 128, 1).
        """
        layers = (
            (self.conv1d_1, self.bn1),
            (self.conv1d_2, self.bn2),
            (self.conv1d_3, self.bn3),
        )
        for conv, bn in layers:
            x = self.elu(bn(conv(x)))
        return self.avg_pool(x)

class ASTM(nn.Module):
    """ASTM (Adaptive Spatio-Temporal Model) — the complete model.

    Pipeline:
        1. 128 Spatial convNet-A modules, one per per-second EEG matrix.
        2. Flatten each module's spatial feature map and stack them along
           a new time axis into a (batch, 10368, 128) sequence.
        3. Three 1-D convolutions extract temporal features.
    """

    def __init__(self):
        super(ASTM, self).__init__()
        # One Spatial convNet-A per EEG matrix (128 time points per second).
        self.spatial_modules = nn.ModuleList([
            SpatialConvNetA() for _ in range(128)
        ])
        # Temporal network; its channel dimension is the flattened
        # per-time-step spatial feature size 128*9*9 = 10368.
        self.temporal_net = TemporalConvNet(input_features=10368)

    def forward(self, x):
        """Forward pass.

        Args:
            x (torch.Tensor): EEG input of shape (batch_size, 128, 9, 9)
                — the 128 EEG matrices of one second.

        Returns:
            torch.Tensor: final features of shape (batch_size, 128).
        """
        batch_size, n_matrices, height, width = x.shape

        # Run each EEG matrix through its own spatial module and flatten
        # the resulting (batch, 128, 9, 9) map to (batch, 10368).
        spatial_features = []
        for i in range(n_matrices):
            matrix = x[:, i:i + 1, :, :]  # (batch_size, 1, 9, 9)
            feat = self.spatial_modules[i](matrix)
            spatial_features.append(feat.flatten(start_dim=1))

        # BUG FIX: the previous cat(dim=1) followed by view() happened to
        # produce the right shape (16384*81 == 10368*128) but the raw
        # reshape scrambled the time axis with spatial positions. Stacking
        # the flattened per-step features on a new last axis yields a
        # proper (batch_size, 10368, n_matrices) sequence: channel dim =
        # spatial feature, last dim = time.
        spatial_seq = torch.stack(spatial_features, dim=2)

        # Temporal feature extraction -> (batch_size, 128, 1).
        temporal_features = self.temporal_net(spatial_seq)

        # Drop the pooled (length-1) time dimension -> (batch_size, 128).
        return temporal_features.squeeze(-1)

def test_astm_model():
    """Smoke-test the complete ASTM model.

    Builds the model, prints its structure and parameter count, then runs
    one forward pass on random data shaped like one second of EEG input.

    Returns:
        ASTM: the constructed model (in eval mode).
    """
    print("测试ASTM完整模型")
    print("-" * 40)

    # Create the model in eval mode (BatchNorm uses running stats).
    model = ASTM()
    model.eval()

    # BUG FIX: ASTM defines no print_architecture(); the original call
    # raised AttributeError. Print the module repr and parameter count
    # instead.
    print(model)
    n_params = sum(p.numel() for p in model.parameters())
    print(f"参数总数: {n_params}")

    # Test data: one second = 128 EEG matrices of 9x9.
    batch_size = 4
    test_input = torch.randn(batch_size, 128, 9, 9)

    # Forward pass without gradient tracking.
    with torch.no_grad():
        output = model(test_input)
    print(f"输出形状: {tuple(output.shape)}")

    return model

def demo_with_deap_format():
    """Demonstrate feeding DEAP-formatted EEG data through ASTM.

    Runs the model on simulated single-second data, then processes a
    multi-second tensor one second at a time and stacks the per-second
    features into (batch_size, n_seconds, 128).
    """
    print("\n" + "=" * 60)
    print("与DEAP数据格式集成演示")
    print("=" * 60)

    # Model in eval mode (BatchNorm uses running statistics).
    model = ASTM()
    model.eval()

    # Simulated DEAP layout: (40 trials, 60 s, 128 time points, 9x9 matrix).
    print("模拟DEAP数据处理:")
    print("原始数据形状: (40试次, 60秒, 128时间点, 9x9矩阵)")

    # Single-second data for 8 trials.
    batch_size = 8
    second_data = torch.randn(batch_size, 128, 9, 9)

    with torch.no_grad():
        features = model(second_data)

    # Multi-second data, processed one second at a time.
    print(f"\n处理多秒数据:")
    n_seconds = 5
    multi_second_data = torch.randn(batch_size, n_seconds, 128, 9, 9)

    print(f"多秒输入形状: {multi_second_data.shape}")

    with torch.no_grad():
        all_features = [
            model(multi_second_data[:, sec])  # each: (batch_size, 128)
            for sec in range(n_seconds)
        ]

    # Stack per-second features -> (batch_size, n_seconds, 128).
    final_features = torch.stack(all_features, dim=1)

if __name__ == "__main__":
    # Smoke-test the complete ASTM model.
    model = test_astm_model()
    
    # Demonstrate integration with DEAP-formatted data.
    demo_with_deap_format()
