import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

class SpatialConvNetB(nn.Module):
    """
    Spatial convNet-B module.

    Extracts spatial features from a single 9x9 EEG matrix with three
    stacked dilated convolutions:
      1. 16  filters, 3x3, dilation=2
      2. 32  filters, 3x3, dilation=2
      3. 64  filters, 3x3, dilation=2

    Design notes:
    - Dilated convolutions enlarge the receptive field without pooling.
    - padding=2 keeps the 9x9 spatial size unchanged at every layer.
    - Each conv is followed by BatchNorm and ELU.
    - A 1x1 "bottleneck" pair (down/up) is defined alongside the main path.
    """

    def __init__(self, input_channels=1):
        """
        Build the module.

        Args:
            input_channels (int): number of input channels
                (default 1 — a single EEG matrix).
        """
        super(SpatialConvNetB, self).__init__()

        def dilated3x3(cin, cout):
            # 3x3 dilated conv; padding=2 with dilation=2 preserves H x W.
            return nn.Conv2d(
                in_channels=cin,
                out_channels=cout,
                kernel_size=3,
                dilation=2,
                padding=2,
            )

        # Main path: three dilated conv + BN stages (16 -> 32 -> 64 channels).
        self.dilated_conv1 = dilated3x3(input_channels, 16)
        self.bn1 = nn.BatchNorm2d(16)
        self.dilated_conv2 = dilated3x3(16, 32)
        self.bn2 = nn.BatchNorm2d(32)
        self.dilated_conv3 = dilated3x3(32, 64)
        self.bn3 = nn.BatchNorm2d(64)

        # Bottleneck: 1x1 convs for channel reduction / expansion.
        self.bottleneck_down = nn.Conv2d(
            in_channels=input_channels, out_channels=8, kernel_size=1
        )
        self.bottleneck_up = nn.Conv2d(
            in_channels=64, out_channels=64, kernel_size=1
        )
        self.bn_bottleneck_down = nn.BatchNorm2d(8)
        self.bn_bottleneck_up = nn.BatchNorm2d(64)

        # Shared ELU activation.
        self.elu = nn.ELU(inplace=True)

    def forward(self, x):
        """
        Forward pass.

        Args:
            x (torch.Tensor): input EEG matrix, shape (batch_size, 1, 9, 9).

        Returns:
            torch.Tensor: spatial features, shape (batch_size, 64, 9, 9).
        """
        # NOTE(review): this reduced branch is computed but never merged back
        # into the main path — it looks like an unfinished skip connection.
        # Kept as-is to preserve behavior; confirm the intended design.
        skip = self.elu(self.bn_bottleneck_down(self.bottleneck_down(x)))

        # Main path: three dilated conv -> BN -> ELU stages.
        out = self.elu(self.bn1(self.dilated_conv1(x)))
        out = self.elu(self.bn2(self.dilated_conv2(out)))
        out = self.elu(self.bn3(self.dilated_conv3(out)))

        # Bottleneck "up" projection (64 -> 64 channels) -> BN -> ELU.
        out = self.elu(self.bn_bottleneck_up(self.bottleneck_up(out)))
        return out

class DSTM(nn.Module):
    """
    DSTM (Dilated Spatio-Temporal Model).

    Pipeline:
    1. 128 SpatialConvNetB modules process the 128 per-second EEG matrices
       (one module per matrix).
    2. Each 64x9x9 output is flattened (5184 features) and the 128 results
       are stacked into a (batch, 5184, 128) map.
    3. Three 1-D convolutions extract temporal features, average-pooled to
       a (batch, 128) vector.
    """

    def __init__(self):
        super(DSTM, self).__init__()

        # One spatial module per EEG matrix (128 matrices per second).
        self.spatial_modules = nn.ModuleList([
            SpatialConvNetB() for _ in range(128)
        ])

        # Temporal conv stack; input channels per time step = 64*9*9 = 5184.
        self.temporal_net = self._create_temporal_net()

    def _create_temporal_net(self):
        """Build the 3-layer 1-D temporal convolution network."""
        INPUT_CHANNELS = 64 * 9 * 9  # 5184

        return nn.Sequential(
            # Layer 1: 32 filters, kernel 16 over the 5184-channel input.
            nn.Conv1d(INPUT_CHANNELS, 32, kernel_size=16, padding=7),
            nn.BatchNorm1d(32),
            nn.ELU(inplace=True),

            # Layer 2: 64 filters, kernel 32.
            nn.Conv1d(32, 64, kernel_size=32, padding=15),
            nn.BatchNorm1d(64),
            nn.ELU(inplace=True),

            # Layer 3: 128 filters, kernel 64.
            nn.Conv1d(64, 128, kernel_size=64, padding=31),
            nn.BatchNorm1d(128),
            nn.ELU(inplace=True),

            # Collapse the remaining temporal dimension to length 1.
            nn.AdaptiveAvgPool1d(1),
        )

    def forward(self, x):
        """
        Forward pass.

        Args:
            x (torch.Tensor): input EEG data, shape (batch_size, 128, 9, 9)
                — one second as 128 EEG matrices.

        Returns:
            torch.Tensor: final features, shape (batch_size, 128).
        """
        batch_size, n_matrices, _, _ = x.shape

        # Each module processes its own (batch, 1, 9, 9) slice; the
        # (batch, 64, 9, 9) result is flattened to (batch, 5184).
        # (Single loop over all matrices; the original first-3/rest split
        # and debug prints were leftovers from logging and are removed.)
        spatial_features = [
            self.spatial_modules[i](x[:, i:i + 1, :, :]).reshape(batch_size, -1)
            for i in range(n_matrices)
        ]

        # Stack along a new time axis: (batch, 5184, n_matrices).
        spatial_concat = torch.stack(spatial_features, dim=2)

        # Temporal feature extraction -> (batch, 128, 1).
        temporal_features = self.temporal_net(spatial_concat)

        # Drop the pooled length-1 dimension -> (batch, 128).
        return temporal_features.squeeze(-1)

def demo_with_deap_format():
    """
    Demonstrate DSTM on randomly generated DEAP-shaped EEG data.

    Runs the model on one second of data and on a 5-second sequence
    (processed second by second) and returns both results — the original
    version computed them and silently discarded them.

    Returns:
        tuple[torch.Tensor, torch.Tensor]:
            - single-second features, shape (8, 128)
            - multi-second features, shape (8, 5, 128)
    """
    model = DSTM()
    model.eval()  # deterministic BatchNorm (uses running stats)

    batch_size = 8
    # One second of data: 128 EEG matrices of 9x9 per trial.
    second_data = torch.randn(batch_size, 128, 9, 9)
    with torch.no_grad():
        features = model(second_data)  # (batch_size, 128)

    # Multi-second input: process each second independently, then stack.
    n_seconds = 5
    multi_second_data = torch.randn(batch_size, n_seconds, 128, 9, 9)

    all_features = []
    with torch.no_grad():
        for second in range(n_seconds):
            second_input = multi_second_data[:, second, :, :, :]  # (batch, 128, 9, 9)
            all_features.append(model(second_input))

    final_features = torch.stack(all_features, dim=1)  # (batch, n_seconds, 128)
    return features, final_features

if __name__ == "__main__":
    # Demo of integration with the DEAP dataset format
    demo_with_deap_format()
