import torch
import torch.nn as nn
import torch.nn.functional as F
import math


class DepthwiseConv2d(nn.Module):
    """Separable 2-D convolution: a per-channel (depthwise) conv followed by
    a 1x1 (pointwise) conv that mixes channels.

    The depthwise stage expands each input channel into ``depth_multiplier``
    feature maps; the pointwise stage projects the result to ``out_channels``.
    Both convolutions are Kaiming-initialized (fan-out, ReLU gain).
    """

    def __init__(self, in_channels, out_channels, kernel_size, depth_multiplier=1, padding=0, bias=False):
        super(DepthwiseConv2d, self).__init__()
        expanded = in_channels * depth_multiplier
        self.depthwise = nn.Conv2d(
            in_channels,
            expanded,
            kernel_size=kernel_size,
            padding=padding,
            groups=in_channels,  # groups == in_channels makes the conv depthwise
            bias=bias,
        )
        self.pointwise = nn.Conv2d(expanded, out_channels, kernel_size=1, bias=bias)

        # Initialize weights for better convergence
        for conv in (self.depthwise, self.pointwise):
            nn.init.kaiming_normal_(conv.weight, mode='fan_out', nonlinearity='relu')

    def forward(self, x):
        """Apply the depthwise stage, then the pointwise projection."""
        return self.pointwise(self.depthwise(x))


class SqueezeExciteBlock(nn.Module):
    """Squeeze-and-Excitation block for channel attention"""

    def __init__(self, channels, reduction=16):
        super(SqueezeExciteBlock, self).__init__()
        # "Squeeze": global average pool each channel down to one scalar.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # "Excite": bottleneck MLP producing a (0, 1) gate per channel.
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.shape[:2]
        gate = self.avg_pool(x).view(batch, channels)
        gate = self.fc(gate).view(batch, channels, 1, 1)
        # Rescale every channel of x by its learned attention weight.
        return x * gate.expand_as(x)


class EEGNetPytorch(nn.Module):
    """EEGNet-style CNN for EEG classification with squeeze-and-excitation
    channel attention.

    The spatial (depthwise) convolution and the dense/classifier head are
    built lazily on the first forward pass, so the model adapts to the
    number of EEG channels and time samples actually seen in the input.

    Args:
        nb_classes: number of output classes; 2 selects a single sigmoid unit.
        Chans, Samples: accepted for API compatibility; actual sizes are
            inferred from the input at the first forward pass.
        dropoutRate: dropout probability used in every dropout layer.
        kernLength: temporal kernel length of the first convolution.
        F1: number of temporal filters.
        D: depth multiplier of the spatial depthwise convolution.
        F2: number of pointwise (separable) filters.
        device: torch device; defaults to CUDA when available.
    """

    def __init__(self, nb_classes=2, Chans=None, Samples=None, dropoutRate=0.4, kernLength=64, F1=16, D=2, F2=32,
                 device=None):
        super(EEGNetPytorch, self).__init__()
        self.device = device or torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.nb_classes = nb_classes

        # Temporal convolution block.
        self.firstConv = nn.Sequential(
            nn.Conv2d(1, F1, (1, kernLength), padding=(0, kernLength // 2), bias=False),
            nn.BatchNorm2d(F1),
            nn.ELU(),  # ELU instead of ReLU for better generalization
        )

        # The spatial depthwise convolution is created in forward because its
        # kernel height depends on the number of EEG channels in the input.
        self.depthwiseConv = None
        self.se1 = SqueezeExciteBlock(F1 * D)

        # First-stage pooling and regularization.
        self.depthwiseBN = nn.Sequential(
            nn.BatchNorm2d(F1 * D),
            nn.ELU(),
            nn.AvgPool2d((1, 4)),
            nn.Dropout(dropoutRate)
        )

        # Separable convolution block.
        self.separableConv = nn.Sequential(
            DepthwiseConv2d(F1 * D, F2, (1, 16), padding=(0, 8), bias=False),
            nn.BatchNorm2d(F2),
            nn.ELU(),
            nn.AvgPool2d((1, 8)),
            nn.Dropout(dropoutRate)
        )

        self.se2 = SqueezeExciteBlock(F2)

        # Created lazily in forward once the flattened feature size is known.
        self.classifier = None
        self.dense_block = None

        # Keep hyper-parameters for the lazily-built layers.
        self.F1 = F1
        self.D = D
        self.F2 = F2
        self.dropoutRate = dropoutRate

    def _create_dense_classifier(self, feature_size):
        """Build the dense block and classifier for the given flattened feature size."""
        hidden_size = min(256, feature_size)  # cap the hidden-layer width

        self.dense_block = nn.Sequential(
            nn.Linear(feature_size, hidden_size),
            nn.BatchNorm1d(hidden_size),
            nn.ELU(),
            nn.Dropout(self.dropoutRate)
        ).to(self.device)

        if self.nb_classes == 2:
            # Binary classification: single sigmoid unit.
            self.classifier = nn.Sequential(
                nn.Linear(hidden_size, 1),
                nn.Sigmoid()
            ).to(self.device)
        else:
            # Multi-class: LogSoftmax over class logits (pairs with NLLLoss).
            self.classifier = nn.Sequential(
                nn.Linear(hidden_size, self.nb_classes),
                nn.LogSoftmax(dim=1)
            ).to(self.device)

    def forward(self, x, return_features=False, apply_activation=True):
        """Run the network.

        Args:
            x: input of shape [batch, 1, channels, samples].
            return_features: if True, return the dense-block features
                instead of class predictions.
            apply_activation: if False, return raw logits (for use with
                CrossEntropyLoss / BCEWithLogitsLoss).

        Returns:
            Features, logits, or activated outputs depending on the flags.
            On an internal error a zero (or uniform) tensor of the expected
            shape is returned as a best-effort fallback.
        """
        # Compute batch_size BEFORE the try block so the except handler can
        # always build a fallback tensor (previously it could be unbound if
        # the 4-way shape unpack failed, masking the real error).
        batch_size = x.shape[0]
        try:
            _, _, num_channels, time_samples = x.shape

            # Stage 1: temporal convolution.
            x = self.firstConv(x)  # [batch, F1, num_channels, time]

            # Lazily (re)create the depthwise spatial convolution when the
            # channel count changes (weight dim 2 is the kernel height).
            if self.depthwiseConv is None or self.depthwiseConv.depthwise.weight.size(2) != num_channels:
                self.depthwiseConv = DepthwiseConv2d(
                    self.F1, self.F1 * self.D, (num_channels, 1),
                    depth_multiplier=self.D, bias=False
                ).to(self.device)

            # Depthwise convolution with channel attention.
            x = self.depthwiseConv(x)  # [batch, F1*D, 1, time]
            x = self.se1(x)
            x = self.depthwiseBN(x)  # [batch, F1*D, 1, time/4]

            # Separable convolution with channel attention.
            x = self.separableConv(x)  # [batch, F2, 1, time/32]
            x = self.se2(x)

            # Flatten.
            flat_features = x.view(batch_size, -1)  # [batch, F2 * (time/32)]

            # Lazily create the dense block and classifier.
            if self.dense_block is None or self.classifier is None:
                self._create_dense_classifier(flat_features.shape[1])

            features = self.dense_block(flat_features)

            if return_features:
                return features

            # Raw logits: apply only the classifier's linear layer.
            logits = self.classifier[0](features)

            # With CrossEntropyLoss-style criteria the activation is skipped.
            if not apply_activation:
                return logits

            # Full classifier (linear + activation).
            return self.classifier(features)

        except Exception as e:
            # Best-effort fallback: log the failure and return a neutral
            # output of the expected shape instead of crashing training.
            print(f"EEGNetPytorch forward传播错误: {str(e)}")
            print(f"输入x形状: {x.shape}")

            if return_features:
                if self.dense_block is not None:
                    # The first child of dense_block is its Linear layer, whose
                    # out_features is the hidden size. (The previous code
                    # indexed children()[-3], which is the BatchNorm1d layer
                    # and has no `out_features`, so the fallback itself raised.)
                    hidden_size = self.dense_block[0].out_features
                    return torch.zeros(batch_size, hidden_size, device=self.device)
                return torch.zeros(batch_size, 256, device=self.device)  # default size
            if self.nb_classes == 2:
                return torch.zeros(batch_size, 1, device=self.device)
            return torch.ones(batch_size, self.nb_classes, device=self.device) / self.nb_classes
