import torch
import torch.nn as nn
import torch.nn.functional as F
from einops.layers.torch import Rearrange

class CNNFeatureExtractor(nn.Module):
    """Convolutional EEG feature extractor producing a fixed-size embedding.

    Temporal and spatial convolutions are kept as separate stages so the
    spatial convolution can adapt dynamically to inputs with different
    electrode counts.

    Input:  ``[batch, 1, num_electrodes, time_samples]``
            (time_samples must be >= 69 so the temporal conv + avg-pool fit;
            otherwise the zero-feature fallback below is taken).
    Output: ``[batch, emb_size]``
    """

    def __init__(self, emb_size=40):
        super().__init__()
        self.emb_size = emb_size

        # Temporal convolution along the time axis.
        self.time_conv = nn.Sequential(
            nn.Conv2d(1, 32, (1, 25), (1, 1)),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.1)
        )

        # The spatial convolution is created lazily in forward() because its
        # kernel height must equal the electrode count, which is unknown here.
        # NOTE: rebuilding it for a new electrode count discards learned
        # weights, and parameters created after optimizer construction are not
        # tracked by that optimizer.
        self.spatial_conv = None

        # Post-processing: pooling, 1x1 projection to emb_size, global average,
        # then flatten (b, emb_size, 1, 1) -> (b, emb_size).
        self.post_process = nn.Sequential(
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.1),
            nn.AvgPool2d((1, 45), (1, 15)),
            nn.Dropout(0.4),
            nn.Conv2d(64, emb_size, (1, 1), stride=(1, 1)),
            nn.BatchNorm2d(emb_size),
            nn.LeakyReLU(0.1),
            nn.AdaptiveAvgPool2d((1, 1)),
            # Equivalent to Rearrange('b e (h) (w) -> b (e h w)') but uses the
            # torch built-in instead of the einops layer.
            nn.Flatten(1)
        )

    def forward(self, x):
        """Extract features; on any failure return zeros of shape (batch, emb_size)."""
        batch_size, _, num_electrodes, time_samples = x.shape
        input_shape = x.shape  # preserved for error reporting; `x` is rebound below

        try:
            x = self.time_conv(x)  # [batch, 32, num_electrodes, time - 24]

            # (Re)build the spatial conv when missing or when the electrode
            # count changed since the previous call.
            if self.spatial_conv is None or self.spatial_conv[0].weight.size(2) != num_electrodes:
                self.spatial_conv = nn.Sequential(
                    nn.Conv2d(32, 64, (num_electrodes, 1), (1, 1)),
                    nn.BatchNorm2d(64),
                    nn.LeakyReLU(0.1)
                ).to(x.device)
                # A freshly created module defaults to train mode; mirror the
                # parent's mode so eval() actually freezes its BatchNorm.
                self.spatial_conv.train(self.training)

            x = self.spatial_conv(x)  # [batch, 64, 1, time']

            return self.post_process(x)  # [batch, emb_size]

        except Exception as e:
            # Best-effort fallback: report the failure and return zero features
            # so a single malformed batch does not abort a whole run.
            print(f"CNNFeatureExtractor forward传播错误: {str(e)}")
            print(f"输入x形状: {input_shape}")
            print(f"中间特征形状: {x.shape}")
            return torch.zeros(batch_size, self.emb_size, device=x.device)

class CNNSimple(nn.Module):
    """CNN classifier: a CNNFeatureExtractor embedding followed by an MLP head.

    Args:
        emb_size:  dimensionality of the extracted feature vector.
        n_classes: number of output classes (1 => single sigmoid logit).
        device:    optional target device; defaults to CUDA when available.
    """

    def __init__(self, emb_size=40, n_classes=2, device=None):
        super().__init__()
        self.emb_size = emb_size
        self.n_classes = n_classes
        self.feature_extractor = CNNFeatureExtractor(emb_size=emb_size)
        self.dropout = nn.Dropout(0.3)

        self.classifier = nn.Sequential(
            nn.Linear(emb_size, 64),
            nn.BatchNorm1d(64),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.3),
            nn.Linear(64, n_classes)
        )

        self.device = device or torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def forward(self, x, return_features=False, apply_activation=True):
        """Classify a batch (or return its features).

        Args:
            x: input tensor, expected ``[batch, 1, num_electrodes, time]``.
            return_features: if True, return the (dropped-out) embedding
                ``[batch, emb_size]`` instead of class scores.
            apply_activation: if False, return raw logits (for use with
                CrossEntropyLoss / BCEWithLogitsLoss).

        Returns:
            Features, logits, or probabilities depending on the flags; on an
            internal error, a zero/uniform tensor of the matching shape.
        """
        try:
            features = self.feature_extractor(x)
            features = self.dropout(features)

            if return_features:
                return features

            logits = self.classifier(features)

            # Raw logits requested (loss functions apply their own activation).
            if not apply_activation:
                return logits

            if self.n_classes == 1 or logits.shape[1] == 1:
                return torch.sigmoid(logits)
            return F.softmax(logits, dim=1)

        except Exception as e:
            # Best-effort fallback: report and return a tensor whose shape
            # matches what the normal path would have produced.
            print(f"CNNSimple forward传播错误: {str(e)}")
            print(f"输入x形状: {x.shape}")

            batch_size = x.size(0)
            if return_features:
                return torch.zeros(batch_size, self.emb_size, device=self.device)
            if self.n_classes == 1:
                # Single sigmoid logit: (batch, 1), matching the normal output.
                return torch.zeros(batch_size, 1, device=self.device)
            # Uniform distribution over classes, matching the softmax shape.
            # (The original `n_classes == 1 or n_classes == 2 and not
            # return_features` precedence bug returned (batch, 1) zeros for
            # n_classes == 2, mismatching the normal (batch, 2) output.)
            return torch.ones(batch_size, self.n_classes, device=self.device) / self.n_classes

