import torch
import torch.nn as nn
import torch.nn.functional as F
from einops.layers.torch import Rearrange
from torch import Tensor

class CNNFeatureExtractor(nn.Module):
    """Extract a sequence of embeddings from a raw 2-D signal.

    Input:  (batch, 1, num_electrodes, time_samples)
    Output: (batch, seq_len, emb_size), where seq_len is determined by the
    temporal conv (kernel 25) followed by average pooling (kernel 45,
    stride 15) — see ``_expected_seq_len``.
    """

    # Temporal-pipeline constants; also used to predict the output
    # sequence length in the error fallback so both paths agree on shape.
    _TIME_KERNEL = 25
    _POOL_KERNEL = 45
    _POOL_STRIDE = 15

    def __init__(self, emb_size=32):
        super().__init__()
        self.emb_size = emb_size

        # Convolution over the time axis only.
        self.time_conv = nn.Sequential(
            nn.Conv2d(1, 32, (1, self._TIME_KERNEL), (1, 1)),
            nn.BatchNorm2d(32),
        )

        # The spatial conv kernel spans all electrodes, a count only known
        # at forward time, so the layer is created lazily in forward().
        # NOTE(review): lazily created parameters are NOT registered with an
        # optimizer constructed before the first forward pass — confirm
        # callers run a dummy forward before building the optimizer.
        self.spatial_conv = None

        # Post-processing: normalize, pool over time, project to emb_size.
        # (The former trailing einops Rearrange was parameter-free; the
        # equivalent flatten/transpose is now done in forward.)
        self.post_process = nn.Sequential(
            nn.BatchNorm2d(64),
            nn.ELU(),
            nn.AvgPool2d((1, self._POOL_KERNEL), (1, self._POOL_STRIDE)),
            nn.Dropout(0.4),
            nn.Conv2d(64, emb_size, (1, 1), stride=(1, 1)),
            nn.BatchNorm2d(emb_size),
            nn.ELU(),
        )

    def _expected_seq_len(self, time_samples):
        """Sequence length a successful pass produces for this input width."""
        t = time_samples - self._TIME_KERNEL + 1              # after time_conv
        t = (t - self._POOL_KERNEL) // self._POOL_STRIDE + 1  # after pooling
        return max(1, t)

    def forward(self, x):
        batch_size, _, num_electrodes, time_samples = x.shape

        try:
            # Temporal convolution: [batch, 32, num_electrodes, time']
            x = self.time_conv(x)

            # (Re)create the spatial conv whenever the electrode count
            # differs from the current kernel height.
            if self.spatial_conv is None or self.spatial_conv[0].weight.size(2) != num_electrodes:
                self.spatial_conv = nn.Sequential(
                    nn.Conv2d(32, 64, (num_electrodes, 1), (1, 1)),
                    nn.BatchNorm2d(64),
                    nn.ELU()
                ).to(x.device)

            # Spatial conv collapses the electrode axis: [batch, 64, 1, time']
            x = self.spatial_conv(x)

            # Post-process, then fold (h, w) into one token axis:
            # [batch, emb, 1, w] -> [batch, w, emb]
            x = self.post_process(x)
            return x.flatten(2).transpose(1, 2)

        except Exception as e:
            # Best-effort fallback kept from the original design: log and
            # return zeros shaped like a successful pass (x may hold an
            # intermediate tensor here, so the printed shape is best-effort).
            print(f"CNNFeatureExtractor forward传播错误: {str(e)}")
            print(f"输入x形状: {x.shape}")

            # Exact pipeline arithmetic instead of the old rough
            # time_samples // 15 estimate, so fallback shapes match.
            seq_len = self._expected_seq_len(time_samples)
            return torch.zeros(batch_size, seq_len, self.emb_size, device=x.device)

class AttentionLayer(nn.Module):
    """Additive attention pooling over a sequence.

    Scores each time step of the LSTM output with a small MLP, normalizes
    the scores with a softmax over the time axis, and returns the weighted
    sum: (batch, seq, hidden) -> (batch, hidden).
    """

    def __init__(self, hidden_size):
        super().__init__()
        self.attention = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, 1)
        )

    def forward(self, lstm_output):
        # Per-step scalar scores: (batch, seq, 1)
        scores = self.attention(lstm_output)
        weights = torch.softmax(scores, dim=1)
        # Broadcast weights over the feature axis and pool over time.
        return (weights * lstm_output).sum(dim=1)

class LSTMBlock(nn.Module):
    """Bidirectional LSTM with attention pooling over its outputs.

    Maps (batch, seq_len, input_dim) -> (batch, hidden_size * 2).
    """

    def __init__(self, input_dim, hidden_size, num_layers, dropout):
        super().__init__()
        self.lstm = nn.LSTM(
            input_dim,
            hidden_size,
            num_layers,
            batch_first=True,
            dropout=dropout,
            bidirectional=True
        )
        # Bidirectional output doubles the feature width fed to attention.
        self.attention = AttentionLayer(hidden_size * 2)

    def forward(self, x):
        batch_size, seq_len, _ = x.shape

        try:
            # Cap very long sequences — running the LSTM over them can
            # exhaust memory, so only the first 500 steps are kept.
            if seq_len > 500:
                x = x[:, :500, :]

            outputs, _ = self.lstm(x)
            return self.attention(outputs)

        except Exception as e:
            # Best-effort fallback: log, then return a zero context vector
            # shaped like the attention layer's output.
            print(f"LSTMBlock forward传播错误: {str(e)}")
            print(f"输入x形状: {x.shape}")

            return torch.zeros(batch_size, self.lstm.hidden_size * 2, device=x.device)

class CNNLSTM(nn.Module):
    """CNN feature extractor -> BiLSTM with attention -> linear classifier.

    forward() takes (batch, 1, num_electrodes, time_samples) and returns,
    depending on flags, pooled LSTM features, raw logits, or probabilities.
    """

    def __init__(self, emb_size=32, hidden_size=128, num_layers=2, dropout=0.5, n_classes=2, device=None):
        super().__init__()
        self.feature_extractor = CNNFeatureExtractor(emb_size=emb_size)
        self.device = device or torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.lstm_block = LSTMBlock(emb_size, hidden_size, num_layers, dropout)
        self.n_classes = n_classes
        self.hidden_size = hidden_size

        # Classifier head: the final Linear always emits n_classes columns.
        self.classifier = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.BatchNorm1d(hidden_size),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(hidden_size, n_classes)
        )

    def forward(self, x, return_features=False, apply_activation=True):
        """Run the full pipeline.

        Args:
            x: input batch, (batch, 1, num_electrodes, time_samples).
            return_features: if True, return the (batch, hidden_size * 2)
                pooled LSTM features instead of class scores.
            apply_activation: if False, return raw logits (for use with
                CrossEntropyLoss / BCEWithLogitsLoss).
        """
        batch_size = x.size(0)

        try:
            # Feature extraction then temporal modeling.
            features = self.feature_extractor(x)
            lstm_output = self.lstm_block(features)

            if return_features:
                return lstm_output

            logits = self.classifier(lstm_output)

            # Raw logits for losses that apply their own activation.
            if not apply_activation:
                return logits

            # Single-logit heads get a sigmoid; otherwise softmax over classes.
            if self.n_classes == 1 or logits.shape[1] == 1:
                return torch.sigmoid(logits)
            else:
                return F.softmax(logits, dim=1)

        except Exception as e:
            print(f"CNNLSTM forward传播错误: {str(e)}")
            print(f"输入x形状: {x.shape}")

            # Fallback outputs must match the success-path shapes. The
            # classifier always emits n_classes columns, so the previous
            # special-casing of n_classes == 2 down to a single column
            # produced a shape mismatch; likewise all-zero "probabilities"
            # were not a valid activated output.
            if return_features:
                return torch.zeros(batch_size, self.hidden_size * 2, device=self.device)

            logits = torch.zeros(batch_size, self.n_classes, device=self.device)
            if not apply_activation:
                return logits
            if self.n_classes == 1:
                # sigmoid(0) == 0.5 — a neutral probability.
                return torch.full_like(logits, 0.5)
            # Uniform distribution over the classes.
            return torch.ones(batch_size, self.n_classes, device=self.device) / self.n_classes
