import torch
import torch.nn as nn
import torch.nn.functional as F
from einops.layers.torch import Rearrange
from einops import rearrange
import math

class MixedAttentionLayer(nn.Module):
    """
    Combined spatial, temporal, and cross-domain attention mechanism for EEG signals.

    Runs multi-head self-attention over the node (electrode) axis, then mixes
    two learnable "domain" projections of the result. The residual connections
    (`x + attn_output`) mean callers must use ``in_features == out_features``.
    """
    def __init__(self, in_features, out_features, n_heads=8, dropout=0.4):
        """
        Args:
            in_features: Input feature size per node.
            out_features: Output feature size; must be divisible by ``n_heads``.
            n_heads: Number of attention heads.
            dropout: Dropout probability for attention weights and the mixed output.

        Raises:
            ValueError: If ``out_features`` is not divisible by ``n_heads``.
        """
        super(MixedAttentionLayer, self).__init__()
        if out_features % n_heads != 0:
            raise ValueError(
                f"out_features ({out_features}) must be divisible by n_heads ({n_heads})"
            )
        self.n_heads = n_heads
        self.head_dim = out_features // n_heads

        # Multi-head projections
        self.query = nn.Linear(in_features, out_features)
        self.key = nn.Linear(in_features, out_features)
        self.value = nn.Linear(in_features, out_features)

        # Learnable attention scaling factor, initialized to 1/sqrt(head_dim)
        self.scale = nn.Parameter(torch.ones(1) * (self.head_dim ** -0.5))

        # Learnable domain interactions: [spatial, temporal] x [spatial, temporal]
        self.domain_interactions = nn.Parameter(torch.ones(2, 2))

        # Dimension-mixing projections
        self.spatial_temporal_mix = nn.Linear(in_features, out_features)
        self.temporal_spatial_mix = nn.Linear(in_features, out_features)

        # Output projection
        self.output_projection = nn.Linear(out_features, out_features)

        # Layer normalization
        self.layer_norm1 = nn.LayerNorm(out_features)
        self.layer_norm2 = nn.LayerNorm(out_features)

        # Dropout
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, adjacency=None):
        """
        Args:
            x: Node features [batch, nodes, features].
            adjacency: Optional mask [batch, nodes, nodes]; entries equal to 0
                block attention between the corresponding node pair.

        Returns:
            Tensor [batch, nodes, out_features].
        """
        batch_size, nodes, _ = x.size()

        # Project to multi-head queries, keys, values: [batch, heads, nodes, head_dim]
        q = self.query(x).view(batch_size, nodes, self.n_heads, self.head_dim).transpose(1, 2)
        k = self.key(x).view(batch_size, nodes, self.n_heads, self.head_dim).transpose(1, 2)
        v = self.value(x).view(batch_size, nodes, self.n_heads, self.head_dim).transpose(1, 2)

        # Scaled attention scores: [batch, heads, nodes, nodes]
        attn_scores = torch.matmul(q, k.transpose(-2, -1)) * self.scale

        # Clamp extreme values BEFORE masking so the mask value stays far below
        # every valid score.  (The original clamped after masked_fill, which
        # compressed the -1e4 mask value to -10 — the same magnitude as
        # legitimate scores — letting masked node pairs receive non-negligible
        # attention after softmax.)
        attn_scores = torch.clamp(attn_scores, min=-10.0, max=10.0)

        # Apply adjacency mask if provided
        if adjacency is not None:
            # Expand adjacency to match attention dimensions
            adj_mask = adjacency.unsqueeze(1).expand(-1, self.n_heads, -1, -1)
            attn_scores = attn_scores.masked_fill(adj_mask == 0, -1e4)

        # Normalized attention weights
        attn_weights = F.softmax(attn_scores, dim=-1)  # [batch, heads, nodes, nodes]
        attn_weights = self.dropout(attn_weights)

        # Apply attention to values: [batch, heads, nodes, head_dim]
        attn_output = torch.matmul(attn_weights, v)

        # Merge heads and project back: [batch, nodes, out_features]
        attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, nodes, -1)
        attn_output = self.output_projection(attn_output)

        # First residual connection and normalization
        x = self.layer_norm1(x + attn_output)

        # Row-normalized domain interaction weights
        domain_mix = torch.softmax(self.domain_interactions, dim=-1)

        # Create spatial-temporal and temporal-spatial mixes
        spatial_temporal = self.spatial_temporal_mix(x)
        temporal_spatial = self.temporal_spatial_mix(x)

        # Weighted combination of domain interactions
        mixed_output = (
            domain_mix[0, 0] * x +
            domain_mix[0, 1] * spatial_temporal +
            domain_mix[1, 0] * temporal_spatial +
            domain_mix[1, 1] * (spatial_temporal + temporal_spatial) / 2
        )

        # Second residual connection and final layer norm
        output = self.layer_norm2(x + self.dropout(mixed_output))

        return output


class DynamicGraphConvolution(nn.Module):
    """
    Graph convolution over batched node-feature matrices.

    Uses the supplied adjacency topology with added self-loops and simple row
    normalization.  NOTE(review): ``edge_importance`` is constructed but never
    used in ``forward`` — dynamic edge weighting was deliberately disabled for
    numerical stability; the submodule is kept so existing checkpoints
    (state dicts) still load.
    """
    def __init__(self, in_features, out_features, dropout=0.2):
        """
        Args:
            in_features: Input feature size per node.
            out_features: Output feature size per node.
            dropout: Dropout probability applied after activation.
        """
        super(DynamicGraphConvolution, self).__init__()
        self.weight = nn.Parameter(torch.Tensor(in_features, out_features))
        self.bias = nn.Parameter(torch.Tensor(out_features))

        # Edge-importance MLP (currently unused in forward; see class docstring).
        # Simplified design: ReLU instead of GELU, plus batch norm, for stability.
        self.edge_importance = nn.Sequential(
            nn.Linear(in_features * 2, 32),
            nn.ReLU(),
            nn.BatchNorm1d(32),
            nn.Linear(32, 1),
            nn.Sigmoid()
        )
        self.dropout = nn.Dropout(dropout)
        self.reset_parameters()

    def reset_parameters(self):
        # Conservative initialization (gain < 1) to avoid early activation blow-ups.
        nn.init.xavier_normal_(self.weight, gain=0.5)
        nn.init.constant_(self.bias, 0)

    def forward(self, x, adj):
        """
        Args:
            x: Node features [batch, nodes, in_features].
            adj: Adjacency matrices [batch, nodes, nodes].

        Returns:
            Tensor [batch, nodes, out_features]; non-negative (post-ReLU).
        """
        batch_size, num_nodes, in_features = x.size()

        # Replace any NaN/Inf inputs with zeros.
        if torch.isnan(x).any() or torch.isinf(x).any():
            x = torch.where(torch.isnan(x) | torch.isinf(x), torch.zeros_like(x), x)

        # Node feature transform, done in fp32 for numerical stability.
        support = torch.matmul(x.float(), self.weight.float()).type_as(x)  # [batch, nodes, out_features]

        # Add self-loops so every node retains its own features.
        eye = torch.eye(num_nodes, device=adj.device).unsqueeze(0).expand(batch_size, -1, -1)
        weighted_adj = adj + eye

        # Simple row normalization (epsilon guards against empty rows).
        row_sum = weighted_adj.sum(dim=-1, keepdim=True) + 1e-6
        normalized_adj = weighted_adj / row_sum

        # Guard against NaN/Inf introduced by normalization.
        if torch.isnan(normalized_adj).any() or torch.isinf(normalized_adj).any():
            normalized_adj = torch.where(torch.isnan(normalized_adj) | torch.isinf(normalized_adj),
                                          torch.zeros_like(normalized_adj), normalized_adj)

        # Graph convolution: aggregate neighbor features.
        output = torch.bmm(normalized_adj, support)  # [batch, nodes, out_features]

        # Bias, activation (ReLU chosen over GELU for stability), dropout.
        output = output + self.bias
        output = F.relu(output)
        output = self.dropout(output)

        # Final NaN/Inf guard on the output.
        if torch.isnan(output).any() or torch.isinf(output).any():
            output = torch.where(torch.isnan(output) | torch.isinf(output),
                                  torch.zeros_like(output), output)

        return output


class TemporalFeatureExtraction(nn.Module):
    """
    Multi-scale temporal feature extraction with dilated convolutions.

    Each dilation rate gets its own Conv1d block; per-scale outputs are fused
    with learnable softmax attention weights, projected, and group-normalized.
    Padding is chosen so the time length is preserved for odd kernel sizes.
    """
    def __init__(self, in_channels, out_channels, kernel_size=3, dilation_rates=(1, 2, 4, 8), dropout=0.3):
        """
        Args:
            in_channels: Input channel count.
            out_channels: Output channel count (must be divisible by 8 — the
                final GroupNorm uses 8 groups and raises otherwise).
            kernel_size: Convolution kernel size.
            dilation_rates: One dilation per scale.  (Tuple default — the
                original used a mutable list default, a classic Python pitfall.)
            dropout: Dropout probability inside each conv block.
        """
        super(TemporalFeatureExtraction, self).__init__()

        self.conv_blocks = nn.ModuleList()

        # One "same"-length dilated convolution block per scale.
        for dilation in dilation_rates:
            padding = (kernel_size - 1) * dilation // 2
            self.conv_blocks.append(nn.Sequential(
                nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding, dilation=dilation),
                nn.BatchNorm1d(out_channels),
                nn.GELU(),
                nn.Dropout(dropout)
            ))

        # Learnable per-scale fusion weights (softmax-normalized in forward).
        self.attention_weights = nn.Parameter(torch.ones(len(dilation_rates), 1, 1))

        # Output projection; GroupNorm as a 1D-friendly alternative to LayerNorm.
        self.output_projection = nn.Conv1d(out_channels, out_channels, 1)
        self.layer_norm = nn.GroupNorm(8, out_channels)

    def forward(self, x):
        """
        Args:
            x: Input [batch, channels, time].

        Returns:
            Tensor [batch, out_channels, time].
        """
        # Run every dilated conv block on the same input: [scales, batch, c, t].
        scale_outputs = torch.stack([block(x) for block in self.conv_blocks], dim=0)

        # Softmax over scales, then a broadcasted weighted sum across scales.
        weights = F.softmax(self.attention_weights, dim=0).view(-1, 1, 1, 1)
        weighted_sum = (scale_outputs * weights).sum(dim=0)

        # Final projection and normalization.
        return self.layer_norm(self.output_projection(weighted_sum))


class AdaptiveFeatureIntegration(nn.Module):
    """
    Adaptively integrates features across spatial and temporal domains
    """
    def __init__(self, feature_dim, num_domains=2, dropout=0.3):
        super(AdaptiveFeatureIntegration, self).__init__()
        
        # 修改domain_attention以接受concatenated features
        self.domain_attention = nn.Sequential(
            nn.Linear(feature_dim * num_domains, feature_dim),
            nn.GELU(),
            nn.Linear(feature_dim, num_domains),
            nn.Softmax(dim=-1)
        )
        
        self.feature_transform = nn.ModuleList([
            nn.Sequential(
                nn.Linear(feature_dim, feature_dim),
                nn.LayerNorm(feature_dim),
                nn.GELU(),
                nn.Dropout(dropout)
            ) for _ in range(num_domains)
        ])
        
        self.output_projection = nn.Sequential(
            nn.Linear(feature_dim, feature_dim),
            nn.LayerNorm(feature_dim)
        )
        
    def forward(self, features_list):
        # features_list: list of tensors [batch, nodes, features]
        batch_size = features_list[0].size(0) 
        
        # Concatenate features for attention computation
        # 避免使用stack可能导致的维度不匹配
        concat_features = []
        for feature in features_list:
            # 添加数值稳定性检查
            if torch.isnan(feature).any() or torch.isinf(feature).any():
                # 如果检测到NaN或Inf，用0替换
                feature = torch.where(torch.isnan(feature) | torch.isinf(feature), 
                                       torch.zeros_like(feature), feature)
            
            concat_features.append(torch.mean(feature, dim=1))  # [batch, features]
        concat_features = torch.cat(concat_features, dim=-1)  # [batch, features*num_domains]
        
        # 确保concat_features的batch维度正确
        concat_features = concat_features[:batch_size]
        
        # 添加数值稳定性检查
        if torch.isnan(concat_features).any() or torch.isinf(concat_features).any():
            concat_features = torch.where(torch.isnan(concat_features) | torch.isinf(concat_features),
                                           torch.zeros_like(concat_features), concat_features)
        
        # Compute domain attention weights
        domain_weights = self.domain_attention(concat_features)  # [batch, domains]
        
        # Transform each domain's features
        transformed_features = []
        for i, features in enumerate(features_list):
            # 确保每个特征的batch维度一致
            if features.size(0) != batch_size:
                features = features[:batch_size]
            transformed_features.append(self.feature_transform[i](features))
        
        # Weighted combination of transformed features
        integrated_features = torch.zeros_like(transformed_features[0])
        for i, features in enumerate(transformed_features):
            # 使用reshape代替view以处理可能不连续的张量
            weight = domain_weights[:, i].reshape(-1, 1, 1)  # [batch, 1, 1]
            integrated_features += weight * features
        
        # 添加数值稳定性检查
        if torch.isnan(integrated_features).any() or torch.isinf(integrated_features).any():
            integrated_features = torch.where(torch.isnan(integrated_features) | torch.isinf(integrated_features),
                                               torch.zeros_like(integrated_features), integrated_features)
        
        # Final projection
        output = self.output_projection(integrated_features)
        
        return output


class EEGMixedAttentionModel(nn.Module):
    """
    Combined graph-neural-network / mixed-attention model for EEG-based
    depression recognition with multi-domain feature integration.

    Pipeline:
        1. 2D CNN feature extraction over (electrodes, time).
        2. Multi-scale dilated temporal processing.
        3. Adaptive pooling down to one feature vector per electrode.
        4. Two parallel branches over the electrode graph: stacked
           MixedAttentionLayer blocks and stacked DynamicGraphConvolution blocks.
        5. Adaptive integration of both branches, attention pooling over
           electrodes, and an MLP classifier.
    """
    def __init__(self,
                 input_channels=1,
                 num_electrodes=22,
                 num_time_points=1000,
                 output_dim=2,
                 hidden_channels=128,
                 num_layers=6,
                 num_heads=16,
                 dropout=0.3,
                 graph_type='learned',
                 device=None):
        """
        Args:
            input_channels: Number of input channels (usually 1 for raw EEG).
            num_electrodes: Number of EEG electrodes (graph nodes).
            num_time_points: Number of time samples per trial.
            output_dim: Number of output classes.
            hidden_channels: Width of the hidden feature dimension.
            num_layers: Depth of each branch (attention and graph conv).
            num_heads: Attention heads per MixedAttentionLayer.
            dropout: Base dropout probability.
            graph_type: 'learned' for a trainable adjacency Parameter; any
                other value uses a fixed proximity-based adjacency buffer.
            device: Optional target device; defaults to CUDA when available.
        """
        super(EEGMixedAttentionModel, self).__init__()
        self.device = device or torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.num_electrodes = num_electrodes
        self.num_time_points = num_time_points
        self.num_classes = output_dim
        self.hidden_channels = hidden_channels

        # Input BatchNorm to stabilize training.  NOTE(review): this module is
        # registered twice (as `input_bn` and as element 0 of
        # `feature_extraction`); kept as-is for checkpoint compatibility.
        self.input_bn = nn.BatchNorm2d(input_channels)

        # Feature extraction CNN: temporal convs + pooling, then a conv that
        # collapses the electrode axis; output is [batch, hidden_channels, time'].
        self.feature_extraction = nn.Sequential(
            self.input_bn,
            nn.Conv2d(input_channels, 64, (1, 25), padding=(0, 12)),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.AvgPool2d((1, 4)),
            nn.Conv2d(64, 128, (1, 15), padding=(0, 7)),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.AvgPool2d((1, 2)),
            nn.Conv2d(128, 256, (1, 5), padding=(0, 2)),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.AvgPool2d((1, 2)),
            nn.Conv2d(256, hidden_channels, (num_electrodes, 1)),
            nn.BatchNorm2d(hidden_channels),
            nn.ReLU(),
            nn.Dropout(dropout),
            Rearrange('b c 1 t -> b c t')  # drop the collapsed electrode axis
        )

        # Time dimension after the first two conv/pool stages.
        # NOTE(review): this formula omits the third conv/pool stage, so it
        # does NOT match the actual output length of `feature_extraction`.
        # It is unused downstream (adaptive pooling fixes all shapes) — verify
        # before relying on it.
        time_after_pool1 = (num_time_points + 2*12 - 25 + 1) // 4
        self.time_dimension = (time_after_pool1 + 2*7 - 15 + 1) // 2

        # Multi-scale temporal feature extraction.
        self.temporal_processor = TemporalFeatureExtraction(
            hidden_channels, hidden_channels, kernel_size=3,
            dilation_rates=[1, 2, 4, 8], dropout=dropout
        )

        # Learnable positional encoding per electrode.
        self.electrode_pos_encoding = nn.Parameter(torch.randn(1, num_electrodes, hidden_channels))

        # Vectorized adjacency construction (replaces the original O(n^2)
        # Python double loops; produces the same values).
        electrode_idx = torch.arange(num_electrodes)
        index_distance = torch.abs(electrode_idx.unsqueeze(0) - electrode_idx.unsqueeze(1))
        if graph_type == 'learned':
            # Trainable adjacency initialized with distance-based connectivity
            # 1 / (1 + |i - j|).
            self.adjacency = nn.Parameter(1.0 / (1.0 + index_distance.float()))
        else:
            # Fixed adjacency: connect electrodes whose indices differ by <= 2.
            self.register_buffer('adjacency', (index_distance <= 2).float())

        # Mixed attention branch.
        self.mixed_attention_layers = nn.ModuleList(
            MixedAttentionLayer(hidden_channels, hidden_channels, n_heads=num_heads, dropout=dropout)
            for _ in range(num_layers)
        )

        # Dynamic graph convolution branch.
        self.graph_conv_layers = nn.ModuleList(
            DynamicGraphConvolution(hidden_channels, hidden_channels, dropout=dropout)
            for _ in range(num_layers)
        )

        # Adaptive integration of the two branches.
        self.feature_integration = AdaptiveFeatureIntegration(
            hidden_channels, num_domains=2, dropout=dropout
        )

        # Global attention pooling over electrodes.
        self.global_attention = nn.Sequential(
            nn.Linear(hidden_channels, hidden_channels // 4),
            nn.GELU(),
            nn.Linear(hidden_channels // 4, 1)
        )

        # Classifier MLP: wide -> narrow with decreasing dropout.
        self.classifier = nn.Sequential(
            nn.Linear(hidden_channels, hidden_channels * 4),
            nn.LayerNorm(hidden_channels * 4),
            nn.GELU(),
            nn.Dropout(dropout * 1.5),
            nn.Linear(hidden_channels * 4, hidden_channels * 2),
            nn.LayerNorm(hidden_channels * 2),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_channels * 2, hidden_channels),
            nn.LayerNorm(hidden_channels),
            nn.GELU(),
            nn.Dropout(dropout * 0.5),
            nn.Linear(hidden_channels, output_dim)
        )

        # Initialize parameters.
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Xavier init for Linear, Kaiming for Conv2d; zero all biases."""
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if m.bias is not None:
                nn.init.zeros_(m.bias)

    @staticmethod
    def _zero_non_finite(tensor):
        """Replace NaN/Inf entries with zeros; no-op on clean tensors."""
        if torch.isnan(tensor).any() or torch.isinf(tensor).any():
            bad = torch.isnan(tensor) | torch.isinf(tensor)
            tensor = torch.where(bad, torch.zeros_like(tensor), tensor)
        return tensor

    def get_adjacency(self, batch_size):
        """Return the adjacency matrix expanded to [batch, nodes, nodes].

        Bug fix: the original dispatched on ``hasattr(self, 'adjacency')``,
        which is True for BOTH the learned Parameter and the registered
        buffer, so the fixed-graph branch was unreachable and fixed graphs
        were wrongly pushed through sigmoid + renormalization.  Dispatch on
        whether the adjacency is a trainable Parameter instead.
        """
        if isinstance(self.adjacency, nn.Parameter):
            # Learned adjacency: squash to [0, 1], add self-loops, symmetrize.
            adj = torch.sigmoid(self.adjacency)
            adj = adj + torch.eye(self.num_electrodes, device=adj.device)
            adj = 0.5 * (adj + adj.transpose(0, 1))

            # Symmetric degree normalization D^{-1/2} A D^{-1/2}
            # (epsilon guards against zero-degree rows).
            d_inv_sqrt = torch.pow(adj.sum(dim=-1) + 1e-6, -0.5)
            d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.
            d_mat_inv_sqrt = torch.diag(d_inv_sqrt)
            adj_normalized = torch.mm(torch.mm(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)
        else:
            # Predefined adjacency buffer is used as-is.
            adj_normalized = self.adjacency

        # Expand for batch.
        return adj_normalized.unsqueeze(0).expand(batch_size, -1, -1)

    def forward(self, x, return_features=False, apply_activation=True):
        """Run the full model.

        Args:
            x: Input EEG [batch, input_channels, electrodes, time].
            return_features: If True, return pooled features
                [batch, hidden_channels] instead of class outputs.
            apply_activation: If False, return raw logits.

        Returns:
            Class probabilities (sigmoid for a single-logit head, softmax
            otherwise), raw logits, or pooled features, per the flags.
        """
        # Sanitize and bound the raw input.
        x = self._zero_non_finite(x)
        x = torch.clamp(x, min=-100.0, max=100.0)

        batch_size = x.size(0)

        # CNN feature extraction -> [batch, hidden_channels, time'].
        x = self._zero_non_finite(self.feature_extraction(x))

        # Multi-scale temporal processing, sanitized and bounded.
        temporal_features = self._zero_non_finite(self.temporal_processor(x))
        temporal_features = torch.clamp(temporal_features, min=-100.0, max=100.0)

        # Adaptive-pool the time axis down to one feature vector per electrode.
        # (The original transposed to [batch, time, channels] and immediately
        # transposed back before pooling — a no-op that has been removed.)
        electrode_features = F.adaptive_avg_pool1d(
            temporal_features, self.num_electrodes
        ).transpose(1, 2)  # [batch, electrodes, channels]
        electrode_features = self._zero_non_finite(electrode_features)

        # Add per-electrode positional encoding.
        electrode_features = electrode_features + self.electrode_pos_encoding

        # Batch-expanded adjacency shared by both branches.
        adj = self.get_adjacency(batch_size)

        # Two parallel branches over the electrode graph.
        attention_features = electrode_features
        graph_features = electrode_features

        for layer in self.mixed_attention_layers:
            attention_features = self._zero_non_finite(layer(attention_features, adj))

        for layer in self.graph_conv_layers:
            graph_features = self._zero_non_finite(layer(graph_features, adj))

        # Integrate both branches: [batch, electrodes, hidden_channels].
        integrated_features = self._zero_non_finite(
            self.feature_integration([attention_features, graph_features])
        )

        # Global attention pooling over electrodes.
        attention_scores = self._zero_non_finite(self.global_attention(integrated_features))
        # The +1e-10 shift is kept from the original; softmax is shift-invariant,
        # so it is effectively a no-op.
        attention_weights = F.softmax(attention_scores + 1e-10, dim=1)
        pooled_features = torch.sum(integrated_features * attention_weights, dim=1)  # [batch, channels]
        pooled_features = self._zero_non_finite(pooled_features)

        if return_features:
            return pooled_features

        # Classification head.
        logits = self._zero_non_finite(self.classifier(pooled_features))

        if not apply_activation:
            return logits

        # Sigmoid for a single-logit binary head, softmax otherwise.
        if self.num_classes == 1 or (self.num_classes == 2 and logits.shape[1] == 1):
            return torch.sigmoid(logits)
        else:
            return F.softmax(logits, dim=1)