import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, GATConv, global_mean_pool, global_add_pool
import numpy as np
import math


class GraphAttentionLayer(nn.Module):
    """
    Multi-head graph attention layer over dense adjacency matrices.

    Each head projects node features to ``out_features // n_heads`` dims,
    scores every node pair with a learned additive-attention vector, masks
    the scores with the adjacency matrix, and aggregates neighbour
    features. Head outputs are layer-normalised per head, concatenated,
    and optionally combined with a (projected) residual of the input.
    """
    def __init__(self, in_features, out_features, n_heads=8, dropout=0.5, alpha=0.2, residual=True):
        super(GraphAttentionLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.n_heads = n_heads
        self.dropout = dropout
        self.alpha = alpha
        self.residual = residual

        # Width of each head; out_features is assumed divisible by n_heads.
        head_dim = out_features // n_heads

        # Per-head projection weights and attention vectors.
        self.W = nn.Parameter(torch.Tensor(n_heads, in_features, head_dim))
        self.a = nn.Parameter(torch.Tensor(n_heads, 2 * head_dim))

        # One LayerNorm per head, applied before concatenation.
        self.layer_norms = nn.ModuleList(nn.LayerNorm(head_dim) for _ in range(n_heads))

        # Xavier initialisation for both parameter tensors.
        nn.init.xavier_uniform_(self.W.data)
        nn.init.xavier_uniform_(self.a.data)

        # Residual path: project only when the feature widths differ.
        if residual and in_features != out_features:
            self.residual_proj = nn.Linear(in_features, out_features)
        else:
            self.residual_proj = nn.Identity()

        self.leakyrelu = nn.LeakyReLU(self.alpha)
        self.dropout_layer = nn.Dropout(dropout)

    def _run_head(self, head, x, adj, batch_size, num_nodes):
        """Attended output of one head: [batch, nodes, out_features // n_heads]."""
        projected = torch.matmul(x, self.W[head])

        # Build every (i, j) feature pair for the additive attention score.
        pairs = torch.cat([
            projected.repeat(1, 1, num_nodes).view(batch_size, num_nodes, num_nodes, -1),
            projected.repeat(1, num_nodes, 1).view(batch_size, num_nodes, num_nodes, -1),
        ], dim=-1)  # [batch, nodes, nodes, 2 * head_dim]

        # Raw pairwise scores via the attention vector.
        scores = self.leakyrelu(torch.matmul(
            pairs.view(batch_size, num_nodes * num_nodes, -1),
            self.a[head].view(-1, 1)
        ).view(batch_size, num_nodes, num_nodes))

        # Mask out non-edges with a large negative constant before softmax.
        masked = torch.where(adj > 0, scores, -9e15 * torch.ones_like(scores))
        weights = self.dropout_layer(F.softmax(masked, dim=2))

        # Aggregate neighbour features and normalise.
        return self.layer_norms[head](torch.bmm(weights, projected))

    def forward(self, x, adj):
        """
        Args:
            x:   node features, [batch, num_nodes, in_features]
            adj: dense adjacency, [batch, num_nodes, num_nodes]; entries > 0 are edges
        Returns:
            updated node features, [batch, num_nodes, out_features]
        """
        batch_size, num_nodes = x.size(0), x.size(1)

        head_outputs = [self._run_head(h, x, adj, batch_size, num_nodes)
                        for h in range(self.n_heads)]
        h_prime = torch.cat(head_outputs, dim=2)  # [batch, nodes, out_features]

        if self.residual:
            h_prime = h_prime + self.residual_proj(x)

        return h_prime


class ChannelAttention(nn.Module):
    """
    Squeeze-and-excitation style channel attention (CBAM channel branch).

    Average- and max-pooled channel descriptors pass through a shared
    bottleneck MLP, are summed, squashed with a sigmoid, and used to
    rescale the channels of the input.
    """
    def __init__(self, in_channels, reduction_ratio=8):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        # Never let the bottleneck collapse below a single channel.
        bottleneck = max(1, in_channels // reduction_ratio)

        # 1x1 convolutions act as a shared two-layer MLP on [B, C, 1, 1].
        self.mlp = nn.Sequential(
            nn.Conv2d(in_channels, bottleneck, 1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(bottleneck, in_channels, 1, bias=False)
        )

        # He initialisation suited to the ReLU bottleneck.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')

        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Rescale channels of ``x`` ([B, C, H, W]) by learned attention weights."""
        pooled_scores = self.mlp(self.avg_pool(x)) + self.mlp(self.max_pool(x))
        return self.sigmoid(pooled_scores) * x


class SpatialAttention(nn.Module):
    """
    CBAM-style spatial attention.

    A two-channel map built from the channel-wise mean and max is
    convolved into a single-channel sigmoid mask that rescales every
    spatial location of the input.
    """
    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        # 'Same' padding keeps the attention map at the input resolution.
        self.conv = nn.Conv2d(2, 1, kernel_size=kernel_size, padding=kernel_size // 2)
        self.sigmoid = nn.Sigmoid()

        # Xavier-initialise the conv; zero its bias when present.
        nn.init.xavier_uniform_(self.conv.weight, gain=1.414)
        if self.conv.bias is not None:
            nn.init.zeros_(self.conv.bias)

    def forward(self, x):
        """Apply the spatial attention mask to ``x`` of shape [B, C, H, W]."""
        mean_map = torch.mean(x, dim=1, keepdim=True)
        max_map = torch.max(x, dim=1, keepdim=True)[0]
        mask = self.sigmoid(self.conv(torch.cat([mean_map, max_map], dim=1)))
        return x * mask


class SpatioTemporalBlock(nn.Module):
    """
    Spatio-temporal processing block for EEG node features.

    Pipeline: graph attention across electrodes -> stacked 1-D
    convolutions along the node axis -> sigmoid feature gate -> residual
    skip connection (LayerNorm-ed, projected when widths differ).
    """
    def __init__(self, in_features, out_features, n_heads=8, dropout=0.3):
        super(SpatioTemporalBlock, self).__init__()

        # Graph-attention stage mixing information across nodes.
        self.spatial_attention = GraphAttentionLayer(
            in_features, out_features, n_heads=n_heads, dropout=dropout
        )

        # Two stacked Conv1d layers (kernels 3 and 5) for multi-scale mixing.
        self.temporal_conv = nn.Sequential(
            nn.Conv1d(out_features, out_features, kernel_size=3, padding=1),
            nn.BatchNorm1d(out_features),
            nn.GELU(),
            nn.Conv1d(out_features, out_features, kernel_size=5, padding=2),
            nn.BatchNorm1d(out_features),
            nn.GELU(),
            nn.Dropout(dropout)
        )

        # Sigmoid gate for per-feature soft selection.
        self.feature_gate = nn.Sequential(
            nn.Linear(out_features, out_features),
            nn.Sigmoid()
        )

        # Skip path, projected only when dimensionality changes.
        self.skip_connection = nn.Sequential(
            nn.Linear(in_features, out_features) if in_features != out_features else nn.Identity(),
            nn.LayerNorm(out_features)
        )

    def forward(self, x, adj):
        """
        Args:
            x:   node features, [batch, nodes, features]
            adj: dense adjacency, [batch, nodes, nodes]
        Returns:
            updated node features, [batch, nodes, out_features]
        """
        shortcut = self.skip_connection(x)

        attended = self.spatial_attention(x, adj)

        # Conv1d expects [batch, channels, length]; swap axes around the convs.
        convolved = self.temporal_conv(attended.transpose(1, 2)).transpose(1, 2)

        gated = convolved * self.feature_gate(convolved)

        return gated + shortcut


class PositionalEncoding(nn.Module):
    """
    Sinusoidal positional encoding (Vaswani et al., 2017), used here to
    inject electrode-position information into node features.

    Supports both even and odd ``d_model``: the original slice assignment
    ``pe[:, 1::2] = torch.cos(position * div_term)`` raised a shape
    mismatch for odd ``d_model`` because the cosine slice has one column
    fewer than ``div_term``.
    """
    def __init__(self, d_model, max_len=1000):
        super(PositionalEncoding, self).__init__()

        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # div_term has ceil(d_model / 2) entries: frequencies 10000^(-2i/d_model).
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))

        pe[:, 0::2] = torch.sin(position * div_term)
        # The cosine slice has floor(d_model / 2) columns; truncate the
        # frequency vector so odd d_model works (no-op when d_model is even).
        pe[:, 1::2] = torch.cos(position * div_term[:d_model // 2])

        # Buffer (not Parameter): moves with .to(device), excluded from grads.
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        """Add positional encodings to ``x`` of shape [batch, seq_len, d_model]."""
        return x + self.pe[:, :x.size(1), :]


class EEGGNN(nn.Module):
    """
    Enhanced Graph Neural Network for EEG-based depression recognition.

    Expects input of shape [batch, input_channels, num_electrodes, time].
    Two multi-scale convolutional branches extract temporal features, a
    learnable adjacency matrix (regularised by a distance prior) drives a
    stack of SpatioTemporalBlocks, and an attention readout combined with
    frequency-domain (FFT magnitude) features feeds the classifier.

    Args:
        input_channels:  channels of the raw input (usually 1).
        num_electrodes:  number of EEG electrodes / graph nodes.
        output_dim:      number of output classes.
        hidden_channels: width of the node-feature space.
        num_time_points: nominal number of time samples (not enforced).
        num_layers:      number of SpatioTemporalBlocks.
        dropout:         base dropout probability (kept low to favour
                         training accuracy, per the original author).
        device:          optional target device; stored on ``self.device``
                         but the caller still moves the module itself.
    """
    def __init__(self,
                 input_channels=1,
                 num_electrodes=22,
                 output_dim=2,
                 hidden_channels=64,
                 num_time_points=1000,
                 num_layers=4,
                 dropout=0.2,
                 device=None):
        super(EEGGNN, self).__init__()
        self.device = device or torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.hidden_channels = hidden_channels
        self.num_electrodes = num_electrodes
        self.num_classes = output_dim

        # Branch 1: fine-grained temporal patterns (short kernels, /8 downsample).
        self.feature_branch1 = nn.Sequential(
            nn.Conv2d(input_channels, 32, kernel_size=(1, 15), stride=(1, 1), padding=(0, 7)),
            nn.BatchNorm2d(32),
            nn.GELU(),
            nn.AvgPool2d(kernel_size=(1, 4), stride=(1, 4)),
            ChannelAttention(32),
            nn.Conv2d(32, 64, kernel_size=(1, 9), stride=(1, 1), padding=(0, 4)),
            nn.BatchNorm2d(64),
            nn.GELU(),
            nn.AvgPool2d(kernel_size=(1, 2), stride=(1, 2)),
        )

        # Branch 2: broader temporal patterns (long kernel, /8 downsample).
        self.feature_branch2 = nn.Sequential(
            nn.Conv2d(input_channels, 32, kernel_size=(1, 31), stride=(1, 1), padding=(0, 15)),
            nn.BatchNorm2d(32),
            nn.GELU(),
            nn.AvgPool2d(kernel_size=(1, 8), stride=(1, 8)),
            ChannelAttention(32),
        )

        # Feature fusion -- total channels of the two branches.
        combined_channels = 64 + 32

        # The (num_electrodes, 1) kernel collapses the electrode axis.
        self.feature_fusion = nn.Sequential(
            nn.Conv2d(combined_channels, hidden_channels, kernel_size=(num_electrodes, 1), stride=(1, 1)),
            nn.BatchNorm2d(hidden_channels),
            nn.GELU(),
            nn.Dropout(dropout * 0.5)
        )

        # Electrode position encoding.
        self.pos_encoder = PositionalEncoding(hidden_channels, max_len=num_electrodes)

        # Learnable adjacency plus a learnable scale for the distance prior.
        self.adjacency = nn.Parameter(torch.randn(num_electrodes, num_electrodes))
        self.adj_scaling = nn.Parameter(torch.ones(1))

        # Distance prior: electrodes with nearby indices are treated as
        # spatially close -- NOTE(review): assumes the montage ordering
        # reflects physical proximity; confirm against the electrode layout.
        distance_prior = torch.zeros(num_electrodes, num_electrodes)
        for i in range(num_electrodes):
            for j in range(num_electrodes):
                distance_prior[i, j] = 1.0 / (1.0 + 0.8 * abs(i - j))
        self.register_buffer('distance_prior', distance_prior)

        # Spatio-temporal blocks; dropout decays with depth. Clamp at 0 so
        # deep stacks (num_layers > 11) never request a negative p, which
        # nn.Dropout rejects.
        self.st_blocks = nn.ModuleList()
        for i in range(num_layers):
            self.st_blocks.append(
                SpatioTemporalBlock(
                    hidden_channels,
                    hidden_channels,
                    n_heads=8 if i < num_layers - 1 else 4,
                    dropout=max(0.0, dropout * (1.0 - 0.1 * i))
                )
            )

        # Attention readout: scores each node for weighted pooling.
        self.readout_attention = nn.Sequential(
            nn.Linear(hidden_channels, hidden_channels // 2),
            nn.GELU(),
            nn.Linear(hidden_channels // 2, 1),
        )

        # Lightweight frequency-domain feature head.
        self.freq_features = nn.Sequential(
            nn.Linear(hidden_channels, hidden_channels // 2),
            nn.GELU(),
            nn.Linear(hidden_channels // 2, hidden_channels // 4),
            nn.GELU(),
        )

        # Pooled graph features concatenated with frequency features.
        combined_features_size = hidden_channels + hidden_channels // 4

        # Classifier MLP.
        self.classifier = nn.Sequential(
            nn.Linear(combined_features_size, hidden_channels * 2),
            nn.LayerNorm(hidden_channels * 2),
            nn.GELU(),
            nn.Dropout(dropout * 0.5),
            nn.Linear(hidden_channels * 2, hidden_channels),
            nn.LayerNorm(hidden_channels),
            nn.GELU(),
            nn.Dropout(dropout * 0.3),
            nn.Linear(hidden_channels, output_dim)
        )

        # Lazily-created projection of node features to hidden_channels
        # (see forward); cached here so its parameters are registered,
        # trained, and reused across calls.
        self.node_projection = None

    def get_adjacency(self, batch_size):
        """Return the symmetrically-normalised adjacency, expanded to the batch."""
        # Learned adjacency (sigmoid-squashed) plus scaled distance prior.
        adj = torch.sigmoid(self.adjacency) + self.adj_scaling * self.distance_prior

        # Add self-loops.
        adj = adj + torch.eye(self.num_electrodes, device=adj.device)

        # Symmetrise.
        adj = 0.5 * (adj + adj.transpose(0, 1))

        # Symmetric normalisation: D^{-1/2} A D^{-1/2}.
        d_inv_sqrt = torch.pow(adj.sum(dim=-1), -0.5)
        d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.
        d_mat_inv_sqrt = torch.diag(d_inv_sqrt)
        adj_normalized = torch.mm(torch.mm(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)

        # Expand (view, not copy) over the batch dimension.
        return adj_normalized.unsqueeze(0).expand(batch_size, -1, -1)

    def forward(self, x, return_features=False, apply_activation=True):
        """
        Args:
            x: raw EEG, [batch, input_channels, num_electrodes, time].
            return_features: if True, return the pooled feature vector
                [batch, hidden + hidden // 4] instead of class scores.
            apply_activation: if True, apply sigmoid/softmax to the
                logits; pass False when training with a loss that expects
                raw logits (e.g. CrossEntropyLoss).
        Returns:
            [batch, output_dim] probabilities (or logits), or the feature
            vector when ``return_features`` is True.
        """
        batch_size = x.size(0)

        # Multi-scale feature extraction (both branches downsample time by 8).
        x1 = self.feature_branch1(x)  # [batch, 64, 1, time/8] after fusion path
        x2 = self.feature_branch2(x)  # [batch, 32, 1, time/8]

        # Concatenate branch features along the channel axis.
        x_cat = torch.cat([x1, x2], dim=1)  # [batch, 96, ., time/8]

        # Fuse and collapse the electrode axis.
        x = self.feature_fusion(x_cat)  # [batch, hidden, 1, time/8]

        # Reshape to a temporal sequence.
        x = x.squeeze(2).transpose(1, 2)  # [batch, time/8, hidden]

        # Frequency-domain features: mean magnitude spectrum over time.
        x_freq = torch.fft.rfft(x, dim=1)
        x_freq = torch.abs(x_freq)  # magnitude spectrum
        x_freq = torch.mean(x_freq, dim=1)  # average over frequency bins
        x_freq = self.freq_features(x_freq)  # [batch, hidden/4]

        # Map the sequence back onto electrode-wise node features.
        # NOTE(review): this requires (time/8) * hidden to be divisible by
        # num_electrodes; confirm num_time_points is chosen accordingly.
        x = x.reshape(batch_size, self.num_electrodes, -1)

        # Project node features to hidden_channels when the reshape yields
        # a different width. The projection is created once and cached as
        # a submodule so its weights are trained and reused. (The original
        # code built a fresh, randomly-initialised nn.Linear on every
        # forward pass, making inference non-deterministic and the layer
        # untrainable.) NOTE: if an optimizer is constructed before the
        # first forward call, run a dummy forward first (or rebuild the
        # optimizer) so these parameters are included.
        if x.size(2) != self.hidden_channels:
            if (self.node_projection is None
                    or self.node_projection.in_features != x.size(2)):
                self.node_projection = nn.Linear(
                    x.size(2), self.hidden_channels
                ).to(x.device)
            x = self.node_projection(x)  # [batch, electrodes, hidden]

        # Add electrode position encodings.
        x = self.pos_encoder(x)

        # Shared normalised adjacency for the whole batch.
        adj = self.get_adjacency(batch_size)

        # Spatio-temporal blocks, with a global residual every other layer
        # to improve gradient flow.
        x_residual = x
        for i, block in enumerate(self.st_blocks):
            x = block(x, adj)
            if i > 0 and i % 2 == 0:
                x = x + x_residual
                x_residual = x

        # Attention readout: softmax-weighted pooling over nodes.
        attention_scores = self.readout_attention(x).squeeze(-1)  # [batch, nodes]
        attention_weights = F.softmax(attention_scores, dim=1).unsqueeze(2)  # [batch, nodes, 1]
        x = (x * attention_weights).sum(dim=1)  # [batch, hidden]

        # Concatenate graph and frequency-domain features.
        x_combined = torch.cat([x, x_freq], dim=1)  # [batch, hidden + hidden/4]

        if return_features:
            return x_combined

        logits = self.classifier(x_combined)

        if not apply_activation:
            return logits

        # Single-logit binary heads get a sigmoid; otherwise a softmax.
        if self.num_classes == 1 or (self.num_classes == 2 and logits.shape[1] == 1):
            return torch.sigmoid(logits)
        else:
            return F.softmax(logits, dim=1)