"""
Model components for DAT-SNet
Including Inception modules, SE-Block, and attention mechanisms
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np


class SBLEBranch(nn.Module):
    """
    Statistical Behavior of Local Extrema (SBLE) branch.

    Detects local maxima/minima in every input channel and projects the
    resulting binary extrema maps (2x the input channels: one max map and
    one min map per channel) down to `out_channels` features.
    """

    def __init__(self, in_channels, out_channels):
        """
        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output feature channels.
        """
        super(SBLEBranch, self).__init__()

        # Input is in_channels * 2 because the extrema map stacks a
        # local-max map and a local-min map for every input channel.
        self.sble_features = nn.Sequential(
            nn.Conv1d(in_channels * 2, out_channels, kernel_size=1),
            nn.BatchNorm1d(out_channels),
            nn.ReLU(inplace=True)
        )

        # Learnable detection threshold. NOTE: it is only used inside hard
        # comparisons, so no gradient flows to it through the extrema maps.
        self.threshold = nn.Parameter(torch.tensor(0.05))

    def detect_local_extrema(self, x):
        """
        Build a binary extrema map of shape (B, 2*C, L).

        A point counts as a local maximum when it exceeds both neighbours
        by more than the threshold, and as a local minimum when it lies
        below both neighbours by more than the threshold. Reflect padding
        keeps the map the same length as the input (the two boundary
        samples may carry mirrored-edge artifacts).
        """
        batch_size, channels, seq_len = x.size()

        # rise[j] = x[j] - x[j-1]: elevation over the previous point.
        rise = x[:, :, 1:] - x[:, :, :-1]
        rise = F.pad(rise, (1, 0), mode='reflect')

        # fall[j] = x[j] - x[j+1]: elevation over the next point.
        fall = x[:, :, :-1] - x[:, :, 1:]
        fall = F.pad(fall, (0, 1), mode='reflect')

        # Bug fix: the original differences were padded on the wrong sides,
        # index-shifting the comparisons so that the "local_max" mask
        # actually fired on minima and vice versa. A true maximum must rise
        # above the previous point AND stand above the next one.
        local_max = (rise > self.threshold) & (fall > self.threshold)
        local_min = (rise < -self.threshold) & (fall < -self.threshold)

        # Stack max/min maps and fold them into channels:
        # (B, 2, C, L) -> (B, 2*C, L).
        extrema_map = torch.stack([local_max.float(), local_min.float()], dim=1)
        extrema_map = extrema_map.view(batch_size, channels * 2, seq_len)

        return extrema_map

    def forward(self, x):
        """Return SBLE features of shape (B, out_channels, L)."""
        extrema_map = self.detect_local_extrema(x)

        # Make sure the extrema map's temporal length matches the input's.
        if extrema_map.size(-1) != x.size(-1):
            if extrema_map.size(-1) > x.size(-1):
                # Crop to match.
                extrema_map = extrema_map[..., :x.size(-1)]
            else:
                # Pad to match (rare case).
                pad_size = x.size(-1) - extrema_map.size(-1)
                extrema_map = F.pad(extrema_map, (0, pad_size), 'replicate')

        sble_feats = self.sble_features(extrema_map)
        return sble_feats


class EEMDBranch(nn.Module):
    """
    Simplified EEMD (Ensemble Empirical Mode Decomposition) branch.

    Approximates IMF extraction with parallel convolutions whose kernel
    sizes grow toward lower-frequency components; their outputs are
    concatenated to exactly `out_channels` channels.
    """

    def __init__(self, in_channels, out_channels, num_imfs=4):
        """
        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Total number of output channels.
            num_imfs (int): Requested number of IMF extractors (capped by
                the number of available kernel sizes).
        """
        super(EEMDBranch, self).__init__()

        # Increasing kernel sizes approximate lower-frequency IMFs.
        kernel_sizes = [5, 9, 17, 33]
        num_extractors = min(num_imfs, len(kernel_sizes))

        self.imf_extractors = nn.ModuleList()
        if num_extractors > 0:
            # Bug fix: split out_channels exactly across the extractors that
            # actually exist, so the concatenated output always has
            # out_channels channels. The original gave each extractor
            # out_channels // num_imfs channels, which under-produced
            # channels whenever num_imfs did not divide out_channels or
            # exceeded len(kernel_sizes), breaking downstream channel math.
            base = out_channels // num_extractors
            splits = [base] * num_extractors
            splits[-1] += out_channels - base * num_extractors
            for k_size, ch in zip(kernel_sizes, splits):
                self.imf_extractors.append(nn.Sequential(
                    nn.Conv1d(in_channels, ch,
                              kernel_size=k_size,
                              padding=k_size // 2),
                    nn.BatchNorm1d(ch),
                    nn.ReLU(inplace=True)
                ))

    def forward(self, x):
        """Return concatenated IMF features of shape (B, out_channels, L)."""
        if not self.imf_extractors:
            # No extractors configured (num_imfs <= 0): empty feature map.
            return torch.zeros(x.size(0), 0, x.size(2), device=x.device)

        imf_features = [extractor(x) for extractor in self.imf_extractors]

        # Align temporal dimensions before concatenation (all kernel sizes
        # are odd, so the lengths normally already match).
        min_size = min(imf.size(-1) for imf in imf_features)
        imf_features = [imf[..., :min_size] for imf in imf_features]

        return torch.cat(imf_features, dim=1)


class SelfAttention1D(nn.Module):
    """
    Self-attention over the temporal axis of 1D feature maps.

    Projects the input into reduced-dimension query/key spaces, forms a
    length-by-length attention matrix, and blends the attended values back
    into the input through a learnable gate (gamma starts at zero, so the
    module is an identity mapping at initialisation).
    """

    def __init__(self, in_channels, reduction=8):
        super(SelfAttention1D, self).__init__()

        # Integer division, clamped so tiny channel counts still work.
        reduced = max(1, in_channels // reduction)

        self.query = nn.Conv1d(in_channels, reduced, kernel_size=1)
        self.key = nn.Conv1d(in_channels, reduced, kernel_size=1)
        self.value = nn.Conv1d(in_channels, in_channels, kernel_size=1)

        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        _, num_channels, seq_len = x.size()

        q = self.query(x).permute(0, 2, 1)   # [B, L, C']
        k = self.key(x)                      # [B, C', L]
        v = self.value(x)                    # [B, C, L]

        # Raw attention scores: [B, L, L].
        scores = torch.bmm(q, k)

        # Defensive fallback: a NaN score matrix is replaced with uniform
        # attention weights instead of propagating NaNs.
        if torch.isnan(scores).any():
            weights = torch.ones_like(scores) / seq_len
        else:
            # Scale by sqrt(C) before softmax for numerical stability.
            weights = self.softmax(scores / (num_channels ** 0.5))

        # Attended values: [B, C, L].
        attended = torch.bmm(v, weights.permute(0, 2, 1))

        # Gated residual connection.
        return self.gamma * attended + x


class InceptionBlock(nn.Module):
    """
    Enhanced Inception module for temporal feature extraction.

    Runs several parallel branches over the same input -- standard
    convolutions at multiple kernel sizes, plus optional SBLE (local
    extrema) and EEMD (mode decomposition) branches -- concatenates them
    along the channel axis, refines the result with a 1x1 bottleneck, and
    optionally adds a residual connection and self-attention.
    """

    def __init__(self, in_channels, kernel_sizes=[10, 20, 40, 80],
                 out_channels_per_kernel=16, use_residual=True,
                 use_attention=True, use_sble=True, use_eemd=True):
        """
        Args:
            in_channels (int): Number of input channels.
            kernel_sizes (list[int]): Kernel size for each standard branch.
            out_channels_per_kernel (int): Output channels per branch.
            use_residual (bool): Add a residual connection around the block.
            use_attention (bool): Apply SelfAttention1D to the output.
            use_sble (bool): Include the SBLE local-extrema branch.
            use_eemd (bool): Include the EEMD decomposition branch.
        """
        super(InceptionBlock, self).__init__()

        self.use_residual = use_residual
        self.use_attention = use_attention
        self.use_sble = use_sble
        self.use_eemd = use_eemd

        # Standard inception branches, one conv per kernel size.
        # NOTE(review): with an even kernel k and padding k//2, Conv1d
        # outputs one extra sample (L + 2*(k//2) - k + 1 = L + 1);
        # forward() trims all branches to a common length to compensate.
        self.branches = nn.ModuleList()
        for k_size in kernel_sizes:
            branch = nn.Sequential(
                nn.Conv1d(in_channels, out_channels_per_kernel,
                          kernel_size=k_size, padding=k_size // 2),
                nn.BatchNorm1d(out_channels_per_kernel),
                nn.ReLU(inplace=True)
            )
            self.branches.append(branch)

        # SBLE branch (local extrema statistics).
        if use_sble:
            self.sble_branch = SBLEBranch(in_channels, out_channels_per_kernel)

        # EEMD branch (approximate mode decomposition).
        # NOTE(review): assumes EEMDBranch emits exactly
        # out_channels_per_kernel channels -- verify against its num_imfs.
        if use_eemd:
            self.eemd_branch = EEMDBranch(in_channels, out_channels_per_kernel)

        # Total channel count after concatenating all enabled branches.
        standard_branches = len(kernel_sizes) * out_channels_per_kernel
        self.total_out_channels = standard_branches
        if use_sble:
            self.total_out_channels += out_channels_per_kernel
        if use_eemd:
            self.total_out_channels += out_channels_per_kernel

        # Residual connection; a 1x1 conv matches channel counts when they differ.
        if use_residual:
            self.residual_conv = None
            if in_channels != self.total_out_channels:
                self.residual_conv = nn.Conv1d(in_channels, self.total_out_channels, kernel_size=1)

        # Self-attention over the concatenated features.
        if use_attention:
            self.attention = SelfAttention1D(self.total_out_channels)

        # Bottleneck: halve then restore channels to mix branch features.
        self.bottleneck = nn.Sequential(
            nn.Conv1d(self.total_out_channels, self.total_out_channels // 2, kernel_size=1),
            nn.BatchNorm1d(self.total_out_channels // 2),
            nn.ReLU(inplace=True),
            nn.Conv1d(self.total_out_channels // 2, self.total_out_channels, kernel_size=1),
            nn.BatchNorm1d(self.total_out_channels),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        """
        Args:
            x (torch.Tensor): Input of shape (batch, in_channels, length).

        Returns:
            torch.Tensor: Output of shape (batch, total_out_channels, L')
                where L' is the shortest branch output length. If no branch
                is enabled at all, the input is returned unchanged.
        """
        # Standard inception branches.
        branch_outputs = [branch(x) for branch in self.branches]

        # Append SBLE features.
        if self.use_sble:
            sble_output = self.sble_branch(x)
            branch_outputs.append(sble_output)

        # Append EEMD features.
        if self.use_eemd:
            eemd_output = self.eemd_branch(x)
            branch_outputs.append(eemd_output)

        # All branch outputs must share the same temporal (last) dimension:
        # find the smallest length across outputs...
        min_size = min([b.size(-1) for b in branch_outputs])

        # ...and crop every output to that size.
        branch_outputs = [b[..., :min_size] for b in branch_outputs]

        # Concatenate all branch outputs along the channel axis.
        if branch_outputs:
            output = torch.cat(branch_outputs, dim=1)
        else:
            # No branch enabled: pass the input through untouched.
            return x

        # Apply the bottleneck.
        output = self.bottleneck(output)

        # With a residual connection, the residual's temporal size must
        # also be cropped to match the (possibly trimmed) output.
        if self.use_residual:
            if self.residual_conv is not None:
                residual = self.residual_conv(x)
                # Crop the residual's temporal dimension to match.
                residual = residual[..., :output.size(-1)]
            else:
                # x is used directly as the residual; crop it to match too.
                residual = x[..., :output.size(-1)]
            output = output + residual

        # Apply self-attention.
        if self.use_attention:
            output = self.attention(output)

        return output


class TemporalBranch(nn.Module):
    """
    Enhanced temporal branch extracting time-domain features.

    Pipeline: strided front-end convolution -> InceptionBlock ->
    output convolution with pooling and dropout -> optional final
    self-attention refinement.
    """

    def __init__(self, input_channels=1,
                 kernel_sizes=[5, 15, 30, 75, 150],  # Optimized for sleep EEG rhythms
                 filters_per_kernel=16,
                 dropout=0.5,
                 use_residual=True,
                 use_attention=True,
                 use_sble=True,
                 use_eemd=True):
        super(TemporalBranch, self).__init__()

        # Front-end convolution; stride kept small for better feature retention.
        self.conv1 = nn.Sequential(
            nn.Conv1d(input_channels, 16, kernel_size=50, stride=4, padding=25),
            nn.BatchNorm1d(16),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=4, stride=2, padding=2)
        )

        # Multi-scale inception stage.
        self.inception = InceptionBlock(
            in_channels=16,
            kernel_sizes=kernel_sizes,
            out_channels_per_kernel=filters_per_kernel,
            use_residual=use_residual,
            use_attention=use_attention,
            use_sble=use_sble,
            use_eemd=use_eemd
        )

        # Channel count after the inception stage: one filter set per kernel
        # size, plus one extra set each for SBLE and EEMD when enabled.
        extra_branches = int(use_sble) + int(use_eemd)
        total_filters = filters_per_kernel * (len(kernel_sizes) + extra_branches)

        # Output stage: convolution, pooling and dropout.
        self.conv_out = nn.Sequential(
            nn.Conv1d(total_filters, total_filters, kernel_size=8, stride=1, padding=4),
            nn.BatchNorm1d(total_filters),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=4, stride=2, padding=2),
            nn.Dropout(dropout)
        )

        # Optional final attention-based feature refinement.
        self.final_attention = SelfAttention1D(total_filters) if use_attention else nn.Identity()

    def forward(self, x):
        # Accept (batch, length, channels) layouts by transposing to
        # channels-first when the middle dimension looks like the length.
        if x.size(1) > x.size(2):
            x = x.transpose(1, 2)

        # Front-end convolution.
        features = self.conv1(x)

        # Inception stage.
        inception_out = self.inception(features)

        # Crop the inception output so its temporal size matches conv1's.
        if inception_out.size(-1) != features.size(-1):
            inception_out = inception_out[..., :features.size(-1)]

        # Output convolution, then final attention.
        out = self.conv_out(inception_out)
        return self.final_attention(out)


class PhaseAmplitudeCouplingModule(nn.Module):
    """
    Cross-frequency phase-amplitude coupling (PAC) feature extractor.

    Derives a phase-like representation (tanh path) and an amplitude-like
    representation (ReLU path) from the input, fuses them into a sigmoid
    coupling mask, re-weights channels with SE-style attention, and
    returns the enhanced features plus a residual of the input.
    """

    def __init__(self, in_channels, reduction_ratio=8):
        super(PhaseAmplitudeCouplingModule, self).__init__()

        # Work in a reduced channel space for computational efficiency.
        self.reduced_dim = max(1, in_channels // reduction_ratio)

        # Phase-like path for low frequencies (tanh suits circular data).
        self.phase_path = nn.Sequential(
            nn.Conv2d(in_channels, self.reduced_dim, kernel_size=1),
            nn.BatchNorm2d(self.reduced_dim),
            nn.Tanh()
        )

        # Amplitude-like path for higher frequencies.
        self.amplitude_path = nn.Sequential(
            nn.Conv2d(in_channels, self.reduced_dim, kernel_size=1),
            nn.BatchNorm2d(self.reduced_dim),
            nn.ReLU(inplace=True)
        )

        # Fuses both paths into a per-element coupling mask in (0, 1).
        self.coupling = nn.Sequential(
            nn.Conv2d(self.reduced_dim * 2, in_channels, kernel_size=1),
            nn.BatchNorm2d(in_channels),
            nn.Sigmoid()
        )

        # SE-style channel attention over the masked features.
        self.se_block = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, in_channels // reduction_ratio, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels // reduction_ratio, in_channels, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        """Return PAC-enhanced features with a residual connection."""
        phase = self.phase_path(x)
        amplitude = self.amplitude_path(x)

        # Coupling mask from the joint phase/amplitude representation.
        mask = self.coupling(torch.cat([phase, amplitude], dim=1))
        gated = x * mask

        # Emphasise informative frequency bands via channel attention.
        gated = gated * self.se_block(gated)

        # Residual keeps the original spectral content available downstream.
        return gated + x


class SpectralBranch(nn.Module):
    """
    Enhanced spectral branch with a cross-frequency coupling mechanism.

    Three CNN stages over a (batch, 1, freq, time) spectrogram; the first
    two stages are each followed by a PAC module that models
    cross-frequency interactions. The final feature map is flattened over
    the frequency axis into a 1D temporal sequence.
    """

    def __init__(self, input_channels=32, output_channels=64, dropout=0.5):
        """
        Initialize the enhanced spectral branch.

        Args:
            input_channels (int): Number of input channels (frequency subbands)
            output_channels (int): Number of output channels
            dropout (float): Dropout probability
        """
        super(SpectralBranch, self).__init__()

        # Stage 1: 5x5 convolution with 2x2 pooling.
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        )

        # PAC enhancement after the first stage.
        self.pac_module1 = PhaseAmplitudeCouplingModule(16)

        # Stage 2: 3x3 convolution with 2x2 pooling.
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        )

        # PAC enhancement after the second stage.
        self.pac_module2 = PhaseAmplitudeCouplingModule(32)

        # Stage 3: final convolution, pooling and dropout.
        self.conv3 = nn.Sequential(
            nn.Conv2d(32, output_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        """
        Forward pass through the enhanced spectral branch.

        Args:
            x (torch.Tensor): Input tensor of shape
                (batch_size, 1, frequency_bins, time_frames)

        Returns:
            torch.Tensor: Enhanced spectral features of shape
                (batch_size, channels * freq, time)
        """
        feats = self.pac_module1(self.conv1(x))
        feats = self.pac_module2(self.conv2(feats))
        feats = self.conv3(feats)

        # Merge channel and frequency axes: (B, C, F, T) -> (B, C*F, T).
        return feats.flatten(1, 2)


class SEBlock(nn.Module):
    """
    Squeeze-and-Excitation block for channel attention on 1D feature maps.

    Squeezes each channel to one statistic via global average pooling,
    learns per-channel gates in (0, 1) through a two-layer bottleneck MLP,
    and rescales the input channels by those gates.
    """

    def __init__(self, channels, reduction=16):
        """
        Args:
            channels (int): Number of input channels.
            reduction (int): Reduction ratio for the bottleneck.
        """
        super(SEBlock, self).__init__()

        # Bug fix: clamp the bottleneck width to at least 1 so that
        # channels < reduction no longer yields a degenerate zero-width
        # Linear layer (which made the gates independent of the input).
        # Consistent with the clamping used in SelfAttention1D.
        reduced = max(1, channels // reduction)

        self.squeeze = nn.AdaptiveAvgPool1d(1)
        self.excitation = nn.Sequential(
            nn.Linear(channels, reduced),
            nn.ReLU(inplace=True),
            nn.Linear(reduced, channels),
            nn.Sigmoid()
        )

    def forward(self, x):
        """
        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, channels, length).

        Returns:
            torch.Tensor: Input rescaled per channel, same shape as x.
        """
        batch_size, channels, _ = x.size()

        # Squeeze: (B, C, L) -> (B, C).
        y = self.squeeze(x).view(batch_size, channels)

        # Excitation: per-channel gates in (0, 1), shaped (B, C, 1).
        y = self.excitation(y).view(batch_size, channels, 1)

        # Scale the input channel-wise.
        return x * y


class TemporalAttention(nn.Module):
    """
    Additive temporal attention for pooling RNN hidden states.

    Scores every timestep with a small MLP, normalises the scores with a
    softmax over time, and returns the attention-weighted sum of hidden
    states together with the weights themselves.
    """

    def __init__(self, hidden_size, attention_size=None):
        """
        Args:
            hidden_size (int): Size of each hidden state vector.
            attention_size (int, optional): Width of the scoring layer;
                defaults to hidden_size when not given.
        """
        super(TemporalAttention, self).__init__()

        if attention_size is None:
            attention_size = hidden_size

        self.attention = nn.Sequential(
            nn.Linear(hidden_size, attention_size),
            nn.Tanh(),
            nn.Linear(attention_size, 1)
        )

    def forward(self, hidden_states):
        """
        Args:
            hidden_states (torch.Tensor): Hidden states of shape
                (batch_size, sequence_length, hidden_size).

        Returns:
            tuple: (context_vector, attention_weights) with shapes
                (batch_size, hidden_size) and (batch_size, sequence_length).
        """
        # Per-timestep scores: (B, T, H) -> (B, T).
        scores = self.attention(hidden_states).squeeze(2)

        # Normalise over the time axis.
        weights = F.softmax(scores, dim=1)

        # Weighted sum over time: (B, 1, T) x (B, T, H) -> (B, H).
        context = torch.bmm(weights.unsqueeze(1), hidden_states).squeeze(1)

        return context, weights


class BiGRUWithAttention(nn.Module):
    """
    Bidirectional GRU followed by temporal attention pooling.

    Runs a (possibly stacked) BiGRU over the input sequence, pools the
    per-timestep outputs into a single context vector with
    TemporalAttention, and applies dropout to the pooled vector.
    """

    def __init__(self, input_size, hidden_size, num_layers=1, dropout=0.0, attention_size=None):
        """
        Args:
            input_size (int): Size of input features.
            hidden_size (int): Size of the GRU hidden state (per direction).
            num_layers (int): Number of stacked GRU layers.
            dropout (float): Dropout probability (between GRU layers when
                stacked, and on the attention output).
            attention_size (int, optional): Width of the attention scorer.
        """
        super(BiGRUWithAttention, self).__init__()

        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Inter-layer GRU dropout only applies when layers are stacked.
        self.bigru = nn.GRU(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            bidirectional=True,
            dropout=dropout if num_layers > 1 else 0
        )

        # Bidirectional outputs are 2 * hidden_size wide.
        self.attention = TemporalAttention(hidden_size * 2, attention_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, hidden=None):
        """
        Args:
            x (torch.Tensor): Input of shape (batch_size, sequence_length, input_size).
            hidden (torch.Tensor, optional): Initial GRU hidden state.

        Returns:
            tuple: (output, hidden, attention_weights) where output is the
                attention context of shape (batch_size, hidden_size * 2),
                hidden is the final GRU hidden state, and attention_weights
                are the per-timestep pooling weights.
        """
        gru_out, hidden = self.bigru(x, hidden)
        context, attention_weights = self.attention(gru_out)
        return self.dropout(context), hidden, attention_weights