"""
DAT-SNet: Dual-Attention Temporal-Spectral Network for sleep stage classification
"""
import torch
import torch.nn as nn
import torch.nn.functional as F

from models.modules import TemporalBranch, SpectralBranch, SEBlock, BiGRUWithAttention
from models.wavelet import TimeFrequencyRepresentation


class FeatureExtractionModule(nn.Module):
    """
    Feature extraction module of DAT-SNet.

    Runs the raw signal through parallel temporal and spectral branches,
    applies SE-Block channel attention to each, then global-average-pools
    and concatenates the two branch outputs into one fused feature vector
    used for per-epoch classification.
    """

    def __init__(self, config):
        """
        Initialize the feature extraction module.

        Args:
            config: Model configuration. Must provide: temporal_kernel_sizes,
                temporal_filters, dropout, use_residual, use_attention,
                use_sble, use_eemd, wavelet_family, wavelet_level,
                spectral_filters, se_reduction.
        """
        super(FeatureExtractionModule, self).__init__()

        # Temporal branch: multi-kernel 1D CNN over the raw signal
        self.temporal_branch = TemporalBranch(
            input_channels=1,
            kernel_sizes=config.temporal_kernel_sizes,
            filters_per_kernel=config.temporal_filters,
            dropout=config.dropout,
            use_residual=config.use_residual,
            use_attention=config.use_attention,
            use_sble=config.use_sble,
            use_eemd=config.use_eemd
        )

        # Spectral branch: wavelet-based time-frequency representation
        # followed by a CNN over the subbands
        self.tf_repr = TimeFrequencyRepresentation(
            wavelet=config.wavelet_family,
            level=config.wavelet_level
        )

        self.spectral_branch = SpectralBranch(
            input_channels=2 ** config.wavelet_level,  # Number of frequency subbands
            output_channels=config.spectral_filters,
            dropout=config.dropout
        )

        # Output channel counts for each branch.
        # The temporal branch emits one group of filters per kernel size,
        # plus one extra group each for the optional SBLE / EEMD paths.
        base_temporal_channels = len(config.temporal_kernel_sizes) * config.temporal_filters
        additional_channels = 0
        if config.use_sble:
            additional_channels += config.temporal_filters
        if config.use_eemd:
            additional_channels += config.temporal_filters

        self.temporal_channels = base_temporal_channels + additional_channels
        # NOTE(review): the factor 4 mirrors SpectralBranch's internal channel
        # expansion — confirm against models.modules.SpectralBranch.
        self.spectral_channels = config.spectral_filters * 4

        # Channel attention (SE-Blocks), one per branch
        self.temporal_se = SEBlock(self.temporal_channels, reduction=config.se_reduction)
        self.spectral_se = SEBlock(self.spectral_channels, reduction=config.se_reduction)

        # Classifier head over the concatenated pooled features
        self.feature_dim = self.temporal_channels + self.spectral_channels
        self.classifier = nn.Sequential(
            nn.Linear(self.feature_dim, 256),
            nn.ReLU(inplace=True),
            nn.Dropout(config.dropout),
            nn.Linear(256, 5)  # 5 sleep stages
        )

    def forward(self, x, extract_features=False):
        """
        Forward pass through the feature extraction module.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, 1, signal_length)
            extract_features (bool): If True, also return the fused feature
                vector alongside the class scores

        Returns:
            torch.Tensor or tuple: logits of shape (batch_size, 5), or
            (fused_features, logits) when extract_features is True
        """
        batch_size = x.size(0)

        # Shared branch + SE-attention pipeline (see helper below)
        enhanced_temporal, enhanced_spectral = self.extract_temporal_spectral_features(x)

        # Global average pooling collapses the time axis to one vector per branch
        temporal_vector = F.adaptive_avg_pool1d(enhanced_temporal, 1).view(batch_size, -1)
        spectral_vector = F.adaptive_avg_pool1d(enhanced_spectral, 1).view(batch_size, -1)

        # Concatenate branch vectors into the fused representation
        fused_features = torch.cat([temporal_vector, spectral_vector], dim=1)

        # Classification
        logits = self.classifier(fused_features)

        if extract_features:
            return fused_features, logits
        return logits

    def extract_temporal_spectral_features(self, x):
        """
        Extract SE-enhanced temporal and spectral feature maps separately.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, 1, signal_length)

        Returns:
            tuple: (temporal_features, spectral_features), each of shape
            (batch_size, channels, length) with both lengths truncated to
            the shorter of the two branch outputs
        """
        # Temporal branch
        temporal_features = self.temporal_branch(x)

        # Spectral branch: time-frequency transform, then CNN
        time_freq_repr = self.tf_repr(x)
        spectral_features = self.spectral_branch(time_freq_repr)

        # Both branches may downsample differently; truncate to the shorter
        # length so the maps stay aligned
        min_length = min(temporal_features.size(2), spectral_features.size(2))
        temporal_features = temporal_features[:, :, :min_length]
        spectral_features = spectral_features[:, :, :min_length]

        # Channel attention
        enhanced_temporal = self.temporal_se(temporal_features)
        enhanced_spectral = self.spectral_se(spectral_features)

        return enhanced_temporal, enhanced_spectral


class SequenceLearningModule(nn.Module):
    """
    Sequence learning module of DAT-SNet.

    Wraps a bidirectional GRU with attention to model dependencies across
    a sequence of per-epoch feature vectors, followed by a small MLP head
    that maps the attended context to sleep-stage logits.
    """

    def __init__(self, input_size, hidden_size, num_layers, attention_heads, dropout=0.5):
        """
        Initialize the sequence learning module.

        Args:
            input_size (int): Size of each input feature vector
            hidden_size (int): GRU hidden state size (per direction)
            num_layers (int): Number of stacked GRU layers
            attention_heads (int): Number of attention heads.
                NOTE(review): accepted but not forwarded to
                BiGRUWithAttention — confirm whether it should be.
            dropout (float): Dropout probability
        """
        super(SequenceLearningModule, self).__init__()

        # BiGRU with attention over the epoch sequence
        self.bigru_attention = BiGRUWithAttention(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            dropout=dropout,
        )

        # Bidirectional output is 2 * hidden_size wide; map it to 5 stages
        self.classifier = nn.Sequential(
            nn.Linear(hidden_size * 2, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(128, 5),  # 5 sleep stages
        )

    def forward(self, x, hidden=None):
        """
        Forward pass through the sequence learning module.

        Args:
            x (torch.Tensor): Input of shape (batch_size, sequence_length, input_size)
            hidden (torch.Tensor, optional): Initial GRU hidden state

        Returns:
            tuple: (logits, hidden, attention_weights)
        """
        context, new_hidden, attn = self.bigru_attention(x, hidden)
        return self.classifier(context), new_hidden, attn


class DATSNet(nn.Module):
    """
    Complete DAT-SNet model.

    Chains the feature extraction module (per-epoch temporal/spectral
    features) with the sequence learning module (BiGRU + attention over
    the epoch sequence).
    """

    def __init__(self, config):
        """
        Initialize the DAT-SNet model.

        Args:
            config: Model configuration (also consumed by both submodules)
        """
        super(DATSNet, self).__init__()

        # Per-epoch feature extractor
        self.feature_extraction = FeatureExtractionModule(config)

        # Sequence model consumes the extractor's fused feature vectors
        self.sequence_learning = SequenceLearningModule(
            input_size=self.feature_extraction.feature_dim,
            hidden_size=config.hidden_size,
            num_layers=config.num_layers,
            attention_heads=config.attention_heads,
            dropout=config.dropout
        )

        self._init_weights()

    def _init_weights(self):
        """Apply standard initialization schemes across all submodules."""
        for module in self.modules():
            if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
                # He init pairs with the ReLU activations used throughout
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
                # Identity affine transform at the start of training
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.GRU):
                for param_name, param in module.named_parameters():
                    if 'weight_ih' in param_name:
                        nn.init.xavier_uniform_(param)
                    elif 'weight_hh' in param_name:
                        # Orthogonal recurrent weights help gradient flow
                        nn.init.orthogonal_(param)
                    elif 'bias' in param_name:
                        nn.init.constant_(param, 0)

    def forward(self, x, mode='full', hidden=None):
        """
        Forward pass through the DAT-SNet model.

        Args:
            x (torch.Tensor): Input tensor
                mode='feature': (batch_size, 1, signal_length) raw epochs
                mode='sequence': (batch_size, sequence_length, feature_dim)
                    pre-extracted feature vectors
                mode='full': (batch_size, sequence_length, 1, signal_length)
            mode (str): Operating mode ('feature', 'sequence', 'full')
            hidden (torch.Tensor, optional): Initial hidden state for the GRU

        Returns:
            torch.Tensor for mode='feature' (logits), otherwise the tuple
            (logits, hidden, attention_weights)

        Raises:
            ValueError: If mode is not one of the three recognized values
        """
        # Guard-clause dispatch on the operating mode
        if mode == 'feature':
            return self.feature_extraction(x)

        if mode == 'sequence':
            # x already contains pre-extracted features
            return self.sequence_learning(x, hidden)

        if mode != 'full':
            raise ValueError(f"Unknown mode: {mode}")

        # Full pipeline: flatten epochs, extract features, re-assemble the
        # sequence, then run the sequence model
        batch_size, seq_length = x.size(0), x.size(1)
        flat_epochs = x.view(batch_size * seq_length, 1, -1)
        epoch_features, _ = self.feature_extraction(flat_epochs, extract_features=True)
        epoch_features = epoch_features.view(batch_size, seq_length, -1)
        return self.sequence_learning(epoch_features, hidden)

    def extract_features(self, x):
        """
        Extract fused features from the feature extraction module.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, 1, signal_length)

        Returns:
            torch.Tensor: Fused feature vectors of shape (batch_size, feature_dim)
        """
        fused, _ = self.feature_extraction(x, extract_features=True)
        return fused


class FeatureExtractionLoss(nn.Module):
    """
    Loss function for feature extraction module.

    Uses cross-entropy, optionally weighted per class to counter the
    strong class imbalance typical of sleep-stage data.
    """

    def __init__(self, class_weights=None):
        """
        Initialize the feature extraction loss.

        Args:
            class_weights (torch.Tensor, optional): Per-class weights for
                handling imbalance; None disables weighting
        """
        super(FeatureExtractionLoss, self).__init__()
        # Register as a buffer (not a plain attribute) so the weight tensor
        # follows the module across devices via .to()/.cuda(); a plain
        # attribute would stay on CPU and crash GPU training.
        # register_buffer accepts None when no weighting is requested.
        self.register_buffer('class_weights', class_weights)

    def forward(self, logits, targets):
        """
        Calculate the (optionally class-weighted) cross-entropy loss.

        Args:
            logits (torch.Tensor): Predicted logits of shape (N, num_classes)
            targets (torch.Tensor): Ground-truth class indices of shape (N,)

        Returns:
            torch.Tensor: Scalar loss value
        """
        return F.cross_entropy(logits, targets, weight=self.class_weights)


class SequenceLearningLoss(nn.Module):
    """
    Loss function for sequence learning module.

    Thin wrapper around standard (unweighted) cross-entropy.
    """

    def __init__(self):
        """Initialize the sequence learning loss."""
        super(SequenceLearningLoss, self).__init__()
        # nn.CrossEntropyLoss with default arguments computes exactly
        # F.cross_entropy(logits, targets)
        self._criterion = nn.CrossEntropyLoss()

    def forward(self, logits, targets):
        """
        Calculate the cross-entropy loss.

        Args:
            logits (torch.Tensor): Predicted logits of shape (N, num_classes)
            targets (torch.Tensor): Ground-truth class indices of shape (N,)

        Returns:
            torch.Tensor: Scalar loss value
        """
        return self._criterion(logits, targets)