"""
Wavelet transform implementation for spectral feature extraction
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pywt


class WaveletPacketTransform(nn.Module):
    """
    Wavelet Packet Transform for spectral feature extraction.

    Performs a full binary decomposition: every node (approximation AND
    detail) is split again at each level, producing 2**level sub-bands.
    Implemented with torch 1D convolutions so it runs on any device,
    rather than calling pywt per sample.
    """

    def __init__(self, wavelet='db4', level=5, mode='constant'):
        """
        Initialize the Wavelet Packet Transform.

        Args:
            wavelet (str): Wavelet family to use (e.g., 'db4', 'sym4', 'coif1')
            level (int): Decomposition level (output has 2**level sub-bands)
            mode (str): torch signal extension mode
                ('constant', 'reflect', 'replicate', 'circular')
        """
        super().__init__()
        self.wavelet_name = wavelet
        self.level = level
        self.mode = mode

        # Pre-compute the decomposition filters once from pywt.
        wavelet_obj = pywt.Wavelet(wavelet)
        dec_lo = torch.tensor(wavelet_obj.dec_lo, dtype=torch.float32)
        dec_hi = torch.tensor(wavelet_obj.dec_hi, dtype=torch.float32)

        # F.conv1d computes cross-correlation, so flip the filters to obtain
        # a true convolution.  Registered as buffers (not parameters): they
        # are fixed but must follow the module across devices/dtypes.
        self.register_buffer('dec_lo_r', dec_lo.flip(0))
        self.register_buffer('dec_hi_r', dec_hi.flip(0))

        # Filter length determines the padding applied at each DWT step.
        self.filter_length = len(wavelet_obj.dec_lo)

    def dwt_conv1d(self, x, h_lo, h_hi):
        """
        Perform a single-level DWT using 1D convolution plus downsampling.

        Args:
            x (torch.Tensor): Input signal [batch_size, length]
            h_lo (torch.Tensor): Low-pass filter (already flipped)
            h_hi (torch.Tensor): High-pass filter (already flipped)

        Returns:
            tuple: (approximation coefficients, detail coefficients), each of
            shape [batch_size, ceil((length + filter_length - 1) / 2)]
        """
        # Pad on both sides so border samples receive full filter support.
        pad = self.filter_length - 1
        x_padded = F.pad(x.unsqueeze(1), (pad, pad), mode=self.mode)

        # Apply the analysis filter pair.
        lo = F.conv1d(x_padded, h_lo.unsqueeze(0).unsqueeze(0))
        hi = F.conv1d(x_padded, h_hi.unsqueeze(0).unsqueeze(0))

        # Dyadic downsampling: keep even-indexed samples.
        return lo[:, 0, ::2], hi[:, 0, ::2]

    def packet_transform(self, x):
        """
        Perform the full wavelet packet transform.

        Args:
            x (torch.Tensor): Input signal [batch_size, length]

        Returns:
            torch.Tensor: Coefficients [batch_size, 2**level, coeff_length]
        """
        # Breadth-first expansion of the packet tree, one level at a time.
        coeffs_list = [x]
        for _ in range(self.level):
            current_coeffs = []
            for node_coeffs in coeffs_list:
                lo, hi = self.dwt_conv1d(node_coeffs, self.dec_lo_r, self.dec_hi_r)
                # Both branches are kept: packet transform, not a plain DWT.
                current_coeffs.append(lo)
                current_coeffs.append(hi)
            coeffs_list = current_coeffs

        # Padding can leave nodes with slightly different lengths; truncate
        # to the shortest so they stack into a rectangular tensor.
        min_length = min(c.size(1) for c in coeffs_list)
        coeffs_list = [c[:, :min_length] for c in coeffs_list]

        # [batch_size, num_subbands, coeff_length]
        return torch.stack(coeffs_list, dim=1)

    def forward(self, x):
        """
        Apply the Wavelet Packet Transform to the input signal.

        Args:
            x (torch.Tensor): Input signal, shape (batch_size, 1, signal_length)
                              or (batch_size, signal_length, 1)

        Returns:
            torch.Tensor: Coefficients [batch_size, 2**level, coeff_length]
        """
        # Normalize layout to (batch, 1, length): the longer of the two
        # trailing dims is assumed to be time.
        if x.size(1) > x.size(2):
            x = x.transpose(1, 2)

        # Drop the singleton channel dimension -> [batch_size, length].
        x = x.squeeze(1)

        return self.packet_transform(x)


class WaveletTransform(nn.Module):
    """
    Enhanced wavelet packet transform with a learnable weight per sub-band,
    intended to let the model emphasize sleep-relevant frequency bands.
    """

    def __init__(self, wavelet='db4', level=5, mode='constant'):
        """
        Initialize the enhanced wavelet transform.

        Args:
            wavelet (str): Wavelet family to use
            level (int): Decomposition level (output has 2**level sub-bands)
            mode (str): torch signal extension mode
                ('constant', 'reflect', 'replicate', 'circular')
        """
        super().__init__()
        self.wavelet_name = wavelet
        self.level = level
        self.mode = mode

        # Pre-compute the analysis filters; flipped because F.conv1d is
        # cross-correlation, not convolution.
        wavelet_obj = pywt.Wavelet(wavelet)
        dec_lo = torch.tensor(wavelet_obj.dec_lo, dtype=torch.float32).flip(0)
        dec_hi = torch.tensor(wavelet_obj.dec_hi, dtype=torch.float32).flip(0)

        # Buffers: fixed filters that must follow the module across devices.
        self.register_buffer('dec_lo', dec_lo)
        self.register_buffer('dec_hi', dec_hi)

        self.filter_length = len(wavelet_obj.dec_lo)
        # Symmetric "same"-style padding used at every decomposition step.
        self.pad_size = (self.filter_length - 1) // 2

        # One learnable attention weight per sub-band, initialized uniformly.
        # Intended to learn emphasis on sleep-relevant rhythms -- delta
        # (0.5-4 Hz), theta (4-8 Hz), alpha (8-13 Hz), sigma (12-16 Hz) --
        # but which sub-band index corresponds to which rhythm depends on
        # the level and the sampling rate.
        self.band_weights = nn.Parameter(torch.ones(2 ** level))

    def forward(self, x):
        """
        Forward pass implementing a sleep-targeted wavelet packet transform.

        Args:
            x (torch.Tensor): Input signal [batch_size, 1, signal_length]
                              (or [batch_size, signal_length, 1])

        Returns:
            torch.Tensor: Weighted time-frequency representation of shape
            [batch_size, 1, 2**level, coeff_length]
        """
        # Accept (B, L, 1) as well as (B, 1, L); the longer trailing dim is
        # assumed to be time.
        if x.size(1) > x.size(2):
            x = x.transpose(1, 2)

        # Drop the singleton channel dimension -> [batch_size, length].
        x = x.squeeze(1)

        # Breadth-first wavelet packet decomposition.
        nodes = [x]
        for _ in range(self.level):
            next_level_nodes = []
            for node in nodes:
                # Pad so the convolution covers the signal borders.
                padded = F.pad(node.unsqueeze(1), (self.pad_size, self.pad_size), mode=self.mode)

                # Low- and high-pass analysis filtering.
                lo = F.conv1d(padded, self.dec_lo.view(1, 1, -1))
                hi = F.conv1d(padded, self.dec_hi.view(1, 1, -1))

                # Dyadic downsampling; keep both branches (packet transform).
                next_level_nodes.append(lo[:, 0, ::2])
                next_level_nodes.append(hi[:, 0, ::2])
            nodes = next_level_nodes

        # Truncate to a common length so the sub-bands stack cleanly.
        min_length = min(node.size(1) for node in nodes)
        subbands = [node[:, :min_length] for node in nodes]
        output = torch.stack(subbands, dim=1)

        # Apply the learnable per-band attention weights.
        weighted_output = output * self.band_weights.view(1, -1, 1)

        # [batch_size, 1, num_subbands, time] as expected by the spectral branch.
        return weighted_output.unsqueeze(1)


class TimeFrequencyRepresentation(nn.Module):
    """
    Time-frequency representation built on the enhanced wavelet transform.
    """

    def __init__(self, wavelet='db4', level=5, mode='constant'):
        """
        Initialize the time-frequency representation module.

        Args:
            wavelet (str): Wavelet family to use
            level (int): Decomposition level
            mode (str): Signal extension mode; pywt-style names are accepted
                and translated to the torch padding modes
                ('constant', 'reflect', 'replicate', 'circular')
        """
        super().__init__()

        # Translation table from pywt extension-mode names to the mode names
        # understood by torch.nn.functional.pad.  Anything unrecognized
        # falls back to 'constant'.
        self.mode_map = dict(
            zero='constant',
            symmetric='reflect',
            reflect='reflect',
            periodic='circular',
            constant='constant',
        )
        torch_mode = self.mode_map.get(mode, 'constant')

        # Delegate the actual transform to the enhanced implementation.
        self.wpt = WaveletTransform(wavelet, level, torch_mode)

        # Keep the configuration around for later introspection.
        self.wavelet = wavelet
        self.level = level
        self.mode = torch_mode

    def forward(self, x):
        """
        Generate the time-frequency representation of the input signal.

        Args:
            x (torch.Tensor): Input signal, shape (batch_size, 1, signal_length)
                              or (batch_size, signal_length, 1)

        Returns:
            torch.Tensor: Representation of shape
            [batch_size, 1, num_subbands, coeff_length]
        """
        return self.wpt(x)