import math
import torch
import torch.nn.functional as F

from torch import nn


class Interpolate(nn.Module):
    """
    Thin ``nn.Module`` wrapper around ``F.interpolate`` that resizes the
    trailing spatial/temporal dimension(s) of a tensor to a fixed size.
    """

    # Modes for which PyTorch accepts the ``align_corners`` argument.
    _ALIGN_CORNERS_MODES = ('linear', 'bilinear', 'bicubic', 'trilinear')

    def __init__(self, size, mode='linear'):
        """
        :param size: Target output size passed to ``F.interpolate``.
        :param mode: Interpolation mode (e.g. 'linear', 'nearest').
        """
        super().__init__()
        self.size = size
        self.mode = mode

    def forward(self, x):
        """
        Resize ``x`` to ``self.size``.

        :param x: Input tensor (e.g. [batch, channels, length] for 'linear').
        :return: Interpolated tensor.
        """
        # BUGFIX: ``align_corners`` may only be passed for interpolating modes;
        # passing it (even as False) with e.g. 'nearest' raises a ValueError
        # inside PyTorch. Pass None for non-interpolating modes instead.
        align_corners = False if self.mode in self._ALIGN_CORNERS_MODES else None
        return F.interpolate(x, size=self.size, mode=self.mode, align_corners=align_corners)

class ECGHeartBeatRepresentation(nn.Module):
    """
    Builds a per-sample mean heartbeat representation from patch-level ECG
    embeddings.

    Two cropping strategies are supported:

    * ``'fixed_duration'``: each heartbeat starts at an R-peak and spans a
      fixed number of time patches derived from the MRI acquisition window
      (``tpf * n_mri_frames * sampling_rate`` samples).
    * ``'rpeak_to_rpeak'``: each heartbeat spans the patches between two
      consecutive R-peaks.

    Every heartbeat segment is linearly resampled to ``target_length`` time
    steps, the channel-patch axis is collapsed ('mean', 'max', or a learned
    'linear' projection), and all heartbeats of a sample are averaged into a
    single [target_length, hidden_size] representation.
    """
    def __init__(
        self,
        target_length: int,
        time_patch_size: int,
        channel_patch_size: int,
        num_channels: int = 12,
        num_timesteps: int = 2500,
        n_mri_frames: int = 50,
        sampling_rate: int = 500,
        crop_method: str = 'rpeak_to_rpeak',  # Options: 'fixed_duration' or 'rpeak_to_rpeak'
        channel_reduction: str = 'mean'  # Options: 'mean', 'max' or 'linear'
    ) -> None:
        """
        Initializes the ECGHeartBeatRepresentation class.

        :param target_length: Desired number of time steps after interpolation.
        :param time_patch_size: Size of each time patch in ECG data (samples).
        :param channel_patch_size: Size of each channel patch in ECG data.
        :param num_channels: Number of ECG channels.
        :param num_timesteps: Total number of samples in the ECG recording.
        :param n_mri_frames: Number of MRI frames per heartbeat before downsampling.
        :param sampling_rate: ECG sampling rate in Hz.
        :param crop_method: Cropping method ('fixed_duration' or 'rpeak_to_rpeak').
        :param channel_reduction: How to collapse the channel-patch axis:
            'mean', 'max', or 'linear' (learned projection).
        :raises ValueError: If ``crop_method`` or ``channel_reduction`` is not
            one of the supported options.
        """
        super().__init__()
        # Fail fast on unsupported options instead of silently producing
        # wrongly-shaped outputs deep inside forward().
        if crop_method not in ('fixed_duration', 'rpeak_to_rpeak'):
            raise ValueError(f"Unknown crop_method: {crop_method!r}")
        if channel_reduction not in ('mean', 'max', 'linear'):
            raise ValueError(f"Unknown channel_reduction: {channel_reduction!r}")

        self.target_length = target_length
        self.time_patch_size = time_patch_size
        self.channel_patch_size = channel_patch_size
        self.num_channels = num_channels
        self.num_timesteps = num_timesteps
        self.n_mri_frames = n_mri_frames
        self.sampling_rate = sampling_rate
        self.crop_method = crop_method
        self.channel_reduction = channel_reduction

        self.num_time_patches = num_timesteps // time_patch_size
        self.num_channel_patches = num_channels // channel_patch_size

        if self.channel_reduction == 'linear':
            self.channel_reduction_linear = nn.Linear(self.num_channel_patches, 1)

        self.interpolation = Interpolate(size=self.target_length, mode='linear')

    def _zero_beat(self, hidden_size: int, device: torch.device) -> torch.Tensor:
        """Fallback representation when no valid heartbeat can be formed."""
        return torch.zeros(self.target_length, hidden_size, device=device)

    def _resample(self, segments: torch.Tensor) -> torch.Tensor:
        """
        Linearly resample stacked heartbeat segments to ``target_length`` steps.

        :param segments: [num_heartbeats, seq_len, num_channel_patches, hidden_size].
        :return: [num_heartbeats, target_length, num_channel_patches, hidden_size].
        """
        num_heartbeats, seq_len, _, hidden_size = segments.shape
        # Flatten (channel patches x hidden) into one feature axis so the 1-D
        # interpolation runs over the time axis only.
        flat = segments.reshape(num_heartbeats, seq_len, -1).permute(0, 2, 1).float()
        flat = self.interpolation(flat)  # [num_heartbeats, num_features, target_length]
        flat = flat.permute(0, 2, 1)
        return flat.view(num_heartbeats, self.target_length, self.num_channel_patches, hidden_size)

    def _reduce_channels(self, segments: torch.Tensor) -> torch.Tensor:
        """
        Collapse the channel-patch axis of ``segments``.

        :param segments: [num_heartbeats, target_length, num_channel_patches, hidden_size].
        :return: [num_heartbeats, target_length, hidden_size].
        """
        if self.channel_reduction == 'mean':
            return segments.mean(dim=2)
        if self.channel_reduction == 'max':
            return segments.max(dim=2).values
        # 'linear': learned projection over the channel-patch axis.
        segments = segments.permute(0, 1, 3, 2)  # [..., hidden_size, num_channel_patches]
        return self.channel_reduction_linear(segments).squeeze(-1)

    def _fixed_duration_beat(
        self, sample: torch.Tensor, rpeak_samples: torch.Tensor, tpf: float, hidden_size: int
    ) -> torch.Tensor:
        """
        Mean heartbeat via fixed-duration windows starting at each R-peak.

        :param sample: [num_time_patches, num_channel_patches, hidden_size].
        :param rpeak_samples: Sorted, positive R-peak sample indices.
        :param tpf: Time per MRI frame in seconds for this sample.
        :return: [target_length, hidden_size].
        """
        # Number of time patches covered by one MRI acquisition window.
        num_of_patches_float = (tpf * self.n_mri_frames * self.sampling_rate) / self.time_patch_size
        num_of_patches = int(math.ceil(num_of_patches_float)) + 1  # +1 to include the boundary patch

        rpeak_indices = rpeak_samples.long()
        start_patch_idx = rpeak_indices // self.time_patch_size
        end_patch_idx = start_patch_idx + num_of_patches

        # Keep only windows that lie fully inside the recording.
        valid = (start_patch_idx >= 0) & (end_patch_idx <= self.num_time_patches)
        start_patch_idx = start_patch_idx[valid]
        end_patch_idx = end_patch_idx[valid]

        if len(start_patch_idx) == 0:
            return self._zero_beat(hidden_size, sample.device)

        # All windows share the same length, so they stack directly.
        segments = torch.stack(
            [sample[s.item():e.item()] for s, e in zip(start_patch_idx, end_patch_idx)],
            dim=0,
        )  # [num_heartbeats, num_of_patches, num_channel_patches, hidden_size]

        segments = self._resample(segments)
        segments = self._reduce_channels(segments)  # [num_heartbeats, target_length, hidden_size]
        return segments.mean(dim=0)

    def _rpeak_to_rpeak_beat(
        self, sample: torch.Tensor, rpeak_samples: torch.Tensor, hidden_size: int
    ) -> torch.Tensor:
        """
        Mean heartbeat via consecutive R-peak to R-peak windows.

        :param sample: [num_time_patches, num_channel_patches, hidden_size].
        :param rpeak_samples: Sorted, positive R-peak sample indices.
        :return: [target_length, hidden_size].
        """
        rpeak_indices = rpeak_samples.long()
        start_patch_idx = rpeak_indices[:-1] // self.time_patch_size
        # +1 so the patch containing the next R-peak is included in the segment.
        end_patch_idx = (rpeak_indices[1:] // self.time_patch_size) + 1

        start_patch_idx = torch.clamp(start_patch_idx, 0, self.num_time_patches - 1)
        end_patch_idx = torch.clamp(end_patch_idx, 0, self.num_time_patches)

        if len(start_patch_idx) == 0:
            return self._zero_beat(hidden_size, sample.device)

        heartbeats = []
        for s_idx, e_idx in zip(start_patch_idx.tolist(), end_patch_idx.tolist()):
            if s_idx >= e_idx:
                continue
            segment = sample[s_idx:e_idx]  # [seq_len, num_channel_patches, hidden_size]
            if segment.shape[0] == 0:
                continue
            # Segments have variable lengths, so resample one at a time.
            heartbeats.append(self._resample(segment.unsqueeze(0)))

        if not heartbeats:
            return self._zero_beat(hidden_size, sample.device)

        heartbeats_tensor = torch.cat(heartbeats, dim=0)  # [num_heartbeats, target_length, C, H]
        heartbeats_tensor = self._reduce_channels(heartbeats_tensor)
        return heartbeats_tensor.mean(dim=0)

    def forward(self, ecg_embeddings: torch.Tensor, rpeaks: torch.Tensor, tpfs: torch.Tensor = None) -> torch.Tensor:
        """
        Processes ECG embeddings to form heartbeat representations based on the
        selected cropping method.

        :param ecg_embeddings: Tensor of shape [batch_size, num_tokens, hidden_size].
        :param rpeaks: Tensor of shape [batch_size, max_num_rpeaks], containing
            R-peak sample indices (non-positive entries are treated as padding).
        :param tpfs: Tensor of shape [batch_size] with time per MRI frame (s)
            per sample. Required when ``crop_method == 'fixed_duration'``.
        :return: Tensor of shape [batch_size, target_length, hidden_size].
        :raises ValueError: If ``tpfs`` is None with ``crop_method='fixed_duration'``.
        """
        batch_size, num_tokens, hidden_size = ecg_embeddings.shape

        if self.crop_method == 'fixed_duration' and tpfs is None:
            raise ValueError("tpfs is required when crop_method='fixed_duration'")

        ecg_embeddings = ecg_embeddings.view(
            batch_size, self.num_time_patches, self.num_channel_patches, hidden_size
        )

        all_mean_heartbeats = []

        for i in range(batch_size):
            # Drop padding entries and sort the remaining R-peaks.
            current_rpeaks = torch.sort(rpeaks[i][rpeaks[i] > 0]).values

            if len(current_rpeaks) == 0:
                all_mean_heartbeats.append(self._zero_beat(hidden_size, ecg_embeddings.device))
                continue

            if self.crop_method == 'fixed_duration':
                mean_heartbeat = self._fixed_duration_beat(
                    ecg_embeddings[i], current_rpeaks, tpfs[i].item(), hidden_size
                )
            else:  # 'rpeak_to_rpeak' (validated in __init__)
                mean_heartbeat = self._rpeak_to_rpeak_beat(
                    ecg_embeddings[i], current_rpeaks, hidden_size
                )

            all_mean_heartbeats.append(mean_heartbeat)

        return torch.stack(all_mean_heartbeats, dim=0)  # [batch_size, target_length, hidden_size]


def form_ed_ed_patches(ecg_embeddings: torch.Tensor,
                       rpeaks: torch.Tensor,
                       contrastive_T: int,
                       ecg_patch_size: int) -> torch.Tensor:
    """
    Forms ECG patches based on R-peaks, interpolates each segment to a fixed size,
    and averages the heart beats for each patient.

    Args:
        ecg_embeddings (torch.Tensor): Tensor of shape [batch_size, num_tokens, encoded_dim].
        rpeaks (torch.Tensor): Tensor of shape [batch_size, num_rpeaks], containing sample
            indices of R-peaks (non-positive entries are treated as padding).
        contrastive_T (int): Target number of tokens after interpolation.
        ecg_patch_size (int): Number of samples per patch (used to map R-peaks to token indices).

    Returns:
        torch.Tensor: Tensor of shape [batch_size, contrastive_T, encoded_dim].
    """
    batch_size, num_tokens, encoded_dim = ecg_embeddings.shape
    all_heart_beats = []

    # Sentinel so the segment before the first R-peak is also covered.
    zero_tensor = torch.tensor([0], device=ecg_embeddings.device, dtype=rpeaks.dtype)

    for i in range(batch_size):
        # Drop padding, prepend the start-of-recording sentinel, and sort.
        current_rpeaks = rpeaks[i][rpeaks[i] > 0]
        current_rpeaks = torch.cat([zero_tensor, current_rpeaks])
        current_rpeaks = torch.sort(current_rpeaks).values

        # Map R-peak sample indices to token (patch) indices.
        current_rpeaks_patches = current_rpeaks // ecg_patch_size

        # Ensure the tail of the recording is covered by a final interval.
        if current_rpeaks_patches[-1] < num_tokens - 1:
            tail = torch.tensor(
                [num_tokens - 1],
                device=ecg_embeddings.device,
                # BUGFIX: match the existing dtype so cat never mixes e.g.
                # float rpeaks with the default int64 tensor.
                dtype=current_rpeaks_patches.dtype,
            )
            current_rpeaks_patches = torch.cat([current_rpeaks_patches, tail])

        heart_beats = []

        # Each consecutive pair of patch indices delimits one heartbeat.
        for j in range(len(current_rpeaks_patches) - 1):
            start_patch_index = max(int(current_rpeaks_patches[j].item()), 0)
            end_patch_index = min(int(current_rpeaks_patches[j + 1].item()), num_tokens - 1)

            if start_patch_index > end_patch_index:
                continue

            # Extract the segment between R-peaks (inclusive of both endpoints).
            segment = ecg_embeddings[i, start_patch_index:end_patch_index + 1, :]  # [N, d]

            if segment.shape[0] == 0:
                continue

            # Resample the segment to a fixed temporal length.
            segment = segment.transpose(0, 1).unsqueeze(0)  # [1, d, N]
            segment = F.interpolate(
                segment, size=contrastive_T, mode='linear', align_corners=True
            )  # [1, d, T]
            heart_beats.append(segment.squeeze(0).transpose(0, 1))  # [T, d]

        if heart_beats:
            mean_heart_beat_patient = torch.stack(heart_beats, dim=0).mean(dim=0)  # [T, d]
        else:
            mean_heart_beat_patient = torch.zeros(contrastive_T, encoded_dim, device=ecg_embeddings.device)

        all_heart_beats.append(mean_heart_beat_patient)

    return torch.stack(all_heart_beats, dim=0)
