import torch
import torch.nn.functional as F


def forward_ed_ed_patches(self, embeddings: torch.Tensor, rpeaks: torch.Tensor) -> torch.Tensor:
    """
    Build per-sample contrastive embeddings from R-peak-to-R-peak cardiac cycles.

    For each sample, every consecutive R-peak pair delimits one cycle; the patch
    embeddings covering that cycle are resampled along time to ``self.t_contrastive``
    steps, passed through ``self.signal_local_projection.forward_2``, and averaged
    over all cycles of the sample.

    Args:
        embeddings: per-sample patch embeddings; indexed as (batch, patch, d).
            NOTE(review): assumed layout — confirm against the caller.
        rpeaks: R-peak positions per sample, in raw-sample coordinates
            (they are divided by ``self.patch_size[1]`` to get patch indices).

    Returns:
        Tensor of shape (batch, t_contrastive, d_proj): mean cycle embedding
        per sample.
    """
    batch_size = embeddings.shape[0]
    local_embeddings_batch = []

    for i in range(batch_size):
        current_rpeaks = rpeaks[i]
        current_embeddings = embeddings[i]

        # Consecutive R-peak pairs delimit one heart cycle each.
        intervals = [(current_rpeaks[j], current_rpeaks[j + 1]) for j in range(len(current_rpeaks) - 1)]
        segments = []

        for start, end in intervals:
            # Convert sample coordinates to patch indices; the (end - 1) // p + 1
            # form makes the patch containing the last sample inclusive.
            start_patch_index = start // self.patch_size[1]
            end_patch_index = (end - 1) // self.patch_size[1] + 1

            segment = current_embeddings[start_patch_index:end_patch_index]  # (l, d)

            # Resample the time axis to t_contrastive; feature axis keeps its size.
            # align_corners=False is the documented behavior previously implied by
            # passing None (which only emitted a warning for bilinear mode).
            segment_interpolated = F.interpolate(segment.unsqueeze(0).unsqueeze(0),
                                                 size=(self.t_contrastive, segment.size(-1)),
                                                 mode='bilinear', align_corners=False)
            # Remove exactly the two singleton dims added above. The former global
            # torch.squeeze() also dropped a t_contrastive or d axis of size 1,
            # corrupting the output shape for those configurations.
            segment_interpolated = segment_interpolated.squeeze(0).squeeze(0)  # (t_contrastive, d)

            segments.append(segment_interpolated)

        stacked_segments = torch.stack(segments)

        stacked_segments = self.signal_local_projection.forward_2(stacked_segments=stacked_segments)

        # Mean over all detected cycles of this sample.
        local_embeddings_sample = stacked_segments.mean(dim=0)
        local_embeddings_batch.append(local_embeddings_sample)

    local_embeddings_batch = torch.stack(local_embeddings_batch)

    return local_embeddings_batch


def form_ed_ed_patches_v2(ecg_embeddings: torch.Tensor,
                       rpeaks: torch.Tensor,
                       contrastive_T: int,
                       ecg_patch_size: int) -> torch.Tensor:
    """
    Average heartbeat representation per patient.

    Each sample's patch embeddings are split at its R-peaks (with an implicit
    leading peak at position 0 and a trailing interval to the end when one
    remains), every beat segment is resampled to ``contrastive_T`` time steps,
    and the segments are averaged into one (T, d) tensor per sample.

    Returns a tensor of shape (b, contrastive_T, d).
    """
    patient_means = []

    for sample_idx in range(ecg_embeddings.shape[0]):
        zero = torch.tensor([0], device=ecg_embeddings.device)
        positive_peaks = rpeaks[sample_idx][rpeaks[sample_idx] > 0]
        peaks = torch.cat([zero, positive_peaks])

        # Pair consecutive peaks into (start, end) beat boundaries.
        boundaries = list(zip(peaks[:-1], peaks[1:]))

        # NOTE(review): this compares an R-peak position (sample coordinates)
        # against the embedding feature dimension (shape[-1]) — looks like a
        # unit mismatch; verify the intended upper bound with the caller
        # (e.g. num_patches * ecg_patch_size). Behavior preserved as-is here.
        if peaks[-1] < ecg_embeddings.shape[-1]:
            boundaries.append((peaks[-1], ecg_embeddings.shape[-1]))

        beats = []
        for lo, hi in boundaries:
            patch_lo = lo // ecg_patch_size
            patch_hi = hi // ecg_patch_size

            beat = ecg_embeddings[sample_idx, patch_lo:patch_hi]
            beat = F.interpolate(beat.unsqueeze(0).unsqueeze(0),
                                 size=(contrastive_T, ecg_embeddings.size(-1)),
                                 mode='bilinear',
                                 align_corners=True)  # 1, 1, T, d
            beats.append(beat.squeeze(0))

        # (num_beats, T, d) -> mean over beats -> (T, d) for this patient.
        patient_means.append(torch.cat(beats, dim=0).mean(dim=0))

    return torch.stack(patient_means, dim=0)  # b, T, d




    # if self.cfg.training_mode.loss.type in ["local", "both"]:
    #     if self.cfg.training_mode.reduction.ecg.interpolate_2d:
    #         heart_beats = self.heartbeat_representation(ecg_embeddings=all_tokens, p_offset=p_offset, t_offset=t_offset)

    #     # else:
    #     #     reshaped_all_tokens = torch.reshape(all_tokens, (batch_size, 
    #     #                                                      self.cfg.num_ecg_time_patches, # time_steps // patch_size
    #     #                                                      self.cfg.num_ecg_channel_patches,  # 12 // patch_size
    #     #                                                      encoder_config.hidden_size)) # d
            
    #     #     method = self.cfg.training_mode.reduction.ecg.all_tokens

    #     #     if method == 'mean':
    #     #         all_tokens_mean_channel = reshaped_all_tokens.mean(dim=2) # b, time_steps // patch_size, d

    #     #     elif method == 'linear':
    #     #         reshaped_all_tokens = reshaped_all_tokens.view(-1, self.cfg.num_ecg_channel_patches)
    #     #         linear_all_tokens = self.ecg_all_tokens_linear_layer_reduction_channel(reshaped_all_tokens)
    #     #         all_tokens_mean_channel = linear_all_tokens.view(batch_size, self.cfg.num_ecg_time_patches, encoder_config.hidden_size)
            
    #     #     heart_beats = form_ed_ed_patches(ecg_embeddings=all_tokens_mean_channel,
    #     #                                      rpeaks=rpeaks,
    #     #                                      contrastive_T=self.cfg.num_image_time_patches,
    #     #                                      ecg_patch_size=encoder_config.patch_size[1]) # b, T // tubelet_size, d
        
    #     all_tokens = self.ecg_layernorm_local(heart_beats)