from typing import List, Dict, Union

import torch
from torch.nn.utils.rnn import pad_sequence
from torch import nn

from transformers import VideoMAEModel, VideoMAEConfig, ViTMAEConfig, ViTMAEModel
from transformers.models.videomae.modeling_videomae import get_multi_sincos_pos_embed

from ecgcmr.multimodal.multimodal_models.Projections import ProjectionHeadSimple


def get_collate_fn(use_peaks_location: bool):
    def collate_fn(batch: List[Dict[str, Union[torch.Tensor, int, float]]]) -> Dict[str, torch.Tensor]:
        """
        Collate a list of per-sample dicts into a single batched dict of tensors.

        :param batch: List of dictionaries containing the data for each sample.
        :return: Dictionary containing the batched data.
        """
        batched = {
            "ecg_aug": torch.stack([sample["ecg_aug"] for sample in batch]),
            "image_aug": torch.stack([sample["image_aug"] for sample in batch]),
            "patient_id": torch.tensor([sample["patient_id"] for sample in batch]),
        }

        if use_peaks_location:
            # Scalar tpf values batch into a 1-D float tensor.
            batched["tpfs"] = torch.tensor(
                [sample["tpf"] for sample in batch], dtype=torch.float32
            )
            # R-peak sequences vary in length, so zero-pad to the batch maximum.
            batched["rpeaks"] = pad_sequence(
                [sample["rpeaks"] for sample in batch],
                batch_first=True,
                padding_value=0.0,
            )

        return batched

    return collate_fn

def update_image_pos_embed(imag_encoder, imag_encoder_config, used_image_frames):
    """Recompute the video encoder's fixed sincos position embeddings for a new frame count."""
    cfg = imag_encoder_config
    # Patch grid after tubelet/patch partitioning: (temporal, height, width).
    new_grid = (
        used_image_frames // cfg.tubelet_size,
        cfg.image_size // cfg.patch_size,
        cfg.image_size // cfg.patch_size,
    )

    imag_encoder.embeddings.patch_embeddings.grid_size = new_grid

    pos_table = get_multi_sincos_pos_embed(
        grid_size=new_grid,
        embed_dim=cfg.hidden_size,
        add_cls_token=cfg.use_cls_token,
    )
    # Sinusoidal table is fixed, not learned — keep gradients off.
    imag_encoder.embeddings.position_embeddings = nn.Parameter(pos_table, requires_grad=False)

def update_ecg_pos_embed(ecg_encoder, ecg_encoder_config, used_ecg_steps) -> None:
    """Recompute the ECG encoder's fixed sincos position embeddings for a new time-step count."""
    cfg = ecg_encoder_config
    # Leads axis keeps its original extent; only the time axis changes.
    new_grid = (
        cfg.image_size[0] // cfg.patch_size[0],
        used_ecg_steps // cfg.patch_size[1],
    )

    ecg_encoder.embeddings.patch_embeddings.image_size = (cfg.image_size[0], used_ecg_steps)
    ecg_encoder.embeddings.patch_embeddings.grid_size = new_grid

    pos_table = get_multi_sincos_pos_embed(
        grid_size=new_grid,
        embed_dim=cfg.hidden_size,
        add_cls_token=cfg.use_cls_token,
    )
    # Sinusoidal table is fixed, not learned — keep gradients off.
    ecg_encoder.embeddings.position_embeddings = nn.Parameter(pos_table, requires_grad=False)

def initialize_image_encoder(pretrained_model_name_or_path, cfg, used_image_frames, from_scratch=False):
    """
    Build a VideoMAE image encoder, apply the configured freeze policy, and
    adapt its position embeddings to the number of frames actually used.

    :param pretrained_model_name_or_path: HF hub name or local path for config/weights.
    :param cfg: experiment config; reads cfg.training_mode.encoders.image.
    :param used_image_frames: number of video frames fed to the encoder.
    :param from_scratch: if True, use only the pretrained config, not the weights.
    :return: (encoder, encoder_config) tuple.
    """
    imag_encoder_config = VideoMAEConfig.from_pretrained(pretrained_model_name_or_path=pretrained_model_name_or_path)
    if from_scratch:
        # Random init: config shape only, no pretrained weights.
        imag_encoder = VideoMAEModel(config=imag_encoder_config)
        print(f"Initialized Image Encoder from scratch with config from {pretrained_model_name_or_path}")
    else:
        imag_encoder = VideoMAEModel.from_pretrained(pretrained_model_name_or_path=pretrained_model_name_or_path, config=imag_encoder_config)
        print(f"Loaded Image Encoder from {pretrained_model_name_or_path}")

    image_cfg = cfg.training_mode.encoders.image

    if image_cfg.train:
        imag_encoder.train()
        # Unfreeze everything first, then selectively re-freeze below.
        for p in imag_encoder.parameters():
            p.requires_grad = True

        # Patch/position embeddings stay frozen even in training mode.
        for p in imag_encoder.embeddings.parameters():
            p.requires_grad = False

        # Freeze the first N transformer layers.
        for layer in imag_encoder.encoder.layer[:image_cfg.freeze_first_n_layers]:
            for p in layer.parameters():
                p.requires_grad = False

        # Final layernorm is also kept frozen.
        for p in imag_encoder.layernorm.parameters():
            p.requires_grad = False
    else:
        # Fully frozen feature extractor.
        imag_encoder.eval()
        for p in imag_encoder.parameters():
            p.requires_grad = False

    # The pretrained pos-embed table only matches the pretrained frame count.
    if used_image_frames != imag_encoder_config.num_frames:
        update_image_pos_embed(imag_encoder, imag_encoder_config, used_image_frames)
        print('Updated image pos embed')

    print(f'Loaded model from {pretrained_model_name_or_path}')
    return imag_encoder, imag_encoder_config

def initialize_ecg_encoder(pretrained_model_name_or_path, cfg, used_ecg_steps, from_scratch=False):
    """
    Build a ViT-MAE ECG encoder, apply the configured freeze policy, and
    adapt its position embeddings to the number of time steps actually used.

    :param pretrained_model_name_or_path: HF hub name or local path for config/weights.
    :param cfg: experiment config; reads cfg.training_mode.encoders.ecg.
    :param used_ecg_steps: number of ECG time steps fed to the encoder.
    :param from_scratch: if True, use only the pretrained config, not the weights.
    :return: (encoder, encoder_config) tuple.
    """
    ecg_encoder_config = ViTMAEConfig.from_pretrained(pretrained_model_name_or_path=pretrained_model_name_or_path)
    if from_scratch:
        # Random init: config shape only, no pretrained weights.
        ecg_encoder = ViTMAEModel(config=ecg_encoder_config)
        print(f"Initialized ECG Encoder from scratch with config from {pretrained_model_name_or_path}")
    else:
        ecg_encoder = ViTMAEModel.from_pretrained(pretrained_model_name_or_path=pretrained_model_name_or_path, config=ecg_encoder_config)
        print(f"Loaded ECG Encoder from {pretrained_model_name_or_path}")

    ecg_cfg = cfg.training_mode.encoders.ecg

    if ecg_cfg.train:
        ecg_encoder.train()
        # Unfreeze everything first, then selectively re-freeze below.
        for p in ecg_encoder.parameters():
            p.requires_grad = True

        # Patch/position embeddings stay frozen even in training mode.
        for p in ecg_encoder.embeddings.parameters():
            p.requires_grad = False

        # Freeze the first N transformer layers.
        for layer in ecg_encoder.encoder.layer[:ecg_cfg.freeze_first_n_layers]:
            for p in layer.parameters():
                p.requires_grad = False

        # Final layernorm is also kept frozen.
        for p in ecg_encoder.layernorm.parameters():
            p.requires_grad = False
    else:
        # Fully frozen feature extractor.
        ecg_encoder.eval()
        for p in ecg_encoder.parameters():
            p.requires_grad = False

    # The pretrained pos-embed table only matches the pretrained time extent.
    if used_ecg_steps != ecg_encoder_config.image_size[1]:
        update_ecg_pos_embed(ecg_encoder, ecg_encoder_config, used_ecg_steps)
        print('Updated ecg pos embed')

    print(f'Loaded model from {pretrained_model_name_or_path}')
    return ecg_encoder, ecg_encoder_config

def init_image_projection_heads(cfg, imag_encoder_config, loss_type):
    """
    Create the image-branch projection heads required by the contrastive loss type.

    :param loss_type: "global" (CLS head only), "local" (token head only), or "both".
    :return: (cls_head_or_None, token_head_or_None)
    """
    proj_cfg = cfg.models.projection

    def _new_head():
        # CLS and token heads share the same architecture and dimensions.
        return ProjectionHeadSimple(input_dim=imag_encoder_config.hidden_size,
                                    hidden_dim=proj_cfg.hidden_dim,
                                    output_dim=proj_cfg.d_contrastive)

    cls_head = _new_head() if loss_type in ("global", "both") else None
    token_head = _new_head() if loss_type in ("local", "both") else None
    return cls_head, token_head

def init_ecg_projection_heads(cfg, ecg_encoder_config, loss_type):
    """
    Create the ECG-branch projection heads required by the contrastive loss type.

    :param loss_type: "global" (CLS head only), "local" (token head only), or "both".
    :return: (cls_head_or_None, token_head_or_None)
    """
    proj_cfg = cfg.models.projection

    def _new_head():
        # CLS and token heads share the same architecture and dimensions.
        return ProjectionHeadSimple(input_dim=ecg_encoder_config.hidden_size,
                                    hidden_dim=proj_cfg.hidden_dim,
                                    output_dim=proj_cfg.d_contrastive)

    cls_head = _new_head() if loss_type in ("global", "both") else None
    token_head = _new_head() if loss_type in ("local", "both") else None
    return cls_head, token_head

def downstream_collate_fn(batch):
    """
    Collate a list of (ecg_data, task_labels, disease_labels, rpeaks) tuples
    into a dictionary of batched tensors.

    :param batch: list of tuples (ecg_data_tensor, task_labels, disease_labels, rpeaks_t);
                  rpeaks may be None for samples without peak annotations.
    :return: dict with keys "ecg_data", "task_labels", "disease_labels", "rpeaks".
    """
    ecgs, tasks, diseases, rpeaks_raw = zip(*batch)

    # Missing r-peak annotations become zero-length tensors so pad_sequence
    # can still produce a rectangular [batch, max_len] tensor (zero-padded).
    rpeaks_list = [
        torch.empty(0, dtype=torch.float32) if r is None else r
        for r in rpeaks_raw
    ]

    return {
        "ecg_data": torch.stack(list(ecgs), dim=0),            # [batch, ...]
        "task_labels": torch.stack(list(tasks), dim=0),        # [batch, num_tasks]
        "disease_labels": torch.stack(list(diseases), dim=0),  # [batch, num_diseases]
        "rpeaks": pad_sequence(rpeaks_list, batch_first=True, padding_value=0.0),
    }