import os

from omegaconf import DictConfig, OmegaConf

import torch
from torch import nn

from ecgcmr.multimodal.multimodal_loss.CombinedLoss import MultiModalLoss
from ecgcmr.multimodal.multimodal_models.EmbeddingsReduction import EmbedReduction
from ecgcmr.multimodal.multimodal_utils.misc import initialize_image_encoder, initialize_ecg_encoder, init_image_projection_heads, init_ecg_projection_heads

from ecgcmr.utils.LastLayer import LastLayerConfiguration
from ecgcmr.utils.misc import create_optimizer_and_scheduler_multiple_lr, create_optimizer_and_scheduler
from ecgcmr.utils.misc import compute_batch_similarity, compute_sample_similarity


class MultiModalCLIPWithEval(nn.Module):
    """CLIP-style multimodal model pairing a CMR image encoder with an ECG encoder.

    The model is trained contrastively on (ECG, image) view pairs and additionally
    owns a small downstream head (linear probe or fine-tune) evaluated on pooled
    ECG features. It also implements the freeze/unfreeze bookkeeping for the two
    training phases (main contrastive task vs. downstream task) and checkpointing.
    """

    def __init__(self,
                 cfg: "DictConfig",
                 save_dir: str,
                 pretrained_model_name_or_path_image: str,
                 pretrained_model_name_or_path_ecg: str,
                 ) -> None:
        """Build encoders, projection heads, reducer, loss and downstream head.

        Args:
            cfg: Full experiment configuration (OmegaConf). NOTE: derived patch
                counts are written back into ``cfg`` so downstream components
                (reducer, loss) can read them.
            save_dir: Directory where checkpoints are written.
            pretrained_model_name_or_path_image: Pretrained path for the image encoder.
            pretrained_model_name_or_path_ecg: Pretrained path for the ECG encoder.
        """
        super().__init__()

        self.cfg = cfg
        self.save_dir = save_dir

        self.main_batch_size = cfg.dataset.batch_size
        self.downstream_batch_size = cfg.downstream_task.batch_size

        self.loss_type = cfg.training_mode.loss.type

        # Effective input lengths depend on whether the corresponding augmentation
        # (temporal subsampling for imaging, random crop for ECG) is enabled.
        self.used_image_frames = (cfg.augmentations.imaging.time_sample.result_n_frames
                                  if cfg.augmentations.imaging.time_sample.enable
                                  else cfg.dataset.image_time_steps)
        self.used_ecg_steps = (cfg.augmentations.ecg.random_crop.ecg_time_steps
                               if cfg.augmentations.ecg.random_crop.enable
                               else cfg.dataset.ecg_time_steps)

        self.imag_encoder, self.imag_encoder_config = initialize_image_encoder(
            pretrained_model_name_or_path_image, cfg, self.used_image_frames,
            from_scratch=cfg.training_mode.encoders.image.from_scratch)
        self.ecg_encoder, self.ecg_encoder_config = initialize_ecg_encoder(
            pretrained_model_name_or_path_ecg, cfg, self.used_ecg_steps,
            from_scratch=cfg.training_mode.encoders.ecg.from_scratch)

        self.projection_image_global, self.projection_image_local = init_image_projection_heads(
            cfg, self.imag_encoder_config, self.loss_type)
        self.projection_ecg_global, self.projection_ecg_local = init_ecg_projection_heads(
            cfg, self.ecg_encoder_config, self.loss_type)

        # ECG patch grid: channels x time (2D patching of the ECG "image").
        self.num_ecg_channel_patches = self.ecg_encoder_config.image_size[0] // self.ecg_encoder_config.patch_size[0]
        self.num_ecg_time_patches = self.used_ecg_steps // self.ecg_encoder_config.patch_size[1]
        self.num_ecg_patches = self.num_ecg_channel_patches * self.num_ecg_time_patches

        # Image patch grid: tubelets over time x (square) spatial patch grid.
        self.num_image_time_patches = self.used_image_frames // self.imag_encoder_config.tubelet_size
        self.num_image_spatial_patches = (self.imag_encoder_config.image_size // self.imag_encoder_config.patch_size) ** 2
        self.num_image_patches = self.num_image_time_patches * self.num_image_spatial_patches

        # Publish the derived patch counts on the config so that downstream
        # components (reducer, loss) can read them; struct mode is temporarily
        # disabled because these keys do not exist in the schema.
        OmegaConf.set_struct(cfg, False)
        cfg.num_ecg_channel_patches = self.num_ecg_channel_patches
        cfg.num_ecg_time_patches = self.num_ecg_time_patches
        cfg.num_ecg_patches = self.num_ecg_patches

        cfg.num_image_time_patches = self.num_image_time_patches
        cfg.num_image_spatial_patches = self.num_image_spatial_patches
        cfg.num_image_patches = self.num_image_patches
        cfg.used_image_frames = self.used_image_frames
        OmegaConf.set_struct(cfg, True)

        self.reducer = EmbedReduction(cfg=cfg, imag_encoder_config=self.imag_encoder_config,
                                      ecg_encoder_config=self.ecg_encoder_config)
        self.criterion = MultiModalLoss(cfg=cfg, loss_type=self.loss_type,
                                        T_contrastive=self.num_image_time_patches)

        self.initialize_task_dependent_components(cfg)

        self.ecg_trainable = cfg.training_mode.encoders.ecg.train
        self.image_trainable = cfg.training_mode.encoders.image.train

        self.freeze_first_n_layers_image = cfg.training_mode.encoders.image.freeze_first_n_layers
        self.freeze_first_n_layers_ecg = cfg.training_mode.encoders.ecg.freeze_first_n_layers

    def initialize_task_dependent_components(self, cfg: "DictConfig") -> None:
        """Create the downstream head, its criterion, metrics and ECG layernorm.

        Raises:
            ValueError: If ``cfg.downstream_task.training_scheme`` is neither
                'linear_probing' nor 'fine_tune'.
        """
        use_mlp = cfg.downstream_task.get('use_mlp', False)
        training_scheme = cfg.downstream_task.get('training_scheme', 'linear_probing')

        if training_scheme == 'linear_probing':
            self.training_scheme_setting = "LP"
        elif training_scheme == 'fine_tune':
            self.training_scheme_setting = "FN"
        else:
            # Fail fast: previously an unknown scheme left the attribute unset,
            # surfacing later as an opaque AttributeError.
            raise ValueError(f"Unknown training_scheme: {training_scheme!r}")

        self.use_mlp_setting = 'MLP' if use_mlp else 'LinearLayer'

        last_layer_config = LastLayerConfiguration(cfg=cfg,
                                                   encoded_dim=self.ecg_encoder_config.hidden_size,
                                                   mask_labels=False,
                                                   training_scheme=self.training_scheme_setting,
                                                   use_mlp=self.use_mlp_setting)
        self.task = last_layer_config.task
        self.last_layer = last_layer_config.create_last_layer()

        self.last_layer_criterion = last_layer_config.create_criterion()

        self.downstream_task_metrics = last_layer_config.create_metrics()
        self.plotter_val = last_layer_config.create_plotter()
        self.regression = last_layer_config.create_sklearn_model()

        self.train_every_n_epochs = self.cfg.downstream_task.epoch_interval
        self.reinitialize_every_epoch = self.cfg.downstream_task.reinitialize_every_epoch

        # Kept so the head can be re-created from scratch each epoch if requested.
        self.initial_last_layer_config = last_layer_config

        self.ecg_layernorm_regression = self._build_ecg_layernorm()

    def _build_ecg_layernorm(self) -> nn.LayerNorm:
        """Build a fresh LayerNorm over pooled ECG features for the downstream head.

        torch's LayerNorm already initializes weight=1 and bias=0, so no explicit
        constant init is needed.
        """
        return nn.LayerNorm(normalized_shape=self.ecg_encoder_config.hidden_size,
                            eps=self.ecg_encoder_config.layer_norm_eps,
                            dtype=torch.float32)

    def reinitialize_last_layer_and_norm(self, device):
        """Re-create the downstream head and its layernorm from scratch on ``device``."""
        self.last_layer = self.initial_last_layer_config.create_last_layer().to(device)
        self.ecg_layernorm_regression = self._build_ecg_layernorm().to(device)

    def _apply_projection(self, proj_type, tokens, global_token):
        """Run the (optional) local/global projection heads of one modality.

        Args:
            proj_type: 'ecg' or 'image'.
            tokens: Local token embeddings.
            global_token: Global (pooled/CLS-style) embedding.

        Returns:
            Tuple ``(projected_tokens, projected_global)``; each entry is None
            when the corresponding projection head is disabled.

        Raises:
            ValueError: If ``proj_type`` is unknown (previously this surfaced
                as an UnboundLocalError).
        """
        if proj_type == 'ecg':
            local_head, global_head = self.projection_ecg_local, self.projection_ecg_global
        elif proj_type == 'image':
            local_head, global_head = self.projection_image_local, self.projection_image_global
        else:
            raise ValueError(f"Unknown proj_type: {proj_type!r}")

        projected_tokens = local_head(tokens) if local_head is not None else None
        projected_global = global_head(global_token) if global_head is not None else None
        return projected_tokens, projected_global

    def forward_ecg(self, ecg_view: torch.Tensor, rpeaks: torch.Tensor = None, tpfs: torch.Tensor = None) -> dict:
        """Encode one ECG view and project its local/global embeddings.

        Args:
            ecg_view: ECG input tensor for the ECG encoder.
            rpeaks: Optional R-peak information, forwarded to the reducer.
            tpfs: Optional per-sample side information, forwarded to the reducer.

        Returns:
            Dict with raw tokens, global token and their (optional) projections.
        """
        hidden_state = self.ecg_encoder(pixel_values=ecg_view, apply_masking=False,
                                        use_layernorm=False).last_hidden_state
        ecg_tokens_dict = self.reducer(hidden_state=hidden_state, rpeaks=rpeaks, tpfs=tpfs,
                                       encoder_config=self.ecg_encoder_config, mode='ecg')
        proj_local, proj_global = self._apply_projection(proj_type='ecg',
                                                         tokens=ecg_tokens_dict["ecg_all_tokens"],
                                                         global_token=ecg_tokens_dict["ecg_global_token"])
        return {
            "ecg_all_tokens": ecg_tokens_dict["ecg_all_tokens"],
            "ecg_global_token": ecg_tokens_dict["ecg_global_token"],
            "proj_ecg_local_emb": proj_local,
            "proj_ecg_global_emb": proj_global,
        }

    def forward_mri(self, img_view: torch.Tensor) -> dict:
        """Encode one image view and project its local/global embeddings.

        Args:
            img_view: Image input tensor for the image encoder.

        Returns:
            Dict with raw tokens, global token and their (optional) projections.
        """
        hidden_state = self.imag_encoder(pixel_values=img_view, apply_masking=False,
                                         use_layernorm=False).last_hidden_state
        img_tokens_dict = self.reducer(hidden_state=hidden_state,
                                       encoder_config=self.imag_encoder_config, mode='image')
        proj_local, proj_global = self._apply_projection(proj_type='image',
                                                         tokens=img_tokens_dict["img_all_tokens"],
                                                         global_token=img_tokens_dict["img_global_token"])
        return {
            "img_all_tokens": img_tokens_dict["img_all_tokens"],
            "img_global_token": img_tokens_dict["img_global_token"],
            "proj_img_local_emb": proj_local,
            "proj_img_global_emb": proj_global,
        }

    def forward_contrastive(self, ecg_aug, image_aug, rpeaks=None, tpfs=None):
        """Contrastive forward pass over an augmented (ECG, image) view pair.

        Returns:
            Tuple ``(losses_dict, similarities_dict)`` where the similarities
            are diagnostic sample-level (and, for loss type 'both', projected
            token-level) similarity matrices.
        """
        ecg_embeddings_dict = self.forward_ecg(ecg_view=ecg_aug, rpeaks=rpeaks, tpfs=tpfs)
        img_embeddings_dict = self.forward_mri(img_view=image_aug)

        losses_dict = self.criterion(ecg_embeddings_dict, img_embeddings_dict)

        similarities_dict = {
            "batch_sample_similarity": compute_sample_similarity(
                ecg_embeddings_dict["ecg_global_token"],
                img_embeddings_dict["img_global_token"],
            )
        }

        # Projected local-token similarity is only computed when both global and
        # local contrastive losses are active.
        if self.loss_type == 'both':
            similarities_dict["batch_time_similarity_for_projected"] = compute_batch_similarity(
                ecg_embeddings_dict["proj_ecg_local_emb"],
                img_embeddings_dict["proj_img_local_emb"],
            )

        return losses_dict, similarities_dict

    def forward_linear_probe(self, batch):
        """Downstream pass: mean-pooled ECG features -> layernorm -> head -> loss.

        Args:
            batch: Tuple ``(ecg, labels)``.

        Returns:
            Dict with the downstream loss, pooled features, logits and labels.
        """
        ecg, labels = batch
        hidden_state = self.ecg_encoder(pixel_values=ecg, apply_masking=False,
                                        use_layernorm=False).last_hidden_state

        # Mean-pool patch tokens; drop the CLS token if the encoder prepends one.
        if self.ecg_encoder_config.use_cls_token:
            all_tokens = hidden_state[:, 1:]
        else:
            all_tokens = hidden_state
        features = all_tokens.mean(dim=1)

        features_normed = self.ecg_layernorm_regression(features)
        logits = self.last_layer(features_normed)
        loss = self.last_layer_criterion(logits, labels)

        return {
            "downstream_loss": loss,
            "features": features,
            "logits": logits,
            "labels": labels
        }

    def save_checkpoint(self, name: str, main_optimizer, main_scheduler, optimizer_last, epoch: int) -> None:
        """Persist trainable encoders (HF format) and all other state via torch.

        Args:
            name: Checkpoint prefix (e.g. 'best', 'last').
            main_optimizer: Optimizer of the main contrastive task.
            main_scheduler: Mapping holding the main LR scheduler under 'scheduler'.
            optimizer_last: Optimizer of the downstream head.
            epoch: Current epoch, stored for resuming.
        """
        # Encoders are only saved when they are actually being trained.
        if self.ecg_trainable:
            hf_save_path_ecg = os.path.join(self.save_dir, f"{name}_hf_ecg_model")
            os.makedirs(hf_save_path_ecg, exist_ok=True)
            self.ecg_encoder.save_pretrained(hf_save_path_ecg)
        if self.image_trainable:
            hf_save_path_mri = os.path.join(self.save_dir, f"{name}_hf_mri_model")
            os.makedirs(hf_save_path_mri, exist_ok=True)
            self.imag_encoder.save_pretrained(hf_save_path_mri)

        # Save the remaining components using PyTorch. Disabled projection heads
        # are stored as None.
        checkpoint = {
            "epoch": epoch,
            "reducer": self.reducer.state_dict(),
            "projection_image_global": self.projection_image_global.state_dict(),
            "projection_image_local": self.projection_image_local.state_dict() if self.projection_image_local else None,
            "projection_ecg_global": self.projection_ecg_global.state_dict(),
            "projection_ecg_local": self.projection_ecg_local.state_dict() if self.projection_ecg_local else None,
            "ecg_layernorm": self.ecg_layernorm_regression.state_dict(),
            "last_layer": self.last_layer.state_dict(),
            "main_optimizer": main_optimizer.state_dict(),
            "main_scheduler": main_scheduler['scheduler'].state_dict(),
            "optimizer_last": optimizer_last.state_dict(),
        }

        model_dir = os.path.join(self.save_dir, f"{name}_model")
        os.makedirs(model_dir, exist_ok=True)
        # os.path.join instead of a hard-coded '/' for portability.
        torch.save(checkpoint, os.path.join(model_dir, "checkpoint.pth"))

    def configure_optimizers(self, num_main_steps, num_downstream_steps):
        """Build the main (multi-LR) and downstream optimizers/schedulers.

        Args:
            num_main_steps: Batches per epoch for the main contrastive task.
            num_downstream_steps: Batches per epoch for the downstream task.

        Returns:
            ``([main_optimizer, last_layer_optimizer],
               [main_scheduler, last_layer_scheduler])``
        """
        # 'lr' groups the modules trained with the shared (non-encoder) learning
        # rate; which projection heads join depends on the active loss type.
        model_group_dict = {
            'ecg_encoder': self.ecg_encoder,
            'imag_encoder': self.imag_encoder,
            'lr': [self.reducer]
        }
        if self.loss_type in ('global', 'both'):
            model_group_dict['lr'].extend([self.projection_image_global, self.projection_ecg_global])
        if self.loss_type in ('local', 'both'):
            model_group_dict['lr'].extend([self.projection_image_local, self.projection_ecg_local])

        main_optimizer, main_scheduler = create_optimizer_and_scheduler_multiple_lr(
            model_group_dict=model_group_dict,
            optimizer_params=self.cfg.models.params,
            num_batches_per_epoch=num_main_steps
        )

        last_layer_optimizer, last_layer_scheduler = create_optimizer_and_scheduler(
            models=[self.last_layer, self.ecg_layernorm_regression],
            optimizer_params=self.cfg.downstream_task.params,
            num_batches_per_epoch=num_downstream_steps,
        )

        return [main_optimizer, last_layer_optimizer], [main_scheduler, last_layer_scheduler]

    def recreate_downstream_optimizer(self, num_downstream_steps):
        """Build a fresh optimizer/scheduler pair for the (re-initialized) head."""
        last_layer_optimizer, last_layer_scheduler = create_optimizer_and_scheduler(
            models=[self.last_layer, self.ecg_layernorm_regression],
            optimizer_params=self.cfg.downstream_task.params,
            num_batches_per_epoch=num_downstream_steps,
        )
        return last_layer_optimizer, last_layer_scheduler

    def prepare_for_main_training(self):
        """Prepare model for main task training.

        Encoders train (minus embeddings and the first n layers) only when
        configured as trainable; reducer and projection heads always train;
        the downstream head stays frozen.
        """
        self.freeze_embeddings(self.imag_encoder)
        self.freeze_embeddings(self.ecg_encoder)

        # Image encoder
        if self.image_trainable:
            self.imag_encoder.train()
            self.freeze_first_n_layers(self.imag_encoder, self.freeze_first_n_layers_image)
        else:
            self.imag_encoder.eval()
            self.freeze_model(self.imag_encoder)

        # ECG encoder
        if self.ecg_trainable:
            self.ecg_encoder.train()
            self.freeze_first_n_layers(self.ecg_encoder, self.freeze_first_n_layers_ecg)
        else:
            self.ecg_encoder.eval()
            self.freeze_model(self.ecg_encoder)

        # Reducer and projection heads are always trained in the main task.
        self.unfreeze_model(self.reducer)
        self.reducer.train()
        self.unfreeze_projection_heads()

        # The downstream head never trains during the main task.
        self.freeze_model(self.last_layer)
        self.freeze_model(self.ecg_layernorm_regression)

    def _freeze_all_components(self):
        """Put the whole model in eval mode and freeze every component."""
        self.eval()
        self.freeze_embeddings(self.imag_encoder)
        self.freeze_embeddings(self.ecg_encoder)
        self.freeze_model(self.imag_encoder)
        self.freeze_model(self.ecg_encoder)
        self.freeze_model(self.reducer)
        self.freeze_projection_heads()
        self.freeze_model(self.last_layer)
        self.freeze_model(self.ecg_layernorm_regression)

    def prepare_for_main_validation(self):
        """Prepare model for main task validation (everything frozen, eval mode)."""
        self._freeze_all_components()

    def prepare_for_downstream_training(self, device, num_downstream_steps):
        """Prepare model for downstream training: only the head and its layernorm train.

        Args:
            device: Device the (possibly re-created) head is moved to.
            num_downstream_steps: Batches per epoch for the downstream task.

        Returns:
            A freshly created downstream optimizer when the head was
            re-initialized this epoch, else None.
        """
        last_layer_optimizer = None

        if self.reinitialize_every_epoch:
            self.reinitialize_last_layer_and_norm(device=device)
            # NOTE(review): the freshly created scheduler is discarded here, so
            # the caller keeps using the previous one — confirm this is intended.
            last_layer_optimizer, _ = self.recreate_downstream_optimizer(num_downstream_steps)

        self.eval()

        # Freeze everything except the downstream head and its layernorm.
        self.freeze_embeddings(self.imag_encoder)
        self.freeze_embeddings(self.ecg_encoder)
        self.freeze_model(self.imag_encoder)
        self.freeze_model(self.ecg_encoder)
        self.freeze_model(self.reducer)
        self.freeze_projection_heads()

        self.unfreeze_model(self.last_layer)
        self.last_layer.train()

        self.unfreeze_model(self.ecg_layernorm_regression)
        self.ecg_layernorm_regression.train()

        return last_layer_optimizer

    def prepare_for_downstream_validation(self):
        """Prepare model for downstream task validation (everything frozen, eval mode)."""
        self._freeze_all_components()

    def freeze_embeddings(self, model):
        """Freeze the embeddings layer of the given encoder."""
        for param in model.embeddings.parameters():
            param.requires_grad = False

    def freeze_model(self, model):
        """Disable gradients for every parameter of ``model``."""
        for param in model.parameters():
            param.requires_grad = False

    def unfreeze_model(self, model):
        """Enable gradients for every parameter of ``model``."""
        for param in model.parameters():
            param.requires_grad = True

    def freeze_first_n_layers(self, model, n):
        """Freeze the first ``n`` encoder layers and unfreeze the rest.

        The encoder's final layernorm is always kept frozen here.
        """
        for i, layer in enumerate(self._get_encoder_layers(model)):
            layer_trainable = i >= n
            for param in layer.parameters():
                param.requires_grad = layer_trainable

        for param in model.layernorm.parameters():
            param.requires_grad = False

    def _get_encoder_layers(self, model):
        """Return the encoder's transformer layers ('layer' or 'layers' attribute)."""
        for attr in ('layer', 'layers'):
            if hasattr(model.encoder, attr):
                return getattr(model.encoder, attr)
        raise AttributeError('Model encoder does not have "layer" or "layers" attribute.')

    def _projection_heads(self):
        """Yield the projection heads that exist (disabled heads are None)."""
        for head in (self.projection_image_global, self.projection_image_local,
                     self.projection_ecg_global, self.projection_ecg_local):
            if head:
                yield head

    def freeze_projection_heads(self):
        """Freeze all existing projection heads and put them in eval mode."""
        for head in self._projection_heads():
            self.freeze_model(head)
            head.eval()

    def unfreeze_projection_heads(self):
        """Unfreeze all existing projection heads and put them in train mode."""
        for head in self._projection_heads():
            self.unfreeze_model(head)
            head.train()