import os

from typing import Sequence
from omegaconf import DictConfig, OmegaConf

import torch
import lightning as L

from lightning import Callback
from lightning.pytorch.callbacks import ModelCheckpoint, LearningRateMonitor

from ecgcmr.multimodal.multimodal_loss.CombinedLoss import MultiModalLoss
from ecgcmr.multimodal.multimodal_models.EmbeddingsReduction import EmbedReduction
from ecgcmr.multimodal.multimodal_utils.misc import initialize_image_encoder, initialize_ecg_encoder, init_image_projection_heads, init_ecg_projection_heads

from ecgcmr.utils.misc import create_optimizer_and_scheduler_multiple_lr


class ImageECGClip(L.LightningModule):
    """CLIP-style contrastive pretraining module pairing cardiac MRI
    image sequences with ECG signals.

    Wraps two pretrained encoders (image + ECG), modality-specific
    global/local projection heads, an embedding-reduction module
    (``EmbedReduction``) and a multi-modal contrastive loss
    (``MultiModalLoss``). Patch-count bookkeeping derived from the
    encoder configs is written back into ``cfg`` so downstream modules
    can read it.
    """

    def __init__(
        self,
        cfg: DictConfig,
        save_dir: str,
        pretrained_model_name_or_path_image: str,
        pretrained_model_name_or_path_ecg: str,
        ) -> None:
        """Build encoders, projection heads, reducer and losses.

        Args:
            cfg: Experiment configuration (mutated in-place with derived
                patch counts, see below).
            save_dir: Root directory for HF encoder checkpoints and
                Lightning checkpoints.
            pretrained_model_name_or_path_image: HF name/path of the
                image encoder weights.
            pretrained_model_name_or_path_ecg: HF name/path of the ECG
                encoder weights.
        """
        super().__init__()

        self.cfg = cfg
        self.save_dir = save_dir

        self.batch_size = cfg.dataset.batch_size

        # One of 'global', 'local' or 'both' — controls which projection
        # heads exist (see init_*_projection_heads) and the loss terms.
        self.loss_type = cfg.training_mode.loss.type

        # Effective temporal lengths after augmentation: cropped/sampled
        # lengths when the corresponding augmentation is enabled,
        # otherwise the raw dataset lengths.
        self.used_image_frames = cfg.augmentations.imaging.time_sample.result_n_frames if cfg.augmentations.imaging.time_sample.enable else cfg.dataset.image_time_steps
        self.used_ecg_steps = cfg.augmentations.ecg.random_crop.ecg_time_steps if cfg.augmentations.ecg.random_crop.enable else cfg.dataset.ecg_time_steps

        self.imag_encoder, self.imag_encoder_config = initialize_image_encoder(pretrained_model_name_or_path_image, cfg, self.used_image_frames)
        self.ecg_encoder, self.ecg_encoder_config = initialize_ecg_encoder(pretrained_model_name_or_path_ecg, cfg, self.used_ecg_steps)

        # Heads may be None depending on loss_type (handled in _apply_projection).
        self.projection_image_global, self.projection_image_local = init_image_projection_heads(cfg, self.imag_encoder_config, self.loss_type)
        self.projection_ecg_global, self.projection_ecg_local = init_ecg_projection_heads(cfg, self.ecg_encoder_config, self.loss_type)

        # ECG patch grid: channels x time, derived from the ViT-style
        # image_size / patch_size of the ECG encoder config.
        self.num_ecg_channel_patches = self.ecg_encoder_config.image_size[0] // self.ecg_encoder_config.patch_size[0]
        self.num_ecg_time_patches = self.used_ecg_steps // self.ecg_encoder_config.patch_size[1]
        self.num_ecg_patches = self.num_ecg_channel_patches * self.num_ecg_time_patches

        # Video patch grid: temporal tubelets x spatial patches.
        self.num_image_time_patches = self.used_image_frames // self.imag_encoder_config.tubelet_size
        self.num_image_spatial_patches = (self.imag_encoder_config.image_size // self.imag_encoder_config.patch_size) * (self.imag_encoder_config.image_size // self.imag_encoder_config.patch_size)
        self.num_image_patches = self.num_image_time_patches * self.num_image_spatial_patches

        # Expose the derived patch counts on cfg so EmbedReduction /
        # MultiModalLoss can read them; struct mode is temporarily
        # disabled because these keys are not part of the schema.
        OmegaConf.set_struct(cfg, False)
        cfg.num_ecg_channel_patches = self.num_ecg_channel_patches
        cfg.num_ecg_time_patches = self.num_ecg_time_patches
        cfg.num_ecg_patches = self.num_ecg_patches

        cfg.num_image_time_patches = self.num_image_time_patches
        cfg.num_image_spatial_patches = self.num_image_spatial_patches
        cfg.num_image_patches = self.num_image_patches
        cfg.used_image_frames = self.used_image_frames
        OmegaConf.set_struct(cfg, True)

        self.reducer = EmbedReduction(cfg=cfg, imag_encoder_config=self.imag_encoder_config, ecg_encoder_config=self.ecg_encoder_config)
        # Separate instances for train/val in case the loss keeps state.
        self.criterion_train = MultiModalLoss(cfg=cfg, loss_type=self.loss_type, T_contrastive=self.num_image_time_patches)
        self.criterion_val = MultiModalLoss(cfg=cfg, loss_type=self.loss_type, T_contrastive=self.num_image_time_patches)

        # Whether each encoder is fine-tuned; controls HF checkpointing
        # in on_save_checkpoint.
        self.ecg_trainable = cfg.training_mode.encoders.ecg.train
        self.image_trainable = cfg.training_mode.encoders.image.train

    def _apply_projection(self, proj_type: str, tokens, global_token):
        """Run the modality-specific projection heads.

        Args:
            proj_type: Either ``'ecg'`` or ``'image'``.
            tokens: Local (per-patch) token embeddings.
            global_token: Pooled global embedding.

        Returns:
            Tuple ``(projected_tokens, projected_global)``; each entry is
            ``None`` when the corresponding head is disabled for the
            configured loss type.

        Raises:
            ValueError: If ``proj_type`` is not one of the two modalities.
        """
        if proj_type == 'ecg':
            projected_tokens = self.projection_ecg_local(tokens) if self.projection_ecg_local is not None else None
            projected_global = self.projection_ecg_global(global_token) if self.projection_ecg_global is not None else None
        elif proj_type == 'image':
            projected_tokens = self.projection_image_local(tokens) if self.projection_image_local is not None else None
            projected_global = self.projection_image_global(global_token) if self.projection_image_global is not None else None
        else:
            # Previously an unknown proj_type surfaced as an opaque
            # UnboundLocalError; fail fast with a clear message instead.
            raise ValueError(f"Unknown proj_type: {proj_type!r} (expected 'ecg' or 'image')")

        return projected_tokens, projected_global

    def forward_ecg(self, ecg_view: torch.Tensor, rpeaks: torch.Tensor = None, tpfs: torch.Tensor = None) -> dict:
        """Encode an ECG view and project its embeddings.

        Args:
            ecg_view: Batched ECG input passed to the ECG encoder.
            rpeaks: Optional R-peak annotations forwarded to the reducer.
            tpfs: Optional per-sample extras forwarded to the reducer.

        Returns:
            Dict with raw tokens ('ecg_all_tokens', 'ecg_global_token')
            and their projections ('proj_ecg_local_emb',
            'proj_ecg_global_emb'; projections may be None).
        """
        hidden_state = self.ecg_encoder(pixel_values=ecg_view, apply_masking=False, use_layernorm=False).last_hidden_state
        ecg_tokens_dict = self.reducer(hidden_state=hidden_state, rpeaks=rpeaks, tpfs=tpfs, encoder_config=self.ecg_encoder_config, mode='ecg')
        proj_ecg_local_emb, proj_ecg_global_emb = self._apply_projection(proj_type='ecg',
                                                                         tokens=ecg_tokens_dict["ecg_all_tokens"],
                                                                         global_token=ecg_tokens_dict["ecg_global_token"])
        return {
            "ecg_all_tokens" : ecg_tokens_dict["ecg_all_tokens"],
            "ecg_global_token" : ecg_tokens_dict["ecg_global_token"],
            "proj_ecg_local_emb" : proj_ecg_local_emb,
            "proj_ecg_global_emb" : proj_ecg_global_emb,
        }

    def forward_mri(self, img_view: torch.Tensor) -> dict:
        """Encode an MRI image view and project its embeddings.

        Args:
            img_view: Batched image/video input passed to the image encoder.

        Returns:
            Dict with raw tokens ('img_all_tokens', 'img_global_token')
            and their projections ('proj_img_local_emb',
            'proj_img_global_emb'; projections may be None).
        """
        hidden_state = self.imag_encoder(pixel_values=img_view, apply_masking=False, use_layernorm=False).last_hidden_state
        img_tokens_dict = self.reducer(hidden_state=hidden_state, encoder_config=self.imag_encoder_config, mode='image')
        proj_img_local_emb, proj_img_global_emb = self._apply_projection(proj_type='image',
                                                                         tokens=img_tokens_dict["img_all_tokens"],
                                                                         global_token=img_tokens_dict["img_global_token"])
        return {
            "img_all_tokens" : img_tokens_dict["img_all_tokens"],
            "img_global_token" : img_tokens_dict["img_global_token"],
            "proj_img_local_emb" : proj_img_local_emb,
            "proj_img_global_emb" : proj_img_global_emb,
        }

    def forward(self, batch: dict) -> tuple:
        """Run both modality encoders on one batch.

        Args:
            batch: Mapping with keys 'ecg_aug' and 'image_aug', plus
                optional 'rpeaks' and 'tpfs'.

        Returns:
            Tuple ``(ecg_embeddings_dict, img_embeddings_dict)``.
        """
        ecg_aug, image_aug = batch.get('ecg_aug'), batch.get('image_aug')
        rpeaks = batch.get("rpeaks", None)
        tpfs = batch.get("tpfs", None)

        ecg_embeddings_dict = self.forward_ecg(ecg_view=ecg_aug, rpeaks=rpeaks, tpfs=tpfs)
        img_embeddings_dict = self.forward_mri(img_view=image_aug)

        return ecg_embeddings_dict, img_embeddings_dict

    def training_step(self, batch: dict, batch_idx: int) -> torch.Tensor:
        """Compute and log the training losses; returns the total loss."""
        ecg_embeddings_dict, img_embeddings_dict = self(batch)

        losses_dict = self.criterion_train(ecg_embeddings_dict, img_embeddings_dict)

        total_loss = losses_dict["total_loss"]
        global_loss = losses_dict["global_loss"]
        local_loss = losses_dict["local_loss"]

        # NOTE(review): total_loss is logged without sync_dist (unlike the
        # sub-losses) — presumably to avoid per-step sync overhead; confirm.
        self.log("train/total_loss", total_loss, on_step=True, on_epoch=True, logger=True, batch_size=self.batch_size, prog_bar=True)
        if global_loss is not None:
            self.log("train/global_loss", global_loss, on_step=True, on_epoch=True, logger=True, batch_size=self.batch_size, sync_dist=True)
        if local_loss is not None:
            self.log("train/local_loss", local_loss, on_step=True, on_epoch=True, logger=True, batch_size=self.batch_size, sync_dist=True)

        return total_loss

    def validation_step(self, batch: dict, batch_idx: int) -> torch.Tensor:
        """Compute and log the validation losses; returns the total loss."""
        ecg_embeddings_dict, img_embeddings_dict = self(batch)

        losses_dict = self.criterion_val(ecg_embeddings_dict, img_embeddings_dict)

        total_loss = losses_dict["total_loss"]
        global_loss = losses_dict["global_loss"]
        local_loss = losses_dict["local_loss"]

        self.log("val/total_loss", total_loss, on_step=False, on_epoch=True, logger=True, batch_size=self.batch_size, prog_bar=True, sync_dist=True)
        if global_loss is not None:
            self.log("val/global_loss", global_loss, on_step=False, on_epoch=True, logger=True, batch_size=self.batch_size, sync_dist=True)
        if local_loss is not None:
            self.log("val/local_loss", local_loss, on_step=False, on_epoch=True, logger=True, batch_size=self.batch_size, sync_dist=True)

        return total_loss

    def on_save_checkpoint(self, checkpoint: dict) -> None:
        """Also export trainable encoders in HuggingFace format.

        Runs alongside the regular Lightning checkpoint; `checkpoint`
        itself is not modified. Only encoders marked trainable in the
        config are exported (frozen ones never change from their
        pretrained weights).
        """
        hf_save_path_ecg = os.path.join(self.save_dir, "hf_ecg_model") if self.ecg_trainable else None
        hf_save_path_mri = os.path.join(self.save_dir, "hf_mri_model") if self.image_trainable else None

        if self.ecg_trainable:
            os.makedirs(hf_save_path_ecg, exist_ok=True)
            self.ecg_encoder.save_pretrained(hf_save_path_ecg)
        if self.image_trainable:
            os.makedirs(hf_save_path_mri, exist_ok=True)
            self.imag_encoder.save_pretrained(hf_save_path_mri)

    def num_steps(self) -> int:
        """Estimate optimizer steps per epoch for scheduler setup.

        NOTE(review): relies on the private Lightning attribute
        ``fit_loop._data_source`` — may break across Lightning versions.
        """
        dataset = self.trainer.fit_loop._data_source.dataloader()
        dataset_size = len(dataset)
        num_devices = max(1, self.trainer.num_devices)
        num_steps = dataset_size // (self.trainer.accumulate_grad_batches * num_devices)
        return num_steps

    def configure_optimizers(self) -> dict:
        """Build the optimizer/scheduler with per-group learning rates.

        Encoders get their own parameter groups; the reducer and any
        active projection heads (per loss_type) share the 'lr' group.
        """
        model_group_dict = {
            'ecg_encoder': self.ecg_encoder,
            'imag_encoder': self.imag_encoder,
            'lr': [self.reducer]
        }
        if self.loss_type in ['global', 'both']:
            model_group_dict['lr'].extend([self.projection_image_global, self.projection_ecg_global])
        if self.loss_type in ['local', 'both']:
            model_group_dict['lr'].extend([self.projection_image_local, self.projection_ecg_local])

        optimizer, scheduler = create_optimizer_and_scheduler_multiple_lr(
            model_group_dict=model_group_dict,
            optimizer_params=self.cfg.models.params,
            num_batches_per_epoch=self.num_steps()
        )

        return {"optimizer": optimizer, "lr_scheduler": scheduler}

    def configure_callbacks(self) -> Sequence[Callback] | Callback:
        """Attach checkpointing (best-k on val/total_loss) and LR monitoring."""
        checkpoint_folder = os.path.join(self.save_dir, 'checkpoints')
        os.makedirs(checkpoint_folder, exist_ok=True)

        checkpoint_callback = ModelCheckpoint(
            dirpath=checkpoint_folder,
            auto_insert_metric_name=True,
            monitor='val/total_loss',
            mode='min',
            save_top_k=self.cfg.save_top_k,
            every_n_epochs=self.cfg.check_val_every_n_epoch,
            save_on_train_epoch_end=False,
            save_weights_only=False,
            verbose=True,
            save_last=True,
        )

        learning_rate_callback = LearningRateMonitor(logging_interval='epoch',
                                                     log_weight_decay=True,
                                                     log_momentum=False)

        return [learning_rate_callback, checkpoint_callback]
    