import os

from typing import Sequence
from omegaconf import DictConfig, OmegaConf

import torch
import lightning as L

from torch import nn
from lightning import Callback
from lightning.pytorch.callbacks import ModelCheckpoint, LearningRateMonitor

from ecgcmr.multimodal.multimodal_loss.CombinedLoss import MultiModalLoss
from ecgcmr.multimodal.multimodal_models.EmbeddingsReduction import EmbedReduction
from ecgcmr.multimodal.multimodal_utils.misc import initialize_image_encoder, initialize_ecg_encoder, init_image_projection_heads, init_ecg_projection_heads

from ecgcmr.utils.LastLayer import LastLayerConfiguration
from ecgcmr.utils.misc import create_optimizer_and_scheduler_multiple_lr, create_optimizer_and_scheduler


class ImageECGClip_LinearProbe(L.LightningModule):
    """Joint image/ECG contrastive pretraining with an online linear probe.

    Holds an image encoder and an ECG encoder with modality-specific
    projection heads, trained with a contrastive ``MultiModalLoss``.
    In parallel, a small last layer (linear probe) is trained on pooled ECG
    encoder features for a downstream task, using its own optimizer and
    scheduler, every ``cfg.downstream_task.epoch_interval`` epochs.

    Optimization is manual (``automatic_optimization = False``): both
    optimizers are toggled, stepped, and their schedulers advanced
    explicitly inside ``training_step``.
    """

    def __init__(self,
                 cfg: DictConfig,
                 save_dir: str,
                 pretrained_model_name_or_path_image: str,
                 pretrained_model_name_or_path_ecg: str,
                 ) -> None:
        """Build encoders, projection heads, reducer, losses and the probe.

        Args:
            cfg: Full experiment configuration. Derived patch counts are
                written back into it (see below) for downstream components.
            save_dir: Directory for HF-style encoder exports and checkpoints.
            pretrained_model_name_or_path_image: Init source for the image encoder.
            pretrained_model_name_or_path_ecg: Init source for the ECG encoder.
        """
        super().__init__()

        self.cfg = cfg
        self.save_dir = save_dir

        self.main_batch_size = cfg.dataset.batch_size
        self.downstream_batch_size = cfg.downstream_task.batch_size

        # 'global', 'local', or 'both' — controls which projection heads exist.
        self.loss_type = cfg.training_mode.loss.type

        # Effective temporal lengths after optional augmentation cropping.
        self.used_image_frames = cfg.augmentations.imaging.time_sample.result_n_frames if cfg.augmentations.imaging.time_sample.enable else cfg.dataset.image_time_steps
        self.used_ecg_steps = cfg.augmentations.ecg.random_crop.ecg_time_steps if cfg.augmentations.ecg.random_crop.enable else cfg.dataset.ecg_time_steps

        self.imag_encoder, self.imag_encoder_config = initialize_image_encoder(pretrained_model_name_or_path_image, cfg, self.used_image_frames)
        self.ecg_encoder, self.ecg_encoder_config = initialize_ecg_encoder(pretrained_model_name_or_path_ecg, cfg, self.used_ecg_steps)

        self.projection_image_cls, self.projection_image_tokens = init_image_projection_heads(cfg, self.imag_encoder_config, self.loss_type)
        self.projection_ecg_cls, self.projection_ecg_tokens = init_ecg_projection_heads(cfg, self.ecg_encoder_config, self.loss_type)

        # Patch-grid bookkeeping. For the ECG encoder, image_size[0] /
        # patch_size[0] index the lead/channel axis and patch_size[1] the
        # temporal patch length — TODO confirm against the encoder config.
        self.num_ecg_channel_patches = self.ecg_encoder_config.image_size[0] // self.ecg_encoder_config.patch_size[0]
        self.num_ecg_time_patches = self.used_ecg_steps // self.ecg_encoder_config.patch_size[1]
        self.num_ecg_patches = self.num_ecg_channel_patches * self.num_ecg_time_patches

        self.num_image_time_patches = self.used_image_frames // self.imag_encoder_config.tubelet_size
        self.num_image_spatial_patches = (self.imag_encoder_config.image_size // self.imag_encoder_config.patch_size) * (self.imag_encoder_config.image_size // self.imag_encoder_config.patch_size)
        self.num_image_patches = self.num_image_time_patches * self.num_image_spatial_patches

        # Expose the derived patch counts to downstream components via cfg;
        # struct mode is disabled only while the new keys are inserted.
        OmegaConf.set_struct(cfg, False)
        cfg.num_ecg_channel_patches = self.num_ecg_channel_patches
        cfg.num_ecg_time_patches = self.num_ecg_time_patches
        cfg.num_ecg_patches = self.num_ecg_patches

        cfg.num_image_time_patches = self.num_image_time_patches
        cfg.num_image_spatial_patches = self.num_image_spatial_patches
        cfg.num_image_patches = self.num_image_patches
        OmegaConf.set_struct(cfg, True)

        self.reducer = EmbedReduction(cfg=cfg, imag_encoder_config=self.imag_encoder_config, ecg_encoder_config=self.ecg_encoder_config)
        self.criterion = MultiModalLoss(cfg=cfg, loss_type=self.loss_type, T_contrastive=self.num_image_time_patches)

        self._initialize_task_dependent_components(cfg)

        # Important: This property activates manual optimization.
        self.automatic_optimization = False

    def _initialize_task_dependent_components(self, cfg: DictConfig) -> None:
        """Create the linear-probe head, its criteria, metrics and layernorm."""
        last_layer_config = LastLayerConfiguration(cfg=cfg, encoded_dim=self.ecg_encoder_config.hidden_size, mask_labels=False)

        self.task = last_layer_config.task
        self.last_layer = last_layer_config.create_last_layer()

        # Separate criterion instances for train and val (they may be stateful).
        self.last_layer_criterion_train = last_layer_config.create_criterion()
        self.last_layer_criterion_val = last_layer_config.create_criterion()

        self.downstream_task_metrics_val = last_layer_config.create_metrics()

        # The probe is only trained/validated every n-th epoch.
        self.train_every_n_epochs = self.cfg.downstream_task.epoch_interval

        # Normalizes the mean-pooled ECG features fed to the probe.
        self.ecg_layernorm_regression = nn.LayerNorm(normalized_shape=self.ecg_encoder_config.hidden_size, eps=1e-12)

    def _apply_projection(self, type, tokens, global_token):
        """Apply the modality-specific projection heads.

        Args:
            type: Modality selector, 'ecg' or 'image'.
            tokens: Per-token (local) embeddings.
            global_token: Pooled (global) embedding.

        Returns:
            ``(projected_tokens, projected_global)``; either element is
            ``None`` when the corresponding head is absent (depends on
            ``loss_type``).

        Raises:
            ValueError: If ``type`` is not 'ecg' or 'image' (previously this
                raised an opaque ``UnboundLocalError``).
        """
        if type == 'ecg':
            projected_tokens = self.projection_ecg_tokens(tokens) if self.projection_ecg_tokens else None
            projected_global = self.projection_ecg_cls(global_token) if self.projection_ecg_cls else None
        elif type == 'image':
            projected_tokens = self.projection_image_tokens(tokens) if self.projection_image_tokens else None
            projected_global = self.projection_image_cls(global_token) if self.projection_image_cls else None
        else:
            raise ValueError(f"Unknown modality {type!r}; expected 'ecg' or 'image'.")

        return projected_tokens, projected_global

    def forward_ecg(self, ecg_view: torch.Tensor, rpeaks: torch.Tensor = None) -> tuple[torch.Tensor | None, torch.Tensor | None]:
        """Encode an ECG view and return (projected local, projected global) embeddings."""
        hidden_state = self.ecg_encoder(pixel_values=ecg_view, apply_masking=False).last_hidden_state
        ecg_all_tokens, ecg_global_token = self.reducer(hidden_state=hidden_state, rpeaks=rpeaks, encoder_config=self.ecg_encoder_config, mode='ecg')
        proj_ecg_local_emb, proj_ecg_global_emb = self._apply_projection('ecg', ecg_all_tokens, ecg_global_token)
        return proj_ecg_local_emb, proj_ecg_global_emb

    def forward_mri(self, img_view: torch.Tensor) -> tuple[torch.Tensor | None, torch.Tensor | None]:
        """Encode an image view and return (projected local, projected global) embeddings."""
        hidden_state = self.imag_encoder(pixel_values=img_view, apply_masking=False).last_hidden_state
        img_all_tokens, img_global_token = self.reducer(hidden_state=hidden_state, encoder_config=self.imag_encoder_config, mode='image')
        proj_img_local_emb, proj_img_global_emb = self._apply_projection('image', img_all_tokens, img_global_token)
        return proj_img_local_emb, proj_img_global_emb

    def forward_contrastive(self, batch):
        """Run both encoders on an augmented pair and compute the contrastive loss.

        Returns:
            ``(global_loss, local_loss, total_loss)``; the first two may be
            ``None`` depending on ``loss_type``.
        """
        ecg_aug, image_aug = batch.get('ecg_aug'), batch.get('image_aug')
        rpeaks = batch.get("rpeaks", None)

        proj_ecg_local_emb, proj_ecg_global_emb = self.forward_ecg(ecg_aug, rpeaks=rpeaks)
        proj_img_local_emb, proj_img_global_emb = self.forward_mri(image_aug)

        global_loss, local_loss, total_loss = self.criterion(proj_ecg_global_emb, proj_img_global_emb, proj_ecg_local_emb, proj_img_local_emb)

        return global_loss, local_loss, total_loss

    def forward_linear_probe(self, batch, mode='train'):
        """Compute the downstream probe loss on mean-pooled ECG features.

        Args:
            batch: ``(ecg, label)`` pair.
            mode: 'train' uses the train criterion; 'val' additionally
                updates the validation metrics.

        Raises:
            ValueError: For any other ``mode`` (previously this raised an
                opaque ``UnboundLocalError``).
        """
        ecg, label = batch

        hidden_state = self.ecg_encoder(pixel_values=ecg, apply_masking=False).last_hidden_state

        # Drop the CLS token (position 0) before pooling, if present.
        if self.ecg_encoder_config.use_cls_token:
            all_tokens, _ = hidden_state[:, 1:], hidden_state[:, 0]
        else:
            all_tokens = hidden_state

        features = torch.mean(all_tokens, dim=1)
        features = self.ecg_layernorm_regression(features)

        logits = self.last_layer(features)

        if mode == 'train':
            loss = self.last_layer_criterion_train(logits, label)
        elif mode == 'val':
            loss = self.last_layer_criterion_val(logits, label)
            self.downstream_task_metrics_val.update(preds=logits, labels=label)
        else:
            raise ValueError(f"Unknown mode {mode!r}; expected 'train' or 'val'.")

        return loss

    def training_step(self, batch, batch_idx: int) -> None:
        """Manually optimize the contrastive backbone and (periodically) the probe.

        Expects ``batch`` to carry a 'main' sub-batch always and a
        'downstream' sub-batch used only on probe-training epochs.
        """
        backbone_opt, last_layer_opt = self.optimizers()
        backbone_sch, last_layer_sch = self.lr_schedulers()

        batch_main = batch['main']

        # --- contrastive backbone step ---
        self.toggle_optimizer(backbone_opt)
        global_loss, local_loss, total_loss = self.forward_contrastive(batch_main)

        self.log("train/total_loss", total_loss, on_step=True, on_epoch=True, logger=True, batch_size=self.main_batch_size, prog_bar=True)

        if global_loss is not None:
            self.log("train/global_loss", global_loss, on_step=True, on_epoch=True, logger=True, batch_size=self.main_batch_size, prog_bar=False)
        if local_loss is not None:
            self.log("train/local_loss", local_loss, on_step=True, on_epoch=True, logger=True, batch_size=self.main_batch_size, prog_bar=False)

        backbone_opt.zero_grad()
        self.manual_backward(total_loss)

        backbone_opt.step()
        backbone_sch.step()

        self.untoggle_optimizer(backbone_opt)

        # --- linear-probe step, only every n-th epoch ---
        if self.trainer.current_epoch % self.train_every_n_epochs == 0:
            batch_downstream = batch['downstream']

            self.toggle_optimizer(last_layer_opt)
            downstream_loss = self.forward_linear_probe(batch_downstream, mode='train')

            self.log("train/downstream_loss", downstream_loss, on_step=False, on_epoch=True, logger=True, batch_size=self.downstream_batch_size, prog_bar=False)

            last_layer_opt.zero_grad()
            self.manual_backward(downstream_loss)

            last_layer_opt.step()
            last_layer_sch.step()

            self.untoggle_optimizer(last_layer_opt)

    def validation_step(self, batch, batch_idx: int, dataloader_idx: int = 0) -> None:
        """Validate: dataloader 0 = contrastive loss, dataloader 1 = probe."""
        if dataloader_idx == 0:
            global_loss, local_loss, total_loss = self.forward_contrastive(batch)

            self.log("val/total_loss", total_loss, on_step=False, on_epoch=True, logger=True, batch_size=self.main_batch_size, prog_bar=True)
            if global_loss is not None:
                self.log("val/global_loss", global_loss, on_step=False, on_epoch=True, logger=True, batch_size=self.main_batch_size)
            if local_loss is not None:
                self.log("val/local_loss", local_loss, on_step=False, on_epoch=True, logger=True, batch_size=self.main_batch_size)

        if dataloader_idx == 1:
            # Probe metrics are only meaningful on epochs where it trains.
            if self.trainer.current_epoch % self.train_every_n_epochs == 0:
                downstream_loss = self.forward_linear_probe(batch, mode='val')
                self.log("val/downstream_loss", downstream_loss, on_step=False, on_epoch=True, logger=True, batch_size=self.downstream_batch_size, prog_bar=False)

    def on_validation_epoch_end(self) -> None:
        """Log aggregated downstream metrics on probe-training epochs."""
        if self.trainer.current_epoch % self.train_every_n_epochs == 0:
            metrics = self.downstream_task_metrics_val.compute()

            for name, value in metrics.items():
                self.log(f'val/downstream_{name}', value, logger=True, batch_size=self.downstream_batch_size, add_dataloader_idx=True, prog_bar=False)

            # Bug fix: compute and log the monitor metric exactly once,
            # outside the loop (it was previously re-logged per metric).
            monitor_metric = metrics['Mean_RV'] + metrics['Mean_LV']
            self.log('val/downstream_monitor_metric', monitor_metric, logger=True, batch_size=self.downstream_batch_size, add_dataloader_idx=True, prog_bar=False)

            self.downstream_task_metrics_val.reset()

    def on_save_checkpoint(self, checkpoint: dict) -> None:
        """Additionally export both encoders in HuggingFace format under save_dir."""
        hf_save_path_ecg = os.path.join(self.save_dir, "hf_ecg_model")
        hf_save_path_mri = os.path.join(self.save_dir, "hf_mri_model")

        os.makedirs(hf_save_path_ecg, exist_ok=True)
        os.makedirs(hf_save_path_mri, exist_ok=True)

        self.ecg_encoder.save_pretrained(hf_save_path_ecg)
        self.imag_encoder.save_pretrained(hf_save_path_mri)

    def num_steps(self, mode='main') -> int:
        """Return the number of optimizer steps per epoch for a dataloader.

        Args:
            mode: Key into the combined fit dataloaders ('main' or 'downstream').

        NOTE(review): relies on the private ``fit_loop._data_source``
        attribute — fragile across Lightning versions.
        """
        dataset = self.trainer.fit_loop._data_source.dataloader()[mode]
        dataset_size = len(dataset)  # number of batches, not samples
        num_devices = max(1, self.trainer.num_devices)
        num_steps = dataset_size // (self.trainer.accumulate_grad_batches * num_devices)
        return num_steps

    def configure_optimizers(self):
        """Create one optimizer/scheduler pair for the backbone and one for the probe."""
        main_models_list = [self.imag_encoder, self.ecg_encoder, self.reducer]
        if self.loss_type in ['global', 'both']:
            main_models_list.append(self.projection_image_cls)
            main_models_list.append(self.projection_ecg_cls)
        if self.loss_type in ['local', 'both']:
            main_models_list.append(self.projection_image_tokens)
            main_models_list.append(self.projection_ecg_tokens)

        main_optimizer, main_scheduler = create_optimizer_and_scheduler_multiple_lr(
            models=main_models_list,
            optimizer_params=self.cfg.models.params,
            num_batches_per_epoch=self.num_steps(mode='main')
        )

        last_layer_optimizer, last_layer_scheduler = create_optimizer_and_scheduler(
            models=[self.last_layer, self.ecg_layernorm_regression],
            optimizer_params=self.cfg.downstream_task.params,
            num_batches_per_epoch=self.num_steps(mode='downstream'),
        )

        # Order matters: training_step unpacks (backbone, last_layer).
        return [main_optimizer, last_layer_optimizer], [main_scheduler, last_layer_scheduler]

    def configure_callbacks(self) -> Sequence[Callback] | Callback:
        """Attach checkpointing (on val contrastive loss) and LR monitoring."""
        checkpoint_folder = os.path.join(self.save_dir, 'checkpoints')
        os.makedirs(checkpoint_folder, exist_ok=True)

        checkpoint_callback = ModelCheckpoint(
            dirpath=checkpoint_folder,
            auto_insert_metric_name=True,
            monitor='val/total_loss/dataloader_idx_0',
            mode='min',
            save_top_k=self.cfg.save_top_k,
            every_n_epochs=self.cfg.check_val_every_n_epoch,
            save_on_train_epoch_end=False,
            save_weights_only=False,
            verbose=True
            )

        learning_rate_callback = LearningRateMonitor(logging_interval='epoch',
                                                     log_weight_decay=True,
                                                     log_momentum=False)

        return [learning_rate_callback, checkpoint_callback]
    