import os

from typing import Sequence
from omegaconf import DictConfig

import torch
import lightning as L

from lightning import Callback
from lightning.pytorch.callbacks import ModelCheckpoint, LearningRateMonitor, GradientAccumulationScheduler

from transformers import VideoMAEConfig, VideoMAEModel
from ecgcmr.multimodal.multimodal_models.Projections import ProjectionHeadSimple
from ecgcmr.imaging.img_loss.CustomImagingNTXLoss import NTXentLoss
from ecgcmr.utils.misc import create_optimizer_and_scheduler


class ImageEncoderSimCLR_ViT(L.LightningModule):
    """SimCLR-style contrastive pretraining of a VideoMAE ViT imaging encoder.

    Two augmented views of the same imaging clip are embedded by a shared
    VideoMAE backbone plus a projection head, and pulled together with an
    NT-Xent contrastive loss. The HF backbone is snapshotted alongside each
    Lightning checkpoint so it can be reloaded with ``from_pretrained``.
    """

    def __init__(
            self,
            cfg: DictConfig,
            save_dir: str
        ) -> None:
        """
        Args:
            cfg: Hydra/omegaconf experiment config. Reads the ``models``,
                ``augmentations`` and ``training_mode.loss`` sub-trees, plus
                ``save_top_k`` / ``check_val_every_n_epoch`` for callbacks.
            save_dir: Root directory for HF backbone snapshots and Lightning
                checkpoints.
        """
        super().__init__()

        self.save_hyperparameters()

        self.cfg = cfg
        self.save_dir = save_dir

        # Backbone hyper-parameters are selected by name, e.g. cfg.models[cfg.models.backbone].
        model_cfg = cfg.models[cfg.models.backbone]

        self.image_model_config = VideoMAEConfig(
            image_size=model_cfg.image_size,
            patch_size=model_cfg.patch_size,
            num_channels=model_cfg.num_channels,
            # Temporal length must match what the augmentation pipeline emits.
            num_frames=cfg.augmentations.imaging.time_sample.result_n_frames,
            tubelet_size=model_cfg.tubelet_size,
            hidden_size=model_cfg.hidden_size,
            num_hidden_layers=model_cfg.num_hidden_layers,
            num_attention_heads=model_cfg.num_attention_heads,
            intermediate_size=model_cfg.intermediate_size,
            hidden_act=model_cfg.hidden_act,
            hidden_dropout_prob=model_cfg.hidden_dropout_prob,
            attention_probs_dropout_prob=model_cfg.attention_probs_dropout_prob,
            drop_path_rate=model_cfg.drop_path_rate,
            initializer_range=model_cfg.initializer_range,
            layer_norm_eps=model_cfg.layer_norm_eps,
            use_learnable_pos_emb=model_cfg.use_learnable_pos_emb,
            mask_ratio=model_cfg.mask_ratio,
            attention_type=model_cfg.attention_type,
            mask_loss=model_cfg.mask_loss,
            use_cls_token=model_cfg.use_cls_token,
            layerscale_init_values=model_cfg.layerscale_init_values,
        )

        self.mri_encoder = VideoMAEModel(config=self.image_model_config)
        self.encoded_dim = model_cfg.hidden_size

        self.img_projection = ProjectionHeadSimple(
            input_dim=self.encoded_dim,
            hidden_dim=self.cfg.models.projection.hidden_dim,
            output_dim=self.cfg.models.projection.proj_out_dim,
        )

        # Training loss gathers negatives across devices (DDP); validation
        # loss stays per-device so val metrics are comparable across world sizes.
        self.criterion_train = NTXentLoss(
            temperature=cfg.training_mode.loss.NTXentLoss.temperature,
            gather_distributed=True,
        )
        self.criterion_val = NTXentLoss(temperature=cfg.training_mode.loss.NTXentLoss.temperature)

        # If True, pool features over patch tokens; otherwise take token 0.
        self.mean_pooling = cfg.models.mean_pooling

    def forward(self, img_view: torch.Tensor) -> torch.Tensor:
        """Encode one augmented view into a projected embedding.

        Args:
            img_view: Pixel tensor accepted by ``VideoMAEModel``
                (presumably (B, T, C, H, W) — TODO confirm against the dataloader).

        Returns:
            Projected embeddings of shape (B, proj_out_dim).
        """
        # last_hidden_state: (B, L+1, C); masking disabled for contrastive encoding.
        hidden_state = self.mri_encoder(pixel_values=img_view, apply_masking=False).last_hidden_state

        if self.mean_pooling:
            # Mean over patch tokens, skipping index 0.
            # NOTE(review): skipping token 0 is only correct when use_cls_token
            # is enabled in the backbone config — confirm for configs where it is off.
            features = hidden_state[:, 1:, :].mean(dim=1)
        else:
            features = hidden_state[:, 0, :]

        return self.img_projection(features)  # (B, D)

    def _embed_views(self, batch: Sequence[torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor]:
        """Embed both augmented views of a batch with the shared encoder."""
        view1, view2 = batch
        return self(view1), self(view2)

    def training_step(self, batch: Sequence[torch.Tensor], batch_idx: int, dataloader_idx: int = 0) -> torch.Tensor:
        """Contrastive step on two views; logs and returns the NT-Xent loss."""
        embed1, embed2 = self._embed_views(batch)

        # Only the scalar loss is used; logits/labels from the criterion are discarded.
        loss, _, _ = self.criterion_train(embed1, embed2)

        self.log("train/contrastive.loss", loss, on_step=True,
                 on_epoch=True, logger=True, add_dataloader_idx=False, sync_dist=True)
        return loss

    def validation_step(self, batch: Sequence[torch.Tensor], batch_idx: int, dataloader_idx: int = 0) -> torch.Tensor:
        """Validation counterpart of ``training_step`` (no cross-device gathering)."""
        embed1, embed2 = self._embed_views(batch)

        loss, _, _ = self.criterion_val(embed1, embed2)

        self.log("val/contrastive.loss", loss, on_step=False,
                 on_epoch=True, logger=True, add_dataloader_idx=False, sync_dist=True)
        return loss

    def num_steps(self) -> int:
        """Return the number of optimizer steps per epoch.

        Computed as len(train dataloader) // (grad-accum * devices); note that
        ``len(dataloader)`` counts batches, not samples.

        NOTE(review): relies on the private ``fit_loop._data_source`` Lightning
        attribute, which may break across Lightning versions — confirm on upgrade.
        """
        train_dataloader = self.trainer.fit_loop._data_source.dataloader()
        num_batches = len(train_dataloader)
        num_devices = max(1, self.trainer.num_devices)
        return num_batches // (self.trainer.accumulate_grad_batches * num_devices)

    def on_save_checkpoint(self, checkpoint: dict) -> None:
        """Snapshot the HF backbone next to the Lightning checkpoint.

        Saved in HF format so the encoder can be reloaded standalone via
        ``VideoMAEModel.from_pretrained``; the projection head lives only in
        the Lightning checkpoint.
        """
        hf_save_path = os.path.join(self.save_dir, "hf_model")
        os.makedirs(hf_save_path, exist_ok=True)
        self.mri_encoder.save_pretrained(hf_save_path)

    def configure_optimizers(self) -> dict:
        """Build the optimizer/scheduler pair for backbone + projection head."""
        optimizer, scheduler = create_optimizer_and_scheduler(
            models=[self.mri_encoder, self.img_projection],
            optimizer_params=self.cfg.models.params,
            num_batches_per_epoch=self.num_steps(),
        )

        return {"optimizer": optimizer, "lr_scheduler": scheduler}

    def configure_callbacks(self) -> Sequence[Callback] | Callback:
        """Attach checkpointing (best val contrastive loss) and LR monitoring."""
        checkpoint_folder = os.path.join(self.save_dir, 'checkpoints')
        os.makedirs(checkpoint_folder, exist_ok=True)

        checkpoint_callback = ModelCheckpoint(
            dirpath=checkpoint_folder,
            auto_insert_metric_name=True,
            monitor='val/contrastive.loss',
            mode='min',
            save_top_k=self.cfg.save_top_k,
            # Align checkpointing cadence with the validation cadence so the
            # monitored metric is always fresh.
            every_n_epochs=self.cfg.check_val_every_n_epoch,
            save_on_train_epoch_end=False,
            save_weights_only=False,
            verbose=True
        )

        learning_rate_callback = LearningRateMonitor(logging_interval='epoch', log_momentum=False)

        return [learning_rate_callback, checkpoint_callback]