import os

from typing import Sequence
from omegaconf import DictConfig

import torch
import lightning as L

from lightning import Callback
from lightning.pytorch.callbacks import ModelCheckpoint, LearningRateMonitor

from ecgcmr.utils.callbacks import WeightDecayAdjustmentCallback

from transformers import VideoMAEConfig, VideoMAEForPreTraining

from ecgcmr.utils.misc import create_optimizer_and_scheduler


class ImageViTMAE(L.LightningModule):
    """Masked-autoencoder pre-training of a VideoMAE model on imaging data.

    Builds a ``VideoMAEForPreTraining`` from the size-specific section of
    ``cfg.models``, logs train/val reconstruction loss, and mirrors the
    encoder weights to a HuggingFace-format directory every time Lightning
    saves a checkpoint.
    """

    def __init__(self, cfg: DictConfig, save_dir: str) -> None:
        """Construct the VideoMAE model from configuration.

        Args:
            cfg: Experiment config; ``cfg.models.model_size`` selects the
                per-size sub-config that supplies all architecture params.
            save_dir: Root directory for HF exports and checkpoints.
        """
        super().__init__()

        self.cfg = cfg
        self.save_dir = save_dir

        # Architecture hyper-parameters live under a size-keyed sub-config
        # (e.g. cfg.models.small / cfg.models.base).
        model_cfg = cfg.models[cfg.models.model_size]

        # NOTE(review): several kwargs below (use_learnable_pos_emb,
        # attention_type, mask_loss, use_cls_token, layerscale_init_values)
        # are not part of stock HF VideoMAEConfig — this appears to target a
        # forked/extended `transformers`; confirm against the installed fork.
        self.image_model_config = VideoMAEConfig(
            image_size=model_cfg.image_size,
            patch_size=model_cfg.patch_size,
            num_channels=model_cfg.num_channels,
            # Temporal length comes from the augmentation pipeline, not the
            # model sub-config: frames remaining after time sampling.
            num_frames=cfg.augmentations.imaging.time_sample.result_n_frames,
            tubelet_size=model_cfg.tubelet_size,
            hidden_size=model_cfg.hidden_size,
            num_hidden_layers=model_cfg.num_hidden_layers,
            num_attention_heads=model_cfg.num_attention_heads,
            intermediate_size=model_cfg.intermediate_size,
            hidden_act=model_cfg.hidden_act,
            hidden_dropout_prob=model_cfg.hidden_dropout_prob,
            attention_probs_dropout_prob=model_cfg.attention_probs_dropout_prob,
            drop_path_rate=model_cfg.drop_path_rate,
            initializer_range=model_cfg.initializer_range,
            layer_norm_eps=model_cfg.layer_norm_eps,
            decoder_num_attention_heads=model_cfg.decoder_num_attention_heads,
            decoder_hidden_size=model_cfg.decoder_hidden_size,
            decoder_num_hidden_layers=model_cfg.decoder_num_hidden_layers,
            decoder_intermediate_size=model_cfg.decoder_intermediate_size,
            use_learnable_pos_emb=model_cfg.use_learnable_pos_emb,
            mask_ratio=model_cfg.mask_ratio,
            attention_type=model_cfg.attention_type,
            mask_loss=model_cfg.mask_loss,
            use_cls_token=model_cfg.use_cls_token,
            layerscale_init_values=model_cfg.layerscale_init_values,
        )

        self.image_model = VideoMAEForPreTraining(config=self.image_model_config)
        self.model_cfg = model_cfg
        self.batch_size = cfg.dataset.batch_size

    def forward(self, pixel_values: torch.Tensor):
        """Run the MAE forward pass; returns the HF model output (with .loss).

        NOTE(review): ``apply_masking`` / ``use_layernorm`` are not accepted
        by stock ``VideoMAEForPreTraining`` (which expects ``bool_masked_pos``);
        assumed to be supported by the forked model — confirm.
        """
        return self.image_model(pixel_values=pixel_values, apply_masking=True, use_layernorm=True)

    def training_step(self, batch: torch.Tensor, batch_idx: int) -> torch.Tensor:
        """One optimization step: forward the batch and log the MAE loss."""
        outputs = self(batch)
        loss = outputs.loss
        # Plain string literal: the original used an f-string with no
        # placeholders (ruff F541); runtime value is identical.
        self.log("train/loss", loss, on_step=True, on_epoch=True, logger=True, sync_dist=True, prog_bar=True)

        return loss

    def validation_step(self, batch: torch.Tensor, batch_idx: int) -> torch.Tensor:
        """One validation step: forward the batch and log the epoch-level loss."""
        outputs = self(batch)
        loss = outputs.loss
        self.log("val/loss", loss, on_step=False, on_epoch=True, logger=True, sync_dist=True, prog_bar=True)

        return loss

    def on_save_checkpoint(self, checkpoint: dict) -> None:
        """Mirror the encoder (not the decoder head) to HF format on every save.

        Only ``image_model.videomae`` is exported, so the directory can be
        loaded later with ``from_pretrained`` for downstream fine-tuning.
        The path is fixed, so each save overwrites the previous export.
        """
        hf_save_path = os.path.join(self.save_dir, "hf_model")
        os.makedirs(hf_save_path, exist_ok=True)
        self.image_model.videomae.save_pretrained(hf_save_path)

    def num_steps(self) -> int:
        """Estimate optimizer steps per epoch (for the LR scheduler).

        NOTE(review): reads Lightning's private ``fit_loop._data_source`` —
        fragile across Lightning versions; verify on upgrade.
        """
        # len(dataloader) is the number of BATCHES per epoch, which is then
        # divided across devices and gradient-accumulation windows.
        dataloader = self.trainer.fit_loop._data_source.dataloader()
        num_batches = len(dataloader)
        num_devices = max(1, self.trainer.num_devices)
        return num_batches // (self.trainer.accumulate_grad_batches * num_devices)

    def configure_optimizers(self) -> dict:
        """Create optimizer + scheduler from ``cfg.models.params``."""
        optimizer, scheduler = create_optimizer_and_scheduler(
            models=[self],
            optimizer_params=self.cfg.models.params,
            num_batches_per_epoch=self.num_steps()
        )
        return {"optimizer": optimizer, "lr_scheduler": scheduler}

    def configure_callbacks(self) -> Sequence[Callback] | Callback:
        """Return trainer callbacks: LR monitor, checkpointing, WD schedule."""
        checkpoint_folder = os.path.join(self.save_dir, 'checkpoints')
        os.makedirs(checkpoint_folder, exist_ok=True)

        # Raise weight decay to 0.05 for the final 20% of training.
        weight_decay_update = WeightDecayAdjustmentCallback(increase_epoch=int(self.cfg.max_epochs*0.8), new_weight_decay=0.05)

        checkpoint_callback = ModelCheckpoint(
            dirpath=checkpoint_folder,
            auto_insert_metric_name=True,
            monitor='val/loss',
            mode='min',
            save_top_k=self.cfg.save_top_k,
            every_n_epochs=self.cfg.check_val_every_n_epoch,
            # Checkpoint after validation (where val/loss is produced), not
            # at train-epoch end.
            save_on_train_epoch_end=False,
            save_weights_only=False,
            verbose=True
            )

        learning_rate_callback = LearningRateMonitor(logging_interval='epoch',
                                                     log_weight_decay=True,
                                                     log_momentum=False)

        return [learning_rate_callback, checkpoint_callback, weight_decay_update]