import os

from typing import Sequence
from omegaconf import DictConfig

import torch
from torch import nn
import lightning as L

from lightning import Callback
from lightning.pytorch.callbacks import ModelCheckpoint, LearningRateMonitor

from ecgcmr.utils.callbacks import WeightDecayAdjustmentCallback

from transformers import VideoMAEConfig, VideoMAEForPreTraining
from ecgcmr.utils.LastLayer import LastLayerConfiguration

from ecgcmr.utils.misc import create_optimizer_and_scheduler


class ImageViTMAE_LinearProbe(L.LightningModule):
    """VideoMAE masked pretraining with a periodically trained linear probe.

    Two dataloaders drive two manually-optimized objectives:

    * dataloader 0 ("main"): masked-autoencoder reconstruction loss updates
      the VideoMAE backbone.
    * dataloader 1 ("downstream"): a linear probe (plus its own LayerNorm)
      is trained on frozen-forward backbone features, but only every
      ``cfg.downstream_task.epoch_interval`` epochs.

    ``automatic_optimization`` is disabled; each branch of ``training_step``
    steps its own optimizer and scheduler.
    """

    def __init__(self, cfg: DictConfig, save_dir: str) -> None:
        """Build the VideoMAE backbone and the task-dependent probe head.

        Args:
            cfg: Experiment configuration (OmegaConf). Must provide
                ``models``, ``dataset``, ``downstream_task`` and
                ``augmentations`` sections as read below.
            save_dir: Directory under which checkpoints and the exported
                HuggingFace model are written.
        """
        super().__init__()

        self.cfg = cfg
        self.save_dir = save_dir

        # Size-specific model hyperparameters (e.g. cfg.models.base / large).
        model_cfg = cfg.models[cfg.models.model_size]

        self.main_batch_size = cfg.dataset.batch_size
        self.downstream_batch_size = cfg.downstream_task.batch_size

        # NOTE(review): several kwargs (use_learnable_pos_emb, mask_ratio,
        # attention_type, mask_loss, use_cls_token) are not part of the stock
        # HuggingFace VideoMAEConfig — this presumably targets a patched
        # transformers build; confirm against the installed package.
        self.image_model_config = VideoMAEConfig(
            image_size=model_cfg.image_size,
            patch_size=model_cfg.patch_size,
            num_channels=model_cfg.num_channels,
            num_frames=cfg.augmentations.imaging.time_sample.result_n_frames,
            tubelet_size=model_cfg.tubelet_size,
            hidden_size=model_cfg.hidden_size,
            num_hidden_layers=model_cfg.num_hidden_layers,
            num_attention_heads=model_cfg.num_attention_heads,
            intermediate_size=model_cfg.intermediate_size,
            hidden_act=model_cfg.hidden_act,
            hidden_dropout_prob=model_cfg.hidden_dropout_prob,
            attention_probs_dropout_prob=model_cfg.attention_probs_dropout_prob,
            drop_path_rate=model_cfg.drop_path_rate,
            initializer_range=model_cfg.initializer_range,
            layer_norm_eps=model_cfg.layer_norm_eps,
            decoder_num_attention_heads=model_cfg.decoder_num_attention_heads,
            decoder_hidden_size=model_cfg.decoder_hidden_size,
            decoder_num_hidden_layers=model_cfg.decoder_num_hidden_layers,
            decoder_intermediate_size=model_cfg.decoder_intermediate_size,
            use_learnable_pos_emb=model_cfg.use_learnable_pos_emb,
            mask_ratio=model_cfg.mask_ratio,
            attention_type=model_cfg.attention_type,
            mask_loss=model_cfg.mask_loss,
            use_cls_token=model_cfg.use_cls_token
        )

        self.image_model = VideoMAEForPreTraining(config=self.image_model_config)
        self.model_cfg = model_cfg

        self._init_task_dependent_components(cfg=cfg)

        # Two optimizers (backbone / probe head) are stepped by hand below.
        self.automatic_optimization = False

    def _init_task_dependent_components(self, cfg: DictConfig) -> None:
        """Create the probe head, its criteria, metrics and feature LayerNorm.

        Separate train/val criterion instances are kept so any internal
        criterion state is not shared across phases.
        """
        last_layer_config = LastLayerConfiguration(cfg=cfg, encoded_dim=self.image_model_config.hidden_size, mask_labels=False)

        self.task = last_layer_config.task
        self.last_layer = last_layer_config.create_last_layer()

        self.last_layer_criterion_train = last_layer_config.create_criterion()
        self.last_layer_criterion_val = last_layer_config.create_criterion()

        self.downstream_task_metrics_val = last_layer_config.create_metrics()

        # The probe is only trained/evaluated every N epochs.
        self.train_every_n_epochs = self.cfg.downstream_task.epoch_interval

        # Normalizes pooled backbone features before the probe head;
        # trained together with the probe (see configure_optimizers).
        self.imag_layernorm_regression = nn.LayerNorm(normalized_shape=self.image_model_config.hidden_size, eps=1e-12)

    def forward_masked(self, batch: torch.Tensor) -> torch.Tensor:
        """Return the masked-autoencoder reconstruction loss for a video batch."""
        return self.image_model(pixel_values=batch, apply_masking=True).loss

    def forward_linear_probe(self, batch, mode: str = 'train') -> torch.Tensor:
        """Run an unmasked backbone forward and score the linear probe.

        Args:
            batch: ``(pixel_values, labels)`` tuple from the downstream loader.
            mode: ``'train'`` or ``'val'``; selects the criterion and, for
                ``'val'``, also updates the validation metrics.

        Returns:
            The probe loss.

        Raises:
            ValueError: If ``mode`` is not ``'train'`` or ``'val'``.
        """
        pixel_values, labels = batch
        hidden_state = self.image_model.videomae(pixel_values=pixel_values, apply_masking=False).last_hidden_state

        # Drop the CLS token (position 0) when present; pool patch tokens only.
        if self.image_model_config.use_cls_token:
            all_tokens, _ = hidden_state[:, 1:], hidden_state[:, 0]
        else:
            all_tokens = hidden_state

        features = torch.mean(all_tokens, dim=1)
        features = self.imag_layernorm_regression(features)

        logits = self.last_layer(features)

        if mode == 'train':
            loss = self.last_layer_criterion_train(logits, labels)
        elif mode == 'val':
            loss = self.last_layer_criterion_val(logits, labels)
            self.downstream_task_metrics_val.update(preds=logits, labels=labels)
        else:
            # Fix: previously an unknown mode left `loss` unbound (NameError).
            raise ValueError(f"Unknown mode: {mode!r}; expected 'train' or 'val'")

        return loss

    def training_step(self, batch: torch.Tensor, batch_idx: int, dataloader_idx: int = 0) -> torch.Tensor | None:
        """Manually optimize either the backbone or the probe head.

        Dataloader 0 updates the backbone with the masked loss; dataloader 1
        updates only the probe, and only on probe-training epochs.
        """
        backbone_opt, last_layer_opt = self.optimizers()
        backbone_sch, last_layer_sch = self.lr_schedulers()

        if dataloader_idx == 0:
            batch_main = batch['main']

            masked_loss = self.forward_masked(batch_main)

            self.log("train/masked_loss", masked_loss, on_step=True, on_epoch=True, logger=True, sync_dist=True, prog_bar=True, batch_size=self.main_batch_size)

            self.manual_backward(masked_loss)
            backbone_opt.step()
            backbone_sch.step()
            backbone_opt.zero_grad()

            return masked_loss

        if dataloader_idx == 1:
            if self.trainer.current_epoch % self.train_every_n_epochs == 0:
                batch_downstream = batch['downstream']

                downstream_loss = self.forward_linear_probe(batch_downstream, mode='train')

                self.log("train/downstream_loss", downstream_loss, on_step=False, on_epoch=True, logger=True, prog_bar=True, batch_size=self.downstream_batch_size)

                self.manual_backward(downstream_loss)
                last_layer_opt.step()
                last_layer_sch.step()
                last_layer_opt.zero_grad()

                # Fix: the return must stay inside this guard — previously it
                # referenced `downstream_loss` on skipped epochs (NameError).
                return downstream_loss

        return None

    def validation_step(self, batch, batch_idx: int, dataloader_idx: int = 0) -> None:
        """Log the masked val loss; evaluate the probe on probe epochs only."""
        if dataloader_idx == 0:
            masked_loss = self.forward_masked(batch)

            self.log("val/masked_loss", masked_loss, on_step=False, on_epoch=True, logger=True, batch_size=self.main_batch_size, prog_bar=True)

        if dataloader_idx == 1:
            if self.trainer.current_epoch % self.train_every_n_epochs == 0:
                downstream_loss = self.forward_linear_probe(batch, mode='val')
                self.log("val/downstream_loss", downstream_loss, on_step=False, on_epoch=True, logger=True, batch_size=self.downstream_batch_size, prog_bar=False)

    def on_validation_epoch_end(self) -> None:
        """Compute, log and reset downstream metrics on probe epochs."""
        if self.trainer.current_epoch % self.train_every_n_epochs != 0:
            return

        metrics = self.downstream_task_metrics_val.compute()

        for name, value in metrics.items():
            self.log(f'val/downstream_{name}', value, logger=True, batch_size=self.downstream_batch_size, add_dataloader_idx=True, prog_bar=False)

        # Fix: previously recomputed and re-logged once per metric inside the
        # loop above; the combined checkpoint-monitor value is logged once.
        monitor_metric = metrics['Mean_RV'] + metrics['Mean_LV']
        self.log('val/downstream_monitor_metric', monitor_metric, logger=True, batch_size=self.downstream_batch_size, add_dataloader_idx=True, prog_bar=False)

        self.downstream_task_metrics_val.reset()

    def on_save_checkpoint(self, checkpoint: dict) -> None:
        """Export the bare backbone in HuggingFace format alongside each checkpoint."""
        hf_save_path = os.path.join(self.save_dir, "hf_model")
        os.makedirs(hf_save_path, exist_ok=True)
        self.image_model.videomae.save_pretrained(hf_save_path)

    def num_steps(self, mode: str = 'main') -> int:
        """Get number of steps per epoch and batch size.

        Args:
            mode: Dataloader key, ``'main'`` or ``'downstream'``.

        NOTE(review): relies on the private ``fit_loop._data_source`` Lightning
        attribute — verify on Lightning upgrades.
        """
        dataset = self.trainer.fit_loop._data_source.dataloader()[mode]
        dataset_size = len(dataset)
        num_devices = max(1, self.trainer.num_devices)
        num_steps = dataset_size // (self.trainer.accumulate_grad_batches * num_devices)
        return num_steps

    def configure_optimizers(self):
        """Create separate optimizer/scheduler pairs for backbone and probe.

        Order matters: ``training_step`` unpacks them as
        ``(backbone, last_layer)``.
        """
        main_optimizer, main_scheduler = create_optimizer_and_scheduler(
            models=[self.image_model],
            optimizer_params=self.cfg.models.params,
            num_batches_per_epoch=self.num_steps(mode='main')
        )

        last_layer_optimizer, last_layer_scheduler = create_optimizer_and_scheduler(
            models=[self.last_layer, self.imag_layernorm_regression],
            optimizer_params=self.cfg.downstream_task.params,
            num_batches_per_epoch=self.num_steps(mode='downstream'),
        )
        return [main_optimizer, last_layer_optimizer], [main_scheduler, last_layer_scheduler]

    def configure_callbacks(self) -> Sequence[Callback] | Callback:
        """Attach checkpointing (on masked val loss), LR monitoring and a
        late-training weight-decay bump."""
        checkpoint_folder = os.path.join(self.save_dir, 'checkpoints')
        os.makedirs(checkpoint_folder, exist_ok=True)

        # Raise weight decay to 0.05 for the final 20% of training.
        weight_decay_update = WeightDecayAdjustmentCallback(increase_epoch=int(self.cfg.max_epochs*0.8),
                                                            new_weight_decay=0.05)

        checkpoint_callback = ModelCheckpoint(
            dirpath=checkpoint_folder,
            auto_insert_metric_name=True,
            # Monitors the pretraining loss (dataloader 0), not the probe.
            monitor='val/masked_loss/dataloader_idx_0',
            mode='min',
            save_top_k=self.cfg.save_top_k,
            every_n_epochs=self.cfg.check_val_every_n_epoch,
            save_on_train_epoch_end=False,
            save_weights_only=False,
            verbose=True
            )

        learning_rate_callback = LearningRateMonitor(logging_interval='epoch',
                                                     log_weight_decay=True,
                                                     log_momentum=False)

        return [learning_rate_callback, checkpoint_callback, weight_decay_update]
