import os

from typing import Sequence
from omegaconf import DictConfig

import torch
from torch import nn
import lightning as L

from lightning import Callback
from lightning.pytorch.callbacks import ModelCheckpoint, LearningRateMonitor

from transformers import VideoMAEConfig, VideoMAEModel
from ecgcmr.utils.positional_embeddings import get_multi_sincos_pos_embed

from ecgcmr.utils.LastLayer import LastLayerConfiguration
from ecgcmr.utils.misc import create_optimizer_and_scheduler

from ecgcmr.multimodal.multimodal_models.EmbeddingsReduction import AttentionPooling


class ImageViTEval(L.LightningModule):
    """Evaluate a VideoMAE image encoder on a downstream regression task.

    Two modes, selected by ``cfg.training_mode.task``:

    * ``'img_finetune_masked'`` — load a pretrained VideoMAE checkpoint and
      linear-probe or fine-tune it.
    * ``'img_pretrain_supervised'`` — build a fresh VideoMAE encoder from the
      model config and train it supervised from scratch.
    """

    def __init__(
        self,
        cfg: DictConfig,
        save_dir: str,
        pretrained_model_name_or_path: str | None = None,
        ) -> None:
        """Build the encoder and the task-dependent prediction head.

        Args:
            cfg: experiment configuration (``models``, ``downstream_task``,
                ``training_mode``, ``augmentations``, ...).
            save_dir: directory used for Lightning checkpoints and the
                HuggingFace model export.
            pretrained_model_name_or_path: HF checkpoint path/name; required
                when ``cfg.training_mode.task == 'img_finetune_masked'``.
        """
        super().__init__()

        self.cfg = cfg
        self.save_dir = save_dir
        self.mask_labels = False  # labels are never masked in this eval module

        self.batch_size = cfg.downstream_task.batch_size

        if cfg.training_mode.task == 'img_finetune_masked':
            # Load a pretrained encoder, freeze it per the training scheme,
            # and adapt the positional embedding if the frame count changed.
            self._initialize_model(pretrained_model_name_or_path=pretrained_model_name_or_path)

        elif cfg.training_mode.task == 'img_pretrain_supervised':
            # Fresh encoder built entirely from the selected model-size config.
            model_cfg = cfg.models[cfg.models.model_size]

            self.imag_encoder_config = VideoMAEConfig(
                image_size=model_cfg.image_size,
                patch_size=model_cfg.patch_size,
                num_channels=model_cfg.num_channels,
                num_frames=cfg.augmentations.imaging.time_sample.result_n_frames,
                tubelet_size=model_cfg.tubelet_size,
                hidden_size=model_cfg.hidden_size,
                num_hidden_layers=model_cfg.num_hidden_layers,
                num_attention_heads=model_cfg.num_attention_heads,
                intermediate_size=model_cfg.intermediate_size,
                hidden_act=model_cfg.hidden_act,
                hidden_dropout_prob=model_cfg.hidden_dropout_prob,
                attention_probs_dropout_prob=model_cfg.attention_probs_dropout_prob,
                drop_path_rate=model_cfg.drop_path_rate,
                initializer_range=model_cfg.initializer_range,
                layer_norm_eps=model_cfg.layer_norm_eps,
                use_learnable_pos_emb=model_cfg.use_learnable_pos_emb,
                mask_ratio=model_cfg.mask_ratio,
                attention_type=model_cfg.attention_type,
                mask_loss=model_cfg.mask_loss,
                use_cls_token=model_cfg.use_cls_token,
                layerscale_init_values=model_cfg.layerscale_init_values,
            )

            self.imag_encoder = VideoMAEModel(config=self.imag_encoder_config)

        self._init_task_dependent_components(cfg=cfg)

        # Best validation scores seen so far, updated in on_validation_epoch_end.
        self.best_mean_R2 = -float('inf')
        self.best_mean_MAE = float('inf')

    def _initialize_model(self, pretrained_model_name_or_path: str) -> None:
        """Load a pretrained VideoMAE encoder and prepare it for evaluation."""
        self.imag_encoder_config = VideoMAEConfig.from_pretrained(pretrained_model_name_or_path=pretrained_model_name_or_path)
        self.imag_encoder = VideoMAEModel.from_pretrained(pretrained_model_name_or_path=pretrained_model_name_or_path, config=self.imag_encoder_config)

        self._freeze_encoder()

        # The pretrained positional embedding is tied to the pretraining frame
        # count; rebuild it when the downstream clips use a different length.
        if self.imag_encoder_config.num_frames != self.cfg.downstream_task.augmentations.imaging.time_sample.result_n_frames:
            print('Updating pos embed')
            self._update_grid_pos_embed()

    def _freeze_encoder(self):
        """Freeze the encoder, then selectively unfreeze blocks for fine-tuning.

        ``n_blocks_to_unfreeze == -1`` unfreezes the whole encoder; ``0`` keeps
        everything frozen (linear probing); a positive N unfreezes the last N
        transformer blocks.
        """
        training_scheme = self.cfg.downstream_task.training_scheme
        n_blocks_to_unfreeze = self.cfg.downstream_task.n_blocks_to_unfreeze

        # Default: everything frozen and in eval mode (disables dropout etc.).
        for param in self.imag_encoder.parameters():
            param.requires_grad = False

        self.imag_encoder.eval()

        # The encoder's final layernorm is not used by this module (forward
        # passes use_layernorm=False), so it always stays frozen.
        for param in self.imag_encoder.layernorm.parameters(): # we don't use it here
            param.requires_grad = False

        if training_scheme == "fine_tune":
            if n_blocks_to_unfreeze == -1:
                # Full fine-tuning: everything trainable again.
                self.imag_encoder.train()

                for param in self.imag_encoder.parameters():
                    param.requires_grad = True

            elif n_blocks_to_unfreeze != 0:
                # Partial fine-tuning: only the last N transformer blocks.
                self.imag_encoder.train()
                encoder_layers = self.imag_encoder.encoder.layer
                for layer in encoder_layers[-n_blocks_to_unfreeze:]:
                    for param in layer.parameters():
                        param.requires_grad = True

    def _update_grid_pos_embed(self):
        """Rebuild the (frames, H, W) sin-cos positional embedding for the
        downstream clip length, keeping it learnable afterwards."""
        tubelet_size, patch_size = self.imag_encoder_config.tubelet_size, self.imag_encoder_config.patch_size
        result_n_frames = self.cfg.downstream_task.augmentations.imaging.time_sample.result_n_frames

        # Token grid: temporal tubelets x spatial patches (square images).
        grid_size = (result_n_frames//tubelet_size,
                     self.imag_encoder_config.image_size//patch_size,
                     self.imag_encoder_config.image_size//patch_size)

        self.imag_encoder.embeddings.patch_embeddings.grid_size = grid_size

        self.imag_encoder.embeddings.position_embeddings = nn.Parameter(
            get_multi_sincos_pos_embed(grid_size=grid_size,
                                       embed_dim=self.imag_encoder_config.hidden_size,
                                       add_cls_token=self.imag_encoder_config.use_cls_token),
            requires_grad=True)

    def _init_task_dependent_components(self, cfg: DictConfig):
        """Create the prediction head, criterion, metrics, plotter and the
        feature-reduction components for the downstream task."""
        use_mlp = cfg.downstream_task.get('use_mlp', False)
        training_scheme = cfg.downstream_task.get('training_scheme', 'linear_probing')

        # Short tags used in metric names (e.g. 'val_LP_MLP/...').
        if training_scheme == 'linear_probing':
            self.training_scheme_setting = "LP"
        elif training_scheme == 'fine_tune':
            self.training_scheme_setting = "FN"

        self.use_mlp_setting = 'MLP' if use_mlp else 'LinearLayer'

        last_layer_config = LastLayerConfiguration(cfg=cfg,
                                                   encoded_dim=self.imag_encoder_config.hidden_size,
                                                   mask_labels=False,
                                                   training_scheme=self.training_scheme_setting,
                                                   use_mlp=self.use_mlp_setting)
        self.task = last_layer_config.task
        self.last_layer = last_layer_config.create_last_layer()

        self.last_layer_criterion = last_layer_config.create_criterion()

        self.downstream_task_metrics_val = last_layer_config.create_metrics()
        self.plotter_val = last_layer_config.create_plotter()
        self.regression = last_layer_config.create_sklearn_model()

        # Layernorm applied to pooled features before the head; kept in fp32
        # and re-initialized to identity (weight=1, bias=0).
        self.imag_layernorm_regression = nn.LayerNorm(normalized_shape=self.imag_encoder_config.hidden_size,
                                                      eps=self.imag_encoder_config.layer_norm_eps, dtype=torch.float32)

        nn.init.constant_(self.imag_layernorm_regression.weight, 1.0)
        nn.init.constant_(self.imag_layernorm_regression.bias, 0.0)

        if self.cfg.downstream_task.reduction.imag == 'attention_pooling':
            self.imag_global_token_attention_pool = AttentionPooling(embed_dim=self.imag_encoder_config.hidden_size,
                                                                     num_heads=self.cfg.downstream_task.reduction.num_heads)

    def forward(self, pixel_values: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Encode a video clip, reduce tokens to one feature vector, predict.

        Args:
            pixel_values: video batch for the VideoMAE encoder.

        Returns:
            Tuple ``(logits, features)``: head predictions and the pooled
            encoder features they were computed from.

        Raises:
            ValueError: if the configured reduction method is unknown, or
                ``'cls_token'`` is requested without a CLS token.
        """
        hidden_state = self.imag_encoder(pixel_values=pixel_values, apply_masking=False, use_layernorm=False).last_hidden_state

        if self.imag_encoder_config.use_cls_token:
            all_tokens, global_token = hidden_state[:, 1:], hidden_state[:, 0]
        else:
            all_tokens, global_token = hidden_state, None

        method = self.cfg.downstream_task.reduction.imag
        if method == 'mean':
            features = torch.mean(all_tokens, dim=1)
            features = self.imag_layernorm_regression(features)

        elif method == 'attention_pooling':
            features = self.imag_global_token_attention_pool(all_tokens)
            features = self.imag_layernorm_regression(features)
            # NOTE(review): squeeze() without a dim also drops the batch
            # dimension when batch_size == 1 — confirm AttentionPooling's
            # output shape and consider squeeze(dim=1) instead.
            features = torch.squeeze(features)

        elif method == 'cls_token':
            if global_token is None:
                raise ValueError("reduction method 'cls_token' requires use_cls_token=True in the encoder config")
            features = global_token

        else:
            raise ValueError(f"Unknown imaging reduction method: {method!r}")

        logits = self.last_layer(features)
        return logits, features

    def training_step(self, batch: torch.Tensor, batch_idx: int) -> dict:
        """One training step: forward, loss, and online-regression update."""
        pixel_values, labels = batch
        logits, features = self(pixel_values)
        loss = self.last_layer_criterion(logits, labels)

        # Feed detached features to the auxiliary sklearn-style regressor.
        self.regression.update(feats=features.detach().cpu().numpy(), labels=labels.detach().cpu().numpy(), stage='train')

        self.log(f"train/{self.task}.loss", loss, on_step=True,
                 on_epoch=True, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        return {'loss': loss,
                'pred': logits,
                'features': features,
                'label': labels}

    def validation_step(self, batch: torch.Tensor, batch_idx: int) -> dict:
        """One validation step: forward, loss, and metric/regressor updates."""
        pixel_values, labels = batch
        logits, features = self(pixel_values)
        loss = self.last_layer_criterion(logits, labels)

        self.downstream_task_metrics_val.update(preds=logits, labels=labels)
        self.regression.update(feats=features.detach().cpu().numpy(), labels=labels.detach().cpu().numpy(), stage='val')

        self.log(f"val/{self.task}.loss", loss, on_step=False,
                  on_epoch=True, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        return {'loss': loss,
                'pred': logits,
                'features': features,
                'label': labels}

    def on_validation_epoch_end(self):
        """Aggregate validation metrics, track the best R2 score, and emit
        best-so-far plots and logs when a new best is reached."""
        metrics, preds, labels = self.downstream_task_metrics_val.compute()

        # NOTE(review): despite the name this is the SUM of RV and LV R2,
        # not their mean — confirm whether that is intended.
        mean_R2 = metrics[f'val_{self.training_scheme_setting}_{self.use_mlp_setting}/Mean_RV_R2'] + metrics[f'val_{self.training_scheme_setting}_{self.use_mlp_setting}/Mean_LV_R2']
        self.log('downstream_mean_R2', mean_R2, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        if mean_R2 > self.best_mean_R2:
            self.best_mean_R2 = mean_R2

            # Re-log all metrics under the 'best_mean_R2/' namespace so the
            # logger always shows the values at the best epoch.
            best_metrics = {f"best_mean_R2/best_mean_R2": self.best_mean_R2}
            for name, value in metrics.items():
                best_metrics[f"best_mean_R2/{name}"] = value

            for name, value in best_metrics.items():
                self.log(name, value, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

            self.plotter_val.plot_results(preds=preds, labels=labels, prefix=f"best_mean_R2_plots")

        self.downstream_task_metrics_val.reset()

        # Fit/evaluate the auxiliary regressor for its side effects; its
        # return value was never used here.
        self.regression.compute()

    def num_steps(self) -> int:
        """Estimate optimizer steps per epoch from the train dataloader length,
        gradient accumulation and device count."""
        dataset = self.trainer.fit_loop._data_source.dataloader()
        dataset_size = len(dataset)
        num_devices = max(1, self.trainer.num_devices)
        num_steps = dataset_size // (self.trainer.accumulate_grad_batches * num_devices)
        return num_steps

    def on_save_checkpoint(self, checkpoint: dict) -> None:
        """Export the encoder in HuggingFace format alongside every Lightning
        checkpoint so it can be reloaded with ``from_pretrained``."""
        hf_save_path = os.path.join(self.save_dir, "hf_model")
        os.makedirs(hf_save_path, exist_ok=True)
        self.imag_encoder.save_pretrained(hf_save_path)

    def configure_optimizers(self) -> dict:
        """Create the optimizer/scheduler pair for Lightning.

        Fine-tuning a pretrained checkpoint uses the downstream-task optimizer
        params; supervised pretraining uses the model params.
        """
        # BUGFIX: this previously compared against 'imaging_finetune_masked',
        # which never matches the 'img_finetune_masked' value used in
        # __init__, so the downstream-task params were silently never applied.
        optimizer, scheduler = create_optimizer_and_scheduler(
            models=[self],
            optimizer_params=self.cfg.downstream_task.params if self.cfg.training_mode.task == 'img_finetune_masked' else self.cfg.models.params,
            num_batches_per_epoch=self.num_steps()
        )
        return {"optimizer": optimizer, "lr_scheduler": scheduler}

    def configure_callbacks(self) -> Sequence[Callback] | Callback:
        """Attach a min-val-loss ModelCheckpoint and an LR monitor."""
        checkpoint_folder = os.path.join(self.save_dir, 'checkpoints')
        os.makedirs(checkpoint_folder, exist_ok=True)

        checkpoint_callback = ModelCheckpoint(
            dirpath=checkpoint_folder,
            auto_insert_metric_name=True,
            monitor='val/regression.loss',
            mode='min',
            save_top_k=self.cfg.save_top_k,
            every_n_epochs=self.cfg.check_val_every_n_epoch,
            save_on_train_epoch_end=False,
            save_weights_only=False,
            verbose=True)

        learning_rate_callback = LearningRateMonitor(logging_interval='epoch', log_momentum=False, log_weight_decay=True)

        return [learning_rate_callback, checkpoint_callback]