import os

from typing import Sequence
from omegaconf import DictConfig

import torch
import lightning as L
from torch import nn

from lightning import Callback
from lightning.pytorch.callbacks import ModelCheckpoint, LearningRateMonitor

from transformers import ViTMAEConfig, ViTMAEModel
from ecgcmr.utils.positional_embeddings import get_multi_sincos_pos_embed

from ecgcmr.utils.LastLayer import LastLayerConfiguration
from ecgcmr.multimodal.multimodal_models.EmbeddingsReduction import AttentionPooling
from ecgcmr.utils.misc import create_optimizer_and_scheduler


class ECGViTEval(L.LightningModule):
    """Evaluate a ViT(-MAE) ECG encoder on a downstream regression task.

    Two modes, selected by ``cfg.training_mode.task``:

    * ``'ecg_finetune_masked'`` -- load a pretrained ViT-MAE checkpoint and
      linear-probe or (partially) fine-tune it.
    * ``'ecg_pretrain_supervised'`` -- build a fresh ViT-MAE encoder from the
      config and train it end-to-end with supervision.

    The encoder's token sequence is reduced to one feature vector per sample
    (``mean`` / ``attention_pooling`` / ``cls_token``), which is fed into a
    task-specific head created by :class:`LastLayerConfiguration`.
    """

    def __init__(
            self,
            cfg: DictConfig,
            save_dir: str,
            pretrained_model_name_or_path: str | None = None,
        ) -> None:
        """Build the encoder and the task-dependent head.

        Args:
            cfg: OmegaConf config; reads ``training_mode.task``,
                ``downstream_task.*`` and ``models.*``.
            save_dir: Directory under which checkpoints are written.
            pretrained_model_name_or_path: HF-style checkpoint location;
                required when ``cfg.training_mode.task == 'ecg_finetune_masked'``.

        Raises:
            ValueError: If ``cfg.training_mode.task`` is not one of the two
                supported modes (fail fast instead of a later AttributeError).
        """
        super().__init__()

        self.cfg = cfg
        self.save_dir = save_dir
        # Labels are never masked in this evaluation module; forwarded to the
        # last-layer factory which expects the flag.
        self.mask_labels = False

        self.batch_size = cfg.downstream_task.batch_size

        if cfg.training_mode.task == 'ecg_finetune_masked':
            self._initialize_model(pretrained_model_name_or_path)

        elif cfg.training_mode.task == 'ecg_pretrain_supervised':
            model_cfg = cfg.models[cfg.models.model_size]

            # The ECG is treated as a single-channel 2D "image":
            # (electrodes, time steps).
            self.image_size = (cfg.downstream_task.input_electrodes,
                               cfg.downstream_task.augmentations.ecg.random_crop.ecg_time_steps)
            self.patch_size = tuple(model_cfg.patch_size)

            self.ecg_encoder_config = ViTMAEConfig(
                hidden_size=model_cfg.hidden_size,
                num_hidden_layers=model_cfg.num_hidden_layers,
                num_attention_heads=model_cfg.num_attention_heads,
                intermediate_size=model_cfg.intermediate_size,
                hidden_act=model_cfg.hidden_act,
                hidden_dropout_prob=model_cfg.hidden_dropout_prob,
                attention_probs_dropout_prob=model_cfg.attention_probs_dropout_prob,
                drop_path_rate=model_cfg.drop_path_rate,
                initializer_range=model_cfg.initializer_range,
                layer_norm_eps=model_cfg.layer_norm_eps,
                qkv_bias=model_cfg.qkv_bias,
                image_size=self.image_size,
                patch_size=self.patch_size,
                num_channels=1,
                decoder_intermediate_size=model_cfg.decoder_intermediate_size,
                decoder_num_attention_heads=model_cfg.decoder_num_attention_heads,
                decoder_hidden_size=model_cfg.decoder_hidden_size,
                decoder_num_hidden_layers=model_cfg.decoder_num_hidden_layers,
                mask_ratio=model_cfg.mask_ratio,
                mask_loss=model_cfg.mask_loss,
                use_cls_token=model_cfg.use_cls_token,
                use_learnable_pos_emb=model_cfg.use_learnable_pos_emb,
                layerscale_init_values=model_cfg.layerscale_init_values,
            )

            self.ecg_encoder = ViTMAEModel(config=self.ecg_encoder_config)

        else:
            raise ValueError(
                f"Unsupported training_mode.task: {cfg.training_mode.task!r}; "
                "expected 'ecg_finetune_masked' or 'ecg_pretrain_supervised'")

        self._initialize_task_dependent_components(cfg)

        # Running bests across validation epochs (R2: higher is better,
        # MAE: lower is better).
        self.best_mean_R2 = -float('inf')
        self.best_mean_MAE = float('inf')

    def _initialize_model(self, pretrained_model_name_or_path: str) -> None:
        """Load a pretrained ViT-MAE encoder, freeze it per the config, and
        rebuild positional embeddings if the downstream crop length differs
        from the pretraining time dimension."""
        self.ecg_encoder_config = ViTMAEConfig.from_pretrained(pretrained_model_name_or_path)
        self.ecg_encoder = ViTMAEModel.from_pretrained(pretrained_model_name_or_path, config=self.ecg_encoder_config)

        self._freeze_encoder()

        # image_size[1] is the time dimension of the pretraining input.
        if self.ecg_encoder_config.image_size[1] != self.cfg.downstream_task.augmentations.ecg.random_crop.ecg_time_steps:
            print('Updating pos embed')
            self._init_pos_embed()

    def _init_pos_embed(self) -> None:
        """Resize the patch grid and replace positional embeddings with fixed
        (non-trainable) multi-axis sin-cos embeddings for the new input size."""
        self.input_electrodes = self.cfg.downstream_task.input_electrodes
        self.ecg_time_steps = self.cfg.downstream_task.augmentations.ecg.random_crop.ecg_time_steps

        grid_size = (self.input_electrodes // self.ecg_encoder_config.patch_size[0],
                     self.ecg_time_steps // self.ecg_encoder_config.patch_size[1])

        new_image_size = (self.input_electrodes, self.ecg_time_steps)

        # Keep the patch-embedding module consistent with the new geometry.
        self.ecg_encoder.embeddings.patch_embeddings.image_size = new_image_size
        self.ecg_encoder.embeddings.patch_embeddings.grid_size = grid_size

        self.ecg_encoder.embeddings.position_embeddings = nn.Parameter(
            get_multi_sincos_pos_embed(grid_size=grid_size,
                                       embed_dim=self.ecg_encoder_config.hidden_size,
                                       add_cls_token=self.ecg_encoder_config.use_cls_token),
            requires_grad=False  # sin-cos embeddings stay fixed
        )

    def _freeze_encoder(self) -> None:
        """Freeze the encoder, then selectively re-enable training.

        ``training_scheme == 'fine_tune'`` with ``n_blocks_to_unfreeze``:
        ``-1`` unfreezes everything, ``0`` keeps all frozen, ``k > 0``
        unfreezes the last ``k`` transformer blocks. The final encoder
        layernorm is always kept frozen because this module never uses it
        (``use_layernorm=False`` in :meth:`forward`).
        """
        training_scheme = self.cfg.downstream_task.training_scheme
        n_blocks_to_unfreeze = self.cfg.downstream_task.n_blocks_to_unfreeze

        for param in self.ecg_encoder.parameters():
            param.requires_grad = False

        self.ecg_encoder.eval()

        if training_scheme == "fine_tune":
            if n_blocks_to_unfreeze == -1:
                # Full fine-tuning.
                self.ecg_encoder.train()

                for param in self.ecg_encoder.parameters():
                    param.requires_grad = True

            elif n_blocks_to_unfreeze != 0:
                # Partial fine-tuning of the last n blocks.
                self.ecg_encoder.train()
                encoder_layers = self.ecg_encoder.encoder.layer
                for layer in encoder_layers[-n_blocks_to_unfreeze:]:
                    for param in layer.parameters():
                        param.requires_grad = True

        for param in self.ecg_encoder.layernorm.parameters():  # we don't use it
            param.requires_grad = False

    def _initialize_task_dependent_components(self, cfg: DictConfig) -> None:
        """Create the task head, criterion, metrics, plotter and the feature
        layernorm / attention-pooling modules from the downstream config.

        Raises:
            ValueError: On an unrecognized ``training_scheme`` (previously
                this silently left ``training_scheme_setting`` unset).
        """
        use_mlp = cfg.downstream_task.get('use_mlp', False)
        training_scheme = cfg.downstream_task.get('training_scheme', 'linear_probing')

        if training_scheme == 'linear_probing':
            self.training_scheme_setting = "LP"
        elif training_scheme == 'fine_tune':
            self.training_scheme_setting = "FN"
        else:
            raise ValueError(f"Unknown training_scheme: {training_scheme!r}")

        self.use_mlp_setting = 'MLP' if use_mlp else 'LinearLayer'

        last_layer_config = LastLayerConfiguration(cfg, self.ecg_encoder_config.hidden_size, self.mask_labels,
                                                   training_scheme=self.training_scheme_setting,
                                                   use_mlp=self.use_mlp_setting)
        self.task = last_layer_config.task
        self.last_layer = last_layer_config.create_last_layer()

        self.last_layer_criterion = last_layer_config.create_criterion()

        self.downstream_task_metrics = last_layer_config.create_metrics()
        self.plotter_val = last_layer_config.create_plotter()

        # Fresh (identity-initialized) layernorm applied to the pooled
        # features before the regression head.
        self.ecg_layernorm_regression = nn.LayerNorm(normalized_shape=self.ecg_encoder_config.hidden_size,
                                                     eps=self.ecg_encoder_config.layer_norm_eps, dtype=torch.float32)

        nn.init.constant_(self.ecg_layernorm_regression.weight, 1.0)
        nn.init.constant_(self.ecg_layernorm_regression.bias, 0.0)

        if self.cfg.downstream_task.reduction.ecg == 'attention_pooling':
            self.ecg_global_token_attention_pool = AttentionPooling(embed_dim=self.ecg_encoder_config.hidden_size,
                                                                    num_heads=self.cfg.downstream_task.reduction.num_heads)

    def forward(self, pixel_values: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Encode the ECG, reduce tokens to one feature vector, and predict.

        Returns:
            ``(logits, features)`` -- head output and the pooled features.

        Raises:
            ValueError: If ``reduction.ecg`` is unknown, or ``'cls_token'``
                is requested while the encoder has no CLS token (previously
                both paths died with a ``NameError``).
        """
        hidden_state = self.ecg_encoder(pixel_values=pixel_values, apply_masking=False, use_layernorm=False).last_hidden_state

        if self.ecg_encoder_config.use_cls_token:
            # Token 0 is the CLS token; the rest are patch tokens.
            all_tokens, global_token = hidden_state[:, 1:], hidden_state[:, 0]
        else:
            all_tokens, global_token = hidden_state, None

        method = self.cfg.downstream_task.reduction.ecg

        if method == 'mean':
            features = torch.mean(all_tokens, dim=1)
            features = self.ecg_layernorm_regression(features)

        elif method == 'attention_pooling':
            features = self.ecg_global_token_attention_pool(all_tokens)
            features = self.ecg_layernorm_regression(features)
            # Squeeze only the pooled-token axis; a bare squeeze() would also
            # drop the batch axis when batch size is 1.
            # NOTE(review): assumes the pooling output is (B, 1, D) — with a
            # (B, D) output this is a no-op since hidden_size > 1.
            features = torch.squeeze(features, dim=1)

        elif method == 'cls_token':
            if global_token is None:
                raise ValueError("reduction 'cls_token' requires use_cls_token=True in the encoder config")
            features = global_token

        else:
            raise ValueError(f"Unknown ECG reduction method: {method!r}")

        logits = self.last_layer(features)
        return logits, features

    def training_step(self, batch: torch.Tensor, batch_idx: int) -> dict:
        """One training step: forward pass, loss, and per-step/epoch logging."""
        ecg, label = batch
        logits, features = self(ecg)
        loss = self.last_layer_criterion(logits, label)

        self.log(f"train/{self.task}.loss", loss, on_step=True, on_epoch=True, logger=True,
                 batch_size=self.batch_size, add_dataloader_idx=False)

        return {'loss': loss,
                'pred': logits,
                'features': features,
                'label': label}

    def validation_step(self, batch: torch.Tensor, batch_idx: int) -> dict:
        """One validation step: forward pass, loss, metric accumulation."""
        ecg, label = batch
        logits, features = self(ecg)
        loss = self.last_layer_criterion(logits, label)

        self.downstream_task_metrics.update(preds=logits, labels=label)

        self.log(f"val/{self.task}.loss", loss, on_step=False,
                 on_epoch=True, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        return {'loss': loss,
                'pred': logits,
                'features': features,
                'label': label}

    def on_validation_epoch_end(self):
        """Aggregate validation metrics, track best mean R2 / MAE, log the
        best-epoch metric snapshots and plots, then reset the metrics."""
        metrics, preds, labels = self.downstream_task_metrics.compute()

        # Combined score over right- and left-ventricle targets.
        mean_R2 = metrics[f'val_{self.training_scheme_setting}_{self.use_mlp_setting}/Mean_RV_R2'] + metrics[f'val_{self.training_scheme_setting}_{self.use_mlp_setting}/Mean_LV_R2']
        self.log('downstream mean R2 val', mean_R2, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        mean_MAE = metrics[f'val_{self.training_scheme_setting}_{self.use_mlp_setting}/Mean_RV_MAE'] + metrics[f'val_{self.training_scheme_setting}_{self.use_mlp_setting}/Mean_LV_MAE']
        self.log('downstream mean MAE val', mean_MAE, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        if mean_R2 > self.best_mean_R2:
            self.best_mean_R2 = mean_R2

            best_metrics = {"best_mean_R2/best_mean_R2": self.best_mean_R2}
            for name, value in metrics.items():
                best_metrics[f"best_mean_R2/{name}"] = value

            for name, value in best_metrics.items():
                self.log(name, value, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

            self.plotter_val.plot_results(preds=preds, labels=labels, prefix="best_mean_R2_plots")

        if mean_MAE < self.best_mean_MAE:
            self.best_mean_MAE = mean_MAE

            # Namespaced key to match the best-R2 branch above (was the bare
            # "best_mean_MAE").
            best_metrics = {"best_mean_MAE/best_mean_MAE": self.best_mean_MAE}
            for name, value in metrics.items():
                best_metrics[f"best_mean_MAE/{name}"] = value

            for name, value in best_metrics.items():
                self.log(name, value, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

            self.plotter_val.plot_results(preds=preds, labels=labels, prefix="best_mean_MAE_plots")

        self.downstream_task_metrics.reset()

    def num_steps(self) -> int:
        """Return optimizer steps per epoch: batches divided by gradient
        accumulation and device count.

        NOTE(review): relies on the private ``fit_loop._data_source`` Lightning
        API — may break across Lightning versions.
        """
        dataloader = self.trainer.fit_loop._data_source.dataloader()
        num_batches = len(dataloader)
        num_devices = max(1, self.trainer.num_devices)
        return num_batches // (self.trainer.accumulate_grad_batches * num_devices)

    def configure_optimizers(self) -> dict:
        """Create optimizer (and optional LR scheduler) from the config that
        matches the active training mode."""
        optimizer, scheduler = create_optimizer_and_scheduler(
            models=[self],
            optimizer_params=self.cfg.downstream_task.params if self.cfg.training_mode.task == 'ecg_finetune_masked' else self.cfg.models.params,
            num_batches_per_epoch=self.num_steps()
        )
        if scheduler is not None:
            return {"optimizer": optimizer, "lr_scheduler": scheduler}
        else:
            return optimizer

    def configure_callbacks(self) -> Sequence[Callback] | Callback:
        """Return checkpointing (monitoring 'downstream mean R2 val', max) and
        learning-rate monitoring callbacks."""
        checkpoint_folder = os.path.join(self.save_dir, 'checkpoints')
        os.makedirs(checkpoint_folder, exist_ok=True)

        checkpoint_callback = ModelCheckpoint(
            dirpath=checkpoint_folder,
            auto_insert_metric_name=True,
            monitor='downstream mean R2 val',
            mode='max',
            save_top_k=self.cfg.save_top_k,
            every_n_epochs=self.cfg.check_val_every_n_epoch,
            save_on_train_epoch_end=False,
            save_weights_only=False,
            verbose=True
            )
        learning_rate_callback = LearningRateMonitor(logging_interval='epoch', log_momentum=False, log_weight_decay=True)

        return [learning_rate_callback, checkpoint_callback]