import os
import wandb

from typing import Sequence
from omegaconf import DictConfig

import torch
import yaml
import lightning as L

from lightning import Callback
from lightning.pytorch.callbacks import ModelCheckpoint, LearningRateMonitor

from ecgcmr.utils.LastLayer import LastLayerConfiguration
from ecgcmr.utils.misc import create_optimizer_and_scheduler
from ecgcmr.utils.metrics import plot_confusion_matrix

from ecgcmr.utils.callbacks import LogCMRPhenoTypesGraph
from transformers import VideoMAEConfig, VideoMAEModel
from transformers.models.videomae.modeling_videomae import create_positional_embeddings


class ImageSimCLREval_ViT(L.LightningModule):
    """Linear-probe evaluation of a pretrained VideoMAE MRI encoder.

    Loads a frozen VideoMAE backbone from ``checkpoint_path`` and trains only
    a task-dependent last layer (classification or regression) on top of the
    mean-pooled encoder embeddings.
    """

    def __init__(self,
                cfg: DictConfig,
                save_dir: str,
                checkpoint_path: str,
                ) -> None:
        """Build the frozen backbone and the trainable task head.

        Args:
            cfg: Hydra config; reads ``downstream_task`` and ``dataset`` keys.
            save_dir: Directory under which checkpoints are written.
            checkpoint_path: Run directory of the pretraining job; must contain
                ``.hydra/config.yaml`` and an ``hf_model`` subfolder.
        """
        super().__init__()

        self.save_hyperparameters()

        self.cfg = cfg
        self.mask_labels = False

        # Recover the training-time hydra config of the pretrained run.
        with open(os.path.join(checkpoint_path, ".hydra/config.yaml"), 'r') as file:
            self.loaded_cfg = yaml.safe_load(file)

        checkpoint_path = os.path.join(checkpoint_path, "hf_model")

        self.save_dir = save_dir
        self.batch_size = cfg.downstream_task.batch_size

        self.mri_encoder_config = VideoMAEConfig.from_pretrained(pretrained_model_name_or_path=checkpoint_path)
        self.mri_encoder = VideoMAEModel.from_pretrained(pretrained_model_name_or_path=checkpoint_path)
        self.init_task_dependent_components(cfg=cfg, input_dim=self.mri_encoder_config.hidden_size)

        # Rebuild (frozen) positional embeddings for the downstream clip length.
        # BUG FIX: the original referenced ``self.imag_encoder`` /
        # ``self.imag_encoder_config``, attributes that were never assigned;
        # the backbone created above is ``self.mri_encoder``.
        self.mri_encoder.embeddings.position_embeddings = create_positional_embeddings(
            num_patches_t=cfg.dataset.result_n_frames // self.mri_encoder_config.tubelet_size,
            num_patches_x=self.mri_encoder_config.image_size // self.mri_encoder_config.patch_size,
            num_patches_y=self.mri_encoder_config.image_size // self.mri_encoder_config.patch_size,
            embed_dim=self.mri_encoder_config.hidden_size)
        self.mri_encoder.embeddings.position_embeddings.requires_grad_(False)

        self.mri_encoder.eval()
        self.freeze_backbone()

    def freeze_backbone(self) -> None:
        """Disable gradients for every backbone parameter (linear probing)."""
        for param in self.mri_encoder.parameters():
            param.requires_grad = False

    def init_task_dependent_components(self, cfg: DictConfig, input_dim: int) -> None:
        """Create the last layer, criteria and metric collections for the task.

        Args:
            cfg: Hydra config passed through to ``LastLayerConfiguration``.
            input_dim: Encoder hidden size consumed by the last layer.
        """
        last_layer_config = LastLayerConfiguration(cfg=cfg, encoded_dim=input_dim, mask_labels=self.mask_labels)
        self.task = last_layer_config.task
        self.last_layer = last_layer_config.create_last_layer()

        self.last_layer_criterion_train = last_layer_config.create_criterion()
        self.last_layer_criterion_val = last_layer_config.create_criterion()

        self.downstream_task_metrics_train = last_layer_config.create_metrics()
        self.downstream_task_metrics_val = last_layer_config.create_metrics()
        self.downstream_task_metrics_test = last_layer_config.create_metrics()

    def forward(self, pixel_values: torch.Tensor, task: str) -> torch.Tensor:
        """Encode a clip with the frozen backbone and apply the task head.

        Args:
            pixel_values: Input video tensor for the VideoMAE backbone.
            task: ``'classification'`` squeezes the trailing logit dim.

        Returns:
            Task-head output; shape (B,) for classification, (B, D) otherwise.
        """
        with torch.no_grad():
            outputs = self.mri_encoder(pixel_values=pixel_values, apply_masking=False)
            embeddings = outputs.last_hidden_state # B, L, C
            embeddings = embeddings.mean(dim=1) # B, C - global mean over tokens

        output = self.last_layer(embeddings) # B, D

        if task == 'classification':
            # BUG FIX: squeeze only the logit dim; a bare squeeze() also
            # removed the batch dim when B == 1, producing a 0-d tensor.
            output = torch.squeeze(output, dim=-1)
        return output

    def training_step(self, batch: torch.Tensor, batch_idx: int) -> torch.Tensor:
        """Compute the training loss and accumulate training metrics."""
        img, label = batch

        img_logits = self(pixel_values=img, task=self.task)
        loss = self.last_layer_criterion_train(img_logits, label)

        if self.task == 'classification':
            label = label.int()

        self.downstream_task_metrics_train.update(preds=img_logits, labels=label)

        self.log(f"train/{self.task}.loss", loss, on_step=True,
                  on_epoch=True, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        return loss

    def _log_epoch_metrics(self, split: str, metrics_module) -> None:
        """Compute, log and reset the metric collection for one split.

        Shared by the train/val/test epoch-end hooks. Confusion matrices are
        rendered as a wandb image; all other metrics go through ``self.log``.
        """
        metrics = metrics_module.compute()

        for name, value in metrics.items():
            if name == "confusion_matrix":
                fig = plot_confusion_matrix(cm=value.cpu().numpy(),
                                            class_names=[f"No {self.cfg.downstream_task.target}",
                                                        f"{self.cfg.downstream_task.target}"])
                self.logger.experiment.log({f"{split}/confusion_matrix": wandb.Image(fig)})
            else:
                self.log(f'{split}/{name}', value, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        # BUG FIX: this was nested inside the per-metric loop above, so the
        # same monitor value was re-logged once per metric. Log it once.
        if self.task == 'regression':
            monitor_metric = metrics['Mean_RV'] + metrics['Mean_LV']
            self.log(f'{split}/monitor_metric', monitor_metric, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        metrics_module.reset()

    def on_train_epoch_end(self) -> None:
        """Log and reset the accumulated training metrics."""
        self._log_epoch_metrics('train', self.downstream_task_metrics_train)

    def validation_step(self, batch: torch.Tensor, batch_idx: int) -> dict:
        """Compute the validation loss and accumulate validation metrics."""
        img, label = batch

        img_logits = self(pixel_values=img, task=self.task)
        loss = self.last_layer_criterion_val(img_logits, label)

        if self.task == 'classification':
            label = label.int()

        self.downstream_task_metrics_val.update(preds=img_logits, labels=label)

        self.log(f"val/{self.task}.loss", loss, on_step=False,
                    on_epoch=True, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        return {'loss': loss,
                'pred': img_logits,
                'label': label}

    def on_validation_epoch_end(self) -> None:
        """Log and reset the accumulated validation metrics."""
        self._log_epoch_metrics('val', self.downstream_task_metrics_val)

    def test_step(self, batch: torch.Tensor, batch_idx: int) -> None:
        """Accumulate test metrics (no loss is computed at test time)."""
        img, label = batch

        img_logits = self(pixel_values=img, task=self.task)

        if self.task == 'classification':
            label = label.int()

        self.downstream_task_metrics_test.update(preds=img_logits, labels=label)

    def on_test_epoch_end(self) -> None:
        """Log and reset the accumulated test metrics."""
        self._log_epoch_metrics('test', self.downstream_task_metrics_test)

    def num_steps(self) -> int:
        """Return the number of optimizer steps per epoch.

        ``len(dataloader)`` is the number of batches; divide by gradient
        accumulation and device count to get actual optimizer steps.
        """
        dataloader = self.trainer.fit_loop._data_source.dataloader()
        num_batches = len(dataloader)
        num_devices = max(1, self.trainer.num_devices)
        num_steps = num_batches // (self.trainer.accumulate_grad_batches * num_devices)
        return num_steps

    def configure_optimizers(self) -> dict:
        """Create the optimizer/scheduler for the trainable last layer only."""
        optimizer, scheduler = create_optimizer_and_scheduler(
            models=self.last_layer,
            optimizer_params=self.cfg.downstream_task.params,
            name='last layer',
            num_batches_per_epoch=self.num_steps()
        )

        return {"optimizer": optimizer, "lr_scheduler": scheduler}

    def configure_callbacks(self) -> Sequence[Callback] | Callback:
        """Build model-level callbacks: LR monitor, checkpointing, plots.

        Returns:
            Callbacks list; the regression plot callback is only included for
            regression tasks.
        """
        checkpoint_folder = os.path.join(self.save_dir, 'checkpoints')
        os.makedirs(checkpoint_folder, exist_ok=True)

        # NOTE(review): 'val/monitor_metric' is only logged for regression;
        # for classification the monitored key never appears — confirm the
        # intended checkpoint-selection metric for classification runs.
        checkpoint_callback = ModelCheckpoint(
            dirpath=checkpoint_folder,
            auto_insert_metric_name=True,
            monitor='val/monitor_metric',
            mode='max',
            save_top_k=self.cfg.save_top_k,
            every_n_epochs=self.cfg.check_val_every_n_epoch,
            save_on_train_epoch_end=False,
            save_weights_only=False,
            verbose=True
            )
        learning_rate_callback = LearningRateMonitor(logging_interval='epoch',
                                                     log_momentum=False)

        callbacks = [learning_rate_callback, checkpoint_callback]
        # BUG FIX: the original always appended the regression callback, which
        # was ``None`` for non-regression tasks; Lightning rejects ``None``
        # entries in the callbacks list.
        if self.task == 'regression':
            callbacks.append(LogCMRPhenoTypesGraph(cfg=self.cfg, mask_labels=self.mask_labels))
        return callbacks