import os

from typing import Sequence, Tuple
from omegaconf import DictConfig

import torch
import lightning as L

from torch import nn

from lightning import Callback
from lightning.pytorch.callbacks import ModelCheckpoint, LearningRateMonitor

from ecgcmr.imaging.img_models.ImageResNet3D import ResNet3D_Encoder
from ecgcmr.utils.LastLayer import LastLayerConfiguration
from ecgcmr.utils.callbacks import SkLearn_RegressionCallback
from ecgcmr.utils.misc import create_optimizer_and_scheduler


class ImageEncoderSupervised_ResNet3D(L.LightningModule):
    """Supervised 3D-ResNet image encoder for a regression downstream task.

    Encodes an image volume with a ResNet3D backbone, mean-pools the token
    dimension, layer-normalizes the pooled features, and maps them to task
    outputs via a task-dependent last layer. During validation it tracks the
    best mean R2 (higher is better) and best mean MAE (lower is better) seen
    so far, re-logging metrics and plots whenever a new best is reached.
    """

    def __init__(self, cfg: DictConfig, save_dir: str) -> None:
        """Build encoder and task head from the experiment config.

        Args:
            cfg: Experiment configuration (expects ``downstream_task.batch_size``
                and ``models.resnet.*`` among others).
            save_dir: Directory under which a ``checkpoints`` folder is created.
        """
        super().__init__()
        self.cfg = cfg
        self.save_dir = save_dir

        self.mask_labels = False

        self.batch_size = cfg.downstream_task.batch_size

        self.mri_encoder = ResNet3D_Encoder(model_depth=cfg.models.resnet.model_depth,
                                            zero_init_residual=cfg.models.resnet.zero_init_residual)

        self._init_task_dependent_components(cfg=cfg)

        # BUGFIX: was `-float('-inf')`, which evaluates to +inf, so the
        # `mean_R2 > self.best_mean_R2` check in on_validation_epoch_end could
        # never be True and the best-R2 branch was dead code.
        self.best_mean_R2 = float('-inf')
        self.best_mean_MAE = float('inf')

    def _init_task_dependent_components(self, cfg: DictConfig) -> None:
        """Create the last layer, criterion, metrics, plotter and feature norm
        for the task declared in ``cfg``."""
        last_layer_config = LastLayerConfiguration(cfg=cfg, encoded_dim=self.mri_encoder.encoded_dim, mask_labels=self.mask_labels)
        self.task = last_layer_config.task
        self.last_layer = last_layer_config.create_last_layer()

        self.last_layer_criterion = last_layer_config.create_criterion()

        self.downstream_task_metrics_val = last_layer_config.create_metrics()
        self.plotter_val = last_layer_config.create_plotter()

        # LayerNorm over the pooled encoder features, explicitly (re)initialized
        # to the identity transform (weight=1, bias=0).
        self.imag_layernorm_regression = nn.LayerNorm(normalized_shape=self.mri_encoder.encoded_dim,
                                                      eps=1e-12, dtype=torch.float32)
        nn.init.constant_(self.imag_layernorm_regression.weight, 1.0)
        nn.init.constant_(self.imag_layernorm_regression.bias, 0.0)

    def forward(self, img_view: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode images and produce task outputs.

        Args:
            img_view: Image batch accepted by the ResNet3D encoder.

        Returns:
            Tuple of ``(logits, features)`` where ``features`` are the
            layer-normalized, token-pooled encoder embeddings.
        """
        img_embeddings = self.mri_encoder(img_view)  # B, T', C
        features = img_embeddings.mean(dim=1)  # B, C'
        features = self.imag_layernorm_regression(features)

        logits = self.last_layer(features)
        return logits, features

    def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int, dataloader_idx: int = 0) -> dict:
        """Compute and log the task loss for one training batch.

        Returns:
            Dict with ``loss``, ``pred`` (logits), ``features`` and ``label``.
        """
        imgs, labels = batch

        logits, features = self(imgs)
        loss = self.last_layer_criterion(logits, labels)

        self.log(f"train/{self.task}.loss", loss, on_step=True, on_epoch=True, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        return {'loss': loss,
                'pred': logits,
                'features': features,
                'label': labels}

    def validation_step(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int, dataloader_idx: int = 0) -> dict:
        """Compute the validation loss and accumulate downstream-task metrics.

        Returns:
            Dict with ``loss``, ``pred`` (logits), ``features`` and ``label``.
        """
        imgs, labels = batch

        logits, features = self(imgs)
        loss = self.last_layer_criterion(logits, labels)

        self.downstream_task_metrics_val.update(preds=logits, labels=labels)

        self.log(f"val/{self.task}.loss", loss, on_step=False,
                  on_epoch=True, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        return {'loss': loss,
                'pred': logits,
                'features': features,
                'label': labels}

    def on_validation_epoch_end(self) -> None:
        """Compute epoch-level metrics, log them, and update best-R2/best-MAE
        trackers (re-logging metrics and plots when a new best is reached)."""
        metrics, preds, labels = self.downstream_task_metrics_val.compute()

        for name, value in metrics.items():
            self.log(f'val_metrics/{name}', value, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        # NOTE(review): despite the name, these are the SUM of the RV and LV
        # aggregates, not their mean — monotonically equivalent for the
        # best-tracking below; confirm whether a /2 was intended for logging.
        mean_R2 = metrics['Mean_RV_R2'] + metrics['Mean_LV_R2']
        self.log('val_metrics/mean_R2', mean_R2, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        mean_MAE = metrics['Mean_RV_MAE'] + metrics['Mean_LV_MAE']
        self.log('val_metrics/mean_MAE', mean_MAE, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

        self.plotter_val.plot_results(preds=preds, labels=labels, prefix='val_plot')

        if mean_R2 > self.best_mean_R2:
            self.best_mean_R2 = mean_R2

            best_metrics = {"best_mean_R2": self.best_mean_R2}
            for name, value in metrics.items():
                best_metrics[f"best_R2/{name}"] = value

            for name, value in best_metrics.items():
                self.log(name, value, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

            self.plotter_val.plot_results(preds=preds, labels=labels, prefix='best_R2_plot')

        if mean_MAE < self.best_mean_MAE:
            self.best_mean_MAE = mean_MAE

            best_metrics = {"best_mean_MAE": self.best_mean_MAE}
            for name, value in metrics.items():
                best_metrics[f"best_MAE/{name}"] = value

            for name, value in best_metrics.items():
                self.log(name, value, logger=True, batch_size=self.batch_size, add_dataloader_idx=False)

            self.plotter_val.plot_results(preds=preds, labels=labels, prefix='best_MAE_plot')

        # Reset accumulated state so the next epoch starts fresh.
        self.downstream_task_metrics_val.reset()

    def num_steps(self) -> int:
        """Return the number of optimizer steps per epoch.

        NOTE(review): relies on the private Lightning attribute
        ``fit_loop._data_source`` — may break across Lightning versions.
        """
        dataset = self.trainer.fit_loop._data_source.dataloader()
        dataset_size = len(dataset)
        num_devices = max(1, self.trainer.num_devices)
        # Gradient accumulation and multi-device training both reduce the
        # number of optimizer steps per epoch.
        num_steps = dataset_size // (self.trainer.accumulate_grad_batches * num_devices)
        return num_steps

    def configure_optimizers(self) -> dict:
        """Build the optimizer and LR scheduler from ``cfg.models.params``."""
        optimizer, scheduler = create_optimizer_and_scheduler(
            models=[self],
            optimizer_params=self.cfg.models.params,
            num_batches_per_epoch=self.num_steps(),
        )

        return {"optimizer": optimizer, "lr_scheduler": scheduler}

    def configure_callbacks(self) -> Sequence[Callback] | Callback:
        """Create the LR monitor, checkpointing and sklearn-regression callbacks.

        Checkpoints are written to ``<save_dir>/checkpoints`` and selected by
        maximizing ``val/monitor_metric``.
        """
        checkpoint_folder = os.path.join(self.save_dir, 'checkpoints')
        os.makedirs(checkpoint_folder, exist_ok=True)

        regression_callback = SkLearn_RegressionCallback(cfg=self.cfg, mask_labels=self.mask_labels)

        checkpoint_callback = ModelCheckpoint(
            dirpath=checkpoint_folder,
            auto_insert_metric_name=True,
            monitor='val/monitor_metric',
            mode='max',
            save_top_k=self.cfg.save_top_k,
            every_n_epochs=self.cfg.check_val_every_n_epoch,
            save_on_train_epoch_end=False,
            save_weights_only=False,
            verbose=True
            )
        learning_rate_callback = LearningRateMonitor(logging_interval='epoch', log_momentum=False, log_weight_decay=True)

        return [learning_rate_callback, checkpoint_callback, regression_callback]