import os

from typing import Sequence, Union, Tuple
from omegaconf import DictConfig

import torch
import lightning as L
from torch import nn

from lightning import Callback
from lightning.pytorch.callbacks import ModelCheckpoint, LearningRateMonitor, GradientAccumulationScheduler

from ecgcmr.imaging.img_models.ImageResNet3D import ResNet3D_Encoder
from ecgcmr.multimodal.multimodal_models.Projections import ProjectionHead
from ecgcmr.imaging.img_loss.CustomImagingNTXLoss import NTXentLoss
from ecgcmr.utils.misc import create_optimizer_and_scheduler


class ImageEncoderSimCLR_ResNet3D(L.LightningModule):
    """SimCLR-style contrastive pretraining of a 3D ResNet image (CMR) encoder.

    Two augmented views of the same volume are encoded, layer-normalized,
    projected into a contrastive space, and trained with an NT-Xent loss so
    that views of the same sample attract and views of different samples repel.
    """

    def __init__(
            self,
            cfg: DictConfig,
            save_dir: str
            ) -> None:
        """
        Args:
            cfg: omegaconf config; reads `cfg.models.*` (encoder/projection),
                `cfg.training_mode.loss.NTXentLoss.temperature`, and the
                trainer-level fields used in `configure_callbacks`.
            save_dir: run directory; checkpoints go to `<save_dir>/checkpoints`.
        """
        super().__init__()
        self.cfg = cfg
        self.save_dir = save_dir

        # BUGFIX: second argument previously read `cfg.model.resnet.*` (singular),
        # inconsistent with `cfg.models.*` used everywhere else in this class.
        self.mri_encoder = ResNet3D_Encoder(model_depth=cfg.models.resnet.model_depth,
                                            zero_init_residual=cfg.models.resnet.zero_init_residual)

        self.img_projection = ProjectionHead(non_linear=self.cfg.models.projection.non_linear,
                                             input_dim=self.mri_encoder.encoded_dim,
                                             hidden_dim=self.cfg.models.projection.hidden_dim,
                                             proj_out_dim=self.cfg.models.projection.proj_out_dim,
                                             num_layers=self.cfg.models.projection.num_layers)

        # Separate train/val loss instances with identical hyperparameters;
        # `gather_distributed=True` draws negatives from the global batch under DDP.
        self.criterion_train = NTXentLoss(temperature=cfg.training_mode.loss.NTXentLoss.temperature, gather_distributed=True)
        self.criterion_val = NTXentLoss(temperature=cfg.training_mode.loss.NTXentLoss.temperature, gather_distributed=True)

        # If True: average over the token/time axis before projecting;
        # otherwise project per token and average afterwards (see `forward`).
        self.mean_first = cfg.models.mean_first

        self.imag_layernorm = nn.LayerNorm(normalized_shape=self.mri_encoder.encoded_dim,
                                           eps=1e-12, dtype=torch.float32)
        # Explicit identity init (this is LayerNorm's default, made explicit here).
        nn.init.constant_(self.imag_layernorm.weight, 1.0)
        nn.init.constant_(self.imag_layernorm.bias, 0.0)

    def forward(self, img_view: torch.Tensor) -> torch.Tensor:
        """Encode one augmented view into a (B, D) contrastive embedding.

        Args:
            img_view: batch of image volumes accepted by `mri_encoder`;
                the encoder output is assumed to be (B, T', C) — TODO confirm.

        Returns:
            (B, D) tensor in the projection space.
        """
        img_embedding = self.mri_encoder(img_view)  # (B, T', C) — assumed
        img_embedding = self.imag_layernorm(img_embedding)

        if self.mean_first:
            img_embedding = img_embedding.mean(dim=1)    # (B, C)
            output = self.img_projection(img_embedding)  # (B, D)
        else:
            output = self.img_projection(img_embedding)  # (B, T', D)
            output = output.mean(dim=1)                  # (B, D)

        return output

    def _contrastive_loss(self, batch: Tuple[torch.Tensor, torch.Tensor], criterion: nn.Module) -> torch.Tensor:
        """Embed both views of `batch` and return the NT-Xent loss (logits/labels discarded)."""
        img_view1, img_view2 = batch
        loss, _logits, _labels = criterion(self(img_view1), self(img_view2))
        return loss

    def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int, dataloader_idx: int = 0) -> torch.Tensor:
        """One contrastive training step; logs per-step and per-epoch loss."""
        loss = self._contrastive_loss(batch, self.criterion_train)
        self.log("train/contrastive_loss", loss, on_step=True, on_epoch=True, logger=True, add_dataloader_idx=False, sync_dist=True)
        return loss

    def validation_step(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int, dataloader_idx: int = 0) -> torch.Tensor:
        """One contrastive validation step; logs epoch-aggregated loss only."""
        loss = self._contrastive_loss(batch, self.criterion_val)
        self.log("val/contrastive_loss", loss, on_step=False, on_epoch=True, logger=True, add_dataloader_idx=False, sync_dist=True)
        return loss

    def num_steps(self) -> int:
        """Return the number of optimizer steps per epoch.

        NOTE(review): relies on the private Lightning attribute
        `fit_loop._data_source` — may break across Lightning versions; confirm.
        """
        dataloader = self.trainer.fit_loop._data_source.dataloader()
        num_batches = len(dataloader)
        num_devices = max(1, self.trainer.num_devices)
        return num_batches // (self.trainer.accumulate_grad_batches * num_devices)

    def configure_optimizers(self) -> dict:
        """Build optimizer + LR scheduler from `cfg.models.params` via the project helper."""
        optimizer, scheduler = create_optimizer_and_scheduler(
            models=[self],
            optimizer_params=self.cfg.models.params,
            num_batches_per_epoch=self.num_steps(),
        )

        return {"optimizer": optimizer, "lr_scheduler": scheduler}

    def configure_callbacks(self) -> Union[Sequence[Callback], Callback]:
        """Attach gradient-accumulation, LR-monitor, and checkpoint callbacks.

        Checkpoints are ranked by `val/contrastive_loss` (min) and written to
        `<save_dir>/checkpoints`, which is created here if missing.
        """
        checkpoint_folder = os.path.join(self.save_dir, 'checkpoints')
        os.makedirs(checkpoint_folder, exist_ok=True)

        # Constant accumulation factor from epoch 0 onward.
        accumulator = GradientAccumulationScheduler(scheduling={0: self.cfg.grad_accumulation_steps})

        checkpoint_callback = ModelCheckpoint(
            dirpath=checkpoint_folder,
            auto_insert_metric_name=True,
            monitor='val/contrastive_loss',
            mode='min',
            save_top_k=self.cfg.save_top_k,
            every_n_epochs=self.cfg.check_val_every_n_epoch,
            save_on_train_epoch_end=False,
            save_weights_only=False,
            verbose=True
            )

        learning_rate_callback = LearningRateMonitor(logging_interval='epoch', log_momentum=False, log_weight_decay=True)

        return [accumulator, learning_rate_callback, checkpoint_callback]
     