import os
import wandb

from typing import Sequence
from omegaconf import DictConfig

import copy
import torch
import torch.nn as nn
import lightning as L

from lightning import Callback
from lightning.pytorch.callbacks import ModelCheckpoint, LearningRateMonitor

from ecgcmr.imaging.img_models.ImageEncoder import ResNet3D_Encoder
from ecgcmr.imaging.img_models.ImageProjection import ImageGlobalProjectionUnimodalSimCLR, ImageGlobalProjectionUnimodalMoCo
from ecgcmr.imaging.img_models.LastLayer import LastLayerConfiguration
from ecgcmr.imaging.img_loss.CustomImagingNTXLoss import NTXentLoss
from ecgcmr.imaging.img_utils.imaging_metrics import CustomAccuracy, plot_confusion_matrix

from ecgcmr.utils.misc import create_optimizer_and_scheduler


class ImageEncoderSimCLR(L.LightningModule):
    """Contrastive (SimCLR / MoCo) pre-training of a 3D MRI encoder.

    A ``ResNet3D_Encoder`` is trained on pairs of augmented image views using
    either the NT-Xent loss (SimCLR) or a momentum-contrast (MoCo) objective,
    selected by ``cfg.training_mode.loss``. In parallel, every
    ``cfg.downstream_task.epoch_interval`` epochs, a small task head
    (``last_layer``) is trained on labelled downstream batches with its own
    optimizer/scheduler. Optimization is manual: two optimizers and two
    schedulers are stepped explicitly in ``training_step``.
    """

    def __init__(self, cfg: DictConfig, save_dir: str) -> None:
        """Build encoder, projection head, loss, and downstream components.

        Args:
            cfg: Hydra/OmegaConf configuration tree. Exactly one of
                ``cfg.training_mode.loss.NTXentLoss.use`` or
                ``cfg.training_mode.loss.moco.use`` is expected to be true.
            save_dir: Directory under which checkpoints are written.
        """
        super().__init__()

        self.save_hyperparameters()

        self.cfg = cfg
        self.save_dir = save_dir

        # How often (in epochs) the downstream head is trained.
        self.epoch_interval = cfg.downstream_task.epoch_interval
        self.topk_accuracy = cfg.training_mode.topk_accuracy
        self.main_batch_size = cfg.dataset.batch_size
        self.downstream_batch_size = cfg.downstream_task.batch_size

        self.mri_encoder = ResNet3D_Encoder(cfg=cfg)

        if cfg.training_mode.loss.NTXentLoss.use:
            # SimCLR branch: the training loss draws extra negatives from a
            # memory bank; the validation loss uses in-batch negatives only.
            self.img_projection = ImageGlobalProjectionUnimodalSimCLR(
                encoded_dim=self.mri_encoder.encoded_dim,
                output_dim=cfg.models.projection.d_contrastive)

            self.criterion_train = NTXentLoss(
                temperature=cfg.training_mode.loss.NTXentLoss.temperature,
                memory_bank_size=(cfg.training_mode.loss.NTXentLoss.memory_bank_size,
                                  cfg.models.projection.d_contrastive))

            self.criterion_val = NTXentLoss(temperature=cfg.training_mode.loss.NTXentLoss.temperature)

        elif cfg.training_mode.loss.moco.use:
            # MoCo branch: queries come from the live encoder/projection,
            # keys from frozen, momentum-updated ("lagging") copies.
            self.img_projection = ImageGlobalProjectionUnimodalMoCo(
                encoded_dim=self.mri_encoder.encoded_dim,
                output_dim=cfg.models.projection.d_contrastive,
                batch_norm=cfg.models.projection.batch_norm)

            self.lagging_encoder = copy.deepcopy(self.mri_encoder)
            for param in self.lagging_encoder.parameters():
                param.requires_grad = False

            self.lagging_projection = copy.deepcopy(self.img_projection)
            for param in self.lagging_projection.parameters():
                param.requires_grad = False

            # Negative-key queue: one L2-normalized embedding per column.
            # Reassigning the buffer after normalization keeps it registered.
            self.register_buffer("queue", torch.randn(cfg.models.projection.d_contrastive,
                                                      cfg.training_mode.loss.moco.queue_size))
            self.queue = nn.functional.normalize(self.queue, dim=0)
            self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

            self.criterion_train = nn.CrossEntropyLoss()
            self.criterion_val = nn.CrossEntropyLoss()

        self.init_task_dependent_components(cfg=cfg, input_dim=self.mri_encoder.encoded_dim)

        # Top-k retrieval accuracy on the contrastive logits.
        # NOTE(review): under MoCo, accuracy_val is never updated (the MoCo
        # validation branch is a no-op); confirm CustomAccuracy.compute()
        # tolerates an empty state.
        self.accuracy_train = CustomAccuracy(topk=cfg.training_mode.topk_accuracy)
        self.accuracy_val = CustomAccuracy(topk=cfg.training_mode.topk_accuracy)

        # Important: This property activates manual optimization.
        self.automatic_optimization = False

    def init_task_dependent_components(self, cfg: DictConfig, input_dim: int):
        """Create the downstream head, its criteria, and its metrics.

        Args:
            cfg: Full configuration tree.
            input_dim: Dimensionality of the encoder output fed to the head.
        """
        last_layer_config = LastLayerConfiguration(
            cfg=cfg,
            encoded_dim=input_dim,
        )
        self.task = last_layer_config.task
        self.last_layer = last_layer_config.create_last_layer()
        self.last_layer_criterion_train = last_layer_config.create_criterion()
        self.last_layer_criterion_val = last_layer_config.create_criterion()
        self.downstream_task_metrics = last_layer_config.create_metrics()

    @torch.no_grad()
    def _momentum_update_key_encoder(self):
        """Momentum (EMA) update of the lagging key encoder and projection.

        key = m * key + (1 - m) * query, with m = cfg.training_mode.loss.moco.momentum.
        """
        m = self.cfg.training_mode.loss.moco.momentum
        for param_q, param_k in zip(self.mri_encoder.parameters(), self.lagging_encoder.parameters()):
            param_k.data = param_k.data * m + param_q.data * (1.0 - m)
        for param_q, param_k in zip(self.img_projection.parameters(), self.lagging_projection.parameters()):
            param_k.data = param_k.data * m + param_q.data * (1.0 - m)

    @torch.no_grad()
    def _dequeue_and_enqueue(self, keys):
        """Replace the oldest ``batch_size`` columns of the queue with ``keys``.

        Args:
            keys: (batch, d_contrastive) normalized key embeddings.
        """
        batch_size = keys.shape[0]

        ptr = int(self.queue_ptr)
        # Queue size must be a multiple of the batch size so a batch never
        # wraps around the queue boundary (for simplicity, as in the MoCo
        # reference implementation).
        assert self.cfg.training_mode.loss.moco.queue_size % batch_size == 0  # for simplicity

        # Replace the keys at ptr (dequeue and enqueue), then advance.
        self.queue[:, ptr : ptr + batch_size] = keys.T
        ptr = (ptr + batch_size) % self.cfg.training_mode.loss.moco.queue_size  # move pointer
        self.queue_ptr[0] = ptr

    @torch.no_grad()
    def forward_with_shuffle(self, encoder, projection, x):
        """Encode keys with batch shuffling for batch normalization.

        Shuffles the batch before the forward pass and unshuffles afterwards
        so BN statistics are not computed on the same sample ordering as the
        query branch (MoCo's BN-shuffle trick, applied within this device).

        Args:
            encoder: Module exposing ``forward_unimodal``.
            projection: Projection head applied to the encoded features.
            x: Input batch of image views.

        Returns:
            L2-normalized key embeddings in the original batch order.
        """
        # Generate a random permutation of indices.
        batch_size = x.size(0)
        shuffle_idx = torch.randperm(batch_size).to(x.device)

        # Shuffle the batch.
        x_shuffled = x[shuffle_idx]

        # Forward pass through the encoder.
        x_encoded = encoder.forward_unimodal(x_shuffled)

        # Apply projection and normalization.
        x_projected = projection(x_encoded)
        x_normalized = nn.functional.normalize(x_projected, dim=1)

        # Unshuffle to align with original batch order for consistency in
        # loss computation.
        reverse_idx = torch.argsort(shuffle_idx)
        x_unshuffled = x_normalized[reverse_idx]

        return x_unshuffled

    def forward(self, img_view: torch.Tensor) -> torch.Tensor:
        """Encode one image view and project it into the contrastive space."""
        img_embedding = self.mri_encoder.forward_unimodal(img_view)
        output = self.img_projection(img_embedding)
        return output

    def forward_task(self, img: torch.Tensor, task: str) -> torch.Tensor:
        """Encode an image and apply the downstream head.

        Args:
            img: Input image batch.
            task: Either ``'classification'`` or ``'regression'``.

        Returns:
            Head output; squeezed for classification.
        """
        img_embedding = self.mri_encoder.forward_unimodal(img)
        output = self.last_layer(img_embedding)
        if task == 'classification':
            # NOTE(review): squeeze() with no dim also collapses the batch
            # axis when batch_size == 1 — confirm batches are always > 1.
            output = torch.squeeze(output)
        return output

    def training_step(self, batch: torch.Tensor, batch_idx: int) -> None:
        """One manual-optimization step: contrastive backbone update, plus a
        downstream-head update every ``epoch_interval`` epochs.

        ``batch`` is a dict with a ``'main'`` pair of augmented views and,
        when the head is trained, a ``'downstream'`` (image, label) pair.
        """
        backbone_opt, last_layer_opt = self.optimizers()
        backbone_sch, last_layer_sch = self.lr_schedulers()

        batch_main = batch['main']

        # --- Main (contrastive) model training step ---
        self.toggle_optimizer(backbone_opt)

        img_view1, img_view2 = batch_main

        if self.cfg.training_mode.loss.NTXentLoss.use:
            img_embed1 = self.forward(img_view1)
            img_embed2 = self.forward(img_view2)

            loss, logits, labels = self.criterion_train(img_embed1, img_embed2)

        elif self.cfg.training_mode.loss.moco.use:
            # Query features from the live encoder.
            q = self.img_projection(self.mri_encoder.forward_unimodal(img_view1))
            q = nn.functional.normalize(q, dim=1)

            # Key features: no gradient, momentum-updated encoder, BN shuffle.
            with torch.no_grad():
                self._momentum_update_key_encoder()  # update the key encoder
                k = self.forward_with_shuffle(self.lagging_encoder, self.lagging_projection, img_view2)

            # Positive logits: Nx1 (query vs its own key).
            l_pos = torch.einsum("nc,nc->n", [q, k]).unsqueeze(-1)
            # Negative logits: NxK (query vs the queue of old keys).
            l_neg = torch.einsum("nc,ck->nk", [q, self.queue.clone().detach()])

            # Logits: Nx(1+K), positive first.
            logits = torch.cat([l_pos, l_neg], dim=1)

            # Apply temperature.
            logits /= self.cfg.training_mode.loss.moco.temperature

            # Labels: the positive key is always at index 0.
            # Fix: create on the logits' device instead of hard-coding
            # .cuda(), so CPU/MPS/multi-device runs also work.
            labels = torch.zeros(logits.shape[0], dtype=torch.long, device=logits.device)

            # Dequeue the oldest keys and enqueue the new ones.
            self._dequeue_and_enqueue(k)

            loss = self.criterion_train(logits, labels)

        self.accuracy_train.update(logits=logits, labels=labels)

        self.log("train/contrastive.loss", loss, on_step=True,
                 on_epoch=True, logger=True, batch_size=self.main_batch_size, add_dataloader_idx=False)

        backbone_opt.zero_grad()
        self.manual_backward(loss)

        backbone_opt.step()
        backbone_sch.step()

        self.untoggle_optimizer(backbone_opt)

        # --- Last-layer (downstream head) training step ---
        if (self.current_epoch + 1) % self.epoch_interval == 0:

            batch_downstream = batch['downstream']

            self.toggle_optimizer(last_layer_opt)

            im_orig, label = batch_downstream

            img_logits = self.forward_task(img=im_orig, task=self.task)
            loss = self.last_layer_criterion_train(img_logits, label)

            self.log(f"train/{self.task}.loss", loss, on_step=False,
                     on_epoch=True, logger=True, batch_size=self.downstream_batch_size, add_dataloader_idx=False)

            last_layer_opt.zero_grad()
            self.manual_backward(loss)

            last_layer_opt.step()
            last_layer_sch.step()

            self.untoggle_optimizer(last_layer_opt)

    def on_train_epoch_end(self) -> None:
        """Log and reset the epoch-level contrastive training accuracy."""
        accuracy_train = self.accuracy_train.compute()
        self.log(f"train/contrastive.top{self.topk_accuracy}_accuracy", accuracy_train,
                 on_step=False, on_epoch=True, logger=True, batch_size=self.main_batch_size, add_dataloader_idx=False)
        self.accuracy_train.reset()

    @torch.no_grad()
    def validation_step(self, batch: torch.Tensor, batch_idx: int, dataloader_idx: int = 0) -> None:
        """Validate on two dataloaders.

        Dataloader 0: downstream task (image, label) batches.
        Dataloader 1: contrastive view pairs (SimCLR only; the MoCo branch
        is intentionally a no-op).
        """
        if dataloader_idx == 0:
            im_orig, label = batch

            img_logits = self.forward_task(img=im_orig, task=self.task)

            loss = self.last_layer_criterion_val(img_logits, label)

            if self.task == 'classification':
                # Metrics expect integer class labels.
                label = label.int()

            self.downstream_task_metrics.update(preds=img_logits, labels=label)

            self.log(f"val/{self.task}.loss", loss, on_step=False,
                     on_epoch=True, logger=True, batch_size=self.downstream_batch_size, add_dataloader_idx=False)

        if dataloader_idx == 1:
            if self.cfg.training_mode.loss.NTXentLoss.use:
                img_view1, img_view2 = batch

                img_embed1 = self.forward(img_view1)
                img_embed2 = self.forward(img_view2)

                loss, logits, labels = self.criterion_val(img_embed1, img_embed2)

                self.accuracy_val.update(logits=logits, labels=labels)

                self.log("val/contrastive.loss", loss, on_step=False,
                         on_epoch=True, logger=True, batch_size=self.main_batch_size, add_dataloader_idx=False)

            elif self.cfg.training_mode.loss.moco.use:
                # No contrastive validation defined for MoCo.
                pass

    def on_validation_epoch_end(self):
        """Log downstream metrics, the checkpoint monitor metric, and the
        contrastive validation accuracy; then reset all metric state."""
        metrics = self.downstream_task_metrics.compute()
        accuracy_val = self.accuracy_val.compute()

        for name, value in metrics.items():
            if name == "confusion_matrix":
                # Confusion matrices are logged as images via the raw wandb run.
                fig = plot_confusion_matrix(cm=value.cpu().numpy(),
                                            class_names=[f"No {self.cfg.downstream_task.target}",
                                                         f"{self.cfg.downstream_task.target}"])
                self.logger.experiment.log({"val/confusion_matrix": wandb.Image(fig)})
            else:
                self.log(f'val/{name}', value, logger=True,
                         batch_size=self.downstream_batch_size, add_dataloader_idx=False)

        if self.task == 'regression':
            # Quantity monitored by ModelCheckpoint ('val/monitor_metric').
            # Fix: previously this was re-logged once per metric inside the
            # loop above with the identical value; log it exactly once.
            monitor_metric = metrics['Mean_RV'] + metrics['Mean_LV']
            self.log('val/monitor_metric', monitor_metric, logger=True,
                     batch_size=self.downstream_batch_size, add_dataloader_idx=False)

        self.log('val/accuracy', accuracy_val, logger=True,
                 batch_size=self.main_batch_size, add_dataloader_idx=False)

        self.downstream_task_metrics.reset()
        self.accuracy_val.reset()

    @torch.no_grad()
    def test_step(self, batch: torch.Tensor, batch_idx: int) -> None:
        """Accumulate downstream-task metrics on the test set."""
        im_orig, label = batch

        img_logits = self.forward_task(img=im_orig, task=self.task)
        if self.task == 'classification':
            label = label.int()

        self.downstream_task_metrics.update(preds=img_logits, labels=label)

    def on_test_epoch_end(self):
        """Log accumulated test metrics and reset their state."""
        metrics = self.downstream_task_metrics.compute()
        for name, value in metrics.items():
            if name == "confusion_matrix":
                fig = plot_confusion_matrix(cm=value.cpu().numpy(),
                                            class_names=[f"No {self.cfg.downstream_task.target}",
                                                         f"{self.cfg.downstream_task.target}"])
                self.logger.experiment.log({"test/confusion_matrix": wandb.Image(fig)})
            else:
                self.log(f'test/{name}', value, logger=True,
                         batch_size=self.downstream_batch_size, add_dataloader_idx=False)

        self.downstream_task_metrics.reset()

    def num_steps(self, mode='main') -> int:
        """Return the number of optimizer steps per epoch for a dataloader.

        Args:
            mode: Key into the combined train dataloaders ('main' or
                'downstream').

        NOTE(review): relies on the private Lightning attribute
        ``fit_loop._data_source`` — verify on Lightning upgrades.
        """
        dataset = self.trainer.fit_loop._data_source.dataloader()[mode]
        dataset_size = len(dataset)
        num_devices = max(1, self.trainer.num_devices)
        num_steps = dataset_size // (self.trainer.accumulate_grad_batches * num_devices)
        return num_steps

    def configure_optimizers(self) -> tuple:
        """Create the backbone and last-layer optimizer/scheduler pairs.

        Returns:
            ([main_optimizer, last_layer_optimizer],
             [main_scheduler, last_layer_scheduler]) — order matches the
            unpacking in ``training_step``.
        """
        # Main model optimizer and scheduler (encoder + projection head).
        main_optimizer, main_scheduler = create_optimizer_and_scheduler(
            models=[self.mri_encoder, self.img_projection],
            optimizer_params=self.cfg.models.params,
            name='main',
            num_batches_per_epoch=self.num_steps(mode='main'),
        )

        # Last layer optimizer and scheduler (downstream head only).
        last_layer_optimizer, last_layer_scheduler = create_optimizer_and_scheduler(
            models=[self.last_layer],
            optimizer_params=self.cfg.downstream_task.params,
            name='last_layer',
            num_batches_per_epoch=self.num_steps(mode='downstream'),
        )

        return [main_optimizer, last_layer_optimizer], [main_scheduler, last_layer_scheduler]

    def configure_callbacks(self) -> Sequence[Callback] | Callback:
        """Attach checkpointing and LR-monitoring callbacks.

        NOTE(review): ModelCheckpoint monitors 'val/monitor_metric', which is
        only logged when ``self.task == 'regression'`` — confirm a monitor
        exists for classification runs.
        """
        checkpoint_folder = os.path.join(self.save_dir, 'checkpoints')
        os.makedirs(checkpoint_folder, exist_ok=True)

        checkpoint_callback = ModelCheckpoint(
            dirpath=checkpoint_folder,
            auto_insert_metric_name=True,
            monitor='val/monitor_metric',
            mode='max',
            save_top_k=self.cfg.save_top_k,
            every_n_epochs=self.cfg.check_val_every_n_epoch,
            save_on_train_epoch_end=False,
            save_weights_only=False,
            verbose=True
            )

        learning_rate_callback = LearningRateMonitor(
            logging_interval='epoch',
            log_momentum=True
            )

        return [learning_rate_callback, checkpoint_callback]