import os
from typing import Sequence
from omegaconf import DictConfig

import torch
import torch.nn.functional as F

import lightning as L
from lightning import Callback
from lightning.pytorch.callbacks import ModelCheckpoint, LearningRateMonitor

from transformers import ViTMAEConfig, ViTMAEModel
from ecgcmr.multimodal.multimodal_models.Projections import ProjectionHeadSimple

from ecgcmr.utils.misc import create_optimizer_and_scheduler


class ECGSimCLR(L.LightningModule):
    """SimCLR-style contrastive pre-training of a ViT-MAE ECG encoder.

    Two augmented views of each ECG sample are encoded with a ViT-MAE
    backbone (no masking applied), mean-pooled over patch tokens,
    projected through a small MLP head, and pulled together with the
    NT-Xent contrastive loss. On every checkpoint save the encoder is
    additionally exported in Hugging Face format for downstream reuse.
    """

    def __init__(self, cfg: DictConfig, save_dir: str) -> None:
        """Build encoder, projection head, and loss hyper-parameters.

        Args:
            cfg: Full experiment configuration. Reads ``cfg.models``
                (backbone + projection sizes), ``cfg.dataset`` /
                ``cfg.augmentations`` (input geometry), and
                ``cfg.training_mode.loss.temperature``.
            save_dir: Root directory for checkpoints and the exported
                Hugging Face encoder.
        """
        super().__init__()
        self.cfg = cfg
        self.save_dir = save_dir

        model_cfg = cfg.models[cfg.models.model_size]

        # The ECG is treated as a single-channel 2D "image":
        # (num electrodes, time steps after the random-crop augmentation).
        self.image_size = (cfg.dataset.input_electrodes, cfg.augmentations.ecg.random_crop.ecg_time_steps)
        self.patch_size = tuple(model_cfg.patch_size)

        self.ecg_encoder_config = ViTMAEConfig(
            hidden_size=model_cfg.hidden_size,
            num_hidden_layers=model_cfg.num_hidden_layers,
            num_attention_heads=model_cfg.num_attention_heads,
            intermediate_size=model_cfg.intermediate_size,
            hidden_act=model_cfg.hidden_act,
            hidden_dropout_prob=model_cfg.hidden_dropout_prob,
            attention_probs_dropout_prob=model_cfg.attention_probs_dropout_prob,
            drop_path_rate=model_cfg.drop_path_rate,
            initializer_range=model_cfg.initializer_range,
            layer_norm_eps=model_cfg.layer_norm_eps,
            qkv_bias=model_cfg.qkv_bias,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=1,
            decoder_intermediate_size=model_cfg.decoder_intermediate_size,
            decoder_num_attention_heads=model_cfg.decoder_num_attention_heads,
            decoder_hidden_size=model_cfg.decoder_hidden_size,
            decoder_num_hidden_layers=model_cfg.decoder_num_hidden_layers,
            mask_ratio=model_cfg.mask_ratio,  # Not used for SimCLR but can remain in config
            mask_loss=model_cfg.mask_loss,    # Not used for SimCLR but can remain in config
            use_cls_token=model_cfg.use_cls_token,
            use_learnable_pos_emb=model_cfg.use_learnable_pos_emb,
            layerscale_init_values=model_cfg.layerscale_init_values,
        )

        self.ecg_encoder = ViTMAEModel(config=self.ecg_encoder_config)

        self.hidden_size = model_cfg.hidden_size

        proj_cfg = cfg.models.projection

        self.projection_head = ProjectionHeadSimple(
            input_dim=self.hidden_size,
            hidden_dim=proj_cfg.hidden_dim,
            output_dim=proj_cfg.proj_out_dim,
        )

        # NT-Xent temperature; smaller values sharpen the similarity
        # distribution.
        self.temperature = cfg.training_mode.loss.temperature

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Encode a batch of ECGs and return projected embeddings.

        Args:
            x: Input batch passed to the ViT-MAE encoder as
                ``pixel_values`` (single-channel ECG "images").

        Returns:
            Projection-head output of shape ``[B, proj_out_dim]``
            (un-normalized; normalization happens in the loss).
        """
        # apply_masking=False: SimCLR uses the full token sequence,
        # MAE-style masking is disabled.
        outputs = self.ecg_encoder(pixel_values=x, apply_masking=False)

        hidden_state = outputs.last_hidden_state  # [B, num_tokens, hidden_size]

        # Mean-pool over patch tokens, skipping the CLS token when present.
        if self.ecg_encoder_config.use_cls_token:
            features = hidden_state[:, 1:, :].mean(dim=1)
        else:
            features = hidden_state.mean(dim=1)

        return self.projection_head(features)

    def training_step(self, batch: tuple[torch.Tensor, torch.Tensor], batch_idx: int) -> torch.Tensor:
        """SimCLR training step using two augmented views of the same ECG.

        Args:
            batch: ``(ecg_view1, ecg_view2)`` — two augmentations of the
                same underlying samples, index-aligned along the batch dim.
            batch_idx: Index of the batch (unused).

        Returns:
            Scalar NT-Xent loss.
        """
        ecg_view1, ecg_view2 = batch

        z1 = self.forward(ecg_view1)
        z2 = self.forward(ecg_view2)

        loss = self.simclr_loss(z1, z2, temperature=self.temperature)

        self.log("train/loss", loss, on_step=True, on_epoch=True, logger=True, sync_dist=True, prog_bar=True)

        return loss

    def validation_step(self, batch: tuple[torch.Tensor, torch.Tensor], batch_idx: int) -> torch.Tensor:
        """Compute the contrastive loss on a validation subset.

        No labels are needed; this only monitors overfitting of the
        self-supervised objective.
        """
        ecg_view1, ecg_view2 = batch
        z1 = self.forward(ecg_view1)
        z2 = self.forward(ecg_view2)

        loss = self.simclr_loss(z1, z2, temperature=self.temperature)
        self.log("val/loss", loss, on_step=False, on_epoch=True, logger=True, sync_dist=True, prog_bar=True)
        return loss

    def simclr_loss(self, z1: torch.Tensor, z2: torch.Tensor, temperature: float) -> torch.Tensor:
        """NT-Xent (Normalized Temperature-scaled Cross Entropy) loss.

            L = -log( exp( sim(z1_i, z2_i)/temp ) / sum_{k != i} exp( sim(z1_i, z2_k)/temp ) )

        Implemented over the concatenated 2B batch: each row's positive is
        the other view of the same sample; the self-similarity diagonal is
        masked out.

        Args:
            z1: Projections of view 1, shape ``[B, D]``.
            z2: Projections of view 2, shape ``[B, D]``.
            temperature: Softmax temperature (> 0).

        Returns:
            Scalar loss averaged over all 2B rows.
        """
        z1 = F.normalize(z1, dim=-1)
        z2 = F.normalize(z2, dim=-1)

        batch_size = z1.size(0)
        device = z1.device

        embeddings = torch.cat([z1, z2], dim=0)  # shape: (2B, dim)
        sim_matrix = embeddings @ embeddings.T   # cosine similarities, shape: (2B, 2B)
        sim_matrix /= temperature

        # Exclude self-similarity from the softmax denominator.
        self_mask = torch.eye(2 * batch_size, device=device, dtype=torch.bool)
        sim_matrix = sim_matrix.masked_fill(self_mask, float('-inf'))

        # Row i (a view-1 embedding) is positive with row i + B (its
        # view-2 counterpart), and vice versa for rows B..2B-1. Index
        # tensors are created on the embeddings' device to avoid implicit
        # host-to-device transfers every step.
        pos_targets_full = torch.cat([
            torch.arange(batch_size, device=device) + batch_size,
            torch.arange(batch_size, device=device),
        ], dim=0)  # shape: (2B,)

        row_indices = torch.arange(2 * batch_size, device=device)
        log_probs = F.log_softmax(sim_matrix, dim=1)
        loss = -log_probs[row_indices, pos_targets_full].mean()

        return loss

    def on_save_checkpoint(self, checkpoint: dict) -> None:
        """Save the encoder in a Hugging Face-compatible format for later usage.

        Overwrites the same ``hf_model`` directory on every checkpoint
        save, so it always mirrors the most recent weights.
        """
        hf_save_path = os.path.join(self.save_dir, "hf_model")
        os.makedirs(hf_save_path, exist_ok=True)
        self.ecg_encoder.save_pretrained(hf_save_path)

    def num_steps(self) -> int:
        """Return the number of optimizer steps per epoch.

        Derived from the train dataloader's batch count divided by
        gradient accumulation and device count (steps are shared across
        devices under DDP).

        NOTE(review): relies on the private Lightning attribute
        ``fit_loop._data_source`` — may break across Lightning versions.
        """
        dataloader = self.trainer.fit_loop._data_source.dataloader()
        num_batches = len(dataloader)
        num_devices = max(1, self.trainer.num_devices)
        num_steps = num_batches // (self.trainer.accumulate_grad_batches * num_devices)
        return num_steps

    def configure_optimizers(self) -> dict:
        """Create optimizer and LR scheduler from the shared factory helper."""
        optimizer, scheduler = create_optimizer_and_scheduler(
            models=[self],
            optimizer_params=self.cfg.models.params,
            num_batches_per_epoch=self.num_steps()
        )
        return {"optimizer": optimizer, "lr_scheduler": scheduler}

    def configure_callbacks(self) -> Sequence[Callback] | Callback:
        """Attach checkpointing (best val/loss) and LR monitoring callbacks."""
        checkpoint_folder = os.path.join(self.save_dir, "checkpoints")
        os.makedirs(checkpoint_folder, exist_ok=True)

        checkpoint_callback = ModelCheckpoint(
            dirpath=checkpoint_folder,
            auto_insert_metric_name=True,
            monitor="val/loss",
            mode="min",
            save_top_k=self.cfg.save_top_k,
            every_n_epochs=self.cfg.check_val_every_n_epoch,
            save_on_train_epoch_end=False,
            save_weights_only=False,
            verbose=True,
        )
        learning_rate_callback = LearningRateMonitor(
            logging_interval="epoch",
            log_weight_decay=True,
            log_momentum=False,
        )

        return [learning_rate_callback, checkpoint_callback]
