import os
from typing import Sequence
from omegaconf import DictConfig

import torch
import torch.nn.functional as F
import lightning as L
from lightning import Callback
from lightning.pytorch.callbacks import ModelCheckpoint, LearningRateMonitor

from transformers import VideoMAEConfig, VideoMAEModel  # or your chosen VideoViT
from ecgcmr.multimodal.multimodal_models.Projections import ProjectionHeadSimple
from ecgcmr.utils.misc import create_optimizer_and_scheduler


class BYOLVideoEncoder(L.LightningModule):
    """
    A BYOL pre-training module that uses a Video Transformer (e.g. VideoMAEModel) for
    encoding frames. It implements:
       - An online encoder + projection + prediction network.
       - A target encoder + projection network updated via EMA.
       - A BYOL loss function for self-supervised representation learning.

    Only the online encoder / projection / prediction heads are trained by the
    optimizer; the target network is a frozen EMA copy of the online network.
    """
    def __init__(self, cfg: DictConfig, save_dir: str) -> None:
        """
        Args:
            cfg: OmegaConf config. Reads the backbone hyper-parameters from
                ``cfg.models[cfg.models.backbone]``, projection-head sizes from
                ``cfg.models.projection``, and the EMA rate from
                ``cfg.training_mode.loss.byol.moving_average_decay``.
            save_dir: Directory used for HF-format weight exports and checkpoints.
        """
        super().__init__()
        self.save_hyperparameters()
        self.cfg = cfg
        self.save_dir = save_dir

        # ----------------------------------------------------
        # 1) Video Transformer Model Configuration
        # ----------------------------------------------------
        model_cfg = cfg.models[cfg.models.backbone]

        # NOTE(review): several of these kwargs (use_learnable_pos_emb,
        # attention_type, mask_loss, use_cls_token, layerscale_init_values) are
        # not part of the upstream Hugging Face VideoMAEConfig — this appears to
        # assume a customized VideoMAE fork. Confirm against the installed package.
        self.image_model_config = VideoMAEConfig(
            image_size=model_cfg.image_size,
            patch_size=model_cfg.patch_size,
            num_channels=model_cfg.num_channels,
            num_frames=cfg.augmentations.imaging.time_sample.result_n_frames,
            tubelet_size=model_cfg.tubelet_size,
            hidden_size=model_cfg.hidden_size,
            num_hidden_layers=model_cfg.num_hidden_layers,
            num_attention_heads=model_cfg.num_attention_heads,
            intermediate_size=model_cfg.intermediate_size,
            hidden_act=model_cfg.hidden_act,
            hidden_dropout_prob=model_cfg.hidden_dropout_prob,
            attention_probs_dropout_prob=model_cfg.attention_probs_dropout_prob,
            drop_path_rate=model_cfg.drop_path_rate,
            initializer_range=model_cfg.initializer_range,
            layer_norm_eps=model_cfg.layer_norm_eps,
            use_learnable_pos_emb=model_cfg.use_learnable_pos_emb,
            attention_type=model_cfg.attention_type,
            mask_loss=model_cfg.mask_loss,
            use_cls_token=model_cfg.use_cls_token,
            layerscale_init_values=model_cfg.layerscale_init_values,
        )

        # Online Encoder
        self.online_encoder = VideoMAEModel(config=self.image_model_config)
        self.encoded_dim = model_cfg.hidden_size

        # Online Projection and Prediction Heads
        self.online_projection = ProjectionHeadSimple(
            input_dim=self.encoded_dim,
            hidden_dim=cfg.models.projection.hidden_dim,
            output_dim=cfg.models.projection.proj_out_dim
        )
        self.online_prediction = ProjectionHeadSimple(
            input_dim=cfg.models.projection.proj_out_dim,
            hidden_dim=cfg.models.projection.hidden_dim,
            output_dim=cfg.models.projection.proj_out_dim
        )

        # Target Network (Encoder + Projection) — EMA copy, never optimized directly.
        self.target_encoder = VideoMAEModel(config=self.image_model_config)
        self.target_projection = ProjectionHeadSimple(
            input_dim=self.encoded_dim,
            hidden_dim=cfg.models.projection.hidden_dim,
            output_dim=cfg.models.projection.proj_out_dim
        )

        # BYOL Parameters
        self.moving_average_decay = cfg.training_mode.loss.byol.moving_average_decay

        # Make sure target network starts with same weights (and is frozen).
        self._init_target_network()

    def _online_target_pairs(self):
        """Yield the (online_module, target_module) pairs tracked by the EMA."""
        yield self.online_encoder, self.target_encoder
        yield self.online_projection, self.target_projection

    def _init_target_network(self):
        """
        Initialize target network parameters with a copy of the online parameters
        and freeze them: the target network is updated exclusively via EMA, so it
        must never receive gradients.
        """
        with torch.no_grad():
            for online_mod, target_mod in self._online_target_pairs():
                for param_o, param_t in zip(online_mod.parameters(), target_mod.parameters()):
                    param_t.copy_(param_o)
                    param_t.requires_grad_(False)

    def forward(self, x: torch.Tensor, online: bool = True) -> torch.Tensor:
        """
        Forward pass that returns the projection output (not the final BYOL prediction).

        Args:
            x: Video batch passed as ``pixel_values`` to the encoder
               (presumably (B, T, C, H, W) — confirm against the data loader).
            online: If True, use the online encoder + projection; otherwise the
                target pair.

        Returns:
            Projected features, shape (B, proj_out_dim).
        """
        if online:
            encoder, projection = self.online_encoder, self.online_projection
        else:
            encoder, projection = self.target_encoder, self.target_projection

        # We do not apply masking since we are not doing standard MAE tasks in BYOL.
        hidden_state = encoder(pixel_values=x, apply_masking=False).last_hidden_state

        # Mean-pool the patch tokens. Skip token 0 only when it actually is a
        # [CLS] token; if the model runs without a CLS token, index 0 is a real
        # patch token and must be included in the pool.
        if getattr(self.image_model_config, "use_cls_token", True):
            features = hidden_state[:, 1:, :].mean(dim=1)
        else:
            features = hidden_state.mean(dim=1)
        return projection(features)

    def update_target_network(self):
        """
        EMA update of the target network from the online network:
        ``target <- decay * target + (1 - decay) * online``.

        Performed in-place under ``no_grad`` (``lerp_(o, w)`` computes
        ``t + w * (o - t)``, which equals the convex combination above) to avoid
        allocating a fresh tensor per parameter each step.
        """
        weight = 1.0 - self.moving_average_decay
        with torch.no_grad():
            for online_mod, target_mod in self._online_target_pairs():
                for param_o, param_t in zip(online_mod.parameters(), target_mod.parameters()):
                    param_t.lerp_(param_o, weight)

    def training_step(self, batch: torch.Tensor, batch_idx: int) -> torch.Tensor:
        """
        BYOL training step. We expect the batch to be a tuple of two augmentations:
        (img_view1, img_view2).
        """
        img_view1, img_view2 = batch

        # 1) Forward pass on Online Network
        online_proj1 = self.forward(img_view1, online=True)
        online_proj2 = self.forward(img_view2, online=True)

        # 2) Prediction heads
        pred1 = self.online_prediction(online_proj1)
        pred2 = self.online_prediction(online_proj2)

        # 3) Forward pass on Target Network (no gradient)
        with torch.no_grad():
            target_proj1 = self.forward(img_view1, online=False)
            target_proj2 = self.forward(img_view2, online=False)

        # 4) Symmetrized BYOL loss: each view's prediction regresses the other
        #    view's target projection.
        loss = (self.byol_loss(pred1, target_proj2) + self.byol_loss(pred2, target_proj1)) / 2
        self.log("train/byol_loss", loss, on_step=True, on_epoch=True, prog_bar=True)

        # 5) Update target network.
        # NOTE(review): this runs once per training_step, i.e. BEFORE the optimizer
        # step and once per micro-batch under gradient accumulation. The reference
        # BYOL recipe updates the EMA after each optimizer step — confirm this is
        # intentional before changing.
        self.update_target_network()

        return loss

    @staticmethod
    def byol_loss(prediction: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Standard BYOL loss = 2 - 2 * cosine_similarity(prediction, target)
        where prediction and target are L2 normalized. Averaged over the batch.
        """
        prediction = F.normalize(prediction, dim=-1)
        target = F.normalize(target, dim=-1)
        return 2 - 2 * (prediction * target).sum(dim=-1).mean()

    def configure_optimizers(self):
        """
        Use your utility function to create an optimizer and LR scheduler.
        It must handle all trainable parts: encoder, projection, prediction.
        (The target network is deliberately excluded — it is EMA-only.)
        """
        optimizer, scheduler = create_optimizer_and_scheduler(
            models=[self.online_encoder, self.online_projection, self.online_prediction],
            optimizer_params=self.cfg.models.params,
            num_batches_per_epoch=self.num_steps()
        )
        return {"optimizer": optimizer, "lr_scheduler": scheduler}

    def num_steps(self) -> int:
        """
        Effective number of optimizer steps per epoch, used to size the LR schedule.

        NOTE(review): ``fit_loop._data_source`` is a private Lightning API and may
        break across Lightning versions.
        """
        dataloader = self.trainer.fit_loop._data_source.dataloader()
        num_batches = len(dataloader)  # len(dataloader) == batches per epoch, not samples
        num_devices = max(1, self.trainer.num_devices)
        return num_batches // (self.trainer.accumulate_grad_batches * num_devices)

    def on_save_checkpoint(self, checkpoint: dict) -> None:
        """
        Save the Hugging Face-compatible pretrained weights of the online encoder
        to disk (overwrites the same ``hf_model`` folder on every checkpoint).
        """
        hf_save_path = os.path.join(self.save_dir, "hf_model")
        os.makedirs(hf_save_path, exist_ok=True)
        self.online_encoder.save_pretrained(hf_save_path)

    def configure_callbacks(self) -> Sequence[Callback]:
        """
        Standard callbacks: model checkpointing (on the training BYOL loss, since
        there is no validation loop here) and LR monitor.
        """
        checkpoint_folder = os.path.join(self.save_dir, 'checkpoints')
        os.makedirs(checkpoint_folder, exist_ok=True)

        checkpoint_callback = ModelCheckpoint(
            dirpath=checkpoint_folder,
            monitor='train/byol_loss',
            mode='min',
            save_top_k=self.cfg.save_top_k,
            every_n_epochs=self.cfg.check_val_every_n_epoch,
        )

        learning_rate_callback = LearningRateMonitor(logging_interval='epoch', log_momentum=False)

        return [checkpoint_callback, learning_rate_callback]
