import os
from omegaconf import DictConfig

import torch
from torch import nn

from ecgcmr.imaging.img_models.ImageResNet3D import ResNet3D_Encoder
from ecgcmr.multimodal.multimodal_models.Projections import ProjectionHead
from ecgcmr.utils.LastLayer import LastLayerConfiguration
from ecgcmr.imaging.img_loss.CustomImagingNTXLoss import NTXentLoss

from ecgcmr.utils.misc import create_optimizer_and_scheduler


class ImageEncoderSimCLRResNet3D_PyTorch(nn.Module):
    """SimCLR-style contrastive image encoder built on a 3D ResNet backbone.

    Composition:
      * ``mri_encoder`` — ResNet3D encoder producing per-frame embeddings (B, T', C).
      * ``imag_layernorm`` — LayerNorm over the encoded feature dimension.
      * ``img_projection`` — projection head used only for the contrastive objective.
      * ``last_layer`` — task-dependent head trained as a downstream linear probe
        on the (frozen) mean-pooled encoder features.

    Training alternates between the main contrastive task (NTXent loss on two
    augmented views) and the downstream probe; the ``prepare_for_*`` methods
    toggle the corresponding freeze/train states.
    """

    def __init__(self, cfg: DictConfig, save_dir: str) -> None:
        """Build encoder, projection head, losses and downstream components.

        Args:
            cfg: Hydra/OmegaConf config holding model, dataset, loss and
                downstream-task hyperparameters.
            save_dir: Directory where checkpoints are written.
        """
        super().__init__()

        self.cfg = cfg
        self.mask_labels = False

        self.save_dir = save_dir
        self.main_batch_size = cfg.dataset.batch_size
        self.downstream_batch_size = cfg.downstream_task.batch_size

        self.mri_encoder = ResNet3D_Encoder(model_depth=cfg.models.resnet.model_depth,
                                            zero_init_residual=cfg.models.resnet.zero_init_residual)

        self.img_projection = ProjectionHead(non_linear=self.cfg.models.projection.non_linear,
                                             encoded_dim=self.mri_encoder.encoded_dim,
                                             hidden_dim=self.cfg.models.projection.hidden_dim,
                                             output_dim=self.cfg.models.projection.proj_out_dim,
                                             num_layers=self.cfg.models.projection.num_layers)

        # Separate train/val criteria (identical settings) so distributed
        # gathering state never mixes between phases.
        self.criterion_train = NTXentLoss(temperature=cfg.training_mode.loss.NTXentLoss.temperature, gather_distributed=True)
        self.criterion_val = NTXentLoss(temperature=cfg.training_mode.loss.NTXentLoss.temperature, gather_distributed=True)

        self.init_task_dependent_components(cfg=cfg)

        # BUG FIX: was ``-float('-inf')`` which evaluates to +inf, so no epoch
        # could ever register as an improvement. Best R2 starts at the worst
        # possible value (-inf); best MAE at +inf.
        self.best_mean_R2 = float('-inf')
        self.best_mean_MAE = float('inf')

        self.mean_first = cfg.models.mean_first

    def init_task_dependent_components(self, cfg: DictConfig):
        """Create the downstream head, its criterion, metrics and plotter.

        Also builds the LayerNorm applied to encoder outputs and records the
        last-layer configuration so the head can be reinitialized each epoch.
        """
        last_layer_config = LastLayerConfiguration(cfg=cfg, encoded_dim=self.mri_encoder.encoded_dim, mask_labels=self.mask_labels)

        self.task = last_layer_config.task
        self.last_layer = last_layer_config.create_last_layer()

        self.last_layer_criterion = last_layer_config.create_criterion()

        self.downstream_task_metrics_val = last_layer_config.create_metrics()
        self.plotter_val = last_layer_config.create_plotter()

        self.train_every_n_epochs = self.cfg.downstream_task.epoch_interval
        self.reinitialize_every_epoch = self.cfg.downstream_task.reinitialize_every_epoch

        # BUG FIX: previously read ``self.last_layer_config``, an attribute that
        # was never assigned (AttributeError at construction). Keep the local
        # config object so ``reinitialize_last_layer`` can rebuild the head.
        self.initial_last_layer_config = last_layer_config

        self.imag_layernorm = nn.LayerNorm(normalized_shape=self.mri_encoder.encoded_dim,
                                           eps=1e-12, dtype=torch.float32)
        nn.init.constant_(self.imag_layernorm.weight, 1.0)
        nn.init.constant_(self.imag_layernorm.bias, 0.0)

    def reinitialize_last_layer(self, device):
        """Rebuild the downstream head from its initial config and move it to ``device``."""
        self.last_layer = self.initial_last_layer_config.create_last_layer()
        self.last_layer = self.last_layer.to(device)

    def forward(self, img_view: torch.Tensor, mean_first=None) -> torch.Tensor:
        """Encode an image view into a single vector per sample.

        Args:
            img_view: Batch of image views fed to the 3D ResNet encoder.
            mean_first: Three-way switch. ``True``: mean-pool over frames, then
                project (B, D). ``False``: project per frame, then mean-pool
                (B, D). ``None``: skip the projection head entirely and return
                the mean-pooled normalized embedding (B, C) — used for the
                downstream linear probe.

        Returns:
            Tensor of shape (B, D) when projected, (B, C) when ``mean_first``
            is ``None``.
        """
        img_embedding = self.mri_encoder(img_view)  # B, T', C
        img_embedding = self.imag_layernorm(img_embedding)

        if mean_first is not None:
            if mean_first:
                img_embedding = img_embedding.mean(dim=1)  # B, C
                output = self.img_projection(img_embedding)  # B, D
            else:
                output = self.img_projection(img_embedding)  # B, T', D
                output = output.mean(dim=1)  # B, D
        else:
            # No projection: raw normalized features, mean-pooled over frames.
            output = img_embedding.mean(dim=1)

        return output

    def forward_contrastive(self, batch):
        """Contrastive (SimCLR) step: embed both views, return NTXent loss.

        Returns:
            Tuple of (loss, logits, labels) from the train criterion.
        """
        img_view1, img_view2 = batch

        img_embed1 = self(img_view=img_view1, mean_first=self.mean_first)
        img_embed2 = self(img_view=img_view2, mean_first=self.mean_first)

        loss, logits, labels = self.criterion_train(img_embed1, img_embed2)

        return loss, logits, labels

    def forward_linear_probe(self, batch):
        """Downstream step: frozen-feature embedding through the probe head.

        Returns:
            Tuple of (loss, logits, labels) for the downstream task.
        """
        img_view, labels = batch

        # mean_first=None: projection head is bypassed; the probe sees the
        # frame-averaged normalized encoder features (B, C).
        img_embed = self(img_view=img_view, mean_first=None)

        img_logits = self.last_layer(img_embed)
        loss = self.last_layer_criterion(img_logits, labels)

        return loss, img_logits, labels

    def configure_optimizers(self, num_main_steps, num_downstream_steps):
        """Create (main, downstream) optimizer/scheduler pairs.

        Args:
            num_main_steps: Batches per epoch for the contrastive task.
            num_downstream_steps: Batches per epoch for the probe task.

        Returns:
            ``([main_opt, probe_opt], [main_sched, probe_sched])``.
        """
        main_optimizer, main_scheduler = create_optimizer_and_scheduler(
            models=[self.mri_encoder, self.imag_layernorm, self.img_projection],
            optimizer_params=self.cfg.models.params,
            num_batches_per_epoch=num_main_steps
        )

        last_layer_optimizer, last_layer_scheduler = create_optimizer_and_scheduler(
            models=[self.last_layer],
            optimizer_params=self.cfg.downstream_task.params,
            num_batches_per_epoch=num_downstream_steps,
        )
        return [main_optimizer, last_layer_optimizer], [main_scheduler, last_layer_scheduler]

    def save_checkpoint(self, name: str) -> None:
        """Save model state dict and config under ``save_dir/<name>_checkpoint/model.pth``."""
        save_folder = os.path.join(self.save_dir, f"{name}_checkpoint")
        os.makedirs(save_folder, exist_ok=True)
        save_path = os.path.join(save_folder, 'model.pth')

        checkpoint = {
            'model_state_dict': self.state_dict(),
            'cfg': self.cfg,
        }
        torch.save(checkpoint, save_path)

    def prepare_for_main_training(self):
        """Prepare model for main task training: train encoder/projection, freeze probe."""
        self.unfreeze_model(self.mri_encoder)
        self.mri_encoder.train()

        self.unfreeze_model(self.img_projection)
        self.img_projection.train()

        self.unfreeze_model(self.imag_layernorm)
        self.imag_layernorm.train()

        # The downstream probe must not receive gradients from the main task.
        self.freeze_model(self.last_layer)

    def prepare_for_main_validation(self):
        """Prepare model for main task validation: everything frozen, eval mode."""
        self.eval()

        self.freeze_model(self.mri_encoder)
        self.freeze_model(self.img_projection)
        self.freeze_model(self.imag_layernorm)
        self.freeze_model(self.last_layer)

    def prepare_for_downstream_training(self, device, num_downstream_steps):
        """Prepare model for downstream training: frozen backbone, trainable probe.

        Returns:
            A fresh probe optimizer when ``reinitialize_every_epoch`` is set,
            otherwise ``None`` (caller keeps its existing optimizer).
        """
        last_layer_optimizer = None

        if self.reinitialize_every_epoch:
            # BUG FIX: previously called the non-existent
            # ``reinitialize_last_layer_and_norm`` (AttributeError whenever
            # reinitialize_every_epoch was enabled).
            self.reinitialize_last_layer(device=device)
            last_layer_optimizer, _ = self.recreate_downstream_optimizer(num_downstream_steps)

        self.eval()

        self.freeze_model(self.mri_encoder)
        self.freeze_model(self.img_projection)
        self.freeze_model(self.imag_layernorm)

        self.unfreeze_model(self.last_layer)
        self.last_layer.train()

        return last_layer_optimizer

    def prepare_for_downstream_validation(self):
        """Prepare model for downstream validation: everything frozen, eval mode."""
        self.eval()

        self.freeze_model(self.mri_encoder)
        self.freeze_model(self.img_projection)
        self.freeze_model(self.imag_layernorm)
        self.freeze_model(self.last_layer)

    def freeze_model(self, model):
        """Disable gradients for every parameter of ``model``."""
        for param in model.parameters():
            param.requires_grad = False

    def unfreeze_model(self, model):
        """Enable gradients for every parameter of ``model``."""
        for param in model.parameters():
            param.requires_grad = True