import os
from omegaconf import DictConfig

import torch
from torch import nn

from transformers import ViTMAEConfig, ViTMAEForPreTraining
from ecgcmr.utils.LastLayer import LastLayerConfiguration

from ecgcmr.utils.misc import create_optimizer_and_scheduler


class ECGViTMAEPyTorch_LinearProbe(nn.Module):
    """ViT-MAE ECG encoder with a task-dependent head for linear probing / fine-tuning.

    The encoder is a ``ViTMAEForPreTraining`` built from the size-specific
    sub-config ``cfg.models[cfg.models.model_size]``. The downstream head
    (linear layer or MLP), its criterion, metrics, plotter and sklearn model
    are produced by ``LastLayerConfiguration``. The ECG is treated as a
    single-channel 2D "image" of shape (electrodes, cropped time steps).
    """

    def __init__(self, cfg: DictConfig, save_dir: str) -> None:
        """Build the ECG backbone and the task-dependent components.

        Args:
            cfg: OmegaConf configuration with ``models``, ``dataset``,
                ``augmentations`` and ``downstream_task`` sections.
            save_dir: Directory where checkpoints are written.
        """
        super().__init__()

        self.cfg = cfg
        self.save_dir = save_dir

        model_cfg = cfg.models[cfg.models.model_size]

        # ECG as single-channel 2D input: (electrodes, cropped time steps).
        self.image_size = (cfg.dataset.input_electrodes,
                           cfg.augmentations.ecg.random_crop.ecg_time_steps)
        self.patch_size = tuple(model_cfg.patch_size)

        self.ecg_model_config = ViTMAEConfig(
            hidden_size=model_cfg.hidden_size,
            num_hidden_layers=model_cfg.num_hidden_layers,
            num_attention_heads=model_cfg.num_attention_heads,
            intermediate_size=model_cfg.intermediate_size,
            hidden_act=model_cfg.hidden_act,
            hidden_dropout_prob=model_cfg.hidden_dropout_prob,
            attention_probs_dropout_prob=model_cfg.attention_probs_dropout_prob,
            drop_path_rate=model_cfg.drop_path_rate,
            initializer_range=model_cfg.initializer_range,
            layer_norm_eps=model_cfg.layer_norm_eps,
            qkv_bias=model_cfg.qkv_bias,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=1,
            decoder_intermediate_size=model_cfg.decoder_intermediate_size,
            decoder_num_attention_heads=model_cfg.decoder_num_attention_heads,
            decoder_hidden_size=model_cfg.decoder_hidden_size,
            decoder_num_hidden_layers=model_cfg.decoder_num_hidden_layers,
            mask_ratio=model_cfg.mask_ratio,
            mask_loss=model_cfg.mask_loss,
            use_cls_token=model_cfg.use_cls_token,
            use_learnable_pos_emb=model_cfg.use_learnable_pos_emb,
            layerscale_init_values=model_cfg.layerscale_init_values,
        )

        self.ecg_model = ViTMAEForPreTraining(config=self.ecg_model_config)

        self.init_task_dependent_components(cfg=cfg)

    def _build_regression_layernorm(self) -> nn.LayerNorm:
        """Create the LayerNorm applied to pooled features before the head.

        Initialized to the identity transform (weight=1, bias=0); fp32 so the
        normalization is stable regardless of the encoder's compute dtype.
        """
        layernorm = nn.LayerNorm(normalized_shape=self.ecg_model_config.hidden_size,
                                 eps=self.ecg_model_config.layer_norm_eps, dtype=torch.float32)
        nn.init.constant_(layernorm.weight, 1.0)
        nn.init.constant_(layernorm.bias, 0.0)
        return layernorm

    def init_task_dependent_components(self, cfg: DictConfig):
        """Create the downstream head, criterion, metrics, plotter and feature norm.

        Raises:
            ValueError: If ``cfg.downstream_task.training_scheme`` is not one of
                ``'linear_probing'`` or ``'fine_tune'``.
        """
        use_mlp = cfg.downstream_task.get('use_mlp', False)
        training_scheme = cfg.downstream_task.get('training_scheme', 'linear_probing')

        if training_scheme == 'linear_probing':
            self.training_scheme_setting = "LP"
        elif training_scheme == 'fine_tune':
            self.training_scheme_setting = "FN"
        else:
            # Fail fast: previously an unknown scheme silently left the
            # attribute unset, surfacing later as an opaque AttributeError.
            raise ValueError(
                f"Unknown training_scheme: {training_scheme!r} "
                "(expected 'linear_probing' or 'fine_tune')"
            )

        self.use_mlp_setting = 'MLP' if use_mlp else 'LinearLayer'

        last_layer_config = LastLayerConfiguration(cfg=cfg,
                                                   encoded_dim=self.ecg_model_config.hidden_size,
                                                   mask_labels=False,
                                                   training_scheme=self.training_scheme_setting,
                                                   use_mlp=self.use_mlp_setting)

        self.task = last_layer_config.task
        self.last_layer = last_layer_config.create_last_layer()

        self.last_layer_criterion = last_layer_config.create_criterion()

        self.downstream_task_metrics = last_layer_config.create_metrics()
        self.plotter_val = last_layer_config.create_plotter()
        self.regression = last_layer_config.create_sklearn_model()

        self.train_every_n_epochs = self.cfg.downstream_task.epoch_interval
        self.reinitialize_every_epoch = self.cfg.downstream_task.reinitialize_every_epoch

        self.ecg_layernorm_regression = self._build_regression_layernorm()

        # Kept so the head can be re-created from scratch each epoch if requested.
        self.initial_last_layer_config = last_layer_config

    def reinitialize_last_layer_and_norm(self, device):
        """Re-create the downstream head and feature LayerNorm with fresh weights on ``device``."""
        self.last_layer = self.initial_last_layer_config.create_last_layer().to(device)
        self.ecg_layernorm_regression = self._build_regression_layernorm().to(device)

    def forward_masked(self, batch: torch.Tensor):
        """Run the masked-autoencoding forward pass (main pre-training task).

        NOTE(review): ``apply_masking``/``use_layernorm`` are project-specific
        extensions of the HF ViTMAE forward interface — confirm against the
        patched ``ViTMAEForPreTraining`` implementation.
        """
        return self.ecg_model(pixel_values=batch, apply_masking=True, use_layernorm=True)

    def forward_linear_probe(self, batch):
        """Encode a batch without masking and apply the downstream head.

        Args:
            batch: Tuple ``(pixel_values, labels)``.

        Returns:
            Tuple ``(loss, features, logits, labels)`` where ``features`` are
            the pooled encoder features *before* normalization.
        """
        pixel_values, labels = batch
        hidden_state = self.ecg_model.vit(pixel_values=pixel_values, apply_masking=False, use_layernorm=False).last_hidden_state

        if self.ecg_model_config.use_cls_token:
            # CLS token is prepended at position 0; use it as the pooled feature.
            features = hidden_state[:, 0]
        else:
            # No CLS token: mean-pool over all patch tokens.
            features = torch.mean(hidden_state, dim=1)

        features_normed = self.ecg_layernorm_regression(features)
        logits = self.last_layer(features_normed)
        loss = self.last_layer_criterion(logits, labels)

        return loss, features, logits, labels

    def configure_optimizers(self, num_main_steps, num_downstream_steps):
        """Create optimizer/scheduler pairs for the main task and the downstream head.

        Returns:
            ``([main_opt, head_opt], [main_sched, head_sched])``.
        """
        main_optimizer, main_scheduler = create_optimizer_and_scheduler(
            models=[self.ecg_model],
            optimizer_params=self.cfg.models.params,
            num_batches_per_epoch=num_main_steps,
        )

        # Same construction as when the head optimizer is rebuilt each epoch.
        last_layer_optimizer, last_layer_scheduler = self.recreate_downstream_optimizer(num_downstream_steps)

        return [main_optimizer, last_layer_optimizer], [main_scheduler, last_layer_scheduler]

    def recreate_downstream_optimizer(self, num_downstream_steps):
        """Create a fresh optimizer/scheduler for the downstream head + feature norm."""
        last_layer_optimizer, last_layer_scheduler = create_optimizer_and_scheduler(
            models=[self.last_layer, self.ecg_layernorm_regression],
            optimizer_params=self.cfg.downstream_task.params,
            num_batches_per_epoch=num_downstream_steps,
        )
        return last_layer_optimizer, last_layer_scheduler

    def save_checkpoint(self, name: str) -> None:
        """Save the ViT encoder (no decoder, no head) in HF format under ``save_dir``."""
        hf_save_path = os.path.join(self.save_dir, f"{name}_hf_model")
        os.makedirs(hf_save_path, exist_ok=True)
        self.ecg_model.vit.save_pretrained(hf_save_path)

    def prepare_for_main_training(self):
        """Prepare model for main task training: trainable encoder, frozen head."""
        self.unfreeze_model(self.ecg_model)
        self.ecg_model.train()

        self.freeze_model(self.ecg_layernorm_regression)
        self.freeze_model(self.last_layer)

    def prepare_for_main_validation(self):
        """Prepare model for main task validation: everything frozen, eval mode."""
        self.eval()

        self.freeze_model(self.ecg_model)
        self.freeze_model(self.last_layer)
        self.freeze_model(self.ecg_layernorm_regression)

    def prepare_for_downstream_training(self, device, num_downstream_steps):
        """Prepare model for downstream task training: frozen encoder, trainable head.

        If ``reinitialize_every_epoch`` is set, the head and LayerNorm are
        re-created on ``device`` and a fresh optimizer is returned; otherwise
        returns ``None``.
        """
        last_layer_optimizer = None

        if self.reinitialize_every_epoch:
            self.reinitialize_last_layer_and_norm(device=device)
            last_layer_optimizer, _ = self.recreate_downstream_optimizer(num_downstream_steps)

        # Switch the whole module to eval first (disables dropout etc. in the
        # frozen encoder), then flip only the trainable parts back to train.
        self.eval()

        self.freeze_model(self.ecg_model)

        self.unfreeze_model(self.last_layer)
        self.last_layer.train()

        self.unfreeze_model(self.ecg_layernorm_regression)
        self.ecg_layernorm_regression.train()

        return last_layer_optimizer

    def prepare_for_downstream_validation(self):
        """Prepare model for downstream task validation: everything frozen, eval mode."""
        self.eval()

        self.freeze_model(self.ecg_model)
        self.freeze_model(self.ecg_layernorm_regression)
        self.freeze_model(self.last_layer)

    def freeze_model(self, model):
        """Disable gradients for every parameter of ``model``."""
        for param in model.parameters():
            param.requires_grad = False

    def unfreeze_model(self, model):
        """Enable gradients for every parameter of ``model``."""
        for param in model.parameters():
            param.requires_grad = True