import json
import torch

import pytorch_lightning as pl
import wandb
from abc import ABC, abstractmethod
from monai.data.meta_tensor import MetaTensor

from loss import CombinedLoss

# for MultiMAEBaseTrainer
from models import create_multimae, calc_patchified_dim, interpolate_pos_embed


class BaseTrainer(pl.LightningModule): 
    """Shared Lightning scaffolding: optimizer/scheduler configuration,
    combined-loss construction, and gathering of per-batch outputs."""

    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg  # omegaconf/hydra-style config object — TODO confirm

    @staticmethod
    def gather_outputs(outputs: list[dict]):
        """Merge a list of per-batch output dicts into one cohort-level dict.

        Per-key handling (keys taken from the first dict):
          * ``"patient_id"``: flatten the nested per-batch id lists.
          * ``"partial_losses"``: stack the per-batch loss lists and unbind,
            yielding a list of size <num_losses> of tensors with shape
            (<cohort_size>,).
          * scalar tensors (empty ``.shape``): collect into a 1-D tensor.
          * everything else: concatenate along the batch dimension.
        MONAI ``MetaTensor`` results are converted to plain tensors.
        """
        gathered_outputs = {}
        for key in outputs[0].keys():
            if key == "patient_id":
                gathered_outputs[key] = [outs for out in outputs for outs in out[key]]
            elif key == "partial_losses":
                # list of size <num_losses> of tensors with shape (<cohort_size>,)
                gathered_outputs[key] = torch.cat(
                    [torch.tensor(out[key]).unsqueeze(0) for out in outputs]
                ).unbind(dim=1)
            elif not outputs[0][key].shape:
                gathered_outputs[key] = torch.tensor([out[key] for out in outputs])
            else:
                gathered_outputs[key] = torch.cat([out[key] for out in outputs])

            if isinstance(gathered_outputs[key], MetaTensor):
                gathered_outputs[key] = gathered_outputs[key].as_tensor()

        return gathered_outputs

    def configure_optimizers(self) -> dict:
        """Configure the optimizer (and optional LR scheduler) from the config.

        Only the FIRST entry of ``cfg.training.optimizer`` and
        ``cfg.training.scheduler`` is used; names are resolved against
        ``torch.optim`` and ``torch.optim.lr_scheduler`` respectively.
        """
        # next(iter(...)) picks the first configured entry without building a
        # throwaway list; it also fixes the late-binding closure bug of the
        # previous lambda-in-comprehension, which made the [0] constructor
        # silently use the *last* scheduler entry's name and kwargs.
        opt_name, opt_kwargs = next(iter(self.cfg.training.optimizer.items()))
        self.optimizer = getattr(torch.optim, opt_name)(self.parameters(), **opt_kwargs)

        if self.cfg.training.scheduler is not None:
            sched_name, sched_kwargs = next(iter(self.cfg.training.scheduler.items()))
            # Bind name/kwargs as defaults so the constructor is safe to call later.
            self.scheduler_constructor = (
                lambda opt, _n=sched_name, _kw=sched_kwargs:
                    getattr(torch.optim.lr_scheduler, _n)(opt, **_kw)
            )
            self.scheduler = self.scheduler_constructor(self.optimizer)
            scheduler_dict = {
                "scheduler": self.scheduler,
                **self.cfg.training.monitoring,  # for monitor and frequency params
            }
            return {"optimizer": self.optimizer, "lr_scheduler": scheduler_dict}
        else:
            self.scheduler_constructor = None
            return self.optimizer

    def _get_loss(self):
        """Build ``self.loss`` (a CombinedLoss) from ``cfg.training.loss``.

        Normalizes ``loss_weights``: missing -> None, scalar -> one-element list.
        """
        loss_cfg = self.cfg.training.loss
        if not hasattr(loss_cfg, "loss_weights"):
            loss_cfg.loss_weights = None
        elif isinstance(loss_cfg.loss_weights, (int, float)):
            # BUGFIX: wrap the scalar in a list; the previous
            # list(loss_cfg.loss_weights) raised TypeError because
            # a number is not iterable.
            loss_cfg.loss_weights = [loss_cfg.loss_weights]
        self.loss = CombinedLoss(
            self.cfg,
            loss_instances=loss_cfg.loss_instances,
            loss_weights=loss_cfg.loss_weights,
            repeat_inputs=loss_cfg.repeat_inputs,
        )

    @abstractmethod
    def _get_metrics(self):
        # NOTE(review): the class does not use an ABC metaclass, so
        # @abstractmethod is not enforced at instantiation; the explicit
        # raise below covers accidental calls on subclasses.
        raise NotImplementedError


class MultiMAEBaseTrainer(BaseTrainer): 
    """Base trainer for MultiMAE models: builds the model and loss, loads
    checkpoints (interpolating positional embeddings when the patch grid
    differs), and supports freezing selected sub-modules."""

    def __init__(self, cfg): 
        super().__init__(cfg)
        
        self._get_loss()
        self._get_model()
        
        # Fall back to the dataset's sequence list when explicit task lists
        # are absent from the model params.
        self.input_tasks = cfg.model.model_params.input_tasks if "input_tasks" in cfg.model.model_params else cfg.datasets.sequences_to_use
        self.output_tasks = cfg.model.model_params.output_tasks if "output_tasks" in cfg.model.model_params else cfg.datasets.sequences_to_use
        
    def _get_model(self):
        """Instantiate ``self.model`` via ``create_multimae`` from the config."""
        self.model = create_multimae(
            self.cfg,
            **self.cfg.model.model_params
            )
        self.only_masked_loss = self.cfg.model.only_masked_loss
        
    @abstractmethod
    def _training_loss(self, outputs: dict, batch: dict) -> tuple: 
        """Compute the training loss for a batch; implemented by subclasses."""
        pass 
    
    def load_model_checkpoints(self, checkpoint_path: str, load_input_adapters=None, load_output_adapters=None):
        """Load weights from a Lightning checkpoint into this trainer.

        Positional embeddings are interpolated if the checkpoint's patchified
        grid differs from the current model's. ``loss`` keys are never loaded.
        When ``load_input_adapters`` / ``load_output_adapters`` are given
        (lists of adapter names), only matching adapter keys are loaded;
        otherwise all adapter keys are loaded.

        Raises:
            ValueError: for a checkpoint key that matches no loading rule.
        """
        map_location = None if torch.cuda.is_available() else torch.device("cpu")
        
        state_dict = self.state_dict()
        model_patchified_dim = self.model.patchified_dim
        
        # NOTE(review): weights_only=False unpickles arbitrary objects — only
        # load checkpoints from trusted sources.
        checkpoint = torch.load(checkpoint_path, map_location=map_location, weights_only=False)
        checkpoint_state_dict = checkpoint['state_dict']
        checkpoint_model_img_size = checkpoint['hyper_parameters']['model']['model_params']['img_size']
        checkpoint_model_patch_size = checkpoint['hyper_parameters']['model']['model_params']['patch_size']
        checkpoint_patchified_dim = calc_patchified_dim(checkpoint_model_img_size, checkpoint_model_patch_size)
        
        def check_and_interpolate_pos_embed(pos_embed): 
            # Resample the positional embedding only when the grids differ.
            if not model_patchified_dim == checkpoint_patchified_dim:
                pos_embed = interpolate_pos_embed(pos_embed, checkpoint_patchified_dim, model_patchified_dim)
            return pos_embed
        
        # Keys containing any of these substrings need special handling below.
        exclude_loading = ["pos_embed", "loss"]
        exclude_loading += [] if load_input_adapters is None else ["input_adapters"]
        exclude_loading += [] if load_output_adapters is None else ["output_adapters"]
        
        for key in checkpoint_state_dict.keys(): 
            if not any(ex in key for ex in exclude_loading):
                state_dict[key] = checkpoint_state_dict[key]
            elif ("input_adapters" in key) and (load_input_adapters is not None):
                if any(adapter in key for adapter in load_input_adapters):
                    state_dict[key] = check_and_interpolate_pos_embed(checkpoint_state_dict[key]) if "pos_embed" in key else checkpoint_state_dict[key]
            elif ("output_adapters" in key) and (load_output_adapters is not None):
                if any(adapter in key for adapter in load_output_adapters):
                    state_dict[key] = check_and_interpolate_pos_embed(checkpoint_state_dict[key]) if "pos_embed" in key else checkpoint_state_dict[key]
            elif "pos_embed" in key:
                state_dict[key] = check_and_interpolate_pos_embed(checkpoint_state_dict[key])
            elif "loss" in key: 
                continue  # loss buffers/weights are never restored
            else: 
                raise ValueError(f"Key {key} not found in model state dict") 
    
        self.load_state_dict(state_dict)

    def freeze_weights(self, parts: list): 
        """Disable gradients for the named model parts.

        Args:
            parts: any of "input_adapters", "input_pos_embed", "encoder",
                "output_adapters". Unknown names are silently ignored.
        """
        for part in parts: 
            if part == "input_adapters": 
                for param in self.model.input_adapters.parameters(): 
                    param.requires_grad = False
            if part == "input_pos_embed": 
                self.model.pos_embed.requires_grad = False
            if part == "encoder": 
                for param in self.model.encoder.parameters(): 
                    param.requires_grad = False
            if part == "output_adapters": 
                # BUGFIX: previously iterated self.model.parameters(), which
                # froze the ENTIRE model instead of just the output adapters.
                for param in self.model.output_adapters.parameters(): 
                    param.requires_grad = False