print("start importing...")
import os
import hydra
import wandb
from math import ceil
from pathlib import Path
import torch

from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import (
    ModelCheckpoint,
    EarlyStopping,
    LearningRateMonitor,
)

from config import Config, load_config_store
from omegaconf import OmegaConf, open_dict
from data import get_datasets, get_dataloaders
from util import init_experiment, wandblogger, MultiMAEModularTrainer

print("finished importing.")
# Give the wandb service up to 1000 s to start before timing out
# (presumably seconds — see wandb's WANDB__SERVICE_WAIT documentation).
os.environ["WANDB__SERVICE_WAIT"] = "1000"

# Register the structured config schema with hydra's ConfigStore so the
# @hydra.main entry point below can resolve it.
load_config_store()

def get_downstream_cfg(cfg):
    """Merge a pretrain run's hydra config into the downstream config.

    Locates the hydra config saved two directories above
    ``cfg.model.pretrain_path``, copies its ``model_params`` wholesale into
    ``cfg``, then re-applies the downstream-specific overrides captured
    beforehand (input/output tasks, image size, mask ratio, ...).
    Mutates and returns ``cfg``.
    """

    # Whether a pretrained model was provided.
    pretrain_checkpoint = cfg['model']['pretrain_path'] 
    assert pretrain_checkpoint is not None, "cfg.model.pretrain_path must be specified"
    # NOTE(review): `.parent` is used below, so pretrain_path is assumed to
    # resolve to a pathlib.Path (checkpoint file two levels inside the run
    # dir) — confirm the config stores a Path rather than a plain str.
    pretrain_path = pretrain_checkpoint.parent.parent
    print(f"{pretrain_path=}")
   
    # sadly hydra requires relative paths, so we need to ensure that absolute 
    # paths are converted into relative ones. 
    # Also the version of pathlib used for this project cannot compute relative
    # paths on a different root directory, thus we need os.path.relpath 
    if pretrain_path.is_absolute(): 
        src = str(Path.cwd())
        dest = str(pretrain_path)
        pretrain_path = Path(os.path.relpath(dest, src))

    # Compose the pretrain run's saved hydra config from its run directory.
    with hydra.initialize(version_base=None, config_path=str(pretrain_path)):
        pretrain_cfg = hydra.compose(config_name='config.yaml')
    # print(OmegaConf.to_yaml(pretrain_cfg))
    
    combined_image = pretrain_cfg['datasets']['combined_image']
    downstream_image_size = cfg['model']['model_params']['img_size'] 
    
    # Linear probing: freeze everything on the encoder side so only the
    # output heads are trained.
    linear = cfg['training'].get('linear', False)
    if linear: 
        cfg['training']['freeze_weights'] = ['input_adapters', 'input_pos_embed', 'encoder']
    cfg_name = f"{cfg['name']}_{pretrain_cfg['name']}{'_linear' if linear else ''}"
    cfg['name'] = cfg_name
    
    print(f"Generate config '{cfg_name}'")
    print(f"{combined_image=}")
    print(f"{downstream_image_size=}")
    print(f"{linear=}")

    pretrain_input_adapters = pretrain_cfg['model']['model_params']['input_tasks']
    print(f"{pretrain_input_adapters=}")

    # Output adapters are never loaded from the pretrain run (see
    # load_model_checkpoints call in train()).
    pretrain_output_adapters = []
    print(f"{pretrain_output_adapters=}")

    # maybe i need to completely rework this part ... if there is time... 
    # Capture the downstream values that must survive the wholesale copy of
    # the pretrain model_params a few lines below.
    downstream_input_adapters  = cfg['model']['model_params']['input_tasks']
    downstream_output_adapters = cfg['model']['model_params']['output_tasks']
    downstream_use_dirichlet = cfg['model']['model_params'].get('use_dirichlet', None)
    downstream_use_seg_masking = cfg['model']['model_params'].get('use_seg_masking', None)
    downstream_mask_ratio = cfg['model']['model_params'].get('mask_ratio', 0.0)
    downstream_decoder_pos_embed_type = cfg['model']['model_params']['decoder_pos_embed_type'] 
    downstream_full_masked_val = cfg['model']['model_params'].get('full_masked_val', None)
    # NOTE(review): the False defaults here make the `is not None` guards in
    # the open_dict block below always true for deep_copy_batch and
    # leave_one_out, so these always override the pretrain values (unlike
    # full_masked_val, which defaults to None) — confirm this is intended.
    downstream_deep_copy_batch = cfg['model']['model_params'].get('deep_copy_batch', False)
    downstream_leave_one_out = cfg['model']['model_params'].get('leave_one_out', False)
    print(f"{downstream_output_adapters=}")
    cfg['datasets']['combined_image'] = combined_image
    # cfg['model']['pretrain_path'] = f"{pretrain_path}/checkpoints/epoch1199.ckpt"
    cfg['model']['pretrain_input_adapters'] = pretrain_input_adapters
    cfg['model']['pretrain_output_adapters'] = pretrain_output_adapters
    # Replace the downstream model_params with the pretrain ones, then
    # restore the downstream overrides captured above.
    cfg['model']['model_params'] = pretrain_cfg['model']['model_params']
    cfg['model']['model_params']['input_tasks'] = downstream_input_adapters
    cfg['model']['model_params']['output_tasks'] = downstream_output_adapters
    cfg['model']['model_params']['mask_ratio'] = downstream_mask_ratio
    cfg['model']['model_params']['decoder_pos_embed_type'] = downstream_decoder_pos_embed_type 
    # cfg['model']['model_params']['use_dirichlet'] = False
    cfg['model']['model_params']['img_size'] = downstream_image_size
    # open_dict permits inserting keys absent from the structured schema.
    with open_dict(cfg):
        if downstream_full_masked_val is not None: 
            cfg['model']['model_params']['full_masked_val'] = downstream_full_masked_val 
        if downstream_deep_copy_batch is not None: 
            cfg['model']['model_params']['deep_copy_batch'] = downstream_deep_copy_batch 
        if downstream_leave_one_out is not None: 
            cfg['model']['model_params']['leave_one_out'] = downstream_leave_one_out 
        if downstream_use_dirichlet is not None: 
            cfg['model']['model_params']['use_dirichlet'] = downstream_use_dirichlet
        if downstream_use_seg_masking is not None: 
            cfg['model']['model_params']['use_seg_masking'] = downstream_use_seg_masking
    

    return cfg

def train(cfg, train_loader, val_loader):
    """Build a Lightning ``Trainer`` from ``cfg`` and fit the model.

    Wires up optional wandb logging, early stopping, pretrained-weight
    loading, weight freezing, and checkpoint / LR-monitor callbacks, then
    runs ``trainer.fit`` on the supplied dataloaders.
    """
    module = MultiMAEModularTrainer(cfg)

    use_gpu = bool(cfg.general.gpus)
    trainer_kwargs = dict(
        max_epochs=cfg.training.num_epochs,
        log_every_n_steps=1,
        logger=False,
        accelerator="gpu" if use_gpu else "cpu",
        devices=cfg.general.gpus if use_gpu else None,
        check_val_every_n_epoch=cfg.training.monitoring.frequency,
        accumulate_grad_batches=cfg.training.grad_acc_steps,
        callbacks=[],
        num_sanity_val_steps=0,
        gradient_clip_val=cfg.training.gradient_clip_val,
        gradient_clip_algorithm=cfg.training.gradient_clip_algorithm,
        profiler=cfg.training.profiler,
    )

    if cfg.general.log_wandb:
        trainer_kwargs["logger"] = wandblogger(cfg, cfg.general.log_pth)

    if cfg.training.early_stopping is not None:
        trainer_kwargs["callbacks"].append(EarlyStopping(**cfg.training.early_stopping))

    # Pretrained weights must have been saved through Lightning.
    if cfg.model.pretrain_path:
        module.load_model_checkpoints(
            cfg.model.pretrain_path,
            load_input_adapters=cfg.model.pretrain_input_adapters,
            load_output_adapters=cfg.model.pretrain_output_adapters,
        )

    if "freeze_weights" in cfg.training and cfg.training.freeze_weights is not None:
        module.freeze_weights(cfg.training.freeze_weights)

    ckpt_dir = cfg.general.log_pth / "checkpoints"
    # Minimize the monitored metric when it is a loss, maximize otherwise.
    best_ckpt = ModelCheckpoint(
        dirpath=ckpt_dir,
        filename="best_model",
        save_top_k=1,
        verbose=True,
        monitor=cfg.training.monitoring.monitor,
        mode="min" if "loss" in cfg.training.monitoring.monitor else "max",
    )
    periodic_ckpt = ModelCheckpoint(
        dirpath=ckpt_dir,
        filename="epoch{epoch}",
        verbose=True,
        auto_insert_metric_name=False,
        every_n_epochs=cfg.training.ckpt_frequency,
        save_top_k=-1,
    )

    # Checkpointing and LR monitoring are only enabled alongside wandb
    # logging.
    if cfg.general.log_wandb:
        trainer_kwargs["callbacks"] += [
            best_ckpt,
            periodic_ckpt,
            LearningRateMonitor(logging_interval="epoch"),
        ]

    return Trainer(**trainer_kwargs).fit(module, train_loader, val_loader)


@hydra.main(version_base=None, config_path=str(Path.cwd() / "config"))
def main(cfg: Config) -> None:
    """Hydra entry point: prepare the config, build dataloaders, and train.

    If ``cfg.model.pretrain_path`` is set, the pretrain run's config is
    merged into ``cfg`` via ``get_downstream_cfg`` before the experiment is
    initialized.
    """
    if cfg['model']['pretrain_path'] is not None:
        # Plain string: the previous f-string had no placeholders.
        print("Pretrain path is not None")
        # get_downstream_cfg re-initializes hydra to compose the pretrain
        # config, so the global hydra state must be cleared first.
        hydra.core.global_hydra.GlobalHydra.instance().clear()
        cfg = get_downstream_cfg(cfg)
    cfg = init_experiment(cfg)

    print(OmegaConf.to_yaml(cfg))
    print(f"Has CUDA: {torch.cuda.is_available()}")

    # NOTE: "file_system" sharing is the documented torch workaround for
    # file-descriptor limits when many dataloader workers share tensors.
    torch.multiprocessing.set_sharing_strategy("file_system")

    train_val_data, test_data, train_transforms, test_transforms = get_datasets(cfg)
    train_loader, val_loader, _ = get_dataloaders(
        cfg, train_val_data, test_data, train_transforms, test_transforms
    )

    train(cfg, train_loader, val_loader)


# Script entry point; hydra parses CLI overrides inside main().
if __name__ == "__main__":
    main()
