import argparse
import os

import MinkowskiEngine as ME
import pytorch_lightning as pl
import torch.nn as nn
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy

from distil.lightning_datamodule import DistilDataModule
from distil.lightning_trainer import LightningDistil
from distil.model_builder import make_model
from utils.read_config import generate_config


def main():
    """
    Launch the distillation training run.

    Parses CLI arguments, loads the YAML configs for the distillation and the
    pretrained image model, builds the data module and the model components,
    and starts a (possibly multi-GPU DDP) PyTorch Lightning training loop.
    """
    parser = argparse.ArgumentParser(description="arg parser")
    parser.add_argument(
        "--cfg_file",
        type=str,
        default="config/distil.yaml",
        help="specify the config for distilation",
    )
    parser.add_argument(
        "--resume_path",
        type=str,
        default=None,
        help="provide a path to resume an incomplete distilation",
    )
    parser.add_argument(
        "--pretrained_ckpt_path",
        type=str,
        default=None,
        help="provide a path for pretrained ckpt file",
    )
    parser.add_argument(
        "--img_cfg_file",
        type=str,
        default="config/segnet.yaml",
        help="specify the original config for pretrained image model",
    )
    args = parser.parse_args()
    config = generate_config(args.cfg_file)
    img_config = generate_config(args.img_cfg_file)

    config["resume_path"] = args.resume_path
    # BUGFIX: the CLI value was previously overwritten by a hard-coded
    # "imgmodel_segnet_83.5.ckpt" debug leftover; honor the argument instead.
    config["pretrained_ckpt_path"] = args.pretrained_ckpt_path

    # Only print the config once from the rank-0 worker. Environment variables
    # are strings, so compare against "0" (the old `== 0` int comparison was
    # false whenever the DDP launcher actually set LOCAL_RANK).
    if os.environ.get("LOCAL_RANK", "0") == "0":
        print(
            "\n" + "\n".join(list(map(lambda x: f"{x[0]:20}: {x[1]}", config.items())))
        )

    dm = DistilDataModule(config)
    model_points, model_images, model_classifier, model_ori_classifier, model_da = (
        make_model(config, img_config)
    )
    if config["num_gpus"] > 1:
        # Convert BatchNorm layers to their synchronized variants for DDP.
        model_points = ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(model_points)
        model_da = ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(model_da)
        model_images = nn.SyncBatchNorm.convert_sync_batchnorm(model_images)
    if config["model_points"] == "minkunet":
        module = LightningDistil(
            model_points,
            model_images,
            model_classifier,
            model_ori_classifier,
            model_da,
            config,
        )
    else:
        # Previously an unsupported backbone fell through to an opaque
        # NameError on `module` at trainer.fit; fail fast with a clear message.
        raise ValueError(
            f"Unsupported model_points backbone: {config['model_points']!r}"
        )
    path = os.path.join(config["working_dir"], config["datetime"])
    trainer = pl.Trainer(
        devices=config["num_gpus"],
        accelerator="gpu",
        default_root_dir=path,
        enable_checkpointing=True,
        max_epochs=config["num_epochs"],
        strategy=DDPStrategy(find_unused_parameters=False),
        num_sanity_val_steps=2,
        check_val_every_n_epoch=1,
        callbacks=[
            # Keep the 3 best checkpoints by validation mIoU, plus the last.
            ModelCheckpoint(
                monitor="m_IoU",
                save_last=True,
                save_top_k=3,
                mode="max",
                filename="lidarmodel-{epoch:d}-{m_IoU:.4f}",
            ),
            LearningRateMonitor(log_momentum=True),
        ],
        # limit_train_batches=0.1,
        # limit_val_batches=0.1,
        # reload_dataloaders_every_n_epochs=4
    )
    print("Starting the training")
    trainer.fit(module, dm, ckpt_path=config["resume_path"])


# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()
