import argparse
import os

import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy

from train_img.evaluate import evaluate
from train_img.lightning_datamodule import TrainImgDataModule
from train_img.lightning_trainer import LightningTrainImg
from train_img.model_builder import make_model
from utils.read_config import generate_config


def main():
    """
    Launch evaluation of a pretrained image model.

    Parses command-line arguments, loads the YAML training configuration,
    builds the model and classifier, then runs evaluation on the
    validation split of the data module.
    """
    parser = argparse.ArgumentParser(description="arg parser")
    parser.add_argument(
        "--cfg_file",
        type=str,
        default="config/resfcn.yaml",
        help="specify the config for training",
    )
    parser.add_argument(
        "--resume_path",
        type=str,
        default=None,
        help="provide a path to resume an incomplete training",
    )
    args = parser.parse_args()
    # Bug fix: honor the --cfg_file CLI argument; a debugging leftover had
    # hard-coded "config/segnet.yaml" here, silently ignoring the flag.
    config = generate_config(args.cfg_file)
    if args.resume_path:
        config["resume_path"] = args.resume_path

    # Under DDP only rank 0 should print the config. Environment variables
    # are always strings, so compare against "0" — the previous `== 0`
    # (int) check failed whenever LOCAL_RANK was actually set.
    if os.environ.get("LOCAL_RANK", "0") == "0":
        print(
            "\n" + "\n".join(f"{key:20}: {value}" for key, value in config.items())
        )

    dm = TrainImgDataModule(config)
    model, classifier = make_model(config)

    dm.setup("validate")
    # NOTE(review): device index 0 is hard-coded — assumes single-GPU
    # evaluation; confirm this matches the target environment.
    evaluate(model.to(0), classifier.to(0), dm.val_dataloader(), config)


# Script entry point: run the evaluation pipeline when executed directly.
if __name__ == "__main__":
    main()
