# coding=utf-8
# Copyright (C) idata team - All Rights Reserved
#
# @Version:   3.10.9
# @Software:  PyCharm
# @FileName:  train.py
# @CTime:     2023/5/31 15:59   
# @Author:    yhy
# @Email:     yhy@cyber.com
# @UTime:     2023/5/31 15:59
#
# @Description:
#     
#     multirun: python train.py -m lr=1e-3,1e-4
#
import logging
import os
from typing import NewType, Any, Optional

import hydra
from omegaconf import DictConfig, OmegaConf
from lightning import Trainer
from lightning.pytorch import loggers as pl_loggers
from lightning.pytorch.accelerators import find_usable_cuda_devices
from lightning.pytorch.callbacks import (
    DeviceStatsMonitor,
    EarlyStopping,
    ModelCheckpoint,
    ModelSummary,
    StochasticWeightAveraging,
)
from lightning.pytorch.callbacks.progress import Tqdm, TQDMProgressBar
from lightning.pytorch.profilers import AdvancedProfiler

# self
from architecture import Architecture as Model
from datamodule import DataModule

logger = logging.getLogger(__name__)


@hydra.main(config_path="conf", config_name="config", version_base=None)
def main(cfg: DictConfig) -> None:
    """Build model, data, callbacks and loggers from the Hydra config, then train.

    Args:
        cfg: Hydra-composed configuration (conf/config.yaml plus CLI overrides,
             e.g. ``python train.py -m lr=1e-3,1e-4`` for a multirun sweep).
    """
    logger.info(OmegaConf.to_yaml(cfg))

    # load the model and dataset
    model = Model(cfg)
    datamodule = DataModule(cfg)

    # callbacks
    early_stop_callback = EarlyStopping(monitor="val_accuracy", min_delta=0.00, patience=3, verbose=False, mode="max")
    modelsummary = ModelSummary(max_depth=-1)
    swa = StochasticWeightAveraging()
    # Previously instantiated but discarded — it must be registered as a
    # callback to actually log device statistics.
    device_stats = DeviceStatsMonitor(cpu_stats=True)

    # Keep the top-10 checkpoints ranked by "val_loss" (ascending).
    # NOTE(review): the original created three ModelCheckpoint instances and
    # immediately overwrote the first two — only this one was ever used.
    checkpoint_callback = ModelCheckpoint(
        save_top_k=10,
        monitor="val_loss",
        mode="min",
        dirpath="my/path/",
        filename="sample-mnist-{epoch:02d}-{val_loss:.2f}",
    )

    # profiler: 'simple' string or the file-backed AdvancedProfiler
    if cfg.profile == 'simple':
        profile = 'simple'
    else:
        profile = AdvancedProfiler(dirpath=".", filename="perf_logs")

    class CustomProgressBar(TQDMProgressBar):
        """Progress bar that hides the noisy `v_num` entry."""
        # Must subclass TQDMProgressBar (the Lightning callback), not the raw
        # `Tqdm` bar widget — only the callback defines get_metrics().
        def get_metrics(self, *args, **kwargs):
            # don't show the version number
            items = super().get_metrics(*args, **kwargs)  # forward args (were silently dropped)
            items.pop("v_num", None)
            return items

    # log
    tensorboard = pl_loggers.TensorBoardLogger(save_dir="")
    comet_logger = pl_loggers.CometLogger(
        save_dir=".",
        workspace=os.environ.get("COMET_WORKSPACE"),  # Optional
        project_name="default_project",  # Optional
        rest_api_key=os.environ.get("COMET_REST_API_KEY"),  # Optional
        experiment_name="lightning_logs",  # Optional
    )

    # train
    trainer = Trainer(
        accelerator='gpu',
        devices=4,  # "auto"  -1      # devices=[0, 1]    # devices="0, 1"
        # # Find two GPUs on the system that are not already occupied
        # , devices=find_usable_cuda_devices(2))
        strategy="deepspeed_stage_2",
        default_root_dir="some/path/",
        callbacks=[
            swa,
            early_stop_callback,
            modelsummary,
            device_stats,
            CustomProgressBar(),  # must be an instance, not the class object
            checkpoint_callback,
        ],
        enable_model_summary=True,
        profiler=profile,  # advanced
        # was `log_every_n_steps=k` with `k` undefined — read from cfg instead
        log_every_n_steps=cfg.get("log_every_n_steps", 50),
        logger=[tensorboard, comet_logger],
        precision='16',  # 'bf16'  NOTE(review): recent Lightning prefers '16-mixed'
    )

    # `ckpt_path` is a Trainer.fit() argument, not a Trainer() one — passing it
    # to the constructor raises TypeError. Also, the original never called
    # fit(), so nothing was ever trained. Resume path comes from cfg if set.
    trainer.fit(model, datamodule=datamodule, ckpt_path=cfg.get("ckpt_path", None))


if __name__ == '__main__':
    # Hydra parses the CLI (including -m multirun sweeps) and injects `cfg`.
    main()
