# coding=utf-8
# Copyright (C) xxx team - All Rights Reserved
#
# @Version:   3.10.4
# @Software:  PyCharm
# @FileName:  train.py
# @CTime:     2022/8/4 22:10
# @Author:    yhy
# @Email:     yhy@yhy.com
# @UTime:     2022/8/20 12:10
#
# @Description:
#
#     multirun: python train.py -m lr=1e-3,1e-4
#
import os
import logging
import torch
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import *
from pytorch_lightning.plugins import *
import hydra
from omegaconf import DictConfig, OmegaConf
# self
from architecture import Architecture as Model
from datamodule import DataModule
from utils import weights_update

logger = logging.getLogger(__name__)


def _build_callbacks(cfg: DictConfig) -> list:
    """Assemble the Trainer callback list from the Hydra config."""
    checkpoint_callback = ModelCheckpoint(
        monitor=cfg.model_checkpoint_monitor,
        # dirpath='my/path/',
        filename='{epoch:02d}--{step:02d}--{val_acc:.4f}--{valid_loss:.4f}',
        save_top_k=cfg.save_top_k,
        mode=cfg.model_checkpoint_mode,
        verbose=True,
    )
    callbacks = [
        checkpoint_callback,
        LearningRateMonitor(logging_interval=cfg.lr_scheduler_interval),
        ModelSummary(max_depth=-1),
        # Warm-up schedule: accumulate 8 batches from epoch 0, 4 from epoch 4,
        # then no accumulation from epoch 8 onward.
        GradientAccumulationScheduler(scheduling={0: 8, 4: 4, 8: 1}),
        StochasticWeightAveraging(swa_lrs=1e-2),
    ]
    # NOTE: GPUStatsMonitor severely slows down GPU training — do not use it.
    if cfg.early_stopping:
        callbacks.append(EarlyStopping(cfg.model_checkpoint_monitor,
                                       mode=cfg.model_checkpoint_mode,
                                       patience=cfg.early_stopping_patience,
                                       verbose=True))
    return callbacks


def _build_plugins(cfg: DictConfig):
    """Return the Trainer plugin list from the config, or None when unused."""
    if not cfg.precision:
        return None

    # Typo fixed (was 'CunstomPrecisionPlugin'); the class is local to this
    # function, so no external callers are affected.
    class CustomPrecisionPlugin(PrecisionPlugin):
        precision = cfg.precision

    return [CustomPrecisionPlugin()]


@hydra.main(config_path="conf", config_name="config", version_base=None)
def main(cfg: DictConfig) -> None:
    """Train (and optionally test) the model described by the Hydra config.

    Builds the model and datamodule from ``cfg``, optionally restores model
    weights from a checkpoint, assembles callbacks/plugins, then runs
    ``Trainer.fit`` and, when requested, ``Trainer.test``.
    """
    logger.info(OmegaConf.to_yaml(cfg))

    # Reproducibility: seeds python, numpy and torch RNGs.
    seed_everything(cfg.seed)

    # Model and data are both configured entirely from the Hydra config.
    model = Model(cfg)
    datamodule = DataModule(cfg)

    # Restore *model weights only* from a checkpoint (no optimizer/trainer
    # state). map_location='cpu' makes a GPU-saved checkpoint loadable on a
    # CPU-only machine; Lightning moves the weights to the right device later.
    if cfg.loaded_from_checkpoint:
        checkpoint = torch.load(cfg.loaded_from_checkpoint, map_location='cpu')
        model = weights_update(model, checkpoint=checkpoint)

    # Only request GPUs when the config asks for them AND CUDA is present.
    gpus = cfg.gpus if (cfg.use_gpu and torch.cuda.is_available()) else None

    trainer = pl.Trainer(gpus=gpus,
                         #  auto_select_gpus=True,
                         #  accelerator='ddp',   # multi-gpu
                         #  plugins=DDPPlugin(find_unused_parameters=False),  # multi-gpu
                         max_epochs=cfg.epochs,
                         log_every_n_steps=cfg.log_every_n_steps,
                         weights_summary='full',
                         callbacks=_build_callbacks(cfg),
                         plugins=_build_plugins(cfg)
                         #  profiler='simple',
                         #  precision=16,
                         )
    # Pass the datamodule by keyword so it cannot be mistaken for the
    # positional train_dataloaders argument.
    trainer.fit(model, datamodule=datamodule)

    if cfg.continue_test_dataset:
        # With no arguments the Trainer reuses the fitted datamodule and the
        # best checkpoint tracked by ModelCheckpoint.
        trainer.test()


if __name__ == '__main__':
    main()
