# coding=utf-8
# Copyright (C) xxx team - All Rights Reserved
#
# @Version:   3.9.4
# @Software:  PyCharm
# @FileName:  train.py
# @CTime:     2021/5/3 16:30   
# @Author:    Haiyang Yu
# @Email:     xxx
# @UTime:     2021/5/3 16:30
#
# @Description:
#     xxx
#     xxx
#
import os
import logging
from typing import List, Dict
import torch
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint, GPUStatsMonitor, LearningRateMonitor, EarlyStopping
from pytorch_lightning.plugins import DDPPlugin
import hydra
from omegaconf import DictConfig, OmegaConf
# self
from architecture import Architecture as Model
from datamodule import DataModule

logger = logging.getLogger(__name__)


def weights_update(model, checkpoint):
    """Load into *model* only the weights from *checkpoint* whose keys match.

    Unlike ``model.load_state_dict(checkpoint['state_dict'])``, this silently
    skips checkpoint entries that do not exist in the model, so a checkpoint
    from a slightly different architecture can still partially initialize it.

    Args:
        model: a ``torch.nn.Module`` whose parameters will be updated in place.
        checkpoint: a dict with a ``'state_dict'`` key (as saved by
            PyTorch Lightning / ``torch.save``).

    Returns:
        The same ``model`` instance, with matching weights overwritten.
    """
    model_dict = model.state_dict()
    # BUGFIX: original read "for k, v inf checkpoint[...]" — a SyntaxError.
    # Keep only the checkpoint entries whose keys the model actually has.
    pretrained_dict = {k: v for k, v in checkpoint['state_dict'].items() if k in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    return model

@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig) -> None:
    """Train the model with the Hydra-composed config, then optionally test it."""
    cfg.cwd = hydra.utils.get_original_cwd()
    # logger.info(OmegaConf.to_yaml(cfg))

    # Fix all random seeds for reproducibility.
    seed_everything(cfg.seed)

    # Build the model and the data module from the config.
    model = Model(cfg)
    datamodule = DataModule(cfg)

    # Optionally restore only the model weights (not trainer state) from a checkpoint.
    if cfg.loaded_from_checkpoint:
        model = weights_update(model, checkpoint=torch.load(cfg.loaded_from_checkpoint))

    # Use the configured GPUs only when requested AND CUDA is actually available.
    gpus = cfg.gpus if (cfg.use_gpu and torch.cuda.is_available()) else None

    # Assemble training callbacks: checkpointing, LR logging, optional early stopping.
    checkpoint_callback = ModelCheckpoint(
        monitor=cfg.model_checkpoint_monitor,
        # dirpath='my/path/',
        filename='{epoch:02d}--{step:02d}--{val_acc:.4f}--{valid_loss:.4f}',
        save_top_k=cfg.save_top_k,
        mode=cfg.model_checkpoint_mode,
        verbose=True,
    )
    callbacks = [
        checkpoint_callback,
        LearningRateMonitor(logging_interval=cfg.lr_scheduler_interval),
    ]
    # if gpus: callbacks.append(GPUStatsMonitor())  # NOTE: severely slows down GPU training — do not use!
    if cfg.early_stopping:
        callbacks.append(
            EarlyStopping(
                cfg.model_checkpoint_monitor,
                mode=cfg.model_checkpoint_mode,
                patience=cfg.early_stopping_patience,
                verbose=True,
            )
        )

    trainer = pl.Trainer(
        gpus=gpus,
        #  auto_select_gpus=True,
        #  accelerator='ddp',   # multi-GPU
        #  plugins=DDPPlugin(find_unused_parameters=False),  # multi-GPU
        max_epochs=cfg.epochs,
        log_every_n_steps=cfg.log_every_n_steps,
        weights_summary='full',
        callbacks=callbacks,
        #  profiler='simple',
        #  precision=16,
    )
    trainer.fit(model, datamodule)

    # Optionally run evaluation on the test split after training completes.
    if cfg.continue_test_dataset:
        trainer.test()


# Script entry point: Hydra parses CLI overrides and injects the composed config.
if __name__ == '__main__':
    main()
