# coding=utf-8
# Copyright (C) xxx team - All Rights Reserved
#
# @Version:   3.9.4
# @Software:  PyCharm
# @FileName:  test.py
# @CTime:     2021/5/3 16:30
# @Author:    Haiyang Yu
# @Email:     xxx
# @UTime:     2021/5/3 16:30
#
# @Description:
#     only test
#     xxx
#
import os
import logging
from typing import List, Dict
import torch
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint, GPUStatsMonitor, LearningRateMonitor, EarlyStopping
from pytorch_lightning.plugins import DDPPlugin
import hydra
from omegaconf import DictConfig, OmegaConf
# self
from architecture import Architecture as Model
from datamodule import DataModule

logger = logging.getLogger(__name__)


@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig) -> None:
    """Load a trained checkpoint and run the test loop once.

    Args:
        cfg: Hydra config. Must provide ``use_gpu``, ``gpus`` and
            ``load_ckpt`` (checkpoint path relative to the launch dir);
            ``cfg.cwd`` is filled in here.
    """
    # Hydra switches the process CWD to its run dir; record the original
    # launch dir so relative paths from the config still resolve.
    cfg.cwd = hydra.utils.get_original_cwd()
    # logger.info(OmegaConf.to_yaml(cfg))

    # load the dataset
    datamodule = DataModule(cfg)

    # Use GPUs only when both requested and actually available.
    gpus = cfg.gpus if cfg.use_gpu and torch.cuda.is_available() else None

    trainer = pl.Trainer(gpus=gpus,
                         #  auto_select_gpus=True,
                         #  accelerator='ddp',   # multi-GPU
                         #  plugins=DDPPlugin(find_unused_parameters=False),  # multi-GPU
                         log_every_n_steps=1,
                         weights_summary='full',
                         profiler='simple',
                         )

    # `load_from_checkpoint` is a classmethod returning a brand-new instance,
    # so the previous `Model(cfg)` construction was discarded work; call it
    # on the class directly instead.
    # NOTE(review): assumes Model.__init__ has no needed side effects and the
    # checkpoint carries its saved hyperparameters — verify against training.
    model = Model.load_from_checkpoint(os.path.join(cfg.cwd, cfg.load_ckpt))
    trainer.test(model=model, datamodule=datamodule)


if __name__ == '__main__':
    # Script entry point: Hydra parses CLI overrides and invokes main().
    main()
