# coding=utf-8
# Copyright (C) xxx team - All Rights Reserved
#
# @Version:   3.9.4
# @Software:  PyCharm
# @FileName:  inference.py
# @CTime:     2021/5/3 16:30
# @Author:    Haiyang Yu
# @Email:     xxx
# @UTime:     2021/5/3 16:30
#
# @Description:
#     used for production
#     xxx
#
import os
import logging
from typing import List, Dict
import torch
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint, GPUStatsMonitor, LearningRateMonitor, EarlyStopping
from pytorch_lightning.plugins import DDPPlugin
import hydra
from omegaconf import DictConfig, OmegaConf
# project-local imports
from architecture import Architecture as Model
from datamodule import DataModule

logger = logging.getLogger(__name__)


@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig) -> None:
    """Export a trained checkpoint for production serving.

    Depending on ``cfg.prod_type`` the checkpointed model is serialized
    either as a TorchScript archive (``production_jit.pt``) or as an
    ONNX graph (``production.onnx``) in the current working directory.

    Args:
        cfg: Hydra configuration; must provide ``load_ckpt`` (checkpoint
            path relative to the original cwd) and ``prod_type``.
    """
    # Hydra changes the working directory per run; keep the original so
    # the relative checkpoint path still resolves.
    cfg.cwd = hydra.utils.get_original_cwd()
    # logger.info(OmegaConf.to_yaml(cfg))

    # load_from_checkpoint is a classmethod: call it on the class instead
    # of building a throwaway Model(cfg) instance first (the original
    # instance was discarded immediately).
    # NOTE(review): assumes hyperparameters were saved in the checkpoint so
    # the model can be reconstructed without cfg — confirm against training.
    model = Model.load_from_checkpoint(os.path.join(cfg.cwd, cfg.load_ckpt))
    # Switch off dropout / batch-norm updates before exporting for inference.
    model.eval()

    if cfg.prod_type == 'jit':
        script = model.to_torchscript()
        torch.jit.save(script, 'production_jit.pt')
    else:
        # NOTE(review): any prod_type other than 'jit' falls through to the
        # ONNX path; the dummy sample fixes the traced input size at 6.
        input_sample = torch.arange(6)
        model.to_onnx('production.onnx', input_sample, export_params=True, verbose=True)



if __name__ == "__main__":
    # Script entry point: Hydra parses CLI overrides and invokes main().
    main()
