#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.

import argparse
import os
import random
import warnings
from loguru import logger
import numpy as np

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.nn.parallel import DistributedDataParallel as DDP

from reprod_log import ReprodLogger
from yolox.core import launch
from yolox.exp import get_exp
from yolox.utils import configure_nccl, fuse_model, get_local_rank, get_model_info, setup_logger


# import os
#
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"


def make_parser():
    """Build the command-line argument parser for YOLOX evaluation."""
    p = argparse.ArgumentParser("YOLOX Eval")
    p.add_argument("-expn", "--experiment-name", default=None, type=str)
    p.add_argument("-n", "--name", default="yolox-x", type=str, help="model name")

    # distributed settings
    p.add_argument("--dist-backend", type=str, default="nccl", help="distributed backend")
    p.add_argument("--dist-url", type=str, default=None, help="url used to set up distributed training")
    p.add_argument("-b", "--batch-size", default=8, type=int, help="batch size")
    p.add_argument("-d", "--devices", type=int, default=1, help="device for training")
    p.add_argument("--num_machines", type=int, default=1, help="num of node for training")
    p.add_argument("--machine_rank", type=int, default=0, help="node rank for multi-node training")

    # experiment / checkpoint / thresholds
    p.add_argument("-f", "--exp_file", type=str, default=None, help="pls input your expriment description file")
    p.add_argument("-c", "--ckpt", type=str, default='../../weights/yolox_x.pth', help="ckpt for eval")
    p.add_argument("--conf", type=float, default=0.001, help="test conf")
    p.add_argument("--nms", type=float, default=None, help="test nms threshold")
    p.add_argument("--tsize", type=int, default=None, help="test img size")
    p.add_argument("--seed", type=int, default=2021, help="eval seed")

    # boolean switches, all defaulting to False
    for flag, text in (
        ("--fp16", "Adopting mix precision evaluating."),
        ("--fuse", "Fuse conv and bn for testing."),
        ("--trt", "Using TensorRT model for testing."),
        ("--legacy", "To be compatible with older versions"),
        ("--test", "Evaluating on test-dev set."),
        ("--speed", "speed test only."),
    ):
        p.add_argument(flag, dest=flag.lstrip("-"), default=False, action="store_true", help=text)

    # everything left on the command line is forwarded as config overrides
    p.add_argument("opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER)
    return p


@logger.catch
def main(exp, args, num_gpu):
    """Align the training loss against a reference implementation.

    Builds the model described by ``exp``, loads a checkpoint, runs one
    forward pass in train mode on fake data/labels loaded from disk, and
    dumps the resulting loss components with ReprodLogger so they can be
    diffed against another framework's output.

    Args:
        exp: experiment description object (see yolox.exp.get_exp).
        args: parsed namespace produced by make_parser().
        num_gpu: number of GPUs in use; > 1 enables DistributedDataParallel.
    """
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        # With deterministic=True, cuDNN always returns its default
        # convolution algorithm, making repeated runs reproducible.
        cudnn.deterministic = True
        warnings.warn(
            "You have chosen to seed testing. This will turn on the CUDNN deterministic setting, "
        )
    # Let cuDNN auto-tune the fastest kernels: improves performance when the
    # compute graph is static, degrades it when input shapes keep changing.
    cudnn.benchmark = True

    is_distributed = num_gpu > 1

    # set environment variables for distributed training
    configure_nccl()

    # Select the GPU manually for now; the upstream repo does not support
    # picking a specific GPU.
    rank = get_local_rank()

    file_name = os.path.join(exp.output_dir, args.experiment_name)

    if rank == 0:
        os.makedirs(file_name, exist_ok=True)

    setup_logger(file_name, distributed_rank=rank, filename="val_log.txt", mode="a")
    logger.info("Args: {}".format(args))

    # Command-line overrides take precedence over the experiment defaults.
    if args.conf is not None:
        exp.test_conf = args.conf
    if args.nms is not None:
        exp.nmsthre = args.nms
    if args.tsize is not None:
        exp.test_size = (args.tsize, args.tsize)
    model = exp.get_model()
    logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size)))
    logger.info("Model Structure:\n{}".format(str(model)))

    torch.cuda.set_device(rank)
    model.cuda(rank)

    if not args.speed and not args.trt:
        if args.ckpt is None:
            ckpt_file = os.path.join(file_name, "best_ckpt.pth")
        else:
            ckpt_file = args.ckpt
        logger.info("loading checkpoint from {}".format(ckpt_file))
        loc = "cuda:{}".format(rank)
        ckpt = torch.load(ckpt_file, map_location=loc)
        model.load_state_dict(ckpt["model"])
        logger.info("loaded checkpoint done.")

    if is_distributed:
        model = DDP(model, device_ids=[rank])

    if args.fuse:
        logger.info("\tFusing model...")
        model = fuse_model(model)
    if args.trt:
        assert (
                not args.fuse and not is_distributed and args.batch_size == 1
        ), "TensorRT model is not support model fusing and distributed inferencing!"
        trt_file = os.path.join(file_name, "model_trt.pth")
        assert os.path.exists(
            trt_file
        ), "TensorRT model is not found!\n Run tools/trt.py first!"
        model.head.decode_in_inference = False
        decoder = model.head.decode_outputs
    else:
        trt_file = None
        decoder = None

    # step 1: model-structure alignment
    # (kept commented out; makes intermediate tensors easier to inspect)
    # torch.set_printoptions(precision=12)
    # # read or gen fake data
    # fake_data = np.load("../../fake_data/fake_data.npy")
    # fake_data = torch.from_numpy(fake_data)
    # tensor_type = torch.cuda.HalfTensor if args.fp16 else torch.cuda.FloatTensor
    # fake_data = fake_data.type(tensor_type)
    # if args.fp16:
    #     model = model.half()
    # # forward
    # out = model(fake_data)
    # # save the output
    # reprod_logger = ReprodLogger()
    # reprod_logger.add("logits", out.cpu().detach().numpy())
    # reprod_logger.save("../forward_torch.npy")

    # step 2: dataset-loading alignment
    # (set the dataset path inside get_yolox_datadir())
    # evaluator = exp.get_evaluator(args.batch_size, is_distributed, args.test, args.legacy)
    #
    # *_, summary = evaluator.evaluate(
    #     model, is_distributed, args.fp16, trt_file, decoder, exp.test_size
    # )
    # logger.info("\n" + summary)

    # step 3: loss-function alignment
    fake_data = np.load("../../fake_data/fake_data.npy")
    fake_data = torch.from_numpy(fake_data)

    fake_label = np.load("../../fake_data/fake_label.npy")
    fake_label = torch.from_numpy(fake_label)

    tensor_type = torch.cuda.HalfTensor if args.fp16 else torch.cuda.FloatTensor
    fake_data = fake_data.type(tensor_type)
    fake_label = fake_label.type(tensor_type)
    model.train()
    loss = model(fake_data, fake_label)
    reprod_logger = ReprodLogger()

    reprod_logger.add("total_loss", loss['total_loss'].cpu().detach().numpy())
    reprod_logger.add("iou_loss", loss['iou_loss'].cpu().detach().numpy())
    reprod_logger.add("conf_loss", loss['conf_loss'].cpu().detach().numpy())
    # BUG FIX: previously this logged loss['total_loss'] under the "cls_loss"
    # key, so the classification-loss comparison could never be meaningful.
    reprod_logger.add("cls_loss", loss['cls_loss'].cpu().detach().numpy())
    # reprod_logger.add("l1_loss", loss['l1_loss'])

    reprod_logger.save("loss_torch.npy")


if __name__ == "__main__":
    # Parse CLI options and materialize the experiment description.
    cli_args = make_parser().parse_args()
    experiment = get_exp(cli_args.exp_file, cli_args.name)
    experiment.merge(cli_args.opts)

    # Fall back to the experiment's own name when none was supplied.
    if not cli_args.experiment_name:
        cli_args.experiment_name = experiment.exp_name

    if cli_args.devices is None:
        gpu_count = torch.cuda.device_count()
    else:
        gpu_count = cli_args.devices
    assert gpu_count <= torch.cuda.device_count()

    launch(
        main,
        gpu_count,
        cli_args.num_machines,
        cli_args.machine_rank,
        backend=cli_args.dist_backend,
        dist_url=cli_args.dist_url if cli_args.dist_url is not None else "auto",
        args=(experiment, cli_args, gpu_count),
    )
