#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.

import argparse
import os
import random
import warnings
from loguru import logger
import numpy as np
# import torch
# import torch.backends.cudnn as cudnn
# from torch.nn.parallel import DistributedDataParallel as DDP
import paddle
import paddle.distributed as dist

from reprod_log import ReprodLogger
from yolox.core import launch
from yolox.exp import get_exp
from yolox.utils import configure_nccl, fuse_model, get_local_rank, get_model_info, setup_logger


def make_parser():
    """Build the CLI argument parser for YOLOX evaluation.

    Returns:
        argparse.ArgumentParser: parser exposing model selection, distributed
        settings, checkpoint/threshold options, and trailing ``opts`` overrides
        that are merged into the experiment config by the caller.
    """
    parser = argparse.ArgumentParser("YOLOX Eval")
    parser.add_argument("-expn", "--experiment-name", type=str, default=None)
    parser.add_argument("-n", "--name", type=str, default="yolox-x", help="model name")

    # distributed
    parser.add_argument(
        "--dist-backend", default="nccl", type=str, help="distributed backend"
    )
    parser.add_argument(
        "--dist-url",
        default=None,
        type=str,
        help="url used to set up distributed training",
    )
    parser.add_argument("-b", "--batch-size", type=int, default=8, help="batch size")
    parser.add_argument(
        "-d", "--devices", default=1, type=int, help="device for training"
    )
    parser.add_argument(
        "--num_machines", default=1, type=int, help="num of node for training"
    )
    parser.add_argument(
        "--machine_rank", default=0, type=int, help="node rank for multi-node training"
    )
    parser.add_argument(
        "-f",
        "--exp_file",
        default=None,
        type=str,
        # typo fix: "expriment" -> "experiment"
        help="pls input your experiment description file",
    )
    parser.add_argument("-c", "--ckpt", default='../../weights/yolox_x.pdparams', type=str, help="ckpt for eval")
    parser.add_argument("--conf", default=0.001, type=float, help="test conf")
    parser.add_argument("--nms", default=None, type=float, help="test nms threshold")
    parser.add_argument("--tsize", default=None, type=int, help="test img size")
    parser.add_argument("--seed", default=2021, type=int, help="eval seed")
    parser.add_argument(
        "--fp16",
        dest="fp16",
        default=False,
        action="store_true",
        help="Adopting mix precision evaluating.",
    )
    parser.add_argument(
        "--fuse",
        dest="fuse",
        default=False,
        action="store_true",
        help="Fuse conv and bn for testing.",
    )
    parser.add_argument(
        "--trt",
        dest="trt",
        default=False,
        action="store_true",
        help="Using TensorRT model for testing.",
    )
    parser.add_argument(
        "--legacy",
        dest="legacy",
        default=False,
        action="store_true",
        help="To be compatible with older versions",
    )
    parser.add_argument(
        "--test",
        dest="test",
        default=False,
        action="store_true",
        help="Evaluating on test-dev set.",
    )
    parser.add_argument(
        "--speed",
        dest="speed",
        default=False,
        action="store_true",
        help="speed test only.",
    )
    # Trailing free-form "KEY VALUE" pairs, consumed by exp.merge() in __main__.
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    parser.add_argument(
        "--local_rank", default=0, type=int, help="local rank for dist training"
    )
    return parser


@logger.catch
def main(exp, args, num_gpu):
    """Run the step-3 loss-alignment check for the Paddle YOLOX port.

    Builds the model described by ``exp``, restores the checkpoint from
    ``args.ckpt``, runs one train-mode forward pass on fake data/labels read
    from ``../../fake_data/``, and dumps each loss component through
    ``ReprodLogger`` so it can be diffed against the reference (torch) run.

    Args:
        exp: experiment object from ``get_exp`` (provides ``get_model``,
            ``output_dir`` and test-time thresholds/size).
        args: parsed namespace from ``make_parser``.
        num_gpu: device count; values > 1 enable data-parallel execution.
    """
    if args.seed is not None:
        # Seed both Python and Paddle RNGs for a reproducible forward pass.
        random.seed(args.seed)
        paddle.seed(args.seed)

    is_distributed = num_gpu > 1

    rank = args.local_rank
    file_name = os.path.join(exp.output_dir, args.experiment_name)

    # Only rank 0 creates the output directory to avoid multi-process races.
    if rank == 0:
        os.makedirs(file_name, exist_ok=True)

    setup_logger(file_name, distributed_rank=rank, filename="val_log.txt", mode="a")
    logger.info("Args: {}".format(args))

    # CLI overrides for the experiment's test-time settings.
    if args.conf is not None:
        exp.test_conf = args.conf
    if args.nms is not None:
        exp.nmsthre = args.nms
    if args.tsize is not None:
        exp.test_size = (args.tsize, args.tsize)

    model = exp.get_model()
    logger.info("Model Structure:\n{}".format(str(model)))

    if is_distributed:
        dist.init_parallel_env()
        model = paddle.DataParallel(model)
    elif rank >= 0:
        # NOTE(review): paddle.set_device is documented with lowercase
        # "gpu:N"; confirm "GPU:N" is accepted by the Paddle version in use.
        paddle.set_device("GPU:" + str(rank))

    if not args.speed and not args.trt:
        if args.ckpt is None:
            ckpt_file = os.path.join(file_name, "best_ckpt.pth")
        else:
            ckpt_file = args.ckpt
        logger.info("loading checkpoint from {}".format(ckpt_file))
        ckpt = paddle.load(ckpt_file)
        model.load_dict(ckpt)
        logger.info("loaded checkpoint done.")
    if args.fuse:
        logger.info("\tFusing model...")
        model = fuse_model(model)
    if args.trt:
        assert (
                not args.fuse and not is_distributed and args.batch_size == 1
        ), "TensorRT model is not support model fusing and distributed inferencing!"
        trt_file = os.path.join(file_name, "model_trt.pth")
        assert os.path.exists(
            trt_file
        ), "TensorRT model is not found!\n Run tools/trt.py first!"
        model.head.decode_in_inference = False
        decoder = model.head.decode_outputs
    else:
        trt_file = None
        decoder = None

    # step 3: loss-function alignment — forward fake data/labels through the
    # model in train mode and record every loss component for reprod-log diff.
    # (The step-1 forward-alignment scaffolding previously kept here as
    # commented-out code has been removed; see repo history if needed.)
    fake_data = np.load("../../fake_data/fake_data.npy")
    fake_data = paddle.to_tensor(fake_data)

    fake_label = np.load("../../fake_data/fake_label.npy")
    fake_label = paddle.to_tensor(fake_label)

    tensor_type = paddle.float16 if args.fp16 else paddle.float32

    fake_data = paddle.cast(fake_data, tensor_type)
    fake_label = paddle.cast(fake_label, tensor_type)
    model.train()
    loss = model(fake_data, fake_label)

    reprod_logger = ReprodLogger()
    reprod_logger.add("total_loss", loss['total_loss'].cpu().detach().numpy())
    reprod_logger.add("iou_loss", loss['iou_loss'].cpu().detach().numpy())
    reprod_logger.add("conf_loss", loss['conf_loss'].cpu().detach().numpy())
    # BUG FIX: the "cls_loss" entry previously recorded loss['total_loss'],
    # which made the cls-loss comparison against the reference run meaningless.
    reprod_logger.add("cls_loss", loss['cls_loss'].cpu().detach().numpy())
    # reprod_logger.add("l1_loss", loss['l1_loss'])
    # NOTE(review): the file name "loss_torch.npy" is kept as-is so the
    # existing reprod-log diff tooling keeps working, even though this is
    # the Paddle run — confirm against the diff script before renaming.
    reprod_logger.save("loss_torch.npy")


if __name__ == "__main__":
    args = make_parser().parse_args()
    # Build the experiment description from an explicit file (-f) when given,
    # otherwise from the registered model name (-n); then fold in any trailing
    # "KEY VALUE" overrides captured by the `opts` argument.
    exp = get_exp(args.exp_file, args.name)
    exp.merge(args.opts)

    if not args.experiment_name:
        args.experiment_name = exp.exp_name

    # num_gpu falls back to the number of visible CUDA places when -d is not
    # supplied; the assert guards against requesting more devices than exist.
    # NOTE(review): paddle.fluid is deprecated in newer Paddle releases —
    # consider paddle.device.cuda.device_count(); confirm before changing.
    num_gpu = len(paddle.fluid.cuda_places()) if args.devices is None else args.devices
    assert num_gpu <= len(paddle.fluid.cuda_places())

    dist_url = "auto" if args.dist_url is None else args.dist_url
    # launch() handles (multi-node) process spawning; each worker process
    # ends up calling main(exp, args, num_gpu).
    launch(
        main,
        num_gpu,
        args.num_machines,
        args.machine_rank,
        backend=args.dist_backend,
        dist_url=dist_url,
        args=(exp, args, num_gpu),
    )
