import argparse
import sys, os

import numpy as np
import tritonclient.http as httpclient
import cv2

import torch
from models.experimental import attempt_load
from utils.torch_utils import select_device
from utils.general import non_max_suppression

def letterbox_part(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleup=True, stride=32):
    """Compute the letterbox geometry for *im* without modifying any pixels.

    Mirrors the scale/padding math of letterbox(), but instead of producing
    the padded image it returns the geometry the server-side pipeline needs:

    Args:
        im: image array; only ``im.shape[:2]`` (height, width) is read.
        new_shape: target canvas size as (h, w), or a single int for a square.
        color: unused here; kept so the signature stays parallel to letterbox().
        auto: if True, pad only up to the next multiple of ``stride``
            (minimum-rectangle mode) instead of the full ``new_shape``.
        scaleup: if False, only scale down, never up (for better val mAP).
        stride: model stride used by the ``auto`` padding mode.

    Returns:
        ((resized_h, resized_w), (left, top)) — the size of the image after
        the aspect-preserving resize, and the offset of that image inside
        the padded canvas.
    """
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)

    # Size after resize, as (w, h), and the total padding on each axis
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding

    if auto:  # minimum rectangle: pad only to the next stride multiple
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    # Only the top/left offsets are needed here; the -0.1 bias matches the
    # rounding used in letterbox() so both functions agree exactly.
    top = int(round(dh - 0.1))
    left = int(round(dw - 0.1))
    return (new_unpad[1], new_unpad[0]), (left, top)

def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleup=True, stride=32):
    """Resize *im* preserving aspect ratio, then pad it with *color* bars.

    Classic YOLO letterbox preprocessing: the image is scaled to fit inside
    ``new_shape`` and the remaining space is filled with a constant border,
    split evenly between the two sides of each axis.

    Args:
        im: HxWxC image array.
        new_shape: target canvas size as (h, w), or a single int for a square.
        color: border fill value passed to cv2.copyMakeBorder.
        auto: if True, pad only up to the next multiple of ``stride``.
        scaleup: if False, only scale down, never up (for better val mAP).
        stride: model stride used by the ``auto`` padding mode.

    Returns:
        (padded_image, ratio, (dw, dh)) where ratio is the applied scale and
        dw/dh are the per-side paddings (possibly fractional halves).
    """
    src_h, src_w = im.shape[:2]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale so the image fits inside the target on both axes.
    ratio = min(new_shape[0] / src_h, new_shape[1] / src_w)
    if not scaleup:
        ratio = min(ratio, 1.0)

    # Resized size (w, h) and the total padding required per axis.
    unpad_w = int(round(src_w * ratio))
    unpad_h = int(round(src_h * ratio))
    pad_w = new_shape[1] - unpad_w
    pad_h = new_shape[0] - unpad_h

    if auto:
        # Minimum rectangle: only pad to the nearest stride multiple.
        pad_w = np.mod(pad_w, stride)
        pad_h = np.mod(pad_h, stride)

    # Split the padding evenly between the two sides of each axis.
    pad_w /= 2
    pad_h /= 2

    if (src_w, src_h) != (unpad_w, unpad_h):
        im = cv2.resize(im, (unpad_w, unpad_h), interpolation=cv2.INTER_LINEAR)
    # The +/-0.1 bias resolves half-pixel padding deterministically.
    top, bottom = int(round(pad_h - 0.1)), int(round(pad_h + 0.1))
    left, right = int(round(pad_w - 0.1)), int(round(pad_w + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
    return im, ratio, (pad_w, pad_h)

def preprocess(img):
    """Turn a BGR image into the network's NCHW float32 input tensor.

    Steps: BGR->RGB, letterbox to 640x640 (fixed padding, auto=False),
    HWC->CHW, add a batch dimension, scale to [0, 1].

    Returns:
        (tensor, ratio, (dw, dh)) where tensor has shape (1, 3, 640, 640)
        and ratio/dw/dh come straight from letterbox().
    """
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # bgr->rgb
    padded, ratio, dwdh = letterbox(rgb.copy(), auto=False)
    chw = padded.transpose((2, 0, 1))           # HWC -> CHW
    batched = np.expand_dims(chw, 0)            # add batch dim
    batched = np.ascontiguousarray(batched)
    tensor = batched.astype(np.float32)
    tensor /= 255  # (1, 3, 640, 640), values in [0, 1]
    return tensor, ratio, dwdh

def check_num_dets(cpp_num_dets, py_result):
    """Return True when the C++/Triton detection count matches PyTorch's.

    Args:
        cpp_num_dets: 1-element array holding the Triton-side detection count.
        py_result: PyTorch NMS output, one detection per row.
    """
    if cpp_num_dets[0] != py_result.shape[0]:
        print('num_dets not equal')
        return False
    return True

def check_det_boxes(cpp_num_dets, cpp_det_boxes, py_result):
    """Compare the first num_dets Triton boxes against the PyTorch boxes.

    Coordinates are compared with an absolute tolerance of 0.5 to absorb
    small floating-point drift between the TensorRT and PyTorch pipelines.
    Callers are expected to have verified the counts match first (see
    check_num_dets), so both slices have the same number of rows.

    Args:
        cpp_num_dets: 1-element array with the Triton-side detection count.
        cpp_det_boxes: Triton boxes, shape (N, 4).
        py_result: PyTorch NMS output; columns 0..3 are the box coordinates.

    Returns:
        True when every coordinate differs by at most the tolerance.
    """
    tolerance = 0.5  # previously written as the opaque expression 1e-1 * 5
    cpp_det_boxes_cut = cpp_det_boxes[:cpp_num_dets[0], :]
    py_det_boxes_cut = py_result[:, :4]

    # Strict comparison alternative: np.array_equal(cpp_det_boxes_cut, py_det_boxes_cut)
    equal = bool(np.all(np.abs(cpp_det_boxes_cut - py_det_boxes_cut) <= tolerance))

    if not equal:
        print('det_boxes not equal')
    return equal

def check_det_classes(cpp_num_dets, cpp_det_classes, py_result):
    """Return True when Triton's class ids exactly match PyTorch's.

    Args:
        cpp_num_dets: 1-element array with the Triton-side detection count.
        cpp_det_classes: Triton class ids, shape (N,).
        py_result: PyTorch NMS output; column 5 holds the class id as float.
    """
    count = cpp_num_dets[0]
    triton_classes = cpp_det_classes[:count]
    pytorch_classes = py_result[:, 5].astype(np.int32)

    # Class ids are integers, so an exact element-wise match is required.
    equal = np.array_equal(triton_classes, pytorch_classes)
    if not equal:
        print('det_classes not equal')
    return equal

def check_det_scores(cpp_num_dets, cpp_det_scores, py_result):
    """Compare the first num_dets Triton scores against the PyTorch scores.

    Scores are compared with an absolute tolerance of 0.5 to absorb small
    floating-point drift between the TensorRT and PyTorch pipelines (same
    convention as check_det_boxes). Callers are expected to have verified
    the counts match first, so both slices have the same length.

    Args:
        cpp_num_dets: 1-element array with the Triton-side detection count.
        cpp_det_scores: Triton confidence scores, shape (N,).
        py_result: PyTorch NMS output; column 4 holds the confidence.

    Returns:
        True when every score differs by at most the tolerance.
    """
    tolerance = 0.5  # previously written as the opaque expression 1e-1 * 5
    cpp_det_scores_cut = cpp_det_scores[:cpp_num_dets[0]]
    py_det_scores_cut = py_result[:, 4]

    # Strict comparison alternative: np.array_equal(cpp_det_scores_cut, py_det_scores_cut)
    equal = bool(np.all(np.abs(cpp_det_scores_cut - py_det_scores_cut) <= tolerance))

    if not equal:
        print('det_scores not equal')
    return equal

if __name__ == "__main__":
    # End-to-end consistency check: run the same images through (a) local
    # PyTorch inference + NMS and (b) a remote Triton ensemble
    # ("ensemble_yolov7_chuwei_trt"), then verify that counts, boxes,
    # classes, and scores agree within tolerance.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-u",
        "--url",
        type=str,
        required=False,
        default="192.168.2.220:38010",
        help="Inference server URL. Default is localhost:8000.",
    )
    parser.add_argument('--weights', nargs='+', type=str,
                        default='/home/zhicheng.luo/Weights/chuwei_best_20240805.pt',
                        help='model.pt path(s)')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--conf-thres', type=float, default=0.35, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.65, help='IOU threshold for NMS')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    FLAGS = parser.parse_args()

    # Load the reference PyTorch model (FP32) once, in eval mode.
    device = select_device(FLAGS.device)
    model = attempt_load(FLAGS.weights, map_location=device)  # load FP32 model
    model.eval()

    # Per-image staging lists, kept index-aligned:
    #   mat_input_list        - preprocessed NCHW float tensors for PyTorch
    #   mat_input_tensor_list - raw HWC uint8 images for the Triton ensemble
    #   mat_input_hw_list     - original (h, w)
    #   mat_input_rsz_hw_list - letterboxed resize target (h, w)
    #   mat_input_pos_list    - (left, top) offset of the image in the canvas
    mat_input_list = []
    mat_input_tensor_list = []
    mat_input_hw_list = []
    mat_input_rsz_hw_list = []
    mat_input_pos_list = []
    imgs_dir = "/home/zhicheng.luo/data/chuwei/test"
    for file_name in os.listdir(imgs_dir):
        if file_name.endswith('.jpg'):
            print(file_name)
            file_path = os.path.join(imgs_dir, file_name)
            img = cv2.imread(file_path)
            im, ratio, dwdh = preprocess(img)
            mat_input_list.append(im)

            # Geometry only (no pixel work): the ensemble does its own
            # resize/pad server-side and needs the same letterbox layout.
            rsz_hw, pos_xy = letterbox_part(img, auto=False)
            mat_input_tensor_list.append(img)
            mat_input_hw_list.append(np.asarray([img.shape[0], img.shape[1]], dtype=np.int32))
            mat_input_rsz_hw_list.append(np.array(rsz_hw, dtype=np.int32))
            mat_input_pos_list.append(np.array(pos_xy, dtype=np.int32))

    # PyTorch inference (reference results)
    result_pytorch_list = []
    with torch.no_grad():
        for im in mat_input_list:
            input = torch.from_numpy(im).to(device)
            pred = model(input, augment=FLAGS.augment)
            if isinstance(pred, tuple):
                pred = pred[0]
            # non_max_suppression returns detections sorted by confidence;
            # the Triton outputs are re-sorted below to match.
            pred = non_max_suppression(pred, FLAGS.conf_thres, FLAGS.iou_thres, classes=FLAGS.classes,
                                       agnostic=FLAGS.agnostic_nms)
            pred_np = pred[0].cpu().numpy()
            result_pytorch_list.append(pred_np)

    try:
        # concurrent_request_count must be at least the max_batch size
        # configured in the model's config.pbtxt.
        concurrent_request_count = 8
        triton_client = httpclient.InferenceServerClient(
            url=FLAGS.url, concurrency=concurrent_request_count
        )
    except Exception as e:
        print("channel creation failed: " + str(e))
        sys.exit(1)

    print("\n=========")

    # Fire one async request per image (batch dimension of 1 each).
    async_requests = []
    for i in range(len(mat_input_tensor_list)):
        mat_input_tensor_expand = np.expand_dims(mat_input_tensor_list[i], axis=0)
        mat_input_hw_expand = np.expand_dims(mat_input_hw_list[i], axis=0)
        mat_input_rsz_hw_expand = np.expand_dims(mat_input_rsz_hw_list[i], axis=0)
        mat_input_pos_expand = np.expand_dims(mat_input_pos_list[i], axis=0)
        h, w = int(mat_input_hw_list[i][0]), int(mat_input_hw_list[i][1])
        # NOTE(review): input_hw_expand is built but never sent; presumably
        # left over from an earlier input layout — confirm and remove.
        input_hw = np.asarray([h, w], dtype=np.int32)
        input_hw_expand = np.expand_dims(input_hw, axis=0)
        inputs = [httpclient.InferInput("in_tensors", [1, h, w, 3], "UINT8"),
                  httpclient.InferInput("in_hw", [1, 2], "INT32"),
                  httpclient.InferInput("rsz_hw", [1, 2], "INT32"),
                  httpclient.InferInput("in_pos", [1, 2], "INT32")]
        inputs[0].set_data_from_numpy(mat_input_tensor_expand)
        inputs[1].set_data_from_numpy(mat_input_hw_expand)
        inputs[2].set_data_from_numpy(mat_input_rsz_hw_expand)
        inputs[3].set_data_from_numpy(mat_input_pos_expand)
        async_requests.append(triton_client.async_infer("ensemble_yolov7_chuwei_trt", inputs))

    # Collect responses in request order; idx walks result_pytorch_list,
    # which is aligned with the request order above.
    idx = 0
    for async_request in async_requests:
        # Get the result from the initiated asynchronous inference
        # request. This call will block till the server responds.
        result = async_request.get_result()
        #print("Response: {}".format(result.get_response()))
        num_dets_batch = result.as_numpy("num_dets")
        det_boxes_batch = result.as_numpy("det_boxes")
        det_scores_batch = result.as_numpy("det_scores")
        det_classes_batch = result.as_numpy("det_classes")
        for i in range(num_dets_batch.shape[0]):
            num_dets = int(num_dets_batch[i])
            if num_dets > 0:
                det_boxes = det_boxes_batch[i][:num_dets, :]
                det_scores = det_scores_batch[i][:num_dets]
                det_classes = det_classes_batch[i][:num_dets]

                # py_result is sorted by descending confidence, so boxes,
                # scores, and classes must be sorted the same way before
                # comparing.
                sort_inds = np.argsort(-det_scores)
                det_boxes = det_boxes[sort_inds]
                det_scores = det_scores[sort_inds]
                det_classes = det_classes[sort_inds]

                py_result = result_pytorch_list[idx]
                num_dets = np.array([num_dets], dtype=np.int32)
                # Compare: each check short-circuits the rest on failure.
                flag = True
                if flag == True:
                    flag = check_num_dets(num_dets, py_result)
                if flag == True:
                    flag = check_det_boxes(num_dets, det_boxes, py_result)
                if flag == True:
                    flag = check_det_classes(num_dets, det_classes, py_result)
                if flag == True:
                    flag = check_det_scores(num_dets, det_scores, py_result)

                print('result equal: {0}'.format(flag))

            idx = idx + 1

    print('finish')