import argparse
import sys, os

import numpy as np
import tritonclient.http as httpclient
import cv2

import torch
from models.experimental import attempt_load
from utils.torch_utils import select_device

def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleup=True, stride=32):
    """Resize `im` to fit inside `new_shape` while keeping aspect ratio, padding the rest.

    Args:
        im: HWC image array (as returned by cv2.imread).
        new_shape: target (height, width), or a single int for a square target.
        color: BGR fill value for the padded border.
        auto: if True, pad only up to the nearest stride multiple (minimum rectangle)
              instead of all the way to `new_shape`.
        scaleup: if False, only shrink images — never enlarge (better val mAP).
        stride: model stride the padded shape must be a multiple of when `auto` is set.

    Returns:
        (padded_image, scale_ratio, (dw, dh)) where dw/dh are the per-side paddings.
    """
    h, w = im.shape[:2]  # current height, width
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    target_h, target_w = new_shape

    # Scale ratio (new / old), identical for both axes to preserve aspect ratio
    scale = min(target_h / h, target_w / w)
    if not scaleup:
        scale = min(scale, 1.0)

    # Size after resizing, and the total padding needed to reach the target
    resized_w, resized_h = int(round(w * scale)), int(round(h * scale))
    pad_w, pad_h = target_w - resized_w, target_h - resized_h  # wh padding

    if auto:
        # Minimum rectangle: keep only the padding required for stride alignment
        pad_w, pad_h = np.mod(pad_w, stride), np.mod(pad_h, stride)

    # Split the padding evenly between the two sides
    pad_w /= 2
    pad_h /= 2

    if (w, h) != (resized_w, resized_h):  # resize only when needed
        im = cv2.resize(im, (resized_w, resized_h), interpolation=cv2.INTER_LINEAR)
    # The +/-0.1 rounding keeps top+bottom (and left+right) summing to the exact total
    top, bottom = int(round(pad_h - 0.1)), int(round(pad_h + 0.1))
    left, right = int(round(pad_w - 0.1)), int(round(pad_w + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
    return im, scale, (pad_w, pad_h)

def preprocess(img):
    """Convert a BGR OpenCV image into a normalized NCHW float32 model input.

    Returns:
        (tensor, ratio, dwdh): tensor of shape (1, 3, 640, 640) with values in
        [0, 1]; ratio and dwdh come from letterbox for later box rescaling.
    """
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; the model expects RGB
    padded, ratio, dwdh = letterbox(rgb.copy(), auto=False)
    chw = padded.transpose((2, 0, 1))   # HWC -> CHW
    batched = np.expand_dims(chw, 0)    # add batch dimension -> NCHW
    batched = np.ascontiguousarray(batched)
    tensor = batched.astype(np.float32)
    tensor /= 255  # normalize to [0, 1]; final shape (1, 3, 640, 640)
    return tensor, ratio, dwdh

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-u",
        "--url",
        type=str,
        required=False,
        default="localhost:8000",
        help="Inference server URL. Default is localhost:8000.",
    )
    parser.add_argument('--weights', nargs='+', type=str,
                        default='/home/lzc/work/Code/git/infer_service_engine/bangtu-yolov7/origin_files/weight/chuwei_best_0110.pt',
                        help='model.pt path(s)')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    FLAGS = parser.parse_args()

    device = select_device(FLAGS.device)
    model = attempt_load(FLAGS.weights, map_location=device)  # load FP32 model
    model.eval()

    mat_input_list = []
    imgs_dir = "/home/lzc/work/Code/git/infer_service_engine/bangtu-yolov7/origin_files/data/chuwei"
    for file_name in os.listdir(imgs_dir):
        if file_name.endswith('.jpg'):
            file_path = os.path.join(imgs_dir, file_name)
            img = cv2.imread(file_path)
            im, ratio, dwdh = preprocess(img)
            mat_input_list.append(im)

    #pytorch infer
    result_pytorch_list = []
    with torch.no_grad():
        for im in mat_input_list:
            input = torch.from_numpy(im).to(device)
            pred = model(input, augment=FLAGS.augment)
            if isinstance(pred, tuple):
                pred = pred[0]
            pred_np_bofore_nms = pred.cpu().numpy()
            result_pytorch_list.append(pred_np_bofore_nms)

    try:
        # 这里concurrent_request_count设置至少要大于或等于config.pbtxt的max_batch数量
        concurrent_request_count = 8
        triton_client = httpclient.InferenceServerClient(
            url=FLAGS.url, concurrency=concurrent_request_count
        )
    except Exception as e:
        print("channel creation failed: " + str(e))
        sys.exit(1)

    print("\n=========")

    '''
    #batch=1
    async_requests = []
    for mat_input in mat_input_list:
        inputs = [httpclient.InferInput("INPUT__0", [1, 3, 640, 640], "FP32")]
        inputs[0].set_data_from_numpy(mat_input)
        async_requests.append(triton_client.async_infer("yolov7_chuwei_pt", inputs))
    '''

    #'''
    batch=4
    async_requests = []
    loop_num = int(len(mat_input_list) / batch) + 1
    for i in range(loop_num):
        mat_inputs = []
        for j in range(batch):
            if i * batch + j < len(mat_input_list):
                mat_inputs.append(mat_input_list[i * batch + j][0])
        mat_inputs = np.asarray(mat_inputs)
        inputs = [httpclient.InferInput("INPUT__0", [mat_inputs.shape[0], 3, 640, 640], "FP32")]
        inputs[0].set_data_from_numpy(mat_inputs)
        async_requests.append(triton_client.async_infer("yolov7_chuwei_pt", inputs))
    #'''

    idx = 0
    for async_request in async_requests:
        # Get the result from the initiated asynchronous inference
        # request. This call will block till the server responds.
        result = async_request.get_result()
        #print("Response: {}".format(result.get_response()))
        batch_tensors = result.as_numpy("OUTPUT__0")
        print('batchsize: {}'.format(batch_tensors.shape[0]))
        for i in range(batch_tensors.shape[0]):
            tensors_one = batch_tensors[i]

            # 判断两个np是否完全相等
            equal = np.array_equal(tensors_one, result_pytorch_list[idx][0])
            np.testing.assert_allclose(tensors_one, result_pytorch_list[idx][0])
            #np.testing.assert_allclose(tensors_one, result_pytorch_list[idx][0], rtol=1e-03, atol=1e-05)
            print('result equal: {0}'.format(equal))

            idx = idx + 1

    print('finish')