import argparse
import numpy as np
import tritonclient.http as httpclient
import cv2
import time


def get_argparser():
    """Build the CLI argument parser for the Triton sliding-window client.

    Returns:
        argparse.ArgumentParser: parser with server connection options
        (url, model name, timeout) and sliding-window geometry options
        (window size, step ratios, maximum accepted input size).
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "-u",
        "--url",
        type=str,
        required=False,
        default='192.168.2.220:28010',
        # NOTE: help text previously claimed "localhost:8000", which did not
        # match the actual default above.
        help="Inference server URL. Default is 192.168.2.220:28010.",
    )
    parser.add_argument(
        "-m",
        "--model-name",
        type=str,
        required=False,
        default="bls_slideWinSync_bgr2rgb_py",  # alt: bls_slideWinCrop_bgr2rgb_py
        help="model name in triton server",
    )
    parser.add_argument(
        "-t",
        "--client-timeout",
        type=float,
        required=False,
        default=3,
        help="Client timeout in seconds. Default is 3.",
    )
    parser.add_argument(
        "--max_batch_size",
        type=int,
        default=1,
        help="maximum number of images sent per inference request",
    )
    parser.add_argument(
        "--window_w",
        type=int,
        default=640,
        help="sliding-window width in pixels",
    )
    parser.add_argument(
        "--window_h",
        type=int,
        default=640,
        help="sliding-window height in pixels",
    )
    parser.add_argument(
        "--ratio_w",
        type=float,
        default=0.6,
        help="horizontal step ratio of the sliding window (fraction of window_w)",
    )
    parser.add_argument(
        "--ratio_h",
        type=float,
        default=0.6,
        help="vertical step ratio of the sliding window (fraction of window_h)",
    )
    parser.add_argument(
        "--max_input_w",
        type=int,
        default=40000,  # previously tried: 8320, 10000
        help="maximum accepted input image width",
    )
    parser.add_argument(
        "--max_input_h",
        type=int,
        default=40000,  # previously tried: 8320, 10000
        help="maximum accepted input image height",
    )

    return parser


def triton_infer(opts):
    """Send batched sliding-window inference requests to a Triton server.

    Args:
        opts: parsed CLI namespace from ``get_argparser()``, with an extra
            attribute ``opts.input`` — a list of HxWxC uint8 images
            (all images in one batch are assumed to share the same shape,
            since they are stacked with ``np.stack`` — TODO confirm caller
            guarantees this).

    Returns:
        tuple[list, list] | None: ``(output_list, num_wh_list)`` where each
        entry corresponds to one batch request — ``crop_infers`` crops and
        the ``num_wh`` grid counts — or ``None`` when the server is not
        live or the call fails (errors are printed, not raised).
    """
    input_names_types = [("input_image", "UINT8"),
                         ("window_size", "INT32"),
                         ("step_ratio", "FP32"),]
    output_names = ["crop_infers",
                    "num_wh",]

    def ping_server(triton_client):
        # Liveness probe; any client/connection error is treated as "not live".
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        try:
            return triton_client.is_server_live()
        except Exception:
            return False

    def infer_async_http(opts):
        async_request_list = []

        for b in range(0, len(opts.input), opts.max_batch_size):
            # `batch_images` avoids shadowing the builtin `input`.
            batch_images = opts.input[b:min(b + opts.max_batch_size, len(opts.input))]
            input_arr = np.stack(batch_images, axis=0)
            batch = input_arr.shape[0]
            # print('batch_size: {}'.format(batch))
            # Explicit raise instead of `assert`, which is stripped under -O.
            if input_arr.shape[1] > opts.max_input_h or input_arr.shape[2] > opts.max_input_w:
                raise ValueError('目前支持最大宽高({}, {})'.format(opts.max_input_w, opts.max_input_h))

            input_list = []
            input_list.append(input_arr)
            input_list.append(np.array([[opts.window_w, opts.window_h]], dtype=np.int32))
            input_list.append(np.array([[opts.ratio_w, opts.ratio_h]], dtype=np.float32))

            inputs = []
            for i in range(len(input_names_types)):
                inputs.append(httpclient.InferInput(input_names_types[i][0], input_list[i].shape, input_names_types[i][1]))
                inputs[i].set_data_from_numpy(input_list[i])

            outputs = []
            for i in range(len(output_names)):
                outputs.append(httpclient.InferRequestedOutput(output_names[i]))

            # Asynchronous inference call; results are collected later.
            async_request = triton_client.async_infer(
                model_name=opts.model_name,
                inputs=inputs,
                outputs=outputs
            )
            async_request_list.append(async_request)
        return async_request_list

    try:
        # `concurrency` must be at least the max_batch value from config.pbtxt.
        triton_client = httpclient.InferenceServerClient(
            url=opts.url,
            concurrency=opts.max_batch_size,
            connection_timeout=opts.client_timeout,
        )

        res_ping = ping_server(triton_client)
        if res_ping:
            requests_list = infer_async_http(opts)

            output_list = []
            num_wh_list = []
            for async_request in requests_list:
                # Blocks until this request's result is available.
                result = async_request.get_result()

                output_list.append(result.as_numpy("crop_infers"))
                num_wh_list.append(result.as_numpy("num_wh"))
            return output_list, num_wh_list
        else:
            print('triton-server ping bad')
    except Exception as e:
        print("channel creation failed: " + str(e))
        print('triton-server exception')

def main():
    """Parse CLI options, load the demo image(s), and run one timed
    ``triton_infer`` call.

    Returns whatever ``triton_infer`` returns (``None`` on failure or when
    an exception was caught and printed).
    """
    opts = get_argparser().parse_args()

    # Replace opts.input with your own list[np.ndarray] as needed.
    image_paths = [
        '/data/dog_8k.png',
    ]
    opts.input = [cv2.imread(path) for path in image_paths]

    try:
        print('running triton_infer...')
        start = time.time()
        results = triton_infer(opts)
        print(f'triton-server total inference time:{time.time() - start:.4f}s')

        return results
    except Exception as e:
        print("Exception: " + str(e))
        # Service call failed; a local inference fallback could go here.
        pass


if __name__ == '__main__':
    outputs = main()

    # Fix: main() returns None when the server is unreachable (triton_infer
    # swallows the error and prints it); indexing None previously raised
    # TypeError here.
    if outputs is None:
        print('no inference results to save')
    else:
        # outputs = (output_list, num_wh_list); take the first request's
        # first batch item. num_wh holds the crop-grid column/row counts.
        num_w, num_h = outputs[1][0][0][0], outputs[1][0][0][1]
        res = outputs[0][0][0]
        # below code copy from s1_slideWinCropImg.py
        import os
        save_dir = "/culn/crop_triton"
        # exist_ok avoids the check-then-create race of the old exists() guard.
        os.makedirs(save_dir, exist_ok=True)
        for i in range(num_h):
            for j in range(num_w):
                idx = i * num_w + j
                crop_img = res[idx]
                # Model outputs RGB; OpenCV writes BGR, so convert back.
                crop_img = cv2.cvtColor(crop_img, cv2.COLOR_RGB2BGR)
                save_path = os.path.join(save_dir, str(i) + '_' + str(j) + '.png')
                cv2.imwrite(save_path, crop_img)
