import argparse
import numpy as np
import tritonclient.http as httpclient
import cv2
import time
from Iutils import diff_precision


def get_argparser():
    """Build the command-line parser for the triton client.

    Returns:
        argparse.ArgumentParser exposing --url, --model-name and
        --client-timeout.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "-u",
        "--url",
        type=str,
        required=False,
        # default='192.168.2.220:38017',  # alternative remote endpoint
        default='localhost:38017',
        # NOTE: help previously claimed localhost:8000, which did not match
        # the actual default below.
        help="Inference server URL. Default is localhost:38017.",
    )
    parser.add_argument(
        "-m",
        "--model-name",
        type=str,
        required=False,
        default="ensemble_sam2.1_encoder_pre_fp16",
        help="model name in triton server",
    )
    parser.add_argument(
        "-t",
        "--client-timeout",
        type=float,
        required=False,
        default=3,
        help="Client timeout in seconds. Default is 3.",
    )
    # The image path is not a CLI option: main() assigns opts.input with
    # already-decoded images before calling triton_infer().

    return parser


def triton_infer(opts):
    """Run asynchronous HTTP inference against a triton server.

    Args:
        opts: parsed CLI namespace (url, model_name, client_timeout); must
            also carry ``opts.input``, a list of decoded uint8 images
            (assigned by the caller, see ``main``).

    Returns:
        A flat list with 4 arrays per input image — two high-res feature
        maps and the image embedding cast to float32, plus the original
        image h/w cast to int32 — or ``None`` when the server is
        unreachable, the liveness ping fails, or inference raises.
    """
    input_names_types = [("esb_images", "UINT8"), ]
    output_names = [
        "esb_high_res_feats_0",
        "esb_high_res_feats_1",
        "esb_image_embed",
        "esb_org_img_hw",
    ]

    def ping_server(triton_client):
        # Liveness probe: any transport error means "not live".  A bare
        # except here would also swallow KeyboardInterrupt/SystemExit.
        try:
            return triton_client.is_server_live()
        except Exception:
            return False

    def infer_async_http(opts):
        # Fire one async request per image, return the request handles.
        async_request_list = []
        for image in opts.input:  # renamed from `input` (shadowed builtin)
            # Add a leading batch dimension: HWC -> 1xHWC.
            input_arrays = [np.expand_dims(image, axis=0)]

            inputs = []
            for (name, dtype), arr in zip(input_names_types, input_arrays):
                infer_input = httpclient.InferInput(name, arr.shape, dtype)
                infer_input.set_data_from_numpy(arr)
                inputs.append(infer_input)

            outputs = [httpclient.InferRequestedOutput(name)
                       for name in output_names]

            # Asynchronous inference call.
            async_request = triton_client.async_infer(
                model_name=opts.model_name,
                inputs=inputs,
                outputs=outputs,
            )
            async_request_list.append(async_request)
        return async_request_list

    try:
        triton_client = httpclient.InferenceServerClient(
            url=opts.url,
            concurrency=1,
            connection_timeout=opts.client_timeout,
        )

        if ping_server(triton_client):
            requests_list = infer_async_http(opts)

            outputs = []
            for async_request in requests_list:
                result = async_request.get_result()
                # First three outputs are float features/embeddings; the
                # last is the original image height/width as integers.
                outputs.append(result.as_numpy(output_names[0]).astype(np.float32))
                outputs.append(result.as_numpy(output_names[1]).astype(np.float32))
                outputs.append(result.as_numpy(output_names[2]).astype(np.float32))
                outputs.append(result.as_numpy(output_names[3]).astype(np.int32))
            return outputs
        else:
            print('triton-server ping bad')
    except Exception as e:
        print("channel creation failed: " + str(e))
        print('triton-server exception')

def main():
    """Load the test image(s), run triton inference, and time it.

    Returns:
        The flat output list from ``triton_infer`` on success, or ``None``
        when image loading or the server call fails (the error is printed).
    """
    opts = get_argparser().parse_args()
    img_list = [
        "/data/Racing_Terriers.jpg",
        # "/data/Racing_Terriers.jpg",
    ]

    try:
        images = []
        for img_path in img_list:
            img = cv2.imread(img_path)
            # cv2.imread silently returns None for a missing/unreadable
            # file; fail fast with a clear message instead of letting the
            # None propagate into the inference request.
            if img is None:
                raise FileNotFoundError('cannot read image: ' + img_path)
            images.append(img)
        opts.input = images

        print('running triton_infer...')
        st = time.time()
        outputs = triton_infer(opts)
        print(f'triton-server total inference time:{time.time() - st:.4f}s')

        return outputs
    except Exception as e:
        print("Exception: " + str(e))
        # Service call failed; a local-inference fallback could go here.
        return None


def comp(outputs):
    """Compare server outputs against ONNX fp32 reference dumps on disk.

    Reads three raw float32 binaries, reshapes each to the matching
    server output, and prints whether each pair agrees within a tolerance
    (looser for fp16 models).
    """
    reference_paths = [
        '/data/high_res_feats_0_onnx_fp32.bin',
        '/data/high_res_feats_1_onnx_fp32.bin',
        '/data/image_embed_onnx_fp32.bin',
    ]
    references = [
        np.fromfile(path, dtype=np.float32).reshape(out.shape)
        for path, out in zip(reference_paths, outputs)
    ]

    # Pick the tolerance from the model name: fp16 models get a looser bound.
    opts = get_argparser().parse_args()
    precision = 1e-1 if 'fp16' in opts.model_name else 1e-3

    eq0, eq1, eq2 = (
        diff_precision(ref, out, False, precision)
        for ref, out in zip(references, outputs)
    )
    print('equal0: {}'.format(eq0))
    print('equal1: {}'.format(eq1))
    print('equal2: {}'.format(eq2))


if __name__ == '__main__':
    outputs = main()
    # main() returns None when inference failed; comp(None) would crash on
    # outputs[0], so only run the comparison when outputs exist.
    if outputs:
        comp(outputs)
