import cv2
import os
import tritonclient.grpc as grpcclient
import numpy as np
import time

def plot_box_label(ori_image, box, label=None, color=(0, 0, 255), txt_color=(255, 255, 255), pil = False, text_lw = 2):
    """Draw a bounding box, and optionally a filled label tag, onto an image.

    Args:
        ori_image (numpy.ndarray or PIL.Image): Source image. When it is a
            numpy array it is annotated in place.
        box (tuple): Box coordinates as (x_min, y_min, x_max, y_max).
        label (str, optional): Text rendered above (or inside) the box.
            Defaults to None (no label drawn).
        color (tuple, optional): Box / label-background color in (B, G, R).
            Defaults to (0, 0, 255).
        txt_color (tuple, optional): Label text color in (B, G, R).
            Defaults to (255, 255, 255), i.e. white.
        pil (bool, optional): Set True when `ori_image` is a PIL image; a
            numpy copy is made first. Defaults to False.
        text_lw (int, optional): Line width for the box; also scales the
            label font. Defaults to 2.

    Returns:
        numpy.ndarray: The annotated image as a numpy array.
    """
    image = np.asarray(ori_image).copy() if pil else ori_image

    top_left = (int(box[0]), int(box[1]))
    bottom_right = (int(box[2]), int(box[3]))
    cv2.rectangle(image, top_left, bottom_right, color, thickness=text_lw, lineType=cv2.LINE_AA)

    if label:
        font_thickness = max(text_lw - 1, 1)
        text_w, text_h = cv2.getTextSize(label, 0, fontScale=text_lw / 3, thickness=font_thickness)[0]
        # Place the label above the box when there is room, otherwise below the top edge.
        fits_above = top_left[1] - text_h >= 3
        if fits_above:
            bg_corner = (top_left[0] + text_w, top_left[1] - text_h - 3)
            text_origin = (top_left[0], top_left[1] - 2)
        else:
            bg_corner = (top_left[0] + text_w, top_left[1] + text_h + 3)
            text_origin = (top_left[0], top_left[1] + text_h + 2)

        # Filled rectangle behind the label text so it stays readable.
        cv2.rectangle(image, top_left, bg_corner, color, thickness=-1, lineType=cv2.LINE_AA)
        cv2.putText(image,
                    label, text_origin,
                    0,
                    text_lw / 3,
                    txt_color,
                    thickness=font_thickness,
                    lineType=cv2.LINE_AA)
    return np.asarray(image)


def infer_with_triton(image, triton_client, model_name, score_threshold, base_model_url,
                      save_dir="/workspace/wumh/wuminghui/15_Regional_flow_statistics/result"):
    """Run one image through a model on a Triton Inference Server.

    Sends the image plus a confidence threshold and a base-model URL to the
    model, prints the detections, and — when any boxes are returned — writes
    an annotated copy of the image into `save_dir`.

    Args:
        image (numpy.ndarray): Input image; dtype must match the model's
            UINT8 'image' input.
        triton_client (grpcclient.InferenceServerClient): Triton gRPC client.
        model_name (str): Name of the model on the Triton server.
        score_threshold (float): Confidence threshold used to filter
            low-confidence detections (sent as the FP16 'score' input).
        base_model_url (str): Base-model service URL forwarded to the model
            as the BYTES 'base_model_url' input.
        save_dir (str, optional): Directory where annotated result images are
            written. Defaults to the original hard-coded result path.

    Returns:
        dict: {'bboxes': ndarray, 'scores': ndarray, 'time_ms': float} with
        the raw detections and the inference latency in milliseconds.
    """
    # Build the three model inputs: image tensor, threshold, URL string.
    inputs = [
        grpcclient.InferInput('image', image.shape, "UINT8"),
        grpcclient.InferInput('score', [1], "FP16"),
        grpcclient.InferInput('base_model_url', [1], "BYTES")
    ]
    inputs[0].set_data_from_numpy(image)
    inputs[1].set_data_from_numpy(np.array([score_threshold], dtype=np.float16))
    inputs[2].set_data_from_numpy(np.array([base_model_url.encode('utf-8')], dtype=np.bytes_))

    # Request only the outputs we consume.
    outputs = [
        grpcclient.InferRequestedOutput('scores'),
        grpcclient.InferRequestedOutput('bboxes')
    ]

    # Time the round-trip to the server.
    t1 = time.time()
    infer_result = triton_client.infer(model_name, inputs=inputs, outputs=outputs)
    t2 = time.time()
    elapsed_ms = 1000 * (t2 - t1)

    bboxes = infer_result.as_numpy('bboxes')
    scores = infer_result.as_numpy('scores')
    print("bboxes:", bboxes)
    print("scores:", scores)

    for i in range(len(bboxes)):
        print(
            f"score:[{round(scores[i], 4)}] bbox:{bboxes[i]}")

    print('inference time is: {}ms'.format(elapsed_ms))

    # Draw every detection onto the frame and save it with a timestamp name.
    if len(bboxes) > 0:
        os.makedirs(save_dir, exist_ok=True)
        img_bgr = image
        frame_name = str(time.time()).replace('.', '')[:12]
        for i, box in enumerate(bboxes):
            img_bgr = plot_box_label(
                ori_image=img_bgr,
                box=box,
                label=f"{scores[i]:.2f}"
            )
        cv2.imwrite(os.path.join(save_dir, f"{frame_name}.jpg"), img_bgr)

    # Return the results so callers can use them (docstring previously
    # promised a dict but the function returned None).
    return {'bboxes': bboxes, 'scores': scores, 'time_ms': elapsed_ms}


def _run_stream_inference(source, triton_client, model_name, score_threshold, base_model_url):
    """Decode frames from a video file or RTSP stream and infer on each one."""
    cap = cv2.VideoCapture(source)
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            infer_with_triton(frame, triton_client, model_name, score_threshold, base_model_url)
    finally:
        # Release the capture even if inference raises mid-stream.
        cap.release()


def infer_with_image(input_path, url, model_name, score_threshold, base_model_url):
    """Run inference through a Triton Inference Server.

    `input_path` may be a single image file, a directory of images, a video
    file, or an RTSP stream URL; unsupported inputs are reported and skipped.

    Args:
        input_path (str): Image/directory/video path or 'rtsp://...' URL.
        url (str): Triton server gRPC endpoint, e.g. 'host:port'.
        model_name (str): Name of the model on the Triton server.
        score_threshold (float): Confidence threshold for detections.
        base_model_url (str): Base-model service URL forwarded to the model.
    """
    triton_client = grpcclient.InferenceServerClient(url=url)

    if os.path.isdir(input_path):
        # Directory input: attempt to decode every file, skip non-images.
        for filename in os.listdir(input_path):
            file_path = os.path.join(input_path, filename)
            image = cv2.imread(file_path)
            if image is not None:
                infer_with_triton(image, triton_client, model_name, score_threshold, base_model_url)
            else:
                print(f"Skipping non-image file: {file_path}")
    elif os.path.isfile(input_path):
        # Case-insensitive extension check so '.JPG' / '.MP4' etc. also work.
        lowered = input_path.lower()
        if lowered.endswith(('.jpg', '.jpeg', '.png')):
            image = cv2.imread(input_path)
            if image is not None:
                infer_with_triton(image, triton_client, model_name, score_threshold, base_model_url)
            else:
                print(f"Image file not found or cannot be read: {input_path}")
        elif lowered.endswith(('.avi', '.mp4')):
            _run_stream_inference(input_path, triton_client, model_name, score_threshold, base_model_url)
        else:
            print(f"Unsupported file type: {input_path}")
    elif input_path.startswith('rtsp://'):
        # RTSP stream: same frame loop as video files.
        _run_stream_inference(input_path, triton_client, model_name, score_threshold, base_model_url)
    else:
        print(f"Unsupported input type: {input_path}")


def _main():
    """Script entry point: run detection on the configured input source."""
    url = '192.168.96.136:8835'
    base_model_url = '192.168.96.136:8832'
    model_name = 'base'
    score_threshold = 0.2
    # input_path =  "/workspace/wumh/wuminghui/14_fire-escape-occupied-detection/result/173407370302.jpg"
    # NOTE(review): credentials are embedded in this RTSP URL — consider
    # loading them from an environment variable or config file instead.
    input_path = 'rtsp://admin:Hzby*12345@192.168.96.223:554/h264/ch1/main/av_stream'
    infer_with_image(input_path, url, model_name, score_threshold, base_model_url)


if __name__ == '__main__':
    _main()
 