from TRT.common import *
def compute_iou(bboxA, bboxB):
    """Compute the Intersection over Union (IoU) of two xyxy boxes.

    Args:
        bboxA (list): First box as (left, top, right, bottom, score).
        bboxB (list): Second box as (left, top, right, bottom, score).

    Returns:
        float: The IoU value in [0, 1].
    """
    # Corners of the overlap rectangle (empty when boxes are disjoint).
    left = max(bboxA[0], bboxB[0])
    top = max(bboxA[1], bboxB[1])
    right = min(bboxA[2], bboxB[2])
    bottom = min(bboxA[3], bboxB[3])

    overlap = max(0, right - left) * max(0, bottom - top)

    area_a = (bboxA[2] - bboxA[0]) * (bboxA[3] - bboxA[1])
    area_b = (bboxB[2] - bboxB[0]) * (bboxB[3] - bboxB[1])
    union = float(area_a + area_b - overlap)
    if union == 0:
        # Degenerate (zero-area) boxes: avoid division by zero.
        union = 1e-5
        warnings.warn('union_area=0 is unexpected')

    return overlap / union


def pose_to_bbox(keypoints: np.ndarray, expansion: float = 1.25) -> np.ndarray:
    """Derive an expanded xyxy bounding box from a person's keypoints.

    Args:
        keypoints (np.ndarray): Keypoints of one person, shape (K, 2+).
        expansion (float): Factor by which the tight box is grown around
            its center.

    Returns:
        np.ndarray: Bounding box (x_min, y_min, x_max, y_max).
    """
    xs = keypoints[:, 0]
    ys = keypoints[:, 1]
    x_min, x_max = xs.min(), xs.max()
    y_min, y_max = ys.min(), ys.max()

    # Expand the tight box symmetrically about its center.
    center = np.array([x_min + x_max, y_min + y_max]) / 2
    half_wh = np.array([x_max - x_min, y_max - y_min]) / 2 * expansion
    return np.concatenate([center - half_wh, center + half_wh])


import cv2

# Halpe 26-keypoint skeleton definition in MMPose metainfo format:
# `keypoint_info` maps index -> name, draw color (BGR list), body half
# ('upper'/'lower') and left/right swap partner (for flip augmentation);
# `skeleton_info` maps index -> limb link (pair of keypoint names) + color.
halpe26 = dict(name='halpe26',
    keypoint_info={
        0: dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''),
        1: dict(name='left_eye', id=1, color=[51, 153, 255], type='upper', swap='right_eye'),
        2: dict(name='right_eye', id=2, color=[51, 153, 255], type='upper', swap='left_eye'),
        3: dict(name='left_ear', id=3, color=[51, 153, 255], type='upper', swap='right_ear'),
        4: dict(name='right_ear', id=4, color=[51, 153, 255], type='upper', swap='left_ear'),
        5: dict(name='left_shoulder', id=5, color=[0, 255, 0], type='upper', swap='right_shoulder'),
        6: dict(name='right_shoulder', id=6, color=[255, 128, 0], type='upper', swap='left_shoulder'),
        7: dict(name='left_elbow', id=7, color=[0, 255, 0], type='upper', swap='right_elbow'),
        8: dict(name='right_elbow', id=8, color=[255, 128, 0], type='upper', swap='left_elbow'),
        9: dict(name='left_wrist', id=9, color=[0, 255, 0], type='upper', swap='right_wrist'),
        10: dict(name='right_wrist', id=10, color=[255, 128, 0], type='upper', swap='left_wrist'),
        11: dict(name='left_hip', id=11, color=[0, 255, 0], type='lower', swap='right_hip'),
        12: dict(name='right_hip', id=12, color=[255, 128, 0], type='lower', swap='left_hip'),
        13: dict(name='left_knee', id=13, color=[0, 255, 0], type='lower', swap='right_knee'),
        14: dict(name='right_knee', id=14, color=[255, 128, 0], type='lower', swap='left_knee'),
        15: dict(name='left_ankle', id=15, color=[0, 255, 0], type='lower', swap='right_ankle'),
        16: dict(name='right_ankle', id=16, color=[255, 128, 0], type='lower', swap='left_ankle'),
        17: dict(name='head', id=17, color=[255, 128, 0], type='upper', swap=''),
        18: dict(name='neck', id=18, color=[255, 128, 0], type='upper', swap=''),
        19: dict(name='hip', id=19, color=[255, 128, 0], type='lower', swap=''),
        20: dict(name='left_big_toe', id=20, color=[255, 128, 0], type='lower', swap='right_big_toe'),
        21: dict(name='right_big_toe', id=21, color=[255, 128, 0], type='lower', swap='left_big_toe'),
        22: dict(name='left_small_toe', id=22, color=[255, 128, 0], type='lower', swap='right_small_toe'),
        23: dict(name='right_small_toe', id=23, color=[255, 128, 0], type='lower', swap='left_small_toe'),
        24: dict(name='left_heel', id=24, color=[255, 128, 0], type='lower', swap='right_heel'),
        25: dict(name='right_heel', id=25, color=[255, 128, 0], type='lower', swap='left_heel')
    },
    skeleton_info={
        0: dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]),
        1: dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]),
        2: dict(link=('left_hip', 'hip'), id=2, color=[0, 255, 0]),
        3: dict(link=('right_ankle', 'right_knee'), id=3, color=[255, 128, 0]),
        4: dict(link=('right_knee', 'right_hip'), id=4, color=[255, 128, 0]),
        5: dict(link=('right_hip', 'hip'), id=5, color=[255, 128, 0]),
        6: dict(link=('head', 'neck'), id=6, color=[51, 153, 255]),
        7: dict(link=('neck', 'hip'), id=7, color=[51, 153, 255]),
        8: dict(link=('neck', 'left_shoulder'), id=8, color=[0, 255, 0]),
        9: dict(link=('left_shoulder', 'left_elbow'), id=9, color=[0, 255, 0]),
        10: dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]),
        11: dict(link=('neck', 'right_shoulder'), id=11, color=[255, 128, 0]),
        12: dict(link=('right_shoulder', 'right_elbow'), id=12, color=[255, 128, 0]),
        13: dict(link=('right_elbow', 'right_wrist'), id=13, color=[255, 128, 0]),
        14: dict(link=('left_eye', 'right_eye'), id=14, color=[51, 153, 255]),
        15: dict(link=('nose', 'left_eye'), id=15, color=[51, 153, 255]),
        16: dict(link=('nose', 'right_eye'), id=16, color=[51, 153, 255]),
        17: dict(link=('left_eye', 'left_ear'), id=17, color=[51, 153, 255]),
        18: dict(link=('right_eye', 'right_ear'), id=18, color=[51, 153, 255]),
        19: dict(link=('left_ear', 'left_shoulder'), id=19, color=[51, 153, 255]),
        20: dict(link=('right_ear', 'right_shoulder'), id=20, color=[51, 153, 255]),
        21: dict(link=('left_ankle', 'left_big_toe'), id=21, color=[0, 255, 0]),
        22: dict(link=('left_ankle', 'left_small_toe'), id=22, color=[0, 255, 0]),
        23: dict(link=('left_ankle', 'left_heel'), id=23, color=[0, 255, 0]),
        24: dict(link=('right_ankle', 'right_big_toe'), id=24, color=[255, 128, 0]),
        25: dict(link=('right_ankle', 'right_small_toe'), id=25, color=[255, 128, 0]),
        26: dict(link=('right_ankle', 'right_heel'), id=26, color=[255, 128, 0]),
    }
)
def draw_mmpose(img,
                keypoints,
                scores,
                keypoint_info,
                skeleton_info,
                kpt_thr=0.1,
                radius=2,
                line_width=2):
    """Draw keypoints and skeleton links of one instance on the image.

    Args:
        img (np.ndarray): BGR image to draw on.
        keypoints (np.ndarray): Keypoints of one instance, shape (K, 2).
        scores: Per-keypoint confidence scores, length K.
        keypoint_info (dict): Keypoint metainfo (name/id/color per index).
        skeleton_info (dict): Link metainfo (keypoint-name pairs + color).
        kpt_thr (float): Minimum score for a keypoint to be drawn.
        radius (int): Keypoint circle radius.
        line_width (int): Skeleton line thickness.

    Returns:
        np.ndarray: The image with the pose drawn.
    """
    assert len(keypoints.shape) == 2

    vis_kpt = [s >= kpt_thr for s in scores]

    # Map keypoint names to indices so links (given as name pairs) resolve.
    link_dict = {}
    for i, kpt_info in keypoint_info.items():
        kpt_color = tuple(kpt_info['color'])
        link_dict[kpt_info['name']] = kpt_info['id']

        kpt = keypoints[i]
        if vis_kpt[i]:
            img = cv2.circle(img, (int(kpt[0]), int(kpt[1])), int(radius),
                             kpt_color, 2)

    # Draw skeleton links between mutually-visible keypoints. (This was
    # commented out in the original, leaving `skeleton_info` and
    # `line_width` unused.)
    for i, ske_info in skeleton_info.items():
        link = ske_info['link']
        pt0, pt1 = link_dict[link[0]], link_dict[link[1]]

        if vis_kpt[pt0] and vis_kpt[pt1]:
            link_color = ske_info['color']
            kpt0 = keypoints[pt0]
            kpt1 = keypoints[pt1]

            img = cv2.line(img, (int(kpt0[0]), int(kpt0[1])),
                           (int(kpt1[0]), int(kpt1[1])),
                           link_color,
                           thickness=line_width)

    return img
def draw_skeleton(img,
                  keypoints,
                  scores,
                  openpose_skeleton=False,
                  kpt_thr=0.5,
                  radius=2,
                  line_width=2):
    """Draw every detected instance's pose on the image.

    Args:
        img (np.ndarray): BGR image to draw on.
        keypoints (np.ndarray): Keypoints in shape (K, 2) or (N, K, 2).
        scores (np.ndarray): Scores in shape (K,) or (N, K).
        openpose_skeleton (bool): Select the OpenPose-style skeleton set.
        kpt_thr (float): Minimum score for a keypoint to be drawn.
        radius (int): Keypoint circle radius.
        line_width (int): Skeleton line thickness.

    Returns:
        np.ndarray: The image with all poses drawn.

    Raises:
        NotImplementedError: If no skeleton definition matches the
            keypoint count (or the named definition is not present).
    """
    # Keypoint count is the second-to-last axis of (..., K, 2).
    # (The original read shape[1], which is wrong for unbatched (K, 2) input.)
    num_keypoints = keypoints.shape[-2]

    if openpose_skeleton:
        skeleton = {18: 'openpose18', 134: 'openpose134',
                    26: 'halpe26'}.get(num_keypoints)
    else:
        skeleton = {17: 'coco17', 133: 'coco133', 21: 'hand21',
                    26: 'halpe26'}.get(num_keypoints)
    if skeleton is None:
        raise NotImplementedError(
            f'no skeleton defined for {num_keypoints} keypoints')

    # Resolve the definition by name instead of eval() — same effect,
    # without evaluating a constructed string.
    skeleton_dict = globals().get(skeleton)
    if skeleton_dict is None:
        raise NotImplementedError(
            f'skeleton definition {skeleton!r} is not available')
    keypoint_info = skeleton_dict['keypoint_info']
    skeleton_info = skeleton_dict['skeleton_info']

    if len(keypoints.shape) == 2:
        # Promote unbatched input to a single-instance batch. Using `...`
        # also accepts 1-D scores (the original required 2-D scores here).
        keypoints = keypoints[None, :, :]
        scores = scores[None, ...]

    num_instance = keypoints.shape[0]
    for i in range(num_instance):
        img = draw_mmpose(img, keypoints[i], scores[i], keypoint_info,
                          skeleton_info, kpt_thr, radius, line_width)
    return img
def get_simcc_maximum(simcc_x: np.ndarray,
                      simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Get maximum response location and value from simcc representations.

    Note:
        instance number: N
        num_keypoints: K
        heatmap height: H
        heatmap width: W

    Args:
        simcc_x (np.ndarray): x-axis SimCC in shape (K, Wx) or (N, K, Wx)
        simcc_y (np.ndarray): y-axis SimCC in shape (K, Wy) or (N, K, Wy)

    Returns:
        tuple:
        - locs (np.ndarray): locations of maximum heatmap responses in shape
            (K, 2) or (N, K, 2)
        - vals (np.ndarray): values of maximum heatmap responses in shape
            (K,) or (N, K)
    """
    # The docstring always promised (K, W) support, but the original body
    # unconditionally unpacked three dims; promote 2-D input to a batch of 1
    # and strip the batch dim again before returning.
    batched = simcc_x.ndim == 3
    if not batched:
        simcc_x = simcc_x[None, :, :]
        simcc_y = simcc_y[None, :, :]

    N, K, Wx = simcc_x.shape
    simcc_x = simcc_x.reshape(N * K, -1)
    simcc_y = simcc_y.reshape(N * K, -1)

    # get maximum value locations
    x_locs = np.argmax(simcc_x, axis=1)
    y_locs = np.argmax(simcc_y, axis=1)
    locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32)
    max_val_x = np.amax(simcc_x, axis=1)
    max_val_y = np.amax(simcc_y, axis=1)

    # confidence = min(max over x, max over y); non-positive peaks are
    # marked invalid with location (-1, -1)
    mask = max_val_x > max_val_y
    max_val_x[mask] = max_val_y[mask]
    vals = max_val_x
    locs[vals <= 0.] = -1

    # reshape
    locs = locs.reshape(N, K, 2)
    vals = vals.reshape(N, K)

    if not batched:
        locs, vals = locs[0], vals[0]

    return locs, vals
def decode(simcc_x: np.ndarray, simcc_y: np.ndarray,
           simcc_split_ratio) -> Tuple[np.ndarray, np.ndarray]:
    """Decode SimCC logits into keypoint coordinates and confidences.

    Args:
        simcc_x (np.ndarray[K, Wx]): model predicted simcc in x.
        simcc_y (np.ndarray[K, Wy]): model predicted simcc in y.
        simcc_split_ratio (int): The split ratio of simcc (bins per pixel).

    Returns:
        tuple:
        - np.ndarray[float32]: keypoints in shape (K, 2) or (n, K, 2)
        - np.ndarray[float32]: scores in shape (K,) or (n, K)
    """
    locs, confidences = get_simcc_maximum(simcc_x, simcc_y)
    # SimCC discretizes each axis at `simcc_split_ratio` bins per pixel;
    # divide to convert bin indices back to pixel coordinates.
    locs = locs / simcc_split_ratio
    return locs, confidences
def _get_3rd_point(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """To calculate the affine matrix, three pairs of points are required. This
    function is used to get the 3rd point, given 2D points a & b.

    The 3rd point is defined by rotating vector `a - b` by 90 degrees
    anticlockwise, using b as the rotation center.

    Args:
        a (np.ndarray): The 1st point (x,y) in shape (2, )
        b (np.ndarray): The 2nd point (x,y) in shape (2, )

    Returns:
        np.ndarray: The 3rd point.
    """
    direction = a - b
    c = b + np.r_[-direction[1], direction[0]]
    return c
def get_warp_matrix(center: np.ndarray,
                    scale: np.ndarray,
                    rot: float,
                    output_size: Tuple[int, int],
                    shift: Tuple[float, float] = (0., 0.),
                    inv: bool = False) -> np.ndarray:
    """Build the affine matrix warping a bbox region to the output size.

    Args:
        center (np.ndarray[2, ]): Center of the bounding box (x, y).
        scale (np.ndarray[2, ]): Scale of the bounding box
            wrt [width, height].
        rot (float): Rotation angle (degree).
        output_size (np.ndarray[2, ] | list(2,)): Size of the
            destination heatmaps.
        shift (0-100%): Shift translation ratio wrt the width/height.
            Default (0., 0.).
        inv (bool): Option to inverse the affine transform direction.
            (inv=False: src->dst or inv=True: dst->src)

    Returns:
        np.ndarray: A 2x3 transformation matrix
    """
    shift = np.array(shift)
    src_w = scale[0]
    dst_w, dst_h = output_size[0], output_size[1]

    # Direction vectors defining the (possibly rotated) box orientation.
    rot_rad = np.deg2rad(rot)
    src_dir = _rotate_point(np.array([0., src_w * -0.5]), rot_rad)
    dst_dir = np.array([0., dst_w * -0.5])

    # Three reference points in the source image: center, a point along the
    # rotated direction, and their 90°-rotated companion.
    src_pts = np.zeros((3, 2), dtype=np.float32)
    src_pts[0] = center + scale * shift
    src_pts[1] = center + src_dir + scale * shift
    src_pts[2] = _get_3rd_point(src_pts[0], src_pts[1])

    # The matching three points in the destination image.
    dst_pts = np.zeros((3, 2), dtype=np.float32)
    dst_pts[0] = [dst_w * 0.5, dst_h * 0.5]
    dst_pts[1] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
    dst_pts[2] = _get_3rd_point(dst_pts[0], dst_pts[1])

    if inv:
        return cv2.getAffineTransform(np.float32(dst_pts), np.float32(src_pts))
    return cv2.getAffineTransform(np.float32(src_pts), np.float32(dst_pts))
def _fix_aspect_ratio(bbox_scale: np.ndarray,
                      aspect_ratio: float) -> np.ndarray:
    """Extend the scale to match the given aspect ratio.

    Args:
        scale (np.ndarray): The image scale (w, h) in shape (2, )
        aspect_ratio (float): The ratio of ``w/h``

    Returns:
        np.ndarray: The reshaped image scale in (2, )
    """
    w, h = np.hsplit(bbox_scale, [1])
    bbox_scale = np.where(w > h * aspect_ratio,
                          np.hstack([w, w / aspect_ratio]),
                          np.hstack([h * aspect_ratio, h]))
    return bbox_scale


def _rotate_point(pt: np.ndarray, angle_rad: float) -> np.ndarray:
    """Rotate a point by an angle.

    Args:
        pt (np.ndarray): 2D point coordinates (x, y) in shape (2, )
        angle_rad (float): rotation angle in radian

    Returns:
        np.ndarray: Rotated point in shape (2, )
    """
    sn, cs = np.sin(angle_rad), np.cos(angle_rad)
    rot_mat = np.array([[cs, -sn], [sn, cs]])
    return rot_mat @ pt
def bbox_xyxy2cs(bbox: np.ndarray,
                 padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]:
    """Convert xyxy bounding box(es) into (center, scale) form.

    Args:
        bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted
            as (left, top, right, bottom)
        padding (float): BBox padding factor that is multiplied to scale.
            Default: 1.0

    Returns:
        tuple: A tuple containing center and scale.
        - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or
            (n, 2)
        - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or
            (n, 2)
    """
    # Work on a (n, 4) view so single boxes share the batched code path.
    single = bbox.ndim == 1
    if single:
        bbox = bbox[None, :]

    x1, y1, x2, y2 = np.hsplit(bbox, [1, 2, 3])
    center = (np.hstack([x1, y1]) + np.hstack([x2, y2])) * 0.5
    scale = np.hstack([x2 - x1, y2 - y1]) * padding

    if single:
        return center[0], scale[0]
    return center, scale
def top_down_affine(input_size: Tuple[int, int], bbox_scale: np.ndarray,
                    bbox_center: np.ndarray,
                    img: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Crop the bbox region to the model input size via affine transform.

    Args:
        input_size (tuple): The model input size as (w, h). (The original
            annotations claimed ``dict`` for these arguments, but every
            caller passes a tuple and numpy arrays.)
        bbox_scale (np.ndarray): The bbox scale (w, h) of the image.
        bbox_center (np.ndarray): The bbox center (x, y) of the image.
        img (np.ndarray): The original image.

    Returns:
        tuple:
        - np.ndarray[float32]: img after affine transform.
        - np.ndarray[float32]: bbox scale after aspect-ratio fixing.
    """
    w, h = input_size
    warp_size = (int(w), int(h))

    # Reshape the bbox to the model's fixed aspect ratio before warping so
    # the crop is not distorted.
    bbox_scale = _fix_aspect_ratio(bbox_scale, aspect_ratio=w / h)

    # Build the affine matrix (no rotation) and warp the crop.
    warp_mat = get_warp_matrix(bbox_center, bbox_scale, 0, output_size=(w, h))
    img = cv2.warpAffine(img, warp_mat, warp_size, flags=cv2.INTER_LINEAR)

    return img, bbox_scale

class TRTInference:
    """TensorRT wrapper for an RTMPose (SimCC head) pose-estimation engine.

    Loads a serialized engine from disk, allocates page-locked host buffers
    and device buffers once, and exposes ``infer()`` which runs the full
    preprocess -> TensorRT execution -> SimCC decode pipeline on one image.
    """

    def __init__(self, engine_path):
        self.engine_path = engine_path
        self.engine = self.load_engine()
        self.context = self.engine.create_execution_context()
        self.inputs, self.outputs, self.bindings, self.stream = self.allocate_buffers()

        # Tensor names in binding order. NOTE(review): `num_bindings` and
        # index-based tensor lookup belong to the pre-TRT-10 binding API —
        # confirm against the installed TensorRT version.
        names = [self.engine.get_tensor_name(i) for i in range(self.engine.num_bindings)]

        self.num_bindings = self.engine.num_bindings
        num_inputs, num_outputs = 0, 0
        for i in range(self.engine.num_bindings):
            tensor_name = self.engine.get_tensor_name(i)
            if self.engine.get_tensor_mode(tensor_name) == trt.TensorIOMode.INPUT:
                num_inputs += 1
            else:
                num_outputs += 1

        # Assumes all input bindings precede all output bindings.
        self.inputs_shape = [self.engine.get_tensor_shape(names[i]) for i in range(num_inputs)]
        self.outputs_shape = [self.engine.get_tensor_shape(names[i + num_inputs]) for i in range(num_outputs)]

        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.input_names = names[:num_inputs]
        self.output_names = names[num_inputs:]
        # NCHW input tensor: dims 2 and 3 are (height, width).
        self.input_h, self.input_w = self.inputs_shape[0][2:]
        print('输入输出数量', self.engine.num_bindings, '名称', names)
        print(self.num_inputs, self.num_outputs, self.input_names, self.output_names, self.inputs_shape, self.outputs_shape)

        self.__warm_up()

    def load_engine(self):
        """Deserialize the CUDA engine from ``self.engine_path``."""
        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
        with open(self.engine_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())

    def __warm_up(self) -> None:
        """Run a few dummy inferences to prime CUDA kernels and allocations."""
        for _ in range(10):
            dummy = np.ones([self.input_h, self.input_w, 3], dtype=np.uint8)
            resized_img, center, scale = self.preprocess(dummy)
            results = self.do_inference(resized_img)
            # model_input_size is (w, h) — the original warmup passed (h, w),
            # inconsistent with infer() and wrong for non-square inputs since
            # postprocess scales x by width and y by height.
            self.postprocess(results, (self.input_w, self.input_h), center, scale)
        print("model warmup")

    def allocate_buffers(self):
        """Allocate page-locked host and device buffers for every binding.

        Returns:
            tuple: (inputs, outputs, bindings, stream) where inputs/outputs
            are lists of ``{'host': ..., 'device': ...}`` dicts and bindings
            is the list of device pointers in binding order.
        """
        inputs = []
        outputs = []
        bindings = []
        stream = cuda.Stream()
        for binding in self.engine:
            size = trt.volume(self.engine.get_tensor_shape(binding))
            dtype = trt.nptype(self.engine.get_tensor_dtype(binding))
            host_mem = cuda.pagelocked_empty(size, dtype)
            device_mem = cuda.mem_alloc(host_mem.nbytes)
            bindings.append(int(device_mem))
            if self.engine.get_tensor_mode(binding) == trt.TensorIOMode.INPUT:
                inputs.append({'host': host_mem, 'device': device_mem, 'dtype': dtype})
            else:
                outputs.append({'host': host_mem, 'device': device_mem})
        return inputs, outputs, bindings, stream

    def do_inference(self, input_image):
        """Copy the input to the device, execute, and fetch all outputs.

        Args:
            input_image (np.ndarray): Preprocessed NCHW input tensor.

        Returns:
            tuple: One np.ndarray per engine output, each reshaped to its
            engine-reported shape (for RTMPose: simcc_x and simcc_y).
        """
        np.copyto(self.inputs[0]['host'], input_image.ravel())
        for inp in self.inputs:
            cuda.memcpy_htod_async(inp['device'], inp['host'], self.stream)
        self.context.execute_async_v2(bindings=self.bindings, stream_handle=self.stream.handle)
        for out in self.outputs:
            cuda.memcpy_dtoh_async(out['host'], out['device'], self.stream)
        self.stream.synchronize()

        # Reshape every flat host buffer to its reported output shape.
        # (The original built this list and then discarded it in favor of a
        # hard-coded two-output tuple; this works for any output count.)
        return tuple(
            out['host'].reshape(self.outputs_shape[i])
            for i, out in enumerate(self.outputs)
        )

    def preprocess(self,
            img: np.ndarray, bboxes: list = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Do preprocessing for RTMPose model inference.

        Args:
            img (np.ndarray): Input BGR image (HWC).
            bboxes (list): Optional xyxy-format bounding box of the target;
                when None, the whole image is treated as the box.

        Returns:
            tuple:
            - resized_img (np.ndarray): Preprocessed NCHW tensor.
            - center (np.ndarray): Center of the bbox.
            - scale (np.ndarray): Scale of the bbox.
        """
        if bboxes is None:
            # No box supplied: use the full image extent (HWC shape).
            img_shape = img.shape[:2]
            bbox = np.array([0, 0, img_shape[1], img_shape[0]])
        else:
            bbox = np.array(bboxes)

        # bbox -> (center, scale) with 25% padding, then crop via affine warp.
        center, scale = bbox_xyxy2cs(bbox, padding=1.25)
        resized_img, scale = top_down_affine((self.input_w, self.input_h), scale, center, img)

        # Mean/std normalization. NOTE(review): these are the RGB-order
        # ImageNet stats while the image comes from cv2 in BGR — confirm the
        # channel order matches the exported model's preprocessing.
        mean = np.array([123.675, 116.28, 103.53])
        std = np.array([58.395, 57.12, 57.375])
        resized_img = (resized_img - mean) / std

        # HWC -> CHW -> NCHW, contiguous for the host-buffer copy.
        resized_img = np.transpose(resized_img, [2, 0, 1])
        resized_img = np.expand_dims(resized_img, axis=0)
        resized_img = np.ascontiguousarray(resized_img)
        return resized_img, center, scale

    def postprocess(self, outputs: List[np.ndarray],
                    model_input_size: Tuple[int, int],
                    center: Tuple[int, int],
                    scale: Tuple[int, int],
                    simcc_split_ratio: float = 2.0
                    ) -> Tuple[np.ndarray, np.ndarray]:
        """Postprocess for RTMPose model output.

        Args:
            outputs (np.ndarray): Output of RTMPose model (simcc_x, simcc_y).
            model_input_size (tuple): RTMPose model input size as (w, h).
            center (tuple): Center of bbox in shape (x, y).
            scale (tuple): Scale of bbox in shape (w, h).
            simcc_split_ratio (float): Split ratio of simcc.

        Returns:
            tuple:
            - keypoints (np.ndarray): Keypoints rescaled to the original image.
            - scores (np.ndarray): Model predicted scores.
        """
        simcc_x, simcc_y = outputs
        keypoints, scores = decode(simcc_x, simcc_y, simcc_split_ratio)

        # Map from model-input coordinates back into the original bbox frame.
        keypoints = keypoints / model_input_size * scale + center - scale / 2

        return keypoints, scores

    def infer(self, raw_bgr_image: np.ndarray, bboxes: list = None):
        """Run the full pipeline on one BGR image.

        Args:
            raw_bgr_image (np.ndarray): Input BGR image.
            bboxes (list): Optional xyxy box restricting the crop.

        Returns:
            tuple: (keypoints, scores) in original-image coordinates.
        """
        resized_img, center, scale = self.preprocess(raw_bgr_image, bboxes)
        results = self.do_inference(resized_img)
        keypoints, scores = self.postprocess(results, (self.input_w, self.input_h), center, scale)

        return keypoints, scores

    def pose_tracker(self):
        # Placeholder; tracking lives in the PoseTracker subclass.
        pass
class PoseTracker(TRTInference):
    """Multi-frame pose estimator with greedy IoU-based track association."""

    # Unmatched detections smaller than this area (px^2) are discarded.
    MIN_AREA = 1000

    def __init__(self,
                 engine_path,
                 det_frequency: int = 1,
                 tracking: bool = True,
                 tracking_thr: float = 0.3):
        """Initialize the tracker.

        Args:
            engine_path: Path to the serialized TensorRT engine.
            det_frequency (int): Re-detect boxes (instead of reusing last
                frame's) every N frames.
            tracking (bool): Whether to associate instances across frames.
            tracking_thr (float): Minimum IoU to keep a track alive.
        """
        super().__init__(engine_path)

        self.pose_model = self.infer
        self.det_frequency = det_frequency
        self.tracking = tracking
        self.tracking_thr = tracking_thr
        self.reset()

    def reset(self):
        """Reset pose tracker state."""
        self.frame_cnt = 0
        self.next_id = 0
        self.bboxes_last_frame = []
        self.track_ids_last_frame = []

    def track_by_iou(self, bbox):
        """Greedily match ``bbox`` against last frame's boxes by IoU.

        Args:
            bbox (list): The bbox info (left, top, right, bottom, score).

        Returns:
            tuple:
            - track_id (int): Matched or newly assigned track id, or -1 if
              the box is unmatched and too small.
            - match_result (list | None): The matched previous-frame bbox,
              or None when no match was found.
        """
        area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])

        max_iou_score = -1
        max_index = -1
        match_result = None
        for index, each_bbox in enumerate(self.bboxes_last_frame):
            iou_score = compute_iou(bbox, each_bbox)
            if iou_score > max_iou_score:
                max_iou_score = iou_score
                max_index = index

        if max_iou_score > self.tracking_thr:
            # Matched: consume the previous-frame box and reuse its id so it
            # cannot be matched twice in this frame.
            track_id = self.track_ids_last_frame.pop(max_index)
            match_result = self.bboxes_last_frame.pop(max_index)
        elif area >= self.MIN_AREA:
            # Unmatched but large enough: start a new track.
            track_id = self.next_id
            self.next_id += 1
        else:
            # Too small to start a track: ignore.
            track_id = -1

        return track_id, match_result

    def __call__(self, image: np.ndarray, bboxes: list = None):
        """Estimate poses on one frame and update the tracking state.

        Args:
            image (np.ndarray): BGR frame.
            bboxes (list): Optional externally detected xyxy boxes.

        Returns:
            tuple: (keypoints, scores) for the estimated instances.
        """
        if self.frame_cnt % self.det_frequency == 0:
            if bboxes is None:
                # Bootstrap: run pose on the whole frame, then derive one box
                # per detected instance. (The original overwrote `bboxes`
                # with a single box each iteration, keeping only the last.)
                keypoints, scores = self.pose_model(image)
                bboxes = [pose_to_bbox(kpts) for kpts in keypoints]
        else:
            # Off-cycle frame: reuse the boxes tracked from the last frame.
            bboxes = self.bboxes_last_frame

        keypoints, scores = self.pose_model(image, bboxes=bboxes)

        if not self.tracking:
            # Without tracking: just refresh the boxes from the current poses.
            bboxes_current_frame = [pose_to_bbox(kpts) for kpts in keypoints]
        else:
            if len(self.track_ids_last_frame) == 0:
                # (Re)seed ids for whatever boxes survived the last frame.
                self.next_id = len(self.bboxes_last_frame)
                self.track_ids_last_frame = list(range(self.next_id))

            bboxes_current_frame = []
            track_ids_current_frame = []
            for kpts in keypoints:
                bbox = pose_to_bbox(kpts)

                track_id, _ = self.track_by_iou(bbox)

                if track_id > -1:
                    track_ids_current_frame.append(track_id)
                    bboxes_current_frame.append(bbox)

            self.track_ids_last_frame = track_ids_current_frame

        self.bboxes_last_frame = bboxes_current_frame
        self.frame_cnt += 1

        return keypoints, scores

if __name__ == '__main__':
    # Select the demo to run: 0 = single-image inference, 1 = webcam tracking.
    infer_id = 0
    if infer_id==0:
        # Single-image inference with the plain TRTInference wrapper.
        TRTPATH = r"E:\Sarcopenia physical function test\TRT\rtmpose\end2end.engine"

        trt_inference = TRTInference(TRTPATH)
        image_path = r"\\DS218\tmp\sjh\3dRGB-DEPTH\kinect\zxc\flip1\save_frame\color/0.jpg"
        image_bgr = cv2.imread(image_path) #.convert("RGB")
        t1 = time.time()
        keypoints, scores = trt_inference.infer(image_bgr)
        print('pre time:', time.time() - t1)

        img_show = image_bgr.copy()

        img_show = draw_skeleton(img_show,
                                 keypoints,
                                 scores,
                                 openpose_skeleton=False,
                                 kpt_thr=0.3,
                                 line_width=3)

        img_show = cv2.resize(img_show, (640, 480))
        cv2.imshow('Body and Feet Pose Estimation', img_show)
        key = cv2.waitKey(0)

    if infer_id==1:
        # Webcam demo with IoU-based tracking across frames.
        TRTPATH = "./rtmpose-m/end2end.engine"
        PoseTracker_trt_inference = PoseTracker(TRTPATH)
        cap = cv2.VideoCapture(0)  # Camera index (or a video file path)
        frame_idx = 0

        while cap.isOpened():
            success, frame = cap.read()
            frame_idx += 1

            if not success:
                break
            s = time.time()
            keypoints, scores = PoseTracker_trt_inference(frame)
            det_time = time.time() - s
            print("inference time: ", det_time)
            print(keypoints, scores)
            img_show = frame.copy()

            img_show = draw_skeleton(img_show,
                                     keypoints,
                                     scores,
                                     openpose_skeleton=False,
                                     kpt_thr=0.3,
                                     line_width=3)

            img_show = cv2.resize(img_show, (640, 480))
            cv2.imshow('Body and Feet Pose Estimation', img_show)
            key = cv2.waitKey(1)
            if key == ord('q'):  # Press 'q' to exit
                break

        cap.release()
        cv2.destroyAllWindows()