# -*- coding: utf-8 -*-
# @Time    : 2024/11/18 18:04
# @Author  : sjh
# @Site    : 
# @File    : common.py
# @Comment :
import os
os.environ['CUDA_MODULE_LOADING'] = 'LAZY'
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda
from typing import List, Optional, Tuple, Union
import warnings
import matplotlib.pyplot as plt
import numpy as np
import time


class BaseTRTInference:
    """Generic TensorRT engine wrapper.

    Loads a serialized engine, optionally pins dynamic input shapes,
    allocates page-locked host and device buffers, and runs inference on a
    dedicated CUDA stream.
    """

    def __init__(self, engine_path, set_input_shape=None):
        """
        Initialize the inference wrapper.

        Args:
            engine_path (str): Path to the TensorRT engine file.
            set_input_shape (dict, optional): Mapping of input tensor name to
                shape, applied before buffer allocation. Use it to pin
                dynamic dimensions (e.g. {'input': (2, 96, 32)}). Default None.
        """
        self.cuda_ctx = pycuda.autoinit.context
        # self.cfx = cuda.Device(0).make_context()
        self.set_dynamic_batch = None  # substituted for -1 dims when building warm-up data
        self.inputs_shape = []
        self.outputs_shape = []
        self.engine = self.__load_engine(engine_path)
        self.context = self.engine.create_execution_context()

        # Apply caller-provided input shapes (dynamic-shape engines) before
        # buffers are sized from the context.
        if set_input_shape:
            self.__set_input_shapes(set_input_shape)
        self.inputs, self.outputs, self.bindings, self.stream = self.__allocate_buffers()

        self.__cal_params(engine_path)
        self.__warm_up(engine_path)

    def __load_engine(self, engine_path):
        """Deserialize and return the TensorRT engine stored at engine_path."""
        TRT_LOGGER = trt.Logger(trt.Logger.ERROR)
        with open(engine_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())

    def __set_input_shapes(self, set_input_shape: dict):
        """
        Set the shapes of several input tensors in one call.
        # e.g. self.context.set_input_shape('input', (1, 96, 32))

        Args:
            set_input_shape (dict): Mapping of input tensor name to shape.
        """
        # FIX: IExecutionContext has no `has_tensor` method; validate names
        # against the engine's binding list instead (the original call raised
        # AttributeError at runtime).
        known = {self.engine.get_tensor_name(i) for i in range(self.engine.num_bindings)}
        for tensor_name, shape in set_input_shape.items():
            if tensor_name in known:
                self.context.set_input_shape(tensor_name, shape)
                self.inputs_shape.append(shape)
            else:
                print(f"Warning: Tensor '{tensor_name}' not found in the model.")

    def __allocate_buffers(self):
        """Allocate a pinned host buffer and a device buffer per I/O tensor."""
        inputs = []
        outputs = []
        bindings = []
        stream = cuda.Stream()
        for binding in self.engine:
            # Query the context (not the engine) so pinned dynamic shapes are used.
            tensor_shape = self.context.get_tensor_shape(binding)
            size = trt.volume(tensor_shape)
            dtype = trt.nptype(self.engine.get_tensor_dtype(binding))
            host_mem = cuda.pagelocked_empty(size, dtype)
            device_mem = cuda.mem_alloc(host_mem.nbytes)
            bindings.append(int(device_mem))
            if self.engine.get_tensor_mode(binding) == trt.TensorIOMode.INPUT:
                inputs.append({'host': host_mem, 'device': device_mem, 'dtype': dtype})
                self.inputs_shape.append(tensor_shape)
            else:
                outputs.append({'host': host_mem, 'device': device_mem})
                self.outputs_shape.append(tensor_shape)
        return inputs, outputs, bindings, stream

    def __cal_params(self, engine_path):
        """Record binding counts/names and print a model summary."""
        names = [self.engine.get_tensor_name(i) for i in range(self.engine.num_bindings)]
        print("模型路径：", engine_path, '输入输出数量:', self.engine.num_bindings, '名称:', names)
        self.num_bindings = self.engine.num_bindings
        num_inputs, num_outputs = 0, 0

        for i in range(self.engine.num_bindings):
            tensor_name = self.engine.get_tensor_name(i)
            if self.engine.get_tensor_mode(tensor_name) == trt.TensorIOMode.INPUT:
                num_inputs += 1
            else:
                num_outputs += 1

        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        # NOTE: the slicing below assumes input bindings precede output bindings.
        self.input_names = names[:num_inputs]
        self.output_names = names[num_inputs:]
        print('输入输出数量', self.engine.num_bindings, '名称', names)
        print(self.num_inputs, self.num_outputs, self.input_names, self.output_names, self.inputs_shape, self.outputs_shape)

    def __warm_up(self, engine_path) -> None:
        """Run a few dummy inferences to prime CUDA kernels, then report timing."""
        print(f"{engine_path} model warmup")
        t1 = time.time()
        for _ in range(10):
            input_data_list = self.__pre_data()
            self.do_inference(input_data_list)
        print('inference once time', (time.time() - t1) / 10)

    def __pre_data(self):
        """Build random float32 arrays matching each input tensor's shape."""
        input_data_list = []
        for i in range(self.num_inputs):
            input_shape = self.context.get_tensor_shape(self.engine.get_tensor_name(i))
            # Replace any dynamic (-1) dimension with the configured batch size.
            input_shape = tuple(self.set_dynamic_batch if dim == -1 else dim for dim in input_shape)
            input_data_list.append(np.random.rand(*input_shape).astype(np.float32))
        return input_data_list

    def do_inference(self, input_images):
        """Copy inputs to the device, execute, and return reshaped outputs.

        Args:
            input_images: A single np.ndarray, or a list of arrays (one per
                model input).

        Returns:
            list[np.ndarray]: Output tensors reshaped to their engine shapes.
        """
        self.cuda_ctx.push()
        try:
            if isinstance(input_images, list):  # multiple inputs
                assert len(input_images) == len(self.inputs), "输入图像数量与模型输入数量不匹配"
                for inp, image in zip(self.inputs, input_images):
                    np.copyto(inp['host'], image.ravel())
            else:  # single input
                np.copyto(self.inputs[0]['host'], input_images.ravel())
            for inp in self.inputs:
                cuda.memcpy_htod_async(inp['device'], inp['host'], self.stream)
            self.context.execute_async_v2(bindings=self.bindings, stream_handle=self.stream.handle)
            for out in self.outputs:
                cuda.memcpy_dtoh_async(out['host'], out['device'], self.stream)
            self.stream.synchronize()

            # Reshape the flat host buffers to their declared output shapes.
            return [
                out['host'].reshape(self.outputs_shape[i])
                for i, out in enumerate(self.outputs)
            ]
        finally:
            # Always pop, even when a copy/execute step raised; otherwise the
            # CUDA context stack leaks and subsequent pushes misbehave.
            self.cuda_ctx.pop()

    def destroy(self):
        """Remove this instance's CUDA context from the top of the context stack."""
        # FIX: the attribute is `cuda_ctx`; the original `self.ctx.pop()`
        # raised AttributeError.
        self.cuda_ctx.pop()



CONF_THRESH = 0.5    # minimum detection confidence kept by postprocessing
IOU_THRESHOLD = 0.4  # IoU threshold passed to non_max_suppression_numpy
TORCH = False        # presumably toggles a torch-based path — not used in this chunk, verify callers

def timing_decorator(func):
    """Decorator that prints the wall-clock duration of each call.

    Args:
        func: The callable to wrap.

    Returns:
        A wrapper that forwards all arguments/return value unchanged and
        prints "<name> took X.XXXX seconds" after every call.
    """
    import functools  # local import keeps the file's top-level imports untouched

    @functools.wraps(func)  # FIX: preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print(f"{func.__name__} took {end_time - start_time:.4f} seconds")
        return result
    return wrapper
def keep_highest_conf_per_class_numpy(det):
    """For every class present in `det`, keep only the single best box.

    The "best" box maximizes a composite score of 0.1 * confidence plus the
    box area, so larger boxes win over marginally more confident small ones.

    Args:
        det (np.ndarray): Detections shaped (n, 6) as (x1, y1, x2, y2, conf, cls).

    Returns:
        np.ndarray: One row per distinct class (still 2-D), or `det`
        unchanged when it is empty.
    """
    if det.shape[0] == 0:
        return det

    keep = []
    for cls_id in np.unique(det[:, 5]):
        idxs = np.flatnonzero(det[:, 5] == cls_id)
        boxes = det[idxs]
        widths = boxes[:, 2] - boxes[:, 0]
        heights = boxes[:, 3] - boxes[:, 1]
        # Composite score: down-weighted confidence plus full box area.
        composite = boxes[:, 4] * 0.1 + 1.0 * (widths * heights)
        keep.append(idxs[int(np.argmax(composite))])
    return det[keep]



def non_max_suppression_numpy(
        prediction,
        conf_thres=0.25,
        iou_thres=0.45,
        classes=None,
        max_det=300,
):
    """Filter raw detections (NumPy port, end-to-end layout only).

    Args:
        prediction: Model output shaped (B, N, 6) as (x1, y1, x2, y2, conf,
            cls), or a list/tuple whose first element is that output.
        conf_thres (float): Confidence threshold in [0, 1].
        iou_thres (float): IoU threshold in [0, 1] (validated but unused for
            the (B, N, 6) end-to-end layout).
        classes: Optional iterable of class ids to keep.
        max_det (int): Maximum detections kept per image.

    Returns:
        list[np.ndarray] | None: Per-image filtered detections, or None when
        the input is not in the end-to-end (B, N, 6) layout.
    """
    assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0"
    assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0"

    # Validation-mode YOLOv8 returns (inference_out, loss_out); keep inference only.
    if isinstance(prediction, (list, tuple)):
        prediction = prediction[0]

    if classes is not None:
        classes = np.array(classes)

    if prediction.shape[-1] != 6:  # not an end-to-end (B, N, 6) layout
        return None

    kept = []
    for per_image in prediction:
        confident = per_image[per_image[:, 4] > conf_thres][:max_det]
        if classes is not None:
            confident = confident[np.isin(confident[:, 5], classes)]
        kept.append(confident)
    return kept
def clip_boxes(boxes, shape):
    """Clamp bounding-box coordinates so they lie inside an image.

    Args:
        boxes (numpy.ndarray): Boxes (..., 4+) as (x1, y1, x2, y2, ...).
        shape (tuple): Image shape as (height, width).

    Returns:
        numpy.ndarray: The array with x1/x2 clipped to [0, width] and
        y1/y2 clipped to [0, height].
    """
    height, width = shape[0], shape[1]
    # axis 0 holds x-coordinates (cols 0 and 2), axis 1 the y-coordinates.
    for axis, bound in ((0, width), (1, height)):
        boxes[..., [axis, axis + 2]] = np.clip(boxes[..., [axis, axis + 2]], 0, bound)
    return boxes
def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None, padding=True, xywh=False):
    """Rescale boxes from the letterboxed input frame back to the original image.

    Args:
        img1_shape (tuple): Shape (height, width) the boxes currently refer to.
        boxes (numpy.ndarray): Boxes (..., 4) as (x1, y1, x2, y2).
        img0_shape (tuple): Target image shape (height, width).
        ratio_pad (tuple): Optional precomputed (ratio, pad); when omitted,
            both are derived from the two shapes.
        padding (bool): True when boxes come from a YOLO-style letterboxed
            image; False for a plain resize.
        xywh (bool): Whether the boxes are in xywh format. Default False.

    Returns:
        numpy.ndarray: The rescaled boxes, clipped to img0_shape.
    """
    if ratio_pad is None:
        # gain = old / new; pad centers the resized content in img1.
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])
        pad_x = round((img1_shape[1] - img0_shape[1] * gain) / 2 - 0.1)
        pad_y = round((img1_shape[0] - img0_shape[0] * gain) / 2 - 0.1)
    else:
        gain = ratio_pad[0][0]
        pad_x, pad_y = ratio_pad[1][0], ratio_pad[1][1]

    if padding:
        boxes[..., 0] -= pad_x
        boxes[..., 1] -= pad_y
        if not xywh:
            # Widths/heights in xywh format are pad-independent; corners are not.
            boxes[..., 2] -= pad_x
            boxes[..., 3] -= pad_y
    boxes[..., :4] /= gain
    return clip_boxes(boxes, img0_shape)

class YOLOv10RTInference:
    """TensorRT inference wrapper for an end-to-end YOLOv10 detection engine.

    Loads the engine, allocates pinned host/device buffers, letterboxes BGR
    images to the network input size, and post-processes the engine output
    (assumed (1, 300, 6) per the comments below) into
    (x1, y1, x2, y2, conf, cls) detections.
    """

    def __init__(self, engine_path):
        # pycuda.autoinit created the CUDA context at import time.
        self.cuda_ctx = pycuda.autoinit.context
        self.engine_path = engine_path
        self.engine = self.load_engine()
        self.context = self.engine.create_execution_context()
        self.inputs, self.outputs, self.bindings, self.stream = self.allocate_buffers()

        # names = [self.engine.get_binding_name(i) for i in range(self.engine.num_bindings)]
        names = [self.engine.get_tensor_name(i) for i in range(self.engine.num_bindings)]


        self.num_bindings = self.engine.num_bindings
        num_inputs, num_outputs = 0, 0

        # Count input vs output bindings; the shape/name slicing below assumes
        # all input bindings precede all output bindings.
        for i in range(self.engine.num_bindings):
            tensor_name = self.engine.get_tensor_name(i)
            if self.engine.get_tensor_mode(tensor_name) == trt.TensorIOMode.INPUT:
                num_inputs += 1
            else:
                num_outputs += 1
        self.inputs_shape = [self.engine.get_tensor_shape(names[i]) for i in range(num_inputs)]
        self.outputs_shape = [self.engine.get_tensor_shape(names[i + num_inputs]) for i in range(num_outputs)]

        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.input_names = names[:num_inputs]
        self.output_names = names[num_inputs:]
        # Network input height/width — assumes the first input is NCHW.
        self.input_h, self.input_w = self.inputs_shape[0][2:]
        print('输入输出数量', self.engine.num_bindings, '名称', names)
        print(self.num_inputs, self.num_outputs, self.input_names, self.output_names, self.inputs_shape, self.outputs_shape)
        self.__warm_up()
    def load_engine(self):
        """Deserialize and return the TensorRT engine at self.engine_path."""
        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
        with open(self.engine_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())

    def __warm_up(self) -> None:
        """Push a few dummy frames through the full pipeline to prime CUDA."""
        print("model warmup")
        for _ in range(10):
            input = np.ones([self.input_h, self.input_w, 3], dtype=np.uint8)
            input_image, raw_bgr_image, origin_h, origin_w = self.preprocess_image(input)
            self.do_inference(input_image)

    def allocate_buffers(self):
        """Allocate a pinned host buffer and a device buffer per I/O tensor."""
        inputs = []
        outputs = []
        bindings = []
        stream = cuda.Stream()
        for binding in self.engine:
            size = trt.volume(self.engine.get_tensor_shape(binding))
            dtype = trt.nptype(self.engine.get_tensor_dtype(binding))
            host_mem = cuda.pagelocked_empty(size, dtype)
            device_mem = cuda.mem_alloc(host_mem.nbytes)
            bindings.append(int(device_mem))
            if self.engine.get_tensor_mode(binding) == trt.TensorIOMode.INPUT:
                inputs.append({'host': host_mem, 'device': device_mem, 'dtype': dtype})
            else:
                outputs.append({'host': host_mem, 'device': device_mem})
        return inputs, outputs, bindings, stream

    def do_inference(self, input_image):
        """Run one forward pass for a single preprocessed input.

        Args:
            input_image (np.ndarray): Preprocessed NCHW tensor; flattened
                into the first (and only) input buffer.

        Returns:
            list[np.ndarray]: Output tensors reshaped to the engine's shapes.
        """
        self.cuda_ctx.push()
        np.copyto(self.inputs[0]['host'], input_image.ravel())
        [cuda.memcpy_htod_async(inp['device'], inp['host'], self.stream) for inp in self.inputs]
        self.context.execute_async_v2(bindings=self.bindings, stream_handle=self.stream.handle)
        [cuda.memcpy_dtoh_async(out['host'], out['device'], self.stream) for out in self.outputs]
        self.stream.synchronize()

        # Reshape the flat host buffers to their declared output shapes.
        reshaped_outputs = [
            out['host'].reshape(self.outputs_shape[i])
            for i, out in enumerate(self.outputs)
        ]
        if self.cuda_ctx:
            self.cuda_ctx.pop()
        return reshaped_outputs

    def preprocess_image(self, raw_bgr_image):
        """
            description: Convert BGR image to RGB,
                         resize and pad it to target size, normalize to [0,1],
                         transform to NCHW format.
            param:
                raw_bgr_image: np.ndarray, BGR image of shape (H, W, 3)
            return:
                image:  the processed image, shape (1, 3, input_h, input_w)
                image_raw: the original image
                h: original height
                w: original width
        """
        # Copy the input and convert BGR -> RGB.
        image_raw = np.array(raw_bgr_image)
        h, w, c = image_raw.shape
        image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB)
        # Calculate width and height scale ratios and paddings.
        r_w = self.input_w / w
        r_h = self.input_h / h
        if r_h > r_w:
            # Width is the limiting side: pad top/bottom.
            tw = self.input_w
            th = int(r_w * h)
            tx1 = tx2 = 0
            ty1 = int((self.input_h - th) / 2)
            ty2 = self.input_h - th - ty1
        else:
            # Height is the limiting side: pad left/right.
            tw = int(r_h * w)
            th = self.input_h
            tx1 = int((self.input_w - tw) / 2)
            tx2 = self.input_w - tw - tx1
            ty1 = ty2 = 0
        # Resize the image with long side while maintaining ratio
        image = cv2.resize(image, (tw, th))
        # Pad the short side with (128,128,128)
        image = cv2.copyMakeBorder(
            image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, None, (128, 128, 128)
        )
        # Left disabled (original note said this was faster); the final
        # ascontiguousarray below already guarantees C-contiguity.
        # image = np.ascontiguousarray(image)
        if self.inputs[0]['dtype'] == np.float16:
            # FP16 engines: normalize in float32 first, then downcast.
            image = np.asarray(image, np.float32)
            # Normalize to [0,1]
            image /= 255.0
            image = np.asarray(image, np.float16)
        else:
            image = np.asarray(image, np.float32)
            image /= 255.0
        # HWC to CHW format:
        image = np.transpose(image, [2, 0, 1])
        # CHW to NCHW format
        image = np.expand_dims(image, axis=0)
        # Convert the image to row-major order, also known as "C order":
        image = np.ascontiguousarray(image)

        return image, image_raw, h, w

    @timing_decorator
    def infer(self, raw_bgr_image):
        """Detect on one BGR image and display the surviving boxes via matplotlib."""
        input_image, raw_bgr_image, origin_h, origin_w = self.preprocess_image(raw_bgr_image)
        results = self.do_inference(input_image)
        # results (1, 300, 6)
        outputs = results[:2][0]
        det = non_max_suppression_numpy(outputs, CONF_THRESH, IOU_THRESHOLD, classes=0, max_det=300)[0]  # map detections back to the original image size
        det[:, :4] = scale_boxes(input_image.shape[2:], det[:, :4], (origin_h, origin_w))
        det = keep_highest_conf_per_class_numpy(det)
        if len(det):
            for *box, conf, class_id in det:
                x_min, y_min, x_max, y_max = map(int, box)

                # Draw the bounding box (green).
                cv2.rectangle(raw_bgr_image, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)

                # Render the confidence label above the box.
                label = f"Conf: {conf:.2f}"
                cv2.putText(raw_bgr_image, label, (x_min, y_min - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)

            # Show the annotated image with matplotlib.
            plt.imshow(raw_bgr_image)
            plt.show()

    def get_bbox(self, raw_bgr_image):
        """Return the first detected box as [x1, y1, x2, y2], or None if none."""
        input_image, raw_bgr_image, origin_h, origin_w = self.preprocess_image(raw_bgr_image)
        results = self.do_inference(input_image)
        # results (1, 300, 6)
        outputs = results[:2][0]
        det = non_max_suppression_numpy(outputs, CONF_THRESH, IOU_THRESHOLD, classes=0, max_det=300)[0]  # map detections back to the original image size
        det[:, :4] = scale_boxes(input_image.shape[2:], det[:, :4], (origin_h, origin_w))
        det = keep_highest_conf_per_class_numpy(det)
        if len(det):
            # NOTE(review): returns only the first box; remaining detections
            # are discarded — confirm this is intended by callers.
            for *box, conf, class_id in det:
                return box
        else:
            return None

def compute_iou(bboxA, bboxB):
    """Compute the Intersection over Union (IoU) of two boxes.

    Args:
        bboxA (list): First box info (left, top, right, bottom, score).
        bboxB (list): Second box info (left, top, right, bottom, score).

    Returns:
        float: The IoU value in [0, 1].
    """
    inter_left = max(bboxA[0], bboxB[0])
    inter_top = max(bboxA[1], bboxB[1])
    inter_right = min(bboxA[2], bboxB[2])
    inter_bottom = min(bboxA[3], bboxB[3])

    # Negative extents mean no overlap; clamp to zero.
    inter_area = max(0, inter_right - inter_left) * max(0, inter_bottom - inter_top)

    area_a = (bboxA[2] - bboxA[0]) * (bboxA[3] - bboxA[1])
    area_b = (bboxB[2] - bboxB[0]) * (bboxB[3] - bboxB[1])
    union_area = float(area_a + area_b - inter_area)
    if union_area == 0:
        # Both boxes are degenerate; warn and avoid dividing by zero.
        union_area = 1e-5
        warnings.warn('union_area=0 is unexpected')

    return inter_area / union_area


def pose_to_bbox(keypoints: np.ndarray, expansion: float = 1.25) -> np.ndarray:
    """Compute an expanded bounding box around a set of keypoints.

    Args:
        keypoints (np.ndarray): Person keypoints, shape (K, >=2) as (x, y, ...).
        expansion (float): Scale factor applied about the box center.

    Returns:
        np.ndarray: Bounding box (x1, y1, x2, y2).
    """
    xs = keypoints[:, 0]
    ys = keypoints[:, 1]
    tight = np.array([xs.min(), ys.min(), xs.max(), ys.max()])
    center = np.array([tight[0] + tight[2], tight[1] + tight[3]]) / 2
    # Grow each half-extent by `expansion`, symmetric about the center.
    upper_left = center - (center - tight[:2]) * expansion
    lower_right = center + (tight[2:] - center) * expansion
    return np.concatenate([upper_left, lower_right])


import cv2

# Halpe 26-keypoint body skeleton metadata (mmpose-style): per-keypoint name,
# draw color, upper/lower body type and left/right swap partner, plus the
# skeleton links (keypoint-name pairs) used when drawing limbs.
halpe26 = dict(name='halpe26',
    keypoint_info={
        0: dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''),
        1: dict(name='left_eye', id=1, color=[51, 153, 255], type='upper', swap='right_eye'),
        2: dict(name='right_eye', id=2, color=[51, 153, 255], type='upper', swap='left_eye'),
        3: dict(name='left_ear', id=3, color=[51, 153, 255], type='upper', swap='right_ear'),
        4: dict(name='right_ear', id=4, color=[51, 153, 255], type='upper', swap='left_ear'),
        5: dict(name='left_shoulder', id=5, color=[0, 255, 0], type='upper', swap='right_shoulder'),
        6: dict(name='right_shoulder', id=6, color=[255, 128, 0], type='upper', swap='left_shoulder'),
        7: dict(name='left_elbow', id=7, color=[0, 255, 0], type='upper', swap='right_elbow'),
        8: dict(name='right_elbow', id=8, color=[255, 128, 0], type='upper', swap='left_elbow'),
        9: dict(name='left_wrist', id=9, color=[0, 255, 0], type='upper', swap='right_wrist'),
        10: dict(name='right_wrist', id=10, color=[255, 128, 0], type='upper', swap='left_wrist'),
        11: dict(name='left_hip', id=11, color=[0, 255, 0], type='lower', swap='right_hip'),
        12: dict(name='right_hip', id=12, color=[255, 128, 0], type='lower', swap='left_hip'),
        13: dict(name='left_knee', id=13, color=[0, 255, 0], type='lower', swap='right_knee'),
        14: dict(name='right_knee', id=14, color=[255, 128, 0], type='lower', swap='left_knee'),
        15: dict(name='left_ankle', id=15, color=[0, 255, 0], type='lower', swap='right_ankle'),
        16: dict(name='right_ankle', id=16, color=[255, 128, 0], type='lower', swap='left_ankle'),
        17: dict(name='head', id=17, color=[255, 128, 0], type='upper', swap=''),
        18: dict(name='neck', id=18, color=[255, 128, 0], type='upper', swap=''),
        19: dict(name='hip', id=19, color=[255, 128, 0], type='lower', swap=''),
        20: dict(name='left_big_toe', id=20, color=[255, 128, 0], type='lower', swap='right_big_toe'),
        21: dict(name='right_big_toe', id=21, color=[255, 128, 0], type='lower', swap='left_big_toe'),
        22: dict(name='left_small_toe', id=22, color=[255, 128, 0], type='lower', swap='right_small_toe'),
        23: dict(name='right_small_toe', id=23, color=[255, 128, 0], type='lower', swap='left_small_toe'),
        24: dict(name='left_heel', id=24, color=[255, 128, 0], type='lower', swap='right_heel'),
        25: dict(name='right_heel', id=25, color=[255, 128, 0], type='lower', swap='left_heel')
    },
    skeleton_info={
        0: dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]),
        1: dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]),
        2: dict(link=('left_hip', 'hip'), id=2, color=[0, 255, 0]),
        3: dict(link=('right_ankle', 'right_knee'), id=3, color=[255, 128, 0]),
        4: dict(link=('right_knee', 'right_hip'), id=4, color=[255, 128, 0]),
        5: dict(link=('right_hip', 'hip'), id=5, color=[255, 128, 0]),
        6: dict(link=('head', 'neck'), id=6, color=[51, 153, 255]),
        7: dict(link=('neck', 'hip'), id=7, color=[51, 153, 255]),
        8: dict(link=('neck', 'left_shoulder'), id=8, color=[0, 255, 0]),
        9: dict(link=('left_shoulder', 'left_elbow'), id=9, color=[0, 255, 0]),
        10: dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]),
        11: dict(link=('neck', 'right_shoulder'), id=11, color=[255, 128, 0]),
        12: dict(link=('right_shoulder', 'right_elbow'), id=12, color=[255, 128, 0]),
        13: dict(link=('right_elbow', 'right_wrist'), id=13, color=[255, 128, 0]),
        14: dict(link=('left_eye', 'right_eye'), id=14, color=[51, 153, 255]),
        15: dict(link=('nose', 'left_eye'), id=15, color=[51, 153, 255]),
        16: dict(link=('nose', 'right_eye'), id=16, color=[51, 153, 255]),
        17: dict(link=('left_eye', 'left_ear'), id=17, color=[51, 153, 255]),
        18: dict(link=('right_eye', 'right_ear'), id=18, color=[51, 153, 255]),
        19: dict(link=('left_ear', 'left_shoulder'), id=19, color=[51, 153, 255]),
        20: dict(link=('right_ear', 'right_shoulder'), id=20, color=[51, 153, 255]),
        21: dict(link=('left_ankle', 'left_big_toe'), id=21, color=[0, 255, 0]),
        22: dict(link=('left_ankle', 'left_small_toe'), id=22, color=[0, 255, 0]),
        23: dict(link=('left_ankle', 'left_heel'), id=23, color=[0, 255, 0]),
        24: dict(link=('right_ankle', 'right_big_toe'), id=24, color=[255, 128, 0]),
        25: dict(link=('right_ankle', 'right_small_toe'), id=25, color=[255, 128, 0]),
        26: dict(link=('right_ankle', 'right_heel'), id=26, color=[255, 128, 0]),
    }
)
def draw_mmpose(img,
                keypoints,
                scores,
                keypoint_info,
                skeleton_info,
                kpt_thr=0.1,
                radius=2,
                line_width=2):
    """Draw keypoint circles on an image.

    Only keypoints whose score reaches `kpt_thr` are drawn. Skeleton links
    are currently not rendered; `skeleton_info` and `line_width` are accepted
    for API compatibility.

    Args:
        img (np.ndarray): Image to draw on (returned annotated).
        keypoints (np.ndarray): Keypoints, shape (K, 2).
        scores: Per-keypoint scores, length K.
        keypoint_info (dict): Per-keypoint metadata (needs 'color').
        skeleton_info (dict): Skeleton metadata (unused).
        kpt_thr (float): Minimum score for a keypoint to be drawn.
        radius (int): Circle radius in pixels.
        line_width (int): Link thickness (unused).

    Returns:
        np.ndarray: The annotated image.
    """
    assert len(keypoints.shape) == 2

    visible = [score >= kpt_thr for score in scores]
    for idx, info in keypoint_info.items():
        color = tuple(info['color'])
        if not visible[idx]:
            continue
        point = keypoints[idx]
        # Circle outline thickness is fixed at 2, matching the original style.
        img = cv2.circle(img, (int(point[0]), int(point[1])), int(radius),
                         color, 2)

    return img
def draw_skeleton(img,
                  keypoints,
                  scores,
                  openpose_skeleton=False,
                  kpt_thr=0.5,
                  radius=2,
                  line_width=2):
    """Draw keypoints for every instance using the skeleton matching K.

    Args:
        img (np.ndarray): Image to draw on (returned annotated).
        keypoints (np.ndarray): Keypoints, shape (K, 2) or (N, K, 2).
        scores (np.ndarray): Scores, shape (K,) or (N, K).
        openpose_skeleton (bool): Select OpenPose-style keypoint layouts.
        kpt_thr (float): Minimum score for a keypoint to be drawn.
        radius (int): Keypoint circle radius.
        line_width (int): Skeleton link thickness (forwarded to draw_mmpose).

    Returns:
        np.ndarray: The annotated image.

    Raises:
        NotImplementedError: If no skeleton matches the keypoint count, or
            the matched skeleton definition is not available in this module.
    """
    # FIX: shape[-2] handles both (K, 2) and (N, K, 2); the original read
    # shape[1], which mis-counted single-instance 2-D input as K == 2 and
    # always raised NotImplementedError for it.
    num_keypoints = keypoints.shape[-2]

    if openpose_skeleton:
        mapping = {18: 'openpose18', 134: 'openpose134', 26: 'halpe26'}
    else:
        mapping = {17: 'coco17', 133: 'coco133', 21: 'hand21', 26: 'halpe26'}
    skeleton = mapping.get(num_keypoints)
    if skeleton is None:
        raise NotImplementedError

    # Look the definition up by name instead of eval() — same result,
    # without evaluating arbitrary code.
    skeleton_dict = globals().get(skeleton)
    if skeleton_dict is None:
        # Only `halpe26` is defined in this module; other layouts are absent.
        raise NotImplementedError(f"skeleton '{skeleton}' is not defined")
    keypoint_info = skeleton_dict['keypoint_info']
    skeleton_info = skeleton_dict['skeleton_info']

    if keypoints.ndim == 2:
        # Promote a single instance to a batch of one.
        keypoints = keypoints[None, :, :]
        scores = scores[None]  # (K,) -> (1, K)

    num_instance = keypoints.shape[0]
    if skeleton in ['coco17', 'coco133', 'hand21', 'halpe26']:
        for i in range(num_instance):
            img = draw_mmpose(img, keypoints[i], scores[i], keypoint_info,
                              skeleton_info, kpt_thr, radius, line_width)
    else:
        raise NotImplementedError
    return img
def get_simcc_maximum(simcc_x: np.ndarray,
                      simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Get maximum response location and value from simcc representations.

    Note:
        instance number: N
        num_keypoints: K
        heatmap height: H
        heatmap width: W

    Args:
        simcc_x (np.ndarray): x-axis SimCC in shape (K, Wx) or (N, K, Wx)
        simcc_y (np.ndarray): y-axis SimCC in shape (K, Wy) or (N, K, Wy)

    Returns:
        tuple:
        - locs (np.ndarray): locations of maximum heatmap responses in shape
            (K, 2) or (N, K, 2)
        - vals (np.ndarray): values of maximum heatmap responses in shape
            (K,) or (N, K)
    """
    # FIX: the docstring promises (K, Wx) input, but the code only unpacked
    # 3-D; promote 2-D input to a batch of one and squeeze on the way out.
    squeeze = simcc_x.ndim == 2
    if squeeze:
        simcc_x = simcc_x[None]
        simcc_y = simcc_y[None]

    N, K, Wx = simcc_x.shape
    simcc_x = simcc_x.reshape(N * K, -1)
    simcc_y = simcc_y.reshape(N * K, -1)

    # get maximum value locations
    x_locs = np.argmax(simcc_x, axis=1)
    y_locs = np.argmax(simcc_y, axis=1)
    locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32)

    # Per-keypoint confidence is the smaller of the two axis maxima.
    vals = np.minimum(np.amax(simcc_x, axis=1), np.amax(simcc_y, axis=1))
    # Suppress keypoints with non-positive confidence.
    locs[vals <= 0.] = -1

    # reshape
    locs = locs.reshape(N, K, 2)
    vals = vals.reshape(N, K)
    if squeeze:
        locs, vals = locs[0], vals[0]

    return locs, vals
def decode(simcc_x: np.ndarray, simcc_y: np.ndarray,
           simcc_split_ratio) -> Tuple[np.ndarray, np.ndarray]:
    """Decode SimCC distributions into keypoint coordinates and scores.

    Args:
        simcc_x (np.ndarray[K, Wx]): Model-predicted simcc along x.
        simcc_y (np.ndarray[K, Wy]): Model-predicted simcc along y.
        simcc_split_ratio (int): The split ratio of simcc; locations are
            divided by it to recover pixel coordinates.

    Returns:
        tuple: A tuple containing keypoints and scores.
        - np.ndarray[float32]: keypoints in shape (K, 2) or (n, K, 2)
        - np.ndarray[float32]: scores in shape (K,) or (n, K)
    """
    locations, confidences = get_simcc_maximum(simcc_x, simcc_y)
    return locations / simcc_split_ratio, confidences
def _get_3rd_point(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """To calculate the affine matrix, three pairs of points are required. This
    function is used to get the 3rd point, given 2D points a & b.

    The 3rd point is defined by rotating vector `a - b` by 90 degrees
    anticlockwise, using b as the rotation center.

    Args:
        a (np.ndarray): The 1st point (x,y) in shape (2, )
        b (np.ndarray): The 2nd point (x,y) in shape (2, )

    Returns:
        np.ndarray: The 3rd point.
    """
    direction = a - b
    c = b + np.r_[-direction[1], direction[0]]
    return c
def get_warp_matrix(center: np.ndarray,
                    scale: np.ndarray,
                    rot: float,
                    output_size: Tuple[int, int],
                    shift: Tuple[float, float] = (0., 0.),
                    inv: bool = False) -> np.ndarray:
    """Calculate the affine transformation matrix that can warp the bbox area
    in the input image to the output size.

    Args:
        center (np.ndarray[2, ]): Center of the bounding box (x, y).
        scale (np.ndarray[2, ]): Scale of the bounding box
            wrt [width, height].
        rot (float): Rotation angle (degree).
        output_size (np.ndarray[2, ] | list(2,)): Size of the
            destination heatmaps.
        shift (0-100%): Shift translation ratio wrt the width/height.
            Default (0., 0.).
        inv (bool): Option to inverse the affine transform direction.
            (inv=False: src->dst or inv=True: dst->src)

    Returns:
        np.ndarray: A 2x3 transformation matrix
    """
    shift = np.array(shift)
    src_w = scale[0]
    dst_w = output_size[0]
    dst_h = output_size[1]

    # compute transformation matrix
    rot_rad = np.deg2rad(rot)
    # Direction vector: half the box width "up" from the center, rotated by
    # `rot` for the source; axis-aligned for the destination.
    src_dir = _rotate_point(np.array([0., src_w * -0.5]), rot_rad)
    dst_dir = np.array([0., dst_w * -0.5])

    # get four corners of the src rectangle in the original image
    # (center, center + rotated direction, and the 90-degree completion point)
    src = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale * shift
    src[1, :] = center + src_dir + scale * shift
    src[2, :] = _get_3rd_point(src[0, :], src[1, :])

    # get four corners of the dst rectangle in the input image
    dst = np.zeros((3, 2), dtype=np.float32)
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
    dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])

    # cv2 solves the 2x3 affine from the three point pairs; `inv` swaps the
    # mapping direction.
    if inv:
        warp_mat = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        warp_mat = cv2.getAffineTransform(np.float32(src), np.float32(dst))

    return warp_mat
def _fix_aspect_ratio(bbox_scale: np.ndarray,
                      aspect_ratio: float) -> np.ndarray:
    """Extend the scale to match the given aspect ratio.

    Args:
        scale (np.ndarray): The image scale (w, h) in shape (2, )
        aspect_ratio (float): The ratio of ``w/h``

    Returns:
        np.ndarray: The reshaped image scale in (2, )
    """
    w, h = np.hsplit(bbox_scale, [1])
    bbox_scale = np.where(w > h * aspect_ratio,
                          np.hstack([w, w / aspect_ratio]),
                          np.hstack([h * aspect_ratio, h]))
    return bbox_scale


def _rotate_point(pt: np.ndarray, angle_rad: float) -> np.ndarray:
    """Rotate a point by an angle.

    Args:
        pt (np.ndarray): 2D point coordinates (x, y) in shape (2, )
        angle_rad (float): rotation angle in radian

    Returns:
        np.ndarray: Rotated point in shape (2, )
    """
    sn, cs = np.sin(angle_rad), np.cos(angle_rad)
    rot_mat = np.array([[cs, -sn], [sn, cs]])
    return rot_mat @ pt
def bbox_xyxy2cs(bbox: np.ndarray,
                 padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]:
    """Convert xyxy-format bbox(es) into (center, scale).

    Args:
        bbox (np.ndarray): Bounding box(es) in shape (4, ) or (n, 4),
            formatted as (left, top, right, bottom).
        padding (float): Padding factor multiplied into the scale.
            Default: 1.0.

    Returns:
        tuple: A tuple containing center and scale.
        - np.ndarray[float32]: Center (x, y) of the bbox in shape (2, )
            or (n, 2).
        - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2, )
            or (n, 2).
    """
    # promote a single (4, ) box to (1, 4) so the hsplit math is uniform
    single = bbox.ndim == 1
    if single:
        bbox = bbox[None, :]

    x1, y1, x2, y2 = np.hsplit(bbox, [1, 2, 3])
    center = np.hstack([x1 + x2, y1 + y2]) * 0.5
    scale = np.hstack([x2 - x1, y2 - y1]) * padding

    # restore the original rank for the single-box case
    if single:
        center, scale = center[0], scale[0]

    return center, scale
def top_down_affine(input_size: Tuple[int, int], bbox_scale: np.ndarray,
                    bbox_center: np.ndarray,
                    img: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Get the bbox image as the model input by affine transform.

    Args:
        input_size (tuple): The model input size as (w, h).
        bbox_scale (np.ndarray): The bbox scale (w, h) of the target.
        bbox_center (np.ndarray): The bbox center (x, y) of the target.
        img (np.ndarray): The original image (H, W, C).

    Returns:
        tuple: A tuple containing the cropped image and the scale.
        - np.ndarray[float32]: img after affine transform, sized (h, w).
        - np.ndarray[float32]: bbox scale after the aspect-ratio fix.
    """
    w, h = input_size
    warp_size = (int(w), int(h))

    # reshape bbox to fixed aspect ratio (matching the model input)
    bbox_scale = _fix_aspect_ratio(bbox_scale, aspect_ratio=w / h)

    # get the affine matrix (no rotation used in this pipeline)
    center = bbox_center
    scale = bbox_scale
    rot = 0
    warp_mat = get_warp_matrix(center, scale, rot, output_size=(w, h))

    # do affine transform
    img = cv2.warpAffine(img, warp_mat, warp_size, flags=cv2.INTER_LINEAR)

    return img, bbox_scale

class PoseTRTInference:
    """TensorRT wrapper for an RTMPose-style top-down pose model.

    Loads a serialized engine, allocates pinned host / device buffers for
    every binding, and exposes :meth:`infer`, which runs the full
    preprocess -> TensorRT execution -> SimCC decode pipeline.
    """

    def __init__(self, engine_path):
        """Load the engine, set up buffers and warm the model up.

        Args:
            engine_path (str): Path to the serialized TensorRT engine file.
        """
        # Reuse the primary CUDA context created by pycuda.autoinit.
        self.cuda_ctx = pycuda.autoinit.context
        self.engine_path = engine_path
        self.engine = self.load_engine()
        self.context = self.engine.create_execution_context()
        self.inputs, self.outputs, self.bindings, self.stream = self.allocate_buffers()

        names = [self.engine.get_tensor_name(i) for i in range(self.engine.num_bindings)]

        self.num_bindings = self.engine.num_bindings
        num_inputs, num_outputs = 0, 0
        for i in range(self.engine.num_bindings):
            tensor_name = self.engine.get_tensor_name(i)
            if self.engine.get_tensor_mode(tensor_name) == trt.TensorIOMode.INPUT:
                num_inputs += 1
            else:
                num_outputs += 1
        # NOTE(review): assumes all input bindings precede output bindings
        # in the engine's binding order — confirm for new engines.
        self.inputs_shape = [self.engine.get_tensor_shape(names[i]) for i in range(num_inputs)]
        self.outputs_shape = [self.engine.get_tensor_shape(names[i + num_inputs]) for i in range(num_outputs)]

        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.input_names = names[:num_inputs]
        self.output_names = names[num_inputs:]
        # NCHW input tensor: spatial size is the trailing (H, W) pair.
        self.input_h, self.input_w = self.inputs_shape[0][2:]
        print('输入输出数量', self.engine.num_bindings, '名称', names)
        print(self.num_inputs, self.num_outputs, self.input_names, self.output_names, self.inputs_shape, self.outputs_shape)

        self.__warm_up()

    def load_engine(self):
        """Deserialize and return the TensorRT engine at ``self.engine_path``."""
        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
        with open(self.engine_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())

    def __warm_up(self) -> None:
        """Run the full pipeline a few times so CUDA kernels get lazily loaded."""
        for _ in range(10):
            # `dummy` avoids shadowing the builtin `input`.
            dummy = np.ones([self.input_h, self.input_w, 3], dtype=np.uint8)
            resized_img, center, scale = self.preprocess(dummy)
            results = self.do_inference(resized_img)
            # Pass (w, h) consistently with infer(); warm-up output is discarded.
            self.postprocess(results, (self.input_w, self.input_h), center, scale)
        print("model warmup")

    def allocate_buffers(self):
        """Allocate pinned host and device memory for every engine binding.

        Returns:
            tuple: ``(inputs, outputs, bindings, stream)`` where inputs and
            outputs are lists of ``{'host', 'device', ...}`` dicts and
            bindings is the flat device-pointer list for execute_async_v2.
        """
        inputs, outputs, bindings = [], [], []
        stream = cuda.Stream()
        for binding in self.engine:
            size = trt.volume(self.engine.get_tensor_shape(binding))
            dtype = trt.nptype(self.engine.get_tensor_dtype(binding))
            host_mem = cuda.pagelocked_empty(size, dtype)
            device_mem = cuda.mem_alloc(host_mem.nbytes)
            bindings.append(int(device_mem))
            if self.engine.get_tensor_mode(binding) == trt.TensorIOMode.INPUT:
                inputs.append({'host': host_mem, 'device': device_mem, 'dtype': dtype})
            else:
                outputs.append({'host': host_mem, 'device': device_mem})
        return inputs, outputs, bindings, stream

    def do_inference(self, input_image):
        """Run one forward pass on a preprocessed image.

        Args:
            input_image (np.ndarray): Contiguous array whose volume matches
                the first input binding.

        Returns:
            tuple: One np.ndarray per output binding, reshaped to the
            engine-declared output shape.
        """
        np.copyto(self.inputs[0]['host'], input_image.ravel())
        for inp in self.inputs:
            cuda.memcpy_htod_async(inp['device'], inp['host'], self.stream)
        self.context.execute_async_v2(bindings=self.bindings, stream_handle=self.stream.handle)
        for out in self.outputs:
            cuda.memcpy_dtoh_async(out['host'], out['device'], self.stream)
        self.stream.synchronize()

        # Reshape each flat host buffer to its declared output shape.
        # (Previously this generic result was computed and then immediately
        # overwritten by a hard-coded 2-output tuple; the generic form is
        # identical for 2-output engines and works for any output count.)
        return tuple(
            out['host'].reshape(self.outputs_shape[i])
            for i, out in enumerate(self.outputs)
        )

    def preprocess(self,
            img: np.ndarray, bboxes: list = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Do preprocessing for RTMPose model inference.

        Args:
            img (np.ndarray): Input image in shape (H, W, C).
            bboxes (list): xyxy-format bounding box of the target; the whole
                image is used when None.

        Returns:
            tuple:
            - resized_img (np.ndarray): Preprocessed NCHW image.
            - center (np.ndarray): Center of the padded bbox.
            - scale (np.ndarray): Scale of the padded bbox.
        """
        if bboxes is None:
            # No box given: use the full image (HWC) as the target box.
            img_shape = img.shape[:2]
            bbox = np.array([0, 0, img_shape[1], img_shape[0]])
        else:
            bbox = np.array(bboxes)

        # get center and scale; padding enlarges the crop around the target
        center, scale = bbox_xyxy2cs(bbox, padding=1.25)

        # affine-crop the box region to the model input size
        resized_img, scale = top_down_affine((self.input_w, self.input_h), scale, center, img)

        # normalize with the model's fixed per-channel mean / std
        mean = np.array([123.675, 116.28, 103.53])
        std = np.array([58.395, 57.12, 57.375])
        resized_img = (resized_img - mean) / std

        # HWC -> CHW -> NCHW, C-contiguous for the host-buffer copy
        resized_img = np.transpose(resized_img, [2, 0, 1])
        resized_img = np.expand_dims(resized_img, axis=0)
        resized_img = np.ascontiguousarray(resized_img)
        return resized_img, center, scale

    def postprocess(self, outputs: List[np.ndarray],
                    model_input_size: Tuple[int, int],
                    center: Tuple[int, int],
                    scale: Tuple[int, int],
                    simcc_split_ratio: float = 2.0
                    ) -> Tuple[np.ndarray, np.ndarray]:
        """Postprocess for RTMPose model output.

        Args:
            outputs (list): (simcc_x, simcc_y) arrays from the model.
            model_input_size (tuple): Model input size as (w, h).
            center (tuple): Center of bbox in shape (x, y).
            scale (tuple): Scale of bbox in shape (w, h).
            simcc_split_ratio (float): Split ratio of simcc.

        Returns:
            tuple:
            - keypoints (np.ndarray): Keypoints rescaled to image space.
            - scores (np.ndarray): Model predict scores.
        """
        # decode the SimCC x/y representations into coordinates + scores
        simcc_x, simcc_y = outputs
        keypoints, scores = decode(simcc_x, simcc_y, simcc_split_ratio)

        # map from model-input space back to the original image
        keypoints = keypoints / model_input_size * scale + center - scale / 2

        return keypoints, scores

    def infer(self, raw_bgr_image: np.ndarray, bboxes: list = None):
        """Run the full pipeline on one BGR image.

        Args:
            raw_bgr_image (np.ndarray): Input image (H, W, 3).
            bboxes (list, optional): xyxy box of the target; whole image
                when None.

        Returns:
            tuple: (keypoints, scores).
        """
        self.cuda_ctx.push()
        try:
            resized_img, center, scale = self.preprocess(raw_bgr_image, bboxes)
            results = self.do_inference(resized_img)
            keypoints, scores = self.postprocess(results, (self.input_w, self.input_h), center, scale)
        finally:
            # Always pop the context, even when inference raises.
            self.cuda_ctx.pop()
        return keypoints, scores

    def pose_tracker(self):
        """Placeholder hook; tracking lives in the PoseTracker subclass."""
        pass
class PoseTracker(PoseTRTInference):
    """Greedy IoU-based pose tracker built on PoseTRTInference.

    Re-detects every ``det_frequency`` frames and otherwise reuses the
    previous frame's boxes; optionally assigns per-person track ids by
    greedy IoU matching against the last frame.
    """
    # Boxes with a smaller pixel area than this never receive a new track id.
    MIN_AREA = 1000
    def __init__(self,
                 engine_path,
                 det_frequency: int = 1,
                 tracking: bool = True,
                 tracking_thr: float = 0.3):
        """Initialize the tracker.

        Args:
            engine_path (str): Path to the serialized TensorRT pose engine.
            det_frequency (int): Run detection every N frames; frames in
                between reuse the previous frame's boxes.
            tracking (bool): Whether to assign IoU-based track ids.
            tracking_thr (float): Minimum IoU for a box to match a track.
        """
        super(PoseTracker, self).__init__(engine_path)

        self.pose_model = self.infer
        self.det_frequency = det_frequency  # detection frequency (frames)
        self.tracking = tracking  # whether tracking is enabled
        self.tracking_thr = tracking_thr  # IoU threshold for a track match
        self.reset()

    def reset(self):
        """Reset pose tracker state (frame counter, ids, cached boxes)."""
        self.frame_cnt = 0
        self.next_id = 0
        self.bboxes_last_frame = []
        self.track_ids_last_frame = []
    def track_by_iou(self, bbox):
        """Get track id using IoU tracking greedily.

        Args:
            bbox (list): The bbox info (left, top, right, bottom, score).

        Returns:
            track_id (int): The matched or newly assigned track id, or -1
                when the box is too small to start a track.
            match_result (list): The matched last-frame bbox, or None when
                no match above the threshold was found.
        """

        area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])

        max_iou_score = -1
        max_index = -1
        match_result = None
        for index, each_bbox in enumerate(self.bboxes_last_frame):

            iou_score = compute_iou(bbox, each_bbox)
            if iou_score > max_iou_score:
                max_iou_score = iou_score
                max_index = index

        if max_iou_score > self.tracking_thr:
            # if the bbox has a match and the IoU is larger than threshold;
            # pop so the same last-frame box cannot be matched twice
            track_id = self.track_ids_last_frame.pop(max_index)
            match_result = self.bboxes_last_frame.pop(max_index)

        elif area >= self.MIN_AREA:
            # no match, but the bbox is large enough,
            # assign a new track id
            track_id = self.next_id
            self.next_id += 1

        else:
            # if the bbox is too small, ignore it
            track_id = -1

        return track_id, match_result

    def __call__(self, image: np.ndarray, bboxes: list = None):
        """Process one frame: (re)detect, run pose, update track state.

        Args:
            image (np.ndarray): Current BGR frame.
            bboxes (list, optional): Externally supplied xyxy box(es).

        Returns:
            tuple: (keypoints, scores) for the current frame.
        """

        if self.frame_cnt % self.det_frequency == 0:
            if bboxes is None:
                keypoints, scores = self.pose_model(image)
                # NOTE(review): `bboxes` is overwritten on every loop
                # iteration, so only the LAST pose's box survives — looks
                # single-person only; confirm this is intended.
                for kpts in keypoints:
                    bboxes = pose_to_bbox(kpts)
        else:
            # between detections, reuse the boxes from the previous frame
            bboxes = self.bboxes_last_frame

        keypoints, scores = self.pose_model(image, bboxes=bboxes)

        if not self.tracking:
            # without tracking

            bboxes_current_frame = []
            for kpts in keypoints:
                bbox = pose_to_bbox(kpts)
                bboxes_current_frame.append(bbox)
        else:
            # with tracking

            if len(self.track_ids_last_frame) == 0:
                # bootstrap: give every cached box a fresh sequential id
                self.next_id = len(self.bboxes_last_frame)
                self.track_ids_last_frame = list(range(self.next_id))

            bboxes_current_frame = []
            track_ids_current_frame = []
            for kpts in keypoints:
                bbox = pose_to_bbox(kpts)

                track_id, _ = self.track_by_iou(bbox)

                if track_id > -1:
                    track_ids_current_frame.append(track_id)
                    bboxes_current_frame.append(bbox)

            self.track_ids_last_frame = track_ids_current_frame

        self.bboxes_last_frame = bboxes_current_frame
        self.frame_cnt += 1

        return keypoints, scores

if __name__ == "__main__":
    # Smoke test: build one wrapper per engine. Previously both instances
    # were bound to the same name (TRTInference1), so the YOLO wrapper was
    # silently dropped; distinct names keep both objects alive.
    TRTInference1 = BaseTRTInference(engine_path='./yolo/yolov10s.engine')
    TRTInference2 = BaseTRTInference(engine_path='./rtmpose/end2end.engine')