# -*- coding: utf-8 -*-
# @Time    : 2024/9/11 9:51
# @Author  : sjh
# @Site    : 
# @File    : yolov10_inference.py
# @Comment :
from TRT.common import *


# Minimum confidence for a detection to be kept (used by infer/get_bbox).
CONF_THRESH = 0.5
# IoU threshold; passed to non_max_suppression_numpy, which only validates it
# on the end-to-end output path — the value is never applied in this file.
IOU_THRESHOLD = 0.4
# Not referenced anywhere in this file — presumably a leftover backend switch.
TORCH = False

def timing_decorator(func):
    """Decorator that prints the wall-clock run time of each call to *func*.

    Args:
        func: the callable to wrap.

    Returns:
        The wrapped callable; its return value is passed through unchanged.
    """
    from functools import wraps  # local import: file otherwise relies on a star import

    @wraps(func)  # preserve __name__/__doc__ so the printed name is correct
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print(f"{func.__name__} took {end_time - start_time:.4f} seconds")
        return result
    return wrapper
def keep_highest_conf_per_class_numpy(det):
    """Keep exactly one detection per class, the one with the best score.

    The per-box score is ``0.1 * confidence + box_area``, so the area term
    dominates: among boxes of the same class the largest wins, with
    confidence only breaking near-ties.

    Args:
        det (numpy.ndarray): (N, 6) array of (x1, y1, x2, y2, conf, cls).

    Returns:
        numpy.ndarray: 2-D array with at most one row per class.
    """
    if det.shape[0] == 0:
        return det  # nothing detected — pass through unchanged

    best_rows = []
    for cls_id in np.unique(det[:, 5]):
        # Absolute row indices of every detection of this class.
        row_ids = np.where(det[:, 5] == cls_id)[0]
        boxes = det[row_ids]

        widths = boxes[:, 2] - boxes[:, 0]
        heights = boxes[:, 3] - boxes[:, 1]

        # Composite score: area-dominant, confidence as a weak tie-breaker.
        composite = boxes[:, 4] * 0.1 + 1.0 * (widths * heights)

        best_rows.append(row_ids[np.argmax(composite)])

    # Fancy indexing keeps the result 2-D.
    return det[best_rows]



def non_max_suppression_numpy(
        prediction,
        conf_thres=0.25,
        iou_thres=0.45,
        classes=None,
        max_det=300,
):
    """Confidence/class filtering for end-to-end (NMS-free) detector output.

    Args:
        prediction: array of shape (batch, num_dets, 6) with rows
            (x1, y1, x2, y2, conf, cls), or a (inference_out, loss_out)
            tuple from a model in validation mode.
        conf_thres: minimum confidence for a detection to be kept.
        iou_thres: validated but not otherwise used on the end-to-end path.
        classes: optional iterable of class ids to keep; None keeps all.
        max_det: per-image cap on kept detections.

    Returns:
        list[numpy.ndarray] of per-image detections, or None when the last
        dimension of *prediction* is not 6 (no NMS implemented here).
    """
    # Checks
    assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0"
    assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0"

    # Validation-mode YOLOv8 models return (inference_out, loss_out).
    if isinstance(prediction, (list, tuple)):
        prediction = prediction[0]

    class_filter = np.array(classes) if classes is not None else None

    if prediction.shape[-1] != 6:
        # Not an end-to-end (BNC, e.g. 1,300,6) layout — unsupported here.
        return None

    # Per image: drop low-confidence rows, then cap the count.
    kept = []
    for image_pred in prediction:
        confident = image_pred[image_pred[:, 4] > conf_thres][:max_det]
        kept.append(confident)

    if class_filter is not None:
        kept = [p[np.isin(p[:, 5], class_filter)] for p in kept]
    return kept
def clip_boxes(boxes, shape):
    """Clamp xyxy bounding boxes in place to the image bounds.

    Args:
        boxes (numpy.ndarray): boxes with (x1, y1, x2, y2) in the last axis.
        shape (tuple): target image (height, width).

    Returns:
        numpy.ndarray: the same array, coordinates clipped to the image.
    """
    height, width = shape[0], shape[1]
    x_cols = [0, 2]
    y_cols = [1, 3]
    boxes[..., x_cols] = np.clip(boxes[..., x_cols], 0, width)   # x1, x2 into [0, w]
    boxes[..., y_cols] = np.clip(boxes[..., y_cols], 0, height)  # y1, y2 into [0, h]
    return boxes
def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None, padding=True, xywh=False):
    """Map boxes from the network-input image back onto the original image.

    Args:
        img1_shape (tuple): (height, width) of the image the boxes live in.
        boxes (numpy.ndarray): boxes in (x1, y1, x2, y2) layout (modified in
            place).
        img0_shape (tuple): (height, width) of the target (original) image.
        ratio_pad (tuple): optional ((gain, ...), (pad_x, pad_y)) override; if
            None, gain and padding are derived from the two shapes.
        padding (bool): True when boxes come from a letterboxed (YOLO-style)
            image; False for a plain resize.
        xywh (bool): True when the box format is xywh (only the x/y center is
            shifted by the padding).

    Returns:
        numpy.ndarray: the rescaled boxes, clipped to *img0_shape*.
    """
    if ratio_pad is not None:
        gain = ratio_pad[0][0]
        pad_x, pad_y = ratio_pad[1][0], ratio_pad[1][1]
    else:
        # gain = old / new; padding centers the resized content (letterbox).
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])
        pad_x = round((img1_shape[1] - img0_shape[1] * gain) / 2 - 0.1)
        pad_y = round((img1_shape[0] - img0_shape[0] * gain) / 2 - 0.1)

    if padding:
        boxes[..., 0] -= pad_x
        boxes[..., 1] -= pad_y
        if not xywh:
            boxes[..., 2] -= pad_x
            boxes[..., 3] -= pad_y

    boxes[..., :4] /= gain  # undo the resize gain

    # Clip to the original image bounds (same effect as clip_boxes, inlined
    # so this function is self-contained).
    boxes[..., [0, 2]] = np.clip(boxes[..., [0, 2]], 0, img0_shape[1])
    boxes[..., [1, 3]] = np.clip(boxes[..., [1, 3]], 0, img0_shape[0])
    return boxes
class TRTInference:
    """TensorRT inference wrapper for an end-to-end YOLOv10 detection engine.

    Deserializes the engine once, pre-allocates pinned host / device buffers,
    and provides letterbox preprocessing, inference and box postprocessing.

    NOTE(review): mixes the legacy binding API (``num_bindings``,
    ``execute_async_v2``) with the newer tensor-name API — works where both
    are available (TensorRT 8.x), but the legacy calls are removed in
    TensorRT 10; confirm the target TensorRT version.
    """

    def __init__(self, engine_path):
        """Load the engine, build the execution context and warm the model up.

        Args:
            engine_path (str): path to the serialized .engine file.
        """
        self.engine_path = engine_path
        self.engine = self.load_engine()
        self.context = self.engine.create_execution_context()
        self.inputs, self.outputs, self.bindings, self.stream = self.allocate_buffers()

        # Tensor names in binding order (get_binding_name is the deprecated
        # equivalent of get_tensor_name).
        names = [self.engine.get_tensor_name(i) for i in range(self.engine.num_bindings)]


        self.num_bindings = self.engine.num_bindings
        num_inputs, num_outputs = 0, 0

        # Count input vs. output tensors via their I/O mode.
        for i in range(self.engine.num_bindings):
            tensor_name = self.engine.get_tensor_name(i)
            if self.engine.get_tensor_mode(tensor_name) == trt.TensorIOMode.INPUT:
                num_inputs += 1
            else:
                num_outputs += 1
        # Assumes all input bindings precede all output bindings — TODO
        # confirm this holds for engines with interleaved bindings.
        self.inputs_shape = [self.engine.get_tensor_shape(names[i]) for i in range(num_inputs)]
        self.outputs_shape = [self.engine.get_tensor_shape(names[i + num_inputs]) for i in range(num_outputs)]

        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.input_names = names[:num_inputs]
        self.output_names = names[num_inputs:]
        # Network input height/width taken from the first input's NCHW shape.
        self.input_h, self.input_w = self.inputs_shape[0][2:]
        print('输入输出数量', self.engine.num_bindings, '名称', names)
        print(self.num_inputs, self.num_outputs, self.input_names, self.output_names, self.inputs_shape, self.outputs_shape)
        # CUDA context created by pycuda.autoinit; pushed/popped per inference.
        self.cuda_ctx = pycuda.autoinit.context
        self.__warm_up()

    def load_engine(self):
        """Deserialize and return the CUDA engine from ``self.engine_path``."""
        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
        with open(self.engine_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())

    def __warm_up(self) -> None:
        """Run 10 dummy inferences so later timings exclude lazy CUDA setup."""
        print("model warmup")
        for _ in range(10):
            input = np.ones([self.input_h, self.input_w, 3], dtype=np.uint8)
            input_image, raw_bgr_image, origin_h, origin_w = self.preprocess_image(input)
            self.do_inference(input_image)

    def allocate_buffers(self):
        """Allocate pinned host and device memory for every engine binding.

        Returns:
            tuple: (inputs, outputs, bindings, stream) where inputs/outputs
            are lists of {'host', 'device'(, 'dtype')} dicts and bindings is
            the list of device pointers in binding order.
        """
        inputs = []
        outputs = []
        bindings = []
        stream = cuda.Stream()
        for binding in self.engine:
            size = trt.volume(self.engine.get_tensor_shape(binding))
            dtype = trt.nptype(self.engine.get_tensor_dtype(binding))
            # Page-locked host memory enables async H2D/D2H copies.
            host_mem = cuda.pagelocked_empty(size, dtype)
            device_mem = cuda.mem_alloc(host_mem.nbytes)
            bindings.append(int(device_mem))
            if self.engine.get_tensor_mode(binding) == trt.TensorIOMode.INPUT:
                inputs.append({'host': host_mem, 'device': device_mem, 'dtype': dtype})
            else:
                outputs.append({'host': host_mem, 'device': device_mem})
        return inputs, outputs, bindings, stream

    def do_inference(self, input_image):
        """Copy the tensor to the device, execute the engine, fetch outputs.

        Args:
            input_image (numpy.ndarray): preprocessed NCHW tensor matching
                the engine's first input binding.

        Returns:
            list[numpy.ndarray]: outputs reshaped to ``self.outputs_shape``.
        """
        self.cuda_ctx.push()
        np.copyto(self.inputs[0]['host'], input_image.ravel())
        [cuda.memcpy_htod_async(inp['device'], inp['host'], self.stream) for inp in self.inputs]
        self.context.execute_async_v2(bindings=self.bindings, stream_handle=self.stream.handle)
        [cuda.memcpy_dtoh_async(out['host'], out['device'], self.stream) for out in self.outputs]
        self.stream.synchronize()

        # Reshape the flat host buffers to the shapes recorded at init time.
        reshaped_outputs = [
            out['host'].reshape(self.outputs_shape[i])
            for i, out in enumerate(self.outputs)
        ]
        if self.cuda_ctx:
            self.cuda_ctx.pop()
        return reshaped_outputs

    def preprocess_image(self, raw_bgr_image):
        """Letterbox-preprocess a BGR image for the network.

        Converts BGR to RGB, resizes with preserved aspect ratio, pads the
        short side with gray (128, 128, 128), normalizes to [0, 1] and
        returns an NCHW tensor in the engine's input dtype.

        Args:
            raw_bgr_image: BGR image array of shape (H, W, 3).

        Returns:
            tuple: (image, image_raw, h, w) — the NCHW tensor, the original
            array, and the original height and width.
        """
        # Keep the original array for drawing / size restoration.
        image_raw = np.array(raw_bgr_image)
        h, w, c = image_raw.shape
        image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB)
        # Letterbox: scale by the limiting side, center-pad the other.
        r_w = self.input_w / w
        r_h = self.input_h / h
        if r_h > r_w:
            tw = self.input_w
            th = int(r_w * h)
            tx1 = tx2 = 0
            ty1 = int((self.input_h - th) / 2)
            ty2 = self.input_h - th - ty1
        else:
            tw = int(r_h * w)
            th = self.input_h
            tx1 = int((self.input_w - tw) / 2)
            tx2 = self.input_w - tw - tx1
            ty1 = ty2 = 0
        # Resize the image with long side while maintaining ratio
        image = cv2.resize(image, (tw, th))
        # Pad the short side with (128,128,128)
        image = cv2.copyMakeBorder(
            image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, None, (128, 128, 128)
        )
        if self.inputs[0]['dtype'] == np.float16:
            # Normalize in float32 first, then narrow to the fp16 input dtype.
            image = np.asarray(image, np.float32)
            # Normalize to [0,1]
            image /= 255.0
            image = np.asarray(image, np.float16)
        else:
            image = np.asarray(image, np.float32)
            image /= 255.0
        # HWC to CHW format:
        image = np.transpose(image, [2, 0, 1])
        # CHW to NCHW format
        image = np.expand_dims(image, axis=0)
        # Convert the image to row-major order, also known as "C order":
        image = np.ascontiguousarray(image)

        return image, image_raw, h, w

    @timing_decorator
    def infer(self, raw_bgr_image):
        """Detect class 0 on a BGR image and display the boxed result.

        Args:
            raw_bgr_image: BGR image array of shape (H, W, 3).
        """
        input_image, raw_bgr_image, origin_h, origin_w = self.preprocess_image(raw_bgr_image)
        results = self.do_inference(input_image)
        # results (1, 300, 6)
        # NOTE(review): results[:2][0] is just results[0]; the [:2] slice is redundant.
        outputs = results[:2][0]
        det = non_max_suppression_numpy(outputs, CONF_THRESH, IOU_THRESHOLD, classes=0, max_det=300)[0]  # per-image detections, class 0 only
        # Map detections back to the original image size.
        det[:, :4] = scale_boxes(input_image.shape[2:], det[:, :4], (origin_h, origin_w))
        det = keep_highest_conf_per_class_numpy(det)
        if len(det):
            for *box, conf, class_id in det:
                x_min, y_min, x_max, y_max = map(int, box)

                # Draw the bounding box (green).
                cv2.rectangle(raw_bgr_image, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)

                # Draw the confidence label above the box.
                label = f"Conf: {conf:.2f}"
                cv2.putText(raw_bgr_image, label, (x_min, y_min - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)

            # NOTE(review): the array is BGR but plt expects RGB — the preview
            # will show swapped channels.
            plt.imshow(raw_bgr_image)
            plt.show()

    def get_bbox(self, raw_bgr_image):
        """Run detection and return a single bounding box, or None.

        Args:
            raw_bgr_image: BGR image array of shape (H, W, 3).

        Returns:
            list | None: [x1, y1, x2, y2] of the first kept detection in
            original-image coordinates, or None when nothing is detected.
        """
        input_image, raw_bgr_image, origin_h, origin_w = self.preprocess_image(raw_bgr_image)
        results = self.do_inference(input_image)
        # results (1, 300, 6)
        outputs = results[:2][0]
        det = non_max_suppression_numpy(outputs, CONF_THRESH, IOU_THRESHOLD, classes=0, max_det=300)[0]  # per-image detections, class 0 only
        det[:, :4] = scale_boxes(input_image.shape[2:], det[:, :4], (origin_h, origin_w))
        det = keep_highest_conf_per_class_numpy(det)
        if len(det):
            # Returns inside the loop: only the first box is ever reported.
            for *box, conf, class_id in det:
                return box
        else:
            return None
if __name__ == '__main__':
    # Run a single detection against the serialized TensorRT engine.
    TRTPATH = r"yolov10s.engine"

    trt_inference = TRTInference(TRTPATH)
    image_path = r"../../data/2.jpg"
    # Load with OpenCV so the array is BGR, matching preprocess_image's
    # cv2.COLOR_BGR2RGB conversion. PIL's Image.open yields RGB, which would
    # feed the network channel-swapped data.
    image_bgr = cv2.imread(image_path)
    if image_bgr is None:
        # cv2.imread returns None instead of raising on a bad path.
        raise FileNotFoundError(f"image not found: {image_path}")
    t1 = time.time()
    trt_inference.infer(image_bgr)
    print(f"Inference time: {time.time() - t1} seconds")