import json
import ctypes
import os
import shutil
import random
import sys
import threading
import time
from venv import logger

import cv2
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
import torch
import torchvision

CONF_THRESH = 0.5
IOU_THRESHOLD = 0.5

from torch._C import *  # noqa: F403


# ImportError: libcudnn.so.8: cannot open shared object file: No such file or directory


# Build the GStreamer pipeline description for a CSI camera (nvarguscamerasrc).
def gstreamer_pipeline(
        capture_width=800,  # width captured by the sensor
        capture_height=600,  # height captured by the sensor
        display_width=800,  # width of the displayed frame
        display_height=600,  # height of the displayed frame
        framerate=60,  # capture frame rate
        flip_method=0,  # rotation/flip mode applied by nvvidconv
):
    """Return a GStreamer pipeline string that delivers BGR frames to appsink."""
    template = (
        "nvarguscamerasrc ! "
        "video/x-raw(memory:NVMM), "
        "width=(int)%d, height=(int)%d, "
        "format=(string)NV12, framerate=(fraction)%d/1 ! "
        "nvvidconv flip-method=%d ! "
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
        "videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink max-buffers=1 drop=True "
    )
    values = (
        capture_width,
        capture_height,
        framerate,
        flip_method,
        display_width,
        display_height,
    )
    return template % values


class Colors:
    """Ultralytics color palette (https://ultralytics.com/).

    Holds a fixed 20-color palette. Instances are callable: they map an
    integer class id to an RGB (or BGR) tuple, wrapping around the palette.
    """

    def __init__(self):
        # Renamed from `hex` to avoid shadowing the builtin of the same name.
        hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
                '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb('#' + c) for c in hexs]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        """Return the palette color for class id `i` as (r, g, b), or (b, g, r) when `bgr`."""
        c = self.palette[int(i) % self.n]
        return (c[2], c[1], c[0]) if bgr else c

    @staticmethod
    def hex2rgb(h):
        """Convert a '#RRGGBB' hex string to an (r, g, b) integer tuple (PIL order)."""
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))

colors = Colors()  # module-level palette instance, used to color detection boxes


def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """
    Draw one bounding box (and an optional label) on `img` in place.

    param:
        x: box coordinates (x1, y1, x2, y2)
        img: BGR image (numpy array), modified in place
        color: BGR color tuple; a random color is picked when None
        label: optional text drawn above the box
        line_thickness: line width; derived from the image size when None
    """
    tl = (
            line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
    )  # line/font thickness
    # Fall back to a random color so passing color=None no longer crashes cv2.rectangle.
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled label background
        cv2.putText(
            img,
            label,
            (c1[0], c1[1] - 2),
            0,
            tl / 3,
            [225, 255, 255],
            thickness=tf,
            lineType=cv2.LINE_AA,
        )


# The 80 COCO class names; the list index of each name is the class id produced by the model.
categories = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
              "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
              "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
              "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
              "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
              "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
              "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard",
              "cell phone",
              "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
              "hair drier", "toothbrush"]


class YoLov5TRT(object):
    """
    description: 一个YOLOv5类，封装了TensorRT操作、预处理和后处理操作。
    """

    def __init__(self, engine_file_path):
        self.input_name = "images"
        self.output_name = "output0"
        # 在此设备上创建一个上下文
        self.ctx = cuda.Device(0).make_context()
        stream = cuda.Stream()
        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
        runtime = trt.Runtime(TRT_LOGGER)

        # 从文件反序列化引擎
        with open(engine_file_path, "rb") as f:
            engine = runtime.deserialize_cuda_engine(f.read())

        context = engine.create_execution_context()

        host_inputs = []
        cuda_inputs = []
        host_outputs = []
        cuda_outputs = []
        bindings = []

        for binding in engine:
            print('bingding:', binding, engine.get_tensor_shape(binding))
            shape = engine.get_tensor_shape(binding)  # 获取张量的完整形状（包含batch维度）
            size = trt.volume(shape)  # 直接计算总元素数量
            dtype = trt.nptype(engine.get_tensor_dtype(binding))
            # 分配主机和设备缓冲区
            host_mem = cuda.pagelocked_empty(size, dtype)
            cuda_mem = cuda.mem_alloc(host_mem.nbytes)
            # 将设备缓冲区附加到设备绑定
            bindings.append(int(cuda_mem))
            # 附加到相应的列表
            if engine.get_tensor_mode(binding) == trt.TensorIOMode.INPUT:
                self.input_w = engine.get_tensor_shape(binding)[-1]
                self.input_h = engine.get_tensor_shape(binding)[-2]
                host_inputs.append(host_mem)
                cuda_inputs.append(cuda_mem)
            else:
                host_outputs.append(host_mem)
                cuda_outputs.append(cuda_mem)

        # 存储
        self.stream = stream
        self.context = context
        self.engine = engine
        self.host_inputs = host_inputs
        self.cuda_inputs = cuda_inputs
        self.host_outputs = host_outputs
        self.cuda_outputs = cuda_outputs
        self.bindings = bindings

        # 获取输入张量名称
        for i in range(engine.num_io_tensors):
            name = engine.get_tensor_name(i)
            if engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT:
                self.input_name = name
            elif engine.get_tensor_mode(name) == trt.TensorIOMode.OUTPUT:
                self.output_name = name

        input_shape = engine.get_tensor_shape(self.input_name)
        self.batch_size = input_shape[0]  # 获取形状中的batch维度
        print(f"从输入形状推断的批大小: {self.batch_size}")

    def infer(self, img):

        threading.Thread.__init__(self)
        # 将self设为活动上下文，并将其推入上下文堆栈的顶部
        self.ctx.push()
        # 恢复
        stream = self.stream
        context = self.context
        engine = self.engine
        host_inputs = self.host_inputs
        cuda_inputs = self.cuda_inputs
        host_outputs = self.host_outputs
        cuda_outputs = self.cuda_outputs
        bindings = self.bindings
        # 进行图像预处理
        batch_origin_h = []
        batch_origin_w = []
        input_image, image_raw, origin_h, origin_w = self.preprocess_image(img)
        batch_origin_h.append(origin_h)
        batch_origin_w.append(origin_w)
        # 将输入图像复制到主机缓冲区
        np.copyto(host_inputs[0], input_image.ravel())
        # 在推理前设置输入形状
        context.set_input_shape(self.input_name, (1, 3, self.input_h, self.input_w))
        start = time.time()
        # 将输入数据传输到GPU
        cuda.memcpy_htod_async(cuda_inputs[0], host_inputs[0], stream)
        # 运行推理
        name = None
        for i in range(engine.num_io_tensors):
            name = engine.get_tensor_name(i)
            is_input = engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT
            print("输入" if is_input else "输出", name)
        input_shape = (1, 3, 640, 640)  # 批大小1，3通道，640x640图像

        input_host = np.ascontiguousarray(img.astype(np.float32))  # 转换为 float32

        # context.set_input_shape(name, input_shape)  # 必要时设置输入 shape
        # 在 infer 函数中，在执行推理前添加：
        context.set_input_shape(self.input_name, (1, 3, self.input_h, self.input_w))
        # cuda.memcpy_htod_async(input_ptr, input_host, stream)
        context.set_tensor_address(self.input_name, int(cuda_inputs[0]))

        context.set_tensor_address(self.output_name, int(cuda_outputs[0]))

        context.execute_async_v3(stream_handle=stream.handle)

        # context.execute_async_v3(batch_size=self.batch_size, bindings=bindings, stream_handle=stream.handle)
        # 从GPU传回预测结果
        cuda.memcpy_dtoh_async(host_outputs[0], cuda_outputs[0], stream)
        # 同步流
        stream.synchronize()
        end = time.time()
        self.ctx.pop()
        trt_outputs = host_outputs[0]
        fake_result = {}
        fake_result["algorithm_data"] = {
            "is_alert": False,
            "target_count": 0,
            "target_info": []
        }

        fake_result["model_data"] = {
            "objects": []
        }

        # 进行后处理
        # 修复后处理部分 - YOLOv5的输出格式是 [batch_size, num_detections, 85]
        output_shape = (1, 25200, 85)  # YOLOv5的标准输出形状
        trt_outputs_reshaped = trt_outputs.reshape(output_shape)

        result_boxes, result_scores, result_classid = self.post_process_v5(
            trt_outputs_reshaped[0], origin_h, origin_w
        )

        for j in range(len(result_boxes)):
            box = result_boxes[j]
            conf = result_scores[j]
            cls = result_classid[j]
            xyxy_list = torch.tensor(box).view(1, 4).view(-1).tolist()
            conf_list = conf.tolist()
            label = categories[int(cls)]
            color = colors(int(cls), True)
            plot_one_box(box, img, color=color, label="{}:{:.2f}".format(label, conf))
            # plot_one_box(box,batch_image_raw[i],color=color,label="{}:{:.2f}".format(label, conf))
            # plot_one_box(box,batch_image_raw[i],label="{}:{:.2f}".format(label, conf))
            fake_result['model_data']['objects'].append({
                "xmin": int(xyxy_list[0]),
                "ymin": int(xyxy_list[1]),
                "xmax": int(xyxy_list[2]),
                "ymax": int(xyxy_list[3]),
                "confidence": conf_list,
                "name": label
            })
            if label == 'person':
                fake_result['algorithm_data']['target_info'].append({
                    "xmin": int(xyxy_list[0]),
                    "ymin": int(xyxy_list[1]),
                    "xmax": int(xyxy_list[2]),
                    "ymax": int(xyxy_list[3]),
                    "confidence": conf_list,
                    "name": label
                })
        fake_result['algorithm_data']['is_alert'] = True if len(
            fake_result['algorithm_data']['target_info']) > 0 else False
        fake_result['algorithm_data']["target_count"] = len(fake_result['algorithm_data']['target_info'])

        # 在infer方法中添加
        print(f"检测到目标数量: {len(result_boxes)}")
        for j in range(len(result_boxes)):
            print(f"目标{j}: 类别={categories[int(result_classid[j])]}, 置信度={result_scores[j]:.2f}")

        return json.dumps(fake_result, indent=4), end - start

    def bbox_iou(self, box1, box2, x1y1x2y2=True):
        """
        description: 计算两个边界框的IoU
        param:
            box1: 一个框坐标 (可以是 (x1, y1, x2, y2) 或 (x, y, w, h))
            box2: 一个框坐标 (可以是 (x1, y1, x2, y2) 或 (x, y, w, h))
            x1y1x2y2: 选择坐标格式
        return:
            iou: 计算出的iou值
        """
        if not x1y1x2y2:
            # 从中心和宽高转换为精确坐标
            b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
            b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
            b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
            b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
        else:
            # 获取边界框的坐标
            b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
            b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

        # 获取相交矩形的坐标
        inter_rect_x1 = np.maximum(b1_x1, b2_x1)
        inter_rect_y1 = np.maximum(b1_y1, b2_y1)
        inter_rect_x2 = np.minimum(b1_x2, b2_x2)
        inter_rect_y2 = np.minimum(b1_y2, b2_y2)
        # 相交区域面积
        inter_area = np.clip(inter_rect_x2 - inter_rect_x1 + 1, 0, None) * \
                     np.clip(inter_rect_y2 - inter_rect_y1 + 1, 0, None)
        # 合并区域面积
        b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
        b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)

        iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)

        return iou

    def destroy(self):
        # 从上下文堆栈顶部移除任何上下文，使其失效
        self.ctx.pop()

    def non_max_suppression(self, prediction, origin_h, origin_w, conf_thres=0.5, nms_thres=0.4):
        """
        description: 移除置信度分数低于'conf_thres'的检测结果，并执行
        非极大值抑制（NMS）以进一步过滤检测结果
        param:
            prediction: 检测结果, (x1, y1, x2, y2, conf, cls_id)
            origin_h: 原始图像高度
            origin_w: 原始图像宽度
            conf_thres: 用于过滤检测的置信度阈值
            nms_thres: 用于过滤检测的iou阈值
        return:
            boxes: 经过NMS处理后的输出，形状为 (x1, y1, x2, y2, conf, cls_id)
        """
        # 获取分数 > CONF_THRESH 的框
        boxes = prediction[prediction[:, 4] >= conf_thres]
        # 将bbox从[center_x, center_y, w, h]转换为[x1, y1, x2, y2]
        boxes[:, :4] = self.xywh2xyxy(origin_h, origin_w, boxes[:, :4])
        # 裁剪坐标
        boxes[:, 0] = np.clip(boxes[:, 0], 0, origin_w - 1)
        boxes[:, 2] = np.clip(boxes[:, 2], 0, origin_w - 1)
        boxes[:, 1] = np.clip(boxes[:, 1], 0, origin_h - 1)
        boxes[:, 3] = np.clip(boxes[:, 3], 0, origin_h - 1)
        # 对象置信度
        confs = boxes[:, 4]
        # 按置信度排序
        boxes = boxes[np.argsort(-confs)]
        # 执行非极大值抑制
        keep_boxes = []
        while boxes.shape[0]:
            large_overlap = self.bbox_iou(np.expand_dims(boxes[0, :4], 0), boxes[:, :4]) > nms_thres
            label_match = boxes[0, -1] == boxes[:, -1]
            # 置信度较低、IOU较大且标签匹配的框的索引
            invalid = large_overlap & label_match
            keep_boxes += [boxes[0]]
            boxes = boxes[~invalid]
        boxes = np.stack(keep_boxes, 0) if len(keep_boxes) else np.array([])
        return boxes

    def post_process_v5(self, output, origin_h, origin_w):
        """
        description: YOLOv5格式的后处理
        param:
            output: 形状为 [25200, 85] 的numpy数组
            origin_h: 原始图像的高度
            origin_w: 原始图像的宽度
        return:
            result_boxes: 最终的框
            result_scores: 最终的分数
            result_classid: 最终的类别id
        """
        # YOLOv5输出格式: [cx, cy, w, h, conf, cls0, cls1, ..., cls79]
        # 提取置信度和类别分数
        box_conf = output[:, 4:5]  # 目标置信度
        class_conf = output[:, 5:]  # 类别置信度

        # 计算最终置信度
        class_max_conf = np.max(class_conf, axis=1, keepdims=True)
        class_max_id = np.argmax(class_conf, axis=1)
        final_conf = box_conf.flatten() * class_max_conf.flatten()

        # 过滤低置信度检测
        valid_indices = final_conf > CONF_THRESH
        if not np.any(valid_indices):
            return np.array([]), np.array([]), np.array([])

        # 获取有效的检测结果
        valid_boxes = output[valid_indices, :4]  # [cx, cy, w, h]
        valid_conf = final_conf[valid_indices]
        valid_class_id = class_max_id[valid_indices]

        # 将有效的检测结果（cx, cy, w, h）和置信度、类别ID组合起来
        detections = np.column_stack([valid_boxes, valid_conf, valid_class_id])

        # 应用NMS
        final_boxes = self.non_max_suppression(detections, origin_h, origin_w,
                                               conf_thres=CONF_THRESH, nms_thres=IOU_THRESHOLD)

        if len(final_boxes) == 0:
            return np.array([]), np.array([]), np.array([])

        result_boxes = final_boxes[:, :4]
        result_scores = final_boxes[:, 4]
        result_classid = final_boxes[:, 5]

        return result_boxes, result_scores, result_classid

    def xywh2xyxy(self, origin_h, origin_w, x):
        """
        description: 将nx4的box从[x, y, w, h]转换为[x1, y1, x2, y2]，其中xy1是左上角，xy2是右下角
        param:
            origin_h: 原始图像的高度
            origin_w: 原始图像的宽度
            x: 一个boxes numpy数组, 每一行是一个box [center_x, center_y, w, h]
        return:
            y: 一个boxes numpy数组, 每一行是一个box [x1, y1, x2, y2]
        """
        y = np.zeros_like(x)
        r_w = self.input_w / origin_w
        r_h = self.input_h / origin_h
        if r_h > r_w:
            y[:, 0] = x[:, 0] - x[:, 2] / 2
            y[:, 2] = x[:, 0] + x[:, 2] / 2
            y[:, 1] = x[:, 1] - x[:, 3] / 2 - (self.input_h - r_w * origin_h) / 2
            y[:, 3] = x[:, 1] + x[:, 3] / 2 - (self.input_h - r_w * origin_h) / 2
            y /= r_w
        else:
            y[:, 0] = x[:, 0] - x[:, 2] / 2 - (self.input_w - r_h * origin_w) / 2
            y[:, 2] = x[:, 0] + x[:, 2] / 2 - (self.input_w - r_h * origin_w) / 2
            y[:, 1] = x[:, 1] - x[:, 3] / 2
            y[:, 3] = x[:, 1] + x[:, 3] / 2
            y /= r_h

            # 计算缩放比例
            scale = min(r_w, r_h)

            # 计算填充
            pad_w = (self.input_w - scale * origin_w) / 2
            pad_h = (self.input_h - scale * origin_h) / 2

            # 转换中心坐标和宽高到左上角和右下角坐标
            y[:, 0] = (x[:, 0] - pad_w) / scale  # x1 = (center_x - pad_w) / scale
            y[:, 1] = (x[:, 1] - pad_h) / scale  # y1 = (center_y - pad_h) / scale
            y[:, 2] = (x[:, 0] - pad_w) / scale + x[:, 2] / scale  # x2 = x1 + w/scale
            y[:, 3] = (x[:, 1] - pad_h) / scale + x[:, 3] / scale  # y2 = y1 + h/scale

            # 裁剪到图像边界
            y[:, 0] = np.clip(y[:, 0], 0, origin_w - 1)
            y[:, 1] = np.clip(y[:, 1], 0, origin_h - 1)
            y[:, 2] = np.clip(y[:, 2], 0, origin_w - 1)
            y[:, 3] = np.clip(y[:, 3], 0, origin_h - 1)

        return y

    def post_process(self, output, origin_h, origin_w):
        """
        description: 对预测进行后处理
        param:
            output: 一个numpy数组，格式类似[num_boxes,cx,cy,w,h,conf,cls_id, cx,cy,w,h,conf,cls_id, ...]
            origin_h: 原始图像的高度
            origin_w: 原始图像的宽度
        return:
            result_boxes: 最终的框，一个boxes numpy数组，每行是一个框[x1, y1, x2, y2]
            result_scores: 最终的分数，一个numpy数组，每个元素是对应框的分数
            result_classid: 最终的类别id，一个numpy数组，每个元素是对应框的类别id
        """
        # 获取检测到的框的数量
        num = int(output[0])
        # 重塑为一个二维ndarray
        pred = np.reshape(output[1:], (-1, 6))[:num, :]
        boxes = self.non_max_suppression(pred, origin_h, origin_w, conf_thres=CONF_THRESH, nms_thres=IOU_THRESHOLD)
        result_boxes = boxes[:, :4] if len(boxes) else np.array([])
        result_scores = boxes[:, 4] if len(boxes) else np.array([])
        result_classid = boxes[:, 5] if len(boxes) else np.array([])
        return result_boxes, result_scores, result_classid

    def preprocess_image(self, raw_bgr_image):
        """
        description: 将BGR图像转换为RGB，
                     调整大小并填充到目标尺寸，归一化到[0,1]，
                     转换为NCHW格式。
        param:
            input_image_path: 字符串，图像路径
        return:
            image: 处理后的图像
            image_raw: 原始图像
            h: 原始高度
            w: 原始宽度
        """
        image_raw = raw_bgr_image
        h, w, c = image_raw.shape
        image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB)
        # 计算宽度和高度以及填充
        r_w = self.input_w / w
        r_h = self.input_h / h
        if r_h > r_w:
            tw = self.input_w
            th = int(r_w * h)
            tx1 = tx2 = 0
            ty1 = int((self.input_h - th) / 2)
            ty2 = self.input_h - th - ty1
        else:
            tw = int(r_h * w)
            th = self.input_h
            tx1 = int((self.input_w - tw) / 2)
            tx2 = self.input_w - tw - tx1
            ty1 = ty2 = 0
        # 在保持长宽比的同时，将图像的长边调整为目标尺寸
        image = cv2.resize(image, (tw, th))
        # 用(128,128,128)填充短边
        image = cv2.copyMakeBorder(
            image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, None, (128, 128, 128)
        )
        image = image.astype(np.float32)
        # 归一化到[0,1]
        image /= 255.0
        # HWC转CHW格式:
        image = np.transpose(image, [2, 0, 1])
        # CHW转NCHW格式
        image = np.expand_dims(image, axis=0)
        # 将图像转换为行主序，也称为"C顺序"：
        image = np.ascontiguousarray(image)
        return image, image_raw, h, w


if __name__ == "__main__":
    # Load the custom TensorRT plugin library and the serialized engine.
    # Both paths can be overridden from the command line:
    #   argv[1] = engine file, argv[2] = plugin library.
    PLUGIN_LIBRARY = "build/libmyplugins.so"
    engine_file_path = "build/yolov5s.engine"

    if len(sys.argv) > 1:
        engine_file_path = sys.argv[1]
    if len(sys.argv) > 2:
        PLUGIN_LIBRARY = sys.argv[2]

    ctypes.CDLL(PLUGIN_LIBRARY)
    # One YoLov5TRT instance for the whole capture session.
    yolov5_wrapper = YoLov5TRT(engine_file_path)

    # ---------------------------------------------------------------- video inference
    # For a CSI camera use: video_path = gstreamer_pipeline(flip_method=0)
    video_path = 0  # device index 0 = default camera
    cap = cv2.VideoCapture(video_path)

    try:
        while True:
            ret, img = cap.read()
            if not ret:
                break
            fake_result, use_time = yolov5_wrapper.infer(img)
            print(fake_result)
            print(use_time)
            cv2.imshow("win", img)
            # Quit when the '0' key is pressed.
            if cv2.waitKey(1) & 0xff == ord('0'):
                break
    finally:
        # Release the capture and CUDA context even if the loop raised.
        cap.release()
        yolov5_wrapper.destroy()
        cv2.destroyAllWindows()


    # -------------------------------------------------------------------------------------------------------------------------------  推理单张图片

    # img_path = "image/bus_gai.jpg"
    # parent, filename = os.path.split(img_path)
    # output_dir = os.getcwd() + "/output/"
    # if os.path.exists(output_dir):
    #     shutil.rmtree(output_dir)
    # os.mkdir(output_dir)
    # img = cv2.imread(img_path)
    # try:
    #     fake_result, use_time = yolov5_wrapper.infer(img)
    #     cv2.imwrite(output_dir + filename, img)
    #     print(fake_result)
    #     print(use_time * 1000)
    #
    # finally:
    #     yolov5_wrapper.destroy()

    # -------------------------------------------------------------------------------------------------------------------------------  推理一个文件夹下的所有图片
    """
    work_dir="samples/"
    files=os.listdir(work_dir)
    output_dir=os.getcwd()+"/output/"
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.mkdir(output_dir)
    try:
        for file in files:
            file_path=os.path.join(work_dir,file)
            parent, filename = os.path.split(file_path)        
            img=cv2.imread(file_path)
            fake_result,use_time = yolov5_wrapper.infer(img)
            cv2.imwrite(output_dir+filename,img)
            #print(fake_result)
            print(use_time*1000)
    finally:
        yolov5_wrapper.destroy()

   """
# -------------------------------------------------------------------------------------------------------------------------------------
