import os
import cv2
import numpy as np

import tensorrt as trt
import pycuda.autoinit  # 不导入会报错
import pycuda.driver as cuda


class HostDeviceMem(object):
    """Bundle of a pinned host (CPU) buffer and its matching device (GPU) buffer."""

    def __init__(self, host_mem, device_mem):
        """Store the paired buffers: ``host_mem`` lives in CPU memory, ``device_mem`` on the GPU."""
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return f"Host:\n{self.host}\nDevice:\n{self.device}"

    def __repr__(self):
        return str(self)


class yolo_trt():
    def __init__(self, trt_path, clas_names, point_dim, draw_line, kpt_color, line_color, conf_thres=0.25):
        """Load a serialized TensorRT engine and prepare buffers/metadata for inference.

        Args:
            trt_path: path to the serialized ``.engine`` file.
            clas_names: list of class names the model predicts.
            point_dim: values per keypoint (2 -> [x, y]; 3 -> [x, y, visibility]).
            draw_line: keypoint-index pairs to connect when drawing the skeleton.
            kpt_color: per-keypoint BGR colors.
            line_color: per-skeleton-line BGR colors.
            conf_thres: box confidence threshold used in post-processing.
        """
        # Create a TensorRT logger (warnings and above)
        logger = trt.Logger(trt.Logger.WARNING)
        runtime = trt.Runtime(logger)

        # Load the serialized engine from disk
        with open(trt_path, "rb") as f:
            serialized_engine = f.read()
        # Deserialize the engine
        engine = runtime.deserialize_cuda_engine(serialized_engine)

        # Basic model parameters; iterating the engine yields its I/O tensor names.
        # NOTE(review): assumes exactly one input and one output tensor — confirm for other engines.
        input_name, output_name = engine  # input/output tensor names
        self.batchsize, c, IMG_HEIGHT, IMG_WIDTH = engine.get_tensor_shape(input_name)  # (1, 3, 640, 640)
        self.out_shape = engine.get_tensor_shape(output_name)  # (1, 56, 8400)

        # Dynamic-input profile shapes as [min, opt, max]  # e.g. [(1, 3, 640, 640), (1, 3, 640, 640), (1, 3, 640, 640)]
        self.input_shape = engine.get_tensor_profile_shape(input_name, 0)
        print("trt_input_shape[形状的最小，常规，最大值]:", self.input_shape)

        # Dynamic axes: only a dynamic batch size is handled here
        self.dynamic_n = False
        if self.batchsize == -1:  # batch dim is dynamic
            self.dynamic_n = True
            # fall back to the "opt" profile batch size
            self.batchsize = self.input_shape[1][0]

        # Build the execution context
        self.context = engine.create_execution_context()  # context.get_binding_shape(0) also returns the input shape

        # bindings holds the device addresses of all input/output buffers
        self.inputs, self.outputs, self.bindings, self.stream = self.allocate_buffers(engine)

        # Fix the input shape to the "opt" shape so buffers match
        self.context.set_input_shape(input_name, self.input_shape[1])

        self.engine = engine

        self.draw_line = draw_line
        self.point_dim = point_dim
        self.kpt_color = kpt_color
        self.line_color = line_color

        self.new_shape = [IMG_HEIGHT, IMG_WIDTH]  # model input shape [height, width]
        self.clas_names = clas_names  # class names
        self.conf_thres = conf_thres  # box confidence threshold
        self.color = (114, 114, 114)  # letterbox padding color (gray)
        self.color_list = self.SetColor(len(self.clas_names))

    def detect(self, image):
        """Run the full pipeline on one BGR image: preprocess, TensorRT inference, post-process, draw.

        Args:
            image: original BGR image (HWC np.ndarray).

        Returns:
            (result, draw_img): result is a list of dicts
            {'cls_id', 'conf', 'box' [x1, y1, w, h], 'keypoints'} in original-image
            coordinates; draw_img is the annotated image.
        """
        img = image.copy()  # copy so the original image is untouched

        # Preprocess: letterbox to the model input size, e.g. (640, 640, 3)
        img, pad, scale = self.resize(img)
        # BGR -> RGB, HWC -> CHW -> NCHW
        float_img = self.normalize(img)

        # Model input batch
        input_imgs = np.concatenate([float_img], axis=0)
        # input_imgs = np.concatenate([img_np_nchw, img_np_nchw], axis=0)

        # Dynamic batch-size handling
        if self.dynamic_n:
            # the incoming batch size must lie inside the build-time profile range
            if self.input_shape[0][0] <= input_imgs.shape[0] <= self.input_shape[2][0]:
                # a larger batch than currently allocated needs bigger buffers
                if self.batchsize < input_imgs.shape[0]:
                    # update the dynamic input shape; this drives the amount of computation
                    # NOTE(review): set_binding_shape is the legacy API while __init__ uses
                    # set_input_shape — confirm the installed TensorRT supports both
                    self.context.set_binding_shape(0, input_imgs.shape)
                    self.batchsize = input_imgs.shape[0]
                    # bindings holds the device addresses of all input/output buffers
                    self.inputs, self.outputs, self.bindings, self.stream = self.allocate_buffers(self.engine)
            else:
                # dynamic model: batch size outside the build-time range
                print("Error：输入的batchsize在模型范围外，请重新检查....")
                exit()
        else:
            if input_imgs.shape[0] != self.batchsize:
                # static model: batch size does not match the engine
                print("Error：输入的batchsize与模型不匹配，请重新检查....")
                exit()

        # Load the flattened data into the pinned host input buffer
        self.inputs[0].host = input_imgs.reshape(-1)
        # print(len(self.inputs[0].host))  # 1228800

        trt_outputs = self.inference_async_v2(self.context, bindings=self.bindings, inputs=self.inputs,
                                              outputs=self.outputs, stream=self.stream)  # numpy data

        # trt_outputs = self.inference_async(self.context, bindings=self.bindings, inputs=self.inputs,
        #                                       outputs=self.outputs, stream=self.stream, batch_size=1)  # numpy data

        # First output, e.g. (1, 56, 8400): [n, (cx,cy,w,h,class conf,keypoints), (20*20+40*40+80*80)]
        # 20 is the grid count of a 640 input after 32x downsampling.
        # The flat output length equals the space allocated in allocate_buffers.
        # print("trt_output_size", trt_outputs[0].shape)
        out_shape = self.out_shape
        out_shape[0] = self.batchsize
        # slice the flat buffer to the logical size and reshape to the output shape
        preds = trt_outputs[0][0:trt.volume(out_shape)].reshape(out_shape)  # [n, classes, h, w]
        # print("preds_out_shape:", preds.shape)  # (1, 56, 8400)  cx,cy,w,h,clas,point*3=17*3=51

        # Swap axes so each row is one candidate box
        # print(preds.shape)  # (1, 56, 8400)
        preds = preds.transpose(0, 2, 1)
        # print(pred.shape)  # (1, 8400, 56)

        # Post-process: returns a list of length batch; classes selects which
        # class ids to keep, None keeps all
        preds = self.non_max_suppression(preds, conf_thres=self.conf_thres, iou_thres=0.45, classes=None)
        pred = preds[0]  # first image of the batch
        # print(pred.shape)  # (1, 57)  cx,cy,w,h,conf,clas,point*3=17*3=51

        # [{'clas_id':0,'conf':0.5,'box':[x1, y1, w, h],'keypoints':npArray(17,3)},{...}]
        result = self.pred2result(pred, pad, scale, image.shape)

        # Draw the detections on the original image
        draw_img = self.draw_img(image, result)

        return result, draw_img

    def inference_async(self, context, bindings, inputs, outputs, stream, batch_size=1):
        """Run inference with the implicit-batch ``execute_async`` API and return host outputs."""
        # Copy input data host -> device
        for inp in inputs:
            cuda.memcpy_htod_async(inp.device, inp.host, stream)
        # Launch inference on the stream
        context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
        # Copy predictions device -> host
        for out in outputs:
            cuda.memcpy_dtoh_async(out.host, out.device, stream)
        # Wait for all queued work to finish
        stream.synchronize()
        # Only the host-side buffers are returned
        return [out.host for out in outputs]

    def inference_async_v2(self, context, bindings, inputs, outputs, stream):
        """Run inference with the explicit-batch ``execute_async_v2`` API and return host outputs."""
        # Copy input data host -> device
        for inp in inputs:
            cuda.memcpy_htod_async(inp.device, inp.host, stream)
        # Launch inference on the stream
        context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
        # Copy predictions device -> host
        for out in outputs:
            cuda.memcpy_dtoh_async(out.host, out.device, stream)
        # Wait for all queued work to finish
        stream.synchronize()
        # Only the host-side buffers are returned
        return [out.host for out in outputs]

    # Allocate host/device buffers for every engine I/O tensor
    def allocate_buffers(self, engine):
        """Allocate pinned host and device memory for each engine binding.

        Returns:
            (inputs, outputs, bindings, stream): HostDeviceMem lists for the
            input and output tensors, the device buffer addresses, and a fresh
            CUDA stream.
        """
        inputs = []
        outputs = []
        bindings = []
        # create a CUDA stream
        stream = cuda.Stream()
        # iterating the engine yields every input/output tensor name
        for binding in engine:
            # for dynamic axes the reported batch dim is -1
            dims = engine.get_tensor_shape(binding)
            if dims[0] == -1:  # dynamic batch: substitute the current batch size
                dims[0] = self.batchsize
                # dims[0] = 1
            # first call allocates the "opt" shape; a later, larger batch triggers
            # a re-allocation, smaller batches reuse the existing buffers
            size = trt.volume(dims)
            dtype = trt.nptype(engine.get_tensor_dtype(binding))
            # allocate the pinned host buffer
            host_mem = cuda.pagelocked_empty(size, dtype)
            # allocate the matching device buffer
            device_mem = cuda.mem_alloc(host_mem.nbytes)
            # record the device address for the bindings list
            bindings.append(int(device_mem))
            # sort into the input or output list
            # print(engine.get_tensor_mode(binding))
            # NOTE(review): binding_is_input is a legacy API mixed with the newer
            # get_tensor_* calls above — confirm the installed TensorRT still provides it
            if engine.binding_is_input(binding):
                inputs.append(HostDeviceMem(host_mem, device_mem))
            else:
                outputs.append(HostDeviceMem(host_mem, device_mem))
        return inputs, outputs, bindings, stream

    def resize(self, image):
        """Letterbox ``image`` to ``self.new_shape``: scale preserving aspect ratio, pad with ``self.color``.

        Returns:
            (padded_img, [pad_left, pad_top], scale).
        """
        src_h, src_w = image.shape[:2]  # original image size

        # one scale factor keeps the aspect ratio
        ratio = min(self.new_shape[0] / src_h, self.new_shape[1] / src_w)
        scaled = cv2.resize(image, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_CUBIC)

        # pixels still missing on each axis after scaling
        pad_h = self.new_shape[0] - scaled.shape[0]
        pad_w = self.new_shape[1] - scaled.shape[1]

        # split the padding between the two sides of each axis
        left, top = pad_w // 2, pad_h // 2
        right, bottom = pad_w - left, pad_h - top

        # pad out to the exact model input size
        boxed = cv2.copyMakeBorder(scaled, top, bottom, left, right, cv2.BORDER_CONSTANT, value=self.color)

        return boxed, [left, top], ratio

    def normalize(self, resize_img):
        img = resize_img[:, :, ::-1].transpose(2, 0, 1).astype(np.float32)  # (3, 640, 640)

        img /= 255.0
        if img.ndim == 3:
            img = img[None]  # 加batchsize  # (1, 3, 640, 640)

        return img

    def pred2result(self, pred, pad, scale, img_shape):
        # result_lists = []
        # for i, det in enumerate(pred):  # 循环每张图片的标签
        det = pred
        result_list = []
        if len(det):  # 如果图像有建议框  需要把建议框计算到缩放前的原图上
            # 形状，[几个框，一个框几个点，一个点几个坐标]  (1, 17, 3)
            pred_kpts = det[:, 6:].reshape(det.shape[0], -1, self.point_dim)
            boxes, keypoints = self.scale_coords(det[:, :4], pred_kpts, pad, scale, img_shape)
            boxes = boxes.astype(np.int32)
            conf_clas = det[:, 4:6]
            for i in range(det.shape[0]):  # 遍历出每个框
                result_dict = {}
                conf = conf_clas[i][0]
                cls = int(conf_clas[i][1])
                x1, y1 = boxes[i][0:2]
                w, h = boxes[i][2] - boxes[i][0], boxes[i][3] - boxes[i][1]
                keypoint = keypoints[i]

                result_dict["cls_id"] = cls
                result_dict["conf"] = round(conf, 3)
                result_dict["box"] = [x1, y1, w, h]
                result_dict["keypoints"] = keypoint.tolist()
                result_list.append(result_dict)

        return result_list

    def draw_img(self, img, result_box):
        """Draw boxes, labels, keypoints and skeleton lines onto ``img`` (in place) and return it."""
        for det in result_box:
            cls_id = det["cls_id"]
            name = self.clas_names[cls_id]
            score = round(det["conf"], 3)
            x, y, w, h = det["box"]
            keypoints = det["keypoints"]

            # bounding box
            cv2.rectangle(img, (x, y), (x + w, y + h), color=self.color_list[cls_id], thickness=2)

            # class label + confidence above the box
            # cv2.putText(img, label + f"_{area}", (x, y - 7), cv2.FONT_HERSHEY_SIMPLEX, 1, self.color_list[clas_id],3)
            cv2.putText(img, name + f"_{str(score)}", (x, y - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        self.color_list[cls_id],
                        1)

            # keypoints (skip low-visibility points when point_dim == 3)
            for idx, kp in enumerate(keypoints):
                px, py = kp[0:2]
                if self.point_dim == 3 and kp[2] < 0.5:
                    continue
                cv2.circle(img, (int(px), int(py)), 5, self.kpt_color[idx], -1, lineType=cv2.LINE_AA)

            # skeleton: connect keypoint pairs, skipping low-visibility endpoints
            for li, (a, b) in enumerate(self.draw_line):
                pa, pb = keypoints[a], keypoints[b]
                if self.point_dim == 3 and (pa[2] < 0.5 or pb[2] < 0.5):
                    continue
                cv2.line(img, (int(pa[0]), int(pa[1])), (int(pb[0]), int(pb[1])), self.line_color[li], thickness=2,
                         lineType=cv2.LINE_AA)

        return img

    def SetColor(self, num_classes):
        """Build a fixed palette of 125 BGR colors (5*5*5 channel combinations).

        ``num_classes`` is accepted for interface compatibility but does not
        shrink the palette; colors are indexed directly by class id.
        """
        blues = [0, 64, 255, 192, 128]
        greens = [255, 64, 128, 192, 0]
        reds = [128, 192, 0, 64, 255]
        return [[b, g, r] for b in blues for g in greens for r in reds]

    def nms(self, dets, iou_thresh):  # dets [[xmin,ymin,xmax,ymax,scores]....]  iou阈值
        boxes_area = (dets[:, 2] - dets[:, 0]) * (dets[:, 3] - dets[:, 1])  # 所有框的面积
        index = (-dets[:, -1]).argsort()  # 根据置信度从大到小排序后框的索引
        keep = []  # 保留nms后的框的索引

        def iou(box, boxes, box_area, boxes_area):  # 一个框，多个框，一个框的面积，多个框的面积(防止重复计算)
            xx1 = np.maximum(box[0], boxes[:, 0])  # numpy广播
            yy1 = np.maximum(box[1], boxes[:, 1])
            xx2 = np.minimum(box[2], boxes[:, 2])
            yy2 = np.minimum(box[3], boxes[:, 3])

            w = np.maximum(0, xx2 - xx1)
            h = np.maximum(0, yy2 - yy1)

            inter = w * h  # 交集
            ovr = inter / (box_area + boxes_area - inter)  # 交并比

            return ovr

        while index.size > 0:  # 有框时
            i = index[0]  # 置信度最大的框的索引
            keep.append(i)  # 保留
            idx = np.where(iou(dets[index[0]], dets[index[1:]],
                               boxes_area[index[0]], boxes_area[index[1:]]) <= iou_thresh)[0]  # 交集小于阈值的所有索引
            index = index[idx + 1]

        keep = np.array(keep)

        return keep

    # prediction: (1, 8400, 56)  cx,cy,w,h,clas,keypoints(17*3)
    def non_max_suppression(self, prediction, conf_thres=0.25, iou_thres=0.45, classes=None):

        min_wh, max_wh = 2, 4096  # 为了把不同类的框分的更开，这样就可以直接用普通nms直接做

        out_size = prediction.shape[2] - len(self.clas_names) + 2  # 2是输出类别和输出类别置信度
        output = [np.zeros((0, out_size))] * prediction.shape[0]  # list大小为batch

        for xi, x in enumerate(prediction):  # image index, image inference  遍历batchsize
            if not x.shape[0]:  # 没有候选框就跳过
                continue

            box = self.xywh2xyxy(x[:, :4])  # cx,cy,w,h ---> x1,y1,x2,y2  (8400, 4)
            # print(box.shape)

            conf = np.expand_dims(x[:, 4:4 + len(self.clas_names)].max(1), 1)  # 类别置信度的最大值  np.expand_dims扩展维度
            j = np.expand_dims(x[:, 4:4 + len(self.clas_names)].argmax(1), 1)  # 类别置信度最大值的索引  即类别索引

            # 关键点
            keypoints = x[:, 4 + len(self.clas_names):]

            # [x1,y1,x2,y2  置信度  类别索引  关键点]  # (10, 57)
            x = np.concatenate((box, conf, j.astype(conf.dtype), keypoints), 1)[conf.reshape(-1) > conf_thres]

            if classes is not None:
                x = x[(x[:, 5:6] == np.array(classes)).any(1)]  # 把需要的类检索出来  any符合任一条件返回True

            if not x.shape[0]:  # 没有候选框就跳过
                continue

            c = x[:, 5:6] * max_wh  # 类别 * 一个很大的值，加到坐标上，可以把不同类的框分的更开，直接就可以用普通nms
            boxes, scores = x[:, :4] + c, x[:, 4:5]  # 坐标 置信度
            dets = np.concatenate((boxes, scores), 1)  # (71, 5)

            i = self.nms(dets, iou_thres)  # 最终符合条件的索引  (5,)

            output[xi] = x[i]

        return output

    def xywh2xyxy(self, x):
        # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
        y = np.copy(x)
        y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
        y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
        y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
        y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
        return y

    def scale_coords(self, boxes, keypoints, pad, scale, image_shape):  # pad: [top, left]
        boxes[:, [0, 2]] -= pad[0]
        boxes[:, [1, 3]] -= pad[1]

        keypoints[..., 0] -= pad[0]
        keypoints[..., 1] -= pad[1]

        boxes[:, :4] /= scale

        keypoints[..., :2] /= scale

        def clip_coords(boxes, keypoints, img_shape):  # 把坐标限制在图像范围内
            # Clip bounding xyxy bounding boxes to image shape (height, width)
            boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, img_shape[1])  # x1, x2
            boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, img_shape[0])  # y1, y2

            keypoints[..., 0] = keypoints[..., 0].clip(0, img_shape[1])  # x
            keypoints[..., 1] = keypoints[..., 1].clip(0, img_shape[0])  # y

        clip_coords(boxes, keypoints, image_shape)

        return boxes, keypoints


def detect_person(trt_path):
    """Build a yolo_trt detector configured for COCO person pose (17 keypoints).

    Args:
        trt_path: path to the serialized pose engine.

    Returns:
        a ready-to-use yolo_trt instance.
    """
    clas_names = ["person"]
    # values per keypoint: 2 -> [x, y]; 3 -> [x, y, v] where v is visibility
    point_dim = 3
    # keypoint index pairs to connect when drawing the skeleton
    draw_line = [[0, 1], [0, 2], [1, 3], [2, 4], [3, 5], [4, 6],
                 [5, 7], [6, 8], [7, 9], [8, 10], [5, 6],
                 [5, 11], [6, 12], [11, 12],
                 [11, 13], [12, 14], [13, 15], [14, 16]]

    palette = [[255, 128, 0], [255, 51, 255], [51, 153, 255], [0, 255, 0]]

    # per-keypoint colors (palette index per keypoint)
    kpt_color = [palette[i] for i in [3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2]]

    # per-line colors (palette index per skeleton line)
    line_color = [palette[i] for i in [3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]]

    return yolo_trt(trt_path, clas_names, point_dim, draw_line, kpt_color, line_color, conf_thres=0.30)


if __name__ == '__main__':
    import time
    import matplotlib.pyplot as plt
    import yaml

    # Serialized engine and demo image folder
    trt_path = r"./weights/yolov8s-pose.engine"
    images_path = r"data\coco128-pose\images\train2017"

    # label_path = r"../coco_labels.yaml"
    #
    # f = open(label_path, "r")
    # data = f.read()
    # f.close()
    # clas_names = list(yaml.safe_load(data)["names"].values())

    yolo_detect = detect_person(trt_path)

    for img_name in os.listdir(images_path):
        # only process .jpg files
        if img_name.split(".")[-1] != "jpg":
            continue

        img_path = os.path.join(images_path, img_name)

        img = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), 1)  # decode as 3-channel BGR (np.fromfile handles non-ASCII paths)
        # print(img.shape)

        t0 = time.time()

        # for i in range(100):
        # [{'clas_id': 0, 'conf': 0.5, 'box': [x1, y1, w, h], 'keypoints': npArray(17, 3)}, {...}]
        result, out_img = yolo_detect.detect(img.copy())

        t1 = time.time()
        print("time: ", (t1 - t0))  # 0.45675015449523926

        print(result)
        print("==============")
        # if not result:
        # show the original and the annotated image side by side
        plt.subplot(1, 2, 1), plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        plt.subplot(1, 2, 2), plt.imshow(cv2.cvtColor(out_img, cv2.COLOR_BGR2RGB))
        plt.show()
