import os
import time

#from config.config_setting import logger

import ctypes
import os
import random
import sys
import threading

import cv2
import numpy as np
#import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
import torch
import torchvision


# NOTE: this inference script depends on the .engine and .so files under the
# build folder; both are generated after training. The deployment machine's GPU
# must match the GPU used at training time, otherwise errors occur — i.e. a model
# built for one GPU model can only be deployed on that same GPU model. For a
# different GPU model the engine must be rebuilt on that GPU before use.

# 模型加载和推理
INPUT_W = 640
INPUT_H = 640
CONF_THRESH = 0.5
IOU_THRESHOLD = 0.5

def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """Draw one bounding box (and an optional label) onto an OpenCV image.

    This helper comes from the YOLOv5 project; the image is modified in place.

    param:
        x:      a box like [x1, y1, x2, y2]
        img:    an OpenCV image object (numpy ndarray, BGR)
        color:  rectangle colour such as (0, 255, 0); random when omitted
        label:  optional text drawn above the top-left corner
        line_thickness: optional int; derived from the image size when omitted
    return:
        no return
    """
    # Derive a thickness proportional to the image size when none is given.
    thickness = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
    box_color = color or [random.randint(0, 255) for _ in range(3)]
    top_left = (int(x[0]), int(x[1]))
    bottom_right = (int(x[2]), int(x[3]))
    cv2.rectangle(img, top_left, bottom_right, box_color,
                  thickness=thickness, lineType=cv2.LINE_AA)
    if label:
        font_thickness = max(thickness - 1, 1)
        text_w, text_h = cv2.getTextSize(
            label, 0, fontScale=thickness / 3, thickness=font_thickness
        )[0]
        # Filled background rectangle sized to the rendered text.
        label_corner = (top_left[0] + text_w, top_left[1] - text_h - 3)
        cv2.rectangle(img, top_left, label_corner, box_color, -1, cv2.LINE_AA)
        cv2.putText(
            img,
            label,
            (top_left[0], top_left[1] - 2),
            0,
            thickness / 3,
            [225, 255, 255],
            thickness=font_thickness,
            lineType=cv2.LINE_AA,
        )

class YoLov5TRT(object):
    """
    description: A YOLOv5 class that wraps TensorRT ops, preprocess and postprocess ops.
    """

    def __init__(self, engine_file_path):
        """
        description: Deserialize the engine and allocate all host/device
                     buffers once, so infer() only copies data and runs.
        param:
            engine_file_path: str, path to the serialized TensorRT .engine file
        """
        # Create a dedicated CUDA context on device 0; it is pushed/popped
        # around each inference so the wrapper can be driven from threads.
        self.cfx = cuda.Device(0).make_context()
        stream = cuda.Stream()
        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
        runtime = trt.Runtime(TRT_LOGGER)

        # Deserialize the engine from file
        with open(engine_file_path, "rb") as f:
            engine = runtime.deserialize_cuda_engine(f.read())
        context = engine.create_execution_context()

        host_inputs = []
        cuda_inputs = []
        host_outputs = []
        cuda_outputs = []
        bindings = []

        for binding in engine:
            size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
            dtype = trt.nptype(engine.get_binding_dtype(binding))
            # Allocate page-locked host memory (required for async copies)
            # and a matching device buffer.
            host_mem = cuda.pagelocked_empty(size, dtype)
            cuda_mem = cuda.mem_alloc(host_mem.nbytes)
            # Device addresses in binding order, as expected by execute_async.
            bindings.append(int(cuda_mem))
            # Append to the appropriate list.
            if engine.binding_is_input(binding):
                host_inputs.append(host_mem)
                cuda_inputs.append(cuda_mem)
            else:
                host_outputs.append(host_mem)
                cuda_outputs.append(cuda_mem)

        # Store everything infer() needs.
        self.stream = stream
        self.context = context
        self.engine = engine
        self.host_inputs = host_inputs
        self.cuda_inputs = cuda_inputs
        self.host_outputs = host_outputs
        self.cuda_outputs = cuda_outputs
        self.bindings = bindings

    def infer(self, frame, categories=None):
        """
        description: Run the network on one BGR frame and draw detections on it.
        param:
            frame:      an OpenCV BGR image (numpy ndarray)
            categories: optional sequence of class names indexed by class id.
                        When omitted, a module-level ``categories`` list is
                        used if one exists; otherwise the raw numeric class
                        id is shown. (The original code read an undefined
                        global and raised NameError as soon as a box was drawn.)
        return:
            the original frame with boxes and labels drawn on it
        """
        # Make self the active context, pushing it on top of the context stack.
        self.cfx.push()
        # Restore
        stream = self.stream
        context = self.context
        host_inputs = self.host_inputs
        cuda_inputs = self.cuda_inputs
        host_outputs = self.host_outputs
        cuda_outputs = self.cuda_outputs
        bindings = self.bindings

        fps = 0.0

        t1 = time.time()

        # Do image preprocess
        input_image, image_raw, origin_h, origin_w = self.preprocess_image(frame)
        # Copy input image to host buffer
        np.copyto(host_inputs[0], input_image.ravel())
        # Transfer input data to the GPU.
        cuda.memcpy_htod_async(cuda_inputs[0], host_inputs[0], stream)
        # Run inference.
        context.execute_async(bindings=bindings, stream_handle=stream.handle)
        # Transfer predictions back from the GPU.
        cuda.memcpy_dtoh_async(host_outputs[0], cuda_outputs[0], stream)
        # Synchronize the stream
        stream.synchronize()
        # Remove any context from the top of the context stack, deactivating it.
        self.cfx.pop()
        # Here we use the first row of output in that batch_size = 1
        output = host_outputs[0]

        # Do postprocess
        result_boxes, result_scores, result_classid = self.post_process(
            output, origin_h, origin_w
        )

        # Report per-frame latency and (half of the) instantaneous FPS,
        # matching the original averaging formula.
        t2 = time.time()
        print("推理时间time={}".format(t2 - t1))
        fps = (fps + (1. / (time.time() - t1))) / 2
        print("fps= %.2f" % (fps))

        # Resolve the label list: explicit argument first, then a
        # module-level ``categories`` set by the caller.
        if categories is None:
            categories = globals().get("categories")

        # Draw rectangles and labels on the original image
        for i in range(len(result_boxes)):
            class_id = int(result_classid[i])
            name = categories[class_id] if categories else str(class_id)
            plot_one_box(
                result_boxes[i],
                image_raw,
                label="{}:{:.2f}".format(name, result_scores[i]),
            )

        return image_raw

    def destory(self):
        """Pop (release) the CUDA context created in __init__.

        Call once when the wrapper is no longer needed. In the original
        code this def was nested inside infer() after its return statement,
        so the method never existed and callers got AttributeError; the
        misspelled name is kept for backward compatibility.
        """
        # Remove any context from the top of the context stack, deactivating it.
        self.cfx.pop()

    # Conventionally spelled alias for new callers.
    destroy = destory

    def preprocess_image(self, frame):
        """
        description: Convert a BGR frame to RGB, resize and letterbox-pad it
                     to (INPUT_W, INPUT_H), normalize to [0,1] and transform
                     to NCHW float32.
        param:
            frame: an OpenCV BGR image (numpy ndarray)
        return:
            image:  the processed NCHW image
            image_raw: the original frame (untouched)
            h: original height
            w: original width
        """
        h, w, c = frame.shape
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Calculate width and height and paddings
        r_w = INPUT_W / w
        r_h = INPUT_H / h
        if r_h > r_w:
            # Width is the limiting side: pad top/bottom.
            tw = INPUT_W
            th = int(r_w * h)
            tx1 = tx2 = 0
            ty1 = int((INPUT_H - th) / 2)
            ty2 = INPUT_H - th - ty1
        else:
            # Height is the limiting side: pad left/right.
            tw = int(r_h * w)
            th = INPUT_H
            tx1 = int((INPUT_W - tw) / 2)
            tx2 = INPUT_W - tw - tx1
            ty1 = ty2 = 0
        # Resize the image with long side while maintaining ratio
        image = cv2.resize(image, (tw, th))
        # Pad the short side with (128,128,128). The 7th positional argument
        # of copyMakeBorder is `dst`, so the pad colour must be passed as the
        # `value` keyword (the original passed it positionally).
        image = cv2.copyMakeBorder(
            image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, value=(128, 128, 128)
        )
        image = image.astype(np.float32)
        # Normalize to [0,1]
        image /= 255.0
        # HWC to CHW format:
        image = np.transpose(image, [2, 0, 1])
        # CHW to NCHW format
        image = np.expand_dims(image, axis=0)
        # Convert the image to row-major order, also known as "C order":
        image = np.ascontiguousarray(image)
        return image, frame, h, w

    def xywh2xyxy(self, origin_h, origin_w, x):
        """
        description:    Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2]
                        where xy1=top-left, xy2=bottom-right, undoing the
                        letterbox padding applied in preprocess_image().
        param:
            origin_h:   height of original image
            origin_w:   width of original image
            x:          A boxes tensor, each row is a box [center_x, center_y, w, h]
        return:
            y:          A boxes tensor, each row is a box [x1, y1, x2, y2]
        """
        y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
        r_w = INPUT_W / origin_w
        r_h = INPUT_H / origin_h
        if r_h > r_w:
            # Image was padded top/bottom: shift y by the pad before scaling.
            y[:, 0] = x[:, 0] - x[:, 2] / 2
            y[:, 2] = x[:, 0] + x[:, 2] / 2
            y[:, 1] = x[:, 1] - x[:, 3] / 2 - (INPUT_H - r_w * origin_h) / 2
            y[:, 3] = x[:, 1] + x[:, 3] / 2 - (INPUT_H - r_w * origin_h) / 2
            y /= r_w
        else:
            # Image was padded left/right: shift x by the pad before scaling.
            y[:, 0] = x[:, 0] - x[:, 2] / 2 - (INPUT_W - r_h * origin_w) / 2
            y[:, 2] = x[:, 0] + x[:, 2] / 2 - (INPUT_W - r_h * origin_w) / 2
            y[:, 1] = x[:, 1] - x[:, 3] / 2
            y[:, 3] = x[:, 1] + x[:, 3] / 2
            y /= r_h

        return y

    def post_process(self, output, origin_h, origin_w):
        """
        description: postprocess the prediction
        param:
            output:     A tensor likes [num_boxes,cx,cy,w,h,conf,cls_id, cx,cy,w,h,conf,cls_id, ...]
            origin_h:   height of original image
            origin_w:   width of original image
        return:
            result_boxes: finally boxes, a boxes tensor, each row is a box [x1, y1, x2, y2]
            result_scores: finally scores, a tensor, each element is the score corresponding to box
            result_classid: finally classid, a tensor, each element is the classid corresponding to box
        """
        # Get the num of boxes detected (first element of the flat output).
        num = int(output[0])
        # Reshape to a two dimensional ndarray of [cx, cy, w, h, conf, cls].
        pred = np.reshape(output[1:], (-1, 6))[:num, :]
        # to a torch Tensor on the GPU for thresholding and NMS
        pred = torch.Tensor(pred).cuda()
        boxes = pred[:, :4]
        scores = pred[:, 4]
        classid = pred[:, 5]
        # Choose those boxes that score > CONF_THRESH
        si = scores > CONF_THRESH
        boxes = boxes[si, :]
        scores = scores[si]
        classid = classid[si]
        # Transform bbox from [center_x, center_y, w, h] to [x1, y1, x2, y2]
        boxes = self.xywh2xyxy(origin_h, origin_w, boxes)
        # Do nms
        indices = torchvision.ops.nms(boxes, scores, iou_threshold=IOU_THRESHOLD).cpu()
        result_boxes = boxes[indices, :].cpu()
        result_scores = scores[indices].cpu()
        result_classid = classid[indices].cpu()
        return result_boxes, result_scores, result_classid

class myThread(threading.Thread):
    """Minimal worker thread: runs ``func(*args)`` once when started."""

    def __init__(self, func, args):
        # Initialise the Thread machinery, then stash the callable and its
        # positional arguments for run().
        super().__init__()
        self.func = func
        self.args = args

    def run(self):
        # Invoked by Thread.start(); simply forwards to the stored callable.
        self.func(*self.args)


# def prepare_trt(origin_img_q, result_img_q, algo_id, reqs):
#     cuda.init()
#     PLUGIN_LIBRARY = './' + MODEL_PATH[algo_id] + '/build/libmyplugins.so'
#     #print(PLUGIN_LIBRARY)
#     #PLUGIN_LIBRARY = "/home/detect/model/safety_helmet_model/build/libmyplugins.so"  #如果不行就给绝对地址
#     ctypes.CDLL(PLUGIN_LIBRARY)
#     #engine_file_path = "/home/detect/model/safety_helmet_model/build/yolov5s.engine"
#     engine_file_path = './' + MODEL_PATH[algo_id] + '/build/yolov5s.engine'
#     #print(engine_file_path)

#     yolov5_wrapper = YoLov5TRT(engine_file_path)

#     devListId = 0
#     dcnt = len(reqs)
#     (devids, devlist) = zip(*reqs.items())

#     while True:
#         imgList = []

#         devid = devids[devListId]
#         devListId += 1
#         if devListId == dcnt: devListId = 0
#         if origin_img_q[devid].qsize() == 0:
#             time.sleep(0.1)
#             #print("未获取到视频流！")
#             continue
#         image = origin_img_q[devid].get()

#         detect_results = None

#         # 判断在当前时间算法是否可运行
#         timer = reqs[devid][algo_id]["timeList"][0] if "timeList" in reqs[devid][algo_id].keys() else []
#         if timer != []:
#             timer_s, timer_e = timer[0]["startTime"], timer[0]["endTime"]  # 提取起止时间
#             if timer_s <= time.strftime('%H:%M') <= timer_e:
#                 if image is not None:
#                     if min(image.shape[:2]) < 100:
#                         logger.error("May the image of camera is None.")
#                         continue
#                     imgList.append((image, devid))
#                     detect_results = yolov5_wrapper.infer(image)
#             else:
#                 continue
#         else:
#             if image is not None:
#                 if min(image.shape[:2]) < 100:
#                     logger.error("May the image of camera is None.")
#                     continue
#                 imgList.append((image, devid))
#                 detect_results = yolov5_wrapper.infer(image)

#         # thread1 = myThread(yolov5_wrapper.infer, image)
#         # thread1.start()
#         # thread1.join()

#         for i in range(len(imgList)):
#             #a,b,c= detect_results
#             #detect_result = a[i], b[i], c[i]
#             image, devid= imgList[i]
#             if detect_results is not None:
#                 result_img_q[devid].put((image, detect_results))
#             else:
#                 result_img_q[devid].put(image)

#     yolov5_wrapper.destory()


def prepare_trt_1(image):
    """
    description: Run one YOLOv5-TensorRT inference on a single frame.
    param:
        image: an OpenCV BGR image (numpy ndarray)
    return:
        the annotated image produced by YoLov5TRT.infer() (None if the
        wrapper produced nothing)
    NOTE(review): this builds the CUDA context and deserializes the engine on
    every call, which is very expensive — callers invoking it per frame should
    cache the YoLov5TRT instance instead.
    """
    # infer() resolves the label list from module scope; publish it there so
    # class ids map to human-readable names instead of raising NameError.
    global categories

    cuda.init()
    # NOTE(review): hard-coded absolute paths — the plugin/.engine are tied to
    # the GPU model they were built on; adjust per deployment machine.
    PLUGIN_LIBRARY = "/home/fcwl/桌面/detect_v2/model/call_model/build/libmyplugins.so"
    ctypes.CDLL(PLUGIN_LIBRARY)
    engine_file_path = '/home/fcwl/桌面/detect_v2/model/call_model/build/yolov5s.engine'
    yolov5_wrapper = YoLov5TRT(engine_file_path)

    categories = ['call']

    try:
        detect_results = yolov5_wrapper.infer(image)
    finally:
        # Release the CUDA context even if inference raises. The destory()
        # method may be absent on older builds of the wrapper (its def was
        # unreachable), so look it up defensively.
        destroy = getattr(yolov5_wrapper, "destory", None)
        if callable(destroy):
            destroy()
    return detect_results


if __name__ == "__main__":
    # Simple webcam demo loop: grab a frame, run inference, show the result.
    cap = cv2.VideoCapture(0)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                # Camera hiccup — skip this iteration and retry.
                continue
            # prepare_trt_1 returns the annotated frame (the original code
            # discarded it and then showed an undefined name `image_raw`,
            # raising NameError on every frame).
            annotated = prepare_trt_1(frame)
            # Fall back to the raw frame so the preview keeps updating even
            # when inference returned nothing.
            cv2.imshow("capture", annotated if annotated is not None else frame)
            if cv2.waitKey(100) & 0xFF == ord('q'):
                break
    finally:
        # Always release the camera and close windows, even on Ctrl-C.
        cap.release()
        cv2.destroyAllWindows()