import ast
import ctypes
import os
import threading
import time

import cv2
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
from matplotlib.path import Path


def inRegions(c1, c2, c3, c4, Monitoring_Regions):
    """Return True iff all four box corners lie inside every monitoring region.

    Parameters:
        c1, c2, c3, c4: (x, y) integer pixel coordinates — the four corners
            of a detection rectangle.
        Monitoring_Regions: iterable of polygons. Each entry is either a
            string literal such as "[(x1, y1), (x2, y2), ...]" or an
            already-parsed sequence of (x, y) vertices.

    Returns:
        bool: False as soon as any corner falls outside any region.
    """
    for region in Monitoring_Regions:
        # SECURITY: the original code used eval() here, which executes
        # arbitrary Python embedded in a region string. ast.literal_eval
        # parses only Python literals (tuples/lists/numbers), so it is a
        # drop-in replacement for well-formed vertex strings that cannot
        # run code.
        vertices = ast.literal_eval(region) if isinstance(region, str) else region
        polygon = Path(vertices)
        if not all(polygon.contains_point(corner) for corner in (c1, c2, c3, c4)):
            return False
    return True


def _draw_class_label(img, c1, text, color, tl):
    """Draw `text` on a filled color background anchored above corner `c1`."""
    tf = max(tl - 1, 1)  # font thickness
    t_size = cv2.getTextSize(text, 0, fontScale=tl / 3, thickness=tf)[0]
    bg_c2 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3)
    cv2.rectangle(img, c1, bg_c2, color, -1, cv2.LINE_AA)  # filled background
    cv2.putText(
        img,
        text,
        (c1[0], c1[1] - 2),
        0,
        tl / 3,
        [225, 255, 255],
        thickness=tf,
        lineType=cv2.LINE_AA,
    )


def plot_one_box(x, img, color, Monitoring_Regions, flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender,flag_chair,label=None, line_thickness=None):
    """Draw one detection box on `img` and update the per-class alarm flags.

    Parameters:
        x: box coordinates [x1, y1, x2, y2].
        img: BGR image (modified in place).
        color: BGR color for the box.
        Monitoring_Regions: regions passed through to inRegions(); a box whose
            corners are not inside every region is skipped entirely.
        flag_*: integer alarm flags threaded through and returned (set to 1
            when the corresponding class is detected inside the regions).
        label: "<class>:<conf>" string, e.g. "person:0.93". The class is
            identified by its prefix; the confidence is the last 4 chars.
        line_thickness: optional box line thickness; derived from the image
            size when omitted.

    Returns:
        (Results_Loc, Results_Cls, Results_Conf,
         flag_fire, flag_knife, flag_fall, flag_fight,
         flag_person, flag_gender, flag_chair)
        Results_Loc is "cx,cy,w,h" (empty when the box was skipped or the
        class is unknown).
    """
    tl = (line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1)

    Results_Loc = ''
    Results_Cls = ''
    Results_Conf = ''

    print('label:==================================================================', label)

    # Identify the detected class from the label prefix. The original code
    # repeated the whole draw/check pipeline once per class; the branches
    # were mutually exclusive, so a single unified path is equivalent.
    if label[:6] == 'person':
        cls = 'person'
    elif label[:5] == 'chair':
        cls = 'chair'
    elif label[:5] == 'woman' or label[:3] == 'man':
        cls = 'gender'
    elif label[:5] == 'knife':
        cls = 'knife'
    elif label[:4] == 'fire':
        cls = 'fire'
    elif label[:5] == 'fight':
        cls = 'fight'
    else:
        cls = None

    if cls is not None:
        # NOTE: the confidence is captured before the region check, so an
        # out-of-region early return still carries it (original behavior).
        Results_Conf = label[-4:]
        c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
        c3, c4 = (int(x[2]), int(x[1])), (int(x[0]), int(x[3]))
        if not inRegions(c1, c2, c3, c4, Monitoring_Regions):
            # Skip boxes outside the monitored regions; flags untouched.
            return Results_Loc, Results_Cls, Results_Conf, flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender,flag_chair

        cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
        width = int(x[2]) - int(x[0])
        height = int(x[3]) - int(x[1])

        if cls == 'person':
            flag_person = 1
            # A wide box (or one touching the image bottom) is treated as a
            # fallen person ("down"); otherwise the person is upright.
            if width / height < 1.8 or x[3] > 0.95 * img.shape[0]:
                text = 'up'
            else:
                text = 'down'
                flag_fall = 1
            Results_Cls = text
        elif cls == 'chair':
            flag_chair = 1
            text = label  # chair keeps the raw "chair:<conf>" text (original behavior)
            Results_Cls = 'chair'
        elif cls == 'gender':
            flag_gender = 1
            text = 'woman' if label[:5] == 'woman' else 'man'
            Results_Cls = text
        elif cls == 'knife':
            flag_knife = 1
            text = 'knife'
            Results_Cls = text
        elif cls == 'fire':
            flag_fire = 1
            text = 'fire'
            Results_Cls = text
        else:  # fight
            flag_fight = 1
            text = 'fight'
            Results_Cls = text

        _draw_class_label(img, c1, text, color, tl)

        center_x = (int(x[0]) + int(x[2])) // 2
        center_y = (int(x[1]) + int(x[3])) // 2
        Results_Loc = f'{center_x},{center_y},{width},{height}'

    print('plot_box结束=======================================================================')
    return Results_Loc, Results_Cls, Results_Conf, flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender,flag_chair


class YoLov5TRT(object):
    """YOLOv5 detector backed by a serialized TensorRT engine.

    Owns a dedicated CUDA context plus page-locked host and device buffers,
    and exposes infer(), which runs letterbox preprocessing, the engine,
    NMS post-processing and per-detection drawing via plot_one_box().
    """

    def __init__(self, engine_file_path):
        """Deserialize the engine at `engine_file_path` and allocate all
        I/O buffers once, up front.

        NOTE(review): this uses the implicit-batch TensorRT API
        (max_batch_size / get_binding_*), so it assumes an engine built
        with that (deprecated) API — confirm against the engine builder.
        """
        # A dedicated context so inference can be driven from worker threads.
        self.ctx = cuda.Device(0).make_context()
        stream = cuda.Stream()
        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
        runtime = trt.Runtime(TRT_LOGGER)

        with open(engine_file_path, "rb") as f:
            engine = runtime.deserialize_cuda_engine(f.read())
        context = engine.create_execution_context()

        host_inputs = []
        cuda_inputs = []
        host_outputs = []
        cuda_outputs = []
        bindings = []

        for binding in engine:
            print('binding:', binding, engine.get_binding_shape(binding))
            # Total element count for this binding across the max batch.
            size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
            dtype = trt.nptype(engine.get_binding_dtype(binding))
            # Page-locked host memory is required for async H2D/D2H copies.
            host_mem = cuda.pagelocked_empty(size, dtype)
            cuda_mem = cuda.mem_alloc(host_mem.nbytes)
            bindings.append(int(cuda_mem))
            if engine.binding_is_input(binding):
                # Input is NCHW; remember spatial dims for preprocessing.
                self.input_w = engine.get_binding_shape(binding)[-1]
                self.input_h = engine.get_binding_shape(binding)[-2]
                host_inputs.append(host_mem)
                cuda_inputs.append(cuda_mem)
            else:
                host_outputs.append(host_mem)
                cuda_outputs.append(cuda_mem)

        self.stream = stream
        self.context = context
        self.engine = engine
        self.host_inputs = host_inputs
        self.cuda_inputs = cuda_inputs
        self.host_outputs = host_outputs
        self.cuda_outputs = cuda_outputs
        self.bindings = bindings
        self.batch_size = engine.max_batch_size

    def infer(self, image_raw, categories, color, CONF_THRESH, IOU_THRESHOLD, 
              Monitoring_Regions, flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender,flag_chair=0):
        """Run one detection pass over `image_raw` (BGR, HWC).

        Parameters:
            image_raw: input frame; returned annotated in place.
            categories: class-id -> class-name list used to build labels.
            color: box color forwarded to plot_one_box (defaults to red).
            CONF_THRESH / IOU_THRESHOLD: NMS thresholds for this call.
            Monitoring_Regions, flag_*: forwarded to plot_one_box.

        Returns:
            (annotated image, Results_Locs, Results_Clses, Results_Confs,
             flag_fire, flag_knife, flag_fall, flag_fight, flag_person,
             flag_gender, flag_chair)
        """
        print('come in infer==========================================================================================')
        self.CONF_THRESH = CONF_THRESH
        self.IOU_THRESHOLD = IOU_THRESHOLD
        # (A stray `threading.Thread.__init__(self)` call was removed here:
        # this class is not a Thread subclass, so it only polluted the
        # instance with unrelated Thread state.)
        if color is None:
            color = [0, 0, 255]
        self.ctx.push()  # make this instance's CUDA context current
        stream = self.stream
        context = self.context
        host_inputs = self.host_inputs
        cuda_inputs = self.cuda_inputs
        host_outputs = self.host_outputs
        cuda_outputs = self.cuda_outputs
        bindings = self.bindings
        # Stage the batch in the engine's input dtype. (np.empty previously
        # defaulted to float64, doubling the staging copy for no benefit.)
        batch_input_image = np.empty(
            shape=[self.batch_size, 3, self.input_h, self.input_w],
            dtype=host_inputs[0].dtype,
        )
        input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_raw)
        np.copyto(batch_input_image[0], input_image)
        batch_input_image = np.ascontiguousarray(batch_input_image)

        np.copyto(host_inputs[0], batch_input_image.ravel())
        cuda.memcpy_htod_async(cuda_inputs[0], host_inputs[0], stream)
        context.execute_async(batch_size=self.batch_size, bindings=bindings, stream_handle=stream.handle)
        cuda.memcpy_dtoh_async(host_outputs[0], cuda_outputs[0], stream)
        stream.synchronize()
        self.ctx.pop()
        output = host_outputs[0]
        # Output layout appears to be [count, then up to 1000 boxes x 6
        # floats] = 6001 values for image 0 — TODO confirm against the
        # engine's output head.
        result_boxes, result_scores, result_classid = self.post_process(
            output[0: 6001], origin_h, origin_w)
        Results_Locs = []
        Results_Clses = []
        Results_Confs = []
        for j in range(len(result_boxes)):
            box = result_boxes[j]
            Results_Loc, Results_Cls, Results_Conf, flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender,flag_chair = plot_one_box(
                box,
                image_raw,
                color,
                Monitoring_Regions,
                flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender, flag_chair,
                label="{}:{:.2f}".format(categories[int(result_classid[j])], result_scores[j])
            )
            # Skip detections that fell outside the monitoring regions.
            if Results_Loc != '':
                Results_Locs.append(Results_Loc)
                Results_Clses.append(Results_Cls)
                Results_Confs.append(Results_Conf)
        print('检测结束==================================================')
        return image_raw, Results_Locs, Results_Clses, Results_Confs, flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender,flag_chair

    def destroy(self):
        """Release this instance's CUDA context (must be called once, at shutdown)."""
        self.ctx.pop()

    def preprocess_image(self, raw_bgr_image):
        """Letterbox `raw_bgr_image` to the network input size.

        Returns:
            (image, image_raw, h, w): normalized NCHW float32 tensor with
            gray (128) padding, the untouched original image, and its
            original height/width.
        """
        image_raw = raw_bgr_image
        h, w, c = image_raw.shape
        image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB)
        # Scale by the limiting dimension and pad the other to keep aspect.
        r_w = self.input_w / w
        r_h = self.input_h / h
        if r_h > r_w:
            tw = self.input_w
            th = int(r_w * h)
            tx1 = tx2 = 0
            ty1 = int((self.input_h - th) / 2)
            ty2 = self.input_h - th - ty1
        else:
            tw = int(r_h * w)
            th = self.input_h
            tx1 = int((self.input_w - tw) / 2)
            tx2 = self.input_w - tw - tx1
            ty1 = ty2 = 0
        image = cv2.resize(image, (tw, th))
        image = cv2.copyMakeBorder(
            image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, None, (128, 128, 128)
        )
        image = image.astype(np.float32)
        image /= 255.0
        image = np.transpose(image, [2, 0, 1])  # HWC -> CHW
        image = np.expand_dims(image, axis=0)   # CHW -> NCHW
        image = np.ascontiguousarray(image)
        return image, image_raw, h, w

    def xywh2xyxy(self, origin_h, origin_w, x):
        """Convert network-space (cx, cy, w, h) boxes to original-image
        (x1, y1, x2, y2), undoing the letterbox scale and padding."""
        y = np.zeros_like(x)
        r_w = self.input_w / origin_w
        r_h = self.input_h / origin_h
        if r_h > r_w:
            # Width was the limiting dimension: remove vertical padding.
            y[:, 0] = x[:, 0] - x[:, 2] / 2
            y[:, 2] = x[:, 0] + x[:, 2] / 2
            y[:, 1] = x[:, 1] - x[:, 3] / 2 - (self.input_h - r_w * origin_h) / 2
            y[:, 3] = x[:, 1] + x[:, 3] / 2 - (self.input_h - r_w * origin_h) / 2
            y /= r_w
        else:
            # Height was the limiting dimension: remove horizontal padding.
            y[:, 0] = x[:, 0] - x[:, 2] / 2 - (self.input_w - r_h * origin_w) / 2
            y[:, 2] = x[:, 0] + x[:, 2] / 2 - (self.input_w - r_h * origin_w) / 2
            y[:, 1] = x[:, 1] - x[:, 3] / 2
            y[:, 3] = x[:, 1] + x[:, 3] / 2
            y /= r_h

        return y

    def post_process(self, output, origin_h, origin_w):
        """Decode the raw engine output into NMS-filtered detections.

        `output[0]` is the detection count; the rest is rows of
        (cx, cy, w, h, conf, class_id).

        Returns:
            (result_boxes, result_scores, result_classid) — empty arrays
            when nothing survives NMS.
        """
        num = int(output[0])
        pred = np.reshape(output[1:], (-1, 6))[:num, :]
        boxes = self.non_max_suppression(pred, origin_h, origin_w, conf_thres=self.CONF_THRESH, nms_thres=self.IOU_THRESHOLD)
        result_boxes = boxes[:, :4] if len(boxes) else np.array([])
        result_scores = boxes[:, 4] if len(boxes) else np.array([])
        result_classid = boxes[:, 5] if len(boxes) else np.array([])
        return result_boxes, result_scores, result_classid

    def bbox_iou(self, box1, box2, x1y1x2y2=True):
        """Vectorized IoU between `box1` and each row of `box2`.

        Boxes are (x1, y1, x2, y2) when `x1y1x2y2`, else (cx, cy, w, h).
        Uses the inclusive-pixel (+1) area convention.
        """
        if not x1y1x2y2:
            b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
            b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
            b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
            b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
        else:
            b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
            b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

        inter_rect_x1 = np.maximum(b1_x1, b2_x1)
        inter_rect_y1 = np.maximum(b1_y1, b2_y1)
        inter_rect_x2 = np.minimum(b1_x2, b2_x2)
        inter_rect_y2 = np.minimum(b1_y2, b2_y2)
        # Clip to zero so disjoint boxes contribute no intersection area.
        inter_area = np.clip(inter_rect_x2 - inter_rect_x1 + 1, 0, None) * \
                     np.clip(inter_rect_y2 - inter_rect_y1 + 1, 0, None)
        b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
        b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)

        # Epsilon guards against division by zero for degenerate boxes.
        iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)

        return iou

    def non_max_suppression(self, prediction, origin_h, origin_w, conf_thres=0.5, nms_thres=0.4):
        """Class-aware greedy NMS over (cx, cy, w, h, conf, cls) rows.

        Returns an (N, 6) array of surviving boxes with coordinates
        converted to original-image x1y1x2y2 and clipped to the image.
        """
        # Boolean indexing copies, so in-place edits below don't touch `prediction`.
        boxes = prediction[prediction[:, 4] >= conf_thres]
        boxes[:, :4] = self.xywh2xyxy(origin_h, origin_w, boxes[:, :4])
        boxes[:, 0] = np.clip(boxes[:, 0], 0, origin_w - 1)
        boxes[:, 2] = np.clip(boxes[:, 2], 0, origin_w - 1)
        boxes[:, 1] = np.clip(boxes[:, 1], 0, origin_h - 1)
        boxes[:, 3] = np.clip(boxes[:, 3], 0, origin_h - 1)
        confs = boxes[:, 4]
        boxes = boxes[np.argsort(-confs)]  # highest confidence first
        keep_boxes = []
        while boxes.shape[0]:
            # Suppress same-class boxes overlapping the current best.
            large_overlap = self.bbox_iou(np.expand_dims(boxes[0, :4], 0), boxes[:, :4]) > nms_thres
            label_match = boxes[0, -1] == boxes[:, -1]
            invalid = large_overlap & label_match
            keep_boxes += [boxes[0]]
            boxes = boxes[~invalid]
        boxes = np.stack(keep_boxes, 0) if len(keep_boxes) else np.array([])
        return boxes


def save_clips(frames, sence):
    """Write `frames` (list of BGR images) to result/<sence>/<timestamp>.mp4.

    Fixes over the original:
    - Frame size is taken from the frames themselves instead of an undefined
      module-level `cap` capture object (which crashed when absent and could
      silently mismatch the frames, producing a corrupt clip).
    - The timestamp no longer contains ':' (invalid in Windows filenames).
    - The output directory is created if missing.
    """
    if not frames:
        return  # nothing to write
    clock = time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))
    out_dir = f'result/{sence}'
    os.makedirs(out_dir, exist_ok=True)
    height, width = frames[0].shape[:2]
    videoWriter_clips = cv2.VideoWriter(
        f'{out_dir}/{clock}.mp4',
        cv2.VideoWriter_fourcc(*'mp4v'),
        25,
        (int(width), int(height)),
        True,
    )
    for frame in frames:
        videoWriter_clips.write(frame)
    videoWriter_clips.release()

