import threading
import time
import cv2
import numpy as np
import requests

# Endpoint of the YOLOv5s object-detection HTTP service (LAN server).
DETECTION_URL = "http://10.101.0.204:6666/v1/object-detection/yolov5s"
CONFIDENCE = 0.5  # minimum detection confidence; tunes object-detection sensitivity
MIN_AREA = 60000  # minimum contour area in px^2; tunes motion-detection sensitivity
INTERVAL = 3  # minimum interval between snapshots sent for detection, in seconds
rtsp = 0  # camera index (0 = default webcam); cv2.VideoCapture also accepts an RTSP URL here
detect_region = [0, 0, 1, 1]  # detection region as fractions of frame size: x1, y1, x2, y2
cam_width, cam_height = 1280, 720  # requested camera resolution

# Detection region converted from fractional coordinates to absolute pixel coordinates.
detect_x1, detect_y1 = int(detect_region[0] * cam_width), int(detect_region[1] * cam_height)
detect_x2, detect_y2 = int(detect_region[2] * cam_width), int(detect_region[3] * cam_height)
# Rectangle corners (clockwise from top-left) used to build the region-of-interest mask.
points = np.array([(detect_x1, detect_y1), (detect_x2, detect_y1), (detect_x2, detect_y2), (detect_x1, detect_y2)])


def send_pic(frame_to_save):
    """Send a frame to the YOLOv5 detection service; save it if a person is found.

    Runs on a background thread. Boxes and labels are drawn onto
    ``frame_to_save`` in place, and the annotated image is written to
    ``image/<timestamp>.jpg`` when at least one person's bounding-box center
    falls inside the configured detection region.

    Args:
        frame_to_save: BGR image (numpy array) to analyze and annotate.
    """
    time_start_send = time.time()
    # Encode as PNG (lossless) so the detector receives the exact frame.
    image_data = cv2.imencode('.png', frame_to_save)[1].tobytes()
    try:
        # Bound the request so a hung detection server cannot pile up threads;
        # without a timeout requests.post can block forever.
        response = requests.post(DETECTION_URL, files={"image": image_data}, timeout=30).json()
    except (requests.RequestException, ValueError) as exc:
        # RequestException: network/HTTP failure; ValueError: non-JSON reply.
        # Log and drop this frame instead of crashing the worker thread.
        print("detection request failed: {}".format(exc))
        return
    time_end_send = time.time()
    print("detect complete,consumed {} s".format(str(time_end_send - time_start_send)))

    save_flag = 0  # number of persons detected inside the region
    font = cv2.FONT_HERSHEY_SIMPLEX
    for obj in response:
        if obj['name'] == 'person' and obj['confidence'] > CONFIDENCE:
            x1, y1, x2, y2 = int(obj['xmin']), int(obj['ymin']), int(obj['xmax']), int(obj['ymax'])
            center_x, center_y = int((x1 + x2) / 2), int((y1 + y2) / 2)
            # Count the detection only if the box center lies inside the region.
            if detect_x1 <= center_x <= detect_x2 and detect_y1 <= center_y <= detect_y2:
                cv2.rectangle(frame_to_save, (x1, y1), (x2, y2), (255, 255, 0), 1)
                cv2.putText(frame_to_save, obj['name'], (x1, y1), font, 1.2, (255, 255, 0), 1)
                save_flag += 1

    if save_flag > 0:
        # Draw the detection region and persist the annotated frame.
        cv2.rectangle(frame_to_save, (detect_x1, detect_y1), (detect_x2, detect_y2), (255, 0, 255), 2)
        cv2.imwrite(filename="image/" + str(time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())) + ".jpg",
                    img=frame_to_save)
        print(str(time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())), 'detected {} people'.format(str(save_flag)))


def add_new_cam(rtsp_path, points):
    """Watch a camera stream and dispatch frames with motion for detection.

    Runs MOG2 background subtraction restricted to the region-of-interest
    polygon; when a sufficiently large moving blob appears and at least
    INTERVAL seconds have passed since the last dispatch, the raw frame is
    handed to ``send_pic`` on a background thread. Two debug windows
    ("gray", "contours") are shown continuously.

    Args:
        rtsp_path: camera index or RTSP URL for cv2.VideoCapture.
        points: polygon corner points (pixels) delimiting the detection region.
    """
    # Gaussian-mixture background subtractor used for cheap motion detection.
    mog = cv2.createBackgroundSubtractorMOG2()

    cap = cv2.VideoCapture(rtsp_path)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, cam_width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cam_height)
    ret, frame = cap.read()
    if not ret:
        # Camera unavailable: bail out instead of crashing on frame.shape below.
        print("failed to read from camera, exiting")
        cap.release()
        return
    # Mask that blacks out everything outside the detection polygon.
    mask = np.zeros(frame.shape, np.uint8)
    mask = cv2.fillPoly(mask, [points], (255, 255, 255))

    # Timestamp of the last frame dispatched for detection (rate limiting).
    time_now = time.time()
    # Structuring element for morphological opening; loop-invariant, so built once.
    kernel = np.ones((5, 5), np.uint8)

    try:
        while True:
            rect_count = 0
            ret, frame = cap.read()
            if not ret:
                # Stream ended or dropped; stop cleanly instead of crashing on None.
                print("failed to read frame, stopping")
                break
            frame_to_save = frame.copy()  # clean copy sent to the detector
            frame_to_show = frame.copy()  # copy annotated for display
            # Restrict motion detection to the region of interest.
            frame = cv2.bitwise_and(frame, mask)

            # Binary foreground mask of moving pixels for the current frame.
            fgmask = mog.apply(frame)
            # Morphological opening removes small speckle noise from the mask.
            gray_frame = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
            # contours: outlines of moving blobs; hierarchy is unused here.
            contours, hierarchy = cv2.findContours(gray_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            # Box every sufficiently large moving blob on the display frame.
            for contour in contours:
                if cv2.contourArea(contour) < MIN_AREA:  # ignore small movements
                    continue
                (x, y, w, h) = cv2.boundingRect(contour)
                cv2.rectangle(frame_to_show, (x, y), (x + w, y + h), (255, 255, 0), 2)
                rect_count += 1

            # Dispatch at most one frame every INTERVAL seconds.
            if rect_count > 0 and time.time() - time_now > INTERVAL:
                print("Dynamic object detected,sending detect pic")
                threading.Thread(target=send_pic, args=(frame_to_save,)).start()
                time_now = time.time()

            cv2.rectangle(frame_to_show, (detect_x1, detect_y1), (detect_x2, detect_y2), (255, 0, 255), 2)
            cv2.imshow("gray", gray_frame)
            cv2.imshow("contours", frame_to_show)
            cv2.waitKey(1)
    finally:
        # Always release the camera and close the debug windows.
        cap.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    # Start watching the default camera with the configured region polygon.
    # Guarded so importing this module does not immediately open the camera
    # and block in the capture loop.
    add_new_cam(rtsp, points)


