"""
brief :Editor cjh
"""
import math
import sys
import numpy as np
import cv2
import multiprocessing

sys.path.append("/home/nuc2/PycharmProjects/yolov5-master")
from Map_Reflect_utils import Realsense
import time

# from numba import jit

redcode = 0
bluecode = 1
time_counter_list = []


def runtime_calc(time_list):
    """Average accumulated frame times and return the implied FPS.

    Once more than 50 samples have accumulated, computes the mean frame
    time, clears the list in place (the list is shared with the caller),
    and returns the frame rate rounded to 5 decimals.

    Returns None while fewer than 51 samples exist, or when the mean
    frame time is zero (which would otherwise divide by zero).
    """
    if len(time_list) <= 50:
        return None
    mean_time = sum(time_list) / len(time_list)
    time_list.clear()  # reset the shared accumulator for the next window
    if mean_time == 0:
        return None
    return round(1 / mean_time, 5)


class HSVTrackball:
    """HSV-threshold ball tracker for a RealSense camera stream.

    Segments red or blue balls via HSV masking plus watershed
    segmentation, then publishes the detection closest to the frame
    centre through a shared multiprocessing dict under "coord_data"
    and the annotated frame under "im0".
    """

    def __init__(self, color_mode=redcode):
        # Fallback / last published coordinate; starts at the expected
        # frame centre for a 1280x720 stream.
        self.min_coord = (640, 360)
        # Shared multiprocessing dict; assigned in color_track_main().
        # (Bug fix: was `dict` — the builtin type object — not a value.)
        self.share_data = None
        self.color_mode = color_mode  # redcode or bluecode
        # Per-colour tuning: [HSV lower bound, HSV upper bound,
        # contrast (alpha), brightness (beta)] for convertScaleAbs.
        self.blue_hsv = [np.array([88, 9, 84]), np.array([133, 255, 255]), 0.64, 103.0]
        self.red_hsv = [np.array([162, 23, 0]), np.array([255, 255, 255]), 0.36, 209.0]

    def color_detect(self, frame) -> np.ndarray:
        """Return `frame` masked to the configured colour's HSV range.

        Contrast/brightness are adjusted first so the threshold bounds
        match the tuned values; the resulting in-range mask is applied
        to the *unadjusted* copy of the frame.
        """
        original = frame.copy()
        lower, upper, alpha, beta = self.red_hsv if self.color_mode == redcode else self.blue_hsv
        # Adjust exposure/contrast before thresholding.
        adjusted = cv2.convertScaleAbs(frame, alpha=alpha, beta=beta)
        hsv = cv2.cvtColor(adjusted, cv2.COLOR_BGR2HSV)
        # The in-range bounds set the detection tolerance.
        mask = cv2.inRange(hsv, lower, upper)
        # Apply the mask over the original image.
        return cv2.bitwise_and(original, original, mask=mask)

    def preProcessing_low(self, frame):
        """Low-level pass: blur, colour-mask, and binarise the frame.

        Returns (binary threshold image, colour-masked BGR image).
        """
        # Gaussian blur first to suppress sensor noise.
        blurred = cv2.GaussianBlur(frame.copy(), (7, 7), 5)
        masked = self.color_detect(blurred)
        # Single-channel grayscale, then Otsu binarisation.
        gray = cv2.cvtColor(masked, cv2.COLOR_BGR2GRAY)
        _, frame_thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        return frame_thresh, masked

    def preProcessing_up(self, frame_thresh, frame):
        """High-level pass: watershed segmentation to separate touching balls.

        Takes the binary image and the colour-masked image from
        preProcessing_low(); returns a denoised binary segmentation.
        """
        image_real = frame.copy()
        # NOTE(review): both morphology calls use MORPH_CLOSE; the first was
        # misleadingly named "opening" — renamed, operation kept as-is.
        closed_small = cv2.morphologyEx(frame_thresh, cv2.MORPH_CLOSE,
                                        kernel=np.ones((3, 3), np.uint8), iterations=3)
        # Large closing fills holes left by thresholding.
        closed_large = cv2.morphologyEx(closed_small, cv2.MORPH_CLOSE,
                                        kernel=np.ones((9, 9), np.uint8), iterations=6)
        # Heavy dilation gives the sure-background region.
        sure_bg = cv2.dilate(closed_small, kernel=np.ones((9, 9), np.uint8), iterations=6)

        # Distance transform: bright peaks mark confident foreground seeds.
        distance = cv2.distanceTransform(closed_large, cv2.DIST_L2, 5)
        _, seeds = cv2.threshold(distance, 0.54 * distance.max(), 255, cv2.THRESH_BINARY)
        seeds = np.uint8(seeds)

        # Pixels between sure background and seeds are "unknown" to watershed.
        unknown = cv2.subtract(sure_bg, seeds)
        _, markers = cv2.connectedComponents(seeds)
        markers = markers + 1          # shift so label 0 is free for "unknown"
        markers[unknown == 255] = 0
        markers = cv2.watershed(image_real, markers)
        # Watershed labels segment boundaries -1; black them out directly,
        # no extra mask needed.
        image_real[markers == -1] = (0, 0, 0)
        # Erode so the subsequent threshold cleanly removes boundary pixels.
        image_real = cv2.erode(image_real, kernel=np.ones((3, 3), np.uint8), iterations=3)
        gray = cv2.cvtColor(image_real, cv2.COLOR_BGR2GRAY)
        _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)
        # Final opening removes leftover speckle noise.
        return cv2.morphologyEx(thresh, cv2.MORPH_OPEN,
                                kernel=np.ones((9, 9), np.uint8), iterations=9)

    def draw_contours(self, color_image_Processed_upper, color_image):
        """Find ball contours, annotate the frame, and publish the detection
        closest to the frame centre into share_data["coord_data"].

        Published value is (cx, cy, area) for a detection, or the frame
        centre (cx, cy) when nothing large enough is found.
        """
        height, width = color_image.shape[:2]
        # Generalised: the frame centre was previously hard-coded (640, 360),
        # which only matched a 1280x720 stream.
        center = (int(width / 2), int(height / 2))
        self.min_coord = (width / 2, height / 2)
        min_distance = np.inf
        contours, hierarchy = cv2.findContours(color_image_Processed_upper,
                                               cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)

        for contour in contours:
            peri = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.05 * peri, True)
            x, y, w, h = cv2.boundingRect(approx)
            mid_pos = (int(x + w / 2), int(y + h / 2))
            area = w * h
            if area > 50000:  # ignore blobs too small to be a ball
                cv2.line(color_image, mid_pos, center, (255, 0, 0), 8)
                distance = math.hypot(center[0] - mid_pos[0], center[1] - mid_pos[1])
                if distance < min_distance:
                    min_distance = distance
                    self.min_coord = (mid_pos[0], mid_pos[1], area)
                cv2.rectangle(color_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
                cv2.circle(color_image, mid_pos, 1, (0, 255, 0), 2)
                cv2.putText(color_image, f"{mid_pos[0], mid_pos[1], area}",
                            (int(x + 10 + w / 2), int(y + 10 + h / 2)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        self.share_data["coord_data"] = self.min_coord

    def color_track_main(self, shareVar):
        """Process entry point: grab frames, detect, annotate, publish.

        Runs forever. `shareVar` is a multiprocessing.Manager().dict()
        used to exchange results with the parent process.
        """
        self.share_data = shareVar
        # Camera with serial "213522254578"; publish an initial coordinate
        # so readers never hit a missing key.
        realsense_cam = Realsense.realsense(False, False, "213522254578")
        self.share_data["coord_data"] = self.min_coord
        cap = realsense_cam.cam_init(1080)
        while True:
            start = time.time()
            color_image, depth_colormap, depth_intrin, aligned_depth_frame = realsense_cam.cam_run(cap)

            # Work on a copy; the original frame is only drawn on.
            image_contour = color_image.copy()
            # Low-level processing (blur, mask, threshold).
            color_image_thresh, color_image_mask = self.preProcessing_low(image_contour)
            # High-level processing (watershed segmentation).
            color_image_Processed_upper = self.preProcessing_up(color_image_thresh, color_image_mask)
            # Contour detection and annotation.
            self.draw_contours(color_image_Processed_upper, color_image)

            # Guard against a zero interval on very fast iterations
            # (fix: previously sampled time.time() twice).
            elapsed = (time.time() - start) or 0.01
            fps = 1 / elapsed
            cv2.putText(color_image, f"{round(fps, 1)}", (50, 50),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)
            self.share_data["im0"] = color_image


if __name__ == '__main__':
    # Shared dict for cross-process exchange of coordinates and frames.
    shareVar = multiprocessing.Manager().dict()
    track = HSVTrackball(redcode)
    p3 = multiprocessing.Process(target=track.color_track_main, args=(shareVar,))
    p3.start()
    try:
        while True:
            try:
                print(shareVar["coord_data"])
            except KeyError:
                # Tracker process has not published a coordinate yet.
                pass
            # cv2.imshow("1231", shareVar['im0'])
            # cv2.waitKey(1)
            # Throttle the poll loop; the previous version busy-spun at
            # full CPU and swallowed every exception (incl. Ctrl-C inside
            # the try) with a bare `except Exception: pass`.
            time.sleep(0.05)
    except KeyboardInterrupt:
        # Clean shutdown of the tracker child process.
        p3.terminate()
        p3.join()
