from datetime import datetime
from collections import defaultdict
from typing import Optional
from check_video import CheckVideo
from video_recorder import VideoRecorder
from extends.redis import redis_connection
from config.settings import get_settings
from get_logger import get_data_logger, get_server_logger, get_error_logger
from coordinates_parser import coordinates_parser as parser

import os
import cv2
import time
import redis
import locale
import orjson
import platform
import numpy as np


# Bind the parsed layout data from the shared coordinates parser to short
# module-level names used throughout this file.
actual_points = parser.actual_points
green_triangle_coordinates = parser.green_triangle_coordinates
names = parser.names
points_graphs = parser.points_graphs
reference_points = parser.reference_points
reversed_green_triangle_coordinates = parser.reversed_green_triangle_coordinates
special_cases = parser.special_cases
track_graphs = parser.track_graphs

data_logger = get_data_logger()
server_logger = get_server_logger()
error_logger = get_error_logger()


# Last known position of every point (railway switch), keyed by point name.
# Values: "N" = normal, "R" = reverse, "U" = unknown/undetermined.
# Mutated at runtime by connect_green_tracks_segments() and
# patch_green_tracks_segments(); the values below are only initial defaults.
point_statuses = {
    "P1": "U",
    "S312": "U",
    "P3": "U",
    "246": "U",
    "T244": "R",
    "P6": "N",
    "S314": "N",
    "237": "N",
    "236": "R",
    "S309": "N",
    "P11": "N",
    "P12": "N",
    "P13": "N",
    "P14": "N",
    "P15": "N",
    "P16": "N",
    "213": "R",
    "212": "U",
}


# HSV colour ranges (OpenCV convention: H in [0, 180], S/V in [0, 255])
# used to segment the different elements of the control-screen image.

# blue
blue_lower_color_range = np.array([109, 228, 197])
blue_upper_color_range = np.array([130, 255, 232])

# green
green_lower_color_range = np.array([48, 180, 121])
green_upper_color_range = np.array([72, 243, 133])

# red
red_lower_color_range = np.array([0, 100, 210])
red_upper_color_range = np.array([15, 150, 255])

# red single-lamp (alternative tuning, currently unused)
# red_single_lower_color_range = np.array([165, 150, 140])
# red_single_upper_color_range = np.array([185, 255, 255])

green_single_lower = np.array([35, 100, 100])  # lower bound of the green signal-lamp hue
green_single_upper = np.array([85, 255, 255])  # upper bound of the green signal-lamp hue
# red single-lamp (alternative tuning, currently unused)
# red_single_lower_color_range = np.array([0, 150, 240])
# red_single_upper_color_range = np.array([185, 255, 255])

# white
white_lower_color_range = np.array([0, 0, 220])
white_upper_color_range = np.array([170, 25, 255])

# yellow
yellow_lower_color_range = np.array([18, 27, 235])
yellow_upper_color_range = np.array([43, 73, 255])

# screen background colour
background_lower_color_range = np.array([0, 0, 150], dtype=np.uint8) # grey
background_upper_color_range = np.array([120, 20, 211], dtype=np.uint8) # grey

# black (alternative tuning, currently unused)
# green_lower_color_range_2 = np.array([0, 0, 0])
# green_upper_color_range_2 = np.array([180, 255, 47])

# grey (alternative tuning, currently unused)
# background_lower_color_range = np.array([35, 0, 177], dtype=np.uint8)
# background_upper_color_range = np.array([110, 10, 206], dtype=np.uint8)

de_energized_zone_lower_color_range = np.array([40, 100, 130], dtype=np.uint8) # orange
de_energized_zone_upper_color_range = np.array([80, 255, 255], dtype=np.uint8) # dark green

# Maps the string form "[y, x]" of a signal-lamp contour's first vertex
# (as produced by str([contour[0, 0, 1], contour[0, 0, 0]])) to the
# signal's name on the track layout.
single_statuses = {
    "[161, 96]": "45",
    "[211, 96]": "47",
    "[233, 49]": "48",
    "[265, 312]": "49",
    "[231, 397]": "50",
    "[336, 504]": "51",
    "[301, 524]": "52",
    "[468, 637]": "53",
    "[546, 607]": "57",
    "[550, 1073]": "56",
    "[587, 1196]": "61",
    "[549, 1184]": "62",
    "[721, 828]": "59",
    "[685, 1161]": "58",
    "[751, 1190]": "60",
    "[788, 1312]": "65",
    "[722, 1356]": "63",
    "[684, 1398]": "64",
    "[304, 175]": "115",
    "[336, 175]": "114",
    "[368, 175]": "113",
    "[400, 175]": "112",
    "[432, 175]": "111",
    "[464, 175]": "110",
    "[497, 175]": "109",
    "[528, 175]": "108",
    "[560, 175]": "107",
    "[592, 175]": "106",
    "[624, 174]": "105",
    "[656, 175]": "104",
    "[688, 175]": "103",
    "[720, 175]": "102",
    "[752, 175]": "101",
    "[784, 175]": "123",
    "[816, 175]": "122",
    "[848, 175]": "121",
    "[880, 175]": "120",
    "[912, 175]": "119",
    "[944, 175]": "118",
    "[976, 175]": "117",
    "[1009, 175]": "116",
}

# Signals 51/52 and 61/62 do not distinguish direction and are displayed
# together at one position, so the merged names are tracked alongside the
# individual ones.
single_names = set(single_statuses.values()) | {"61_62", "51_52"}

settings = get_settings()


class SmartWatchDog:
    """Watches a video feed of a rail-control screen, detects track, point
    and signal statuses, and publishes the results to Redis.

    NOTE(review): every attribute below is class-level, so it is shared by
    all instances; the application appears to use a single instance.
    """

    # redis
    redis_conn: Optional[redis.Redis] = None

    # video
    video: Optional[cv2.VideoCapture] = None 

    check_video = CheckVideo()
    recorder = VideoRecorder()
    # Payload published to Redis; mutated in place on every frame.
    new_results = {
        "Capture_delay": "",
        "Timestamp": "",
        "Remark": "",
        "Valid": "",
        "Status": "",
    }

    # Time of the last publish; used for the heartbeat interval.
    start_time: float = 0.0
    found_detection_updates = False
    exit = False
    is_set_time = False
    
    def set_local_tz(self):
        """Set the process time locale to en_US.UTF-8 once (idempotent)."""
        if not self.is_set_time:
            locale.setlocale(locale.LC_TIME, 'en_US.UTF-8')
            self.is_set_time = True
    def extract_green_single_segments(self, green_single_contours):
        """Map green signal-lamp contours to signal names.

        Each contour's first vertex is looked up (as the string "[y, x]")
        in ``single_statuses``.  The paired signals 51/52 and 61/62 share
        one on-screen lamp, so they are merged into combined names.

        Returns the list of detected signal names (may contain duplicates,
        matching the original behaviour).
        """
        green_single_segments = []
        for contour in green_single_contours:
            key = str([contour[0, 0, 1], contour[0, 0, 0]])
            name = single_statuses.get(key)
            if name is None:
                continue

            # The two lamps of each pair are displayed together (direction
            # is not distinguished).
            if name in ("51", "52"):
                name = "51_52"
            elif name in ("61", "62"):
                name = "61_62"

            green_single_segments.append(name)
        return green_single_segments

    def extract_green_tracks_segments(self, white_point_contours):
        """Collect the track-segment names whose white marker points are
        present in the frame.

        Each contour's first vertex is looked up (as the string "[y, x]")
        in ``green_triangle_coordinates``; results are deduplicated while
        preserving contour order.  Finally, tracks that are only implied by
        their neighbours and point positions are patched in.

        Returns the (mutated-in-place by the patch step) list of segments.
        """
        white_tracks_segments = []
        for contour in white_point_contours:
            key = str([contour[0, 0, 1], contour[0, 0, 0]])
            track_name = green_triangle_coordinates.get(key)
            if track_name is not None and track_name not in white_tracks_segments:
                white_tracks_segments.append(track_name)

        # Add segments implied by special cases (both neighbours green and
        # guard points in the required positions).
        self.patch_green_tracks_segments(white_tracks_segments)

        return white_tracks_segments


    def find_reversed_green_tracks_segments(
        self,
        reversed_green_point_contours,
        green_tracks_segments
    ):
        """Return track names whose reversed-direction green marker is lit.

        Each contour's first vertex is looked up (as "[y, x]") in
        ``reversed_green_triangle_coordinates``; results are deduplicated
        and restricted to tracks already present in
        ``green_tracks_segments``.
        """
        reversed_green_tracks_segments = []
        for contour in reversed_green_point_contours:
            key = str([contour[0, 0, 1], contour[0, 0, 0]])
            track_name = reversed_green_triangle_coordinates.get(key)
            if track_name is None:
                continue
            if track_name in reversed_green_tracks_segments:
                continue
            # Keep only markers whose track is lit green in this frame.
            if track_name not in green_tracks_segments:
                continue
            reversed_green_tracks_segments.append(track_name)

        return reversed_green_tracks_segments


    def patch_green_tracks_segments(
        self,
        green_tracks_segments
    ):
        """Append tracks that are not directly detected but are implied by a
        special case: both neighbouring tracks are green AND the guard
        points are in the required positions.

        Mutates ``green_tracks_segments`` in place.

        BUG FIX: the original code evaluated the FIRST point's status twice
        and never checked the second point of the pair; every point listed
        in the case is now checked.
        """
        for track_name, special_case in special_cases.items():
            if track_name in green_tracks_segments:
                continue
            for case_logic in special_case.values():
                # Only the two-track / two-point special cases apply here.
                if len(case_logic["Tracks"]) != 2 \
                        or len(case_logic["Points"]) != 2:
                    continue

                tracks_ok = all(
                    t in green_tracks_segments for t in case_logic["Tracks"]
                )
                points_ok = all(
                    point_statuses[point] == required_status
                    for point, required_status in case_logic["Points"].items()
                )
                if tracks_ok and points_ok:
                    green_tracks_segments.append(track_name)
                    break


    def connect_green_tracks_segments(
        self,
        green_tracks_segments,
        hsv_image
    ):
        """Group the detected green track segments into connected routes.

        First pass: for every segment, refresh the status of its points in
        the module-level ``point_statuses`` and count how many neighbouring
        segments are also green.  A segment with no green neighbour becomes
        a one-segment route immediately; a segment whose single green
        neighbour lies further along the screen is kept as a potential
        route start point.

        Second pass: from each start point, walk through the green
        neighbours, removing visited segments from ``green_tracks_segments``
        (the input list is mutated in place) and chaining the traversed
        point/track indexes into one route.

        Returns a list of routes, each a list of indexes into ``names``.
        """
        adjacent_green_tracks_count = 0
        connected_green_tracks = []
        current_green_track = ""
        last_checked_track = ""
        next_green_track = ""
        point_status = ""
        points = []
        potential_start_end_track = []
        stop = False
        temp = []
        track_list = []

        for track in green_tracks_segments:
            points = track_graphs[track]
            adjacent_green_tracks_count = 0

            for point in points:
                # Classify the point (N or R) from the current frame.
                point_status = self.get_point_status(
                    actual_points[
                        reference_points[
                            np.where(
                                reference_points[:, -1, -1] == names.index(point)
                            )
                        ][0, -1, 0]
                    ],
                    hsv_image
                )

                point_statuses[point] = point_status

                # Look up the tracks reachable from this point given its
                # N/R/U status.
                track_list = \
                    points_graphs[point][point_status].copy()

                # Use the temp group to collect the points of one route.
                if track in track_list:
                    if names.index(point) not in temp:
                        temp.append(names.index(point))

                    # Count green neighbours (seed for the route walk).
                    for t in track_list:
                        # print(point, track, t)
                        if t != track and t in green_tracks_segments:
                            last_checked_track = t
                            adjacent_green_tracks_count += 1

            # Pick route start points by screen coordinate.  A lone segment
            # goes straight into the results; a segment whose single green
            # neighbour lies further along the screen is a start candidate.
            if adjacent_green_tracks_count == 0:
                temp.append(names.index(track))
                connected_green_tracks.append(temp.copy())

            if adjacent_green_tracks_count == 1:
                if reference_points[
                        reference_points[:, -1, -
                                        1] == names.index(last_checked_track)
                    ][0, 0, -1] > reference_points[
                        reference_points[:, -1, -1] == names.index(track)
                ][0, 0, -1]:
                    potential_start_end_track.append(track)

            temp.clear()

        last_checked_track = ""

        # From each start point, walk the route and chain the points and
        # tracks together into one connected group.
        for track in potential_start_end_track:
            current_green_track = track
            next_green_track = ""
            stop = False

            while not stop:
                points = track_graphs[current_green_track]

                for point in points:
                    point_status = self.get_point_status(
                        actual_points[
                            reference_points[
                                np.where(
                                    reference_points[:, -1, -
                                                    1] == names.index(point)
                                )
                            ][0, -1, 0]
                        ],
                        hsv_image
                    )

                    point_statuses[point] = point_status

                    track_list = \
                        points_graphs[point][point_status]

                    if current_green_track in track_list:
                        if names.index(point) not in temp:
                            temp.append(names.index(point))

                        for t in track_list:
                            if t == current_green_track:
                                continue

                            if t != track and t in green_tracks_segments:
                                # history.append(current_green_track)
                                temp.append(names.index(current_green_track))
                                if current_green_track in \
                                        green_tracks_segments:

                                    green_tracks_segments.remove(
                                        current_green_track)

                                next_green_track = t

                # No further neighbour found: close the route.
                if current_green_track == next_green_track:
                    temp.append(names.index(current_green_track))
                    if current_green_track in green_tracks_segments:
                        green_tracks_segments.remove(current_green_track)

                    connected_green_tracks.append(temp.copy())
                    temp.clear()
                    stop = True

                current_green_track = next_green_track

        return connected_green_tracks


    def get_green_tracks_with_components(self,
        white_point_contours,
        green_single,
        hsv_image,
        frame
    ):
        """Build the per-route status report and draw the overlay.

        Groups the detected green track segments into connected routes,
        evaluates the state of every point / track / signal along each
        route, annotates ``frame`` in place (route polylines, point
        circles, labels), and returns ``(results, reversed_flag)``.

        ``results`` maps "GP<n>" -> {"status": {name: code}, "route": [names]}
        with codes "N"/"R" (point positions), "G" (green track or green
        signal), "R" (red signal) or "BK".  ``reversed_flag`` is currently
        always False — the reversed-route detection is disabled.

        FIX: the flag local was named ``sorted``, shadowing the builtin;
        renamed to ``reversed_flag``.
        """
        green_tracks_segments = \
            self.extract_green_tracks_segments(white_point_contours)

        green_single_segments = self.extract_green_single_segments(green_single)

        all_tracks_segments = green_tracks_segments

        connected_green_tracks = \
            self.connect_green_tracks_segments(
                all_tracks_segments,
                hsv_image
            )

        temp_results = defaultdict(lambda: {"status": {}, "route": []})
        reversed_flag = False

        for (group_index, component_indexes) in \
                enumerate(connected_green_tracks):
            group_index_name = f"GP{group_index + 1}"
            filtered_reference_points = \
                reference_points[
                    np.isin(
                        reference_points[:, 1, 1], component_indexes
                    )
                ]

            # Order this route's reference points along the screen.
            filtered_reference_points[:] = \
                filtered_reference_points[
                    np.argsort(filtered_reference_points[:, 0, 1])
            ]

            # Draw the route as a polyline between consecutive points.
            for point1, point2 in \
                    zip(
                        filtered_reference_points[:, 0],
                        filtered_reference_points[:, 0][1:]
                    ):

                cv2.line(
                    frame,
                    (point1[1], point1[0]),
                    (point2[1], point2[0]),
                    (255, 236, 0),
                    4
                )

            # Evaluate the status of each point / track / signal on the route.
            for point in filtered_reference_points:
                status = "BK"
                point_name: str = names[point[1, 1]]

                if actual_points[point[1][0]][-1][0] == 2:
                    continue

                if actual_points[point[1][0]][-1][0] == 0:
                    if point_statuses[point_name] == "R":
                        status = "R"

                    if point_statuses[point_name] == "N":
                        status = "N"

                if actual_points[point[1][0]][-1][0] == 1:
                    status = "G"

                is_signal = False
                if point_name in single_names:
                    status = "R"  # R = red signal lamp
                    if point_name in green_single_segments:  # G = green signal lamp
                        status = "G"
                    is_signal = True

                # The merged lamp pairs are reported as two SIG_ entries.
                if point_name == "51_52":
                    temp_results[group_index_name]["status"]["SIG_51"] = status
                    temp_results[group_index_name]["status"]["SIG_52"] = status
                    temp_results[group_index_name]["route"].append("SIG_51")
                    temp_results[group_index_name]["route"].append("SIG_52")
                elif point_name == "61_62":
                    temp_results[group_index_name]["status"]["SIG_61"] = status
                    temp_results[group_index_name]["status"]["SIG_62"] = status
                    temp_results[group_index_name]["route"].append("SIG_61")
                    temp_results[group_index_name]["route"].append("SIG_62")
                elif not point_name.startswith("S_"):
                    if is_signal:
                        point_name = "SIG_" + point_name
                    temp_results[group_index_name]["status"][point_name] = status
                    temp_results[group_index_name]["route"].append(point_name)

                color = (0, 255, 127)

                if actual_points[point[1][0]][-1][0] == 0:
                    color = (127, 0, 255)

                cv2.circle(
                    frame,
                    (
                        int(point[0][1]),
                        int(point[0][0])
                    ),
                    5,
                    color,
                    -1
                )

                cv2.putText(
                    frame,
                    point_name,
                    (
                        int(point[0][1]),
                        int(point[0][0]) - 10
                    ),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5,
                    color,
                    1,
                    cv2.LINE_AA
                )
        return dict(temp_results), reversed_flag

    def get_point_status(
        self,
        point_coordinate,
        image
    ):
        """Classify a point (switch) as "N" (normal), "R" (reverse) or "U".

        Two rectangular regions of the HSV ``image`` are inspected: the
        normal-position arm (rows 0..1 of ``point_coordinate``) and the
        reverse-position arm (rows 2..-2).  A region is considered "lit"
        when more than 95% of its pixels fall OUTSIDE the given background
        colour range.  The test is run first against the grey screen
        background range and, if inconclusive, against the de-energized
        zone (orange/dark green) range.

        Refactored: the original duplicated the same ~50-line computation
        for both colour ranges; the shared arithmetic now lives in
        ``_non_background_percentage``.
        """
        # Sentinel coordinate: the point has no detectable area on screen.
        if point_coordinate[0, 0] == -1:
            return "N"

        normal_point_area: np.ndarray = \
            image[point_coordinate[0:2, 0][0]:point_coordinate[0:2, 1][0],
                point_coordinate[0:2, 0][1]:point_coordinate[0:2, 1][1]]

        reverse_point_area: np.ndarray = \
            image[point_coordinate[2:-1, 0][0]:point_coordinate[2:-1, 1][0],
                point_coordinate[2:-1, 0][1]:point_coordinate[2:-1, 1][1]]

        color_ranges = (
            (background_lower_color_range, background_upper_color_range),
            (de_energized_zone_lower_color_range,
             de_energized_zone_upper_color_range),
        )

        for lower, upper in color_ranges:
            normal_pct = self._non_background_percentage(
                normal_point_area, lower, upper)
            reverse_pct = self._non_background_percentage(
                reverse_point_area, lower, upper)

            # Exactly one arm lit -> that arm gives the position.
            if normal_pct < 95 and reverse_pct > 95:
                return "R"

            if reverse_pct < 95 and normal_pct > 95:
                return "N"

        return "U"

    def _non_background_percentage(self, area, lower_range, upper_range):
        """Percentage of pixels in ``area`` that fall OUTSIDE [lower, upper]."""
        total_pixels: int = area.shape[0] * area.shape[1]
        mask = cv2.inRange(area, lower_range, upper_range)
        in_range_pixels: int = cv2.countNonZero(mask)
        return (total_pixels - in_range_pixels) / total_pixels * 100

    def picture_process_time(self, frame, start_tick):
        """Draw the per-frame processing time in the frame's top-right corner."""
        if not settings.ENABLED_PROCESS_TIME:
            return

        elapsed = (cv2.getTickCount() - start_tick) / cv2.getTickFrequency()
        text_position = (frame.shape[1] - settings.POSITION_X, settings.POSITION_Y)
        cv2.putText(
            frame,
            f"Smart Watch Dog Process Time : {elapsed:.6f}s",
            text_position,
            cv2.FONT_HERSHEY_SIMPLEX,
            0.7,
            (0, 255, 0),
            2,
            cv2.LINE_AA
        )

    def update_results(self, status, check_res):
        """Refresh the shared result payload with the latest detection and
        trigger a heartbeat check; returns the payload dict."""
        self.new_results.update({
            "Status": status,
            "Capture_delay": check_res.get("datetime_ocr", ""),
            "Valid": "False",
            "Remark": check_res.get("remark", ""),
        })

        self.set_heart_beat()
        return self.new_results

    def set_heart_beat(self, remark = "Keep Alive - Normal"):
        """Heartbeat: when the heartbeat feature is enabled and more than
        HEART_INTERVAL seconds have passed since the last publish, set the
        payload remark to ``remark`` and flag the payload for re-sending."""
        if not settings.ENABLED_HEART_BEAT:
            return

        elapsed = time.time() - self.start_time
        if elapsed > settings.HEART_INTERVAL:
            self.new_results["Remark"] = remark
            self.found_detection_updates = True

    def send_data_to_redis(self, previous_result, results):
        """Publish the payload when the detection changed or a heartbeat is
        due; returns the value the caller should keep as previous_result."""
        unchanged = previous_result == results
        if unchanged and not self.found_detection_updates:
            return previous_result

        self.found_detection_updates = False
        self.start_time = time.time()
        # Valid is "True" only when no problem was flagged in Remark.
        self.new_results["Valid"] = "False" if self.new_results["Remark"] else "True"
        data_logger.info(self.new_results)

        if self.redis_conn:
            redis_connection.set_key(settings.DATA_RESULT_KEY, orjson.dumps(self.new_results))
        return results


    def send_error_data_to_redis(self, previous_result, check_res):
        """Publish an error payload when the check result changed or a
        heartbeat is due; returns the new previous_result value."""
        if previous_result == check_res and not self.found_detection_updates:
            return previous_result

        self.found_detection_updates = False
        self.start_time = time.time()
        self.new_results["Valid"] = "False"
        self.new_results["Remark"] = check_res.get("remark", "")
        data_logger.info(self.new_results)
        if self.redis_conn:
            redis_connection.set_key(settings.DATA_RESULT_KEY, orjson.dumps(self.new_results))
        return check_res


    def start(self) -> None:
        """Main capture loop.

        Grabs frames, runs the sanity checkers, detects track / point /
        signal statuses, publishes results to Redis and renders the
        overlay window.  Press 'q' to quit, Space to save a snapshot.

        BUG FIX: the capture device was previously held only in a local
        variable, so ``self.video`` stayed None and ``video_close()`` never
        released the device; it is now stored on the instance.
        """
        server_logger.debug(settings.model_dump())

        # In test mode read from the configured test video, otherwise open
        # the first capture device at 1080p.
        if settings.MODE == "test" and settings.TEST_VIODE_PATH:
            self.video = cv2.VideoCapture(settings.TEST_VIODE_PATH)
        else:
            self.video = cv2.VideoCapture(0, cv2.CAP_ANY)
            self.video.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
            self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
        video = self.video

        white_point_mask = cv2.imread(
            filename=os.path.join(settings.BASE_DIR, "static",
                                "white_point_mask.png"),
            flags=cv2.IMREAD_GRAYSCALE
        )

        red_single_mask = cv2.imread(
            filename=os.path.join(settings.BASE_DIR, "static",
                                "single_mask.png"),
            flags=cv2.IMREAD_GRAYSCALE
        )

        # Discard the first frames so the camera has time to settle.
        return_value, frame = video.read()

        for _ in range(10):
            return_value, frame = video.read()

        if not return_value:
            error_logger.error("cannot grab frame.")
            return None

        previous_result = {}
        self.start_time = time.time()

        while return_value:
            start_tick = cv2.getTickCount()  # start timing this frame
            return_value, frame = video.read()

            self.found_detection_updates = False

            self.recorder.start(frame)
            hsv_image = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            mask_image = cv2.inRange(
                hsv_image,
                white_lower_color_range,
                white_upper_color_range
            )

            green_single_image = cv2.inRange(
                hsv_image,
                green_single_lower,
                green_single_upper
            )

            # Restrict each colour mask to the known marker positions.
            white_point_mask_image = cv2.bitwise_and(mask_image, white_point_mask)
            green_single_mask_image = cv2.bitwise_and(green_single_image, red_single_mask)

            white_point_contours, _ = \
                cv2.findContours(
                    white_point_mask_image,
                    cv2.RETR_EXTERNAL,
                    cv2.CHAIN_APPROX_SIMPLE
                )

            green_signle_contours, _ = \
                cv2.findContours(
                    green_single_mask_image,
                    cv2.RETR_EXTERNAL,
                    cv2.CHAIN_APPROX_SIMPLE
                )

            check_res = self.check_video.start_checkers(frame, hsv_frame=hsv_image, green_point_contours=white_point_contours, green_signle_contours=green_signle_contours)
            self.new_results["Timestamp"] = datetime.now().strftime("%m/%d/%Y %H:%M:%S.%f")

            # If any enabled checker failed, publish the error payload and
            # skip status detection for this frame.
            if (settings.ENABLED_CHECK_WINDOWS and check_res["check_windows"] == False) or \
                (settings.ENABLED_CHECK_LAYOUT and check_res["check_layout"] == False) or \
                (settings.ENABLED_CHECK_FREEZE and check_res["check_freeze"] == False) or \
                (settings.ENABLED_CHECK_CURSOR and check_res["check_cursor"] == False):

                self.set_heart_beat()
                previous_result = self.send_error_data_to_redis(previous_result, check_res)

                key = cv2.waitKey(20)
                if key == 113:  # 'q'
                    break
                cv2.imshow(
                    settings.SHOW_WINDOW_NAME,
                    cv2.resize(
                        frame,
                        settings.SHOW_SIZE
                    )
                )
                self.set_local_tz()
                continue

            results, _ = self.get_green_tracks_with_components(
                white_point_contours,
                green_signle_contours,
                hsv_image,
                frame
            )
            self.update_results(status=results, check_res=check_res)
            previous_result = self.send_data_to_redis(previous_result, results)

            self.picture_process_time(frame, start_tick)

            cv2.imshow(
                settings.SHOW_WINDOW_NAME,
                cv2.resize(
                    frame,
                    settings.SHOW_SIZE
                )
            )
            self.set_local_tz()

            key = cv2.waitKey(20)

            if key == 113:  # 'q'
                break

            if key == 32:
                # press Space to capture image (Space ASCII value: 32)
                now = datetime.now()
                dt_string = now.strftime("%Y%m%d%H%M%S")
                img_name = \
                    "opencv_frame_{}.png".format(dt_string)

                cv2.imwrite(img_name, frame)

        return None

    def __enter__(self):
        """Block until a Redis connection is available, then return self.

        FIX: the original retried in a tight busy-loop; a short sleep now
        separates attempts while Redis is unreachable.
        """
        server_logger.info("redis connection ...")
        while not self.redis_conn:
            self.redis_conn = redis_connection.get_redis_connection()
            if not self.redis_conn:
                time.sleep(0.5)
        return self
    
    def video_close(self):
        """Release the capture device if one was opened."""
        capture = self.video
        if capture is None:
            return
        capture.release()

    def close(self):
        """Tear down all resources in order (Redis, video, recorder,
        windows) and mark the instance as exited."""
        shutdown_steps = (
            (redis_connection.close, "redis close"),
            (self.video_close, "video close"),
            (self.recorder.close, "Recorder close"),
            (cv2.destroyAllWindows, "Exit"),
        )
        for action, message in shutdown_steps:
            action()
            server_logger.info(message)

        # Remember that shutdown already ran so __exit__ does not repeat it.
        self.exit = True

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Run close() unless shutdown has already happened."""
        if self.exit:
            return
        self.close()

if __name__ == "__main__":
    smart_watch_dog = None
    try:
        smart_watch_dog = SmartWatchDog()
        with smart_watch_dog:
            smart_watch_dog.start()
    except KeyboardInterrupt as e:
        error_logger.error(e)
    finally:
        # __exit__ normally runs close() already; only close here if it did
        # not (e.g. SmartWatchDog() raised before entering the context).
        # FIX: the original called close() unconditionally, tearing the
        # resources down a second time.
        if smart_watch_dog and not smart_watch_dog.exit:
            smart_watch_dog.close()