from datetime import datetime
from collections import defaultdict
from typing import Optional
from check_video import CheckVideo
from video_recorder import VideoRecorder
from extends.redis import redis_connection
from config.settings import get_settings
from get_logger import get_data_logger, get_server_logger, get_error_logger
from coordinates_parser import coordinates_parser as parser

import os
import cv2
import time
import redis
import locale
import orjson
import platform
import numpy as np


# Short module-level aliases for the coordinate parser's lookup tables.
actual_points = parser.actual_points
green_triangle_coordinates = parser.green_triangle_coordinates
names = parser.names
points_graphs = parser.points_graphs
reference_points = parser.reference_points
reversed_green_triangle_coordinates = parser.reversed_green_triangle_coordinates
special_cases = parser.special_cases
track_graphs = parser.track_graphs

# Dedicated loggers for detection data, server lifecycle and errors.
data_logger = get_data_logger()
server_logger = get_server_logger()
error_logger = get_error_logger()

# class DataStatusDict(TypedDict):
#     status: dict[str, str]
#     route: list[str]



# class DataResultsDict(TypedDict):
#     Timestamp: str
#     Remark: str
#     Capture_delay: str
#     Valid: Optional[Literal["True", "False"]]
#     Status: DataStatusDict


# Last-known status of every point (switch): "N" = normal, "R" = reverse,
# "U" = unknown. Mutated in place while frames are processed.
point_statuses = {
    "ETS024": "U",
    "ETS020": "U",
    "HUH016": "U",
    "HUH014": "U",
    "HUH012": "R",
    "ETS023": "N",
    "ETS021": "N",
    "HUH017": "N", 
    "HUH015": "R",
    "S_HUH22": "N",
    "S_HUH07": "N",
    "HUH013": "N",
    "HUH011": "N",
    "HUH019": "N",
    "HHS114": "N",
    "HHS113": "N",
    "HHS102": "R",
    "HHS100": "U",
    "HHS109": "U",
    "HHS106": "U",
    "HHS104": "U",
    "HHS101": "U",
    "P21": "R",
    "HHS108": "N",
    "S-HHS24": "N",
    "HHS105": "U",
    "HHS103": "U",
    "HHS110": "U",
    "HHS112": "U",
    "HHS107": "U"
}

# HSV colour ranges (OpenCV convention: H 0-179, S 0-255, V 0-255) used to
# mask regions of the captured signalling screen.
green_lower_color_range = np.array([20, 20, 50])
green_upper_color_range = np.array([95, 255, 255])

# Reverse-direction black
black_reverse_lower_color_range_2 = np.array([0, 0, 0])
black_reverse_upper_color_range_2 = np.array([170, 255, 47])# 47 dark green # 61 dark blue

# Cyan region
cyan_lower_color_range = np.array([20, 20, 42])
cyan_upper_color_range = np.array([83, 255, 250])

# Reverse-direction green
green_reverse_lower_color_range = np.array([50, 90, 52], dtype=np.uint8)
green_reverse_upper_color_range = np.array([82, 255, 250], dtype=np.uint8)

# Screen background colour; used by get_point_status to decide whether a
# point area is "lit" (non-background) or not.
background_lower_color_range = np.array([0, 0, 128], dtype=np.uint8)
background_upper_color_range = np.array([180, 49, 186], dtype=np.uint8)


# De-energised zone colour, the fallback range in get_point_status.
de_energized_zone_lower_color_range = np.array([40, 100, 130], dtype=np.uint8)
de_energized_zone_upper_color_range = np.array([80, 255, 255], dtype=np.uint8)

settings = get_settings()

class SmartWatchDog:
    """Watches a captured signalling screen, derives track/point statuses
    from colour masks and publishes the results to Redis.

    NOTE(review): all of the state below is declared on the class, not on
    instances, so every instance shares it — presumably the class is used
    as a single long-lived object; confirm before creating more than one.
    """

    # Redis connection, established lazily in __enter__.
    redis_conn: Optional[redis.Redis] = None

    # Video capture source: camera 0 or the configured test video.
    video: Optional[cv2.VideoCapture] = None 

    check_video = CheckVideo()
    recorder = VideoRecorder()
    # Result payload pushed to Redis; mutated in place on every frame.
    new_results = {
        "Capture_delay": "",
        "Timestamp": "",
        "Remark": "",
        "Valid": "",
        "Status": "",
    }

    # Timestamp of the last publish; baseline for the heartbeat interval.
    start_time: float = 0.0
    # Forces a publish on the current frame (set by the heartbeat check).
    found_detection_updates = False
    # Set by close() so __exit__ does not tear down twice.
    exit = False
    # Guard so the time locale is only configured once (see set_local_tz).
    is_set_time = False

    def set_local_tz(self):
        """Configure the en_US.UTF-8 time locale, at most once per process."""
        if self.is_set_time:
            return
        locale.setlocale(locale.LC_TIME, 'en_US.UTF-8')
        self.is_set_time = True
            

    def extract_green_tracks_segments(self, green_point_contours):
        """Map detected green-point contours to their track-segment names.

        Each contour's first vertex (row, col) is looked up in
        ``green_triangle_coordinates``; matches are collected in first-seen
        order without duplicates.  ``patch_green_tracks_segments`` is then
        applied to add segments implied by special two-track cases.

        :param green_point_contours: contours from ``cv2.findContours`` on
            the green mask (shape ``(N, 1, 2)`` arrays, x/y order).
        :return: list of unique track-segment names.
        """
        green_tracks_segments = []

        for contour in green_point_contours:
            # Key format matches the parser table: "[row, col]".
            key = str([contour[0, 0, 1], contour[0, 0, 0]])
            # Single lookup instead of the original's three per contour.
            segment = green_triangle_coordinates.get(key)
            if segment is not None and segment not in green_tracks_segments:
                green_tracks_segments.append(segment)

        self.patch_green_tracks_segments(green_tracks_segments)

        return green_tracks_segments


    def find_reversed_green_tracks_segments(
        self,
        reversed_green_point_contours,
        green_tracks_segments
    ):
        """Return segments detected as running in the reversed direction.

        A segment qualifies only if its contour key appears in
        ``reversed_green_triangle_coordinates``, it has not been collected
        yet, and it is also present in ``green_tracks_segments``.

        :param reversed_green_point_contours: contours from the
            reversed-green mask.
        :param green_tracks_segments: segments already detected as green.
        :return: list of unique reversed segment names.
        """
        reversed_green_tracks_segments = []

        for contour in reversed_green_point_contours:
            key = str([contour[0, 0, 1], contour[0, 0, 0]])
            # One dict lookup replaces the original's four per contour.
            segment = reversed_green_triangle_coordinates.get(key)
            if segment is None:
                continue
            if segment in reversed_green_tracks_segments:
                continue
            if segment not in green_tracks_segments:
                continue
            reversed_green_tracks_segments.append(segment)

        return reversed_green_tracks_segments


    def patch_green_tracks_segments(
        self,
        green_tracks_segments
    ):
        """Add segments implied by special cases to *green_tracks_segments*.

        A special-case track is appended when one of its case rules matches:
        both of the rule's two tracks are already green AND both of its two
        points are in the required status.

        BUGFIX: the original compared ``next(iter(Points.items()))`` against
        itself twice, so only the FIRST point's status was ever checked and
        the second point was ignored.  Both points are now validated, which
        is what the ``len(case_logic["Points"]) == 2`` guard implies.

        :param green_tracks_segments: mutated in place.
        """
        for track_name, special_case in special_cases.items():
            if track_name in green_tracks_segments:
                continue

            for case_logic in special_case.values():
                if len(case_logic["Tracks"]) != 2 \
                        or len(case_logic["Points"]) != 2:
                    continue

                tracks_present = all(
                    t in green_tracks_segments for t in case_logic["Tracks"]
                )
                points_match = all(
                    point_statuses[point] == required
                    for point, required in case_logic["Points"].items()
                )

                if tracks_present and points_match:
                    green_tracks_segments.append(track_name)
                    break


    def connect_green_tracks_segments(
        self,
        green_tracks_segments,
        hsv_image
    ):
        """Chain individual green segments into connected routes.

        Pass 1: for every green segment, resolve the live status of its
        endpoint points from *hsv_image*, count how many neighbouring green
        segments it connects to, and keep segments with exactly one
        neighbour (and a neighbour further right) as route start candidates.
        Pass 2: depth-first walk from each start candidate, consuming
        segments from *green_tracks_segments* and collecting the visited
        point/track indexes (indexes into ``names``) per route.

        :param green_tracks_segments: list of green segment names;
            mutated — segments are removed as routes consume them.
        :param hsv_image: current frame in HSV, read by get_point_status.
        :return: list of routes, each a list of ``names`` indexes.
        """
        adjacent_green_tracks_count = 0

        connected_green_tracks: list[list[int]] = []

        current_green_track = ""

        last_checked_track = ""

        next_green_track = ""

        point_status = ""

        # Points on the left/right ends of the currently examined segment.
        points: list = []
        """
        识别到的绿色轨道， 左右两边的点
        """

        potential_start_end_track = []

        stop = False

        temp = []

        track_list = []

        # Column of name-indexes, used to locate rows in reference_points.
        track_and_point_array = reference_points[:, -1, -1]

        for track in green_tracks_segments:
            points = track_graphs[track] 
            adjacent_green_tracks_count = 0
            track_index = names.index(track)

            for point in points:
                # Resolve this point's live status (N / R / U).
                point_index = names.index(point)
                point_status = self.get_point_status(
                    actual_points[
                        reference_points[np.where(track_and_point_array == point_index)][0, -1, 0]
                    ],
                    hsv_image
                )

                point_statuses[point] = point_status

                # Tracks reachable from this point given its N/R/U status.
                track_list = \
                    points_graphs[point][point_status].copy()

                # Use the temp group to find one start point per group.
                if track in track_list:
                    if point_index not in temp:
                        temp.append(point_index)

                    # Count green neighbours (depth-search preparation).
                    for t in track_list:
                        # print(point, track, t)
                        if t != track and t in green_tracks_segments:
                            last_checked_track = t
                            adjacent_green_tracks_count += 1

            # Select start candidates by track coordinate.  An isolated
            # segment (no green neighbour) becomes a route on its own; a
            # segment with exactly one neighbour further to the right is a
            # potential start of a longer route.
            if adjacent_green_tracks_count == 0:
                temp.append(track_index)
                connected_green_tracks.append(temp.copy())

            if adjacent_green_tracks_count == 1:
                if reference_points[track_and_point_array == names.index(last_checked_track)][0, 0, -1] > reference_points[track_and_point_array == track_index][0, 0, -1]:
                    potential_start_end_track.append(track)

            temp.clear()

        last_checked_track = ""

        # Depth-first walk from each start, chaining point-track-point-...
        for track in potential_start_end_track:
            current_green_track = track
            next_green_track = ""
            stop = False

            while not stop:
                points = track_graphs[current_green_track]

                for point in points:
                    point_index = names.index(point)
                    point_status = self.get_point_status(
                        actual_points[
                            reference_points[
                                np.where(
                                    track_and_point_array == point_index
                                )
                            ][0, -1, 0]
                        ],
                        hsv_image
                    )

                    point_statuses[point] = point_status

                    track_list = \
                        points_graphs[point][point_status]

                    if current_green_track in track_list:
                        if point_index not in temp:
                            temp.append(point_index)

                        for t in track_list:
                            if t == current_green_track:
                                continue

                            if t != track and t in green_tracks_segments:
                                # history.append(current_green_track)
                                temp.append(names.index(current_green_track))
                                if current_green_track in \
                                        green_tracks_segments:

                                    green_tracks_segments.remove(
                                        current_green_track)

                                next_green_track = t

                # No further neighbour found: close the route.
                if current_green_track == next_green_track:
                    temp.append(names.index(current_green_track))
                    if current_green_track in green_tracks_segments:
                        green_tracks_segments.remove(current_green_track)

                    connected_green_tracks.append(temp.copy())
                    temp.clear()
                    stop = True

                current_green_track = next_green_track

        return connected_green_tracks


    def get_green_tracks_with_components(
        self,
        green_point_contours,
        # reversed_green_point_contours,
        hsv_image,
        frame
    ):
        """Build route/status data for each connected green group and draw it.

        For every connected group of green segments: sort its reference
        points left-to-right, draw the connecting polyline on *frame*, then
        classify each point ("B" reverse point, "G" green, "BK" unknown) and
        record it under ``GP<n>`` with both a status map and a route list.

        Fixes vs. original: the local ``sorted`` flag no longer shadows the
        builtin, and ``names[point[1, 1]]`` is looked up once per point.

        :param green_point_contours: contours of green points.
        :param hsv_image: HSV frame read by connect_green_tracks_segments.
        :param frame: BGR frame drawn on in place.
        :return: (results dict keyed "GP1".., is_sorted flag).  Reversal
            detection is currently disabled, so is_sorted is always False.
        """
        green_tracks_segments = \
            self.extract_green_tracks_segments(green_point_contours)

        connected_green_tracks = \
            self.connect_green_tracks_segments(
                green_tracks_segments,
                hsv_image
            )

        temp_results = defaultdict(lambda: {"status": {}, "route": []})
        is_sorted = False

        for group_index, component_indexes in enumerate(connected_green_tracks):
            group_key = f"GP{group_index + 1}"

            filtered_reference_points = reference_points[
                np.isin(reference_points[:, 1, 1], component_indexes)
            ]

            # Order the group's points by x coordinate (left to right).
            filtered_reference_points[:] = filtered_reference_points[
                np.argsort(filtered_reference_points[:, 0, 1])
            ]

            # Connect consecutive points with a yellow polyline.
            for point1, point2 in zip(
                filtered_reference_points[:, 0],
                filtered_reference_points[:, 0][1:]
            ):
                cv2.line(
                    frame,
                    (point1[1], point1[0]),
                    (point2[1], point2[0]),
                    (255, 236, 0),
                    4
                )

            # Classify each point / track segment and draw its marker.
            for point in filtered_reference_points:
                status = "BK"
                point_name = names[point[1, 1]]
                kind = actual_points[point[1][0]][-1][0]

                if kind == 2:
                    continue

                if kind == 0:
                    # A point (switch): blue when reversed, green when normal.
                    if point_statuses[point_name] == "R":
                        status = "B"
                    if point_statuses[point_name] == "N":
                        status = "G"

                if kind == 1:
                    # A plain track segment is simply green.
                    status = "G"

                # Signal entries (names starting with "S") are excluded
                # from the published data but still drawn below.
                if not point_name.startswith("S"):
                    temp_results[group_key]["status"][point_name] = status
                    temp_results[group_key]["route"].append(point_name)

                color = (0, 255, 127)
                if kind == 0:
                    color = (127, 0, 255)

                cv2.circle(
                    frame,
                    (
                        int(point[0][1]),
                        int(point[0][0])
                    ),
                    5,
                    color,
                    -1
                )

                cv2.putText(
                    frame,
                    point_name,
                    (
                        int(point[0][1]),
                        int(point[0][0]) - 10
                    ),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5,
                    color,
                    1,
                    cv2.LINE_AA
                )

        return temp_results, is_sorted


    def get_green_tracks_with_components_filter(
        self,
        green_point_contours,
        reversed_green_point_contours,
        hsv_image,
        frame
    ):
        """Filtered variant: build route data, drop noise groups, then draw.

        Fixes vs. original:
        * each group is initialised as ``{"status": {}, "route": []}`` —
          the flat ``{}`` previously used made the ``["status"]`` /
          ``["route"]`` writes raise KeyError on the first point;
        * the noise filter inspects the nested ``"status"`` keys (it was
          written for the old flat schema and never matched);
        * ``pop(..., None)`` so a missing "2066"/"2065" no longer raises;
        * ``self.last_tracks`` is read via getattr — it is not initialised
          anywhere in this class (TODO confirm where it should come from);
        * the point-marker loop is no longer nested inside the line-segment
          loop, which redrew every marker once per segment;
        * the local flag no longer shadows the builtin ``sorted``.

        :return: (re-indexed results dict keyed "GP1"..,
            is_sorted flag — True when any group's order was reversed).
        """
        green_tracks_segments = \
            self.extract_green_tracks_segments(green_point_contours)

        reversed_green_tracks = \
            self.find_reversed_green_tracks_segments(
                reversed_green_point_contours,
                green_tracks_segments
            )

        connected_green_tracks = \
            self.connect_green_tracks_segments(
                green_tracks_segments,
                hsv_image
            )

        temp_results = {}  # route data per group
        draw_list = {}  # reference points per group, kept for drawing
        is_sorted = False

        for group_index, component_indexes in enumerate(connected_green_tracks):
            group_key = f"GP{group_index + 1}"
            temp_results[group_key] = {"status": {}, "route": []}

            filtered_reference_points = reference_points[
                np.isin(reference_points[:, 1, 1], component_indexes)
            ]

            filtered_reference_points[:] = filtered_reference_points[
                np.argsort(filtered_reference_points[:, 0, 1])
            ]

            green_tracks = filtered_reference_points[
                actual_points[filtered_reference_points[:, 1, 0], -1, 0] == 1
            ]

            # Reverse the travel order when either end segment of the group
            # was detected as running in the reversed direction.
            if names[green_tracks[0, 1, 1]] in reversed_green_tracks \
                    or names[green_tracks[-1, 1, 1]] in reversed_green_tracks:
                filtered_reference_points[:] = filtered_reference_points[::-1]
                is_sorted = True

            draw_list[group_key] = filtered_reference_points

            # Classify points and track segments.
            for point in filtered_reference_points:
                status = "BK"
                point_name = names[point[1, 1]]
                kind = actual_points[point[1][0]][-1][0]

                if kind == 2:
                    continue

                if kind == 0:
                    if point_statuses[point_name] == "R":
                        status = "B"
                    if point_statuses[point_name] == "N":
                        status = "G"

                if kind == 1:
                    status = "G"

                if not point_name.startswith("S"):
                    temp_results[group_key]["status"][point_name] = status
                    temp_results[group_key]["route"].append(point_name)

        # Filter out noise groups: exactly one numeric track key and no
        # overlap with the previously seen tracks.
        last_tracks = getattr(self, "last_tracks", ())
        delete_track = []
        for track_index_name, track_data in temp_results.items():
            status_keys = track_data["status"].keys()
            if sum(key.isdigit() for key in status_keys) == 1 \
                    and not any(track in last_tracks for track in status_keys):
                delete_track.append(track_index_name)
            elif any(seg in status_keys for seg in ["2066", "2065"]) \
                    and not all(seg in status_keys for seg in ["2066", "2065", "S-HHS112"]):
                # Drop 2066/2065 from the data while still drawing them.
                for seg in ("2066", "2065"):
                    track_data["status"].pop(seg, None)
                    if seg in track_data["route"]:
                        track_data["route"].remove(seg)

        for track_index in reversed(delete_track):
            temp_results.pop(track_index)
            draw_list.pop(track_index)

        # Re-index the surviving groups as GP1..GPn.
        temp_results = {
            f"GP{green_track_index + 1}": value
            for green_track_index, value in enumerate(temp_results.values())
        }

        if settings.MODE != "pro":
            for filtered_reference_points in draw_list.values():
                for point1, point2 in zip(
                    filtered_reference_points[:, 0],
                    filtered_reference_points[:, 0][1:]
                ):
                    cv2.line(
                        frame,
                        (point1[1], point1[0]),
                        (point2[1], point2[0]),
                        (255, 236, 0),
                        4
                    )

                for point in filtered_reference_points:
                    color = (0, 255, 127)
                    if actual_points[point[1][0]][-1][0] == 0:
                        color = (127, 0, 255)

                    cv2.circle(
                        frame,
                        (
                            int(point[0][1]),
                            int(point[0][0])
                        ),
                        5,
                        color,
                        -1
                    )

                    cv2.putText(
                        frame,
                        names[point[1, 1]],
                        (
                            int(point[0][1]),
                            int(point[0][0]) - 10
                        ),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5,
                        color,
                        1,
                        cv2.LINE_AA
                    )

        return temp_results, is_sorted


    def get_point_status(
        self,
        point_coordinate,
        image
    ):
        """Classify a point (switch) as Normal / Reverse / Unknown.

        *point_coordinate* holds two rectangles in *image* (HSV): rows 0-1
        describe the "normal" indicator area, rows 2-3 the "reverse" area.
        An area counts as lit when more than 10% of its pixels fall inside
        the colour range (i.e. its non-matching percentage is below 90).
        The background range is tried first, then the de-energised-zone
        range; a point is "R" when only the normal area is lit, "N" when
        only the reverse area is lit, otherwise "U".

        Fix vs. original: the identical mask/percentage computation was
        written out four times; it is now a single helper.

        :param point_coordinate: coordinate array; ``[0, 0] == -1`` marks a
            point with no usable coordinates.
        :param image: current frame in HSV.
        :return: "N", "R" or "U".
        """
        if point_coordinate[0, 0] == -1:
            return "U"

        # Slicing kept exactly as in the parser's coordinate layout:
        # row 0 = (y1, y2), row 1 = (x1, x2) for the normal area; rows
        # 2-3 likewise for the reverse area.
        normal_point_area = \
            image[point_coordinate[0:2, 0][0]:point_coordinate[0:2, 1][0],
                point_coordinate[0:2, 0][1]:point_coordinate[0:2, 1][1]]

        reverse_point_area = \
            image[point_coordinate[2:-1, 0][0]:point_coordinate[2:-1, 1][0],
                point_coordinate[2:-1, 0][1]:point_coordinate[2:-1, 1][1]]

        for lower, upper in (
            (background_lower_color_range, background_upper_color_range),
            (de_energized_zone_lower_color_range,
             de_energized_zone_upper_color_range),
        ):
            normal_pct = self._unmatched_pixel_percentage(
                normal_point_area, lower, upper)
            reverse_pct = self._unmatched_pixel_percentage(
                reverse_point_area, lower, upper)

            if normal_pct < 90 and reverse_pct > 90:
                return "R"

            if reverse_pct < 90 and normal_pct > 90:
                return "N"

        return "U"

    @staticmethod
    def _unmatched_pixel_percentage(area, lower, upper):
        """Percentage of pixels in *area* OUTSIDE the HSV range [lower, upper].

        NOTE(review): like the original, this divides by the area size and
        will raise ZeroDivisionError on an empty slice — the coordinates
        are presumably always valid rectangles; confirm.
        """
        total_pixels: int = area.shape[0] * area.shape[1]
        matched_pixels: int = cv2.countNonZero(
            cv2.inRange(area, lower, upper))
        return (total_pixels - matched_pixels) / total_pixels * 100



    def update_results(self, status, check_res):
        """Refresh the shared result payload with the latest detection data.

        :param status: route/status dict produced by the detector.
        :param check_res: sanity-checker output; supplies the OCR capture
            delay and any remark.
        :return: the (shared) updated results dict.
        """
        self.new_results.update(
            Status=status,
            Capture_delay=check_res.get("datetime_ocr", ""),
            Remark=check_res.get("remark", ""),
        )
        self.set_heart_beat()
        return self.new_results

    def set_heart_beat(self, remark = "Keep Alive - Normal"):
        """Arm a heartbeat publish when the configured interval has elapsed.

        When heartbeats are enabled and more than HEART_INTERVAL seconds
        have passed since the last publish, the remark is written into the
        payload and found_detection_updates is raised to force a send.
        """
        if not settings.ENABLED_HEART_BEAT:
            return

        elapsed = time.time() - self.start_time
        if elapsed > settings.HEART_INTERVAL:
            self.new_results["Remark"] = remark
            self.found_detection_updates = True


    def picture_process_time(self, frame, start_tick):
        """Overlay the per-frame processing time near the top-right corner."""
        if not settings.ENABLED_PROCESS_TIME:
            return

        elapsed = (cv2.getTickCount() - start_tick) / cv2.getTickFrequency()
        origin = (frame.shape[1] - settings.POSITION_X, settings.POSITION_Y)
        cv2.putText(
            frame,
            f"Smart Watch Dog Process Time : {elapsed:.6f}s",
            origin,
            cv2.FONT_HERSHEY_SIMPLEX,
            0.7,
            (0, 255, 0),
            2,
            cv2.LINE_AA,
        )
    
    def send_data_to_redis(self, previous_result, results):
        """Publish the payload to Redis when it changed or a heartbeat is due.

        :param previous_result: last published status value.
        :param results: freshly computed status value.
        :return: the status value now considered "published".
        """
        needs_publish = previous_result != results \
            or self.found_detection_updates
        if not needs_publish:
            return previous_result

        self.found_detection_updates = False
        self.start_time = time.time()
        # Valid is "True" only when there is no remark (no detected problem).
        self.new_results["Valid"] = "True" if not self.new_results["Remark"] else "False"

        data_logger.info(self.new_results)
        if self.redis_conn:
            redis_connection.set_key(settings.DATA_RESULT_KEY, orjson.dumps(self.new_results))
        return results


    def send_error_data_to_redis(self, previous_result, check_res):
        """Publish an error snapshot when the checker output changed
        or a heartbeat is due.

        :param previous_result: last published checker result.
        :param check_res: current sanity-checker output.
        :return: the checker result now considered "published".
        """
        if previous_result == check_res and not self.found_detection_updates:
            return previous_result

        self.found_detection_updates = False
        self.start_time = time.time()
        self.new_results["Valid"] = "False"
        self.new_results["Remark"] = check_res.get("remark", "")
        data_logger.info(self.new_results)
        if self.redis_conn:
            redis_connection.set_key(settings.DATA_RESULT_KEY, orjson.dumps(self.new_results))
        return check_res

    def start(self) -> None:
        """Run the main capture/detect loop until quit or video failure.

        Per frame: read -> HSV convert -> colour masks -> contours ->
        sanity checks -> route/status detection -> publish to Redis ->
        render the debug window.  Press 'q' to quit, Space to save a
        snapshot of the current frame.
        """
        server_logger.debug(settings.model_dump())

        # In test mode read from the configured test video instead of the
        # camera.  NOTE(review): "TEST_VIODE_PATH" looks like a typo of
        # "TEST_VIDEO_PATH" but matches the settings attribute name.
        if settings.MODE == "test" and settings.TEST_VIODE_PATH:
            self.video = cv2.VideoCapture(settings.TEST_VIODE_PATH)
        else:
            self.video = cv2.VideoCapture(0, cv2.CAP_ANY)
        self.video.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
        self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

        if self.video.isOpened() == False:
            server_logger.warning("读取不到视频!!")
            return None

        # Static image masks limiting each colour search to known regions
        # of the signalling screen.
        green_point_mask = cv2.imread(
            filename=os.path.join(settings.BASE_DIR, "static",
                                "green_point_mask.png"),
            flags=cv2.IMREAD_GRAYSCALE
        )

        # direction_mask = cv2.imread(
        #     filename=os.path.join(settings.BASE_DIR, "static",
        #                         "direction_mask.png"),
        #     flags=cv2.IMREAD_GRAYSCALE
        # )

        cyan_mask = cv2.imread(
            filename=os.path.join(settings.BASE_DIR, "static",
                                "cyan_point_mask.png"),
            flags=cv2.IMREAD_GRAYSCALE
        )

        # green_reverse_mask = cv2.imread(
        #     filename=os.path.join(settings.BASE_DIR, "static",
        #                         "reverse_green_mask.png"),
        #     flags=cv2.IMREAD_GRAYSCALE
        # )

        # Warm-up: discard the first frames so the camera can settle.
        for _ in range(10):
            return_value, frame = self.video.read()

        if not return_value:
            # Error
            error_logger.error("cannot grab frame.")
            return None

        previous_result = {}
        # NOTE(review): previous_sorted is never used below.
        previous_sorted = False
        self.start_time = time.time()

        current_server_name = platform.platform().lower()
        while return_value:
            start_tick = cv2.getTickCount()  # start timing this frame
            return_value, frame =  self.video.read()

            # if 'linux-6.1.0-31-amd64' in current_server_name:  # linux check
                # frame = cv2.flip(frame, 0)  # 0 flips around the X axis

            self.found_detection_updates = False
            self.recorder.start(frame)
            hsv_image = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            mask_image = cv2.inRange(
                hsv_image,
                green_lower_color_range,
                green_upper_color_range
            )

            # mask_image_2 = cv2.inRange(
            #     hsv_image,
            #     black_reverse_lower_color_range_2,
            #     black_reverse_upper_color_range_2
            # )

            mask_image_3 = cv2.inRange(
                hsv_image,
                cyan_lower_color_range,
                cyan_upper_color_range
            )

            # mask_image_4 = cv2.inRange(
            #     hsv_image,
            #     green_reverse_lower_color_range,
            #     green_reverse_upper_color_range
            # )

            # Restrict colour hits to the known screen regions.
            # direction_mask_image = cv2.bitwise_and(mask_image_2, direction_mask)
            green_point_mask_image = cv2.bitwise_and(mask_image, green_point_mask)
            cyan_point_mask_image = cv2.bitwise_and(mask_image_3, cyan_mask)
            # green_reserver_mask_image = cv2.bitwise_and(mask_image_4, green_reverse_mask)

            # reversed_green_point_contours, _ = \
            #     cv2.findContours(
            #         direction_mask_image,
            #         cv2.RETR_EXTERNAL,
            #         cv2.CHAIN_APPROX_SIMPLE
            #     )

            # green_reverser_point_contours, _ = \
            #     cv2.findContours(
            #         green_reserver_mask_image,
            #         cv2.RETR_EXTERNAL,
            #         cv2.CHAIN_APPROX_SIMPLE
            #     )

            green_point_contours, _ = \
                cv2.findContours(
                    green_point_mask_image,
                    cv2.RETR_EXTERNAL,
                    cv2.CHAIN_APPROX_SIMPLE
                )

            cyan_point_contours, _ = \
                cv2.findContours(
                    cyan_point_mask_image,
                    cv2.RETR_EXTERNAL,
                    cv2.CHAIN_APPROX_SIMPLE
                )


            # Both green and cyan contours count as forward-direction hits.
            positive_direction_point_contours = green_point_contours + cyan_point_contours
            check_res = self.check_video.start_checkers(frame,hsv_frame=hsv_image, green_point_contours=positive_direction_point_contours, green_signle_contours=positive_direction_point_contours)

            self.new_results["Timestamp"] = datetime.now().strftime( "%m/%d/%Y %H:%M:%S.%f")
            # If any enabled sanity check failed, publish the error and skip
            # detection for this frame.
            if (settings.ENABLED_CHECK_WINDOWS and check_res["check_windows"] == False) or \
                (settings.ENABLED_CHECK_LAYOUT and check_res["check_layout"] == False) or \
                (settings.ENABLED_CHECK_FREEZE and check_res["check_freeze"] == False) or \
                (settings.ENABLED_CHECK_CURSOR and check_res["check_cursor"] == False):

                self.set_heart_beat()
                previous_result = self.send_error_data_to_redis(previous_result, check_res)

                key = cv2.waitKey(20)
                if key == 113:  # 'q' quits
                    break
                cv2.imshow(
                    settings.SHOW_WINDOW_NAME,
                    # frame
                    cv2.resize(
                        frame,
                        settings.SHOW_SIZE
                    )
                )
                self.set_local_tz()
                continue

            results, _ = self.get_green_tracks_with_components(
                positive_direction_point_contours,
                # reversed_point_contours,
                hsv_image,
                frame
            )

            self.update_results(status=results, check_res=check_res)
            previous_result = self.send_data_to_redis(previous_result, results)
            self.picture_process_time(frame, start_tick)

            cv2.imshow(
                settings.SHOW_WINDOW_NAME,
                cv2.resize(
                    frame,
                    settings.SHOW_SIZE
                )
            )
            self.set_local_tz()
            key = cv2.waitKey(20)

            if key == 113:  # 'q' quits
                break

            if key == 32:
                # press Space to capture image (Space ASCII value: 32)
                now = datetime.now()
                dt_string = now.strftime("%Y%m%d%H%M%S")
                img_name = \
                    "opencv_frame_{}.png".format(dt_string)

                cv2.imwrite(img_name, frame)
        return None

        
    def __enter__(self):
        """Block until a Redis connection is available, then return self.

        Fix: the original retried get_redis_connection() in a tight loop,
        spinning at 100% CPU while Redis was unreachable; a short sleep is
        now inserted between failed attempts.
        """
        server_logger.info("redis connection ...")
        while not self.redis_conn:
            self.redis_conn = redis_connection.get_redis_connection()
            if not self.redis_conn:
                # Back off briefly instead of hot-spinning.
                time.sleep(1)
        return self
    
    def video_close(self):
        """Release the capture device, if one was opened."""
        if self.video is None:
            return
        self.video.release()

    def close(self):
        """Release every resource in order and flag the instance as exited."""
        shutdown_steps = (
            (redis_connection.close, "redis close"),
            (self.video_close, "video close"),
            (self.recorder.close, "Recorder close"),
            (cv2.destroyAllWindows, "Exit"),
        )
        for step, message in shutdown_steps:
            step()
            server_logger.info(message)

        self.exit = True
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Ensure resources are released at most once on context exit."""
        if self.exit:
            return
        self.close()


if __name__ == "__main__":
    watchdog = None
    try:
        watchdog = SmartWatchDog()
        # The context manager establishes the Redis connection up front.
        with watchdog:
            watchdog.start()
    except KeyboardInterrupt as interrupt:
        error_logger.error(interrupt)
    finally:
        # close() is idempotent via the exit flag, so a double call after
        # __exit__ is harmless.
        if watchdog:
            watchdog.close()

