from typing import Optional
from datetime import datetime
from check_video import CheckVideo
from video_recorder import VideoRecorder
from config.settings import get_settings
from collections import defaultdict
from extends.redis import redis_connection
from get_logger import get_data_logger, get_server_logger, get_error_logger
from coordinates_parser import coordinates_parser as parser

import os
import cv2
import time
import redis
import orjson
import locale
import platform
import numpy as np


# Unpack the parsed coordinate/topology tables once at module import so the
# per-frame loop avoids repeated attribute lookups on `parser`.
actual_points, green_triangle_coordinates, names, points_graphs, reference_points, reversed_green_triangle_coordinates, special_cases, track_graphs = (
    parser.actual_points,
    parser.green_triangle_coordinates,
    parser.names,
    parser.points_graphs,
    parser.reference_points,
    parser.reversed_green_triangle_coordinates,
    parser.special_cases,
    parser.track_graphs
)

data_logger = get_data_logger()
server_logger = get_server_logger()
error_logger = get_error_logger()

# Last known status of each point (switch), keyed by point name:
# "N" normal, "R" reverse, "U" unknown — the values returned by
# SmartWatchDog.get_point_status(). Refreshed in place on every frame.
point_statuses = {
    "TAW901": "U",
    "TAW023": "U",
    "TAW025": "U",
    "TAW029": "U",
    "TAW202": "R",
    "TAW204": "N",
    "TAW206": "N",
    "TAW208": "N",
    "TAW210": "R",
    "TMC102": "N",
    "TMC103": "N",
    "TMC105": "N",
    "TMC108": "N",
    "TMC110": "N",
    "TMC115": "R",
    "S_P16": "U",
    "TMC112": "U",
    "TMC116": "U",
    "TMC119": "U",
    "TMC123": "U",
    "TMC118": "R",
    "TAW201": "R",
    "TAW207": "U",
    "TMC104": "U",
    "TMC107": "U",
    "TMC109": "U",
    "TMC111": "U",
    "TMC114": "U",
    "TMC117": "U",
    "TMC122": "U",
    "TMC121": "U",

}

# HSV colour ranges (OpenCV H in [0,180], S/V in [0,255]) used with
# cv2.inRange to isolate screen elements.

# green_lower_color_range = np.array([40, 85, 50], dtype=np.uint8)
# green_upper_color_range = np.array([85, 255, 165], dtype=np.uint8)

# Forward (normal-direction) green
green_lower_color_range = np.array([30, 30, 30], dtype=np.uint8)
green_upper_color_range = np.array([95, 255, 255], dtype=np.uint8)

# Green used for direction recognition
reversed_green_lower_color_range = np.array([40, 85, 105], dtype=np.uint8)
reversed_green_upper_color_range = np.array([95, 255, 255], dtype=np.uint8)

# Cyan regions
cyan_lower_color_range = np.array([50, 100, 150], dtype=np.uint8)
cyan_upper_color_range = np.array([87, 255, 255], dtype=np.uint8)

# Cyan vehicle
cyan_car_lower_color_range = np.array([75, 220, 210], dtype=np.uint8)
cyan_car_upper_color_range = np.array([95, 255, 240], dtype=np.uint8)

# Widened green range
green_expand_lower_color_range = np.array([0, 0, 0], dtype=np.uint8)
green_expand_upper_color_range = np.array([150, 190, 190], dtype=np.uint8)

# Reverse black
black_reverse_lower_color_range_2 = np.array([0, 0, 0])
black_reverse_upper_color_range_2 = np.array([170, 255, 47])# 47 dark green # 61 dark blue

# Screen background range — used by get_point_status() to decide whether a
# point sub-area is occupied (mostly non-background pixels).
background_lower_color_range = np.array([0, 0, 128], dtype=np.uint8)
background_upper_color_range = np.array([180, 49, 186], dtype=np.uint8)

# De-energized zone range — fallback test when the background test is
# inconclusive in get_point_status().
de_energized_zone_lower_color_range = np.array([40, 100, 130], dtype=np.uint8)
de_energized_zone_upper_color_range = np.array([80, 255, 255], dtype=np.uint8)

# NOTE(review): `last_tracks` is not referenced anywhere in this file —
# presumably consumed by another module; confirm before removing.
last_tracks = [
    "1417",
    "1401",
    "1410",
    "1424",
    "1437",
    "1431",
    "2257",
    "2258",
    "2259",
    "2260",
    "2290",
    "2206",
    "2203",
    "2212",
    "2209",
    "2219",
    "2216",
    "2225",
    "2222",
    "2232",
    "2229",
    "2255"
]

settings = get_settings()

class SmartWatchDog:
    """Watches a signalling-screen video feed (camera or test file), detects
    green/cyan track segments and point (switch) statuses with OpenCV colour
    masks, draws the detections on the frame, and publishes route/status
    results to Redis.

    NOTE(review): all attributes below are class-level, so every instance
    shares the same dicts/objects (`new_results`, `check_video`, `recorder`).
    Fine if only one instance ever exists — confirm before creating more.
    """

    # Redis connection, established (busy-wait) in __enter__.
    redis_conn: Optional[redis.Redis] = None

    # Video capture source, opened in start().
    video: Optional[cv2.VideoCapture] = None

    check_video = CheckVideo()
    recorder = VideoRecorder()
    # Payload template published to Redis; mutated in place each frame.
    new_results = {
        "Capture_delay": "",
        "Timestamp": "",
        "Remark": "",
        "Valid": "",
        "Status": "",
    }

    start_time: float = 0.0          # timestamp of the last publish (heartbeat timer)
    found_detection_updates = False  # forces a publish even when results are unchanged
    exit = False                     # set once close() has run (guards double close)
    is_set_time = False              # LC_TIME locale has been configured

    def set_local_tz(self):
        """Set the process LC_TIME locale to en_US.UTF-8 exactly once so
        strftime output is English regardless of the host locale."""
        if self.is_set_time == False:
            locale.setlocale(locale.LC_TIME, 'en_US.UTF-8')
            self.is_set_time = True

    # def extract_green_single_segments(self, green_single_contours):

    #     green_single_segments = []
    #     """Preparation check - when two points exist at the same time
    #     """
    #     for green_single_contur in green_single_contours:
    #         # red_single_contur_set = {red_single_contur[0,0,1], red_single_contur[0,0,0]}
    #         green_single_contur_str = str([green_single_contur[0,0,1],green_single_contur[0,0,0]])
    #         if green_single_contur_str in single_statuses:
    #             green_single_segments.append(single_statuses[green_single_contur_str])
    #     return green_single_segments

    def extract_green_tracks_segments(self,green_point_contours):
        """Map each contour's anchor pixel (row, col of its first point) to a
        track-segment name via `green_triangle_coordinates`, de-duplicated,
        then patch in special-case segments.

        NOTE(review): the list comprehension below is used only for its
        `append` side effect; its own result list is discarded.
        """

        green_tracks_segments = []

        [
            green_tracks_segments.append(
                green_triangle_coordinates[
                    str(
                        [
                            green_point_contour[0, 0, 1],
                            green_point_contour[0, 0, 0]
                        ]
                    )
                ]
            )

            for green_point_contour in green_point_contours

            if str(
            [green_point_contour[0, 0, 1], green_point_contour[0, 0, 0]]
        ) in green_triangle_coordinates

            if green_triangle_coordinates[
                str([green_point_contour[0, 0, 1], green_point_contour[0, 0, 0]])
            ] not in green_tracks_segments
        ]

        self.patch_green_tracks_segments(green_tracks_segments)

        return green_tracks_segments


    def find_reversed_green_tracks_segments(
        self,
        reversed_green_point_contours,
        green_tracks_segments
    ):
        """Same anchor-pixel lookup as extract_green_tracks_segments, but via
        `reversed_green_triangle_coordinates`, keeping only segments that are
        also present in `green_tracks_segments`.

        NOTE(review): comprehension used for side effects, as above.
        """
        reversed_green_tracks_segments = []

        [
            reversed_green_tracks_segments.append(
                reversed_green_triangle_coordinates[
                    str(
                        [
                            reversed_green_point_contour[0, 0, 1],
                            reversed_green_point_contour[0, 0, 0]
                        ]
                    )
                ]
            )

            for reversed_green_point_contour in reversed_green_point_contours

            if str(
                [reversed_green_point_contour[0, 0, 1],
                    reversed_green_point_contour[0, 0, 0]]
            ) in reversed_green_triangle_coordinates

            if reversed_green_triangle_coordinates[
                str([reversed_green_point_contour[0, 0, 1],
                    reversed_green_point_contour[0, 0, 0]])
            ] not in reversed_green_tracks_segments

            if reversed_green_triangle_coordinates[
                str([reversed_green_point_contour[0, 0, 1],
                    reversed_green_point_contour[0, 0, 0]])
            ] in green_tracks_segments
        ]

        return reversed_green_tracks_segments


    def patch_green_tracks_segments(
        self,
        green_tracks_segments
    ):
        """Append special-case track segments (from `special_cases`) that are
        implied present when both of a case's two neighbouring tracks are lit
        and the associated point status matches. Mutates the list in place.

        NOTE(review): the two `point_statuses[...] == ...` clauses below are
        identical — both check the FIRST entry of case_logic["Points"]. The
        second was presumably meant to check the second point; confirm.
        """
        for track_name, special_case in special_cases.items():
            if track_name not in green_tracks_segments:
                for case_logic in special_case.values():
                    if len(case_logic["Tracks"]) == 2 \
                            and len(case_logic["Points"]) == 2:

                        if case_logic["Tracks"][0] in green_tracks_segments \
                                and case_logic["Tracks"][1] in green_tracks_segments \
                                and point_statuses[
                            next(iter(case_logic["Points"].items()))[0]
                        ] == next(iter(case_logic["Points"].items()))[1] \
                                and point_statuses[
                            next(iter(case_logic["Points"].items()))[0]
                        ] == next(iter(case_logic["Points"].items()))[1]:
                            green_tracks_segments.append(track_name)
                            break


    def connect_green_tracks_segments(
        self,
        green_tracks_segments,
        hsv_image
    ):
        """Group the detected green track segments into connected components
        by walking the point/track adjacency tables, refreshing each point's
        status from the HSV frame along the way.

        Returns a list of components, each a list of indexes into `names`.
        NOTE(review): mutates the module-level `point_statuses` and removes
        entries from `green_tracks_segments` while resolving chains.
        """

        adjacent_green_tracks_count = 0
        connected_green_tracks = []
        current_green_track = ""
        last_checked_track = ""
        next_green_track = ""
        point_status = ""
        points = []
        potential_start_end_track = []
        stop = False
        temp = []
        track_list = []

        # Walk every green track segment and try to connect adjacent segments.
        for track in green_tracks_segments:
            points = track_graphs[track]
            adjacent_green_tracks_count = 0

            for point in points:

                point_status = self.get_point_status(
                    actual_points[
                        reference_points[
                            np.where(
                                reference_points[:, -1, -1] == names.index(point)
                            )
                        ][0, -1, 0]
                    ],
                    hsv_image
                )

                point_statuses[point] = point_status

                track_list = \
                    points_graphs[point][point_status].copy()

                if track in track_list:
                    if names.index(point) not in temp:
                        temp.append(names.index(point))

                    for t in track_list:
                        if t != track and t in green_tracks_segments:
                            last_checked_track = t
                            adjacent_green_tracks_count += 1

            # No lit neighbour: the segment forms a component on its own.
            if adjacent_green_tracks_count == 0:
                temp.append(names.index(track))
                connected_green_tracks.append(temp.copy())

            # Exactly one neighbour: candidate chain start/end segment.
            if adjacent_green_tracks_count == 1:
                if reference_points[
                    reference_points[:, -1, -
                    1] == names.index(last_checked_track)
                ][0, 0, -1] > reference_points[
                    reference_points[:, -1, -1] == names.index(track)
                ][0, 0, -1]:
                    potential_start_end_track.append(track)

            temp.clear()

        last_checked_track = ""

        # Resolve the potential start/end segments to connect isolated chains.
        for track in potential_start_end_track:
            current_green_track = track
            next_green_track = ""
            stop = False

            while not stop:
                points = track_graphs[current_green_track]

                for point in points:
                    point_status = self.get_point_status(
                        actual_points[
                            reference_points[
                                np.where(
                                    reference_points[:, -1, -
                                                            1] == names.index(point)
                                )
                            ][0, -1, 0]
                        ],
                        hsv_image
                    )

                    point_statuses[point] = point_status

                    track_list = \
                        points_graphs[point][point_status]

                    if current_green_track in track_list:
                        if names.index(point) not in temp:
                            temp.append(names.index(point))

                        for t in track_list:
                            if t == current_green_track:
                                continue

                            if t != track and t in green_tracks_segments:
                                temp.append(names.index(current_green_track))
                                if current_green_track in \
                                        green_tracks_segments:
                                    green_tracks_segments.remove(
                                        current_green_track)

                                next_green_track = t

                # No further hop found — close the component and stop walking.
                if current_green_track == next_green_track:
                    temp.append(names.index(current_green_track))
                    if current_green_track in green_tracks_segments:
                        green_tracks_segments.remove(current_green_track)

                    connected_green_tracks.append(temp.copy())
                    temp.clear()
                    stop = True

                current_green_track = next_green_track

        return connected_green_tracks

    def get_green_tracks_with_components(self,
        white_point_contours,
        hsv_image,
        frame
    ):
        """Build per-group route/status results from the detected contours and
        draw the connected routes (lines, circles, labels) onto `frame`.

        Returns (temp_results, sorted_flag) where temp_results maps "GP<n>" to
        {"status": {point_name: status}, "route": [point_name, ...]}.

        NOTE(review): the local `sorted` shadows the builtin and is never set
        to True before being returned — confirm the flag is still needed.
        """
        green_tracks_segments = \
            self.extract_green_tracks_segments(white_point_contours)

        # green_single_segments = self.extract_green_single_segments(green_single_contours)
        # reversed_green_tracks = extract_green_tracks_segments(reversed_green_point_contours)

        # yello_green_tracks = extract_green_tracks_segments(yello_point_contours)

        # all_tracks_segments = green_tracks_segments + reversed_green_tracks + yello_green_tracks
        all_tracks_segments = green_tracks_segments

        connected_green_tracks = \
            self.connect_green_tracks_segments(
                all_tracks_segments,
                hsv_image
            )

        temp_results = defaultdict(lambda:{"status": {}, "route": []})
        sorted = False

        for (group_index, component_indexes) in \
                enumerate(connected_green_tracks):

            temp_results[f"GP{group_index + 1}"] = {
                "status": {},
                "route": []
            }
            # Select reference points belonging to this component, then sort
            # them in place by their display coordinate.
            filtered_reference_points = \
                reference_points[
                    np.isin(
                        reference_points[:, 1, 1], component_indexes
                    )
                ]

            filtered_reference_points[:] = \
                filtered_reference_points[
                    np.argsort(filtered_reference_points[:, 0, 1])
            ]


            # Draw the route polyline between consecutive points.
            for point1, point2 in \
                    zip(
                        filtered_reference_points[:, 0],
                        filtered_reference_points[:, 0][1:]
                    ):

                cv2.line(
                    frame,
                    (point1[1], point1[0]),
                    (point2[1], point2[0]),
                    (255, 236, 0),
                    4
                )
            # Classify each point's status and draw it.
            for point in filtered_reference_points:
                status = "BK"
                point_name: str = names[point[1, 1]]

                # Type code at actual_points[...][-1][0]: 2 = skip,
                # 0 = switchable point (use N/R status), 1 = "G".
                # TODO confirm the meaning of these codes against the parser.
                if actual_points[point[1][0]][-1][0] == 2:
                    continue

                if actual_points[point[1][0]][-1][0] == 0:
                    if point_statuses[point_name] == "R":
                        status = "R"

                    if point_statuses[point_name] == "N":
                        status = "N"

                if actual_points[point[1][0]][-1][0] == 1:
                    status = "G"



                # "S_"-prefixed names are internal markers: drawn but not reported.
                if not point_name.startswith("S_"):
                    temp_results[f"GP{group_index + 1}"]["status"][point_name] = status
                    temp_results[f"GP{group_index + 1}"]["route"].append(point_name)

                color = (0, 255, 127)

                if actual_points[point[1][0]][-1][0] == 0:
                    color = (127, 0, 255)

                cv2.circle(
                    frame,
                    (
                        int(point[0][1]),
                        int(point[0][0])
                    ),
                    5,
                    color,
                    -1
                )

                cv2.putText(
                    frame,
                    point_name,
                    (
                        int(point[0][1]),
                        int(point[0][0]) - 10
                    ),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5,
                    color,
                    1,
                    cv2.LINE_AA
                )

        return temp_results, sorted

    def negative_direction(self, reversed_green_tracks,current_track_list, proportion = 0.5):
        """Return True when more than `proportion` of current_track_list's
        entries also appear in reversed_green_tracks.

        NOTE(review): raises ZeroDivisionError when current_track_list is
        empty — confirm callers never pass an empty list.
        """
        common_elements = len([item for item in reversed_green_tracks if item in current_track_list])

        common_proprotion = common_elements / len(current_track_list)
        if common_proprotion > proportion:
            return True
        return False

    def get_point_status(
        self,
        point_coordinate,
        image
    ):
        """Classify a point (switch) as "N" (normal), "R" (reverse) or "U"
        (unknown) from its two HSV sub-areas.

        A sub-area counts as occupied when more than 90% of its pixels fall
        OUTSIDE the background colour range; if the background test is
        inconclusive the de-energized-zone range is tried as a fallback.
        A leading coordinate of -1 marks a point with no reverse area → "N".
        """

        if point_coordinate[0, 0] == -1:
            return "N"

        # Normal-position sub-area: rows/cols taken from point_coordinate[0:2].
        normal_point_area = \
            image[point_coordinate[0:2, 0][0]:point_coordinate[0:2, 1][0],
            point_coordinate[0:2, 0][1]:point_coordinate[0:2, 1][1]]

        # point_area: Mat = \
        #     image[point_location[0][0]:point_location[0][1],
        #           point_location[1][0]:point_location[1][1]]

        normal_point_area_total_pixels: int = normal_point_area.shape[0] * \
                                            normal_point_area.shape[1]

        # point_area_hsv: Mat = cvtColor(point_area, COLOR_BGR2HSV)
        normal_point_area_background_mask = \
            cv2.inRange(
                normal_point_area,
                background_lower_color_range,
                background_upper_color_range
            )

        normal_point_area_ground_mask_white_pixels: int = cv2.countNonZero(
            normal_point_area_background_mask)

        # Percentage of pixels NOT matching the background range.
        normal_point_area_background_mask_black_pixels_percentage: float = \
            (
                    normal_point_area_total_pixels
                    - normal_point_area_ground_mask_white_pixels
            ) \
            / normal_point_area_total_pixels * 100

        # Reverse-position sub-area: rows/cols taken from point_coordinate[2:-1].
        # print(point_coordinate[2:-1])
        reverse_point_area = \
            image[point_coordinate[2:-1, 0][0]:point_coordinate[2:-1, 1][0],
            point_coordinate[2:-1, 0][1]:point_coordinate[2:-1, 1][1]]

        reverse_point_area_total_pixels: int = reverse_point_area.shape[0] * \
                                            reverse_point_area.shape[1]

        # point_area_hsv: Mat = cvtColor(point_area, COLOR_BGR2HSV)
        reverse_point_area_background_mask = \
            cv2.inRange(
                reverse_point_area,
                background_lower_color_range,
                background_upper_color_range
            )

        reverse_point_area_ground_mask_white_pixels: int = cv2.countNonZero(
            reverse_point_area_background_mask)

        reverse_point_area_background_mask_black_pixels_percentage: float = \
            (
                    reverse_point_area_total_pixels
                    - reverse_point_area_ground_mask_white_pixels
            ) \
            / reverse_point_area_total_pixels * 100

        # print(normal_point_area_background_mask_black_pixels_percentage)
        # print(reverse_point_area_background_mask_black_pixels_percentage)

        if normal_point_area_background_mask_black_pixels_percentage \
                < 90 \
                and reverse_point_area_background_mask_black_pixels_percentage \
                > 90:
            return "R"

        if reverse_point_area_background_mask_black_pixels_percentage \
                < 90 \
                and normal_point_area_background_mask_black_pixels_percentage \
                > 90:
            return "N"

        # Background test inconclusive: repeat with the de-energized-zone range.
        normal_point_area_background_mask = \
            cv2.inRange(
                normal_point_area,
                de_energized_zone_lower_color_range,
                de_energized_zone_upper_color_range
            )

        normal_point_area_ground_mask_white_pixels: int = cv2.countNonZero(
            normal_point_area_background_mask)

        normal_point_area_background_mask_black_pixels_percentage: float = \
            (
                    normal_point_area_total_pixels
                    - normal_point_area_ground_mask_white_pixels
            ) \
            / normal_point_area_total_pixels * 100

        reverse_point_area_background_mask = \
            cv2.inRange(
                reverse_point_area,
                de_energized_zone_lower_color_range,
                de_energized_zone_upper_color_range
            )

        reverse_point_area_ground_mask_white_pixels: int = cv2.countNonZero(
            reverse_point_area_background_mask)

        reverse_point_area_background_mask_black_pixels_percentage: float = \
            (
                    reverse_point_area_total_pixels
                    - reverse_point_area_ground_mask_white_pixels
            ) \
            / reverse_point_area_total_pixels * 100

        if normal_point_area_background_mask_black_pixels_percentage \
                < 90 \
                and reverse_point_area_background_mask_black_pixels_percentage \
                > 90:
            return "R"

        if reverse_point_area_background_mask_black_pixels_percentage \
                < 90 \
                and normal_point_area_background_mask_black_pixels_percentage \
                > 90:
            return "N"

        return "U"

    def picture_process_time(self,frame, start_tick):
        """Render the per-frame processing time in the top-right corner of
        the frame (only when ENABLED_PROCESS_TIME is set)."""
        if settings.ENABLED_PROCESS_TIME:
            end_tick = cv2.getTickCount()
            process_time = (end_tick - start_tick) / cv2.getTickFrequency()
            position = (frame.shape[1] - settings.POSITION_X, settings.POSITION_Y)
            cv2.putText(
                frame,
                f"Smart Watch Dog Process Time : {process_time:.6f}s",
                position,
                cv2.FONT_HERSHEY_SIMPLEX,
                0.7,
                (0, 255, 0),
                2,
                cv2.LINE_AA
            )

    def update_results(self, status, check_res):
        """Refresh the shared `new_results` payload from the latest detection
        results and checker output, then run the heartbeat check."""
        self.new_results["Timestamp"] = datetime.now().strftime(
        "%m/%d/%Y %H:%M:%S.%f")

        self.new_results["Status"] = status
        self.new_results["Capture_delay"] = check_res.get("datetime_ocr", "")
        self.new_results["Remark"] = check_res.get("remark", "")
        self.new_results["Valid"] = "False" if self.new_results["Remark"] else "True" # "True" when there is no remark (no problem)
        self.set_heart_beat()
        return self.new_results

    def set_heart_beat(self, remark = "Keep Alive - Normal"):
        """Check and set the heartbeat.

        If more than HEART_INTERVAL seconds have passed since the last
        publish, overwrite Remark with the keep-alive text and force a
        publish. (Original note: heartbeat may only be intended when remark
        is empty?? — uncertain.)
        """
        if settings.ENABLED_HEART_BEAT:
            if time.time() - self.start_time > settings.HEART_INTERVAL:
                self.new_results["Remark"] = remark
                self.found_detection_updates = True

    def send_data_to_redis(self,previous_result, results):
        """Publish `new_results` to Redis when the results changed, or when a
        heartbeat forced an update. Returns the new `previous_result`."""
        if previous_result != results or self.found_detection_updates:
            self.found_detection_updates = False
            previous_result = results
            self.start_time = time.time()

            data_logger.info(self.new_results)

            if self.redis_conn:
                redis_connection.set_key(settings.DATA_RESULT_KEY, orjson.dumps(self.new_results))
        return previous_result


    def send_error_data_to_redis(self, previous_result, check_res):
        """Publish an error payload (Valid="False" plus the checker remark)
        when the checker output changed or a heartbeat forced an update.
        Returns the new `previous_result`."""
        if previous_result != check_res or self.found_detection_updates:
            self.found_detection_updates = False
            previous_result = check_res
            self.start_time = time.time()
            self.new_results["Valid"] = "False"
            self.new_results["Remark"] = check_res.get("remark", "")
            data_logger.info(self.new_results)
            if self.redis_conn:
                redis_connection.set_key(settings.DATA_RESULT_KEY, orjson.dumps(self.new_results))
        return previous_result

    def start(self) -> None:
        """Main loop: open the video source, then per frame run the validity
        checkers, detect tracks/points, draw, record, and publish to Redis.
        Press 'q' to quit, Space to save a snapshot."""
        server_logger.debug(settings.model_dump())

        # In test mode, read from the configured test video instead of the camera.
        if settings.MODE == "test" and settings.TEST_VIODE_PATH:

            self.video = cv2.VideoCapture(settings.TEST_VIODE_PATH)
        else:
            self.video = cv2.VideoCapture(0, cv2.CAP_ANY)

        if self.video.isOpened() == False:
            server_logger.warning("读取不到视频!!")
            return None

        self.video.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
        self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

        green_point_mask = cv2.imread(
            filename=os.path.join(settings.BASE_DIR, "static", "green_point_mask.png"),
            flags=cv2.IMREAD_GRAYSCALE
        )
        cyan_point_mask = cv2.imread(
            filename=os.path.join(settings.BASE_DIR, "static", "cyan_point_mask.png"),
            flags=cv2.IMREAD_GRAYSCALE
        )

        # direction_mask = cv2.imread(
        #     filename=os.path.join(settings.BASE_DIR, "static", "direction_mask.png"),
        #     flags=cv2.IMREAD_GRAYSCALE
        # )

        # Discard the first frames, keeping only the last read — presumably a
        # capture warm-up; TODO confirm 10 is the required count.
        for _ in range(10):
            return_value, frame = self.video.read()

        if not return_value:
            # Error
            error_logger.error("Error, cannot grab frame.")
            return None

        previous_result = {}
        previous_sorted = False
        self.start_time = time.time()
        # start = 0

        current_server_name = platform.platform().lower()
        # frame_image = cv2.imread("/home/chf/图片/PHD/11.png")
        while return_value:
            start_tick = cv2.getTickCount()  # start timing
            return_value, frame = self.video.read()
            # return_value = True
            # frame = frame_image.copy()

            if 'linux-6.1.0-25-amd64' in current_server_name:  # check for the specific Linux build
                frame = cv2.flip(frame, 0)  # flag 0 flips vertically (around the X axis)


            self.found_detection_updates = False
            check_res = {}

            hsv_image = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            # print("---start")
            # print(hsv_image[230, 375])
            # print(hsv_image[230, 385])
            # print(hsv_image[230, 1605])
            # print(hsv_image[230, 1618])
            # print("----end")
            mask_image = cv2.inRange(
                hsv_image,
                green_lower_color_range,
                green_upper_color_range
            )
            cyan_mask_image = cv2.inRange(
                hsv_image,
                cyan_lower_color_range,
                cyan_upper_color_range
            )
            cyan_car_mask_image = cv2.inRange(
                hsv_image,
                cyan_car_lower_color_range,
                cyan_car_upper_color_range
            )
            # reversed_mask_image = cv2.inRange(
            #     hsv_image,
            #     reversed_green_lower_color_range,
            #     reversed_green_upper_color_range
            # )

            # direction_mask_image = cv2.bitwise_and(reversed_mask_image, direction_mask)
            green_point_mask_image = cv2.bitwise_and(mask_image, green_point_mask)
            cyan_point_mask_image = cv2.bitwise_and(cyan_mask_image, cyan_point_mask)
            cyan_car_point_mask_image = cv2.bitwise_and(cyan_car_mask_image, cyan_point_mask)
            # Reversed points
            # reversed_green_point_contours, _ = \
            #     cv2.findContours(
            #         direction_mask_image,
            #         cv2.RETR_EXTERNAL,
            #         cv2.CHAIN_APPROX_SIMPLE
            #     )

            # Green points
            green_point_contours, hierarchy = \
                cv2.findContours(
                    green_point_mask_image,
                    cv2.RETR_EXTERNAL,
                    cv2.CHAIN_APPROX_SIMPLE
                )

            cyan_point_contours, hierarchy = \
                cv2.findContours(
                    cyan_point_mask_image,
                    cv2.RETR_EXTERNAL,
                    cv2.CHAIN_APPROX_SIMPLE
                )

            cyan_car_point_contours, hierarchy = \
                cv2.findContours(
                    cyan_car_point_mask_image,
                    cv2.RETR_EXTERNAL,
                    cv2.CHAIN_APPROX_SIMPLE
                )
            all_contours = green_point_contours + cyan_point_contours + cyan_car_point_contours
            check_res = self.check_video.start_checkers(frame,hsv_frame=hsv_image, green_point_contours=all_contours, green_signle_contours=all_contours)

            # Gate on the enabled frame-validity checks; on failure, publish
            # the error payload, show the frame and skip detection.
            if (settings.ENABLED_CHECK_WINDOWS and check_res["check_windows"] == False) or \
                (settings.ENABLED_CHECK_LAYOUT and check_res["check_layout"] == False) or \
                (settings.ENABLED_CHECK_FREEZE and check_res["check_freeze"] == False) or \
                (settings.ENABLED_CHECK_CURSOR and check_res["check_cursor"] == False):

                self.set_heart_beat()
                previous_result = self.send_error_data_to_redis(previous_result, check_res)

                key = cv2.waitKey(20)
                if key == 113:  # 'q'
                    break
                cv2.imshow(
                    settings.SHOW_WINDOW_NAME,
                    # frame
                    cv2.resize(
                        frame,
                        settings.SHOW_SIZE
                    )
                )
                self.set_local_tz()
                continue

            results, _ = self.get_green_tracks_with_components(
                all_contours,
                # reversed_green_point_contours,
                hsv_image,
                frame
            )

            self.update_results(status=results, check_res=check_res)
            previous_result = self.send_data_to_redis(previous_result, results)

            self.recorder.start(frame)
            self.picture_process_time(frame, start_tick)

            cv2.imshow(
                settings.SHOW_WINDOW_NAME,
                # frame
                cv2.resize(
                    frame,
                    settings.SHOW_SIZE
                )
            )
            self.set_local_tz()

            key = cv2.waitKey(20)

            if key == 113:  # 'q' to quit
                break

            if key == 32:
                # press Space to capture image (Space ASCII value: 32)
                now = datetime.now()
                dt_string = now.strftime("%Y%m%d%H%M%S")
                img_name = \
                    "opencv_frame_{}.png".format(dt_string)

                cv2.imwrite(img_name, frame)

        return None

    def __enter__(self):
        """Busy-wait until a Redis connection is obtained, then return self."""
        server_logger.info("redis connection ...")
        while not self.redis_conn:
            self.redis_conn = redis_connection.get_redis_connection()
        return self

    def video_close(self):
        """Release the capture device if one was opened."""
        if self.video is not None:
            self.video.release()

    def close(self):
        """Tear down Redis, video, recorder and all OpenCV windows, then mark
        the instance as exited so __exit__ does not close twice."""
        # self.check_video.close()
        # server_logger.info("check video close")

        redis_connection.close()
        server_logger.info("redis close")

        self.video_close()
        server_logger.info("video close")

        self.recorder.close()
        server_logger.info("Recorder close")

        cv2.destroyAllWindows()
        server_logger.info("Exit")

        self.exit = True
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close only if close() has not already run.
        if not self.exit:
            self.close()

if __name__ == "__main__":
    # Run the watchdog; Ctrl-C is logged rather than traced, and close()
    # always runs (it is idempotent only via the instance's `exit` flag,
    # so it may execute twice after a clean context-manager exit).
    watchdog = None
    try:
        watchdog = SmartWatchDog()
        with watchdog:
            watchdog.start()
    except KeyboardInterrupt as interrupt:
        error_logger.error(interrupt)
    finally:
        if watchdog is not None:
            watchdog.close()