import yaml
import cv2
import numpy as np
import collections
import coloredlogs
import logging
import requests
import time
import os
from array import array

# Configure logging defaults (level and message layout) for the whole process.
logging.basicConfig(level = logging.INFO, format = '%(asctime)s - %(levelname)s - %(message)s')
# Create a module-level logger instance.
# NOTE(review): rebinding the name `logging` shadows the logging *module* for
# the rest of this file — later code can only call logger methods
# (info/warning/error/...), not module attributes such as logging.DEBUG.
logging = logging.getLogger(__name__)

# Install coloredlogs on the logger above and customize field/level styles.
coloredlogs.install(
    level='INFO',
    logger=logging,
    fmt='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    field_styles={
        'asctime': {'color': 'green'},
        'hostname': {'color': 'magenta'},
        'levelname': {'color': 'black', 'bold': True},
        'name': {'color': 'blue'},
    },
    level_styles={
        'debug': {'color': 'white'},
        'info': {'color': 'cyan'},
        'warning': {'color': 'yellow'},
        'error': {'color': 'red'},
        'critical': {'background': 'red'}
    }
)


class hash_similarity:
    """Perceptual-hash similarity between an image and a template.

    Computes a 64-bit hash of each input with the selected algorithm and
    derives a similarity score in [0, 1] from their Hamming distance.
    The result is available as ``self.score`` immediately after construction.
    """

    def __init__(self, image, template, method='aHash'):
        # image / template: BGR images (as returned by cv2.imread).
        self.image = image
        self.template = template
        # method: 'aHash' (mean hash), 'pHash' (DCT hash); anything else -> dHash.
        self.method = method
        self.score = 0
        self.calculate_similarity()

    def calculate_similarity(self):
        """Compute both hashes and convert their Hamming distance to a score."""
        self.getHash()
        dist = self.hamming_distance(self.hash1, self.hash2)
        # 64-bit hashes: identical -> 1.0, completely different -> 0.0.
        self.score = round(1 - dist / 64, 3)

    def getHash(self):
        """Dispatch to the configured hash algorithm for both images."""
        if self.method == 'pHash':
            hash_func = self.pHash
        elif self.method == 'aHash':
            hash_func = self.aHash
        else:
            hash_func = self.dHash
        self.hash1 = hash_func(self.image)
        self.hash2 = hash_func(self.template)

    def pHash(self, image):
        """Perceptual hash: DCT of a 32x32 grayscale image, top-left 8x8 block
        thresholded against the block mean. Returns a 64-element bit list."""
        image = cv2.resize(image, (32, 32), interpolation=cv2.INTER_CUBIC)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # DCT on the float image, then keep only the low-frequency 8x8 corner.
        dct = cv2.dct(np.float32(image))
        dct_roi = dct[0:8, 0:8]
        average = np.mean(dct_roi)
        # 1 where the coefficient exceeds the mean, else 0 (row-major order).
        return [1 if dct_roi[i, j] > average else 0
                for i in range(dct_roi.shape[0])
                for j in range(dct_roi.shape[1])]

    # Mean (average) hash algorithm.
    def aHash(self, image):
        """Average hash: 8x8 grayscale image thresholded against its own mean."""
        image = cv2.resize(image, (8, 8), interpolation=cv2.INTER_CUBIC)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        average = np.mean(image)
        return [1 if image[i, j] > average else 0
                for i in range(image.shape[0])
                for j in range(image.shape[1])]

    # Difference hash algorithm.
    def dHash(self, image):
        """Difference hash: 9x8 grayscale image; each bit is 1 when a pixel is
        brighter than its right-hand neighbour."""
        image = cv2.resize(image, (9, 8), interpolation=cv2.INTER_CUBIC)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return [1 if image[i, j] > image[i, j + 1] else 0
                for i in range(8)
                for j in range(8)]

    # Hamming distance between two equal-length bit lists.
    def hamming_distance(self, hash1, hash2):
        """Count positions where the two hashes disagree."""
        return sum(1 for a, b in zip(hash1, hash2) if a != b)


class cfg_parser:
    """Parse the YAML configuration file and expose its values as attributes."""

    def __init__(self, cfg_path, num_regions=4):
        # Path to the YAML configuration file.
        self.cfg_path = cfg_path
        # Number of vehicle types whose detection regions are configured.
        self.num_regions = num_regions
        # Detected window type; "null" means unknown.
        self.window_type = "null"

        self.parser()

    # Affine crop of rotated rectangular regions.
    def get_perspective_transform(self, image, pos):
        """Crop every (possibly rotated) quadrilateral in *pos* out of *image*.

        Parameters:
        image: source frame.
        pos: iterable of point sets (one per region), or None.

        Returns:
        A list of cropped images; [] when *pos* is None.
        """
        if pos is None:
            return []
        points = np.array(pos, dtype="int32")
        cropped_image_list = []
        for point in points:
            # Minimum-area rotated rectangle: center, (width, height), angle.
            rect = cv2.minAreaRect(point)
            center, size, angle = rect
            # Rotate the whole frame so the rectangle becomes axis-aligned...
            rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1)
            rotated_image = cv2.warpAffine(image, rotation_matrix, (image.shape[1], image.shape[0]))
            # ...then cut the now-upright rectangle out of it.
            cropped_image = cv2.getRectSubPix(
                rotated_image,
                tuple(int(x) for x in size),
                tuple(int(x) for x in center))
            cropped_image_list.append(cropped_image)

        return cropped_image_list

    def update_debug(self):
        """Propagate the 'all' debug master switch to every individual flag.

        When 'all' is absent or None, individual flags are left untouched.
        """
        master = self.debug.get('all')  # .get: no KeyError when unset
        if master is not None:
            # A configured master switch overrides every individual flag.
            for key in self.debug:
                self.debug[key] = bool(master)

    def parser(self):
        """Load the YAML file and populate all configuration attributes."""
        with open(self.cfg_path, 'r', encoding='utf-8') as f:
            cfg = yaml.safe_load(f)

        # Template matching: ROI, template image and score threshold per key.
        for i in ['1', '2', '3', 'a', 'b', 'a2', 'b2', '1_1', '2_1']:
            key = 'temp%s' % (i.upper())
            setattr(self, "temp%s_roi" % (i), cfg['Template'][key])            # ROI
            setattr(self, "temp%s_image" % (i), cv2.imread(cfg['Path'][key]))  # template image
            setattr(self, "temp%s_score" % (i), cfg['Threshold'][key])         # match threshold
        for i in ['a1', 'b1']:
            # These entries only define a region (image/threshold are shared).
            setattr(self, "temp%s_roi" % (i), cfg['Template']['temp%s' % (i.upper())])

        # Detection regions per vehicle type (index 0 has no numeric suffix).
        for i in range(self.num_regions):
            for j in ["top", "bottom", "left", "right"]:
                if i == 0:
                    setattr(self, "%s_region" % (j), cfg['Region']['%s' % (j)])
                else:
                    setattr(self, "%s_region%d" % (j, i), cfg['Region']['%s%d' % (j, i)])

        self.clamp_delay_time = cfg['Threshold']['clamp_delay']  # clamp positioning delay (seconds)
        self.hole_pre_time = cfg['Threshold']['hole_pre']  # hole positioning lead time (seconds)
        self.pad = cfg['Threshold']['pad']  # padding added around match regions
        self.ssim_score = cfg['Threshold']['ssim']  # image similarity threshold
        self.shadom_threshold = cfg['Threshold']['shadow']  # shadow threshold
        # Shadow removal is enabled only when the threshold is an integer.
        self.shadom_flag = isinstance(cfg['Threshold']['shadow'], int)
        self.correct_threshold = cfg['Threshold']['correct']  # error-correction threshold
        self.debug = cfg['Debug']  # debug switches
        self.update_debug()

        # HTTP reporting parameters.
        self.url = cfg['Path']['url']  # server URL
        self.path = cfg['Path']['path']  # video path
        self.interval = cfg['Threshold']['interval']  # frame skip interval
        self.device_id = cfg['Path']['deviceCode']  # device code
        self.device_name = cfg['Path']['deviceName']  # device name
        self.save_path = cfg['Path']['savePath']  # image save directory
        self.point_name = cfg['Path']['pointName']  # monitored point names

        # Overlay contours per vehicle type (used when drawing results).
        self.eqm5 = cfg['Plot']['eqm5']
        self.h5 = cfg['Plot']['h5']
        self.h6 = cfg['Plot']['h6']
        self.hq9 = cfg['Plot']['hq9']
    
        
class window_gluing_detector:
    """Detects whether window gluing was applied.

    Matches clamp/hole templates to locate the gluing event in the video,
    captures region crops before and after gluing, compares them with a
    global SSIM and pushes the verdict into an output queue.
    """

    def __init__(self, cfg_data):
        # Side length of the square patch quadrilateral ROIs are warped to.
        self.rec_size = 64
        self.cfg_data = cfg_data

        # Positioning signals (clamp / hole seen in the current frame).
        self.clamp_singal, self.hole_singal = False, False

        # Alarm latches: guarantee that one event is reported only once.
        self.clamp_flag, self.hole_flag = True, True

        self.first_singal, self.second_singal = False, False
        self.first_singal_hq9, self.second_singal_hq9 = False, False

        # Frame indices of the positioning events (inf = not positioned).
        self.clamp_time, self.hole_time = float('inf'), float('inf')

        # Ring buffer of recent frames (25 fps thinned by the skip interval).
        self.pre_frame_list = collections.deque(
            maxlen=int(25.0 * self.cfg_data.hole_pre_time / self.cfg_data.interval))

        # Region crops captured before / after gluing.
        self.before_gluing_image, self.after_gluing_image = None, None

        # Number of completed detections.
        self.detect_count = 0

        self.window_type = None

        # Overall alarm flag for the current event.
        self.warn_flag = False

        self.region_all = None

        self.score_HQ9 = [0, 0]

        self.first_type = True

        self.before_save_singal, self.after_save_singal = False, False

    # Template matching.
    def template_matching(self, roi_img:cv2.Mat, temp_img:cv2.Mat, method=cv2.TM_CCORR_NORMED):
        """
        Parameters:
        roi_img: ROI image, i.e. the image to search in.
        temp_img: template image to look for inside the ROI image.
        method: OpenCV template matching method, default cv2.TM_CCORR_NORMED.

        Returns:
        Match score describing how similar the template is to the best
        matching area of the ROI image.
        """
        if len(roi_img.shape) == 3:
            # Convert both ROI and template from BGR to grayscale.
            roi_img = cv2.cvtColor(roi_img, cv2.COLOR_BGR2GRAY)
            temp_img = cv2.cvtColor(temp_img, cv2.COLOR_BGR2GRAY)
        # Run the template match.
        res = cv2.matchTemplate(roi_img, temp_img, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

        # For squared-difference methods a smaller value is a better match.
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            score = min_val
        else:
            score = max_val
        return score

    # Shadow removal.
    def shadow_removal(self, img):
        """Whiten every pixel brighter than the configured shadow threshold.

        NOTE: modifies *img* in place as well as returning it.
        """
        if len(img.shape) == 3:
            gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            gray_img = img
        img[gray_img > self.cfg_data.shadom_threshold] = 255
        return img

    # Perceptual-hash similarity between two images.
    def hash(self, img1, img2):
        # NOTE(review): method name shadows the builtin `hash`; kept for
        # interface compatibility.
        hash_score = hash_similarity(image=img1, template=img2, method='pHash')
        return np.array([hash_score.score])

    # Image similarity for a single pair of images.
    def ssim(self, img1, img2):
        """
        Compute a global structural similarity index (SSIM) of two images.

        Parameters:
        img1: first image (BGR).
        img2: second image (BGR), same shape as img1.

        Returns:
        np.ndarray holding a single rounded SSIM value.
        """
        # Convert both images to grayscale.
        gray_img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        gray_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

        if self.cfg_data.shadom_flag:
            gray_img1 = self.shadow_removal(gray_img1)
            gray_img2 = self.shadow_removal(gray_img2)

        # Means and variances over the whole image.
        mean1, mean2 = np.mean(gray_img1), np.mean(gray_img2)
        var1, var2 = np.var(gray_img1), np.var(gray_img2)

        # Covariance and the SSIM expression.
        cov = np.cov(gray_img1.flatten(), gray_img2.flatten())[0, 1]
        # Stabilising constants of the standard SSIM formulation (L = 255).
        c1 = (0.01 * 255) ** 2
        c2 = (0.03 * 255) ** 2
        ssim = abs((2 * mean1 * mean2 + c1) * (2 * cov + c2) / ((mean1 ** 2 + mean2 ** 2 + c1) * (var1 + var2 + c2)))

        return np.array([round(ssim, 3)])

    # Image similarity over lists of images.
    def cal_similarity(self, img1:list, img2:list):
        """
        Compute pairwise similarity between two image lists.

        Parameters:
        img1 (list): first image list (crops before gluing).
        img2 (list): second image list (crops after gluing).

        Returns:
        np.ndarray: concatenated similarity scores; 8 values of -1 when
        img1 is empty (no regions were captured).
        """
        if img1 == []:
            return np.zeros(8) - 1
        # One SSIM score per paired crop, concatenated into one array.
        return np.concatenate([self.ssim(a, b) for a, b in zip(img1, img2)])

    def get_perspective_transform(self, roi):
        """Return the perspective matrix mapping *roi* onto a square patch of
        side self.rec_size, plus the ROI contour as an int32 point array."""
        # Destination corners of the warped square patch.
        pts_dst = np.float32([[0, 0], [self.rec_size, 0],
                              [self.rec_size, self.rec_size], [0, self.rec_size]])
        pts_roi = np.float32(roi)
        return (cv2.getPerspectiveTransform(pts_roi, pts_dst), pts_roi.reshape((-1, 1, 2)).astype(np.int32))

    # Compute a template match score for one configured region.
    def calculate_score(self, img, name="temp1"):
        """
        Crop the configured ROI *name* out of *img* and score it against the
        corresponding template image.

        Parameters:
        - img: input frame.
        - name: configuration key prefix, e.g. 'temp1'; selects the ROI,
          template image and threshold attributes on cfg_data.

        Returns:
        - score: template matching score.
        """
        # getattr instead of eval(): same lookup, no code execution.
        roi_region = getattr(self.cfg_data, name + "_roi")
        pad = self.cfg_data.pad
        if len(roi_region) == 2:
            # Axis-aligned ROI given as two corner points; pad it, clamped to
            # the frame bounds to avoid out-of-range slices.
            (x0, y0), (x1, y1) = roi_region
            temp_roi_image = img[
                max(0, y0 - pad):min(img.shape[0], y1 + pad),
                max(0, x0 - pad):min(img.shape[1], x1 + pad)
            ]
            # tempa1/tempb1 reuse the template images of tempa/tempb.
            if name[-2:] in ["a1", "b1"]:
                return self.template_matching(temp_roi_image, getattr(self.cfg_data, name[:-1] + "_image"))
            return self.template_matching(temp_roi_image, getattr(self.cfg_data, name + "_image"))
        else:
            # Quadrilateral ROI: warp it onto a 64x64 patch first.
            M, Pts = self.get_perspective_transform(roi_region)
            temp_roi_image = cv2.warpPerspective(img, M, (64, 64))
            roi_img = temp_roi_image
            temp_img = getattr(self.cfg_data, name + "_image")
            return self.template_matching(roi_img, temp_img)

    def window_type_detect(self, img):
        """Decide the door type from template scores and select the matching
        detection regions (stored in self.region_all)."""
        score_a, score_b = self.cfg_data.tempa_score, self.cfg_data.tempb_score
        if self.cfg_data.debug['type_score']:
            logging.warning(f"E-QM5(E111):{self.calculate_score(img, 'tempa')}-{self.calculate_score(img, 'tempb')}, H5(C100):{self.calculate_score(img, 'tempa1')}-{self.calculate_score(img, 'tempb1')}, H6(C101):{self.calculate_score(img, 'tempa2')}-{self.calculate_score(img, 'tempb2')}, HQ9(C095):{self.score_HQ9[0]}-{self.score_HQ9[1]}")
        # Both templates of a type above threshold -> that door type.
        if self.calculate_score(img, "tempa") >= score_a and self.calculate_score(img, "tempb") >= score_b:
            logging.info("++++++++++++++++++++检测到车门类型为:E-QM5(E111)++++++++++++++++++++")
            self.cfg_data.window_type = "E-QM5(E111)"
            self.region_all = np.concatenate((self.cfg_data.top_region1, self.cfg_data.bottom_region1, self.cfg_data.left_region1, self.cfg_data.right_region1), 0)
        elif self.calculate_score(img, "tempa1") >= score_a and self.calculate_score(img, "tempb1") >= score_b:
            logging.info("++++++++++++++++++++检测到车门类型为:H5(C100)++++++++++++++++++++")
            self.cfg_data.window_type = "H5(C100)"
            self.region_all = np.concatenate((self.cfg_data.top_region, self.cfg_data.bottom_region, self.cfg_data.left_region, self.cfg_data.right_region), 0)
        elif self.calculate_score(img, "tempa2") >= self.cfg_data.tempa2_score and self.calculate_score(img, "tempb2") >= self.cfg_data.tempb2_score:
            logging.info("++++++++++++++++++++检测到车门类型为:H6(C101)++++++++++++++++++++")
            self.cfg_data.window_type = "H6(C101)"
            self.region_all = np.concatenate((self.cfg_data.top_region2, self.cfg_data.bottom_region2, self.cfg_data.left_region2, self.cfg_data.right_region2), 0)
        elif self.window_type == "HQ9(C095)":
            # HQ9 is recognised earlier from its dedicated clamp templates.
            logging.info("++++++++++++++++++++检测到车门类型为:HQ9(C095)++++++++++++++++++++")
            self.cfg_data.window_type = "HQ9(C095)"
            self.region_all = np.concatenate((self.cfg_data.top_region3, self.cfg_data.bottom_region3, self.cfg_data.left_region3, self.cfg_data.right_region3), 0)
        else:
            logging.warning("++++++++++++++++++++车门类型未知++++++++++++++++++++")
            self.cfg_data.window_type = "null"
            self.region_all = None

    # Main detection routine.
    def detect(self, img:cv2.Mat, frame, buffer):
        """
        Main detection entry point: recognises the clamp and the hole,
        determines the door type, captures before/after gluing crops and
        pushes the comparison result into *buffer*.

        Parameters:
        img (cv2.Mat): current frame.
        frame (int): current frame index.
        buffer: output queue receiving the detection result.

        Returns:
        img (cv2.Mat): the (possibly annotated) frame.
        """
        # Clamp and hole template scores (before- and after-gluing markers).
        temp1_score, temp2_score, temp3_score, temp1_1_score, temp2_1_score = (
            self.calculate_score(img, "temp1"),
            self.calculate_score(img, "temp2"),
            self.calculate_score(img, "temp3"),
            self.calculate_score(img, "temp1_1"),
            self.calculate_score(img, "temp2_1"),
        )
        if self.cfg_data.debug['temp_score']:
            logging.warning(f"temp1_score:{temp1_score}, temp2_score:{temp2_score}, temp3_score:{temp3_score}, temp1_1_score:{temp1_1_score}, temp2_1_score:{temp2_1_score}")
        if temp1_score >= self.cfg_data.temp1_score:
            self.first_singal = True
        if temp1_1_score >= self.cfg_data.temp1_1_score:
            self.first_singal_hq9 = True
        if temp2_score >= self.cfg_data.temp2_score:
            self.second_singal = True
        if temp2_1_score >= self.cfg_data.temp2_1_score:
            self.second_singal_hq9 = True
            self.score_HQ9 = [temp1_1_score, temp2_1_score]

        temp_mean = (temp1_score + temp2_score) * 0.5
        temp_mean_hq = (temp1_1_score + temp2_1_score) * 0.5

        # Clamp positioning: both signals latched and a high average score.
        if self.first_singal_hq9 and self.second_singal_hq9 and temp_mean_hq >= 0.85:
            self.clamp_singal = True
            self.window_type = "HQ9(C095)"
        elif self.first_singal and self.second_singal and temp_mean >= 0.85:
            self.clamp_singal = True
            self.first_singal, self.second_singal = False, False
            self.first_type = False
        else:
            self.clamp_singal = False

        if self.cfg_data.debug['score_show']:
            # Overlay the raw scores for on-screen debugging.
            cv2.putText(img, f"temp1:{temp1_score:.2f}", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            cv2.putText(img, f"temp2:{temp2_score:.2f}", (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            cv2.putText(img, f"temp1_1:{temp1_1_score:.2f}", (10, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            cv2.putText(img, f"temp2_1:{temp2_1_score:.2f}", (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            cv2.putText(img, f"temp3:{temp3_score:.2f}", (10, 250), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

        # First alarm of this cycle: latch the clamp timestamp.
        if self.clamp_singal and self.clamp_flag:
            self.clamp_time = frame
            self.clamp_flag, self.hole_flag = False, True

        # Capture the pre-gluing frame N frames after the clamp locked
        # (waiting for the door to stabilise).
        if frame - self.clamp_time == int(25 * self.cfg_data.clamp_delay_time / self.cfg_data.interval):
            # Detect the window type from the stabilised frame.
            self.before_save_singal = True
            self.window_type_detect(img)
            current_image = img
            # Crop the configured regions out of the frame.
            self.before_gluing_image = self.cfg_data.get_perspective_transform(current_image, self.region_all)
            self.clamp_time = float('inf')
        # Keep the last N frames around.
        self.pre_frame_list.append(img)
        if self.before_save_singal:
            if temp3_score >= self.cfg_data.temp3_score:
                self.first_singal_hq9, self.second_singal_hq9 = False, False
                self.hole_singal = True
            else:
                self.hole_singal = False
            if self.hole_singal and self.hole_flag:
                self.hole_time = frame
                self.clamp_flag, self.hole_flag = True, False
        # Capture the post-gluing frame: the oldest buffered frame, i.e. N
        # frames earlier, to match the pre-gluing viewpoint.
        if self.before_save_singal and (frame == self.hole_time):
            self.after_save_singal = True
            logging.warning("====================保存已涂胶帧结果====================")
            current_image = self.pre_frame_list[0]

            self.after_gluing_image = self.cfg_data.get_perspective_transform(current_image, self.region_all)
            self.hole_time = float('inf')
            self.detect_count += 1

            # Compare the before/after crops by similarity.
            if self.before_gluing_image is None or self.after_gluing_image is None:
                logging.info("未获取到图片，跳过")
                return img
            ssim_score = self.cal_similarity(self.before_gluing_image, self.after_gluing_image)

            ssim_flag = ssim_score >= self.cfg_data.ssim_score
            if self.cfg_data.debug['ssim_score']:
                # Reuse the score computed above instead of recomputing it.
                logging.info("SSIM: %s" % ssim_score)

            # Alarm timestamp.
            current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
            # Any region above the threshold counts as "not glued".
            if np.any(ssim_flag):
                logging.info("检测结果：未涂胶！！！！！！！！！！")
                self.warn_flag = True
                # Error-correction: override the verdict by the mean score.
                mean_score = np.mean(ssim_score)
                logging.info(f"mean_ssim_score:{mean_score}")
                if mean_score <= 0.75:
                    logging.warning("修正检测结果：点位全部设置已涂胶！！！！！！！！！！")
                    self.warn_flag = False
                    ssim_flag = [False] * len(self.before_gluing_image)
                elif mean_score >= 0.85:
                    logging.warning("修正检测结果：点位全部设置未涂胶！！！！！！！！！！")
                    ssim_flag = [True] * len(self.before_gluing_image)
            else:
                logging.warning("检测结果：已涂胶！！！！！！！！！！")
                self.warn_flag = False

            # Keep counters bounded over long runs.
            if frame > 100000:
                frame = 0
            if self.detect_count > 100000:
                self.detect_count = 0
            buffer.put([self.cfg_data, self.detect_count, current_image, self.region_all, ssim_flag, current_time, self.warn_flag])
            self.first_type = True
            self.after_save_singal, self.before_save_singal = False, False

        if self.cfg_data.debug['frame_singal']:
            logging.info("当前帧:%d, 夹具信号:%d, 孔洞信号:%d" % (frame, self.clamp_singal, self.hole_singal))
        return img
    
    
def save_result(cfg_data, detect_count, img, region_all, ssim_score, warn_flag):
    """
    Save the annotated result image as an original PNG plus a compressed JPG.

    Creates a per-day directory under cfg_data.save_path, draws the overlay
    contours of the detected window type (green = OK, red = alarm) and writes
    both files.

    Parameters:
    cfg_data: parsed configuration (save path, window type, plot contours).
    detect_count: running detection counter, used in the file names.
    img: image to annotate and save.
    region_all: detected regions (kept for interface compatibility; unused).
    ssim_score: per-point similarity flags (False = OK, True = not glued).
    warn_flag: overall alarm flag; selects the contour colour.

    Returns:
    pic_save_dir: path of the saved original PNG.
    pic_save_compress_dir: path of the saved compressed JPG.
    """
    # Date (directory) and time-of-day (file name) components.
    year_month_day = time.strftime("%Y-%m-%d", time.localtime())
    hour_minute_second = time.strftime("%H_%M_%S", time.localtime())
    # Per-day save directory.
    date_dir = os.path.join(os.path.abspath(cfg_data.save_path), year_month_day)
    # Detected window type.
    window_type = cfg_data.window_type
    # Create the directory if it does not exist yet.
    if not os.path.exists(date_dir):
        logging.warning(f"The path does not exist. Create:{date_dir}")
        try:
            os.makedirs(date_dir)
        except OSError as e:
            logging.error(f"Failed to create path:{date_dir}, Error: {e}")
    pic_save_dir = os.path.join(date_dir, f"{window_type}_{hour_minute_second}_{detect_count}_original.png")
    pic_save_compress_dir = os.path.join(date_dir, f"{window_type}_{hour_minute_second}_{detect_count}_compress.jpg")

    # Halve the resolution before annotating/saving.
    img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
    # Red when the alarm fired, green otherwise.
    color = (0, 0, 255) if warn_flag else (0, 255, 0)
    # Map each window type to its configured pair of overlay contours.
    contour_attr_by_type = {
        "H5(C100)": "h5",
        "H6(C101)": "h6",
        "HQ9(C095)": "hq9",
        "E-QM5(E111)": "eqm5",
    }
    contour_attr = contour_attr_by_type.get(cfg_data.window_type)
    if contour_attr is not None:
        draw1, draw2 = getattr(cfg_data, contour_attr)
        cv2.drawContours(img, [np.array(draw1, dtype=np.int32)], -1, color, 2)
        cv2.drawContours(img, [np.array(draw2, dtype=np.int32)], -1, color, 2)
    # cv2.imwrite reports failure through its boolean return value, not by
    # raising IOError, so check the result explicitly.
    # Original PNG (compression level 0-9; lower is faster/bigger).
    if not cv2.imwrite(pic_save_dir, img, [cv2.IMWRITE_PNG_COMPRESSION, 8]):
        logging.error(f"Error occurred during image saving: {pic_save_dir}")
    # Compressed JPG (quality 0-100; higher is better).
    if not cv2.imwrite(pic_save_compress_dir, img, [cv2.IMWRITE_JPEG_QUALITY, 10]):
        logging.error(f"Error occurred during image saving: {pic_save_compress_dir}")
    return pic_save_dir, pic_save_compress_dir
def post_json(cfg_data, pic1:str, pic2:str, status:str, time:str) -> None:
    """
    Send the detection result to the server as a JSON POST request.

    Parameters:
    - pic1: path of the original alarm image
    - pic2: path of the compressed alarm image
    - status: per-point flags; truthy -> 2 (not qualified), falsy -> 1 (qualified)
    - time: alarm timestamp string (parameter name shadows the `time` module
      inside this function; kept for interface compatibility)
    - return: None
    """
    # One detail entry per monitored point.
    details_list = [
        {
            "memo": None,
            "num": None,
            "point": cfg_data.point_name[i],
            "qualified": 2 if flag else 1,
        }
        for i, flag in enumerate(status)
    ]
    url = cfg_data.url
    name = cfg_data.window_type
    device_id = cfg_data.device_id  # renamed from `id` to avoid shadowing the builtin
    # Request payload.
    data = {
        "dataTime": time,
        "details": details_list,
        "deviceCode": device_id,
        "name": name,
        "picPath": pic1,
        "picPathCompress": pic2
    }
    # Log the target URL and the payload.
    logging.info(f"url: {url}")
    logging.info(f"data: {data}")
    try:
        # POST with a timeout so a dead server cannot block the pipeline.
        response = requests.post(url=url, json=data, timeout=5)

        if response.status_code == 200:
            # Success: log it and try to decode the response body.
            logging.info(f'Request was successful: {name}')
            try:
                response_json = response.json()
                logging.debug(f'Response JSON: {response_json}')
            except ValueError:
                # Response body was not valid JSON.
                logging.error('Response is not a valid JSON')
        else:
            # Non-200 response: log status and reason.
            logging.info(f'Request failed with status code {response.status_code} and reason: {response.reason}')
    except requests.exceptions.Timeout:
        logging.error('Request timed out')
    except requests.exceptions.ConnectionError:
        logging.error('Connection error occurred')
    except requests.exceptions.RequestException as e:
        # Any other requests-level failure.
        logging.error(f'An error occurred: {e}')

# Save results and report them to the server.
def save_and_post(buffer):
    """
    Drain *buffer*, persisting each detection result and reporting it.

    Parameters:
    - buffer: queue of items (cfg_data, detect_count, img, region_all,
      ssim_score, current_time, warn_flag); a None item terminates the loop.

    For every item this calls save_result to write the two result images and
    post_json to push the result to the server.
    """
    while True:
        # Next queued detection result (blocking).
        item = buffer.get()
        # None is the end-of-stream sentinel.
        if item is None:
            break
        cfg_data, detect_count, img, region_all, ssim_score, current_time, warn_flag = item
        # Unknown window type: nothing to save or report.
        if cfg_data.window_type == "null":
            continue
        # Persist the annotated images...
        pic1, pic2 = save_result(cfg_data=cfg_data, detect_count=detect_count, img=img,
                                 region_all=region_all, ssim_score=ssim_score, warn_flag=warn_flag)
        # ...then push the result to the server.
        post_json(cfg_data=cfg_data, pic1=pic1, pic2=pic2, status=ssim_score, time=current_time)
def video_process(cfg_data, buffer):
    """
    Read the configured video file frame by frame into *buffer*.

    Parameters:
    cfg_data: configuration object providing the video path (cfg_data.path).
    buffer: thread-safe queue receiving the frames; a trailing None marks EOF.
    """
    # Open the video source.
    capture = cv2.VideoCapture(cfg_data.path)
    try:
        while capture.isOpened():
            ok, frame = capture.read()
            if not ok:
                # End of stream (or a read failure): stop producing frames.
                break
            # Hand the frame to the consumer.
            buffer.put(frame)
    except Exception as exc:
        # Log the failure; cleanup happens in the finally block below.
        logging.error(f"An error occurred:{exc}")
    finally:
        # Release the video resource.
        capture.release()
        # Sentinel telling consumers the video is finished.
        buffer.put(None)

def detector_process(buffer1, buffer2, cfg_data):
    """
    Run the gluing detector over the incoming frame stream.

    Creates a window_gluing_detector and, honouring the configured frame-skip
    interval, runs detection on frames pulled from buffer1, forwarding results
    through buffer2.

    Parameters:
    - buffer1: input frame queue; a None item signals end of input.
    - buffer2: output queue for detection results; the None sentinel is
      forwarded on exit.
    - cfg_data: configuration with the detection interval and detector setup.

    Returns:
    None.

    Raises:
    ValueError: when a frame does not have the expected 4K BGR shape.
    """
    logging.info("Starting detector process...")
    # Create the window gluing detector instance.
    detector = window_gluing_detector(cfg_data)
    # Frame-skip interval.
    interval = cfg_data.interval
    counter = 0
    while True:
        # Next frame from the producer.
        frame = buffer1.get()
        # None: end of stream — propagate the sentinel and stop.
        if frame is None:
            buffer2.put(None)
            break
        # Explicit validation instead of `assert`, which is stripped under -O.
        if frame.shape != (2160, 3840, 3):
            raise ValueError("frame shape is not (2160, 3840, 3)")
        # Only analyse every `interval`-th frame.
        if counter % interval == 0:
            _ = detector.detect(frame, counter, buffer2)
        counter += 1

if __name__ == "__main__":
    # Create a config parser instance for the config file "./cfg_new.yaml".
    parser = cfg_parser("./cfg_new.yaml", 4)
    # Convert the top region configuration to a numpy array for convenience.
    arr = np.array(parser.top_region)
    # Print the top region info to debug / validate the parsed configuration.
    print(arr)
    
