# Assembles the detection models from cfg and the checkbox selections, and outputs the detection results for the current frame
import time
import numpy as np
import cv2
from PyQt5.QtGui import QImage,QPixmap
from PyQt5.QtCore import Qt
from service.face_utils import getlargest_face,face_plot
from PyQt5 import QtGui
from datetime import datetime
from copy import deepcopy
import torch

from fatigue_algo.fatigue_detector.eye_Detector import EyeDetector_real
from fatigue_algo.fatigue_detector.mouth_detector import MouthDetector_real
from fatigue_algo.fatigue_detector.head_detector import HeadDetector_real
from fatigue_algo.fatigue_detector.align_front_face import landmark_list2dict,landmark_dict2list,align_face,rotate_landmarks
from fatigue_algo.fatigue_detector.frown_img_preprocess import cal_forthROILandmark, crop_ROI, brow_process
from fatigue_algo.fatigue_detector.frown_detector import frown_infer_CE
from fatigue_algo.fatigue_detector import mobilenetv3
from fatigue_algo.fatigue_detector.causalModel.causal_infer import CausalInferModel_real
from logger import _get_logger

import pathlib
from pathlib import Path
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = str(Path(curPath).parent)
sys.path.append(rootPath)

# Create a fresh log file named after the current Unix timestamp
logDir = os.path.join(rootPath, "logs")
# FIX: make sure the logs directory exists, otherwise touch() fails on a fresh checkout
os.makedirs(logDir, exist_ok=True)
filePath = os.path.join(logDir, f'{int(time.time())}.log')
# Create an empty file when it does not exist yet
if not os.path.isfile(filePath):
    pathlib.Path(filePath).touch()
logger = _get_logger(filePath, 'info')
detect_info = ""  # log text accumulated for the current frame

count = 0

class DetectHandler():
    """Assembles the detection models selected via cfg/checkboxs and renders
    the per-frame detection results onto the bound detector window."""

    def __init__(self, cfg, checkboxs, face_detector, landmark_detector, detectWindow):
        '''
        :param cfg: dict, model configuration
        :param checkboxs: list, selected detection types
        :param face_detector: face detector
        :param landmark_detector: facial-landmark detector
        :param detectWindow: main window bound to this handler; results are drawn on it
        '''
        self.cfg = cfg
        self.checkboxs = checkboxs
        self.camera = None
        self.scrfd_detector = face_detector
        self.landmark_detector = landmark_detector
        self.headDetector = HeadDetector_real()
        self.detectWindow = detectWindow

        # Load every behaviour detector; entries without a dedicated detector
        # object (frown/fatigue) are handled inside their own methods.
        self.behavior_detector_map = {
            "blink": EyeDetector_real(),
            "yawn": MouthDetector_real(),
            "head": self.headDetector,
            "frown": None,
            "fatigue": None
        }

        # Per-frame face/landmark results shared by the behaviour detectors.
        self.basic_info = dict()
        # Inter-frame state of the behaviour detectors, e.g.
        #   blink: whether a blink was detected in the previous frame
        #   yawn:  whether a yawn was detected in the previous frame
        self.behavior_detect_flag = {
            "blink": 0,
            "yawn": 0,
        }

        # Load the frown model.
        self.frown_infer = frown_infer_CE  # inference entry point
        self.frownModel = mobilenetv3.MobileNetV3_Small(num_classes=2)
        weights_path = "fatigue_algo/fatigue_detector/frownMobileNet_CE_sobel.pth"
        if torch.cuda.is_available():
            self.frownModel.load_state_dict(torch.load(weights_path))
        else:
            # Remap the weights onto CPU when CUDA is unavailable.
            self.frownModel.load_state_dict(torch.load(weights_path, map_location='cpu'))

        # Fatigue model (causal inference).
        self.causalInferModel = CausalInferModel_real(filePath="fatigue_algo/fatigue_detector/causalModel/fatigue_strategies.txt")

    def getEAR_B(self):
        """Return the subject's calibrated EAR_B: the mean of the values listed
        in EAR_B.txt in the project root (one value per line)."""
        # FIX: the file handle was never closed; use a context manager.
        with open("EAR_B.txt", encoding='utf-8') as file:
            EAR_list = [np.float64(line) for line in file]
        return np.mean(EAR_list)

    def detect_finished(self):
        """Stop detection: release the camera, clear the text browser and show
        the default placeholder image on the window's QLabel."""
        self.camera.release()
        self.detectWindow.textBrowser.clear()

        # Default placeholder picture for the UI.
        img = cv2.imread('fatigue.png')
        height, width, bytesPerComponent = img.shape
        bytesPerLine = 3 * width
        cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)  # in-place BGR -> RGB for QImage

        # Load the picture into the QLabel through a QImage.
        qImg = QImage(img.data, width, height, bytesPerLine, QImage.Format_RGB888)
        qImg_scaled = qImg.scaled(self.detectWindow.label.width(), self.detectWindow.label.height(),
                                  Qt.IgnoreAspectRatio, Qt.SmoothTransformation)

        pixmap = QtGui.QPixmap.fromImage(qImg_scaled)
        self.detectWindow.label.setPixmap(pixmap)

    def behavior_detect(self, checkbox_name, position):
        '''
        Dispatch one selected behaviour detection for the current frame.

        :param checkbox_name: selected detection type
        :param position: where the result text is drawn
        :return: always None (results are drawn/accumulated as side effects)
        '''
        behavior_detector = self.behavior_detector_map[checkbox_name]
        if checkbox_name == 'blink':
            self.blink_detect(checkbox_name, behavior_detector, position)
        elif checkbox_name == 'yawn':
            self.yawn_detect(checkbox_name, behavior_detector, position)
        elif checkbox_name == 'head':
            self.head_action_detect(behavior_detector, position)
        elif checkbox_name == 'frown':
            self.frown_action_detect(position)
        elif checkbox_name == 'fatigue':
            self.fatigue_detect(position)
        return None

    def detect(self):
        """Open the default camera and run the selected detection algorithms on
        every captured frame, echoing the annotated frame back to the QLabel.
        The loop ends once the camera is released (see detect_finished)."""
        print(f"你选择要检测的类型包括: {self.checkboxs}")
        print("正在为你配置算法...")

        self.camera = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        while self.camera.isOpened():
            ret, frame = self.camera.read()
            if ret:
                dets = self.scrfd_detector.detect_faces(frame)[0]

                if len(dets) != 0:
                    # Keep only the largest detected face.
                    det = dets[getlargest_face(dets)]
                    face_plot(frame, [det])

                    landmark = self.landmark_detector.detect_landmarks(frame, [[det]])
                    pitch, roll, yaw = self.headDetector.get_pitch_roll_yaw(self.landmark_detector, frame, det, landmark)  # head pose
                    self.basic_info['frame'] = frame
                    self.basic_info['det'] = det
                    self.basic_info['landmark'] = landmark
                    self.basic_info['rotate_vector'] = (pitch, roll, yaw)

                    global detect_info
                    detect_info = ""
                    # Run every selected algorithm; each one draws on its own row.
                    for index, checkbox in enumerate(self.checkboxs):
                        position = (60, 60 * (index + 1))
                        self.behavior_detect(checkbox, position)

                    # Emit the accumulated per-frame log line.
                    logger.info(detect_info)

                # Echo the annotated frame back onto the QLabel.
                height, width, bytesPerComponent = frame.shape
                bytesPerLine = 3 * width
                cv2.cvtColor(frame, cv2.COLOR_BGR2RGB, frame)  # in-place BGR -> RGB for QImage

                QImg = QImage(frame.data, width, height, bytesPerLine, QImage.Format_RGB888)
                qImg_scaled = QImg.scaled(self.detectWindow.label.width(), self.detectWindow.label.height(),
                                          Qt.IgnoreAspectRatio, Qt.SmoothTransformation)

                pixmap = QPixmap.fromImage(qImg_scaled)
                self.detectWindow.label.setPixmap(pixmap)

            # FIX: the return value used to be assigned to `key`, shadowing the
            # loop variable above and never used; keep the call for GUI pumping.
            cv2.waitKey(10)

    def blink_detect(self, checkbox_name, behavior_detector, position):
        '''
        Blink detection plus blink-speed classification (PERCLOS based).

        :param checkbox_name: behaviour key used to update behavior_detect_flag
        :param behavior_detector: the eye/blink detector
        :param position: tuple, where the result text is drawn
        :return:
        '''
        global detect_info
        frame = self.basic_info['frame']
        landmark = self.basic_info['landmark']
        # FIX: unpack in the order the tuple was stored in detect() —
        # (pitch, roll, yaw). The original `pitch, yaw, roll = ...` silently
        # swapped yaw and roll before passing them to the blink detector.
        pitch, roll, yaw = self.basic_info['rotate_vector']
        EAR_B = self.getEAR_B()  # NOTE(review): re-read from disk every frame — consider caching

        behavior_detector.set_blink_params(blink_algo=self.cfg['blink_algo'], EAR_threshold=np.float64(self.cfg['EAR_thres']),
                                           valley_length=self.cfg['valley_length'],
                                           valley_diff_thres=self.cfg['valley_diff_thres'],
                                           PERCLOS_time_window_length=self.cfg['PERCLOS_time_window_length'],
                                           EAR_A=self.cfg['EAR_A'], EAR_B=EAR_B)
        preDetect = self.behavior_detect_flag[checkbox_name]
        behavior_detector.frameCount += 1  # advance the detector's frame counter
        blink_detect = behavior_detector.blinkDetect_with_blinkType(landmark[0][0], preDetect, pitch, yaw, roll)
        blink_state = "no blink" if blink_detect == 0 else "quick blink"  # 0 = no blink; 1,2 = blinking
        preDetect = False if blink_detect == 0 else True
        self.behavior_detect_flag[checkbox_name] = preDetect

        blink_speed_state = blink_state  # default label while the blink is still in progress

        # Quick / normal / slow blink classification.
        blink_speed_detect, perclos = behavior_detector.get_blinkSpeedDetect_perclos(PERCLOS_range=self.cfg["PERCLOS_range"])
        if blink_detect == 2:
            # 0 = no blink, 1 = blink starting, 2 = blinking; while blinking,
            # classify the blink as quick, normal or slow.
            if blink_speed_detect == 3:
                blink_speed_state = "quick blink"
            elif blink_speed_detect == 4:
                blink_speed_state = "normal blink"
            elif blink_speed_detect == 5:
                blink_speed_state = "slow blink"
            blink_detect = blink_speed_detect  # map 0,1,2 (2 = blinking) onto 3,4,5 (speed classes)

        cv2.putText(frame, blink_speed_state, position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        detect_info += f"blink_speed_state = {blink_speed_state},"

        # Feed the blink result into the fatigue model.
        self.causalInferModel.set_diagnosis_column(eye_detect=blink_detect)

    def yawn_detect(self, checkbox_name, behavior_detector, position):
        '''
        Two-stage yawn detection (MAR gate, then FOM confirmation).

        :param checkbox_name: behaviour key used to update behavior_detect_flag
        :param behavior_detector: the mouth/yawn detector
        :param position: tuple, where the result text is drawn
        :return:
        '''
        global detect_info
        frame = self.basic_info['frame']
        landmark = self.basic_info['landmark']

        behavior_detector.set_yawn_params(MAR_threshold=np.float64(self.cfg['MAR_threshold']), MAR_yaw_range=self.cfg['MAR_yaw_range'],
                                          MAR_beta=self.cfg['MAR_beta'],
                                          FOM_TIMEWINDOW_LENGTH=self.cfg['FOM_TIMEWINDOW_LENGTH'],
                                          FOM_threshold=self.cfg['FOM_threshold'])

        preDetect1 = self.behavior_detect_flag[checkbox_name]
        yawn_detect_first = behavior_detector.yawn_detect(landmark[0][0], preDetect1)
        yawn_state = "no yawn"
        yawn_detect = 0  # final yawn verdict
        # Second stage: confirm the candidate yawn with FOM.
        if yawn_detect_first:
            yawn_by_FOM = behavior_detector.detect_yawn_from_FOM()
            if yawn_by_FOM:
                yawn_state = "yawn"
                yawn_detect = 1

        preDetect1 = False if yawn_detect_first == 0 else True
        self.behavior_detect_flag[checkbox_name] = preDetect1
        cv2.putText(frame, yawn_state, position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        detect_info += f"yawn_state = {yawn_state},"

        # Feed the yawn result into the fatigue model.
        self.causalInferModel.set_diagnosis_column(mouth_detect=yawn_detect)

    def head_action_detect(self, behavior_detector, position):
        '''
        Head-movement detection (nod / rotate / back-forth).

        :param behavior_detector: the head detector
        :param position: tuple, where the result text is drawn
        :return:
        '''
        global detect_info
        frame = self.basic_info['frame']
        det = self.basic_info['det']
        landmark = self.basic_info['landmark']
        behavior_detector.setHeadPose_params(nod_threshold=np.float64(self.cfg['nod_threshold']),
                                             backForth_threshold=self.cfg['backForth_threshold'],
                                             rotate_threshold=self.cfg['rotate_threshold'])

        nod_flag, rotate_flag, backforth_flag = behavior_detector.head_action_detect(self.landmark_detector, frame, det, landmark)
        nod_state = "nodding" if nod_flag == 1 else ""
        backforth_state = "backforth" if backforth_flag == 1 else ""
        rotate_state = "rotating" if rotate_flag == 1 else ""

        x, y = position
        cv2.putText(frame, str(nod_state), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        cv2.putText(frame, str(backforth_state), (x * 2, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        cv2.putText(frame, str(rotate_state), (x * 3, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

        # Encode the head result for the fatigue model:
        # rotate: 0 = still, 1 = nodding, 2 = rotating; shift: 0 = none, 3 = back-forth.
        if nod_flag == 1:
            rotate = 1
        elif rotate_flag == 1:
            rotate = 2
        else:
            rotate = 0
        shift = 3 if backforth_flag == 1 else 0
        head_detect = (rotate, shift)

        detect_info += f"head_detect = {(rotate, shift)},"

        self.causalInferModel.set_diagnosis_column(head_detect=head_detect)

    def frown_action_detect(self, position):
        '''
        Frown detection on an aligned brow ROI.

        :param position: tuple, where the result text is drawn
        :return:
        '''
        global detect_info
        frame = self.basic_info['frame']
        # Deep-copy the frame so putText/rectangle drawing cannot pollute the ROI extraction.
        frame_copy = deepcopy(frame)
        landmark = self.basic_info['landmark']

        face_landmarks_dict = landmark_list2dict(landmark)
        aligned_face, eye_center, angle = align_face(image_array=frame_copy, landmarks=face_landmarks_dict)  # frontal-face alignment
        rotated_landmarks = rotate_landmarks(landmarks=face_landmarks_dict,
                                             eye_center=eye_center, angle=angle, row=frame.shape[0])  # rotate landmarks to match
        landmark_list = landmark_dict2list(rotated_landmarks)
        landmark_a = landmark_list[21]
        landmark_b = landmark_list[22]
        landmark_c = landmark_list[27]
        A, B, C, D = cal_forthROILandmark(landmark_a, landmark_b, landmark_c)

        brow_state = "normal"
        x_min, y_min = np.min(np.array([A, B, C, D]), axis=0)  # column-wise minimum
        x_max, y_max = np.max(np.array([A, B, C, D]), axis=0)  # column-wise maximum
        x_min, y_min = int(x_min), int(y_min)
        x_max, y_max = int(x_max), int(y_max)

        # Crop the brow ROI (non-ROI area filled black).
        ROI = crop_ROI(aligned_face, A, B, C, D)
        # FIX: ndarray.shape is (height, width, channels); the original named
        # these (w, h). The area test itself is unaffected.
        h, w, _ = ROI.shape
        if h * w > 16:  # only run the model when the ROI is large enough
            canny_ROI = brow_process(ROI)  # edge extraction on the ROI image
            brow_detect = self.frown_infer(canny_ROI, self.frownModel)
            brow_state = "frown" if brow_detect == 1 else "normal"

            # Feed the frown result into the fatigue model.
            self.causalInferModel.set_diagnosis_column(brow_detect=brow_detect)

        cv2.putText(frame, brow_state, position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        detect_info += f"brow_state = {brow_state},"

    def fatigue_detect(self, position):
        '''
        Fatigue-state inference from the accumulated behaviour diagnoses.

        :param position: tuple, where the result text is drawn
        :return:
        '''
        global detect_info

        frame = self.basic_info['frame']
        x, y = position
        fatigue_state, suggest, kss_mean = self.causalInferModel.fatigue_Infer()  # fatigue inference
        print(f"fatigue_state = {fatigue_state}, suggest = {suggest} ")
        cv2.putText(frame, "KSS " + str(round(kss_mean, 4)), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        detect_info += f"KSS = {str(round(kss_mean, 4))},"

        now = datetime.now()
        res_str = ""
        # Write fatigue results into the text browser.
        if 'fatigue' in fatigue_state:
            cv2.putText(frame, fatigue_state, (x * 5, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            detect_info += f"fatigue_state = {fatigue_state},"
            res_str = now.strftime("%Y-%m-%d %H:%M:%S") + "  " + fatigue_state
        else:
            cv2.putText(frame, "Alert", (x * 5, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            # FIX: was an f-string with no placeholders.
            detect_info += "fatigue_state = Alert,"

        if res_str != "":
            self.detectWindow.textBrowser.append(res_str)  # append one entry per detection
            self.detectWindow.textBrowser.moveCursor(self.detectWindow.textBrowser.textCursor().End)  # scroll to bottom