import sys
import os
# Resolve paths relative to this file and make its directory importable,
# so the data/model files loaded below can be found via rootPath.
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = curPath
sys.path.append(rootPath)

import cv2
from fatigue_algo.fatigue_detector.detector import Detector
from matplotlib import pyplot as plt
import numpy as np
from fatigue_algo.fatigue_detector.blink_feature import BlinkFeature
from fatigue_algo.fatigue_detector.head_detector import HeadDetector_real
import pandas as pd
import functools
from fatigue_algo.fatigue_detector.adaptive_threshold.adaptive_thres_setting import Headpose_EAR_thres,my_cmp
import time
from joblib import load
from fatigue_algo.fatigue_detector.adaptive_threshold.adaptive_thres_setting import calibrate_EAR_thres

df = pd.read_csv(rootPath + "/adaptive_threshold/data/adaptive_threshold_85.csv")  # adaptive-threshold lookup table, read once at import time
gbdt_model = load(rootPath + "/blink_EAR_thres_gbdt.joblib")   # GBDT model for blink EAR-threshold inference, pre-loaded at import time

def get_EAR_thres_withHeadPose(pitch, yaw, roll):
    '''
    Look up the EAR threshold recorded for the head pose most similar to the current one.

    Uses the module-level table ``df`` whose rows are
    (mean_pitch, mean_yaw, mean_roll, EAR).

    :param pitch: current head-pose pitch angle
    :param yaw: current head-pose yaw angle
    :param roll: current head-pose roll angle
    :return: EAR threshold of the most similar recorded head pose
    '''
    candidates = []
    array = np.array(df)  # faster row access than df.loc
    for row in array:
        mean_pitch, mean_yaw, mean_roll, EAR = row
        delta_pitch = np.abs(pitch - mean_pitch)
        delta_yaw = np.abs(yaw - mean_yaw)
        delta_roll = np.abs(roll - mean_roll)
        # 'entry' instead of 'object' — avoid shadowing the builtin
        entry = Headpose_EAR_thres(delta_pitch, delta_yaw, delta_roll, EAR)
        candidates.append(entry)

    # min() with the same comparator is equivalent to sorted(...)[0]
    # (both return the first minimal element) and avoids a full O(n log n) sort.
    best = min(candidates, key=functools.cmp_to_key(my_cmp))
    return best.EAR_thres  # EAR threshold of the most similar head pose

'''实时眨眼检测模型'''
class EyeDetector_real():
    '''Real-time blink-detection model based on the eye aspect ratio (EAR).'''

    def __init__(self, TIMEWINDOW_LENGTH = 120):
        '''
        :param TIMEWINDOW_LENGTH: sliding-window length (frames) used for
                                  PERCLOS and fast/slow blink detection
        '''
        self.blinkFeature_seq = []  # feature list of the most recently completed blink

        self.blinkFrameCount = 0  # closed-eye frame count inside the sliding window
        self.frameCount = 0       # processed-frame counter (maintained by callers)
        self.preDetect = None     # whether the previous frame was detected as a blink
        self.begin = 0            # blink start index (relative to EAR_buffer)
        self.end = 0              # blink end index (relative to EAR_buffer)
        self.bottom = 0           # index of the minimum EAR between begin and end
        self.EAR_buffer = []      # EAR values buffered while a blink is in progress

        self.fatigue_state = None

        self.blinkType = 0          # blink-algorithm selector (see blinkDetect_with_blinkType)
        self.EAR_threshold = None   # EAR threshold for blink detection
        self.valley_length = 30     # window length of the valley algorithm
        self.valley_diff_thres = 0.04
        self.TIMEWINDOW_LENGTH = TIMEWINDOW_LENGTH  # window length for fast/slow blink detection
        self.EAR_timeWindow = []    # sliding window of recent EAR values, used for PERCLOS
        self.EAR_diff_length = 30   # window length for the first-order EAR difference
        self.EAR_diff_window = []   # sliding window for the first-order EAR difference
        self.EAR_A = None  # dataset calibration EAR (fix: previously undefined until set_blink_params)
        self.EAR_B = 0     # subject's calibration EAR

    def set_blink_params(self, blink_algo = None, EAR_threshold=None, valley_diff_thres=None, valley_length=None, PERCLOS_time_window_length=None,EAR_A=None,EAR_B=None):
        '''Configure all blink-detection parameters in one call.'''
        self.blinkType = blink_algo
        self.EAR_threshold = EAR_threshold
        self.valley_diff_thres = valley_diff_thres
        # NOTE(review): valley_length feeds the diff-window length, not
        # self.valley_length — confirm this mapping is intended.
        self.EAR_diff_length = valley_length
        self.TIMEWINDOW_LENGTH = PERCLOS_time_window_length
        self.EAR_A = EAR_A  # dataset EAR_A
        self.EAR_B = EAR_B  # subject's calibrated EAR_B

    def getEAR(self, landmark):
        '''
        Compute the EAR from the 68-point facial landmarks of a single frame.

        :param landmark: facial landmarks, indexable as landmark[i][0]/[1]
        :return: EAR value; 0.4 (eyes open) when landmarks are missing/degenerate
        '''
        temp, temp1, temp2 = 0.4, 0.4, 0.4  # default: eyes open
        if (len(landmark) != 0):

            # left eye (points 36-41)
            division = (2 * (abs(landmark[39][0] - landmark[36][0])))
            if (division != 0):
                temp1 = (abs(landmark[37][1] - landmark[41][1]) + abs(landmark[38][1] - landmark[40][1])) / division

            # right eye (points 42-47)
            division = (2 * (abs(landmark[45][0] - landmark[42][0])))
            if (division != 0):
                temp2 = (abs(landmark[43][1] - landmark[47][1]) + abs(
                    landmark[44][1] - landmark[46][1])) / division

            # If the two eyes' EAR ratio exceeds 1.2, keep the smaller EAR.
            # The temp1 == 0 guard fixes a ZeroDivisionError when the left eye
            # is fully closed (temp1 == 0 is also the smaller value).
            if (temp1 == 0 or temp2 / temp1 > 1.2):
                temp = temp1
            else:
                temp = temp2

        return temp

    def getEAR_withYaw(self, landmark, yaw, EAR_yaw_threshold=0):
        '''
        Compute the EAR from one eye only, selected by the live head yaw
        (yaw < EAR_yaw_threshold -> left eye, otherwise right eye).

        :param landmark: facial landmarks
        :param yaw: head-pose yaw angle
        :param EAR_yaw_threshold: yaw cut-off for the eye selection
        :return: EAR value; 0.4 (eyes open) when landmarks are missing/degenerate
        '''
        temp = 0.4  # default: eyes open
        if (len(landmark) != 0):

            if (yaw < EAR_yaw_threshold):
                # left eye (points 36-41)
                division = (2 * (abs(landmark[39][0] - landmark[36][0])))
                if (division != 0):
                    temp = (abs(landmark[37][1] - landmark[41][1]) + abs(
                        landmark[38][1] - landmark[40][1])) / division
            else:
                # right eye (points 42-47)
                division = (2 * (abs(landmark[45][0] - landmark[42][0])))
                if (division != 0):
                    temp = (abs(landmark[43][1] - landmark[47][1]) + abs(
                        landmark[44][1] - landmark[46][1])) / division

        return temp

    def _push_EAR(self, EAR):
        '''Append EAR to both sliding windows, evicting the oldest entry when full.'''
        if (len(self.EAR_timeWindow) >= self.TIMEWINDOW_LENGTH):
            del (self.EAR_timeWindow[0])
        self.EAR_timeWindow.append(EAR)

        if (len(self.EAR_diff_window) >= self.EAR_diff_length):
            del (self.EAR_diff_window[0])
        self.EAR_diff_window.append(EAR)

    def _closed_frame_count(self):
        '''Count frames in the sliding window whose EAR is below the threshold.
        Fix: the original compared a plain list against the threshold, which
        raises TypeError unless the threshold happens to be a numpy scalar.'''
        return sum(1 for el in self.EAR_timeWindow if el < self.EAR_threshold)

    def _blink_state_step(self, EAR):
        '''
        Advance the blink state machine by one frame (reads self.preDetect).
        On blink end, computes the 5-dim blink feature vector.
        :return: 0 = no blink, 1 = blink started, 2 = blinking
        '''
        if (self.preDetect == False and EAR < self.EAR_threshold):
            # below threshold and previous frame open: blink starts
            self.begin = 0
            self.EAR_buffer.append(EAR)
            return 1
        elif (self.preDetect == True and EAR < self.EAR_threshold):
            # below threshold and previous frame blinking: still blinking
            self.EAR_buffer.append(EAR)
            return 2

        self.blinkFeature_seq = []  # clear the blink feature sequence
        # blink ends ('>=' fixes the EAR == threshold case, which previously
        # matched neither branch and left the buffer unflushed)
        if (self.preDetect == True and EAR >= self.EAR_threshold):
            self.EAR_buffer.append(EAR)
            self.end = len(self.EAR_buffer) - 1

            self.bottom = np.argmin(self.EAR_buffer)  # frame of minimum EAR

            duration_all = self.TIMEWINDOW_LENGTH
            self.blinkFrameCount = self._closed_frame_count()  # closed-eye frames in the window

            # 5-dim features: blink duration, amplitude, speed,
            # blink-time percentage, mean open-eye EAR
            blink_feature = BlinkFeature()
            blink_feature.cal_feature(self.EAR_buffer, self.begin, self.bottom, self.end, self.EAR_threshold,
                                      duration_all, self.blinkFrameCount)
            self.blinkFeature_seq = blink_feature.feature_2_list()  # publish the features
            self.EAR_buffer = []  # reset the blink buffer

        return 0

    def blinkDetect(self, landmark, preDetect, yaw=None, EAR_yaw_threshold=0):
        '''
        Per-frame blink detection via EAR thresholding.
        :param landmark: facial landmarks ndarray([[]])
        :param preDetect: whether the previous frame was a blink (True/False)
        :param yaw: optional head yaw; when given, the eye used for EAR is chosen from it
        :param EAR_yaw_threshold: yaw cut-off forwarded to getEAR_withYaw
        :return: 0 = no blink, 1 = blink started, 2 = blinking
        '''
        self.preDetect = preDetect
        if (yaw is not None):  # fix: 'yaw != None' misbehaves for array-like yaw
            EAR = self.getEAR_withYaw(landmark, yaw, EAR_yaw_threshold)
        else:
            EAR = self.getEAR(landmark)

        self._push_EAR(EAR)  # update both sliding windows
        return self._blink_state_step(EAR)

    def get_blinkSpeedDetect_perclos(self, PERCLOS_range=[0.3, 0.5]):
        '''
        Compute PERCLOS only, without the 5-dim features (blinkDetect() must run first),
        and classify the blink speed (fast / normal / slow).
        :param PERCLOS_range: PERCLOS thresholds separating the three classes
        :return: (blink speed class, perclos)
        '''
        frame_count = self._closed_frame_count()
        perclos = frame_count / self.TIMEWINDOW_LENGTH
        blink_speed_detect = self.slow_blink_detect(perclos, PERCLOS_range)
        return blink_speed_detect, perclos

    def slow_blink_detect(self, perclos, PERCLOS_range=[0.3, 0.5]):
        '''
        Classify blink speed from PERCLOS.
        :return: 3 = fast blink, 4 = normal blink, 5 = slow blink
        '''
        if (perclos < PERCLOS_range[0]):
            return 3
        elif (perclos < PERCLOS_range[1]):  # implies perclos >= PERCLOS_range[0]
            return 4
        else:
            return 5

    def adaptive_threshold(self, cal_landmark, pitch, yaw, EAR_alpha=0.8, EAR_pitch_range=[-8,-5,10,16], EAR_beta=[0.75,0.85,1,0.85,0.75], EAR_yaw_threshold=0):
        '''
        Calibrate the EAR threshold from a calibration face image and the live head pose.
        :param cal_landmark: landmarks of the normal eyes-open (calibration) image
        :param pitch: head pitch; larger pitch lowers the EAR threshold
        :param yaw: head yaw, used to pick the eye for the calibration EAR
        :param EAR_alpha: initial threshold = calibration EAR * EAR_alpha
        :param EAR_pitch_range: 4 ascending pitch breakpoints selecting an EAR_beta factor
        :param EAR_beta: 5 scale factors, one per pitch interval
        :param EAR_yaw_threshold: yaw cut-off for the eye selection
        :return: None (updates self.EAR_threshold)
        '''
        EAR = self.getEAR_withYaw(cal_landmark[0][0], yaw, EAR_yaw_threshold)
        self.EAR_threshold = EAR * EAR_alpha  # initial threshold from the scale factor

        # scale the threshold by the pitch interval the head pose falls into
        # (elif chain already guarantees the lower bound of each interval)
        if (pitch < EAR_pitch_range[0]):
            self.EAR_threshold = self.EAR_threshold * EAR_beta[0]
        elif (pitch < EAR_pitch_range[1]):
            self.EAR_threshold = self.EAR_threshold * EAR_beta[1]
        elif (pitch < EAR_pitch_range[2]):
            self.EAR_threshold = self.EAR_threshold * EAR_beta[2]
        elif (pitch < EAR_pitch_range[3]):
            self.EAR_threshold = self.EAR_threshold * EAR_beta[3]
        else:
            self.EAR_threshold = self.EAR_threshold * EAR_beta[4]

    def adaptive_threshold_with_headPoseData(self, pitch, yaw, roll):
        '''Set the EAR threshold from the most similar recorded head pose.'''
        EAR_thres = get_EAR_thres_withHeadPose(pitch, yaw, roll)
        self.EAR_threshold = EAR_thres

    def adaptive_threshold_with_gbdt(self, pitch, yaw, roll):
        '''Predict the EAR threshold from the current head pose with the pre-loaded GBDT model.'''
        X = np.array([[pitch, yaw, roll]])
        EAR_thres = gbdt_model.predict(X)[0]
        self.EAR_threshold = EAR_thres

    def adaptive_threshold_with_Valley(self, diff_thres=0.02):
        '''
        Detect EAR "valleys" (local minima) in the first-order-difference window,
        used as an adaptive blink threshold.
        :param diff_thres: minimum drop (previous candidate - valley) for a valley
                           to count; overridden by self.valley_diff_thres when non-zero
        :return: [] when no valley qualifies, else a one-element list with the threshold
        '''
        if (self.valley_diff_thres != 0):
            diff_thres = self.valley_diff_thres
        data = np.array(self.EAR_diff_window)
        length = data.shape[0]
        locs = np.arange(1, length)
        results = np.ones(length - 1, dtype=bool)
        main = data.take(locs, axis=0, mode='clip')
        plus = data.take(locs + 1, axis=0, mode='clip')
        minus = data.take(locs - 1, axis=0, mode='clip')
        results &= (main <= plus)   # f(x) <= f(x+1)
        results &= (main <= minus)  # f(x) <= f(x-1)
        results = np.nonzero(results)  # indices of candidate local minima
        # NOTE(review): data[results] indexes positions shifted by one relative
        # to locs (candidate at locs[i] but value read at data[i]) — confirm intended.
        mins_val = []
        pre = None
        for i, val in enumerate(data[results]):
            if (i != 0):
                if (pre - val > diff_thres):
                    mins_val.append(pre)
            pre = val
        if (len(mins_val) != 0):
            mins_val = [np.min(mins_val)]  # keep the smallest retained value as the threshold
        return mins_val

    def blinkDetect_with_blinkType(self, landmark, preDetect, pitch, yaw, roll):
        '''
        Per-frame blink detection with a selectable thresholding strategy.
        :param landmark: facial landmarks ndarray([[]])
        :param preDetect: whether the previous frame was a blink (True/False)
        :param pitch: head pitch, used by the adaptive strategies
        :param yaw: head yaw, used by the adaptive strategies
        :param roll: head roll, used by the adaptive strategies
        :return: 0 = no blink, 1 = blink started, 2 = blinking

        self.blinkType selects the strategy:
        0 = fixed EAR threshold, 1 = valley algorithm,
        2 = GBDT prediction + secondary EAR_B calibration.
        '''
        self.preDetect = preDetect
        EAR = self.getEAR(landmark)
        if (self.blinkType == 0):
            pass  # fixed threshold: nothing to update
        elif (self.blinkType == 1):  # valley algorithm
            mins = []
            if (len(self.EAR_timeWindow) > 2):
                mins = self.adaptive_threshold_with_Valley()
            if (len(mins) == 0):
                self.EAR_threshold = np.float64(0.23)  # fallback when no valley found
            else:
                self.EAR_threshold = mins[0]
        elif (self.blinkType == 2):  # two-stage EAR-threshold calibration
            # stage 1: GBDT prediction from the head pose
            self.adaptive_threshold_with_gbdt(pitch, yaw, roll)
            # stage 2: re-calibrate with the subject's EAR_B (set via set_blink_params)
            EAR_thres = calibrate_EAR_thres(self.EAR_A, self.EAR_B, self.EAR_threshold)
            self.EAR_threshold = EAR_thres

        self._push_EAR(EAR)  # update both sliding windows
        return self._blink_state_step(EAR)

'''眨眼模型（real）评估（硬阈值）'''
def blinkModel_estimate(video,face_detector,EAR_threshold):
    '''
    模型评估：输出视频中检测到的眨眼次数
    @:param video 要检测的视频
    @:param face_detector 人脸检测器
    @:param EAR_threshold EAR阈值
    @:return blinkCount:检测到的眨眼次数, blinkFeature_seq: 视频中提取的眨眼特征序列
    '''
    blinkCount = 0
    frame_total = video.get(cv2.CAP_PROP_FRAME_COUNT)  #视频总帧数

    eyeDetector = EyeDetector_real()
    eyeDetector.EAR_threshold = EAR_threshold
    eyeDetector.frameCount = 0
    preDetect = False  #前一帧未检测到眨眼
    #视频帧数
    while (eyeDetector.frameCount < frame_total):
        ret,frame = video.read()

        if(ret == True):
            dets = face_detector.detect_faces(frame)[0]
            if(len(dets) > 0):
                landmark = face_detector.detect_landmarks(frame,[dets])

                if (len(landmark) > 0):
                    '''眨眼检测'''
                    blink_detect = eyeDetector.blinkDetect(landmark[0][0],preDetect)
                    preDetect = False if blink_detect == 0 else True
                    if(blink_detect == 1): blinkCount += 1

            # print(f"detect frameNum = {frameCount}")
            # frameCount += 1
            # cv2.putText(frame,str(blinkCount), (60, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            # cv2.imshow("res",frame)
            # key = cv2.waitKey(15)
            # if(key == 27):  #ESC键退出
            #     break

        eyeDetector.frameCount += 1

    # os.system('cls') #清空控制台
    video.release()
    return blinkCount,eyeDetector.blinkFeature_seq

'''眨眼模型（real）评估（关于参数的自适应阈值）'''
def blinkModel_adaptive_estimate(video,face_detector,cal_landmark):
    '''
    模型评估：输出视频中检测到的眨眼次数
    @:param video 要检测的视频
    @:param face_detector 人脸检测器
    @:param cal_landmark 校准图片的关键点
    @:return blinkCount:检测到的眨眼次数, blinkFeature_seq: 视频中提取的眨眼特征序列
    '''
    blinkCount = 0
    frame_total = video.get(cv2.CAP_PROP_FRAME_COUNT)  #视频总帧数

    eyeDetector = EyeDetector_real()
    eyeDetector.frameCount = 0
    headDetector = HeadDetector_real()
    preDetect = False  #前一帧未检测到眨眼
    frameCount = 0 #视频帧数
    while (eyeDetector.frameCount < frame_total):
        ret,frame = video.read()

        if(ret == True):
            dets = face_detector.detect_faces(frame)[0]
            if(len(dets) > 0):
                landmark = face_detector.detect_landmarks(frame,[dets])

                if (len(landmark) > 0):

                    pitch, _, yaw = headDetector.get_pitch_roll_yaw(face_detector, frame, dets, landmark)  # 头部姿态
                    eyeDetector.adaptive_threshold(cal_landmark, pitch, yaw)  #自适应阈值

                    '''眨眼检测'''
                    blink_detect = eyeDetector.blinkDetect(landmark[0][0],preDetect,yaw)
                    preDetect = False if blink_detect == 0 else True
                    if(blink_detect == 1): blinkCount += 1

                    # cv2.putText(frame, "EAR_threshold:" + str(eyeDetector.EAR_threshold), (60, 80),
                    #             cv2.FONT_HERSHEY_SIMPLEX, 1,
                    #             (0, 0, 255), 2)
                    # cv2.putText(frame, "X:" + str(pitch), (60, 130), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                    # cv2.putText(frame, "Y:" + str(yaw), (60, 180), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            # print(f"detect frameNum = {frameCount}")
            # frameCount += 1
            # cv2.putText(frame,str(blinkCount), (60, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            #
            # cv2.imshow("res",frame)
            # key = cv2.waitKey(15)
            # if(key == 27):  #ESC键退出
            #     break

        eyeDetector.frameCount += 1

    # os.system('cls') #清空控制台
    video.release()
    return blinkCount,eyeDetector.blinkFeature_seq

'''眨眼模型（real）评估（关于旋转参数的自适应阈值）: 人脸检测器为scrfd，landmark检测器为pyfeat_detector'''
def blinkModel_adaptive_estimate_scrfd(video,face_detector,landmark_detector, cal_landmark, cfg):
    '''
    模型评估：输出视频中检测到的眨眼次数
    @:param video 要检测的视频
    @:param face_detector 人脸检测器
    @:param landmark_detector 人脸关键点检测器
    @:param cal_landmark 校准图片的关键点
    @:param cfg 配置文件
    @:return blinkCount:检测到的眨眼次数, blinkFeature_seq: 视频中提取的眨眼特征序列
    '''
    blinkCount = 0
    frame_total = video.get(cv2.CAP_PROP_FRAME_COUNT)  #视频总帧数

    eyeDetector = EyeDetector_real()
    eyeDetector.frameCount = 0
    headDetector = HeadDetector_real()
    preDetect = False  #前一帧未检测到眨眼
    frameCount = 0 #视频帧数
    while (eyeDetector.frameCount < frame_total):
        ret,frame = video.read()

        if(ret == True):
            dets = face_detector.detect_faces(frame)[0]
            if(len(dets) > 0):
                landmark = landmark_detector.detect_landmarks(frame,[dets])

                if (len(landmark) > 0):

                    pitch, _, yaw = headDetector.get_pitch_roll_yaw(landmark_detector, frame, dets, landmark)  # 头部姿态
                    eyeDetector.adaptive_threshold(cal_landmark, pitch, yaw,
                                                   EAR_alpha=cfg["EAR_alpha"], EAR_pitch_range=cfg["EAR_pitch_range"],
                                                   EAR_beta=cfg["EAR_beta"],
                                                   EAR_yaw_threshold=cfg["EAR_yaw_threshold"])  #自适应阈值
                    # eyeDetector.EAR_threshold = 0.25

                    '''眨眼检测'''
                    blink_detect = eyeDetector.blinkDetect(landmark[0][0],preDetect,yaw)
                    preDetect = False if blink_detect == 0 else True
                    if(blink_detect == 1): blinkCount += 1

                    # cv2.putText(frame, "EAR_threshold:" + str(eyeDetector.EAR_threshold), (60, 80),
                    #             cv2.FONT_HERSHEY_SIMPLEX, 1,
                    #             (0, 0, 255), 2)
                    # cv2.putText(frame, "X:" + str(pitch), (60, 130), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                    # cv2.putText(frame, "Y:" + str(yaw), (60, 180), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            # print(f"detect frameNum = {frameCount}")
            # frameCount += 1
            # cv2.putText(frame,str(blinkCount), (60, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            #
            # cv2.imshow("res",frame)
            # key = cv2.waitKey(15)
            # if(key == 27):  #ESC键退出
            #     break

        eyeDetector.frameCount += 1

    # os.system('cls') #清空控制台
    video.release()
    return blinkCount,eyeDetector.blinkFeature_seq

'''眨眼模型（real）评估（关于头部姿态数据的自适应阈值）'''
def blinkModel_adaptive_estimate_withHeadPoseData(video,face_detector,landmark_detector,cfg):
    '''
    模型评估：输出视频中检测到的眨眼次数
    @:param video 要检测的视频
    @:param face_detector 人脸检测器
    @:param landmark_detector 人脸关键点检测器
    @:param cfg 配置文件
    @:return blinkCount:检测到的眨眼次数, blinkFeature_seq: 视频中提取的眨眼特征序列
    '''
    blinkCount = 0
    frame_total = video.get(cv2.CAP_PROP_FRAME_COUNT)  #视频总帧数

    eyeDetector = EyeDetector_real()
    eyeDetector.frameCount = 0
    headDetector = HeadDetector_real()
    preDetect = False  #前一帧未检测到眨眼
    frameCount = 0 #视频帧数
    while (eyeDetector.frameCount < frame_total):
        ret,frame = video.read()

        if(ret == True):
            dets = face_detector.detect_faces(frame)[0]
            if(len(dets) > 0):
                landmark = landmark_detector.detect_landmarks(frame,[dets])

                if (len(landmark) > 0):

                    pitch, roll, yaw = headDetector.get_pitch_roll_yaw(landmark_detector, frame, dets, landmark)  # 头部姿态
                    eyeDetector.adaptive_threshold_with_headPoseData(pitch,roll,yaw)
                    # eyeDetector.EAR_threshold = 0.25

                    '''眨眼检测'''
                    blink_detect = eyeDetector.blinkDetect(landmark[0][0],preDetect,yaw,EAR_yaw_threshold=cfg["EAR_yaw_threshold"])
                    preDetect = False if blink_detect == 0 else True
                    if(blink_detect == 1):
                        blinkCount += 1

                    # cv2.putText(frame, "EAR_threshold:" + str(eyeDetector.EAR_threshold), (60, 80),
                    #             cv2.FONT_HERSHEY_SIMPLEX, 1,
                    #             (0, 0, 255), 2)
                    # cv2.putText(frame, "X:" + str(pitch), (60, 130), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                    # cv2.putText(frame, "Y:" + str(yaw), (60, 180), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            # print(f"detect frameNum = {frameCount}")
            # frameCount += 1
            # cv2.putText(frame,str(blinkCount), (60, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            #
            # cv2.imshow("res",frame)
            # key = cv2.waitKey(15)
            # if(key == 27):  #ESC键退出
            #     break

        eyeDetector.frameCount += 1
    video.release()
    return blinkCount, eyeDetector.blinkFeature_seq

'''眨眼模型（real）评估（使用gbdt，根据此时的头部姿态数据进行自适应阈值的调整）'''
def blinkModel_adaptive_estimate_with_GBRT(video,face_detector,landmark_detector,cfg):
    '''
    模型评估：输出视频中检测到的眨眼次数
    @:param video 要检测的视频
    @:param face_detector 人脸检测器
    @:param landmark_detector 人脸关键点检测器
    @:param cfg 配置文件
    @:return blinkCount:检测到的眨眼次数, blinkFeature_seq: 视频中提取的眨眼特征序列
    '''
    blinkCount = 0
    frame_total = video.get(cv2.CAP_PROP_FRAME_COUNT)  #视频总帧数

    eyeDetector = EyeDetector_real()
    eyeDetector.frameCount = 0
    headDetector = HeadDetector_real()
    preDetect = False  #前一帧未检测到眨眼
    frameCount = 0 #视频帧数
    while (eyeDetector.frameCount < frame_total):
        ret,frame = video.read()

        if(ret == True):
            dets = face_detector.detect_faces(frame)[0]
            if(len(dets) > 0):
                landmark = landmark_detector.detect_landmarks(frame,[dets])

                if (len(landmark) > 0):

                    pitch, roll, yaw = headDetector.get_pitch_roll_yaw(landmark_detector, frame, dets, landmark)  # 头部姿态
                    eyeDetector.adaptive_threshold_with_gbdt(pitch,roll,yaw)
                    # eyeDetector.EAR_threshold = 0.25

                    '''眨眼检测'''
                    blink_detect = eyeDetector.blinkDetect(landmark[0][0],preDetect,yaw,EAR_yaw_threshold=cfg["EAR_yaw_threshold"])
                    preDetect = False if blink_detect == 0 else True
                    if(blink_detect == 1):
                        blinkCount += 1

                    # cv2.putText(frame, "EAR_threshold:" + str(eyeDetector.EAR_threshold), (60, 80),
                    #             cv2.FONT_HERSHEY_SIMPLEX, 1,
                    #             (0, 0, 255), 2)
                    # cv2.putText(frame, "X:" + str(pitch), (60, 130), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                    # cv2.putText(frame, "Y:" + str(yaw), (60, 180), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            # print(f"detect frameNum = {frameCount}")
            # frameCount += 1
            # cv2.putText(frame,str(blinkCount), (60, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            #
            # cv2.imshow("res",frame)
            # key = cv2.waitKey(15)
            # if(key == 27):  #ESC键退出
            #     break

        eyeDetector.frameCount += 1
    video.release()
    return blinkCount, eyeDetector.blinkFeature_seq

'''眨眼模型（real）评估（使用gbdt，根据此时的头部姿态数据进行自适应阈值的调整）'''
def blinkModel_adaptive_estimate_with_GBRT_calibrateImg(video, face_detector, landmark_detector, cfg, calibrate_img, EAR_A=0.29114606):
    '''
    Blink model (real) evaluation using the GBDT model for a first-stage threshold
    from the current head pose, followed by a second-stage calibration against a
    calibration image of the subject.

    Counts the blinks detected in a video.
    @:param video video to analyse (cv2.VideoCapture)
    @:param face_detector face detector
    @:param landmark_detector facial-landmark detector
    @:param cfg configuration dict (EAR_yaw_threshold)
    @:param calibrate_img eyes-open calibration image of the subject
    @:param EAR_A dataset reference EAR used for the second-stage calibration
    @:return blinkCount: number of detected blinks, blinkFeature_seq: blink features extracted from the video
    '''
    blinkCount = 0
    frame_total = video.get(cv2.CAP_PROP_FRAME_COUNT)  # total frame count of the video

    eyeDetector = EyeDetector_real()
    eyeDetector.frameCount = 0
    headDetector = HeadDetector_real()
    preDetect = False  # previous frame: no blink detected

    # derive the subject's calibration EAR (EAR_B) from the calibration image
    cal_det = face_detector.detect_faces(calibrate_img)[0]
    cal_landmark = landmark_detector.detect_landmarks(calibrate_img, [cal_det])
    EAR_B = eyeDetector.getEAR(cal_landmark[0][0])
    print(f"EAR_A = {EAR_A}, EAR_B = {EAR_B}")
    while eyeDetector.frameCount < frame_total:
        ret, frame = video.read()

        if ret:
            dets = face_detector.detect_faces(frame)[0]
            if len(dets) > 0:
                landmark = landmark_detector.detect_landmarks(frame, [dets])

                if len(landmark) > 0:
                    pitch, roll, yaw = headDetector.get_pitch_roll_yaw(landmark_detector, frame, dets, landmark)  # head pose
                    # NOTE(review): adaptive_threshold_with_gbdt is declared
                    # (pitch, yaw, roll) but is called here as (pitch, roll, yaw) —
                    # confirm the intended argument order before changing it.
                    eyeDetector.adaptive_threshold_with_gbdt(pitch, roll, yaw)  # stage-1 calibration
                    # stage-2 calibration against the subject's EAR_B
                    EAR_thres = calibrate_EAR_thres(EAR_A, EAR_B, eyeDetector.EAR_threshold)
                    eyeDetector.EAR_threshold = EAR_thres

                    # per-frame blink detection (yaw not passed: both eyes are used)
                    blink_detect = eyeDetector.blinkDetect(landmark[0][0], preDetect, EAR_yaw_threshold=cfg["EAR_yaw_threshold"])
                    preDetect = blink_detect != 0
                    if blink_detect == 1:
                        blinkCount += 1

        eyeDetector.frameCount += 1
    video.release()
    return blinkCount, eyeDetector.blinkFeature_seq


'''眨眼模型（real）评估（使用gbdt，根据此时的头部姿态数据进行自适应阈值的调整）'''


def blinkModel_adaptive_estimate_with_GBRT_valleyAlgo(video, face_detector, landmark_detector, cfg):
    '''
    Blink model (real) evaluation using the valley algorithm to adapt the
    EAR threshold per frame (the GBDT/head-pose stages are disabled).

    Counts the blinks detected in a video.
    @:param video video to analyse (cv2.VideoCapture)
    @:param face_detector face detector
    @:param landmark_detector facial-landmark detector
    @:param cfg configuration dict (EAR_yaw_threshold)
    @:return blinkCount: number of detected blinks, blinkFeature_seq: blink features extracted from the video
    '''
    blinkCount = 0
    frame_total = video.get(cv2.CAP_PROP_FRAME_COUNT)  # total frame count of the video

    eyeDetector = EyeDetector_real()
    eyeDetector.frameCount = 0
    # currently unused (head-pose calibration is disabled); kept in case its
    # construction has load-time side effects relied on elsewhere
    headDetector = HeadDetector_real()
    preDetect = False  # previous frame: no blink detected

    while eyeDetector.frameCount < frame_total:
        ret, frame = video.read()

        if ret:
            dets = face_detector.detect_faces(frame)[0]
            if len(dets) > 0:
                landmark = landmark_detector.detect_landmarks(frame, [dets])

                if len(landmark) > 0:
                    # valley-based adaptive threshold
                    # NOTE(review): the guard checks EAR_timeWindow but the valley
                    # algorithm reads EAR_diff_window — confirm the intended window.
                    mins = []
                    if len(eyeDetector.EAR_timeWindow) > 2:
                        mins = eyeDetector.adaptive_threshold_with_Valley(diff_thres=0.04)

                    if len(mins) == 0:
                        eyeDetector.EAR_threshold = np.float64(0.23)  # fallback threshold
                    else:
                        eyeDetector.EAR_threshold = mins[0]

                    # per-frame blink detection
                    blink_detect = eyeDetector.blinkDetect(landmark[0][0], preDetect,
                                                           EAR_yaw_threshold=cfg["EAR_yaw_threshold"])
                    preDetect = blink_detect != 0
                    if blink_detect == 1:
                        blinkCount += 1

        eyeDetector.frameCount += 1
    video.release()
    return blinkCount, eyeDetector.blinkFeature_seq


