import dlib  # 人脸检测的库 Dlib
import numpy as np   # 数据处理的库 Numpy
import cv2  # 图像处理的库 OpenCv
import time
import math
import json
import os
from os import listdir

from rPPG_processing_realtime import extract_pulse
# from timer import *

# Plotting utilities
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt


# timer = timer()
# timer.start()

# Default parameters
fftlength = 178  # FFT window length: larger is slower to process but more precise
fs = 30  # camera sampling rate (frames per second)

# Crop the frame into per-region face patches


def coor_to_face(x_list, y_list, frame):
    """Crop `frame` into a grid of sub-images.

    `x_list` holds row boundaries and `y_list` column boundaries; each
    adjacent pair defines one band.  Every row band except the last uses
    all column bands; the last row band deliberately skips the outer
    columns (it only uses columns 1 .. len(y_list)-3).

    :param x_list: sorted row boundary coordinates
    :param y_list: sorted column boundary coordinates
    :param frame: 2-D (or higher) numpy image to slice
    :return: list of cropped sub-images, row-major order
    """
    patches = []
    n_rows = len(x_list)
    n_cols = len(y_list)
    for row in range(n_rows - 1):
        if row < n_rows - 2:
            col_indices = range(n_cols - 1)
        else:
            # Last row band: drop the first and the two last column bands.
            col_indices = range(1, n_cols - 2)
        top, bottom = x_list[row], x_list[row + 1]
        for col in col_indices:
            patches.append(frame[top:bottom, y_list[col]:y_list[col + 1]])
    return patches

# Split the face into local regions


def get_local_face(frame, key):
    '''
    Divide the face into multiple local regions (a grid over the face) plus
    two background patches to the left and right of the face.

    :param frame: BGR image containing the detected face
    :param key: 68 landmark coordinates as [x, y] pairs (0-based list;
                key[28]/key[9]/key[34] are presumably nose bridge, chin and
                nose bottom, key[3]/key[13] the jaw sides -- TODO confirm
                against dlib's 68-point layout)
    :return: (list of cropped sub-images: face grid cells followed by left
              and right background patches, pixel count of the whole frame)
    '''
    # NOTE: num_pixels is the pixel count of the WHOLE frame, not of each
    # ROI -- calc_ppg() relies on this convention.
    num_pixels = frame.shape[0] * frame.shape[1]

    x_list = []
    y_list = []
    # Vertical (row) band boundaries: top / middle / bottom of the face.
    x1 = key[28][1]
    x2 = key[9][1]
    x3 = key[34][1]
    x_list.append(x1)
    x_list.append(x3)
    x_list.append(x2)

    # Horizontal (column) band boundaries: left / mid-left / centre /
    # mid-right / right.
    y1 = key[3][0]
    y2 = key[30][0]
    y3 = key[13][0]
    y_mid_left = int((y1 + y2) / 2)
    y_mid_right = int((y2 + y3) / 2)
    y_list.append(y1)
    y_list.append(y_mid_left)
    y_list.append(y2)
    y_list.append(y_mid_right)
    y_list.append(y3)

    frame_local = coor_to_face(x_list, y_list, frame)

    # Background patches: push the crop away from the face so no skin is
    # included.
    distance = y_mid_left - y1
    y0 = y1 - distance * 3
    y4 = y3 + distance * 3
    y1 = y1 - distance * 2
    y3 = y3 + distance * 2

    # Left background, clamped to the image border.
    if y0 < 0:
        background_left = frame[x1:x3, 0:y1]
    else:
        background_left = frame[x1:x3, y0:y1]
    frame_local.append(background_left)

    # Right background. BUG FIX: the right border used to be hard-coded to
    # 640, which is wrong once the frame has been resized (the main loop
    # halves it); clamp to the actual frame width instead.
    width = frame.shape[1]
    if y4 > width:
        background_right = frame[x1:x3, y3:width]
    else:
        background_right = frame[x1:x3, y3:y4]
    frame_local.append(background_right)

    return frame_local, num_pixels

# Compute per-channel mean values


def calc_ppg(num_pixels, frame):
    '''
    Average each colour channel of a face ROI to produce one rPPG sample.

    :param num_pixels: pixel count of the ORIGINAL full frame (not of this
                       ROI) -- the averages are deliberately normalised by
                       the full-frame size
    :param frame: ROI image of shape (h, w, 3); may be empty (h or w == 0)
    :return: [r_avg, g_avg, b_avg] (channel order as stored; for OpenCV
             frames this is actually BGR), with NaNs replaced by 0;
             [0, 0, 0] when num_pixels == 0
    '''
    if num_pixels == 0:
        return [0, 0, 0]

    # Channel sums over the ROI; an empty ROI simply yields 0.0 averages.
    # (The original indexed frame[1] to test emptiness, which raised
    # IndexError on ROIs with fewer than two rows, and repeated dead
    # num_pixels == 0 checks inside this branch.)
    ppg = [np.sum(frame[:, :, c]) / num_pixels for c in range(3)]

    # Guard against NaN pixels propagating into the averages.
    return [0 if math.isnan(col) else col for col in ppg]

# Face detection


def detectFace(frame):
    '''
    Detect faces in the frame, take the 68 dlib landmarks, and split the
    face into local regions via get_local_face().

    Uses the module-level `detector` and `predictor` globals.

    :param frame: image as accepted by the dlib detector
    :return: (list of cropped ROI images, pixel count of the frame);
             returns ([], 0) when no face is detected so callers can
             always unpack the result.
    '''
    faces = detector(frame, 0)

    # ROBUSTNESS FIX: the original fell through and implicitly returned
    # None when no face was found, which crashed the caller's tuple unpack.
    if len(faces) == 0:
        return [], 0

    points_keys = []
    for i in range(len(faces)):
        # Landmark coordinates for this face: 68 rows of [x, y].
        landmarks = np.matrix([[p.x, p.y]
                               for p in predictor(frame, faces[i]).parts()])
        # NOTE: rebuilt per face, so only the LAST detected face is used
        # below (behavior preserved from the original).
        points_keys = []
        for idx, point in enumerate(landmarks):
            points_keys.append([point[0, 0], point[0, 1]])

    # Split the (last) face into local regions.
    frame_cropped, num_pixels = get_local_face(frame, points_keys)
    return frame_cropped, num_pixels

# Fourier-transformed pulse signals


def extract_pulse_local(rppg, fs):
    '''
    Run the FFT-based pulse extraction over every ROI signal.

    :param rppg: list of per-ROI rPPG signals (from rPPG_Extracter.py)
    :param fs: camera sampling rate in Hz
    :return: list of extracted pulse signals, one per ROI, same order
    '''
    # fftlength is the module-level FFT window size.
    return [extract_pulse(roi_signal, fftlength, fs) for roi_signal in rppg]

# Extract the rPPG signal


def rppg_extracter(frames, num_pixels):
    '''
    Append one [r, g, b] sample per ROI to the module-level rPPG buffer.

    NOTE(review): relies on the global list-of-lists `rPPG` (re-created per
    video in the main loop) having at least len(frames) slots -- confirm
    the slot count matches the ROI count produced by get_local_face().

    :param frames: list of ROI images for the current video frame
    :param num_pixels: pixel count of the full frame (see calc_ppg)
    :return: the global rPPG buffer
    '''
    for idx, roi in enumerate(frames):
        # rPPG[idx] accumulates one [r, g, b] row per processed frame,
        # forming a 2-D series per ROI (3-D over all ROIs).
        rPPG[idx].append(calc_ppg(num_pixels, roi))
    return rPPG

def cross_enhace(data_list):
    '''
    Cross-enhance a list of signals: element-wise product of every
    unordered pair of distinct entries.

    :param data_list: list of signals (anything supporting `*`)
    :return: list of pairwise products, in (i, j) order with i < j
    '''
    enhanced = []
    count = len(data_list)
    for first in range(count):
        for second in range(first + 1, count):
            enhanced.append(data_list[first] * data_list[second])
    return enhanced


def data_cross_data(data_list_one, data_list_two):
  '''
  Pearson correlation between every signal in the first list and every
  signal in the second list.

  :param data_list_one: list of 1-D signals
  :param data_list_two: list of 1-D signals
  :return: flat list of correlation coefficients, one per (one, two) pair
  '''
  coefficients = []
  for signal_a in data_list_one:
    for signal_b in data_list_two:
      stacked = np.array([signal_a, signal_b])
      # np.corrcoef returns a 2x2 matrix; keep only the off-diagonal
      # coefficient.
      coefficients.append(np.corrcoef(stacked)[0][1])
  return coefficients



# Correlation-based liveness decision
def pulse_process(pulse_list):
  '''
  Process the extracted pulse signals to decide whether the input is a
  spoofing attack.

  The face pulses (all but the last two entries) and the background pulses
  (the last two entries) are each cross-enhanced, then correlated against
  each other; the mean correlation (after dropping the 3 largest
  coefficients as outliers) is the liveness score -- a low mean correlation
  suggests a live face.

  :param pulse_list: list of 1-D pulse signals; the last two entries are
                     the background regions
  :return: (enhanced face pulses, enhanced background pulses,
            mean face-vs-background correlation)
  '''
  pulse_len = len(pulse_list)
  face_pulse = pulse_list[0:pulse_len - 2]
  ground_pulse = pulse_list[pulse_len - 2:pulse_len]

  # Cross-correlation enhancement within each group.
  # (Dead code removed: the original also computed face-minus-face /
  # face-minus-background differences and face/background self-correlations
  # that were never used in the result.)
  face_pulse_enhance = cross_enhace(face_pulse)
  ground_pulse_enhance = cross_enhace(ground_pulse)

  # Face-vs-background correlation coefficients.
  face_cor_ground = data_cross_data(face_pulse_enhance, ground_pulse_enhance)

  # Drop the 3 largest coefficients as outliers. ROBUSTNESS FIX: guarded so
  # that fewer than 3 coefficients no longer raises ValueError.
  for _ in range(min(3, len(face_cor_ground))):
    face_cor_ground.remove(max(face_cor_ground))
  mean_cor = np.mean(np.array(face_cor_ground))

  return face_pulse_enhance, ground_pulse_enhance, mean_cor



# Dlib face detector and 68-point landmark predictor (globals used by
# detectFace()).
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(
    '/Users/zhanglan/Desktop/rely/shape_predictor_68_face_landmarks.dat')

# Single-image test (kept for reference):
# img_rd = cv2.imread("/Users/zhanglan/Desktop/1.jpg")
# img_gray = cv2.cvtColor(img_rd, cv2.COLOR_RGB2GRAY)
# detectFace(img_gray)

# Video evaluation over the dataset.
# video_all = 0
# video_true = 0
# video_attack = 0
# Confusion-matrix counters (only updated by the commented-out scoring
# block below).
TP=0
FP=0
TN=0
FN=0
bo_real = False
folder_dir = "/Users/zhanglan/Desktop/活体人脸检测/数据集/HKBU_MARs_V1/HKBU_MARs_V1+.7z"
# folder_dir = "/Users/zhanglan/Desktop/活体人脸检测/数据集/CASIA-FASD/CASIA_faceAntisp/test_release"
# real_list = ['1.avi','2.avi']
# attack_list = ['3.avi','4.avi','5.avi','6.avi','7.avi','8.avi']
# null_list = ['HR_1.avi','HR_2.avi','HR_3.avi','HR_4.avi']

for folder in listdir(folder_dir):
  # Skip macOS metadata entries.
  data_real = []
  data_attack = []
  if folder == '.DS_Store': continue
  sub_dir = folder_dir+ "/" + folder
  # HKBU_MARs layout: s1/s2 presumably hold genuine videos, s3attack the
  # attacks -- TODO confirm against the dataset documentation.
  if folder == 's1' or folder == 's2':
    bo_real = True
  if folder == 's3attack':
    bo_real = False
  for video_name in listdir(sub_dir):
    if video_name == 'Thumbs.db' or video_name == '.DS_Store': #or video_name not in video_list:
      continue
    # if video_name in real_list:
    #   bo_real = True

    # if video_name in attack_list:
    #   bo_real = False

    # Video path
    print(sub_dir+'/'+video_name)
    camera = cv2.VideoCapture(sub_dir+'/'+video_name)
    num_frames = 0
    rPPG = [[], [], [], [], [], [], [], [], [], [], [], [],
            [], [], [], [], [], [], [], [], [], [], [], []]  # 24 ROI slots filled by rppg_extracter
    final_rppg = []
    if not camera.isOpened():
      print("cannot open camear")
      exit(0)
    while True:
      ret, frame = camera.read()
      if not ret:
          break
      num_frames += 1
      rows, cols = frame.shape[:2]
      frame = cv2.resize(frame, None, fx=0.5, fy=0.5,
                        interpolation=cv2.INTER_CUBIC)
      # Face detection and face splitting.
      # NOTE(review): assumes a face is found in every frame -- the
      # original detectFace returns None otherwise, which would crash
      # this unpack.
      roi, num_pixels = detectFace(frame)
      # rPPG signal extraction (appends into the global rPPG buffer).
      rppg_extracter(roi, num_pixels)
      # final_rppg.append(pluse[0])
      # print(pluse)

    # Fourier transform stage.
    rPPG_sample = rPPG[0]  # left-face RGB series
    # print('rPPG_sample',rPPG_sample.shape)
    rPPG_sample = np.transpose(rPPG_sample)

    if rPPG_sample.shape[1] > 10:  # run the FFT only with more than 10 frames
      rppg = []
      rppg_len = len(rPPG)
      for i in range(rppg_len):
        rppg_one = rPPG[i] 
        if rppg_one != []:
          rppg_one = np.transpose(rppg_one)
          rppg.append(rppg_one)
      # rppg: per-ROI RGB series; fftpulse: per-ROI transformed pulses.
      fftpulse = extract_pulse_local(rppg, fs)

      # Correlation test: low mean correlation between face and background
      # pulses is treated as evidence of a live face.
      face_pulse, ground_pulse, mean_cor = pulse_process(fftpulse)
      print("mean_cor", mean_cor)
      if(bo_real):
          data_real.append(mean_cor)
      else:
          data_attack.append(mean_cor)
      # if mean_cor < 0.5:
      #   if(bo_real):
      #     TP += 1
      #     f = open('/Users/zhanglan/Desktop/real.txt', 'a')
      #     f.write(str(mean_cor)+'\n')
      #   else:
      #     FP += 1
      #     f = open('/Users/zhanglan/Desktop/attack-wrong.txt', 'a')
      #     # f.write(str(mean_cor)+'\n')
      #     f.write(str(mean_cor)+'-'+sub_dir+'/'+video_name+'\n')

      # else:
      #   if(bo_real):
      #     FN += 1
      #     f = open('/Users/zhanglan/Desktop/real-wrong.txt', 'a')
      #     f.write(str(mean_cor)+'-'+sub_dir+'/'+video_name+'\n')
      #   else:
      #     TN += 1
      #     f = open('/Users/zhanglan/Desktop/attack.txt', 'a')
      #     f.write(str(mean_cor)+'\n')

      #   # f.write(str(mean_cor)+'\n')
    # print("mean_cor", mean_cor)
  # Dump per-folder scores as JSON for offline analysis.
  with open(folder+'-3d-real.json', 'w') as f:
    json.dump(data_real, f)
    print(folder+'-Done')      
  with open(folder+'-3d-attack.json', 'w') as f:
    json.dump(data_attack, f)
    print(folder+'-Done')
  # Display windows (disabled):
# namedWindow flag 0 allows resizing the window; 1 does not.
# cv2.namedWindow("image", 0)
# cv2.imshow("image", img_rd)
# APCER = FP/(FP+TN)
# BPCER = FN/(TP+FN)
# ACER = (APCER+BPCER)/2
# print("accuracy:", (TP+TN)/(TP+TN+FP+FN))
# print("precision:", TP/(TP+FP))
# print("APCER", APCER)
# print("BPCER", BPCER)
# print("ACER", ACER)

