import dlib  # 人脸检测的库 Dlib
import numpy as np   # 数据处理的库 Numpy
import cv2  # 图像处理的库 OpenCv
import time
import math

from s2r import extract_pulse
# from timer import *

# Plotting utilities
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt


# timer = timer()
# timer.start()

# Default parameters
fftlength = 178  # FFT window length: larger = slower but finer frequency resolution
fs = 30  # camera sampling rate (Hz)

# Get the regions of each part of the face


def coor_to_face(x_list, y_list, frame):
    '''
    Cut `frame` into a grid of patches bounded by the given coordinates.

    Every row band except the last is split at all column boundaries; the
    last (bottom) band keeps only the inner column intervals, skipping the
    first and the last one.

    :param x_list: sorted row boundaries
    :param y_list: sorted column boundaries
    :param frame: image array indexed as frame[row, col]
    :return: list of sub-images
    '''
    patches = []
    y_len = len(y_list)
    last_band = len(x_list) - 2
    for i, (row_start, row_stop) in enumerate(zip(x_list, x_list[1:])):
        if i < last_band:
            # Full row band: use every column interval.
            col_pairs = zip(y_list, y_list[1:])
        else:
            # Bottom band: inner column intervals only.
            col_pairs = zip(y_list[1:y_len - 2], y_list[2:y_len - 1])
        for col_start, col_stop in col_pairs:
            patches.append(frame[row_start:row_stop, col_start:col_stop])
    return patches

# Split the face into parts


def get_local_face(frame, key):
    '''
    Crop background patches on both sides of the detected face.

    :param frame: full BGR video frame, indexed frame[row, col]
    :param key: 68 landmark [x, y] points from the dlib predictor
                (0-indexed, so key[28] is dlib point 29 — NOTE(review): confirm)
    :return: (frame_local, num_pixels) where frame_local holds
             [background_left, background_right] and num_pixels is the WHOLE
             frame's pixel count (not the ROI's) — callers divide channel
             sums by it.
    '''
    height, width = frame.shape[:2]
    num_pixels = height * width

    frame_local = []

    # Row band of the face region used for the background patches.
    # NOTE(review): landmark semantics inferred from the indices — confirm.
    x1 = key[28][1]
    x3 = key[34][1]

    # Column landmarks: left jaw, nose, right jaw.
    y1 = key[3][0]
    y2 = key[30][0]
    y3 = key[13][0]
    y_mid_left = int((y1 + y2) / 2)

    # The original also split the face itself into a grid of patches via
    # coor_to_face(), but that result was never used, so the dead
    # computation has been removed.

    # Push the background sampling window away from the face so no skin
    # pixels leak into the "background" signal.
    distance = y_mid_left - y1
    y0 = y1 - distance * 3
    y4 = y3 + distance * 3
    y1 = y1 - distance * 2
    y3 = y3 + distance * 2

    # Left background patch, clamped at the left image edge.
    if y0 < 0:
        background_left = frame[x1:x3, 0:y1]
    else:
        background_left = frame[x1:x3, y0:y1]
    frame_local.append(background_left)

    # Right background patch, clamped at the actual frame width (the
    # original hard-coded 640, silently truncating other resolutions).
    if y4 > width:
        background_right = frame[x1:x3, y3:width]
    else:
        background_right = frame[x1:x3, y3:y4]
    frame_local.append(background_right)

    return frame_local, num_pixels


# Convert the colour space
def rgb2ycbcr(im):
    '''
    Convert an image from RGB-ordered channels to YCbCr.

    :param im: H x W x 3 array of channel values
    :return: H x W x 3 uint8 YCbCr image (Y offset +16, Cb/Cr offset +128)
    '''
    pixels = np.array(im, dtype=int)
    # ITU-R BT.601-style transform matrix (rows: Y, Cb, Cr).
    matrix = np.array([[.2568, .5041, .0979],
                       [-.1482, -.291, .4392],
                       [.4392, -.3678, -.0714]])
    converted = pixels.dot(matrix.T)
    converted[..., 0] += 16
    converted[..., 1:] += 128
    return np.uint8(converted)

def generate_skinmap(img):
    '''
    Build a binary skin mask for an image using fixed Cb/Cr chroma bounds.

    :param img: H x W x 3 image array
    :return: H x W float array, 1.0 where the pixel falls inside the skin
             chroma box (98 <= Cb <= 142 and 133 <= Cr <= 177), else 0.0
    '''
    img_ycbcr = rgb2ycbcr(img)  # convert to YCbCr colour space
    cb = img_ycbcr[:, :, 1]
    cr = img_ycbcr[:, :, 2]
    # Vectorised chroma test; the original looped over the np.where index
    # arrays pixel-by-pixel in Python, which is O(n) interpreter overhead.
    skin = (cb >= 98) & (cb <= 142) & (cr >= 133) & (cr <= 177)
    return skin.astype(np.float64)


# Get the RGB values

def calc_ppg(num_pixels, frame, number_faces):
    '''
    Average each colour channel over a ROI and compute the 2SR spatial
    correlation eigen-decomposition of its skin pixels.

    :param num_pixels: pixel count of the whole captured frame (0 = no
                       data); channel sums are divided by this, not by the
                       ROI size
    :param frame: cropped ROI image (H x W x 3, OpenCV layout)
    :param number_faces: current frame index, passed through unchanged
    :return: (ppg, V, sort_diag, number_faces) — per-channel means, the
             eigenvector matrix (largest eigenvalue first), and the
             descending eigenvalues
    '''
    # Defaults so every return path is defined: the original left V and
    # sort_diag unset when num_pixels == 0, which raised NameError at the
    # return statement.
    V = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    sort_diag = [0, 0, 0]

    if num_pixels == 0:
        ppg = [0, 0, 0]
    else:
        rows, cols = frame.shape[:2]
        # NOTE(review): OpenCV frames are BGR, so channel 0 is blue even
        # though the original named it `r` — confirm the intended order.
        r, g, b = frame[:, :, 0], frame[:, :, 1], frame[:, :, 2]
        skin_mask = generate_skinmap(frame)  # 1.0 where the pixel looks like skin
        r_masked = r[skin_mask == 1]
        g_masked = g[skin_mask == 1]
        b_masked = b[skin_mask == 1]
        values = np.array([r_masked, g_masked, b_masked])  # 3 x N skin pixels

        # Spatial RGB correlation (2SR): eigen-decompose C = values @ values.T.
        if values.shape[1] != 0:
            # NOTE(review): normalised by the ROI area rather than by the
            # number of masked pixels — confirm this is intended.
            C = np.matmul(values, values.T) / (rows * cols)
            D, V = np.linalg.eigh(C)
            # eigh returns ascending eigenvalues; reorder to descending.
            sort_index = np.argsort(D)[::-1]
            sort_diag = sorted(D)[::-1]
            V = V[:, sort_index]  # columns ordered by descending eigenvalue

        r_avg = np.sum(frame[:, :, 0]) / num_pixels
        g_avg = np.sum(frame[:, :, 1]) / num_pixels
        b_avg = np.sum(frame[:, :, 2]) / num_pixels
        ppg = [r_avg, g_avg, b_avg]
        # Replace NaN channel means defensively so downstream FFTs stay finite.
        for i, col in enumerate(ppg):
            if math.isnan(col):
                ppg[i] = 0

    return ppg, V, sort_diag, number_faces

# Detect faces


def detectFace(frame,number_faces):
    '''
    Detect faces with the module-level dlib detector, crop face/background
    regions via get_local_face, and save the crops to disk.

    :param frame: BGR image (already downscaled by the caller)
    :param number_faces: running index used to name the saved image files
    :return: (frame_cropped, num_pixels) when at least one face is found;
             otherwise falls through and implicitly returns None (callers
             truth-test the result)
    '''
    # Run the dlib frontal-face detector (second argument 0 = no upsampling).
    faces = detector(frame, 0)

    print('faces', faces)
    print('faces', len(faces))
    # Font for the (currently disabled) on-frame annotations below.
    font = cv2.FONT_HERSHEY_SIMPLEX

    # Locate the 68 landmark points
    if len(faces) != 0:
        # At least one face was detected
        for i in range(len(faces)):
            # Landmark coordinates for face i
            landmarks = np.matrix([[p.x, p.y]
                                   for p in predictor(frame, faces[i]).parts()])
            # img=frame[face.top():face.bottom(),face.left():face.right()]
            img_face = []
            img_face.append(frame[faces[i].top():faces[i].bottom(),faces[i].left():faces[i].right()])

            # Collect the landmark points.
            # NOTE(review): points_keys/img_face are re-created on every loop
            # pass, so with several faces only the last one is kept — confirm
            # single-face input is assumed.
            points_keys = []
            for idx, point in enumerate(landmarks):
                # Coordinates of one of the 68 points
                pos = (point[0, 0], point[0, 1])

                points_keys.append([point[0, 0], point[0, 1]])

                # Draw a circle on each of the 68 landmarks (disabled):
                # cv2.circle(img_rd, pos, 2, color=(139, 0, 0))
                # Label the points 1-68 (disabled):
                # cv2.putText(img_rd, str(idx + 1), pos, font, 0.2, (187, 255, 255), 1, cv2.LINE_AA)

        # Face splitting: background patches + the raw face crop.
        frame_cropped, num_pixels = get_local_face(frame, points_keys)
        frame_cropped.append(img_face[0])
        # Debug display (disabled):
        # cv2.imshow("picture", frame)
        # cv2.imshow("left_face", frame_local[0])
        # cv2.imshow("right_face", frame_local[3])
        if list(frame_cropped[0]) != []:
            try:
                # cv2.imshow("background_left", frame_cropped[0])
                print(number_faces)
                # NOTE(review): hard-coded absolute output directories —
                # parameterize before reuse.
                cv2.imwrite("/Users/zhanglan/Desktop/bgleft1/"+str(number_faces) +'.jpg',frame_cropped[0])
                # cv2.imshow("background_right", frame_cropped[1])
                cv2.imwrite("/Users/zhanglan/Desktop/bgright1/"+str(number_faces) +'.jpg',frame_cropped[1])
                # cv2.imshow("face", frame_cropped[-1])
                cv2.imwrite("/Users/zhanglan/Desktop/face1/"+str(number_faces) +'.jpg',frame_cropped[-1])
            except Exception as e:
                print("debug")
        return frame_cropped, num_pixels
    # On-frame face-count annotation (disabled):
    #   cv2.putText(img_rd, "faces: " + str(len(faces)),(20, 50), font, 1, (0, 0, 0), 1, cv2.LINE_AA)
    # else:
    #   # No face detected
    #   cv2.putText(img_rd, "no face", (20, 50), font,1, (0, 0, 0), 1, cv2.LINE_AA)

# Signals after the Fourier transform


def extract_pulse_local(rppg, fs):
    '''
    Run extract_pulse over every ROI's rPPG trace, keeping only traces whose
    transform is NaN-free.

    :param rppg: list of per-ROI rPPG arrays
    :param fs: camera sampling rate (Hz)
    :return: list of pulse signals, one per valid ROI
    '''
    pulse = []
    for rppg_one in rppg:
        # fftlength is the module-level FFT window length.
        pulse_one = extract_pulse(rppg_one, fftlength, fs)
        # Skip ROIs whose transform contains NaN (e.g. from empty crops).
        # (The original wrote `(np.isnan(x) == True).any()` and printed the
        # result on every iteration.)
        if np.isnan(pulse_one).any():
            continue
        pulse.append(pulse_one)
    return pulse

# Extract the rPPG signal


def rppg_extracter(frames, num_pixels,number_faces):
    '''
    Append each ROI's colour averages to the module-level rPPG buffers and
    update the 2SR bookkeeping globals (U, sigmas, signal_sr_b).

    :param frames: list of cropped ROI images (background patches + face)
    :param num_pixels: pixel count of the full frame, forwarded to calc_ppg
    :param number_faces: index of the current detected frame
    :return: the module-level rPPG accumulator (mutated in place)
    '''
    # Original fps bookkeeping, kept for reference:
    # dt = time.time() - time_start[0]
    # fps = 1 / (dt)
    # time_start[0] = time.time()

    # frames holds the face-split regions
    roi_num = len(frames)
    for i in range(roi_num):
        roi_data = frames[i]
        # Per-ROI channel means plus the 2SR eigen-decomposition.
        a,V, sort_diag,number_faces=  calc_ppg(num_pixels, roi_data,number_faces)
        rPPG[i].append(a)  # rPPG[i] accumulates one [r, g, b] triple per frame
        if(len(V)!=0 and len(sort_diag)!=0):
          # NOTE(review): U/sigmas/signal_sr_b are module globals with only
          # 3 slots, but they are indexed by number_faces, which grows once
          # per detected frame — this overflows after 3 frames; confirm.
          U[number_faces].append(V)
          sigmas[number_faces].append(sort_diag)
          if number_faces > 0:
            print('U[number_faces][i]',U[number_faces][i])
            # Rotation of the principal eigenvector against the previous
            # frame's minor eigenvectors.
            rot = [np.matmul(U[number_faces][i][:,0].T, U[number_faces-1][i][:,1]), np.matmul(U[number_faces][i][:,0], U[number_faces-1][i][:,2])]
            # NOTE(review): these prints index sigmas[number_faces][i-1]
            # while `scale` below uses sigmas[number_faces-1][i] — one of
            # the two is probably a typo; confirm the intended indexing.
            print(1,sigmas[number_faces][i][0]/sigmas[number_faces][i-1][1])
            print(2,sigmas[number_faces][i][0]/sigmas[number_faces][i-1][2])

            # Eigenvalue-ratio scaling; math.sqrt raises ValueError if the
            # ratio goes negative (the commented-out original handled that).
            scale = [math.sqrt(sigmas[number_faces][i][0]/sigmas[number_faces-1][i][1]), math.sqrt(sigmas[number_faces][i][0]/sigmas[number_faces-1][i][2])]
            sr = np.array(scale) * np.array(rot)
            # Back-projection onto the previous frame's minor eigenvectors.
            sr_bp = np.matmul(sr, [U[number_faces-1][i][:,1], U[number_faces-1][i][:,2]])
            signal_sr_b[number_faces].append(sr_bp)
            print('sr_bp',sr_bp)

    return rPPG
    # rPPG.append(calc_ppg(num_pixels, roi_data))
    # 提取rppg信号分量
    # print('rPPG_sample',rPPG[0])
    # rPPG_sample_left = rPPG[0]
    # rPPG_sample_right = rPPG[3]
    # rPPG_sample_bgleft = rPPG[-1]
    # rPPG_sample_bgright = rPPG[-2]


def buffer(x, n, p=0, opt=None):
    '''Mimic the MATLAB ``buffer`` routine to segment a signal into columns.

    MATLAB docs: https://se.mathworks.com/help/signal/ref/buffer.html

    Args
    ----
    x:   signal array
    n:   segment (column) length
    p:   number of values shared between consecutive columns
    opt: initial condition option. The default zero-fills the first `p`
         values; 'nodelay' fills the first column immediately from x.

    NOTE(review): with 'nodelay' the read position is not advanced past the
    first column, so later columns re-read x from the start — this differs
    from MATLAB but is preserved from the original implementation.
    '''
    if p >= n:
        raise ValueError('p ({}) must be less than n ({}).'.format(p, n))

    step = n - p  # fresh samples consumed per column
    num_cols = int(np.ceil(len(x) / float(step)))
    if opt == 'nodelay':
        # One extra column for the values that were not delayed.
        num_cols += 1
    elif opt is not None:
        raise SystemError('Only `None` (default initial condition) and '
                          '`nodelay` (skip initial condition) have been '
                          'implemented')

    out = np.zeros((n, num_cols))

    start = 0
    for col in range(num_cols):
        # 'nodelay' fills the whole first column straight from x.
        if col == 0 and opt == 'nodelay':
            out[0:n, col] = x[0:n]
            continue
        # Carry the overlap over from the previous column...
        if col != 0 and p != 0:
            out[:p, col] = out[-p:, col - 1]
        # ...or zero-fill the initial condition.
        else:
            out[:p, col] = 0
        # Copy the next chunk of fresh samples below the overlap.
        stop = start + step
        chunk = x[start:stop]
        out[p:p + len(chunk), col] = chunk
        start = stop
    return out



def cross_enhace(data_list):
    '''
    Cross-correlation enhancement: element-wise product of every unordered
    pair of rows.

    :param data_list: sequence of signals (anything supporting `*`)
    :return: list of data_list[i] * data_list[j] for all i < j, in row order
    '''
    count = len(data_list)
    return [data_list[i] * data_list[j]
            for i in range(count)
            for j in range(i + 1, count)]


def data_cross_data(data_list_one, data_list_two):
    '''
    Pearson correlation between every row of one 2-D array and every row of
    another.

    :param data_list_one: first list of 1-D signals
    :param data_list_two: second list of 1-D signals
    :return: flat list of correlation coefficients, one per (i, j) pair,
             iterated with the second list varying fastest
    '''
    coefficients = []
    for row_one in data_list_one:
        for row_two in data_list_two:
            pair = np.array([row_one, row_two])
            corr_matrix = np.corrcoef(pair)
            # Keep only the off-diagonal coefficient for this pair.
            coefficients.append(corr_matrix[0][1])
    return coefficients


# Correlation
def pulse_process(pulse_list):
    '''
    Score face-vs-background pulse correlation to decide whether the clip is
    a spoofing attack. (The original carried a second, dead docstring, which
    has been removed, along with the debug prints.)

    :param pulse_list: pulse signals; the last two entries are the background
                       ROIs, everything before them is a face ROI
    :return: (face_pulse_enhance, ground_pulse_enhance, mean_cor) — the
             enhanced signal groups and the mean face/background correlation
    '''
    pulse_len = len(pulse_list)
    face_pulse = pulse_list[0:pulse_len - 2]
    ground_pulse = pulse_list[pulse_len - 2:pulse_len]

    # Cross-correlation enhancement within each group.
    face_pulse_enhance = cross_enhace(face_pulse)
    ground_pulse_enhance = cross_enhace(ground_pulse)

    # Pairwise differences (kept from the original; the results are computed
    # but not used further).
    face_sub_face = pulse_list[0] - pulse_list[1]
    face_sub_ground = pulse_list[0] - pulse_list[2]
    face_distance_face = face_sub_face ** 2
    face_distance_ground = face_sub_ground ** 2

    # Correlation coefficients within and between groups.
    face_cor_face = data_cross_data([pulse_list[0]], [pulse_list[1]])
    ground_cor_ground = data_cross_data([pulse_list[-1]], [pulse_list[-2]])
    face_cor_ground = data_cross_data(face_pulse_enhance, ground_pulse_enhance)

    # Drop the three largest coefficients as outliers before averaging.
    # NOTE(review): raises ValueError when fewer than 3 coefficients exist —
    # confirm pulse_list is always large enough.
    for _ in range(3):
        face_cor_ground.remove(max(face_cor_ground))
    mean_cor = np.mean(np.array(face_cor_ground))

    return face_pulse_enhance, ground_pulse_enhance, mean_cor


# Dlib face detector and 68-point landmark predictor
detector = dlib.get_frontal_face_detector()
# NOTE(review): hard-coded absolute model path — parameterize before reuse.
predictor = dlib.shape_predictor(
    '/Users/zhanglan/Desktop/rely/shape_predictor_68_face_landmarks.dat')


# 检测图片
# img_rd = cv2.imread("/Users/zhanglan/Desktop/1.jpg")
# img_gray = cv2.cvtColor(img_rd, cv2.COLOR_RGB2GRAY)
# detectFace(img_gray)

# Video input
# NOTE(review): hard-coded absolute video path — parameterize before reuse.
camera = cv2.VideoCapture("/Users/zhanglan/Desktop/8-3.avi")
# camera = cv2.VideoCapture("/Users/zhanglan/Desktop/相似性结果/test/4/6.avi")
num_frames = 0  # frames read from the video so far
number_faces = 0  # frames in which a face was detected (drives file names / indices)
# rPPG = [[], [], [], [], [], [], [], [], [], [], [], [],
#         [], [], [], [], [], [], [], [], [], [], [], []]  # 24
rPPG = [[],[],[]]  # per-ROI colour traces — presumably left bg, right bg, face; confirm
pulse = [[], [], [], [], [], [], [], [], [], [], [], [],
        [], [], [], [], [], [], [], [], [], [], [], []]  # 24 slots (legacy layout)
# NOTE(review): U/sigmas/signal_sr_b are indexed by number_faces in
# rppg_extracter, so these 3 slots overflow after 3 detected frames — confirm.
U = [[], [], []]  # 24
sigmas = [[], [], []]  # 24
signal_sr_b = [[], [], []]  # 24
windowsize = 3 # secs
overlap = 2 # secs
cutoff = 2 # hz
# num_face = 0
# 2SR additions (original author's marker)
# signal_sr_b = []
# U, sigmas, pulse = [], [], []

# 2sr自加
# Abort early when the video cannot be opened.
if not camera.isOpened():
    print("cannot open camera")
    exit(0)

# Main loop: read each frame, downscale it, detect and crop the face, and
# keep the frame/face counters used by the (currently disabled) downstream
# rPPG / 2SR windowing stage.
while True:
    ret, frame = camera.read()
    if not ret:
        break

    # Halve the resolution to speed up dlib detection.
    frame = cv2.resize(frame, None, fx=0.5, fy=0.5,
                       interpolation=cv2.INTER_CUBIC)

    # Face detection + splitting. The original called detectFace() twice per
    # frame (once as the truth test, once for the result), doubling the most
    # expensive step per frame; detect once and reuse the result.
    detection = detectFace(frame, number_faces)
    if detection:
        roi, num_pixels = detection
        # rPPG extraction / 2SR windowing stage (disabled in the original):
        # rppg_extracter(roi, num_pixels, number_faces)
        number_faces += 1

    num_frames += 1

# Dump the accumulated buffers for inspection.
print('rppg',rPPG)
# print('pulse',pulse)
print('signal_sr_b',signal_sr_b)
# print('sigmas',sigmas)

# (manually added section — original marker)

# (end of added section)


# Fourier transform of the collected traces (disabled):
# rPPG_sample = rPPG[0]  # left-face RGB trace
# rPPG_sample = np.transpose(rPPG_sample)

# if rPPG_sample.shape[1] > 10:
#   rppg = []
#   rppg_len = len(rPPG)
#   for i in range(rppg_len):
#     rppg_one = rPPG[i]
#     if rppg_one != []:
#       rppg_one = np.transpose(rppg_one)
#       rppg.append(rppg_one)
#   # rppg: per-ROI RGB traces; pulse: their Fourier-transformed signals
#   fftpulse = extract_pulse_local(rppg, fs)
#   # print('fftpulse',fftpulse)
#   # correlation test
#   face_pulse, ground_pulse, mean_cor = pulse_process(fftpulse)
#   if mean_cor < 0.50:
#       print("true")
#   else:
#       print("false")


# Window display (disabled): namedWindow flag 0 allows resizing, 1 does not.
# cv2.namedWindow("image", 0)
# cv2.imshow("image", img_rd)

# Block until a key is pressed so any open OpenCV windows stay visible.
cv2.waitKey(0)