import sys
import cv2
import os
import numpy as np
import dlib
import io
import face_recognition
import shutil


def compare(distance, one_pic, two_pic):
    """Compare the face in a video frame with the head shot cropped from an ID card.

    Can also be used for simple two-image face comparison.  Prints a status
    code (100 = success, 110 = no face found), the boolean match result, and
    a similarity percentage.

    distance -- face-distance tolerance; smaller means stricter matching
    one_pic  -- path of the first image (video frame)
    two_pic  -- path of the second image (ID-card head shot)
    """
    first_image = face_recognition.load_image_file(one_pic)
    second_image = face_recognition.load_image_file(two_pic)
    # Locate faces first so we can fail fast with a clear message.
    face_one_num = face_recognition.face_locations(first_image, number_of_times_to_upsample=1, model='hog')
    face_two_num = face_recognition.face_locations(second_image, number_of_times_to_upsample=1, model='hog')
    if not face_one_num:
        print(110)
        print("can't find face in video")
        sys.exit()
    if not face_two_num:
        print(110)
        print("can't find face in id card photo")
        sys.exit()
    # Reuse the locations computed above so detection is not run a second time.
    first_encoding = face_recognition.face_encodings(first_image, known_face_locations=face_one_num)[0]
    second_encoding = face_recognition.face_encodings(second_image, known_face_locations=face_two_num)[0]
    same = face_recognition.face_distance([first_encoding], second_encoding)
    results = face_recognition.compare_faces([first_encoding], second_encoding, tolerance=distance)
    print(100)
    print(results)
    print(np.round((1 - same) * 100, 2))

def CatchPICFromVideo(video_url, catch_pic_num, path_name, xml_path="xml路径"):
    """Grab face snapshots from a video and save them as numbered JPEGs.

    video_url     -- path/URL of the source video
    catch_pic_num -- maximum number of face images to save
    path_name     -- directory that receives the face images (0.jpg, 1.jpg, ...)
    xml_path      -- Haar cascade XML file for the OpenCV face detector,
                     e.g. haarcascade_frontalface_alt.xml
    """
    # Video source.
    cap = cv2.VideoCapture(video_url)

    # Tell OpenCV which face-detection classifier to use.
    classfier = cv2.CascadeClassifier(xml_path)

    # Border colour (BGR) drawn around detected faces.
    color = (0, 255, 0)

    # Frame dimensions are constant for the whole capture; read them once
    # instead of on every frame.
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

    num = 0
    while cap.isOpened():
        ok, frame = cap.read()  # read one frame
        if not ok:
            break
        # Landscape frames (e.g. rotated phone recordings) are turned upright.
        if width > height:
            frame = rotate_bound(frame, 270)
        grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # detector works on greyscale
        # scaleFactor / minNeighbors / minSize tune detection sensitivity.
        faceRects = classfier.detectMultiScale(grey, scaleFactor=1.2, minNeighbors=10, minSize=(40, 40))
        if len(faceRects) > 0:  # at least one face detected
            for faceRect in faceRects:  # crop each face individually
                x, y, w, h = faceRect

                # Save the padded face crop as an image.  Clamp the top/left
                # padding so negative indices don't wrap around the frame.
                img_name = "%s/%d.jpg" % (path_name, num)
                top = max(0, y - 10)
                left = max(0, x - 10)
                image = frame[top: y + h + 10, left: x + w + 10]
                # Use the JPEG quality flag; the original passed a PNG
                # compression flag while writing .jpg files.
                cv2.imwrite(img_name, image, [int(cv2.IMWRITE_JPEG_QUALITY), 95])

                num += 1
                if num >= catch_pic_num:  # stop once the requested count is saved
                    break

                # Draw the bounding box on the preview frame.
                cv2.rectangle(frame, (x - 10, y - 10), (x + w + 10, y + h + 10), color, 2)

                # On-screen counter so the person being filmed can see progress.
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(frame, 'num:%d/100' % (num), (x + 30, y + 30), font, 1, (255, 0, 255), 4)
        else:
            # No face in this frame: keep the raw frame as a fallback 0.jpg.
            cv2.imwrite(path_name + '/0.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
        # Stop once the requested number of images has been saved.
        if num >= catch_pic_num:
            break

    # Release the capture so the underlying file/device handle isn't leaked.
    cap.release()


def tranposeDector(img_in, img_out, cnt=0):
    """Crop the head shot out of an ID-card front image.

    img_in  -- path of the ID-card front image
    img_out -- path where the cropped head shot is written
    cnt     -- internal retry counter; when no face is found the image is
               rotated 90 degrees and detection is retried (up to 3 times)
    """
    img = cv2.imread(img_in)
    if img is None:  # unreadable/missing input file
        print(110)
        print("can't find face in id card photo")
        sys.exit()
    _detect_and_crop(img, img_out, cnt)


def _detect_and_crop(img, img_out, cnt):
    """Detect the single face in *img* and write a padded crop to *img_out*.

    Retries on the 90-degree-rotated image when detection fails.  Fixes the
    original retry, which passed an image array where a file path was
    expected and dropped the output path entirely.
    """
    # Initialise dlib's frontal face detector.
    dector = dlib.get_frontal_face_detector()
    try:
        dets = dector(img, 1)
    except Exception:  # detector could not process the image
        print(110)
        print("can't find face in id card photo")
        sys.exit()
    if dets:
        # An ID card carries exactly one face; take the first detection,
        # which is a rectangle of (left, top) / (right, bottom) points.
        face = dets[0]
        # Pad the detection box (40px above, 20px below, 15px left, 25px
        # right — same margins as the original's blank-board copy), clamped
        # to the image bounds so indices never wrap or overrun.
        top = max(0, face.top() - 40)
        bottom = min(img.shape[0], face.bottom() + 20)
        left = max(0, face.left() - 15)
        right = min(img.shape[1], face.right() + 25)
        # Single numpy slice replaces the original O(h*w) per-pixel copy.
        crop = img[top:bottom, left:right]
        try:
            cv2.imwrite(img_out, crop)
        except Exception:
            print(110)
            print("can't find face in id card photo")
            sys.exit()
    else:
        cnt += 1
        if cnt > 3:
            print(110)
            print("can't find face in id card photo")
            sys.exit()
        print("transpose times:", cnt)
        # Rotate the image 90 degrees: transpose, then mirror along the X axis.
        transposeImage = cv2.transpose(img)
        flipedImageX = cv2.flip(transposeImage, 0)
        # Retry detection on the rotated in-memory image.
        _detect_and_crop(flipedImageX, img_out, cnt)

def rotate_bound(image, angle):
    """Rotate *image* clockwise by *angle* degrees, enlarging the canvas so
    no part of the image is clipped.

    image -- BGR/greyscale ndarray
    angle -- rotation in degrees; positive values rotate clockwise
    """
    h, w = image.shape[:2]
    center = (w // 2, h // 2)

    # OpenCV rotates counter-clockwise for positive angles, hence the
    # negated angle for a clockwise rotation.
    matrix = cv2.getRotationMatrix2D(center, -angle, 1.0)
    cos_a = np.abs(matrix[0, 0])
    sin_a = np.abs(matrix[0, 1])

    # Size of the axis-aligned bounding box of the rotated image.
    new_w = int(h * sin_a + w * cos_a)
    new_h = int(h * cos_a + w * sin_a)

    # Shift the transform so the image stays centred in the new canvas.
    matrix[0, 2] += new_w / 2 - center[0]
    matrix[1, 2] += new_h / 2 - center[1]

    # Apply the affine warp into the enlarged canvas.
    return cv2.warpAffine(image, matrix, (new_w, new_h))

# Entry point.  Designed to be invoked from PHP via exec(), so all inputs
# arrive as command-line arguments; adapt the argument handling if calling
# from another language or directly.
if __name__ == '__main__':
    # argv[1]: source video, e.g. "wff.mp4"
    # argv[2]: directory for faces grabbed from the video, e.g. "pic/"
    # argv[3]: ID-card front image, e.g. "head.jpg"
    # argv[4]: output path for the cropped head shot, e.g. "head/1.jpg"
    # argv[5]: similarity threshold as a percentage (0-100)
    video_url, video_pic_path, card_pic_path_in, card_pic_path_out = sys.argv[1:5]

    # Convert the percentage into a face-distance tolerance.
    distance = 1 - (int(sys.argv[5]) / 100)

    CatchPICFromVideo(video_url, 2, video_pic_path)
    tranposeDector(card_pic_path_in, card_pic_path_out)
    compare(distance, video_pic_path + '/0.jpg', card_pic_path_out)
