from PIL import Image
from facenet import Facenet
import numpy as np
import cv2
import detect_face
import tensorflow as tf
import sys
import os

# Output sizes of the two alignment templates as [height, width].
imgSize1 = [112, 96]
imgSize2 = [112, 112]
# Canonical five-point landmark templates. Row order: left eye, right eye,
# nose tip, left mouth corner, right mouth corner (x, y in pixels).
coord5point1 = [[30.2946, 51.6963],  # target points for the 112x96 template
                [65.5318, 51.6963],
                [48.0252, 71.7366],
                [33.5493, 92.3655],
                [62.7299, 92.3655]]
coord5point2 = [[30.2946 + 8.0000, 51.6963],  # target points for the 112x112 template (x shifted by 8)
                [65.5318 + 8.0000, 51.6963],
                [48.0252 + 8.0000, 71.7366],
                [33.5493 + 8.0000, 92.3655],
                [62.7299 + 8.0000, 92.3655]]


def transformation_from_points(points1, points2):
    """Compute the 3x3 similarity transform mapping points1 onto points2.

    Standard Procrustes analysis: both point sets are centered and scaled,
    the optimal rotation is recovered from the SVD of the correlation
    matrix, and the pieces are reassembled into a homogeneous matrix.

    Parameters
    ----------
    points1, points2 : (N, 2) array-like (np.matrix or ndarray)
        Corresponding landmark coordinates.

    Returns
    -------
    np.ndarray of shape (3, 3)
        [[s*R, t], [0, 0, 1]] — the top two rows can be fed directly to
        cv2.warpAffine.

    Notes
    -----
    The original implementation used deprecated np.matrix semantics, where
    ``*`` means matrix multiplication; passing plain ndarrays silently
    produced elementwise products and a wrong transform. This version uses
    explicit ``@`` so both input kinds work, and never mutates its inputs.
    """
    p1 = np.asarray(points1, dtype=np.float64)
    p2 = np.asarray(points2, dtype=np.float64)

    # Remove translation.
    c1 = p1.mean(axis=0)
    c2 = p2.mean(axis=0)
    p1 = p1 - c1
    p2 = p2 - c2

    # Remove scale.
    s1 = np.std(p1)
    s2 = np.std(p2)
    p1 /= s1
    p2 /= s2

    # Optimal rotation via SVD of the 2x2 correlation matrix.
    U, _, Vt = np.linalg.svd(p1.T @ p2)
    R = (U @ Vt).T

    scale = s2 / s1
    translation = (c2 - scale * (R @ c1)).reshape(2, 1)
    affine = np.hstack([scale * R, translation])
    return np.vstack([affine, [0.0, 0.0, 1.0]])


def warp_im(img_im, orgi_landmarks, tar_landmarks):
    """Warp img_im with the similarity transform that maps orgi_landmarks
    onto tar_landmarks.

    The output image keeps the input's width and height; callers crop it
    to the template size afterwards.
    """
    src_pts = np.float64(np.matrix([[p[0], p[1]] for p in orgi_landmarks]))
    dst_pts = np.float64(np.matrix([[p[0], p[1]] for p in tar_landmarks]))
    trans = transformation_from_points(src_pts, dst_pts)
    h, w = img_im.shape[0], img_im.shape[1]
    # warpAffine wants only the top 2x3 of the homogeneous 3x3 matrix.
    return cv2.warpAffine(img_im, trans[:2], (w, h))


def rect(path):
    """Detect face(s) in the image at `path`, align each to the 112x112
    template, and overwrite the file with the aligned crop.

    Pipeline per detected face: take the union of the MTCNN bounding box
    and the five landmarks, expand it by 100% in every direction (so the
    similarity warp does not introduce black borders), crop, warp the crop
    so the landmarks land on `coord5point2`, crop to 112x112, and save
    back to `path`. If several faces are detected, the file ends up
    holding the last one processed.

    Parameters
    ----------
    path : str
        Path to an image readable by cv2.imread; the file is overwritten.
    """
    # Build the three MTCNN stage networks (P-Net, R-Net, O-Net) in a
    # private graph. NOTE(review): this reloads the detector on every call;
    # hoisting it to module level would be faster but changes import-time
    # side effects, so it is kept here.
    with tf.Graph().as_default():
        sess = tf.compat.v1.Session()
        pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
    minsize = 50                 # minimum face size in pixels
    threshold = [0.6, 0.7, 0.7]  # per-stage score thresholds
    factor = 0.709               # image-pyramid scale factor

    img_im = cv2.imread(path)
    # Upsample small images so the detector has enough resolution.
    if img_im.shape[0] < 256:
        img_im = cv2.resize(img_im, (256, 256))
    height, width = img_im.shape[:2]

    bounding_boxes, points = detect_face.detect_face(
        img_im, minsize, pnet, rnet, onet, threshold, factor)

    # points[:, i] holds [x1..x5, y1..y5] for face i in the order:
    # left eye, right eye, nose, left mouth corner, right mouth corner.
    for i in range(bounding_boxes.shape[0]):
        xs = points[:, i][:5]
        ys = points[:, i][5:]
        # Box that covers both the detector box and every landmark.
        x1 = int(min(bounding_boxes[i][0], min(xs)))
        y1 = int(min(bounding_boxes[i][1], min(ys)))
        x2 = int(max(bounding_boxes[i][2], max(xs)))
        y2 = int(max(bounding_boxes[i][3], max(ys)))

        # Expand by 100% on each side, clamped to the image bounds, to
        # avoid black edges on the face after alignment.
        new_x1 = max(int(1.50 * x1 - 0.50 * x2), 0)
        new_x2 = min(int(1.50 * x2 - 0.50 * x1), width - 1)
        new_y1 = max(int(1.50 * y1 - 0.50 * y2), 0)
        new_y2 = min(int(1.50 * y2 - 0.50 * y1), height - 1)

        # Landmarks expressed in the coordinate frame of the expanded crop.
        face_landmarks = [[x - new_x1, y - new_y1] for x, y in zip(xs, ys)]
        face = img_im[new_y1:new_y2, new_x1:new_x2]

        # Similarity-align to the 112x112 template, crop, and save.
        # (The original also computed an unused 112x96 alignment; that
        # wasted warp has been removed — only the 112x112 crop was saved.)
        dst = warp_im(face, face_landmarks, coord5point2)
        crop_im = dst[0:imgSize2[0], 0:imgSize2[1]]
        cv2.imwrite(path, crop_im)

    # The detector closures are no longer needed; don't leak the session.
    sess.close()

def blur_demo(image):
    """Return `image` smoothed with a 3x3 box (mean) filter."""
    return cv2.blur(image, (3, 3))

def median_blur_demo(image):
    """Return `image` smoothed with a 5x5 median filter.

    Median filtering is particularly effective against salt-and-pepper
    noise (per the original author's note).
    """
    return cv2.medianBlur(image, 5)

def predict(file1, file2):
    """Return the Facenet similarity score for the two face images.

    Parameters
    ----------
    file1, file2 : str
        Paths to (ideally already aligned) face images readable by PIL.

    Returns
    -------
    The value of `Facenet.detect_image` — the caller indexes it with
    `[0]`, so it is presumably array-like; confirm against the Facenet
    implementation.

    Notes
    -----
    The original constructed a fresh `Facenet()` on every call even
    though `main` invokes this once per sample pair; the model is now
    built lazily on first use and cached on the function object.
    """
    model = getattr(predict, "_model", None)
    if model is None:
        model = Facenet()
        predict._model = model
    img1 = Image.open(file1)
    img2 = Image.open(file2)
    return model.detect_image(img1, img2)

def main(to_pred_dir, result_save_path):
    """Score every (a.jpg, b.jpg) pair under `to_pred_dir` and write a CSV.

    Each subdirectory of `to_pred_dir` must contain `a.jpg` and `b.jpg`.
    Image `a` is denoised (box blur then median blur), both images are
    face-aligned in place via `rect`, and the pair is scored with
    `predict`. Results are written as "id,label" rows to
    `result_save_path`, one per subdirectory.

    NOTE: `a.jpg` and `b.jpg` are overwritten on disk by this pipeline.
    """
    subdirs = os.listdir(to_pred_dir)  # subdirectory names double as sample ids

    labels = []
    for subdir in subdirs:
        path_a = os.path.join(to_pred_dir, subdir, "a.jpg")
        path_b = os.path.join(to_pred_dir, subdir, "b.jpg")
        imga = cv2.imread(path_a)
        imgb = cv2.imread(path_b)
        # Small "a" images are shrunk to 125x125 before denoising, then
        # upsampled to 256 below — presumably deliberate degradation to
        # match the pair's quality. TODO(review): confirm 125 is not a
        # typo for 256.
        if imga.shape[0] < 256:
            imga = cv2.resize(imga, (125, 125))
        imgf = median_blur_demo(blur_demo(imga))
        if imgf.shape[0] < 256:
            imgf = cv2.resize(imgf, (256, 256))
        if imgb.shape[0] < 256:
            imgb = cv2.resize(imgb, (256, 256))
        cv2.imwrite(path_a, imgf)

        # Align both faces in place, then score the pair.
        rect(path_a)
        rect(path_b)
        result = predict(path_a, path_b)
        print(result[0])
        labels.append(result[0])

    # Context manager guarantees the file is closed even if a write fails
    # (the original leaked the handle on exceptions).
    with open(result_save_path, "w") as fw:
        fw.write("id,label\n")
        for subdir, label in zip(subdirs, labels):
            fw.write("{},{}\n".format(subdir, label))
if __name__ == "__main__":
    # Usage: python <script> <dir_with_sample_subdirs> <result_csv_path>
    main(sys.argv[1], sys.argv[2])