import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy import misc
from os.path import join as pjoin
import sys
import copy
import cv2
import detect_face
import facenet
import nn4 as network
import random


import sklearn

from sklearn.externals import joblib

# --------------------------------------------------------------------------
# One-time setup: build the MTCNN detector cascade (P-Net/R-Net/O-Net) in
# its own graph/session; the returned closures pnet/rnet/onet are reused by
# every detect_face() call in this script.
# --------------------------------------------------------------------------

print('Creating networks and loading parameters')
gpu_memory_fraction=0.25  # cap this process at 25% of GPU memory
with tf.Graph().as_default():
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
    with sess.as_default():
        pnet, rnet, onet = detect_face.create_mtcnn(sess, './models/mtcnn')

# Detection / alignment hyper-parameters.
minsize = 20 # minimum face size in pixels accepted by MTCNN
threshold = [ 0.6, 0.7, 0.7 ]  # per-stage score thresholds (P-Net, R-Net, O-Net)
factor = 0.709 # image-pyramid scale factor between detection scales
margin = 32            # extra pixels kept around a detected box before cropping
image_size = 160       # side length of the square crop fed to FaceNet
model = "./models/facenet/20170512-110547"  # FaceNet checkpoint directory

# NOTE(review): cv2.imread returns None (not an exception) when the path is
# missing/unreadable, which would crash on .ndim below — verify the path.
# Pixels are BGR channel order, as cv2 always returns.
img = cv2.imread("/home/dl/test/face/10.jpg")

if img.ndim == 2:
    # Grayscale image: replicate the single channel to get 3 channels.
    img = facenet.to_rgb(img)
img = img[:, :, 0:3]  # drop any alpha channel

bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)

# Disabled debug code: draw the detected boxes on the image and save it.
"""
for face_position in bounding_boxes:
    face_position = face_position.astype(int)

    # print((int(face_position[0]), int( face_position[1])))
    # word_position.append((int(face_position[0]), int( face_position[1])))

    cv2.rectangle(img, (face_position[0],
                          face_position[1]),
                  (face_position[2], face_position[3]),
                  (0, 255, 0), 2)

    cv2.imwrite("/home/dl/test/face/5_f.jpg", img)


"""
def get_aligned_img(img, bounding_boxes, margin=32, image_size=160):
    """Crop the most prominent detected face out of img and resize it.

    Args:
        img: HxWx3 image array (any channel order; passed through as-is).
        bounding_boxes: (N, >=4) array from detect_face.detect_face();
            columns 0-3 are x1, y1, x2, y2.
        margin: extra pixels added around the detection box before cropping
            (default matches the module-level constant).
        image_size: side length of the square output crop (default matches
            the module-level constant).

    Returns:
        (image_size, image_size, 3) resized crop, or None when
        bounding_boxes is empty (no face detected).
    """
    nrof_faces = bounding_boxes.shape[0]
    if nrof_faces == 0:
        return None  # explicit no-face result (was an implicit fall-through)
    det = bounding_boxes[:, 0:4]
    img_size = np.asarray(img.shape)[0:2]
    if nrof_faces > 1:
        # Several faces: prefer the one that is both large and close to the
        # image centre.
        bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
        img_center = img_size / 2
        offsets = np.vstack(
            [(det[:, 0] + det[:, 2]) / 2 - img_center[1],
             (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
        offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
        # some extra weight on the centering
        index = np.argmax(bounding_box_size - offset_dist_squared * 2.0)
        det = det[index, :]
    det = np.squeeze(det)
    # Expand the box by margin/2 on each side, clamped to the image bounds.
    bb = np.zeros(4, dtype=np.int32)
    bb[0] = np.maximum(det[0] - margin / 2, 0)
    bb[1] = np.maximum(det[1] - margin / 2, 0)
    bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
    bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
    cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
    # NOTE(review): scipy.misc.imresize was deprecated in SciPy 1.0 and
    # removed in 1.3 — newer environments need cv2.resize or PIL instead.
    scaled = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
    return scaled

# Align/crop the face detected in the test image loaded above.
# NOTE(review): aligned_img is None when no face was found — not checked here.
aligned_img = get_aligned_img(img, bounding_boxes)
# Disabled: dump the aligned crop for visual inspection.
#cv2.imwrite("/home/dl/test/face/5_f.jpg", aligned_img)

def cal_embedding(image):
    """Run one aligned face crop through FaceNet and return its embedding.

    Rebuilds the graph and reloads the checkpoint from `model` on every
    call, so this is slow — acceptable for a one-off comparison script.
    Returns the embedding array produced by the "embeddings:0" tensor.
    """
    with tf.Graph().as_default():
        with tf.Session() as sess:
            facenet.load_model(model)
            # Resolve the frozen graph's endpoints by tensor name.
            graph = tf.get_default_graph()
            images_placeholder = graph.get_tensor_by_name("input:0")
            embeddings = graph.get_tensor_by_name("embeddings:0")
            phase_train_placeholder = graph.get_tensor_by_name("phase_train:0")

            # Add the batch dimension: (-1, H, W, 3).
            batch = np.array(image.reshape(-1, image_size, image_size, 3))
            fetches = sess.run(
                [embeddings],
                feed_dict={images_placeholder: batch,
                           phase_train_placeholder: False})
            return fetches[0]

def compute_euclidean_distance(x, y):
    """Return the SQUARED Euclidean distance between two embedding vectors.

    Despite the name, no square root is taken — callers compare the raw
    sum of squared differences, which preserves ordering. This is pure
    numpy; the previous tf.name_scope wrapper was a no-op around numpy
    operations (and the docstring wrongly claimed TensorFlow variables),
    so it has been removed.

    Args:
        x, y: array-likes of the same shape (numpy broadcasting applies).

    Returns:
        Scalar numpy float: sum((x - y) ** 2).
    """
    return np.sum(np.square(np.subtract(x, y)))

def prewhiten(x):
    """Normalise an image array to zero mean and (clamped) unit variance,
    the per-image whitening FaceNet expects on its input crops."""
    mu = np.mean(x)
    sigma = np.std(x)
    # Never divide by less than 1/sqrt(pixel count): guards against a
    # near-constant image blowing up the scale.
    scale = np.maximum(sigma, 1.0 / np.sqrt(x.size))
    return np.multiply(np.subtract(x, mu), 1 / scale)

if __name__ == '__main__':
    # Batch-embed three pre-aligned face crops with a single model load.
    # NOTE(review): emb_array is computed but never used below; kept only to
    # preserve the original script's behavior (model load + batch forward).
    with tf.Graph().as_default():
        with tf.Session() as sess:
            facenet.load_model(model)
            imagepathes = ["/home/dl/test/face/3_f.jpg","/home/dl/test/face/4_f.jpg","/home/dl/test/face/5_f.jpg"]
            images = facenet.load_data(imagepathes, False, False, image_size)
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            feed_dict = {images_placeholder: images, phase_train_placeholder: False}
            emb_array = sess.run(embeddings, feed_dict=feed_dict)

    # Detect, align, whiten, and embed each raw photo individually.
    # BUG FIX: image 3 was read with misc.imread (RGB) while 4 and 5 used
    # cv2.imread (BGR); mixing channel orders makes the embedding distances
    # meaningless. All three now use cv2.imread for a consistent BGR input.
    img1 = cv2.imread("/home/dl/test/face/3.jpg")
    bounding_boxes, _ = detect_face.detect_face(img1, minsize, pnet, rnet, onet, threshold, factor)
    aligned = get_aligned_img(img1, bounding_boxes)
    aligned = prewhiten(aligned)
    emb_data1 = cal_embedding(aligned)

    img2 = cv2.imread("/home/dl/test/face/4.jpg")
    bounding_boxes, _ = detect_face.detect_face(img2, minsize, pnet, rnet, onet, threshold, factor)
    aligned = get_aligned_img(img2, bounding_boxes)
    aligned = prewhiten(aligned)
    emb_data2 = cal_embedding(aligned)

    img3 = cv2.imread("/home/dl/test/face/5.jpg")
    bounding_boxes, _ = detect_face.detect_face(img3, minsize, pnet, rnet, onet, threshold, factor)
    aligned = get_aligned_img(img3, bounding_boxes)
    aligned = prewhiten(aligned)
    emb_data3 = cal_embedding(aligned)

    # BUG FIX: compute_euclidean_distance returns a plain numpy scalar, so
    # wrapping it in sess.run() raised TypeError (not a valid TF fetch); the
    # stray extra tf.Session() is gone too. Also converted the Python-2-only
    # print statements to print() calls (valid in both Python 2 and 3).
    print("3 to 4:  {}".format(compute_euclidean_distance(emb_data1, emb_data2)))
    print("3 to 5:  {}".format(compute_euclidean_distance(emb_data1, emb_data3)))
    print("4 to 5:  {}".format(compute_euclidean_distance(emb_data2, emb_data3)))