#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import copy
import cv2
import numpy as np
from mtcnn.mtcnn import MTCNN
import os, sys
import pickle
from pathlib import Path

# dataset : http://www.cs.columbia.edu/CAVE/databases/pubfig/
# Paths are relative to the directory the script is launched from.
img_input_path = "../obm.jpg"   #"../test.jpg"
img_output_path = "../rst.jpg"
face_img_output_path = "../img/evaluate/face/"      # cropped query faces
face_feature_output_path = "../img/evaluate/feature/"  # pickled MTCNN dicts for query faces

# Face database: crops + pickled features written by construct_data_base()
data_base_path = "../img/data/"    # output: face_<n>_<i>.jpg / .txt pairs
data_base_input = "../img/train/"  # input: raw training images

def crop_faces(image, result):
    """Annotate MTCNN detections, crop each face, persist it, and identify it.

    Args:
        image: BGR image (numpy array); annotated in place with boxes/keypoints.
        result: list of MTCNN detection dicts ('box', 'keypoints', ...).

    Side effects: writes face_<i>.jpg and pickled face_<i>.txt under the
    evaluate directories, writes the annotated image to img_output_path,
    calls identify() per face, and opens OpenCV windows (blocks on a key).
    """
    # Keep an unannotated copy to crop clean face images from.
    original_img = copy.deepcopy(image)

    # Draw the bounding box and all five facial keypoints per detection.
    for rst in result:
        x, y, w, h = rst['box']
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 155, 255), 2)
        for point in rst['keypoints'].values():
            cv2.circle(image, point, 2, (0, 155, 255), 2)

    # Crop, persist, and identify each face.
    for i, rst in enumerate(result):
        x1, y1, w, h = rst['box']
        # MTCNN can report slightly negative coordinates; clamp them so the
        # numpy slice below does not wrap around (negative-index semantics).
        x1, y1 = max(x1, 0), max(y1, 0)
        x2, y2 = x1 + w, y1 + h
        face = original_img[y1:y2, x1:x2]

        face_path = face_img_output_path + "face_" + str(i) + ".jpg"
        face_feature_path = face_feature_output_path + "face_" + str(i) + ".txt"
        cv2.imwrite(face_path, face)

        with open(face_feature_path, 'wb') as f:
            pickle.dump(rst, f)

        # identify face against the database
        identify(face, rst)

    # show rst
    cv2.imwrite(img_output_path, image)
    cv2.imshow("org", original_img)
    cv2.imshow("rst", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def get_thumbnail(img, size=(100, 70), greyscale=False):
    """Return a nearest-neighbour thumbnail of *img*.

    The previous implementation used np.resize, which repeats/truncates the
    flattened pixel buffer instead of resampling — destroying all spatial
    structure. This version samples evenly spaced rows and columns.

    Args:
        img: 2-D (grey) or 3-D (H, W, C) numpy array.
        size: (rows, cols) of the thumbnail.
        greyscale: if True, average the channel axis away (3-D input only).

    Returns:
        Array of shape (size[0], size[1]) or (size[0], size[1], C).
    """
    img = np.asarray(img)
    rows = np.linspace(0, img.shape[0] - 1, size[0]).astype(int)
    cols = np.linspace(0, img.shape[1] - 1, size[1]).astype(int)
    thumb = img[rows][:, cols]
    if greyscale and thumb.ndim == 3:
        thumb = thumb.mean(axis=2)
    return thumb

def get_cosine_distance(img1, img2):
    """Cosine similarity between two images, compared as flattened thumbnails.

    Fixes over the original: the None check was `img1.any()==None`, which is
    always False (and raises AttributeError when the argument really is
    None); vectors are now cast to float64 so uint8 pixel data cannot
    overflow inside the dot product; a zero denominator returns 0 instead
    of dividing by zero.

    Returns:
        Similarity in [-1, 1], or 0 for missing/all-zero input.
    """
    if img1 is None or img2 is None:
        return 0
    v1 = get_thumbnail(img1).flatten().astype(np.float64)
    v2 = get_thumbnail(img2).flatten().astype(np.float64)
    denom = np.linalg.norm(v1) * np.linalg.norm(v2)
    if denom == 0:
        return 0
    return np.dot(v1, v2) / denom

def get_cosine_distance_vec(v1, v2):
    """Return the cosine similarity of two 1-D feature vectors."""
    numerator = np.dot(v1, v2)
    denominator = np.linalg.norm(v1) * np.linalg.norm(v2)
    return numerator / denominator

def convert_face_feature_to_vector(feature: dict):
    """Flatten an MTCNN detection dict into a plain feature list.

    Example input:
    {'box': [277, 90, 48, 63],
     'keypoints': {'nose': (303, 131), 'mouth_right': (313, 141),
                   'right_eye': (314, 114), 'left_eye': (291, 117),
                   'mouth_left': (296, 143)},
     'confidence': 0.9985}

    Output layout: [x, y, w, h, confidence,
                    nose_xy, mouth_right_xy, mouth_left_xy,
                    right_eye_xy, left_eye_xy]
    """
    keypoints = feature["keypoints"]

    vec = list(feature["box"])
    vec.append(feature["confidence"])
    for name in ("nose", "mouth_right", "mouth_left", "right_eye", "left_eye"):
        vec.extend(keypoints[name])
    return vec

def construct_data_base():
    """Build the face database from the training images.

    For every image under data_base_input, detect faces with MTCNN and
    write, per face, a cropped jpg plus a pickled detection dict under
    data_base_path, named face_<n>_<i>.jpg / face_<n>_<i>.txt where n
    indexes the source image (only images that yielded at least one
    usable face advance n) and i the face within that image.
    """
    detector = MTCNN()

    p_in = Path(data_base_input)
    p_out = Path(data_base_path)
    p_out_str = p_out.as_posix() + "/"
    n = 0
    for p in p_in.iterdir():
        p_str = p.as_posix()
        # NOTE(review): cv2.imread returns None for unreadable/non-image
        # files, which would make detect_faces fail — assumes the input
        # directory contains only valid images.
        img = cv2.imread(p_str)
        original_img = copy.deepcopy(img)

        result = detector.detect_faces(img)
        # Each database image is expected to hold one face, but every
        # detection is still saved with its own index i.
        i = 0
        for rst in result:
            bounding_box = rst['box']
            x1, y1, w, h = bounding_box
            # Skip detections with negative (out-of-frame) boxes, which
            # would produce wrapped/empty numpy slices.
            if x1<0 or y1<0 or w<0 or h<0:
                continue
            x2, y2 = x1 + w, y1 + h
            # Crop from the untouched copy of the image.
            face = original_img[y1:y2, x1:x2]

            face_path = p_out_str + "face_" + str(n) + "_" + str(i) + ".jpg"
            face_feature_path = p_out_str + "face_"+ str(n) + "_" + str(i) +".txt"
            cv2.imwrite(face_path, face)
            with open(face_feature_path, 'wb') as f:
                pickle.dump(rst, f)

            i+=1
        # No usable face in this image: reuse the same n for the next one.
        if i == 0: continue
        n+=1
    print("data base construct successfully")

# identify the image from database
def identify(face, feature):
    """Compare one detected face against every database entry and show
    the best match.

    The score per entry is the cosine similarity of the thumbnail pixels
    (get_cosine_distance) plus the cosine similarity of the geometric
    feature vectors (get_cosine_distance_vec).

    Args:
        face: BGR crop of the detected face.
        feature: MTCNN detection dict ('box', 'keypoints', 'confidence').
    """
    feature_vec = convert_face_feature_to_vector(feature)

    # Pair each face_<n>_<i>.jpg with its face_<n>_<i>.txt: both lists
    # share identical stems by construction (see construct_data_base), so
    # sorting both by stem keeps zip() aligned.
    p_db = Path(data_base_path)
    l_face = [ p for p in p_db.iterdir() if ".jpg" == p.suffix]
    l_feature = [p for p in p_db.iterdir() if ".txt" == p.suffix]
    l_face.sort(key=lambda p : p.stem)
    l_feature.sort(key=lambda p: p.stem)

    scores = []
    for p_face, p_feature in zip(l_face, l_feature):
        
        p_feature_str = p_feature.resolve()
        # NOTE(review): pickle.load is only safe here because these files
        # were written locally by construct_data_base — never load
        # untrusted pickles.
        f = open(p_feature_str, 'rb')
        feature_db = pickle.load(f)
        f.close()
        p_face_str = p_face.as_posix()
        face_db = cv2.imread(p_face_str)

        feature_db_vec = convert_face_feature_to_vector(feature_db)

        s1 = get_cosine_distance(face, face_db)                    # pixel similarity
        s2 = get_cosine_distance_vec(feature_vec, feature_db_vec)  # geometry similarity
        s = s1 + s2
        scores.append(s)
    print(scores)
    # find index of max element
    # NOTE(review): s_max starts at 0, so if every score were negative the
    # first entry would win by default; scores here are expected positive.
    cnt = len(scores)
    s_max = 0
    s_max_idx = 0
    for i in range(cnt):
        if(scores[i] > s_max):
            s_max = scores[i]
            s_max_idx = i

    # Reload and print the winning entry's feature dict.
    s_feature = l_feature[s_max_idx]
    s_feature_str = s_feature.resolve()
    f = open(s_feature_str, 'rb')
    feature_s = pickle.load(f)
    f.close()
    print("orignal feature:", feature)
    print("identified feature:", feature_s)

    # Display the matched database face next to the query face
    # (cv2.waitKey is expected to be called by the caller).
    p_face_s = l_face[s_max_idx]
    p_face_s_str = p_face_s.as_posix()
    face_s = cv2.imread(p_face_s_str)
    cv2.imshow("face", face)
    cv2.imshow("identified face", face_s)


def recognize():
    """Batch recognition over previously cropped faces — unimplemented stub."""
    scores = []

    img_dir = Path(face_img_output_path)
    feature_dir = Path(face_feature_output_path)
    # TODO: iterate the img_dir/feature_dir pairs and score each against the database


def test():
    """Ad-hoc check: load and print one pickled feature from the database."""
    face_feature_path = data_base_path + "face_12_0.txt"
    with open(face_feature_path,'rb') as f:
        loaded = pickle.load(f)
        print(loaded)

def main():
    """Rebuild the face database, then detect, crop, and identify the
    faces found in the configured input image."""
    construct_data_base()

    detector = MTCNN()
    img = cv2.imread(img_input_path)
    detections = detector.detect_faces(img)
    crop_faces(img, detections)
    # recognize()


if __name__ == "__main__":
    # test()  # uncomment for the ad-hoc pickle-loading check
    main()