import requests
from json import JSONDecoder
import numpy as np
import pickle
import logging
from mtcnn import MTCNN
import cv2
import os
import train
import test
from config import *
import pandas as pd
from pathlib import Path

# Root logging configuration for the whole script; DEBUG is verbose and also
# surfaces debug output from third-party libraries.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)

# Output crop size (rows, cols) applied to the warped image in align().
imgSize = [600, 600]
# Target positions for the 5 facial keypoints (left eye, right eye, nose,
# left mouth corner, right mouth corner) that detected landmarks are warped
# onto.  NOTE(review): these look like a standard small alignment template;
# warp_im() scales the targets by 5 before solving the transform — confirm
# the combination actually matches the 600x600 crop above.
coord5point = [[30.2946, 51.6963],
               [65.5318, 51.6963],
               [48.0252, 71.7366],
               [33.5493, 92.3655],
               [62.7299, 92.3655]]

# Example of a detected 5-point landmark set, kept for reference:
# face_landmarks = [[259, 137],
#                   [319, 150],
#                   [284, 177],
#                   [253, 206],
#                   [297, 216]]

# Project-wide configuration object (paths, file names) from config.py.
config = Configuration()


def transformation_from_points(points1, points2):
    """Return the 3x3 similarity transform (orthogonal Procrustes) mapping points1 onto points2.

    Solves for scale s, rotation R and translation t minimizing
    ||s * R @ p1 + t - p2|| via the SVD-based Procrustes method.

    Args:
        points1: (N, 2) source landmark coordinates (np.matrix or ndarray).
        points2: (N, 2) target landmark coordinates, same shape as points1.

    Returns:
        np.matrix of shape (3, 3): [[s*R | t], [0, 0, 1]]; callers use
        M[:2] as the 2x3 affine matrix for cv2.warpAffine.
    """
    # Work on plain float64 ndarray copies so `@` is an explicit matrix
    # product regardless of the input type.  (The original used np.matrix
    # `*`, which silently degrades to an element-wise product when callers
    # pass plain ndarrays.)
    points1 = np.asarray(points1, dtype=np.float64).copy()
    points2 = np.asarray(points2, dtype=np.float64).copy()
    # Remove translation.
    c1 = np.mean(points1, axis=0)
    c2 = np.mean(points2, axis=0)
    points1 -= c1
    points2 -= c2
    # Remove scale (std over all coordinates, matching the original).
    s1 = np.std(points1)
    s2 = np.std(points2)
    points1 /= s1
    points2 /= s2
    # Rotation from the SVD of the correlation matrix of the normalized sets.
    U, S, Vt = np.linalg.svd(points1.T @ points2)
    R = (U @ Vt).T
    # Assemble the homogeneous 3x3 similarity transform.
    scaled_R = (s2 / s1) * R
    t = c2.reshape(2, 1) - scaled_R @ c1.reshape(2, 1)
    return np.matrix(np.vstack([np.hstack((scaled_R, t)),
                                [0., 0., 1.]]))


def warp_im(img_im, orgi_landmarks, tar_landmarks):
    """Warp img_im so that orgi_landmarks align with (5x-scaled) tar_landmarks.

    Args:
        img_im: BGR image as read by cv2.imread.
        orgi_landmarks: iterable of (x, y) detected landmark coordinates.
        tar_landmarks: iterable of (x, y) template landmark coordinates.

    Returns:
        The affinely-warped image, same size as img_im.
    """
    pts1 = np.float64(np.matrix([[point[0], point[1]] for point in orgi_landmarks]))
    # pts1 is nearly 10x larger than pts2.
    pts2 = np.float64(np.matrix([[point[0], point[1]] for point in tar_landmarks]))
    # TODO(Warning: This multiplication may not make sense. It may be even wrong.)
    pts2 = 5 * pts2
    M = transformation_from_points(pts1, pts2)
    # Why does the image resolution become so low after the affine transform?
    # Ideally the resolution after cropping would be about the same as the
    # resolution of the makeup dataset.
    # The main issue is that the image resolution also becomes very small
    # after the transform below. Why?
    # It should be caused by M; specifically, some of the parameters in
    # pts1 and pts2 are too small.
    dst = cv2.warpAffine(img_im, M[:2], (img_im.shape[1], img_im.shape[0]))
    return dst


def get_landmarks(face_img_path):
    """
    Get the landmarks of an image using the Face++ detect API
    (url: https://api-cn.faceplusplus.com/facepp/v3/detect).

    Args:
        face_img_path: Path of the input image file.

    Returns:
        numpy ndarray of the landmarks of the input image, reordered to the
        old Face++ point sequence via transform_landmarks().

    Raises:
        KeyError / IndexError: if the API response contains no detected face
        (e.g. on an API error payload).
    """
    compare_url = "https://api-cn.faceplusplus.com/facepp/v3/detect"
    # SECURITY(review): API credentials are hard-coded in source; move them
    # to an environment variable or untracked config before sharing this code.
    key = "8qeuHzHo-cjmG8-5Cl7o6qg5YpKMIvby"
    secret = "xXv1JRE3gJcfDqdSmDfGlpZqOvmKiH0v"
    data = {"api_key": key, "api_secret": secret, "return_landmark": 2, "return_attributes": "gender"}
    # Open the image inside a context manager so the file handle is always
    # closed (the original opened it inline in the files dict and leaked it).
    with open(face_img_path, "rb") as img_file:
        response = requests.post(compare_url, data=data, files={"image_file": img_file})
    req_con = response.content.decode('utf-8')
    req_dict = JSONDecoder().decode(req_con)
    # TODO(Warning: The landmarks below and the landmarks of the original authors may not match.)
    landmarks_dict = req_dict["faces"][0]["landmark"]
    # Point order is the insertion order of the API's JSON keys (dicts keep
    # insertion order in Python 3.7+).
    landmarks_list = [list(ele.values()) for ele in landmarks_dict.values()]
    landmarks_ndarray = np.array(landmarks_list)
    # Reorder from the new Face++ point sequence to the old one.
    old_landmarks_ndarray = transform_landmarks(landmarks_ndarray)
    return old_landmarks_ndarray


def transform_landmarks(new_landmarks):
    """
    Convert the new point sequence of fpp to the old point sequence of fpp.

    The mapping is read from "fpp-new-old_version_matchup.xlsx" next to this
    file; its "old" column is assumed to be a 0-based permutation of the row
    indices (the original code only worked under that assumption, since the
    permutation matrix had to be invertible and index np.eye in range).

    Args:
        new_landmarks: (N, k) ndarray of landmarks in the new point order.

    Returns:
        ndarray of the same shape with the rows reordered to the old sequence.
    """
    matchup_map = pd.read_excel(os.path.join(Path(__file__).resolve().parent, "fpp-new-old_version_matchup.xlsx"))
    sequence_of_old_point = matchup_map["old"].to_numpy()
    # Row-permutation matrix P with P[i] = e_{sequence_of_old_point[i]}.
    transformation_matrix = np.eye(sequence_of_old_point.shape[0])[sequence_of_old_point]
    # A permutation matrix is orthogonal, so its inverse is exactly its
    # transpose — no need for the numerical np.linalg.inv used originally.
    inv_transformation_matrix = transformation_matrix.T
    old_landmarks_ndarray = np.matmul(inv_transformation_matrix, new_landmarks)
    return old_landmarks_ndarray


def align(input_img_name):
    """Align one face image with MTCNN 5-point detection + an affine warp.

    Reads <config.path_of_hust423_img_dir>/not_aligned/<input_img_name>,
    detects the five facial keypoints of the first face found by MTCNN,
    warps the image so they match coord5point, crops to imgSize, and writes
    the result under config.data_dir/config.before_dir.

    Args:
        input_img_name: File name (not a path) of the image to align.

    Returns:
        Path of the aligned image relative to config.data_dir.

    Raises:
        IndexError: if MTCNN detects no face in the image.
    """
    path_of_hust423_img_dir = config.path_of_hust423_img_dir
    path_of_input_img = os.path.join(path_of_hust423_img_dir, "not_aligned",
                                     input_img_name)
    # Read the image once and derive the RGB copy for MTCNN from it
    # (the original read the same file from disk twice).
    img_im = cv2.imread(path_of_input_img)
    img_rgb = cv2.cvtColor(img_im, cv2.COLOR_BGR2RGB)
    detector = MTCNN()
    detect_result_of_mtcnn = detector.detect_faces(img_rgb)
    # Keypoints of the first detected face, in the dict's insertion order.
    keypoints_dict = detect_result_of_mtcnn[0]['keypoints']
    face_landmarks = list(keypoints_dict.values())
    dst = warp_im(img_im, face_landmarks, coord5point)
    # cv2.imwrite(os.path.join(config.data_dir, config.before_dir, "not_cropped_{}".format(input_img_name)), dst)
    # Crop the warped image to the configured output size.
    crop_im = dst[0:imgSize[0], 0:imgSize[1]]
    path_of_output_img = os.path.join(config.data_dir, config.before_dir,
                                      input_img_name)
    cv2.imwrite(path_of_output_img, crop_im)
    return os.path.join(config.before_dir, input_img_name)


def align_and_update_the_landmark_pk(img_name):
    """Align one image and record its landmarks in the landmark pickle file.

    Args:
        img_name: File name of an image under hust423_img/not_aligned.
    """
    # 1. Face alignment.
    path_of_img_aligned = align(img_name)
    # 2. Get landmarks.
    landmarks = get_landmarks(os.path.join(config.data_dir, path_of_img_aligned))
    # 3. Update the pk file, using context managers so both file handles are
    # closed deterministically (the original leaked both open() handles).
    # NOTE: pickle.load must only ever be run on trusted files.
    api_landmarks_pk_path = os.path.join(config.data_dir, config.lmk_name)
    with open(api_landmarks_pk_path, 'rb') as pk_in:
        api_landmarks = pickle.load(pk_in)
    api_landmarks[path_of_img_aligned] = landmarks
    with open(api_landmarks_pk_path, 'wb') as pk_out:
        pickle.dump(api_landmarks, pk_out)


def align_and_update_all_files():
    """
    Align and update the landmark pk for every file in hust423_img/not_aligned.
    """
    not_aligned_dir = os.path.join(config.path_of_hust423_img_dir, "not_aligned")
    for img_name in os.listdir(not_aligned_dir):
        align_and_update_the_landmark_pk(img_name)


if __name__ == '__main__':
    # Earlier pipeline stages (alignment + landmark extraction), currently
    # disabled; uncomment to regenerate the aligned images and landmark pk.
    # # align_and_update_the_landmark_pk("sfr.jpg")
    # align_and_update_all_files()
    # 4. Train.
    train.main()
    # 5. Test.
    test.main()
