import os
import random

import click
import cv2
import numpy as np
import tqdm
from loguru import logger

from analyze_iphone_data import *
from insightface.app import FaceAnalysis
from projection import vertices_and_projection_on_captured_image_ as vp2ci

is_remove_display_bg = False
is_debug_show = False

blend_keys = ['browDown_L', 'browDown_R', 'browInnerUp', 'browOuterUp_L', 'browOuterUp_R', 'cheekPuff', 'cheekSquint_L',
              'cheekSquint_R', 'eyeBlink_L', 'eyeBlink_R', 'eyeLookDown_L', 'eyeLookDown_R', 'eyeLookIn_L',
              'eyeLookIn_R', 'eyeLookOut_L', 'eyeLookOut_R', 'eyeLookUp_L', 'eyeLookUp_R', 'eyeSquint_L', 'eyeSquint_R',
              'eyeWide_L', 'eyeWide_R', 'jawForward', 'jawLeft', 'jawOpen', 'jawRight', 'mouthClose', 'mouthDimple_L',
              'mouthDimple_R', 'mouthFrown_L', 'mouthFrown_R', 'mouthFunnel', 'mouthLeft', 'mouthLowerDown_L',
              'mouthLowerDown_R', 'mouthPress_L', 'mouthPress_R', 'mouthPucker', 'mouthRight', 'mouthRollLower',
              'mouthRollUpper', 'mouthShrugLower', 'mouthShrugUpper', 'mouthSmile_L', 'mouthSmile_R', 'mouthStretch_L',
              'mouthStretch_R', 'mouthUpperUp_L', 'mouthUpperUp_R', 'noseSneer_L', 'noseSneer_R', 'tongueOut']


def trans_to_align(image, kps, input_size=256):
    from skimage import transform as trans
    new_size = 144
    dst_pts = np.array([
        [38.2946, 51.6963],
        [73.5318, 51.5014],
        [56.0252, 71.7366],
        [41.5493, 92.3655],
        [70.7299, 92.2041]], dtype=np.float32)
    dst_pts[:, 0] += ((new_size - 112) // 2)
    dst_pts[:, 1] += 8
    dst_pts[:, :] *= (input_size / float(new_size))
    tf = trans.SimilarityTransform()
    tf.estimate(kps, dst_pts)
    tform = tf.params[0:2, :]
    warped = cv2.warpAffine(image, tform, (input_size,) * 2)

    return tform, warped


def write_to_file(filename, data):
    with open(filename, 'w') as f:
        for item in data:
            image_path = item['image_path']
            mesh_path = item['mesh_path']
            kps_path = item['kps_path']
            blendshape_path = item['blendshape_path']
            worldspace_path = item['worldspace_path']
            f.write(f"{image_path}\t{mesh_path}\t{kps_path}\t{blendshape_path}\t{worldspace_path}\n")


@click.command()
@click.argument("arkit_dataset", type=click.Path(exists=True))
@click.argument("save", type=click.Path(exists=False))
@click.option('-rate', "--rate", nargs=3, type=float)
def main(arkit_dataset, save, rate):
    """制作landmark2d数据集主函数，暂时还没搞明白arkit提取出来的矩阵参数之间的明确的关系，
    所以这边先写制作2D投影后的关键点，先进行2D关键点的实验"""
    assert np.isclose(sum(rate), 1.0, rtol=1e-05, atol=1e-08, equal_nan=False), "The rates must sum to 1.0"
    # 创建一堆存数据集的文件夹
    images_folder = os.path.sep.join([save, 'images'])
    os.makedirs(images_folder, exist_ok=True)
    mesh_folder = os.path.sep.join([save, 'mesh'])
    os.makedirs(mesh_folder, exist_ok=True)
    kps_folder = os.path.sep.join([save, 'kps'])
    os.makedirs(kps_folder, exist_ok=True)
    draw_folder = os.path.sep.join([save, 'draw'])
    os.makedirs(draw_folder, exist_ok=True)
    worldspace_folder = os.path.sep.join([save, 'worldspace'])
    os.makedirs(worldspace_folder, exist_ok=True)
    blendshape_folder = os.path.sep.join([save, 'blendshape'])
    os.makedirs(blendshape_folder, exist_ok=True)
    dirs = list_subdirectories(arkit_dataset)
    text_dataset_list = list()
    error_data = 0
    for dir_name in tqdm.tqdm(dirs):
        basename = os.path.basename(dir_name)
        image_path = os.path.sep.join([images_folder, f'{basename}.png'])
        mesh_path = os.path.sep.join([mesh_folder, f'{basename}_mesh.npy'])
        kps_path = os.path.sep.join([kps_folder, f'{basename}_kps.npy'])
        blendshape_path = os.path.sep.join([blendshape_folder, f'{basename}_blendshape.npy'])
        worldspace_path = os.path.sep.join([worldspace_folder, f'{basename}_worldspace.npy'])

        image, data = load_captured_item_from_folder(dir_name)
        ori_shape = image.shape[:2][::-1]
        faces = app.get(image)
        det_kps5 = None
        if len(faces) > 0:
            det_kps5 = faces[0].kps
        if det_kps5 is not None:
            det_kps5 = np.asarray(det_kps5)
            np.save(os.path.sep.join([dir_name, "kps.npy"]), det_kps5)
        else:
            logger.error(f"{dir_name} not detected face.")
            error_data += 1
            continue
        # get blend shape
        blendshape_dict = data['blendShapes']
        blendshape_list = list()
        for k in blend_keys:
            blendshape_list.append(blendshape_dict[k])
        blendshape = np.asarray(blendshape_list)

        # get world space
        worldspace = data['vertex']

        project_points = get_mesh_projection_from_captured_item(image, data)
        kps_eys_left, kps_eys_right = fit_eyes_center_kps(project_points)
        mesh_kps5 = np.asarray(
            [kps_eys_left, kps_eys_right, project_points[37], project_points[249], project_points[684]])

        if is_remove_display_bg:
            sel_boundbox = get_bounding_box(project_points, scale=1.3)
            # 去除多余的内容，因为太多数据集带显示屏其他内容，这样做可能可以降低训练过程中拟合背景的干扰
            mask = np.zeros_like(image)
            cv2.rectangle(mask, (sel_boundbox[0], sel_boundbox[1]), (sel_boundbox[2], sel_boundbox[3]), (255, 255, 255),
                          -1)
            image = cv2.bitwise_and(image, mask)

        trans, warped = trans_to_align(image, det_kps5)
        trans_mesh = apply_affine_transform_to_keypoints(project_points, trans)
        trans_kps5 = apply_affine_transform_to_keypoints(det_kps5, trans)

        kps_eys_left, kps_eys_right = fit_eyes_center_kps(trans_mesh)
        trans_mesh_kps5 = np.asarray(
            [kps_eys_left, kps_eys_right, trans_mesh[37], trans_mesh[249], trans_mesh[684]])

        # 计算五个关键点之间的MAE，超过0.01则记为不合格
        mae = calculate_mean_absolute_error(trans_mesh_kps5, trans_kps5, (256, 256))

        if not is_debug_show:
            if mae > 0.03:
                logger.error(f"{dir_name} fit non-compliance. loss: {mae}")
                error_data += 1
                continue

        draw = warped.copy()
        for x, y in trans_mesh.astype(int):
            cv2.circle(draw, (x, y), radius=1, color=(0, 255, 0), thickness=1)
        for x, y in trans_kps5.astype(int):
            cv2.circle(draw, (x, y), radius=1, color=(0, 0, 255), thickness=2)

        if is_debug_show and mae > 0.03:
            print(mae)
            cv2.imshow("w", draw)
            cv2.waitKey(0)
        np.save(kps_path, trans_kps5)
        np.save(mesh_path, trans_mesh)
        cv2.imwrite(image_path, warped)
        cv2.imwrite(os.path.sep.join([draw_folder, f'{basename}.png']), draw)
        np.save(worldspace_path, worldspace)
        np.save(blendshape_path, blendshape)
        text_dataset_list.append(
            dict(image_path=os.path.sep.join(['images', f'{basename}.png']),
                 mesh_path=os.path.sep.join(['mesh', f'{basename}_mesh.npy']),
                 kps_path=os.path.sep.join(['kps', f'{basename}_kps.npy']),
                 blendshape_path=os.path.sep.join(['blendshape', f'{basename}_blendshape.npy']),
                 worldspace_path=os.path.sep.join(['worldspace', f'{basename}_worldspace.npy']),

        ))

    # 划分数据集索引文件
    random.shuffle(text_dataset_list)
    n = len(text_dataset_list)
    train_size = int(n * rate[0])
    val_size = int(n * rate[1])
    test_size = n - train_size - val_size
    train_data = text_dataset_list[:train_size]
    val_data = text_dataset_list[train_size:train_size + val_size]
    test_data = text_dataset_list[-test_size:]
    print("Train data:", len(train_data))
    print("Val data:", len(val_data))
    print("Test data:", len(test_data))
    print(f"has error data : {error_data}")

    write_to_file(os.path.sep.join([save, "train.txt"]), train_data)
    write_to_file(os.path.sep.join([save, "val.txt"]), val_data)
    write_to_file(os.path.sep.join([save, "test.txt"]), test_data)


if __name__ == '__main__':
    app = FaceAnalysis(allowed_modules=['detection', ])
    app.prepare(ctx_id=0, det_size=(640, 640))
    main()
