import json
import os.path
import random

import cv2
import numpy as np
import tqdm
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image
from projection import vertices_and_projection_on_captured_image_ as vp2ci


def fit_ellipse(keypoints):
    """Fit an ellipse to a set of 2D keypoints and return its center (x, y)."""
    pts = np.asarray(keypoints, dtype=np.float32)
    # cv2.fitEllipse returns ((cx, cy), (major, minor), angle); only the
    # center is needed here.
    center, _axes, _angle = cv2.fitEllipse(pts)
    return center


def fit_eyes_center_kps(keypoints):
    """Estimate the two eye centers by fitting an ellipse to each eye's
    contour keypoints.

    The index ranges select the eye-contour vertices of the face mesh
    (presumably ARKit topology — verify against the mesh definition).
    Returns (left_center, right_center) as np.ndarray.
    """
    left_contour = keypoints[range(1085, 1109)]
    right_contour = keypoints[range(1061, 1085)]
    left_center = np.asarray(fit_ellipse(left_contour))
    right_center = np.asarray(fit_ellipse(right_contour))
    return left_center, right_center


def get_bounding_box(keypoints, scale=1.2, square=True):
    """Compute an axis-aligned bounding box around *keypoints*, enlarged by
    *scale* about its center.

    When *square* is True, both sides are expanded to the larger of the two
    scaled dimensions so the box becomes square.

    Returns integer (min_x, min_y, max_x, max_y); values may be negative or
    exceed the image bounds, since no clamping is applied here.
    """
    xs = keypoints[:, 0]
    ys = keypoints[:, 1]

    center = np.array([(xs.min() + xs.max()) / 2, (ys.min() + ys.max()) / 2])
    size = np.array([xs.max() - xs.min(), ys.max() - ys.min()]) * scale

    if square:
        size[:] = size.max()

    half = size / 2
    lo = center - half
    hi = center + half

    return int(lo[0]), int(lo[1]), int(hi[0]), int(hi[1])


def warp_image_and_keypoints(image, keypoints, min_x, min_y, max_x, max_y):
    """Crop image[min_y:max_y, min_x:max_x] and translate keypoints into the
    crop's coordinate frame.

    The input keypoints array is not modified; a shifted copy is returned.
    Note that negative box coordinates are passed straight to the slice and
    will index from the end of the array.
    """
    shifted = keypoints.copy()
    shifted[:, 0] = shifted[:, 0] - min_x
    shifted[:, 1] = shifted[:, 1] - min_y
    return image[min_y:max_y, min_x:max_x], shifted


def resize_image_and_keypoints(image, keypoints, target_shape):
    """Resize *image* to target_shape=(height, width) and rescale *keypoints*
    by the same per-axis factors.

    Fixes over the original: the leftover debug ``print(image.shape)`` is
    removed, and the keypoints are scaled on a copy instead of mutated in
    place (the original silently corrupted the caller's array).

    Returns (resized_image, scaled_keypoints).
    """
    h, w = image.shape[:2]
    new_h, new_w = target_shape

    # cv2.resize takes (width, height), opposite of numpy shape order.
    resized_image = cv2.resize(image, (new_w, new_h))

    scaled = keypoints.copy()
    scaled[:, 0] = scaled[:, 0] * (new_w / w)
    scaled[:, 1] = scaled[:, 1] * (new_h / h)

    return resized_image, scaled


def get_affine_transform_matrix(src_pts, dst_pts):
    """Return the 2x3 affine matrix mapping three source points onto three
    destination points (cv2.getAffineTransform requires exactly 3 pairs)."""
    src = src_pts.astype(np.float32)
    dst = dst_pts.astype(np.float32)
    return cv2.getAffineTransform(src, dst)


def apply_affine_transform_to_keypoints(keypoints, affine_matrix):
    """Apply a 2x3 affine matrix to an (N, 2) keypoint array.

    Points are lifted to homogeneous coordinates so the translation column of
    the matrix is applied as well. Returns an (N, 2) array.
    """
    ones = np.ones((len(keypoints), 1))
    homogeneous = np.hstack([keypoints, ones])
    return homogeneous @ affine_matrix.T


def process_image_and_keypoints(image, keypoints, shape=(256, 256)):
    """Crop *image* around the keypoints' (scaled, squared) bounding box and
    resize the crop to *shape*=(height, width).

    Returns (resized_image, resized_keypoints, affine_matrix), where
    affine_matrix maps original-image coordinates into the resized crop —
    useful for transplanting detections made on the full image.
    """
    box = get_bounding_box(keypoints)
    min_x, min_y, max_x, max_y = box

    cropped, shifted_kps = warp_image_and_keypoints(image, keypoints, *box)
    out_image, out_kps = resize_image_and_keypoints(cropped, shifted_kps, shape)

    # Affine from three bounding-box corners onto the matching output corners.
    corners_src = np.float32([[min_x, min_y], [max_x, min_y], [min_x, max_y]])
    corners_dst = np.float32([[0, 0], [shape[1], 0], [0, shape[0]]])
    affine = get_affine_transform_matrix(corners_src, corners_dst)

    return out_image, out_kps, affine


def load_captured_item_from_folder(path: str):
    """Load one capture folder: ``capturedImage.jpg`` and ``data.json``.

    Uses ``os.path.join`` (the original hand-joined with ``os.path.sep``,
    which breaks on paths with trailing separators) and reads the JSON with
    an explicit UTF-8 encoding.

    Returns (image, data); image is a BGR array from ``cv2.imread`` (None if
    the file is missing or unreadable — callers should check), data is the
    parsed JSON dict.
    """
    image = cv2.imread(os.path.join(path, "capturedImage.jpg"))
    json_path = os.path.join(path, "data.json")
    with open(json_path, "r", encoding="utf-8") as f:
        data = json.load(f)

    return image, data


def get_mesh_projection_from_captured_item(image, data):
    """Project the captured face-mesh vertices onto the captured image.

    Reads vertices, camera intrinsics, camera transform and face-anchor
    transform from *data*, projects via ``vp2ci``, then rescales the 2D
    result to *image* pixel coordinates and mirrors the x axis.

    Returns the projected points array (columns 0/1 are x/y in pixels).
    """
    vertices = np.array(data['capVertex'])
    intrinsics = np.array(data['intrinsics'])
    camera_transform = np.array(data['cameraTransform'])
    face_transform = np.array(data['faceAnchorTransform'])

    project_points = vp2ci(vertices, face_transform, intrinsics, camera_transform)
    height, width = image.shape[:2]
    # Normalize by a hard-coded 1080x1440 — presumably the fixed capture
    # resolution of the projection; TODO confirm against vp2ci's output space.
    project_points[:, 0] /= 1080
    project_points[:, 1] /= 1440
    # NOTE(review): x is scaled by *height* and y by *width*. That only yields
    # correct pixel coordinates if the projection axes are rotated/swapped
    # relative to the stored image — verify this is intentional.
    project_points[:, 0] *= height
    project_points[:, 1] *= width

    # Mirror x — presumably undoing the front-camera horizontal flip.
    project_points[:, 0] = width - project_points[:, 0]

    return project_points


def create_collage(images, tags, rows=4, cols=4, margin=10):
    """Arrange rows*cols images into one grid image, drawing each tag string
    in the strip reserved just below its image.

    Cells are sized to the largest image; smaller images are placed at the
    cell's top-left corner. Raises ValueError when the number of images or
    tags differs from rows * cols.
    """
    if len(images) != rows * cols or len(tags) != rows * cols:
        raise ValueError("The number of images and tags should match rows * cols")

    cell_h = max(img.shape[0] for img in images)
    cell_w = max(img.shape[1] for img in images)

    tag_height = 30  # vertical space under each image for its label
    total_h = rows * (cell_h + tag_height) + (rows + 1) * margin
    total_w = cols * cell_w + (cols + 1) * margin
    canvas = np.zeros((total_h, total_w, 3), dtype=np.uint8)

    font = cv2.FONT_HERSHEY_SIMPLEX
    for idx, (img, tag) in enumerate(zip(images, tags)):
        row, col = divmod(idx, cols)
        y0 = row * (cell_h + margin + tag_height) + margin
        x0 = col * (cell_w + margin) + margin

        h, w = img.shape[:2]
        canvas[y0:y0 + h, x0:x0 + w] = img

        # Text baseline sits halfway down the tag strip below the image.
        cv2.putText(canvas, tag, (x0, y0 + h + tag_height // 2),
                    font, 0.8, (0, 30, 200), 2)

    return canvas


def calculate_mean_absolute_error(keypoints1, keypoints2, shape):
    """Mean absolute error between two keypoint sets, with both divided by
    *shape* first (scalar or per-axis array) so the error is normalized.

    Raises ValueError if the two arrays do not have identical shapes.
    """
    if keypoints1.shape != keypoints2.shape:
        raise ValueError("The two sets of keypoints must have the same shape")

    # Normalize both sets of keypoints to comparable coordinates.
    scale = np.array(shape)
    diff = keypoints1 / scale - keypoints2 / scale
    return np.mean(np.abs(diff))


def show_one():
    """Visualize a single capture: draw all projected mesh points (green),
    the detector's 5 keypoints (blue) and the mesh-fitted eye centers (red)
    on the face crop, print the normalized MAE between the mesh-derived and
    detected 5-point sets when a face was found, then display the crop.

    Relies on the module-level ``app`` detector created in ``__main__``.
    """
    image, data = load_captured_item_from_folder("T2_375467")
    project_points = get_mesh_projection_from_captured_item(image, data)
    crop_image, crop_points, affine = process_image_and_keypoints(image, project_points)

    # Detect the 5 keypoints on the full image, then map them into the
    # crop's coordinate frame via the crop affine.
    faces = app.get(image)
    det_kps5 = faces[0].kps if len(faces) > 0 else None
    if det_kps5 is not None:
        det_kps5 = apply_affine_transform_to_keypoints(np.asarray(det_kps5), affine)

    # Green: every projected mesh vertex.
    for px, py in crop_points.astype(int):
        cv2.circle(crop_image, (px, py), radius=1, color=(0, 255, 0), thickness=1)

    # Blue: detector keypoints (only when a face was found).
    if det_kps5 is not None:
        for px, py in det_kps5.astype(int):
            cv2.circle(crop_image, (px, py), radius=1, color=(240, 0, 0), thickness=2)

    # Mesh-derived 5 keypoints: fitted eye centers plus three mesh vertices.
    eye_left, eye_right = fit_eyes_center_kps(crop_points)
    mesh_kps5 = np.asarray(
        [eye_left, eye_right, crop_points[37], crop_points[249], crop_points[684]])
    # Red: the two fitted eye centers.
    for eye in (eye_left, eye_right):
        cv2.circle(crop_image, tuple(eye.astype(int)), radius=1, color=(0, 0, 255), thickness=3)

    if det_kps5 is not None:
        # A face and its 5 keypoints were detected — report the comparison.
        mae = calculate_mean_absolute_error(mesh_kps5, det_kps5, 256)
        print(str(round(float(mae), 3)))

    cv2.imshow("w", crop_image)
    cv2.waitKey(0)


def list_subdirectories(path):
    """Return the full paths of the immediate subdirectories of *path*.

    Uses ``os.scandir`` as a context manager so the directory handle is
    released promptly; ``entry.path`` is already the joined path, so the
    manual ``os.path.join(path, entry.name)`` is unnecessary.
    """
    with os.scandir(path) as entries:
        return [entry.path for entry in entries if entry.is_dir()]


def show_mutil_images(batch=32, pad_num=16):
    """Build *batch* collages of *pad_num* randomly sampled captures each and
    write them to ``save/<i>.png``.

    For every sampled capture: project the mesh, build the face crop, draw
    the detector's 5 keypoints (blue), the mesh-derived 5 keypoints (red) and
    the fitted eye centers, and tag the cell with the normalized MAE between
    the two 5-point sets ("non" when no face was detected; "-exception"
    suffix when the MAE exceeds 4.5e-2).

    Fixes over the original:
    - ``mesh_kps5`` was drawn *before* it was assigned (NameError on the
      first image, stale data afterwards); the duplicate draw blocks are
      collapsed into one, placed after the assignment.
    - The outer ``idx`` counter was clobbered by the inner loop variable and
      the save loop reused one stale index, so every collage overwrote the
      same file; the save loop now enumerates its own index.
    """
    dirs = list_subdirectories("arkit_data/emotion/")
    random.shuffle(dirs)
    total_show = list()
    for _ in tqdm.tqdm(range(batch)):
        images = list()
        tags = list()
        # Sample pad_num capture folders (with replacement).
        sample = np.random.randint(0, len(dirs), size=pad_num)
        for dir_idx in sample:
            dir_ = dirs[dir_idx]
            image, data = load_captured_item_from_folder(dir_)
            project_points = get_mesh_projection_from_captured_item(image, data)
            crop_image, crop_points, affine = process_image_and_keypoints(image, project_points)

            # Detect the face on the full image and map its 5 keypoints into
            # the crop's coordinate frame.
            faces = app.get(image)
            det_kps5 = faces[0].kps if len(faces) > 0 else None
            if det_kps5 is not None:
                det_kps5 = apply_affine_transform_to_keypoints(np.asarray(det_kps5), affine)

            # Mesh-derived 5 keypoints: fitted eye centers + three vertices.
            kps_eys_left, kps_eys_right = fit_eyes_center_kps(crop_points)
            mesh_kps5 = np.asarray(
                [kps_eys_left, kps_eys_right, crop_points[37], crop_points[249], crop_points[684]])
            cv2.circle(crop_image, tuple(kps_eys_left.astype(int)), radius=1, color=(0, 0, 255), thickness=3)
            cv2.circle(crop_image, tuple(kps_eys_right.astype(int)), radius=1, color=(0, 0, 255), thickness=3)

            if det_kps5 is not None:
                for x, y in det_kps5.astype(int):
                    cv2.circle(crop_image, (x, y), radius=1, color=(240, 0, 0), thickness=2)
            for x, y in mesh_kps5.astype(int):
                cv2.circle(crop_image, (x, y), radius=1, color=(0, 0, 240), thickness=2)

            if det_kps5 is not None:
                # Face and 5 keypoints detected — record the comparison.
                mae = calculate_mean_absolute_error(mesh_kps5, det_kps5, 256)
                mae_str = str(round(float(mae), 3))
                if mae > 4.5e-2:
                    mae_str += "-exception"
                tags.append(mae_str)
            else:
                tags.append("non")
            images.append(crop_image)

        total_show.append(create_collage(images, tags))

    # One file per collage; the original reused a single stale index and
    # overwrote all but the last collage.
    for i, img in enumerate(total_show):
        cv2.imwrite(os.path.join("save", f"{i}.png"), img)


if __name__ == '__main__':
    # Module-level detector used by show_one()/show_mutil_images(); only the
    # detection module is loaded (its output's .kps supplies the 5 keypoints
    # read by those functions).
    app = FaceAnalysis(allowed_modules=['detection', ])
    app.prepare(ctx_id=0, det_size=(640, 640))
    # show_mutil_images()
    show_one()