import json

import cv2
import numpy as np


def vertices_and_projection_on_captured_image__(vertices, face_transform, intrinsics, camera_transform):
    """Project 3D vertices onto the captured image.

    Combines the face-anchor transform with the inverse camera transform
    into a single matrix, applies it to the homogeneous (row-vector)
    vertices, then performs a pinhole projection.

    Args:
        vertices: (N, 3) array of 3D points.
        face_transform: 4x4 anchor-to-world transform (row-vector convention).
        intrinsics: camera intrinsics; fx = [0,0], fy = [1,1],
            cx = [2,0], cy = [2,1] (column-major layout, as ARKit stores it).
        camera_transform: 4x4 camera-to-world transform.

    Returns:
        (N, 2) array of pixel coordinates.
    """
    count = vertices.shape[0]
    homogeneous = np.concatenate(
        (vertices, np.ones((count, 1), dtype=np.float32)), axis=1
    )
    # Fold both rigid transforms into one matrix before applying.
    combined = face_transform @ np.linalg.inv(camera_transform)
    cam_pts = homogeneous @ combined
    depth = cam_pts[:, 2]
    u = cam_pts[:, 0] * intrinsics[0, 0] / depth + intrinsics[2, 0]
    v = cam_pts[:, 1] * intrinsics[1, 1] / depth + intrinsics[2, 1]
    return np.column_stack((u, v))


def vertices_and_projection_on_captured_image__(vertices, Rt, intrinsics, camera_transform=None):
    """Project 3D vertices onto the captured image using a precomputed Rt.

    BUG FIX: this definition shadows an earlier 4-argument version of the
    same name, yet ``__main__`` calls it with 4 positional arguments
    (vertices, face_transform, intrinsics, camera_transform), which raised
    TypeError. The optional ``camera_transform`` restores that calling
    convention while keeping the 3-argument form working unchanged.

    Args:
        vertices: (N, 3) array of 3D points.
        Rt: 4x4 combined transform (row-vector convention). If
            ``camera_transform`` is given, this is instead interpreted as
            the face/anchor transform and combined internally.
        intrinsics: camera intrinsics; fx = [0,0], fy = [1,1],
            cx = [2,0], cy = [2,1] (column-major layout, as ARKit stores it).
        camera_transform: optional 4x4 camera-to-world transform
            (4-argument compatibility form).

    Returns:
        (N, 2) array of pixel coordinates.
    """
    if camera_transform is not None:
        # 4-arg form: the second argument is the face transform.
        Rt = Rt @ np.linalg.inv(camera_transform)
    vertices = np.hstack((vertices, np.ones((vertices.shape[0], 1), dtype=np.float32)))
    p_camera = vertices @ Rt
    x = p_camera[:, 0] * intrinsics[0, 0] / p_camera[:, 2] + intrinsics[2, 0]
    y = p_camera[:, 1] * intrinsics[1, 1] / p_camera[:, 2] + intrinsics[2, 1]
    projected_points = np.vstack((x, y)).T

    return projected_points


def vertices_and_projection_on_captured_image_(vertices, transform, intrinsics, camera_transform):
    """Project 3D vertices onto the captured image (two-step transform).

    Unlike the combined-matrix variant, this first maps the vertices into
    world space with ``transform``, then into camera space with the
    inverse of ``camera_transform``, before the pinhole projection.

    Args:
        vertices: (N, 3) array of 3D points.
        transform: 4x4 model-to-world transform (row-vector convention).
        intrinsics: camera intrinsics; fx = [0,0], fy = [1,1],
            cx = [2,0], cy = [2,1].
        camera_transform: 4x4 camera-to-world transform.

    Returns:
        (N, 2) array of pixel coordinates.
    """
    ones = np.ones((vertices.shape[0], 1), dtype=np.float32)
    homogeneous = np.hstack((vertices, ones))
    world_pts = homogeneous @ transform
    cam_pts = world_pts @ np.linalg.inv(camera_transform)
    depth = cam_pts[:, 2]
    u = cam_pts[:, 0] * intrinsics[0, 0] / depth + intrinsics[2, 0]
    v = cam_pts[:, 1] * intrinsics[1, 1] / depth + intrinsics[2, 1]
    return np.stack((u, v), axis=1)


def vertices_and_projection_on_captured_image(vertices, transform, intrinsics, camera_transform):
    """Project 3D vertices onto the captured image, one vertex at a time.

    Per-vertex reference implementation of the vectorized variants above.

    FIX: the original recomputed ``np.linalg.inv(camera_transform)`` and
    re-read the intrinsics entries inside the loop for every vertex; both
    are loop-invariant and are now hoisted. A duplicated commented-out
    line was removed. Results are unchanged.

    Args:
        vertices: iterable of 3D points (each indexable as [0], [1], [2]).
        transform: 4x4 model-to-world transform (row-vector convention).
        intrinsics: camera intrinsics; fx = [0,0], fy = [1,1],
            cx = [2,0], cy = [2,1].
        camera_transform: 4x4 camera-to-world transform.

    Returns:
        (N, 2) array of pixel coordinates (empty array for no vertices).
    """
    # Loop-invariant work hoisted out of the per-vertex loop.
    camera_transform_inv = np.linalg.inv(camera_transform)
    fx, fy = intrinsics[0, 0], intrinsics[1, 1]
    cx, cy = intrinsics[2, 0], intrinsics[2, 1]

    projected_points = []
    for vertex in vertices:
        pos = np.array([vertex[0], vertex[1], vertex[2], 1], dtype=np.float32)

        p_world = pos @ transform
        p_camera = p_world @ camera_transform_inv

        # Project the point from camera space to image space.
        x = p_camera[0] * fx / p_camera[2] + cx
        y = p_camera[1] * fy / p_camera[2] + cy

        projected_points.append((x, y))

    return np.asarray(projected_points)



def new_project(vertices, face_transform, intrinsics, camera_transform, img_w=1080, img_h=1440):
    """Project vertices using explicit projection and viewport matrices.

    Generalization: the image size was hard-coded (1080x1440); it is now
    parameterized with the same defaults, so existing callers are
    unaffected.

    Args:
        vertices: (N, 3) array of 3D points.
        face_transform: 4x4 anchor-to-world transform (row-vector convention).
        intrinsics: 3x3 intrinsics in ROW-major pinhole layout here
            (cx = intrinsics[0, 2], cy = intrinsics[1, 2]) — note this
            differs from the other projection helpers in this file.
        camera_transform: 4x4 camera-to-world transform.
        img_w: capture image width (default 1080).
        img_h: capture image height (default 1440).

    Returns:
        (N, 2) array of image points, y flipped to top-left origin.

    NOTE(review): the homogeneous divide below uses column 3 of ``verts``,
    which M_proj leaves at 1 — so no true perspective division by depth
    occurs. Preserved as-is; confirm whether dividing by column 2 was
    intended.
    """
    # Combined model -> camera transform.
    R_t = face_transform @ np.linalg.inv(camera_transform)

    # Lift the 3D points to homogeneous coordinates.
    vertices = np.hstack((vertices, np.ones((vertices.shape[0], 1), dtype=np.float32)))

    # Extract the camera intrinsics.
    fx = intrinsics[0, 0]
    fy = intrinsics[1, 1]
    cx = intrinsics[0, 2]
    cy = intrinsics[1, 2]

    # Projection matrix (row-vector convention).
    M_proj = np.array([
        [fx, 0, -cx, 0],
        [0, fy, -cy, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1]
    ])

    # Viewport matrix: maps to image space centered at (img_w/2, img_h/2).
    M1 = np.array([
        [img_w / 2, 0, 0, 0],
        [0, img_h / 2, 0, 0],
        [0, 0, 1, 0],
        [img_w / 2, img_h / 2, 0, 1]
    ])

    # Apply the full pipeline.
    verts = vertices @ R_t @ M_proj @ M1
    w_ = verts[:, [3]]
    verts = verts / w_

    # Drop to non-homogeneous 2D and flip y to a top-left origin.
    projected_points = verts[:, :2]
    projected_points[:, 1] = img_h - projected_points[:, 1]

    return projected_points


if __name__ == '__main__':
    # Load one ARKit capture: face-mesh vertices, camera intrinsics and
    # the face/camera world transforms.
    with open("/Users/tunm/datasets/arkit_data/emotion/T2_300/data.json", "r") as f:
        data = json.load(f)

    vertices = np.array(data['capVertex'])
    intrinsics = np.array(data['intrinsics'])
    camera_transform = np.array(data['cameraTransform'])
    face_transform = np.array(data['faceAnchorTransform'])

    print("vertices", vertices)
    print("intrinsics", intrinsics)
    print("camera_transform", camera_transform)
    print("camera_transform_inv", np.linalg.inv(camera_transform))
    print("face_transform", face_transform)

    # BUG FIX: the effective (second) definition of
    # vertices_and_projection_on_captured_image__ takes
    # (vertices, Rt, intrinsics); the original call passed four positional
    # arguments and raised TypeError. Pre-combine the transforms and use
    # the 3-argument form instead.
    Rt = face_transform @ np.linalg.inv(camera_transform)
    project_points = vertices_and_projection_on_captured_image__(vertices, Rt, intrinsics)

    print(project_points)

    image = cv2.imread("/Users/tunm/datasets/arkit_data/emotion/T2_300/capturedImage.jpg")
    height, width = image.shape[:2]

    # Rescale from the 1080x1440 capture resolution to the loaded image
    # size. NOTE(review): x is scaled by height and y by width —
    # presumably because the captured image is rotated 90 degrees relative
    # to the projection frame; confirm against the capture pipeline.
    project_points[:, 0] /= 1080
    project_points[:, 1] /= 1440
    project_points[:, 0] *= height
    project_points[:, 1] *= width

    # Mirror horizontally (front-camera capture is flipped — TODO confirm).
    project_points[:, 0] = width - project_points[:, 0]
    print(project_points)

    print('eyeBlink_L: ', data['blendShapes']['eyeBlink_L'])
    print('eyeBlink_R: ', data['blendShapes']['eyeBlink_R'])
    print('mouthClose: ', data['blendShapes']['mouthClose'])

    # Draw the projected mesh points for visual inspection.
    for x, y in project_points.astype(int):
        cv2.circle(image, (x, y), radius=2, color=(0, 255, 0), thickness=-1)

    cv2.imshow("w", image)
    cv2.waitKey(0)
    cv2.imwrite("2.jpg", image)