import numpy as np
import json
import cv2

def solver_rigid(pts_3d, pts_2d, camera_matrix):
    """Estimate a rigid transform (R, T) from 3D-2D correspondences via PnP.

    Args:
        pts_3d: (N, 3) array of model-space points.
        pts_2d: (N, 2) array of corresponding image points.
        camera_matrix: 3x3 OpenCV-style intrinsics matrix. (The original
            comment said 4x4, but the caller slices it to 3x3 first —
            cv2.solvePnP requires 3x3.)

    Returns:
        R: 3x3 rotation matrix, axis-flipped out of OpenCV's camera
            convention (see below).
        T: length-3 translation vector in the same convention.

    Raises:
        RuntimeError: if cv2.solvePnP reports failure.
    """
    dist_coeffs = np.zeros((4, 1))  # assume no lens distortion
    # Fresh float64 contiguous copies: solvePnP rejects integer input,
    # and the in-place axis flips below must not mutate caller arrays.
    pts_3d = np.ascontiguousarray(pts_3d, dtype=np.float64)
    pts_2d = np.ascontiguousarray(pts_2d, dtype=np.float64)
    success, rotation_vector, translation_vector = cv2.solvePnP(
        pts_3d, pts_2d, camera_matrix, dist_coeffs,
        flags=cv2.SOLVEPNP_ITERATIVE,  # same value as the old flags=0, made explicit
    )
    if not success:
        # `assert` is stripped under `python -O`; raise explicitly instead.
        raise RuntimeError("cv2.solvePnP failed to find a pose")
    R, _ = cv2.Rodrigues(rotation_vector)
    # Invert and flip axes: OpenCV's camera frame is x-right/y-down/z-forward;
    # the rest of the pipeline uses a y-up convention (see the caller's y flip).
    R = R.T
    R[:, 1:3] *= -1
    T = translation_vector.flatten()
    T[1:] *= -1

    return R, T

def solve(self, verts3d, verts2d):
    """Batch PnP solve: one rigid transform per item in the batch.

    Args:
        verts3d: (B, N, 3) batch of 3D point sets.
        verts2d: (B, N, 2) batch of corresponding 2D projections.

    Returns:
        R: (B, 3, 3) float32 rotation matrices.
        t: (B, 1, 3) float32 translation vectors.
    """
    # NOTE: removed a leftover debug print of the input shapes.
    B = verts3d.shape[0]
    R = np.zeros([B, 3, 3], dtype=np.float32)
    t = np.zeros([B, 1, 3], dtype=np.float32)
    for n in range(B):
        # self.camera_matrix is expected to be the shared 3x3 intrinsics
        # used for every batch item.
        _R, _t = solver_rigid(verts3d[n], verts2d[n], self.camera_matrix)
        R[n] = _R
        t[n, 0] = _t
    return R, t


def solve_one(verts3d, verts2d, camera_matrix):
    """Convenience wrapper: solve a single (non-batched) rigid PnP pose."""
    return solver_rigid(verts3d, verts2d, camera_matrix)

if __name__ == '__main__':
    # Demo: project ARKit face-mesh vertices into a captured photo, draw
    # them for visual inspection, then recover the rigid pose back from
    # the projected 2D points via PnP.
    image = cv2.imread("/Users/tunm/datasets/arkit_data/emotion/T2_300/capturedImage.jpg")
    with open("/Users/tunm/datasets/arkit_data/emotion/T2_300/data.json", "r") as f:
        data = json.load(f)

    # Capture payload: mesh vertices plus camera/anchor matrices.
    # NOTE(review): the matrices are used below with row vectors multiplying
    # from the left, so they are presumably stored row-major as ARKit
    # simd_float4x4 dumps — confirm against the exporter.
    vertices = np.array(data['capVertex'])
    intrinsics = np.array(data['intrinsics'])
    camera_transform = np.array(data['cameraTransform'])
    face_transform = np.array(data['faceAnchorTransform'])



    print("vertices", vertices)
    print("intrinsics", intrinsics)
    print("camera_transform", camera_transform)
    print("camera_transform_inv", np.linalg.inv(camera_transform))
    print("face_transform", face_transform)

    # 4x4 projection matrix saved to disk by a previous run/tool.
    txt_path = 'resources/m_projection_matrix.txt'
    M_proj = np.loadtxt(txt_path, dtype=np.float32)

    print(M_proj)

    img_w = image.shape[1]
    img_h = image.shape[0]

    # Viewport matrix: maps NDC [-1, 1] to pixel coordinates. Translation
    # sits in the last ROW because the pipeline below uses row vectors.
    M1 = np.array([
        [img_w / 2, 0, 0, 0],
        [0, img_h / 2, 0, 0],
        [0, 0, 1, 0],
        [img_w / 2, img_h / 2, 0, 1]
    ])

    # Model -> camera: face-anchor pose composed with the inverse camera pose.
    Rt = face_transform @ np.linalg.inv(camera_transform)

    # Homogeneous coordinates (N, 4) for the row-vector pipeline.
    ones = np.ones([vertices.shape[0], 1])
    verts_homo = np.concatenate([vertices, ones], axis=1)

    # Full projection: model -> camera -> clip -> viewport (row vectors).
    verts = verts_homo @ Rt @ M_proj @ M1
    w_ = verts[:, [3]]
    verts = verts / w_  # perspective divide
    # Image space: +x right, +y down.
    points2d = verts[:, :2]
    # Flip y: the viewport output has origin at bottom-left, images at top-left.
    points2d[:, 1] = img_h - points2d[:, 1]


    # HACK: empirically-found pixel offset to line the points up with the
    # photo — origin of the 4/10 values is not recorded; verify.
    points2d -= (4, 10)
    print(points2d)

    # Draw the projected vertices as small green dots.
    for x, y in points2d.astype(int):
        cv2.circle(image, (x, y), radius=2, color=(0, 255, 0), thickness=-1)

    cv2.imshow("w", image)
    cv2.waitKey(0)

    cv2.imwrite("1.jpg", image)

    # PnP round-trip: rebuild a 3x3 OpenCV-style intrinsics matrix from the
    # projection*viewport product, then recover the pose from points2d.
    camera_matrix = M_proj @ M1
    # Transpose converts the row-vector convention to OpenCV's column-vector one.
    camera_matrix = camera_matrix[:3, :3].T
    # The viewport translation (cx, cy) lived in the dropped 4th row;
    # restore the principal point at the image center by hand.
    camera_matrix[0, 2] = img_w / 2
    camera_matrix[1, 2] = img_h / 2

    R, t = solve_one(vertices, points2d, camera_matrix)

    print(R, t)

    # Ground-truth model->camera transform, for eyeballing against (R, t).
    print(Rt)