import numpy as np
import json
import cv2
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image
from projection import vertices_and_projection_on_captured_image_ as vp2ci
from projection import vertices_and_projection_on_captured_image__
from analyze_iphone_data import *

def solver_rigid(pts_3d, pts_2d, camera_matrix):
    """Recover a rigid pose (R, T) from 3D-2D correspondences via PnP.

    Parameters
    ----------
    pts_3d : (N, 3) array-like
        Model-space points.
    pts_2d : (N, 2) array-like
        Corresponding image points, in pixels.
    camera_matrix : (3, 3) ndarray
        OpenCV-style intrinsic matrix (solvePnP requires 3x3, not 4x4
        as the original comment claimed).

    Returns
    -------
    R : (3, 3) ndarray
        Rotation matrix, converted from OpenCV's camera convention to the
        convention used downstream (transposed, then y/z columns negated).
    T : (3,) ndarray
        Translation with the matching y/z sign flip.

    Raises
    ------
    RuntimeError
        If solvePnP reports failure.
    """
    dist_coeffs = np.zeros((4, 1))  # assume no lens distortion
    # solvePnP needs contiguous float buffers; this also replaces the
    # original bare .copy() calls, which did not guarantee dtype.
    pts_3d = np.ascontiguousarray(pts_3d, dtype=np.float64)
    pts_2d = np.ascontiguousarray(pts_2d, dtype=np.float64)
    success, rotation_vector, translation_vector = cv2.solvePnP(
        pts_3d, pts_2d, camera_matrix, dist_coeffs,
        flags=cv2.SOLVEPNP_ITERATIVE)  # named flag instead of magic 0
    if not success:
        # `assert` is stripped under `python -O`; fail loudly instead.
        raise RuntimeError("cv2.solvePnP failed to find a pose")
    R, _ = cv2.Rodrigues(rotation_vector)
    # OpenCV camera axes: +x right, +y down, +z forward.  Transpose and
    # negate the y/z columns to match the renderer's convention.
    R = R.T
    R[:, 1:3] *= -1
    T = translation_vector.flatten()
    T[1:] *= -1

    return R, T

def solve(self, verts3d, verts2d):
    """Batched rigid-pose solve: one independent PnP per batch item.

    Parameters
    ----------
    verts3d : (B, N, 3) ndarray
        Batch of 3D point sets.
    verts2d : (B, N, 2) ndarray
        Matching batch of 2D projections.

    Returns
    -------
    R : (B, 3, 3) float32 ndarray of rotation matrices.
    t : (B, 1, 3) float32 ndarray of translation vectors.

    Notes
    -----
    Reads ``self.camera_matrix`` — this looks like a method detached from
    its class (TODO confirm the intended owner).
    """
    # Leftover debug print removed.
    B = verts3d.shape[0]
    R = np.zeros([B, 3, 3], dtype=np.float32)
    t = np.zeros([B, 1, 3], dtype=np.float32)
    for n in range(B):
        _R, _t = solver_rigid(verts3d[n], verts2d[n], self.camera_matrix)
        R[n] = _R
        t[n, 0] = _t
    return R, t


def solve_one(verts3d, verts2d, camera_matrix):
    """Solve a single rigid pose; thin pass-through to :func:`solver_rigid`."""
    return solver_rigid(verts3d, verts2d, camera_matrix)

if __name__ == '__main__':
    # Demo: project ARKit face-mesh vertices into a captured iPhone frame,
    # recover the rigid head pose with PnP — first against the full image,
    # then again against an affine-cropped face patch.
    app = FaceAnalysis(allowed_modules=['detection', ])
    app.prepare(ctx_id=0, det_size=(640, 640))

    # Hard-coded sample capture: RGB frame plus the ARKit metadata dump.
    image = cv2.imread("/Users/tunm/datasets/arkit_data/emotion/T2_300/capturedImage.jpg")
    with open("/Users/tunm/datasets/arkit_data/emotion/T2_300/data.json", "r") as f:
        data = json.load(f)

    # ARKit quantities from the JSON dump (presumably ARKit's simd layout,
    # which reads as row-vector matrices in NumPy — TODO confirm).
    vertices = np.array(data['capVertex'])                  # face-mesh vertices
    intrinsics = np.array(data['intrinsics'])               # camera intrinsics
    camera_transform = np.array(data['cameraTransform'])    # camera -> world
    face_transform = np.array(data['faceAnchorTransform'])  # face anchor -> world



    print("vertices", vertices)
    print("intrinsics", intrinsics)
    print("camera_transform", camera_transform)
    print("camera_transform_inv", np.linalg.inv(camera_transform))
    print("face_transform", face_transform)

    # GL-style projection matrix saved from the capture session.
    txt_path = 'resources/m_projection_matrix.txt'
    M_proj = np.loadtxt(txt_path, dtype=np.float32)

    print(M_proj)

    img_w = image.shape[1]
    img_h = image.shape[0]

    # Viewport matrix: NDC [-1, 1] -> pixel coordinates (row-vector
    # convention: points multiply from the left).
    M1 = np.array([
        [img_w / 2, 0, 0, 0],
        [0, img_h / 2, 0, 0],
        [0, 0, 1, 0],
        [img_w / 2, img_h / 2, 0, 1]
    ])

    # Face-anchor-to-camera transform in row-vector convention
    # (NOTE(review): order looks reversed vs column-vector math — verify).
    Rt = face_transform @ np.linalg.inv(camera_transform)
    print(Rt)

    # Homogeneous coordinates, then the full chain:
    # model -> camera -> clip -> viewport.
    ones = np.ones([vertices.shape[0], 1])
    verts_homo = np.concatenate([vertices, ones], axis=1)

    verts = verts_homo @ Rt @ M_proj @ M1
    w_ = verts[:, [3]]  # perspective divide
    verts = verts / w_
    # image space: +x right, +y down
    points2d = verts[:, :2]
    points2d[:, 1] = img_h - points2d[:, 1]  # flip y: GL origin is bottom-left


    # Empirical pixel offset — presumably compensating a small calibration
    # bias; TODO confirm where (4, 10) comes from.
    points2d -= (4, 10)
    print(points2d)

    # for x, y in points2d.astype(int):
    #     cv2.circle(image, (x, y), radius=2, color=(0, 255, 0), thickness=-1)
    #
    # cv2.imshow("w", image)
    # cv2.waitKey(0)

    # cv2.imwrite("1.jpg", image)

    # pnp
    # Build a 3x3 OpenCV-style intrinsic matrix from the (projection x
    # viewport) product; principal point reset to the image centre.
    camera_matrix = M_proj @ M1
    camera_matrix = camera_matrix[:3, :3].T
    camera_matrix[0, 2] = img_w / 2
    camera_matrix[1, 2] = img_h / 2

    # Recover the pose from the 3D mesh and its 2D projection.
    R, t = solve_one(vertices, points2d, camera_matrix)

    print(R, t)

    print(Rt)

    # Crop the face region; `affine` maps full-image pixels -> crop pixels.
    crop_image, crop_points, affine = process_image_and_keypoints(image, points2d)

    # First extract 5 facial landmarks from the original image.
    faces = app.get(image)
    det_kps5 = None
    if len(faces) > 0:
        det_kps5 = faces[0].kps
    if det_kps5 is not None:
        det_kps5 = np.asarray(det_kps5)
        # Map detector landmarks and projected mesh points into crop space.
        det_kps5 = apply_affine_transform_to_keypoints(det_kps5, affine)
        mesh_apply = apply_affine_transform_to_keypoints(points2d, affine)

        # Blue (BGR): detector landmarks.
        for x, y in det_kps5.astype(int):
            cv2.circle(crop_image, (x, y), radius=1, color=(240, 0, 0), thickness=2)

        # Green: projected mesh points.
        for x, y in mesh_apply.astype(int):
            cv2.circle(crop_image, (x, y), radius=1, color=(0, 200, 0), thickness=1)

        #

        # Compose the crop affine with the intrinsics to derive focal terms
        # for the cropped view.  NOTE(review): a 2x3 affine is multiplied
        # against the transposed intrinsics — verify the convention.
        c_af = affine @ intrinsics.T
        c_af = c_af.T
        print("caf", affine)
        # Template GL projection matrix; the focal entries [0,0]/[1,1] are
        # overwritten below, the rest are fixed near/far clip terms.
        new_pjm = np.asarray([[1.57443701, 0.0, 0.0, 0.0, ],
                              [0.0, 1.57443701, 0.0, 0.0, ],
                              [0.0, 0.0, -0.99999976, -1.0, ],
                              [0.0, 0.0, -0.001, 0.0]])
        new_pjm[0, 0] = c_af[0, 0] / c_af[2, 0]
        new_pjm[1, 1] = c_af[1, 1] / c_af[2, 1]

        # print("mj", intrinsics.shape)
        # affine_t = np.vstack((affine, np.asarray([0, 0, 1])))
        # print("m", affine_t @ intrinsics)
        #
        # Viewport matrix for the crop, then a 3x3 intrinsic matrix for PnP
        # (same construction as for the full image above).
        crop_h, crop_w = crop_image.shape[:2]
        M1_crop = np.array([
            [crop_w / 2, 0, 0, 0],
            [0, crop_h / 2, 0, 0],
            [0, 0, 1, 0],
            [crop_w / 2, crop_h / 2, 0, 1]
        ])
        camera_matrix_crop = new_pjm @ M1_crop
        camera_matrix_crop = camera_matrix_crop[:3, :3].T
        camera_matrix_crop[0, 2] = crop_w / 2
        camera_matrix_crop[1, 2] = crop_h / 2

        # Re-solve the pose against the crop-space landmarks
        # (shadows the full-image R, t computed above).
        R, t = solve_one(vertices, mesh_apply, camera_matrix_crop)
        print(R, t)
        # Assemble a 4x4 homogeneous pose (row-vector convention: R stacked
        # over t, then the [0, 0, 0, 1] column appended).
        cRt = np.vstack((R, t))
        cRt = np.hstack((cRt, np.array([0, 0, 0, 1]).reshape(-1, 1)))
        print("CRT", cRt)

        # Re-project the mesh with the recovered pose for visual checking.
        tp = vertices_and_projection_on_captured_image__(vertices, cRt, c_af)

        print('out', R, t)

        cv2.imshow("r", crop_image)
        cv2.waitKey(0)