import numpy as np
from rotate import rotate
# --- Original Camera Parameters and Image Dimensions ---
# deprecated, not working, verify_rotate_compare.py works
# Original image dimensions (Height x Width)
original_H, original_W = 640, 480
# Synthetic test inputs: RGB image (H, W, 3), depth map (H, W), binary mask (H, W)
image = np.random.rand(640, 480, 3)
depth = np.random.rand(640, 480)
mask = np.random.randint(0, 2, (640, 480))
# Original Camera Intrinsics Matrix (K)
# fx = fy = 436.11008; principal point (cx, cy) ~= (240.9, 322.7) ~ (W/2, H/2)
original_K = np.array([[436.11008, 0., 240.91835],
                       [0., 436.11008, 322.66653],
                       [0., 0., 1.]])

# Original Camera Extrinsics Matrix (Pose) [R | t] - World to Camera
original_pose = np.array([[ 9.8153263e-01, -7.7415816e-02,  1.7492986e-01, -6.1428569e-02],
                          [ 7.7405863e-02,  9.9697584e-01,  6.8903100e-03, -4.0515000e-04],
                          [-1.7493425e-01,  6.7775301e-03,  9.98455679e-01,  1.0289070e-02], # Using corrected value
                          [ 0.0000000e+00, -0.0000000e+00, -0.0000000e+00,  1.0000000e+00]])


# --- Choose a Trivial 3D World Point ---
# Let's pick a point at (1, 2, 5) in world coordinates
# NOTE(review): the comment above says (1, 2, 5) but the actual point is (2, 1, 3).
world_point = np.array([2, 1, 3, 1.0]) # Homogeneous coordinates

# Rotate image/depth/mask by 90 degrees (k=1) and get the adjusted camera
# parameters. rotate() is a project-local helper; its exact rotation
# convention is not visible from this file.
image_rot, depth_rot, intrinsics_new, new_pose, mask_rot = rotate(image, depth, original_K, original_pose, mask, k=1)

print("Original 3D World Point:\n", world_point[:3])

def project_to_2d(world_point, original_K, original_pose):
    """
    Projects a 3D point in world coordinates to 2D image coordinates.

    Args:
        world_point: A numpy array of shape (4,) representing the 3D point
                     in homogeneous world coordinates.
        original_K: A numpy array of shape (3, 3) representing the camera
                    intrinsic matrix.
        original_pose: A numpy array of shape (4, 4) representing the camera
                       pose (world-to-camera transformation).

    Returns:
        A tuple (u, v, s) where (u, v) are the projected pixel coordinates
        and s is the depth (z-coordinate of the point in the camera frame).
        Returns None if the point is at or behind the camera (s <= 0),
        where the pinhole projection is undefined.
    """
    # Transform point from world to camera coordinates.
    # The convention used here is that the pose transforms world -> camera.
    # If your pose is camera-to-world, invert it before calling.
    point_in_orig_camera_frame = original_pose @ world_point

    print("\nPoint in Original Camera Frame:\n", point_in_orig_camera_frame[:3])

    # Pinhole projection: K @ X_c = s * [u, v, 1]^T, where s is the
    # camera-frame depth (the z-coordinate).
    s_orig = point_in_orig_camera_frame[2]
    if s_orig <= 0:
        # Point lies on or behind the camera plane: dividing by s would
        # produce meaningless (or infinite) pixel coordinates.
        return None
    uv_orig_homogeneous = original_K @ point_in_orig_camera_frame[:3]
    u_orig, v_orig = uv_orig_homogeneous[:2] / s_orig

    print("\nProjected Original Pixel Coordinates (u, v):", (u_orig, v_orig))
    print("Original Depth (s):", s_orig)

    return u_orig, v_orig, s_orig

u_orig, v_orig, depth_orig = project_to_2d(world_point, original_K, original_pose)
# --- Step 3: Rotate the original pixel coordinates (90 deg CCW) ---

# The transformation for a 90-degree CCW rotation of pixel (u, v) in HxW image
# to (u', v') in WxH image is approximately (v, W - u) relative to the top-left origin
# For coordinates, it's new_u = original_v, new_v = original_W - original_u
# We are using the coordinate values directly here.
# NOTE(review): the exact inverse of np.rot90's integer index mapping would be
# new_v = W - 1 - u (this code omits the -1); confirm against the convention
# actually used inside rotate() — this may explain the observed discrepancy.

u_rot = v_orig
v_rot = original_W - u_orig # Using original width

print("\nRotated Pixel Coordinates (u', v'):", (u_rot, v_rot))
print("\nAdjusted Camera Intrinsics (K'):\n", intrinsics_new)
print("\nAdjusted Camera Pose (Pose'):\n", new_pose)

# --- Step 5 & 6: Unproject the rotated pixel using adjusted parameters ---
def unproject_to_3d(point_2d, depth_in_camera, intrinsics_matrix, pose_matrix):
    """
    Unprojects a 2D pixel with a given depth to a 3D point in world coordinates.

    Args:
        point_2d: Sequence of length 2 with the pixel coordinates (u, v).
        depth_in_camera: The depth (z-coordinate) of the point in the
                         camera coordinate system.
        intrinsics_matrix: A numpy array of shape (3, 3) representing the
                           camera intrinsic matrix.
        pose_matrix: A numpy array of shape (4, 4) representing the camera
                     pose (world-to-camera transformation).

    Returns:
        A numpy array of shape (3,) representing the 3D point in
        (non-homogeneous) world coordinates.
    """
    # Back-project through the intrinsics: X_c = depth * K^-1 @ [u, v, 1]^T.
    # Use float64 (not float32) so no pixel-coordinate precision is lost.
    intrinsics_inv = np.linalg.inv(intrinsics_matrix)
    point_2d_homogeneous = np.array([point_2d[0], point_2d[1], 1.0], dtype=np.float64)
    point_3d_camera = (intrinsics_inv @ point_2d_homogeneous) * depth_in_camera

    # Camera -> world: pose_matrix is world-to-camera, so apply its inverse.
    pose_inv = np.linalg.inv(pose_matrix)
    point_3d_world_homogeneous = pose_inv @ np.append(point_3d_camera, 1.0)

    # Drop the homogeneous coordinate (it is exactly 1 for a rigid transform).
    return point_3d_world_homogeneous[:3]

# Unproject the rotated pixel (with the original depth) using the
# rotation-adjusted intrinsics and pose returned by rotate().
point_3d_world = unproject_to_3d((u_rot, v_rot), depth_orig, intrinsics_new, new_pose)

print("\nUnprojected 3D World Point:\n", point_3d_world)


# --- Step 7: Verify if the unprojected point is the same as the original world point ---

# Compare the original world_point with the unprojected_world_point
# Use a tolerance for floating-point comparison (loose: 1e-2, since the
# pixel-rotation step above is only approximate)
tolerance = 1e-2
are_points_same = np.allclose(world_point[:3], point_3d_world[:3], atol=tolerance)

print("\nAre the original and unprojected world points the same (within tolerance)?", are_points_same)

if are_points_same:
    print("The 3D correspondence is maintained after rotation and parameter adjustment.")
else:
    print("There is a discrepancy in the 3D correspondence.")