from typing import Tuple

import numpy as np
from ...camera import camera
from ...utils.base import affine


# Infers the minimal camera intrinsics (focal length and principal point) that
# make a given set of eye-space 3D points fit inside the image.
def gen_intrinsics_from_bounding_pts(
        pts_eye: np.ndarray, image_w: int, image_h: int, min_focal: float = 5
) -> Tuple[np.ndarray, np.ndarray]:
    """Infer the tightest pinhole intrinsics that bound the given points.

    Computes focal lengths and principal point such that every eye-space
    point projects inside an ``image_w`` x ``image_h`` image.

    Arguments:
    * pts_eye: (..., 3) points in the camera (eye) frame; z must be positive.
    * image_w, image_h: target image size in pixels.
    * min_focal: smallest admissible focal length.

    Returns:
        (fx_fy, cx_cy) — two length-2 arrays with focal lengths and
        principal point.

    Raises:
        ValueError: if any point lies at/behind the camera, or the focal
            length needed to bound the points drops below ``min_focal``.
    """
    # Reject points at or behind the near plane BEFORE dividing by z;
    # otherwise the division below emits warnings and produces inf/garbage
    # NDC values prior to the failure being reported.
    if np.any(pts_eye[..., 2:] < 0.0001):
        raise ValueError("Unable to create crop camera")

    pts_ndc = pts_eye[..., 0:2] / pts_eye[..., 2:]
    img_size = np.array([image_w, image_h], dtype=pts_eye.dtype)
    # Given our convention, we need to shift one pixel before dividing by 2.
    cx_cy = (img_size - 1) / 2
    # The largest |ndc| coordinate determines the focal that still keeps
    # every point inside the image.
    fx_fy = cx_cy / np.absolute(pts_ndc).max()

    if np.any(fx_fy < min_focal):
        raise ValueError("Unable to create crop camera", fx_fy)

    return fx_fy, cx_cy


# Given the original camera `camera_orig` and 3D world points `pts_world`, build a
# new camera at the same position but with updated orientation and intrinsics so
# that all the points project inside the new image.
def gen_crop_parameters_from_points(
        camera_orig: camera.CameraModel,
        pts_world,
        new_image_size: Tuple[int, int],
        mirror_img_x: bool,
        camera_angle: float = 0,
        focal_multiplier: float = 1.0,
        augment=False
) -> camera.PinholePlaneCameraModel:
    """Build a crop camera that keeps a set of world points in view.

    Given the original camera transform and a list of 3D points in world
    space, compute a new perspective camera guaranteeing that, after
    projection, all the points land inside the image.

    Arguments:
    * camera_orig: the original camera used for generating an image. The
        returned camera keeps its position but gets a new rotation and
        new intrinsics.
    * pts_world: world-space points that must project inside the image of
        the generated camera.
    * new_image_size: target image size (width, height).
    * mirror_img_x: whether to flip the image horizontally. A typical use
        case is mirroring right-hand images so a model only needs to
        handle left-hand data.
    * camera_angle: physical orientation of the camera, used to rotate the
        object of interest to the 'upright' direction.
    * focal_multiplier: values below 1 zoom out slightly, leaving some
        margin at the image boundary.
    * augment: when True, randomly perturbs the look-at orientation and
        shrinks the focal length for data augmentation.
    """
    world_to_eye_orig = np.linalg.inv(camera_orig.camera_to_world_xf)

    # Aim the new camera at the midpoint of the points' bounding box.
    bbox_mid = 0.5 * (pts_world.min(axis=0) + pts_world.max(axis=0))
    world_to_eye_new = affine.make_look_at_matrix(
        world_to_eye_orig, bbox_mid, camera_angle, augment
    )
    if mirror_img_x:
        flip_x = np.eye(4, dtype=np.float32)
        flip_x[0, 0] = -1
        world_to_eye_new = flip_x @ world_to_eye_new

    # Intrinsics tight enough to contain every point after the new transform.
    pts_eye = affine.transform3(world_to_eye_new, pts_world)
    fx_fy, cx_cy = gen_intrinsics_from_bounding_pts(
        pts_eye, new_image_size[0], new_image_size[1]
    )
    if augment:
        focal_multiplier *= np.random.uniform(0.8, 0.95)
    fx_fy = fx_fy * focal_multiplier

    return camera.PinholePlaneCameraModel(
        width=new_image_size[0],
        height=new_image_size[1],
        f=fx_fy,
        c=cx_cy,
        distort_coeffs=[],
        camera_to_world_xf=np.linalg.inv(world_to_eye_new),
    )


def get_intrinsics_from_box(
        new_image_size: Tuple[int, int],
        back_projection: np.ndarray,
        origin_focal: Tuple[float, float],
        scale: Tuple[float, float],
        base_size: Tuple[int, int] = (640, 480),
) -> Tuple[np.ndarray, np.ndarray]:
    """Compute virtual crop-camera intrinsics from a back-projected box center.

    Arguments:
    * new_image_size: target image size (width, height).
    * back_projection: 3-vector [px, py, pz], the box center back-projected
        onto the z=1 plane of the original camera.
    * origin_focal: (fx, fy) of the original camera.
    * scale: normalized box size (w, h) relative to the original image.
    * base_size: reference resolution that ``new_image_size`` is rescaled
        against (defaults to 640x480, the previously hard-coded values).

    Returns:
        (fx_fy, cx_cy) — isotropic virtual focal lengths and the
        principal point, both as float32 length-2 arrays.
    """
    img_size = np.array(new_image_size, dtype=np.float32)
    # Shift one pixel before halving, matching the convention used elsewhere
    # in this module.
    cx_cy = (img_size - 1) / 2

    fx_orig, fy_orig = origin_focal
    px, py, pz = back_projection

    # Squared norm of the back-projected ray ||p||^2.
    p_norm_sq = px ** 2 + py ** 2 + pz ** 2

    # Per-axis denominators of the virtual-focal formula.
    denom_x = np.sqrt(px ** 2 + 1)
    denom_y = np.sqrt(py ** 2 + 1)

    # Virtual focal lengths before rescaling (Option C).
    hx_virt = fx_orig * p_norm_sq / denom_x
    hy_virt = fy_orig * p_norm_sq / denom_y

    # Rescale by crop size and by the target resolution relative to base_size.
    fx_virt = hx_virt / scale[0] * (new_image_size[0] / base_size[0])
    fy_virt = hy_virt / scale[1] * (new_image_size[1] / base_size[1])
    # Use a single isotropic focal (the larger of the two) so the crop is not
    # anisotropically stretched.
    f_iso = max(fx_virt, fy_virt)

    fx_fy = np.array([f_iso, f_iso], dtype=np.float32)
    return fx_fy, cx_cy


# def get_crop_camera_from_box(
#         camera_orig: camera.CameraModel,
#         centers: Tuple[float, float],
#         scales: Tuple[float, float],
#         new_image_size: Tuple[int, int],
#         mirror_img_x: bool = False,
#         camera_angle: float = 0,
#         focal_multiplier: float = 0.95,
# ) -> camera.PinholePlaneCameraModel:
#     # 获取原始相机的世界到相机坐标系的变换矩阵
#     orig_world_to_eye_xf = np.linalg.inv(camera_orig.camera_to_world_xf)
#
#     # 获取原始图像尺寸
#     H, W = camera_orig.height, camera_orig.width
#
#     # 将归一化的中心点转换为像素坐标
#     center_x, center_y = centers
#     center_x *= W
#     center_y *= H
#     centers = np.array([center_x, center_y], dtype=np.float32)
#
#     # 获取原始相机内参
#     fx, fy = camera_orig.f
#     cx, cy = camera_orig.c
#     x = (centers[0] - cx) / fx
#     y = (centers[1] - cy) / fy
#     crop_center = np.array([x, y, 1.0], dtype=np.float32)
#
#     # 计算缩放后的边界框尺寸
#     scale_x, scale_y = scales
#     half_w = scale_x * W / 2
#     half_h = scale_y * H / 2
#
#     # 计算边界框的四个角点（像素坐标）
#     top_left = [center_x - half_w, center_y - half_h]
#     top_right = [center_x + half_w, center_y - half_h]
#     bottom_left = [center_x - half_w, center_y + half_h]
#     bottom_right = [center_x + half_w, center_y + half_h]
#
#     # 将角点转换为数组
#     box_points_pixel = np.array([top_left, top_right, bottom_left, bottom_right, centers], dtype=np.float32)
#
#     # 反投影到原始相机坐标系（z=1平面）
#     x = (box_points_pixel[:, 0] - cx) / fx
#     y = (box_points_pixel[:, 1] - cy) / fy
#     z = np.ones_like(x)
#     box_points_orig_cam = np.stack([x, y, z], axis=-1)
#
#     # 创建新的世界到相机坐标系的变换矩阵
#
#     new_world_to_eye = affine.make_look_at_from_camera_space(
#         orig_world_to_eye_xf, crop_center, camera_angle
#     )
#
#     if mirror_img_x:
#         mirrorx = np.eye(4, dtype=np.float32)
#         mirrorx[0, 0] = -1
#         new_world_to_eye = mirrorx @ new_world_to_eye
#
#     # 计算从原始相机坐标系到新相机坐标系的变换矩阵
#     T =  np.linalg.inv(orig_world_to_eye_xf) @ new_world_to_eye
#     R = T[:3, :3]  # 旋转部分
#     # 将点变换到新相机坐标系
#     box_points_new_cam = box_points_orig_cam @ R
#
#
#     # 使用边界框的点生成新的内参
#     fx_fy, cx_cy = gen_intrinsics_from_bounding_pts(
#         box_points_new_cam,
#         new_image_size[0],
#         new_image_size[1]
#     )
#     print(fx_fy)
#
#     # 应用焦距缩放因子
#     fx_fy = focal_multiplier * fx_fy
#
#     # 创建并返回新的相机模型
#     return camera.PinholePlaneCameraModel(
#         width=new_image_size[0],
#         height=new_image_size[1],
#         f=fx_fy,
#         c=cx_cy,
#         distort_coeffs=[],
#         camera_to_world_xf=np.linalg.inv(new_world_to_eye)
#     )

def get_crop_camera_from_box(
        camera_orig: camera.CameraModel,
        centers: Tuple[float, float],
        scales: Tuple[float, float],
        new_image_size: Tuple[int, int],
        mirror_img_x: bool = False,
        camera_angle: float = 0,
        focal_multiplier: float = 0.7,
) -> camera.PinholePlaneCameraModel:
    """Build a crop camera that looks at the center of a normalized 2D box.

    Arguments:
    * camera_orig: original camera; the new camera keeps its position but
        gets a new orientation and intrinsics.
    * centers: box center in normalized image coordinates (x, y in [0, 1]).
    * scales: normalized box size (w, h) relative to the original image.
    * new_image_size: target image size (width, height).
    * mirror_img_x: whether to flip the image horizontally.
    * camera_angle: physical camera orientation, used to bring the object
        of interest upright.
    * focal_multiplier: values below 1 zoom out, leaving boundary margin.
    """
    world_to_eye_orig = np.linalg.pinv(camera_orig.camera_to_world_xf)

    # Box center: normalized coordinates -> pixel coordinates.
    img_h, img_w = camera_orig.height, camera_orig.width
    center_px = np.array(
        [centers[0] * img_w, centers[1] * img_h], dtype=np.float32
    )

    # Back-project the pixel onto the z=1 plane of the original camera.
    fx, fy = camera_orig.f
    cx, cy = camera_orig.c
    look_at = np.array(
        [(center_px[0] - cx) / fx, (center_px[1] - cy) / fy, 1.0],
        dtype=np.float32,
    )

    world_to_eye_new = affine.make_look_at_from_camera_space(
        world_to_eye_orig, look_at, camera_angle
    )
    if mirror_img_x:
        flip_x = np.eye(4, dtype=np.float32)
        flip_x[0, 0] = -1
        world_to_eye_new = flip_x @ world_to_eye_new

    fx_fy, cx_cy = get_intrinsics_from_box(
        new_image_size, look_at, camera_orig.f, scales
    )
    fx_fy = fx_fy * focal_multiplier

    return camera.PinholePlaneCameraModel(
        width=new_image_size[0],
        height=new_image_size[1],
        f=fx_fy,
        c=cx_cy,
        distort_coeffs=[],
        camera_to_world_xf=np.linalg.pinv(world_to_eye_new),
    )
