import numpy as np
import torch

from typing import Dict, List, Optional, Tuple
from .crop import gen_crop_parameters_from_points
from ...camera import camera
from ...hand.hand import HandModel, NUM_JOINTS_PER_HAND
from ...hand.handpose import SingleHandPose
from ...hand.hand_pose_utils import landmarks_from_hand_pose


# Interpolate between the per-joint limits to obtain a "neutral" set of
# joint angles for the hand model.
def neutral_joint_angles(up: HandModel, lower_factor: float = 0.5) -> torch.Tensor:
    """Return joint angles interpolated between the model's joint limits.

    `lower_factor` is the weight applied to the lower limit; the upper
    limit receives the complementary weight (1 - lower_factor). With the
    default of 0.5 this is the midpoint of each joint's range.
    """
    limits = up.joint_limits
    assert limits is not None
    lower = limits[..., 0]
    upper = limits[..., 1]
    return lower * lower_factor + upper * (1.0 - lower_factor)


def rank_hand_visibility_in_cameras(
        cameras: List[camera.CameraModel],
        hand_model: HandModel,
        hand_pose: SingleHandPose,
        hand_idx: int,
        min_required_vis_landmarks: int,
) -> List[int]:
    """Rank cameras by how many hand landmarks they can see.

    Projects the posed hand's landmarks into every camera and counts the
    landmarks that land inside the image rectangle with positive eye-space
    depth. Only cameras that see at least `min_required_vis_landmarks`
    landmarks are returned, sorted so the camera seeing the most landmarks
    comes first.
    """
    landmarks_world = landmarks_from_hand_pose(hand_model, hand_pose, hand_idx)
    n_landmarks_in_view = []
    ranked_cam_indices = []
    # NOTE: loop variable is `cam` (not `camera`) to avoid shadowing the
    # imported `camera` module.
    for cam_idx, cam in enumerate(cameras):
        landmarks_eye = cam.world_to_eye(landmarks_world)
        landmarks_win2 = cam.eye_to_window(landmarks_eye)

        # A landmark counts as visible when its window coordinates fall
        # within [0, width-1] x [0, height-1] and it lies in front of the
        # camera (positive z in eye space).
        n_visible = (
                (landmarks_win2[..., 0] >= 0)
                & (landmarks_win2[..., 0] <= cam.width - 1)
                & (landmarks_win2[..., 1] >= 0)
                & (landmarks_win2[..., 1] <= cam.height - 1)
                & (landmarks_eye[..., 2] > 0)
        ).sum()

        n_landmarks_in_view.append(n_visible)
        # Only push the cameras that can see enough hand points
        if n_visible >= min_required_vis_landmarks:
            ranked_cam_indices.append(cam_idx)

    #  Favor the view that sees more landmarks
    ranked_cam_indices.sort(
        reverse=True,
        key=lambda x: n_landmarks_in_view[x],
    )
    return ranked_cam_indices


def _get_crop_points_from_hand_pose(
        hand_model: HandModel,
        gt_hand_pose: SingleHandPose,
        hand_idx: int,
        num_crop_points: int,
) -> np.ndarray:
    """Gather the landmark sets that define the hand crop region.

    Depending on `num_crop_points` (21, 42 or 63), the result stacks the
    landmarks of the ground-truth pose, optionally a "neutral" pose (joint
    angles interpolated between the joint limits) and optionally a fully
    open pose (all joint angles zero). All variants share the ground-truth
    wrist transform.
    """
    assert num_crop_points in [21, 42, 63]

    poses = [gt_hand_pose]
    if num_crop_points > 21:
        poses.append(
            SingleHandPose(
                joint_angles=neutral_joint_angles(hand_model).numpy(),
                wrist_xform=gt_hand_pose.wrist_xform,
            )
        )
    if num_crop_points > 42:
        poses.append(
            SingleHandPose(
                joint_angles=np.zeros(NUM_JOINTS_PER_HAND, dtype=np.float32),
                wrist_xform=gt_hand_pose.wrist_xform,
            )
        )

    return np.concatenate(
        [landmarks_from_hand_pose(hand_model, pose, hand_idx) for pose in poses],
        axis=0,
    )


def gen_crop_cameras_from_pose(
        cameras: List[camera.CameraModel],
        camera_angles: List[float],
        hand_model: HandModel,
        hand_pose: SingleHandPose,
        hand_idx: int,
        num_crop_points: int,
        new_image_size: Tuple[int, int],
        max_view_num: Optional[int] = None,
        sort_camera_index: bool = False,
        focal_multiplier: float = 0.95,
        mirror_right_hand: bool = True,
        min_required_vis_landmarks: int = 19,
        augment: bool = False
) -> Dict[int, camera.PinholePlaneCameraModel]:
    """Build per-camera crop cameras for one hand pose.

    Cameras are first filtered and ranked by how many landmarks they see
    (`rank_hand_visibility_in_cameras`); a crop camera is generated for each
    qualifying view until `max_view_num` views (if given) are collected.
    Returns a mapping from camera index to the crop camera model.
    """
    crop_cameras: Dict[int, camera.PinholePlaneCameraModel] = {}
    crop_points = _get_crop_points_from_hand_pose(
        hand_model,
        hand_pose,
        hand_idx,
        num_crop_points,
    )
    cam_indices = rank_hand_visibility_in_cameras(
        cameras=cameras,
        hand_model=hand_model,
        hand_pose=hand_pose,
        hand_idx=hand_idx,
        min_required_vis_landmarks=min_required_vis_landmarks,
    )

    # By default cameras are ordered by visibility (best view first); when
    # enabled, sort by camera index instead for deterministic output.
    if sort_camera_index:
        cam_indices = sorted(cam_indices)

    for cam_idx in cam_indices:
        # Checking before generating (with >=) also honors a non-positive
        # max_view_num, whereas the old post-insert `==` test could overshoot.
        if max_view_num is not None and len(crop_cameras) >= max_view_num:
            break
        crop_cameras[cam_idx] = gen_crop_parameters_from_points(
            cameras[cam_idx],
            crop_points,
            new_image_size,
            mirror_img_x=(mirror_right_hand and hand_idx == 1),
            camera_angle=camera_angles[cam_idx],
            focal_multiplier=focal_multiplier,
            augment=augment
        )

    return crop_cameras


def gen_crop_cameras(
        cameras: List[camera.CameraModel],
        camera_angles: List[float],
        hand_model: HandModel,
        gt_tracking: Dict[int, SingleHandPose],
        min_num_crops: int,
        hand_confidence_threshold: float = 0.5,
        num_crop_points: int = 63,
        hand_ratio_in_crop: float = 0.95,
        min_required_vis_landmarks: int = 5,
        new_image_size: Tuple[int, int] = (96, 96),
        max_view_num: int = 4,
        sort_camera_index: bool = True,
        mirror_right_hand: bool = False,
        augment=False
) -> Dict[int, Dict[int, camera.PinholePlaneCameraModel]]:
    """Generate crop cameras for every sufficiently confident tracked hand.

    For each hand in `gt_tracking` whose confidence reaches
    `hand_confidence_threshold`, builds crop cameras via
    `gen_crop_cameras_from_pose`. Hands that end up with fewer than
    `min_num_crops` crop cameras (or none at all) are dropped from the
    result, which maps hand index -> {camera index -> crop camera}.
    """
    if not gt_tracking:
        return {}

    crop_cameras: Dict[int, Dict[int, camera.PinholePlaneCameraModel]] = {}
    for hand_idx, gt_hand_pose in gt_tracking.items():
        # Skip low-confidence detections entirely.
        if gt_hand_pose.hand_confidence < hand_confidence_threshold:
            continue

        crop_cameras[hand_idx] = gen_crop_cameras_from_pose(
            cameras,
            camera_angles,
            hand_model,
            gt_hand_pose,
            hand_idx,
            num_crop_points,
            new_image_size,
            max_view_num=max_view_num,
            sort_camera_index=sort_camera_index,
            focal_multiplier=hand_ratio_in_crop,
            mirror_right_hand=mirror_right_hand,
            min_required_vis_landmarks=min_required_vis_landmarks,
            augment=augment
        )

    # Keep only the hands with enough (and at least one) crop cameras.
    return {
        hand_idx: per_hand_crops
        for hand_idx, per_hand_crops in crop_cameras.items()
        if per_hand_crops and len(per_hand_crops) >= min_num_crops
    }


def gen_reference_camera_idx(
        cameras: List[camera.CameraModel],
        hand_model: HandModel,
        hand_pose: SingleHandPose,
        hand_idx: int,
        min_required_vis_landmarks: int = 19,
) -> int:
    """Return the index of the camera with the best view of the hand.

    Delegates the ranking to `rank_hand_visibility_in_cameras` and picks the
    top entry. Raises IndexError when no camera sees at least
    `min_required_vis_landmarks` landmarks.
    """
    ranked = rank_hand_visibility_in_cameras(
        cameras=cameras,
        hand_model=hand_model,
        hand_pose=hand_pose,
        hand_idx=hand_idx,
        min_required_vis_landmarks=min_required_vis_landmarks,
    )
    return ranked[0]


