import numpy as np

from typing import Dict, List, Optional, Tuple
from .crop import gen_crop_parameters_from_points, get_crop_camera_from_box
from ...camera import camera


def gen_crop_cameras_yvr(
        cameras: List[camera.CameraModel],
        camera_angles: List[float],
        gt_tracking,
        min_num_crops: int,
        hand_ratio_in_crop: float = 0.95,
        min_required_vis_landmarks: int = 5,
        new_image_size: Tuple[int, int] = (96, 96),
        max_view_num: int = 4,
        sort_camera_index: bool = True,
        mirror_right_hand: bool = False,
        augment=False
) -> Dict[int, Dict[int, camera.PinholePlaneCameraModel]]:
    """Generate per-hand crop cameras from ground-truth hand landmarks.

    Args:
        cameras: camera models of all available views.
        camera_angles: in-plane rotation angle per camera, parallel to `cameras`.
        gt_tracking: mapping of hand index -> landmark array for that hand
            (passed straight through to `gen_crop_cameras_from_landmarks`).
        min_num_crops: hands with fewer than this many valid crop views are
            dropped from the result.
        hand_ratio_in_crop: forwarded as `focal_multiplier`, controlling how
            much of the crop the hand occupies.
        min_required_vis_landmarks: minimum visible landmarks for a camera to
            be considered a valid view.
        new_image_size: (width, height) of the generated crop.
        max_view_num: cap on the number of crop views per hand.
        sort_camera_index: if True, order crops by camera id (deterministic)
            instead of by visibility.
        mirror_right_hand: mirror crops of hand index 1 horizontally.
        augment: forwarded to the per-view crop generation.

    Returns:
        Mapping of hand index -> {camera index -> crop camera}; hands without
        enough valid views are omitted.
    """
    if not gt_tracking:
        return {}

    crop_cameras: Dict[int, Dict[int, camera.PinholePlaneCameraModel]] = {}
    for hand_idx, hand_landmarks in gt_tracking.items():
        crop_cameras[hand_idx] = gen_crop_cameras_from_landmarks(
            cameras,
            camera_angles,
            hand_landmarks,
            hand_idx,
            new_image_size,
            max_view_num=max_view_num,
            sort_camera_index=sort_camera_index,
            focal_multiplier=hand_ratio_in_crop,
            mirror_right_hand=mirror_right_hand,
            min_required_vis_landmarks=min_required_vis_landmarks,
            augment=augment
        )

    # Keep only hands that produced enough non-empty crop sets.
    return {
        hand_idx: per_hand_crops
        for hand_idx, per_hand_crops in crop_cameras.items()
        if per_hand_crops and len(per_hand_crops) >= min_num_crops
    }


def gen_crop_cameras_from_landmarks(
        cameras: List[camera.CameraModel],
        camera_angles: List[float],
        landmarks: np.ndarray,
        hand_idx: int,
        new_image_size: Tuple[int, int],
        max_view_num: Optional[int] = None,
        sort_camera_index: bool = False,
        focal_multiplier: float = 0.95,
        mirror_right_hand: bool = True,
        min_required_vis_landmarks: int = 19,
        augment=False
) -> Dict[int, camera.PinholePlaneCameraModel]:
    """Build crop cameras for one hand, one per sufficiently-visible view.

    Cameras are ranked by landmark visibility; up to `max_view_num` crop
    cameras are produced. Hand index 1 may be mirrored horizontally when
    `mirror_right_hand` is set.
    """
    ranked_indices = rank_hand_visibility_in_cameras(
        cameras=cameras,
        landmarks=landmarks,
        min_required_vis_landmarks=min_required_vis_landmarks,
    )

    # By default views are ordered by visibility (best view first); when
    # sort_camera_index is set, order by camera id instead so the result
    # is deterministic.
    if sort_camera_index:
        ranked_indices = sorted(ranked_indices)

    mirror = mirror_right_hand and hand_idx == 1
    crop_cameras: Dict[int, camera.PinholePlaneCameraModel] = {}
    for idx in ranked_indices:
        crop_cameras[idx] = gen_crop_parameters_from_points(
            cameras[idx],
            landmarks,
            new_image_size,
            mirror_img_x=mirror,
            camera_angle=camera_angles[idx],
            focal_multiplier=focal_multiplier,
            augment=augment
        )
        if len(crop_cameras) == max_view_num:
            break

    return crop_cameras


def rank_hand_visibility_in_cameras(
        cameras: List[camera.CameraModel],
        landmarks,
        min_required_vis_landmarks: int,
) -> List[int]:
    """Rank camera indices by how many hand landmarks each camera can see.

    Args:
        cameras: camera models providing `world_to_eye` / `eye_to_window`
            plus `width` / `height` attributes.
        landmarks: world-space landmark points, shape (..., 3).
        min_required_vis_landmarks: cameras seeing fewer landmarks than this
            are excluded from the result.

    Returns:
        Camera indices sorted by visible-landmark count, best view first.
        Cameras below the visibility threshold are omitted entirely.
    """
    n_landmarks_in_view = []
    ranked_cam_indices = []
    # NOTE: loop variable is `cam`, not `camera` -- the latter would shadow
    # the imported `camera` module.
    for cam_idx, cam in enumerate(cameras):
        landmarks_eye = cam.world_to_eye(landmarks)
        landmarks_win = cam.eye_to_window(landmarks_eye)

        # A landmark is visible when it projects inside the image bounds and
        # lies in front of the camera (positive eye-space depth).
        n_visible = (
                (landmarks_win[..., 0] >= 0)
                & (landmarks_win[..., 0] <= cam.width - 1)
                & (landmarks_win[..., 1] >= 0)
                & (landmarks_win[..., 1] <= cam.height - 1)
                & (landmarks_eye[..., 2] > 0)
        ).sum()

        n_landmarks_in_view.append(n_visible)
        # Only keep cameras that see enough of the hand.
        if n_visible >= min_required_vis_landmarks:
            ranked_cam_indices.append(cam_idx)

    # Favor views that see more landmarks.
    ranked_cam_indices.sort(
        reverse=True,
        key=lambda i: n_landmarks_in_view[i],
    )
    return ranked_cam_indices


def gen_reference_camera_idx(
        cameras: List[camera.CameraModel],
        landmarks: np.ndarray,
        min_required_vis_landmarks: int = 19,
) -> int:
    """Return the index of the camera with the best hand visibility.

    The ranking is best-view-first, so the reference camera is the head of
    the ranked list. Raises IndexError when no camera sees at least
    `min_required_vis_landmarks` landmarks.
    """
    return rank_hand_visibility_in_cameras(
        cameras=cameras,
        landmarks=landmarks,
        min_required_vis_landmarks=min_required_vis_landmarks,
    )[0]


def gen_crop_cameras_classify(input_frame, gt_tracking, new_image_size: Tuple[int, int],
                              confidence_threshold: float = 0.5):
    """Generate crop cameras for both hands from per-view box predictions.

    Args:
        input_frame: frame object exposing `views`, each view providing
            `.camera` and `.camera_angle`.
        gt_tracking: per-view mapping with "centers", "scales" and
            "confidences" entries, each indexed by hand id (0 or 1).
        new_image_size: (width, height) of the generated crop.
        confidence_threshold: detections with confidence at or below this
            value are skipped.

    Returns:
        Tuple of (crop_cameras, best_view_id): crop_cameras maps
        hand_id -> {view index -> crop camera} (hands with no qualifying
        view are omitted); best_view_id holds, per hand, the view with the
        highest confidence (stays 0 when no view qualified).
    """
    num_views = len(input_frame.views)

    # Box centers are normalized by a hard-coded 640x480 image size.
    # NOTE(review): presumably the native sensor resolution -- confirm, and
    # consider reading width/height from view.camera instead.
    center_norm = np.array([640, 480])

    crop_cameras = {}
    best_view_id = [0, 0]
    max_confidences = [0, 0]
    for hand_id in range(2):
        per_view_crops = {}
        for view_idx in range(num_views):
            view = input_frame.views[view_idx]
            centers = gt_tracking[view_idx]["centers"][hand_id] / center_norm
            scales = gt_tracking[view_idx]["scales"][hand_id]
            confs = gt_tracking[view_idx]["confidences"][hand_id]
            # Skip low-confidence detections.
            if confs[0] <= confidence_threshold:
                continue

            # Track the most confident view for this hand.
            if confs[0] > max_confidences[hand_id]:
                max_confidences[hand_id] = confs[0]
                best_view_id[hand_id] = view_idx

            # NOTE(review): mirror_img_x is always True here, unlike the
            # landmark path which mirrors only hand index 1 -- confirm this
            # is intentional.
            per_view_crops[view_idx] = get_crop_camera_from_box(
                view.camera,
                centers,
                scales,
                new_image_size,
                camera_angle=view.camera_angle,
                focal_multiplier=0.8,
                mirror_img_x=True
            )
        if per_view_crops:
            crop_cameras[hand_id] = per_view_crops
    return crop_cameras, best_view_id
