from typing import Tuple

import cv2
import numpy as np
import torch

from hand.track.umetrack.camera.camera_utils import _warp_image
from hand.track.umetrack.pipelines.crop.crop import get_crop_camera_from_box

from hand.track.umetrack.pipelines.crop.yvr import gen_crop_cameras_yvr, gen_reference_camera_idx
from hand.track.umetrack.pipelines.track.tracker import MM_TO_M
from .base import Detector
from .warp import perspective_crop_image_cv

# Crop-generation configuration shared by the detectors below.
MIN_LANDMARKS = 3  # min visible landmarks per view (passed as min_required_vis_landmarks)
RESIZE_SIZE = 96  # side length (px) of the square crop images
NUM_VIEWS = 6  # max number of camera views used per hand (passed as max_view_num)
HAND_CONFIDENCE_THRESHOLD = 0.5  # detector confidence cut-off; DetNetDetector defaults use the same 0.5
MIN_NUM_CROPS = 1  # minimum number of crops required per hand (passed as min_num_crops)
HAND_RATIO_IN_CROP = 0.8  # passed as hand_ratio_in_crop; presumably hand extent / crop extent — TODO confirm
JOINT_NUMS = 22  # hand joint count; not referenced in this file — presumably kept for consumers


class YVRGTDetector(Detector):
    """Detector that builds multi-view crop samples from ground-truth tracking.

    No detection network is run: crop cameras are derived directly from the
    ground-truth landmarks via ``gen_crop_cameras_yvr``.
    """

    def __init__(self, loader):
        """
        Args:
            loader: data loader exposing ``_image_stream`` with per-view
                ``camera_angles``.
        """
        super().__init__(None)
        self.loader = loader
        self.history = []  # NOTE(review): never read or written here — confirm callers need it

    def process(self, image):
        # No per-image preprocessing needed for the ground-truth detector.
        pass

    def detect(self, input_frame, gt_tracking):
        """Build one network-ready sample per hand from ground-truth landmarks.

        Args:
            input_frame: frame object with ``views``, each carrying ``camera``
                and ``image``.
            gt_tracking: mapping hand_idx -> ground-truth landmarks
                (world coordinates; translations appear to be in mm, since
                they are scaled by MM_TO_M below).

        Returns:
            dict {0: sample | None, 1: sample | None}. Each sample contains
            'images' (V, 1, H, W), 'intrinsics' (V, 3, 3), 'extrinsics'
            (V, 4, 4, camera->world with translation in meters), 'view_mask'
            (V,), 'landmarks', 'hand_idx' and 'reference_camera_idx'.
            A hand with no valid crops stays None.
        """
        stream = self.loader._image_stream
        num_views = len(input_frame.views)
        # Hoisted: the camera list is needed both for crop generation and,
        # inside the per-hand loop, for reference-camera selection.
        view_cameras = [view.camera for view in input_frame.views]
        crop_cameras = gen_crop_cameras_yvr(
            view_cameras,
            stream.camera_angles,
            gt_tracking,
            min_num_crops=MIN_NUM_CROPS,
            min_required_vis_landmarks=MIN_LANDMARKS,
            new_image_size=(RESIZE_SIZE, RESIZE_SIZE),
            max_view_num=NUM_VIEWS,
            hand_ratio_in_crop=HAND_RATIO_IN_CROP,
            augment=False,
            mirror_right_hand=True
        )
        hand_samples = {0: None, 1: None}
        for hand_idx, crop_camera_info in crop_cameras.items():
            reference_camera_idx = gen_reference_camera_idx(
                view_cameras,
                gt_tracking[hand_idx],
                min_required_vis_landmarks=MIN_LANDMARKS,
            )

            # Per-view buffers; views without a crop keep a zero image,
            # identity matrices and view_mask == 0 (padding).
            left_images_filled = [np.zeros((RESIZE_SIZE, RESIZE_SIZE), dtype=np.float32) for _ in range(num_views)]
            intrinsics_filled = [np.eye(3, dtype=np.float32) for _ in range(num_views)]
            view_masks = [0] * num_views
            extrinsics_xf = [np.eye(4, dtype=np.float32) for _ in range(num_views)]

            # Ground-truth landmarks for this hand.
            landmarks = gt_tracking[hand_idx]

            for cam_idx, crop_camera in crop_camera_info.items():
                view_data = input_frame.views[cam_idx]
                crop_image = _warp_image(view_data.camera, crop_camera, view_data.image)

                # Normalize to [0, 1] and store as a single channel.
                left_images_filled[cam_idx] = crop_image.astype(np.float32) / 255.0
                intrinsics_filled[cam_idx] = crop_camera.uv_to_window_matrix().astype(np.float32)

                # Extrinsics: camera->world, translation rescaled mm -> m.
                eye_to_world = crop_camera.camera_to_world_xf.copy()
                eye_to_world[:3, 3] *= MM_TO_M
                extrinsics_xf[cam_idx] = eye_to_world.astype(np.float32)

                view_masks[cam_idx] = 1

            # Pack tensors (compact dtypes for the index-like fields).
            sample = {
                'images': torch.tensor(np.array(left_images_filled)).unsqueeze(1),
                'intrinsics': torch.tensor(np.array(intrinsics_filled)),
                'extrinsics': torch.tensor(np.array(extrinsics_xf)),
                'view_mask': torch.tensor(np.array(view_masks), dtype=torch.uint8),
                'landmarks': torch.tensor(np.array(landmarks), dtype=torch.float32),
                'hand_idx': torch.tensor(hand_idx, dtype=torch.uint8),
                'reference_camera_idx': torch.tensor(reference_camera_idx, dtype=torch.uint8),
            }

            hand_samples[hand_idx] = sample

        return hand_samples


class DetNetDetector(Detector):
    """Detector that predicts hand crop boxes with a detection network.

    ``gen_crop_cameras`` runs the model on all views to produce per-hand crop
    cameras; ``detect`` turns those crops into network-ready samples.
    ``detect_from_history`` instead derives crops from externally supplied
    tracking (e.g. the previous frame's pose).
    """

    def __init__(self, loader, model):
        """
        Args:
            loader: data loader exposing ``_image_stream`` with per-view
                ``camera_angles``.
            model: detection network taking (V, 1, H, W) images in [-1, 1]
                and returning (center, scale, confidence) per view/hand.
        """
        super().__init__(None)
        self.loader = loader
        self.model = model

    def process(self, image):
        # No standalone per-image processing; detection happens in detect().
        pass

    def gen_crop_cameras(self, input_frame, new_image_size: Tuple[int, int],
                         confidence_threshold: float = HAND_CONFIDENCE_THRESHOLD):
        """Run the detector on every view and build per-hand crop cameras.

        Args:
            input_frame: frame with ``views`` (camera, camera_angle, image).
            new_image_size: (width, height) of the crop images.
            confidence_threshold: minimum confidence for a view to contribute.
                Defaults to HAND_CONFIDENCE_THRESHOLD (0.5, same as before).

        Returns:
            (crop_cameras, best_view_id): crop_cameras maps
            hand_id -> {view_idx: crop_camera}, omitting hands with no
            confident view; best_view_id[hand_id] is the most confident view
            index (0 when no view passed the threshold).
        """
        num_views = len(input_frame.views)
        batch_images = []
        for view in input_frame.views:
            image = view.image
            image = image.astype('float32') / 255.0
            image = torch.tensor(image).unsqueeze(0).float()
            batch_images.append(image)
        batch_images = torch.stack(batch_images)
        # The model expects inputs normalized to [-1, 1].
        input_tensor = batch_images * 2.0 - 1.0
        with torch.no_grad():
            center_pred, scale_pred, conf_pred = self.model(input_tensor)

        crop_cameras = {}
        best_view_id = [0, 0]
        max_confidences = [0, 0]
        for hand_id in range(2):
            crop_cameras_v = {}
            for v in range(num_views):
                view = input_frame.views[v]
                centers = center_pred[v][hand_id]
                scales = scale_pred[v][hand_id]
                confs = conf_pred[v][hand_id]
                # Skip views where this hand is not confidently detected.
                if confs[0][0] <= confidence_threshold:
                    continue

                # Track the most confident view as the reference camera.
                if confs[0][0] > max_confidences[hand_id]:
                    max_confidences[hand_id] = confs[0][0]
                    best_view_id[hand_id] = v

                crop_camera = get_crop_camera_from_box(
                    view.camera,
                    centers.clone(),
                    scales.clone(),
                    new_image_size,
                    camera_angle=view.camera_angle,
                    focal_multiplier=0.8,
                    mirror_img_x=True
                )
                crop_cameras_v[v] = crop_camera
            if len(crop_cameras_v):
                crop_cameras[hand_id] = crop_cameras_v
        return crop_cameras, best_view_id

    def detect(self, input_frame, new_image_size: Tuple[int, int],
               confidence_threshold: float = HAND_CONFIDENCE_THRESHOLD,
               ):
        """Detect hands and pack per-hand multi-view samples.

        Args:
            input_frame: frame with ``views``.
            new_image_size: (width, height) of the crop images.
                NOTE(review): the image buffers below are RESIZE_SIZE x
                RESIZE_SIZE, so callers are expected to pass
                (RESIZE_SIZE, RESIZE_SIZE) — confirm before changing.
            confidence_threshold: per-view detection confidence cut-off.
                Defaults to HAND_CONFIDENCE_THRESHOLD (0.5, same as before).

        Returns:
            dict {0: sample | None, 1: sample | None}; each sample holds
            'images' (V, 1, H, W), 'intrinsics' (V, 3, 3), 'extrinsics'
            (V, 4, 4), 'view_mask' (V,), 'hand_idx' and
            'reference_camera_idx'.
        """
        num_views = len(input_frame.views)
        crop_cameras, reference_camera_idxs = self.gen_crop_cameras(
            input_frame,
            new_image_size,
            confidence_threshold
        )

        hand_samples = {0: None, 1: None}
        for hand_idx, crop_camera_info in crop_cameras.items():
            # Per-view buffers; padded views keep a zero image, identity
            # matrices and view_mask == 0.
            left_images_filled = [np.zeros((RESIZE_SIZE, RESIZE_SIZE), dtype=np.float32) for _ in range(num_views)]
            intrinsics_filled = [np.eye(3, dtype=np.float32) for _ in range(num_views)]
            view_masks = [0] * num_views  # binary mask per view (0 = padded, 1 = valid)
            extrinsics_xf = [np.eye(4, dtype=np.float32) for _ in range(num_views)]

            for cam_idx, crop_camera in crop_camera_info.items():
                view_data = input_frame.views[cam_idx]
                crop_image = perspective_crop_image_cv(
                    view_data.image.astype(np.float32),
                    view_data.camera,
                    crop_camera,
                )

                # Normalize to [0, 1] and store as a single channel.
                left_images_filled[cam_idx] = crop_image.astype(np.float32) / 255.0
                intrinsics_filled[cam_idx] = crop_camera.uv_to_window_matrix().astype(np.float32)

                # NOTE(review): this stores world->eye, while
                # detect_from_history stores eye->world (camera_to_world_xf
                # un-inverted) — confirm which convention the downstream
                # tracker expects for each path.
                world_to_eye = np.linalg.inv(crop_camera.camera_to_world_xf).copy()
                world_to_eye[:3, 3] *= MM_TO_M
                extrinsics_xf[cam_idx] = world_to_eye.astype(np.float32)

                view_masks[cam_idx] = 1

            # Convert to torch tensors.
            images_tensor = torch.stack([
                torch.from_numpy(img).unsqueeze(0) for img in left_images_filled
            ])  # (V, 1, H, W)
            intrinsics_tensor = torch.stack([
                torch.from_numpy(K) for K in intrinsics_filled
            ])  # (V, 3, 3)
            extrinsics_tensor = torch.stack([
                torch.from_numpy(xf) for xf in extrinsics_xf
            ])  # (V, 4, 4)
            masks_tensor = torch.tensor(view_masks, dtype=torch.uint8)  # (V,)
            hand_idx_tensor = torch.tensor(hand_idx, dtype=torch.int64)

            reference_camera_idx = torch.tensor(reference_camera_idxs[hand_idx], dtype=torch.int64)

            sample = {
                'images': images_tensor,
                'intrinsics': intrinsics_tensor,
                'extrinsics': extrinsics_tensor,
                'view_mask': masks_tensor,
                'hand_idx': hand_idx_tensor,
                'reference_camera_idx': reference_camera_idx
            }
            hand_samples[hand_idx] = sample
        return hand_samples

    def detect_from_history(self, input_frame, gt_tracking):
        """Build samples using crops derived from supplied tracking.

        Mirrors YVRGTDetector.detect: crop cameras come from ``gt_tracking``
        (e.g. the previous frame's pose) instead of the detection network.

        Args:
            input_frame: frame object with ``views``.
            gt_tracking: mapping hand_idx -> landmarks in world coordinates.

        Returns:
            dict {0: sample | None, 1: sample | None}; see ``detect`` for the
            sample layout, plus a 'landmarks' entry. Extrinsics here are
            camera->world with translation in meters.
        """
        stream = self.loader._image_stream
        num_views = len(input_frame.views)
        # Hoisted: the camera list is reused for reference-camera selection.
        view_cameras = [view.camera for view in input_frame.views]
        crop_cameras = gen_crop_cameras_yvr(
            view_cameras,
            stream.camera_angles,
            gt_tracking,
            min_num_crops=MIN_NUM_CROPS,
            min_required_vis_landmarks=MIN_LANDMARKS,
            new_image_size=(RESIZE_SIZE, RESIZE_SIZE),
            max_view_num=NUM_VIEWS,
            hand_ratio_in_crop=HAND_RATIO_IN_CROP,
            augment=False,
            mirror_right_hand=True
        )
        hand_samples = {0: None, 1: None}
        for hand_idx, crop_camera_info in crop_cameras.items():
            reference_camera_idx = gen_reference_camera_idx(
                view_cameras,
                gt_tracking[hand_idx],
                min_required_vis_landmarks=MIN_LANDMARKS,
            )

            # Per-view buffers; padded views keep zero image / identities.
            left_images_filled = [np.zeros((RESIZE_SIZE, RESIZE_SIZE), dtype=np.float32) for _ in range(num_views)]
            intrinsics_filled = [np.eye(3, dtype=np.float32) for _ in range(num_views)]
            view_masks = [0] * num_views
            extrinsics_xf = [np.eye(4, dtype=np.float32) for _ in range(num_views)]

            # Landmarks for this hand from the supplied tracking.
            landmarks = gt_tracking[hand_idx]

            for cam_idx, crop_camera in crop_camera_info.items():
                view_data = input_frame.views[cam_idx]
                crop_image = _warp_image(view_data.camera, crop_camera, view_data.image)

                # Normalize to [0, 1] and store as a single channel.
                left_images_filled[cam_idx] = crop_image.astype(np.float32) / 255.0
                intrinsics_filled[cam_idx] = crop_camera.uv_to_window_matrix().astype(np.float32)

                # Extrinsics: camera->world, translation rescaled mm -> m.
                eye_to_world = crop_camera.camera_to_world_xf.copy()
                eye_to_world[:3, 3] *= MM_TO_M
                extrinsics_xf[cam_idx] = eye_to_world.astype(np.float32)

                view_masks[cam_idx] = 1

            # Pack tensors (compact dtypes for the index-like fields).
            sample = {
                'images': torch.tensor(np.array(left_images_filled)).unsqueeze(1),
                'intrinsics': torch.tensor(np.array(intrinsics_filled)),
                'extrinsics': torch.tensor(np.array(extrinsics_xf)),
                'view_mask': torch.tensor(np.array(view_masks), dtype=torch.uint8),
                'landmarks': torch.tensor(np.array(landmarks), dtype=torch.float32),
                'hand_idx': torch.tensor(hand_idx, dtype=torch.uint8),
                'reference_camera_idx': torch.tensor(reference_camera_idx, dtype=torch.uint8),
            }

            hand_samples[hand_idx] = sample

        return hand_samples