import cv2
import numpy as np
import torch

from hand.track.umetrack.camera.camera_utils import _warp_image
from hand.track.umetrack.pipelines.crop.perspective_crop import gen_crop_cameras, gen_reference_camera_idx
from hand.track.umetrack.pipelines.track.tracker import MM_TO_M
from .base import Detector
from .warp import perspective_crop_image_cv

# Minimum visible landmarks required when selecting the reference camera
# (passed to gen_reference_camera_idx as min_required_vis_landmarks).
MIN_LANDMARKS = 3


class GtDetector(Detector):
    """Detector that builds per-hand crop samples from ground-truth pose labels.

    Instead of running a detection model (the base class is initialized with
    ``None``), it reads hand poses from ``image_pose_stream`` and generates
    multi-view cropped image samples plus pose targets as torch tensors.
    """

    # Side length (pixels) of the square crop produced for every view.
    CROP_SIZE = 96

    def __init__(self, image_pose_stream):
        # No model is needed: poses come from the ground-truth stream.
        super().__init__(None)
        self.image_pose_stream = image_pose_stream

    def process(self, image):
        # Ground-truth detection needs no per-image preprocessing.
        pass

    def detect(self, input_frame, gt_tracking, skeleton=False):
        """Generate crop samples for each tracked hand.

        Args:
            input_frame: Multi-view frame; each view exposes ``.image`` and
                ``.camera``.
            gt_tracking: Per-hand ground-truth pose, indexed by hand index,
                with ``joint_angles`` and ``wrist_xform`` attributes.
            skeleton: When True, additionally pack the hand-model skeleton
                and reference-camera data into each sample.

        Returns:
            Dict mapping hand index (0 = one hand, 1 = the other) to a sample
            dict of tensors, or ``None`` for hands with no generated crops.
        """
        gt_hand_model = self.image_pose_stream._hand_pose_labels.hand_model
        crop_cameras = gen_crop_cameras(
            [view.camera for view in input_frame.views],
            self.image_pose_stream._hand_pose_labels.camera_angles,
            gt_hand_model,
            gt_tracking,
            min_num_crops=1,
            # NOTE(review): 1 here vs MIN_LANDMARKS (3) used for the
            # reference-camera selection below — confirm the asymmetry
            # is intentional.
            min_required_vis_landmarks=1,
            mirror_right_hand=True,
        )
        hand_samples = {0: None, 1: None}
        for hand_idx, crop_camera_info in crop_cameras.items():
            hand_samples[hand_idx] = self._build_hand_sample(
                input_frame,
                gt_tracking,
                gt_hand_model,
                hand_idx,
                crop_camera_info,
                skeleton,
            )
        return hand_samples

    def _build_hand_sample(
        self, input_frame, gt_tracking, gt_hand_model, hand_idx,
        crop_camera_info, skeleton,
    ):
        """Assemble one hand's multi-view sample dict of torch tensors."""
        num_views = len(input_frame.views)
        size = self.CROP_SIZE
        # Per-view buffers; views without a crop stay zero/identity and are
        # marked as padded in view_masks.
        images = [np.zeros((size, size), dtype=np.float32) for _ in range(num_views)]
        intrinsics = [np.eye(3, dtype=np.float32) for _ in range(num_views)]
        extrinsics = [np.eye(4, dtype=np.float32) for _ in range(num_views)]
        view_masks = [0] * num_views  # binary mask per view (0 = padded, 1 = valid)

        # Pack ground-truth pose targets.
        joint_angles = gt_tracking[hand_idx].joint_angles.astype(np.float32)
        wrist_xform = gt_tracking[hand_idx].wrist_xform.astype(np.float32)

        for cam_idx, crop_camera in crop_camera_info.items():
            view_data = input_frame.views[cam_idx]

            # Reproject the view image into the crop camera.
            crop_image = _warp_image(view_data.camera, crop_camera, view_data.image)

            # Normalize to [0, 1] and store as single channel.
            images[cam_idx] = crop_image.astype(np.float32) / 255.0
            intrinsics[cam_idx] = crop_camera.uv_to_window_matrix().astype(np.float32)

            # Scale translation from millimeters to meters; copy so the
            # camera's own transform is left untouched.
            eye_to_world = crop_camera.camera_to_world_xf.copy()
            eye_to_world[:3, 3] *= MM_TO_M
            extrinsics[cam_idx] = eye_to_world.astype(np.float32)

            view_masks[cam_idx] = 1

        reference_camera_idx = gen_reference_camera_idx(
            [view.camera for view in input_frame.views],
            gt_hand_model,
            gt_tracking[hand_idx],
            hand_idx,
            min_required_vis_landmarks=MIN_LANDMARKS,
        )

        sample = {
            'images': torch.stack(
                [torch.from_numpy(img).unsqueeze(0) for img in images]
            ),  # (V, 1, CROP_SIZE, CROP_SIZE)
            'intrinsics': torch.stack(
                [torch.from_numpy(K) for K in intrinsics]
            ),  # (V, 3, 3)
            'extrinsics': torch.stack(
                [torch.from_numpy(xf) for xf in extrinsics]
            ),  # (V, 4, 4)
            'view_mask': torch.tensor(view_masks, dtype=torch.uint8),  # (V,)
            'joints': torch.from_numpy(joint_angles),  # (J,)
            'wrist': torch.from_numpy(wrist_xform),  # (4, 4)
            'hand_idx': torch.tensor(hand_idx, dtype=torch.int64),
        }
        if skeleton:
            sample['joint_rest_positions'] = gt_hand_model.joint_rest_positions
            sample['joint_rotation_axes'] = gt_hand_model.joint_rotation_axes
            # NOTE(review): the key says 'world_to_ref' but the stored matrix
            # is the eye_to_world (camera_to_world) transform computed above —
            # confirm the naming against the consumer of this sample.
            sample['world_to_ref'] = torch.from_numpy(
                extrinsics[reference_camera_idx]
            )
            sample['reference_camera_idx'] = torch.tensor(
                reference_camera_idx, dtype=torch.int64
            )
        return sample
