import cv2
import numpy as np
import torch

from typing import Tuple
from hand.track.umetrack.camera.camera_utils import _warp_image
from hand.track.umetrack.pipelines.crop.crop import get_crop_camera_from_box
from hand.track.umetrack.pipelines.crop.perspective_crop import gen_reference_camera_idx
from hand.track.umetrack.pipelines.track.tracker import MM_TO_M
from hand.detect.datasets.detect_dataset import HandDetectionDataset
# from .warp import perspective_crop_image_cv, perspective_crop_image
from .base import Detector
from .warp import perspective_crop_image_cv

# Minimum number of ground-truth landmarks that must be visible in a view for
# it to be eligible as the reference camera (passed to
# gen_reference_camera_idx as min_required_vis_landmarks).
MIN_LANDMARKS = 3


class DetNetDetector(Detector):
    """Hand detector that runs a DetNet-style network on multi-view frames.

    The model predicts, per view and per hand, a box center, a box scale and
    a confidence. Confident detections are converted into perspective crop
    cameras, and `detect()` additionally packs cropped images plus
    ground-truth pose into per-hand sample dicts for downstream consumers.
    """

    def __init__(self, image_pose_stream, model):
        """
        Args:
            image_pose_stream: pose-label stream; only
                `_hand_pose_labels.hand_model` is read here.
            model: network mapping a (V, 1, H, W) batch normalized to
                [-1, 1] to (centers, scales, confidences) predictions.
        """
        super().__init__(None)
        self.image_pose_stream = image_pose_stream
        self.model = model

    def process(self, image):
        # Not used by this detector; all work happens in detect().
        pass

    def gen_crop_cameras(self, input_frame, new_image_size: Tuple[int, int], confidence_threshold: float = 0.5):
        """Run the detector on every view and build per-hand crop cameras.

        Args:
            input_frame: multi-view frame; each view exposes `.image`,
                `.camera` and `.camera_angle`.
            new_image_size: resolution of the crop cameras to create.
            confidence_threshold: detections with confidence at or below
                this value are dropped.

        Returns:
            Dict mapping hand_id (0 or 1) -> {view_idx -> crop camera}.
            Hands with no confident detection in any view are omitted.
        """
        num_views = len(input_frame.views)
        # Normalize each uint8 view image to [0, 1] and add a channel axis;
        # stacking yields a (V, 1, H, W) batch.
        batch_images = torch.stack([
            torch.from_numpy(view.image.astype(np.float32) / 255.0).unsqueeze(0)
            for view in input_frame.views
        ])
        # The model expects inputs scaled to [-1, 1].
        input_tensor = batch_images * 2.0 - 1.0
        with torch.no_grad():
            center_pred, scale_pred, conf_pred = self.model(input_tensor)

        crop_cameras = {}
        for hand_id in range(2):
            crop_cameras_v = {}
            for v in range(num_views):
                view = input_frame.views[v]
                confs = conf_pred[v][hand_id]
                if confs[0][0] <= confidence_threshold:
                    # No confident detection for this hand in this view.
                    continue
                crop_camera = get_crop_camera_from_box(
                    view.camera,
                    center_pred[v][hand_id].clone(),
                    scale_pred[v][hand_id].clone(),
                    new_image_size,
                    camera_angle=view.camera_angle,
                    focal_multiplier=0.8,
                )
                crop_cameras_v[v] = crop_camera
            if crop_cameras_v:
                crop_cameras[hand_id] = crop_cameras_v
        return crop_cameras

    def detect(self, input_frame, gt_tracking, new_image_size: Tuple[int, int], confidence_threshold: float = 0.5,
               skeleton=True):
        """Detect hands and assemble per-hand sample dicts.

        Args:
            input_frame: multi-view frame (see gen_crop_cameras).
            gt_tracking: dict hand_idx -> ground-truth tracking result with
                `.joint_angles` and `.wrist_xform`; hands without an entry
                are skipped.
            new_image_size: crop-camera resolution.
            confidence_threshold: forwarded to gen_crop_cameras.
            skeleton: when True, attach hand-model skeleton data and the
                reference-camera transform to each sample.

        Returns:
            Dict {0: sample_or_None, 1: sample_or_None}; each sample is a
            dict of batched (leading dim 1) tensors.
        """
        gt_hand_model = self.image_pose_stream._hand_pose_labels.hand_model
        V = len(input_frame.views)
        crop_cameras = self.gen_crop_cameras(
            input_frame,
            new_image_size,
            confidence_threshold
        )

        hand_samples = {0: None, 1: None}
        for hand_idx, crop_camera_info in crop_cameras.items():
            if gt_tracking.get(hand_idx) is None:
                continue
            # Pre-fill per-view buffers with zero padding; views without a
            # confident detection keep the padding and a mask of 0. The pad
            # shape must match the crop resolution or torch.stack below fails.
            # NOTE(review): assumes new_image_size is (width, height), matching
            # the crop-camera resolution — confirm against
            # get_crop_camera_from_box.
            pad_shape = (new_image_size[1], new_image_size[0])
            left_images_filled = [np.zeros(pad_shape, dtype=np.float32) for _ in range(V)]
            intrinsics_filled = [np.eye(3, dtype=np.float32) for _ in range(V)]
            view_masks = [0] * V  # binary mask per view (0 = padded, 1 = valid)
            extrinsics_xf = [np.eye(4, dtype=np.float32) for _ in range(V)]
            # Pack ground-truth pose
            joint_angles = gt_tracking[hand_idx].joint_angles.astype(np.float32)
            wrist_xform = gt_tracking[hand_idx].wrist_xform.astype(np.float32)

            # Warp each detected view into its crop camera.
            for cam_idx, crop_camera in crop_camera_info.items():
                view_data = input_frame.views[cam_idx]
                crop_image = perspective_crop_image_cv(
                    view_data.image.astype(np.float32),
                    view_data.camera,
                    crop_camera,
                )

                # Normalize and store image as single channel
                left_images_filled[cam_idx] = crop_image.astype(np.float32) / 255.0
                intrinsics_filled[cam_idx] = crop_camera.uv_to_window_matrix().astype(np.float32)

                # World-to-eye transform with translation converted mm -> m.
                world_to_eye = np.linalg.inv(crop_camera.camera_to_world_xf).copy()
                world_to_eye[:3, 3] *= MM_TO_M
                extrinsics_xf[cam_idx] = world_to_eye.astype(np.float32)

                view_masks[cam_idx] = 1

            reference_camera_idx = gen_reference_camera_idx(
                [view.camera for view in input_frame.views],
                gt_hand_model,
                gt_tracking[hand_idx],
                hand_idx,
                min_required_vis_landmarks=MIN_LANDMARKS,
            )

            # Convert to torch tensors
            images_tensor = torch.stack([
                torch.from_numpy(img).unsqueeze(0) for img in left_images_filled
            ])  # (V, 1, H, W)
            intrinsics_tensor = torch.stack([
                torch.from_numpy(K) for K in intrinsics_filled
            ])  # (V, 3, 3)
            extrinsics_tensor = torch.stack([
                torch.from_numpy(xf) for xf in extrinsics_xf
            ])  # (V, 4, 4)
            masks_tensor = torch.tensor(view_masks, dtype=torch.uint8)  # (V,)
            joints_tensor = torch.from_numpy(joint_angles)  # (J,)
            wrist_tensor = torch.from_numpy(wrist_xform)  # (4, 4)
            hand_idx_tensor = torch.tensor(hand_idx, dtype=torch.int64)

            # Build the batched (leading dim 1) sample dict.
            sample = {
                'images': images_tensor.unsqueeze(0),
                'intrinsics': intrinsics_tensor.unsqueeze(0),
                'extrinsics': extrinsics_tensor.unsqueeze(0),
                'view_mask': masks_tensor.unsqueeze(0),
                'joints': joints_tensor.unsqueeze(0),
                'wrist': wrist_tensor.unsqueeze(0),
                'hand_idx': hand_idx_tensor.unsqueeze(0)
            }
            if skeleton:
                sample['joint_rest_positions'] = gt_hand_model.joint_rest_positions.unsqueeze(0)
                sample['joint_rotation_axes'] = gt_hand_model.joint_rotation_axes.unsqueeze(0)
                world_to_ref = extrinsics_xf[reference_camera_idx]
                world_to_ref = torch.from_numpy(world_to_ref)
                sample['world_to_ref'] = world_to_ref.unsqueeze(0)
                reference_camera_idx = torch.tensor(reference_camera_idx, dtype=torch.int64)
                sample['reference_camera_idx'] = reference_camera_idx.unsqueeze(0)
            hand_samples[hand_idx] = sample
        return hand_samples
