import json
import random

import torch
import numpy as np

from hand.track.umetrack.datasets.video_pose_data import load_hand_model_from_dict
from hand.track.umetrack.hand.hand import scaled_hand_model_batch, hand_model_to_device, batch_hand
from hand.track.umetrack.hand.hand_pose_utils import landmarks_from_batch_hand_pose
from hand.track.umetrack.model_v3 import model_utils
from hand.track.umetrack.model_v3.regressor import _gen_rigid_features
from .base import Tracker


class DYDTracker(Tracker):
    def __init__(self, model, device, generic_hand_model):
        super().__init__(model)
        self.device = device
        self.model.to(self.device)
        self.batch_generic = batch_hand(generic_hand_model, 1)
        self.focal = 200

    def track(
            self,
            sample,
    ):
        images = sample['images'].to(self.device)
        intrinsics = sample['intrinsics'].to(self.device)
        extrinsics = sample['extrinsics'].to(self.device)
        view_mask = sample['view_mask'].to(self.device)
        # print(images.shape)
        V = images.shape[1]  # 当前样本的视图数

        # 展平视图维度 (V, ...) -> (V, ...)
        intrinsics_flat = intrinsics.reshape(-1, 3, 3)
        extrinsics_flat = extrinsics.reshape(-1, 4, 4)

        # 1. 计算内参矩阵的逆
        K_inv = torch.inverse(intrinsics_flat)  # (V, 3, 3)
        # 2. 构建4x4的内参逆矩阵
        K4_inv = torch.eye(4, device=K_inv.device, dtype=K_inv.dtype)
        K4_inv = K4_inv.unsqueeze(0).repeat(V, 1, 1)  # (V, 4, 4)
        K4_inv[:, :3, :3] = K_inv
        # 3. 焦距归一化
        f_orig = intrinsics_flat[:, 0, 0]  # (V,)
        # scale_z = (self.focal / f_orig)
        scale_z = (f_orig / self.focal)
        S_z_inv = torch.eye(4, device=f_orig.device, dtype=f_orig.dtype)
        S_z_inv = S_z_inv.unsqueeze(0).repeat(V, 1, 1)  # (V, 4, 4)
        S_z_inv[:, 2, 2] = scale_z
        # 4. 计算相机到世界坐标系的变换
        T_cam2world = torch.inverse(extrinsics_flat)  # (V, 4, 4)
        # 5. 组合完整变换矩阵 (V, 4, 4)
        total_xfs = T_cam2world @ S_z_inv @ K4_inv
        pred_joints, pred_wrist, skel_scales, pred_hand_logits = self.model(
            images, view_mask, total_xfs
        )

        # Forward
        # pred_joints, pred_wrist, skel_scales, pred_hand_logits = self.model(
        #     images, extrinsics, intrinsics, view_mask
        # )
        pred_hand_idx = torch.argmax(pred_hand_logits, dim=1)
        pred_wrist[..., :3, 3] *= 1000  # scale translation component

        # Landmark computation
        # calibrated = scaled_hand_model_batch(self.batch_generic, skel_scales)
        # pred_landmarks = landmarks_from_batch_hand_pose(
        #     self.batch_generic, pred_joints, pred_wrist, pred_hand_idx
        # )
        pred_joints = pred_joints.squeeze(0)
        pred_wrist = pred_wrist.squeeze(0)
        skel_scales = skel_scales[0].item()
        pred_hand_idx = pred_hand_idx[0].item()
        return pred_joints, pred_wrist, skel_scales, pred_hand_idx


class DYDTrackerV2(Tracker):
    """Tracker variant that selects a fixed number of views and predicts the
    wrist pose in a reference-camera frame, mapping it back to world space."""

    def __init__(self, model, device, generic_hand_model):
        super().__init__(model)
        self.device = device
        self.model.to(self.device)
        # Batched generic hand model (batch size 1); currently unused in track().
        self.batch_generic = batch_hand(generic_hand_model, 1)
        # Reference focal length every view's depth is normalized to.
        self.focal = 200
        # Number of views fed to the model.
        self.num_views = 4

    def _select_views(self, view_mask, reference_camera_idx):
        """Pick up to ``self.num_views`` view indices.

        Prefers valid views (mask == 1), keeps the reference view first when
        enough valid views exist, and pads with invalid views otherwise.
        Uses ``random`` for tie-breaking, so selection is nondeterministic.

        Returns:
            (selected_views, new_reference_idx): chosen view indices and the
            position of the reference view within them (0 if it was dropped).
        """
        valid_views = np.where(view_mask == 1)[0]
        if len(valid_views) < self.num_views:
            # Not enough valid views: take them all, then pad with invalid ones.
            selected = list(valid_views)
            invalid = list(np.where(view_mask == 0)[0])
            while len(selected) < self.num_views and invalid:
                selected.append(invalid.pop(0))
        else:
            # Ensure the reference view is usable; otherwise re-pick one.
            if reference_camera_idx not in valid_views:
                reference_camera_idx = random.choice(valid_views)
            others = [v for v in valid_views if v != reference_camera_idx]
            if len(others) >= self.num_views - 1:
                chosen = random.sample(others, self.num_views - 1)
            else:
                chosen = others
                invalid = list(np.where(view_mask == 0)[0])
                while len(chosen) < self.num_views - 1 and invalid:
                    chosen.append(invalid.pop(0))
            selected = [reference_camera_idx] + chosen

        selected = selected[:self.num_views]

        # Locate the reference view inside the selection; default to view 0.
        hit = np.where(np.array(selected) == reference_camera_idx)[0]
        new_reference_idx = int(hit[0]) if len(hit) > 0 else 0
        return selected, new_reference_idx

    def track(self, sample):
        """Run the model on one multi-view sample of CPU tensors.

        Args:
            sample: dict with 'images', 'intrinsics', 'extrinsics'
                (camera_to_world), 'view_mask' and 'reference_camera_idx'.

        Returns:
            Tuple ``(pred_joints, pred_wrist, skel_scale, pred_hand_idx)``;
            the wrist pose is mapped from the reference-camera frame to world.
        """
        images = sample['images'].numpy().astype(np.float32)
        intrinsics = sample['intrinsics'].numpy().astype(np.float32)
        extrinsics = sample['extrinsics'].numpy().astype(np.float32)  # camera_to_world
        view_mask = sample['view_mask'].numpy().astype(np.float32)
        reference_camera_idx = sample['reference_camera_idx'].numpy().astype(np.int64)

        # BUG FIX: a leftover debug override (`selected_views = [0, 1, 2, 3]`)
        # used to discard the selection logic entirely; it has been removed so
        # the view-mask/reference-aware selection actually takes effect.
        selected_views, reference_camera_idx = self._select_views(
            view_mask, reference_camera_idx
        )

        # Keep only the selected views.
        images = images[selected_views]
        intrinsics = intrinsics[selected_views]
        extrinsics = extrinsics[selected_views]
        view_mask = view_mask[selected_views]

        V = images.shape[0]
        intrinsics_flat = torch.from_numpy(intrinsics).reshape(-1, 3, 3)
        extrinsics_flat = torch.from_numpy(extrinsics).reshape(-1, 4, 4)

        # World -> reference-camera transform (extrinsics are camera_to_world).
        world_to_ref = torch.inverse(extrinsics_flat[reference_camera_idx])

        # Inverse intrinsics embedded in a 4x4 block.
        K_inv = torch.inverse(intrinsics_flat)  # (V, 3, 3)
        K4_inv = torch.eye(4, dtype=K_inv.dtype).unsqueeze(0).repeat(V, 1, 1)
        K4_inv[:, :3, :3] = K_inv

        # Depth rescaling normalizing each focal length to self.focal.
        f_orig = intrinsics_flat[:, 0, 0]  # (V,)
        S_z_inv = torch.eye(4, dtype=f_orig.dtype).unsqueeze(0).repeat(V, 1, 1)
        S_z_inv[:, 2, 2] = f_orig / self.focal

        # Full pixel -> world transform per view (extrinsics already cam->world).
        total_xfs = extrinsics_flat @ S_z_inv @ K4_inv

        # BUG FIX: move inputs to self.device before the forward pass; they
        # previously stayed on CPU, breaking any non-CPU device (the sibling
        # trackers already do this).
        images_t = torch.from_numpy(images).unsqueeze(0).to(self.device)
        view_mask_t = torch.from_numpy(view_mask).unsqueeze(0).to(self.device)
        total_xfs_t = total_xfs.unsqueeze(0).to(self.device)
        world_to_ref_t = world_to_ref.unsqueeze(0).to(self.device)

        pred_joints, pred_wrist, skel_scales, pred_hand_logits = self.model(
            images_t, view_mask_t, total_xfs_t, world_to_ref_t
        )
        # Map the wrist pose back from the reference-camera frame to world.
        pred_wrist = torch.inverse(world_to_ref_t) @ pred_wrist

        pred_hand_idx = torch.argmax(pred_hand_logits, dim=1)
        # Scale translation component (presumably meters -> millimeters;
        # TODO confirm units against the model's output convention).
        pred_wrist[..., :3, 3] *= 1000

        # Drop the batch dimension (batch size 1).
        pred_joints = pred_joints.squeeze(0)
        pred_wrist = pred_wrist.squeeze(0)
        skel_scales = skel_scales[0].item()
        pred_hand_idx = pred_hand_idx[0].item()
        return pred_joints, pred_wrist, skel_scales, pred_hand_idx


class DYDTrackerSK(Tracker):
    """Tracker variant that additionally conditions the model on per-sample
    skeleton data (joint rotation axes and rest positions)."""

    def __init__(self, model, device, hand_model):
        super().__init__(model)
        self.device = device
        self.model.to(self.device)
        # Batched hand model (batch size 1).
        self.hand_model = batch_hand(hand_model, 1)
        # Reference focal length every view's depth is normalized to.
        self.focal = 200

    def track(self, sample):
        """Run the model on one multi-view sample with skeleton conditioning.

        Args:
            sample: dict with 'images', 'intrinsics', 'extrinsics',
                'view_mask', 'joint_rotation_axes', 'joint_rest_positions'
                and 'world_to_ref' tensors; a leading batch dimension of 1
                is assumed by the final squeeze/indexing.

        Returns:
            Tuple ``(pred_joints, pred_wrist, skel_scale,
            pred_hand_model_idx)`` with the batch dimension removed; the
            wrist pose is mapped from the reference-camera frame to world.
        """
        images = sample['images'].to(self.device)
        intrinsics = sample['intrinsics'].to(self.device)
        extrinsics = sample['extrinsics'].to(self.device)
        view_mask = sample['view_mask'].to(self.device)
        joint_rotation_axes = sample['joint_rotation_axes'].to(self.device)
        joint_rest_positions = sample['joint_rest_positions'].to(self.device)
        world_to_ref = sample['world_to_ref'].to(self.device)
        # NOTE: the unused read of sample['reference_camera_idx'] was removed.

        # Flatten (batch, view) dims for per-view matrix math.
        intrinsics_flat = intrinsics.reshape(-1, 3, 3)
        extrinsics_flat = extrinsics.reshape(-1, 4, 4)
        # Size from the flattened tensor itself: correct even when batch > 1
        # (the previous code used images.shape[1], valid only for batch == 1).
        n = intrinsics_flat.shape[0]

        # Inverse intrinsics embedded in a 4x4 block.
        K_inv = torch.inverse(intrinsics_flat)  # (N, 3, 3)
        K4_inv = torch.eye(4, device=K_inv.device, dtype=K_inv.dtype)
        K4_inv = K4_inv.unsqueeze(0).repeat(n, 1, 1)  # (N, 4, 4)
        K4_inv[:, :3, :3] = K_inv

        # Depth rescaling normalizing each focal length to self.focal.
        f_orig = intrinsics_flat[:, 0, 0]  # (N,)
        S_z_inv = torch.eye(4, device=f_orig.device, dtype=f_orig.dtype)
        S_z_inv = S_z_inv.unsqueeze(0).repeat(n, 1, 1)  # (N, 4, 4)
        S_z_inv[:, 2, 2] = f_orig / self.focal

        # Camera-to-world (extrinsics here are world-to-camera, hence inverted).
        T_cam2world = torch.inverse(extrinsics_flat)  # (N, 4, 4)
        total_xfs = T_cam2world @ S_z_inv @ K4_inv

        pred_joints, pred_wrist, skel_scales, pred_hand_logits = self.model(
            images, view_mask, total_xfs, joint_rotation_axes,
            joint_rest_positions, world_to_ref
        )
        # Map the wrist pose back from the reference-camera frame to world.
        pred_wrist = torch.inverse(world_to_ref) @ pred_wrist

        pred_hand_model_idx = torch.argmax(pred_hand_logits, dim=1)
        # Scale translation component (presumably meters -> millimeters;
        # TODO confirm units against the model's output convention).
        pred_wrist[..., :3, 3] *= 1000

        # Drop the batch dimension (batch size 1 assumed).
        pred_joints = pred_joints.squeeze(0)
        pred_wrist = pred_wrist.squeeze(0)
        skel_scales = skel_scales[0].item()
        pred_hand_model_idx = pred_hand_model_idx[0].item()
        return pred_joints, pred_wrist, skel_scales, pred_hand_model_idx
