import os
import torch
from torch.utils.data import Dataset, DataLoader


class HandPoseDataset(Dataset):
    """
    PyTorch Dataset over processed hand-pose samples stored as one .pt
    file per sample under ``<samples_root>/samples``.

    Each loaded sample is a dict with keys:
      - images: Tensor of shape (V, 1, 96, 96)
      - intrinsics: Tensor of shape (V, 3, 3)
      - extrinsics: Tensor of shape (V, 4, 4)
      - view_mask: Tensor of shape (V,)
      - joints: Tensor of shape (J,)
      - wrist: Tensor of shape (4, 4)
      - hand_idx: Tensor scalar
    """

    def __init__(self, samples_root: str, transforms=None):
        """
        Args:
            samples_root: directory that contains the 'samples' subfolder.
            transforms: optional callable applied to each loaded sample dict.
        """
        samples_dir = os.path.join(samples_root, 'samples')
        self.samples_dir = samples_dir
        # Deterministic ordering: sort the .pt filenames lexicographically.
        names = sorted(f for f in os.listdir(samples_dir) if f.endswith('.pt'))
        self.file_list = [os.path.join(samples_dir, f) for f in names]
        self.transforms = transforms

    def __len__(self):
        """Number of .pt sample files found at construction time."""
        return len(self.file_list)

    def __getitem__(self, idx):
        """Load sample `idx` from disk and apply transforms, if any."""
        sample = torch.load(self.file_list[idx])
        if self.transforms:
            sample = self.transforms(sample)
        return sample


class HandPoseTransformedDataset(Dataset):
    """
    Dataset that augments each stored hand-pose sample with a combined
    per-view transform.

    Returns a dict with keys:
      - images:    (V, 1, 96, 96) per-view images
      - total_xfs: (V, 4, 4) per-view T_cam2world @ S_z_inv @ K4_inv
      - view_mask: (V,) per-view validity mask
      - joints:    (J,) joint target vector
      - wrist:     (4, 4) wrist pose matrix
      - hand_idx:  scalar tensor
    """

    def __init__(self, samples_root: str, transforms=None, focal=200):
        """
        Args:
            samples_root: directory that contains the 'samples' subfolder
                of per-sample .pt files.
            transforms: optional callable applied to each loaded sample dict.
            focal: reference focal length used to normalize each view's
                focal length (z-scale = f_view / focal).
        """
        self.samples_dir = os.path.join(samples_root, 'samples')
        self.file_list = sorted(
            os.path.join(self.samples_dir, f)
            for f in os.listdir(self.samples_dir)
            if f.endswith('.pt')
        )
        self.transforms = transforms
        self.focal = focal

    def __len__(self):
        """Number of .pt sample files found at construction time."""
        return len(self.file_list)

    def _compute_total_xfs(self, intrinsics, extrinsics):
        """Compose the per-view transform T_cam2world @ S_z_inv @ K4_inv.

        Args:
            intrinsics: (V, 3, 3) float camera intrinsic matrices.
            extrinsics: (V, 4, 4) float world->camera extrinsic matrices.

        Returns:
            (V, 4, 4) tensor combining inverse intrinsics, focal
            normalization, and the camera-to-world transform.
        """
        V = intrinsics.shape[0]
        device, dtype = intrinsics.device, intrinsics.dtype

        # Embed K^-1 in the top-left 3x3 of a 4x4 identity per view.
        K_inv = torch.linalg.inv(intrinsics)  # (V, 3, 3)
        K4_inv = torch.eye(4, device=device, dtype=dtype).repeat(V, 1, 1)
        K4_inv[:, :3, :3] = K_inv

        # Focal normalization: scale z by f_view / reference focal.
        # f_view is read from K[0, 0] (assumes square pixels, fx == fy
        # — TODO confirm against the data pipeline).
        scale_z = intrinsics[:, 0, 0] / self.focal  # (V,)
        S_z_inv = torch.eye(4, device=device, dtype=dtype).repeat(V, 1, 1)
        S_z_inv[:, 2, 2] = scale_z

        # Invert world->camera to get camera->world.
        T_cam2world = torch.linalg.inv(extrinsics)  # (V, 4, 4)

        return T_cam2world @ S_z_inv @ K4_inv

    def __getitem__(self, idx):
        """Load sample `idx`, apply transforms, and attach total_xfs."""
        sample = torch.load(self.file_list[idx])
        if self.transforms:
            sample = self.transforms(sample)

        images = sample['images']  # (V, 1, 96, 96)
        # Cast BOTH camera matrices to float32. The original only cast
        # extrinsics; integer/half intrinsics would make the inverse fail
        # or the final matmul mix dtypes.
        intrinsics = sample['intrinsics'].float()  # (V, 3, 3)
        extrinsics = sample['extrinsics'].float()  # (V, 4, 4)

        total_xfs = self._compute_total_xfs(intrinsics, extrinsics)

        return {
            "images": images,
            "total_xfs": total_xfs,
            "view_mask": sample['view_mask'],  # (V,)
            "joints": sample['joints'],        # (J,)
            "wrist": sample['wrist'],          # (4, 4)
            "hand_idx": sample['hand_idx'],    # scalar
        }


if __name__ == '__main__':
    # Smoke test: load the processed dataset and print batch shapes.
    # Keep DataLoader construction under the __main__ guard: on Windows,
    # worker processes are spawned and re-import this module.

    # Modify parameters as needed
    samples_root = r'C:\workspace\umetrack_data\torch_data\training'
    batch_size = 16
    num_workers = 4  # Set to 0 if you encounter worker spawn issues

    dataset = HandPoseDataset(samples_root, transforms=None)
    # The original loop printed blank lines and never used batch_size /
    # num_workers / DataLoader; wire them up so the script verifies the
    # end-to-end loading path.
    loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers)
    for batch in loader:
        print({key: tuple(val.shape) for key, val in batch.items()})
