import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
import os
import glob

from hand.track.umetrack.hand.combine_hand import get_all_hand_model

import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
import os
import glob
import random


class HandPoseDataset(Dataset):
    """Multi-view hand pose estimation dataset backed by batched HDF5 files.

    Each sample holds images from several camera views together with per-view
    intrinsics/extrinsics, a view-validity mask, 3D hand landmarks, a hand
    index, and a reference-camera index.  ``__getitem__`` selects
    ``num_views`` views (preferring valid ones and keeping the reference view
    when possible) and composes, per view, the homogeneous transform
    ``T_cam2world @ S_z_inv @ K_inv`` returned as ``total_xfs``.
    """

    def __init__(self, root_dir, split='training', transform=None, num_views=4):
        """
        Args:
            root_dir (str): Dataset root directory (SAVE_ROOT).
            split (str): Dataset split ('training' or 'testing').
            transform (callable, optional): Optional transform applied to each
                sample dict before it is returned.
            num_views (int): Number of views to select per sample.
        """
        self.root_dir = root_dir
        self.split = split
        self.transform = transform
        self.num_views = num_views
        self.file_paths = []
        self.file_indices = []  # (start, end) global sample range per file
        self.total_samples = 0
        # Target focal length the intrinsics are rescaled to (see __getitem__).
        self.focal = 200

        # Collect all batch files for this split.
        batch_dir = os.path.join(root_dir, split, 'batches')
        h5_files = glob.glob(os.path.join(batch_dir, '*.h5'))

        # Build the global-index -> file mapping and cache per-file metadata
        # in a single pass (previously every file was opened twice).
        self.file_metadata = []
        for file_path in sorted(h5_files):
            with h5py.File(file_path, 'r') as h5f:
                num_samples = h5f['images'].shape[0]
                self.file_paths.append(file_path)
                self.file_indices.append((self.total_samples, self.total_samples + num_samples))
                self.total_samples += num_samples
                self.file_metadata.append({
                    'num_samples': num_samples,
                    'datasets': list(h5f.keys()),
                })

    def __len__(self):
        return self.total_samples

    def _locate(self, idx):
        """Map a global sample index to ``(file_idx, local_idx)``.

        Raises:
            IndexError: If ``idx`` does not fall inside any file's range.
        """
        for file_idx, (start, end) in enumerate(self.file_indices):
            if start <= idx < end:
                return file_idx, idx - start
        raise IndexError(f"Index {idx} out of range")

    def _select_views(self, view_mask, reference_camera_idx):
        """Choose up to ``self.num_views`` view indices for one sample.

        Valid views (mask == 1) are preferred; invalid views are used only as
        padding.  When enough valid views exist, the reference view is forced
        into the selection (re-drawn from the valid views if the stored
        reference is itself invalid).

        Returns:
            tuple: ``(selected_views, reference_camera_idx)``.
        """
        valid_views = np.where(view_mask == 1)[0]
        if len(valid_views) < self.num_views:
            # Not enough valid views: take all of them, then pad with
            # invalid views until the requested count is reached.
            selected_views = list(valid_views)
            invalid_views = list(np.where(view_mask == 0)[0])
            while len(selected_views) < self.num_views and invalid_views:
                selected_views.append(invalid_views.pop(0))
        else:
            # If the stored reference view is invalid, pick a valid one.
            if reference_camera_idx not in valid_views:
                reference_camera_idx = random.choice(valid_views)

            # Randomly sample the remaining views from the other valid ones,
            # padding with invalid views only when necessary.
            other_valid = [v for v in valid_views if v != reference_camera_idx]
            if len(other_valid) >= self.num_views - 1:
                selected_other = random.sample(other_valid, self.num_views - 1)
            else:
                selected_other = other_valid
                invalid_views = list(np.where(view_mask == 0)[0])
                while len(selected_other) < self.num_views - 1 and invalid_views:
                    selected_other.append(invalid_views.pop(0))

            selected_views = [reference_camera_idx] + selected_other

        return selected_views[:self.num_views], reference_camera_idx

    def __getitem__(self, idx):
        """Load one sample and assemble it as a dict of tensors."""
        file_idx, local_idx = self._locate(idx)
        file_path = self.file_paths[file_idx]

        # Read the raw sample from its HDF5 file.
        with h5py.File(file_path, 'r') as h5f:
            # Images are normalized to [0, 1] (assumes uint8 storage).
            images = h5f['images'][local_idx].astype(np.float32) / 255.0
            intrinsics = h5f['intrinsics'][local_idx].astype(np.float32)
            extrinsics = h5f['extrinsics'][local_idx].astype(np.float32)  # camera_to_world
            view_mask = h5f['view_mask'][local_idx].astype(np.float32)
            landmarks = h5f['landmarks'][local_idx].astype(np.float32)
            hand_idx = h5f['hand_idx'][local_idx].astype(np.int64)
            reference_camera_idx = h5f['reference_camera_idx'][local_idx].astype(np.int64)

        selected_views, reference_camera_idx = self._select_views(view_mask, reference_camera_idx)

        # Keep only the selected views.
        images = images[selected_views]
        intrinsics = intrinsics[selected_views]
        extrinsics = extrinsics[selected_views]
        view_mask = view_mask[selected_views]

        # Re-index the reference view within the selected subset; fall back
        # to view 0 when the reference did not make it into the selection.
        new_reference_idx = np.where(np.array(selected_views) == reference_camera_idx)[0]
        reference_camera_idx = new_reference_idx[0] if len(new_reference_idx) > 0 else 0

        V = images.shape[0]
        intrinsics_flat = torch.from_numpy(intrinsics).reshape(-1, 3, 3)
        extrinsics_flat = torch.from_numpy(extrinsics).reshape(-1, 4, 4)

        # world_to_camera transform of the reference view.
        ref_camera_to_world = extrinsics_flat[reference_camera_idx]
        world_to_ref_camera = torch.inverse(ref_camera_to_world)

        # 1. Inverse intrinsics, embedded into 4x4 homogeneous matrices.
        K_inv = torch.inverse(intrinsics_flat)  # (V, 3, 3)
        K4_inv = torch.eye(4, device=K_inv.device, dtype=K_inv.dtype)
        K4_inv = K4_inv.unsqueeze(0).repeat(V, 1, 1)  # (V, 4, 4)
        K4_inv[:, :3, :3] = K_inv

        # 2. Focal-length normalization: rescale z by f_orig / self.focal.
        f_orig = intrinsics_flat[:, 0, 0]  # (V,)
        scale_z = (f_orig / self.focal)
        S_z_inv = torch.eye(4, device=f_orig.device, dtype=f_orig.dtype)
        S_z_inv = S_z_inv.unsqueeze(0).repeat(V, 1, 1)  # (V, 4, 4)
        S_z_inv[:, 2, 2] = scale_z

        # 3. Compose the full per-view transform (V, 4, 4).
        T_cam2world = extrinsics_flat  # (V, 4, 4) camera-to-world
        total_xfs = T_cam2world @ S_z_inv @ K4_inv

        sample = {
            'images': torch.from_numpy(images),  # [NUM_SELECTED_VIEWS, 1, H, W]
            'intrinsics': torch.from_numpy(intrinsics),  # [NUM_SELECTED_VIEWS, 3, 3]
            'extrinsics': torch.from_numpy(extrinsics),
            'total_xfs': total_xfs,  # [NUM_SELECTED_VIEWS, 4, 4]
            'view_mask': torch.from_numpy(view_mask),  # [NUM_SELECTED_VIEWS]
            'landmarks': torch.from_numpy(landmarks),  # [21, 3]
            'hand_idx': torch.tensor(hand_idx),  # scalar
            'reference_camera_idx': torch.tensor(reference_camera_idx),  # scalar
            'world_to_ref_camera': world_to_ref_camera,  # [4, 4] world-to-reference-camera
        }

        if self.transform:
            sample = self.transform(sample)

        return sample

    def get_dataset_stats(self):
        """Return summary statistics for this dataset split."""
        return {
            'total_samples': self.total_samples,
            'num_files': len(self.file_paths),
            'split': self.split,
            'data_dir': os.path.join(self.root_dir, self.split)
        }


if __name__ == '__main__':
    from torch.utils.data import DataLoader

    # Build the training split (pass a custom transform here if needed).
    train_dataset = HandPoseDataset(
        root_dir='C:/workspace/yvr_data',
        split='training',
        transform=None,
    )

    # Print a short summary of the split.
    stats = train_dataset.get_dataset_stats()
    print(f"数据集信息: {stats['split']}, 样本数: {stats['total_samples']}, 文件数: {stats['num_files']}")

    # Multi-process loader with pinned memory for faster GPU transfer.
    loader = DataLoader(
        train_dataset,
        batch_size=32,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )

    # Iterate over batches.
    for batch in loader:
        images = batch['images']  # [B, NUM_VIEWS, 1, H, W]
        view_mask = batch['view_mask']  # [B, NUM_VIEWS]

        # Model training / evaluation code goes here.
        # ...