import bisect
import glob
import os
import random

import h5py
import numpy as np
import torch
from torch.utils.data import Dataset


class HandPoseDataset(Dataset):
    """Multi-view hand-pose estimation dataset backed by batched HDF5 files.

    Each sample provides images from several camera views together with
    per-view intrinsics/extrinsics.  ``__getitem__`` selects ``num_views``
    views (keeping the reference view whenever possible) and builds the
    combined pixel-to-world transform ``total_xfs_`` consumed by the model.

    Args:
        root_dir (str): dataset root directory (SAVE_ROOT).
        split (str): dataset split, ``'training'`` or ``'testing'``.
        transform (callable, optional): transform applied to each sample dict.
        num_views (int): number of views to select per sample.
    """

    def __init__(self, root_dir, split='training', transform=None, num_views=4):
        self.root_dir = root_dir
        self.split = split
        self.transform = transform
        self.num_views = num_views
        self.file_paths = []
        self.file_indices = []   # (start, end) global sample range per file
        self.file_metadata = []  # per-file sample count and dataset names
        self.total_samples = 0
        self.focal = 200  # target focal length used for z normalization

        # Collect all batch files for the requested split.
        batch_dir = os.path.join(root_dir, split, 'classify_batches')
        h5_files = glob.glob(os.path.join(batch_dir, '*.h5'))

        # Build the global-index -> file mapping and the metadata cache in a
        # single pass, so each file is opened exactly once here (the previous
        # version opened every file twice).
        for file_path in sorted(h5_files):
            with h5py.File(file_path, 'r') as h5f:
                num_samples = h5f['images'].shape[0]
                datasets = list(h5f.keys())
            self.file_paths.append(file_path)
            self.file_indices.append((self.total_samples, self.total_samples + num_samples))
            self.file_metadata.append({
                'num_samples': num_samples,
                'datasets': datasets,
            })
            self.total_samples += num_samples

        # Sorted range starts enable O(log n) index lookup in __getitem__.
        self._starts = [start for start, _ in self.file_indices]

    def __len__(self):
        return self.total_samples

    def _locate(self, idx):
        """Map a global sample index to ``(file_index, local_index)``.

        Uses binary search over the per-file range starts instead of a
        linear scan.  Raises IndexError for out-of-range indices.
        """
        if 0 <= idx < self.total_samples:
            file_idx = bisect.bisect_right(self._starts, idx) - 1
            start, end = self.file_indices[file_idx]
            if start <= idx < end:
                return file_idx, idx - start
        raise IndexError(f"Index {idx} out of range")

    def _select_views(self, view_mask, reference_camera_idx):
        """Pick up to ``num_views`` view indices, preferring valid views.

        The reference view is kept in the selection whenever it is valid;
        an invalid reference is re-drawn at random from the valid views.
        When there are not enough valid views, invalid ones pad the list.

        Returns:
            (selected_views, reference_camera_idx): the chosen original-space
            view indices (length <= num_views) and the possibly updated
            original-space reference index.
        """
        valid_views = np.where(view_mask == 1)[0]
        if len(valid_views) < self.num_views:
            # Not enough valid views: take them all, then pad with invalid ones.
            selected_views = list(valid_views)
            invalid_views = np.where(view_mask == 0)[0]
            while len(selected_views) < self.num_views and len(invalid_views) > 0:
                selected_views.append(invalid_views[0])
                invalid_views = invalid_views[1:]
        else:
            # If the reference view is invalid, re-draw it from the valid set.
            if reference_camera_idx not in valid_views:
                reference_camera_idx = random.choice(valid_views)

            # Fill the remaining slots from the other valid views.
            other_valid_views = [v for v in valid_views if v != reference_camera_idx]
            if len(other_valid_views) >= self.num_views - 1:
                selected_other_views = random.sample(other_valid_views, self.num_views - 1)
            else:
                # Fewer valid views than slots: take them all, pad with invalid.
                selected_other_views = other_valid_views
                invalid_views = np.where(view_mask == 0)[0]
                while len(selected_other_views) < self.num_views - 1 and len(invalid_views) > 0:
                    selected_other_views.append(invalid_views[0])
                    invalid_views = invalid_views[1:]

            # Reference view always goes first.
            selected_views = [reference_camera_idx] + selected_other_views

        return selected_views[:self.num_views], reference_camera_idx

    def _compute_total_xfs(self, intrinsics_tensor, extrinsics_tensor):
        """Compose the per-view pixel-to-world transform.

        Combines the inverse intrinsics (embedded in a 4x4 homogeneous
        matrix), a focal-length normalization along z (f / self.focal), and
        the camera-to-world extrinsics into a single (V, 4, 4) matrix.
        """
        V = intrinsics_tensor.shape[0]

        # 1. Inverse intrinsics embedded in a homogeneous 4x4.
        K_inv = torch.inverse(intrinsics_tensor)  # (V, 3, 3)
        K4_inv = torch.eye(4, dtype=K_inv.dtype).unsqueeze(0).repeat(V, 1, 1)
        K4_inv[:, :3, :3] = K_inv

        # 2. Focal-length normalization: scale z by f / self.focal.
        f_orig = intrinsics_tensor[:, 0, 0]  # (V,)
        S_z_inv = torch.eye(4, dtype=f_orig.dtype).unsqueeze(0).repeat(V, 1, 1)
        S_z_inv[:, 2, 2] = f_orig / self.focal

        # 3. camera_to_world @ z-scale @ K^-1, all (V, 4, 4).
        return extrinsics_tensor @ S_z_inv @ K4_inv

    def __getitem__(self, idx):
        # Resolve the global index to a file and a local offset.
        file_idx, local_idx = self._locate(idx)
        file_path = self.file_paths[file_idx]

        # Read one sample's arrays from the HDF5 file.
        with h5py.File(file_path, 'r') as h5f:
            # Images are stored as uint8; normalize to [0, 1].
            images = h5f['images'][local_idx].astype(np.float32) / 255.0
            intrinsics = h5f['intrinsics'][local_idx].astype(np.float32)
            extrinsics = h5f['extrinsics'][local_idx].astype(np.float32)  # camera_to_world
            view_mask = h5f['view_mask'][local_idx].astype(np.float32)
            label = h5f['label'][local_idx].astype(np.int64)
            hand_idx = h5f['hand_idx'][local_idx].astype(np.int64)
            reference_camera_idx = h5f['reference_camera_idx'][local_idx].astype(np.int64)

        # Choose which views to keep for this sample.
        selected_views, reference_camera_idx = self._select_views(view_mask, reference_camera_idx)

        # Restrict every per-view array to the selected views.
        images = images[selected_views]
        intrinsics = intrinsics[selected_views]
        extrinsics = extrinsics[selected_views]
        view_mask = view_mask[selected_views]

        # Re-express the reference index in the selected-view ordering;
        # fall back to view 0 if the reference did not survive selection.
        new_reference_idx = np.where(np.array(selected_views) == reference_camera_idx)[0]
        if len(new_reference_idx) > 0:
            reference_camera_idx = new_reference_idx[0]
        else:
            reference_camera_idx = 0

        # Convert to PyTorch tensors.
        images_tensor = torch.from_numpy(images)
        intrinsics_tensor = torch.from_numpy(intrinsics).reshape(-1, 3, 3)
        extrinsics_tensor = torch.from_numpy(extrinsics).reshape(-1, 4, 4)

        # world_to_camera for the reference view (inverse of camera_to_world).
        ref_camera_to_world = extrinsics_tensor[reference_camera_idx]
        world_to_ref_camera = torch.inverse(ref_camera_to_world)

        # Combined per-view transform used as model input.
        total_xfs_ = self._compute_total_xfs(intrinsics_tensor, extrinsics_tensor)

        sample = {
            'images': images_tensor,  # [NUM_SELECTED_VIEWS, 1, H, W] -- TODO confirm channel dim against writer
            'intrinsics': intrinsics_tensor,  # [NUM_SELECTED_VIEWS, 3, 3]
            'extrinsics': extrinsics_tensor,  # [NUM_SELECTED_VIEWS, 4, 4] camera_to_world
            'total_xfs_': total_xfs_,  # [NUM_SELECTED_VIEWS, 4, 4]; note the trailing underscore in the key
            'view_mask': torch.from_numpy(view_mask),  # [NUM_SELECTED_VIEWS]
            'hand_idx': torch.tensor(hand_idx),  # scalar
            'reference_camera_idx': torch.tensor(reference_camera_idx),  # scalar, selected-view space
            'world_to_ref_camera': world_to_ref_camera,  # [4, 4] world -> reference camera
            'label': torch.tensor(label, dtype=torch.long),  # classification label
        }

        if self.transform:
            sample = self.transform(sample)

        return sample

    def get_class_distribution(self):
        """Count samples per class across all batch files.

        Falls back to ``hand_idx`` when a file lacks a ``label`` dataset.
        Keys and counts are plain Python ints (not numpy scalars), so the
        result is printable/serializable without surprises.
        """
        class_counts = {}
        for file_path in self.file_paths:
            with h5py.File(file_path, 'r') as f:
                if 'label' in f:
                    labels = f['label'][:]
                else:
                    labels = f['hand_idx'][:]
            unique, counts = np.unique(labels, return_counts=True)
            for cls, count in zip(unique, counts):
                key = int(cls)
                class_counts[key] = class_counts.get(key, 0) + int(count)
        return class_counts

    def get_dataset_stats(self):
        """Return summary statistics for the dataset split."""
        return {
            'total_samples': self.total_samples,
            'num_files': len(self.file_paths),
            'split': self.split,
            'data_dir': os.path.join(self.root_dir, self.split)
        }


if __name__ == '__main__':
    from torch.utils.data import DataLoader

    # Build the training split.
    dataset = HandPoseDataset(
        root_dir=r"C:\workspace\yvr_data",
        split="training",
    )

    # Wrap it in a DataLoader for batched, shuffled iteration.
    dataloader = DataLoader(
        dataset,
        batch_size=32,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )

    # Inspect a single batch.
    batch = next(iter(dataloader))
    images = batch['images']        # (batch_size, NUM_VIEWS, 1, H, W)
    labels = batch['label']         # (batch_size,)
    view_mask = batch['view_mask']  # (batch_size, NUM_VIEWS)

    print(f"Batch images shape: {images.shape}")
    print(f"Batch labels: {labels}")

    # Report how the classes are distributed.
    class_dist = dataset.get_class_distribution()
    print("Class distribution:", class_dist)