import time

import h5py
import torch
from torch.utils.data import Dataset
import numpy as np
import os
import cv2
from glob import glob

import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.colors as mcolors


class HandDetectionDataset(Dataset):
    """Hand-detection dataset backed by batched HDF5 files.

    Every ``*.h5`` file under ``<data_root>/<usage>/detect_batches`` is
    expected to hold the datasets ``images``, ``centers``, ``scales``,
    ``confidences``, ``view_indices`` and ``frame_indices``, all indexed
    by sample along axis 0.

    Args:
        data_root (str): Root directory that contains the HDF5 files.
        usage (str): Dataset split, e.g. 'training', 'validation', 'testing'.
        transform (callable, optional): Optional transform applied to each
            sample's image tensor.
    """

    def __init__(self, data_root, usage="training", transform=None):
        super(HandDetectionDataset, self).__init__()
        self.data_root = data_root
        self.usage = usage
        self.transform = transform

        # Collect every HDF5 batch file for the requested split.
        self.file_paths = glob(os.path.join(data_root, usage, "detect_batches", "*.h5"))

        # Flat (file_idx, sample_idx) index so a global sample index can be
        # mapped onto the correct file and row in __getitem__.
        self.index_mapping = []
        for file_idx, file_path in enumerate(self.file_paths):
            with h5py.File(file_path, 'r') as f:
                num_samples = f['images'].shape[0]
                for sample_idx in range(num_samples):
                    self.index_mapping.append((file_idx, sample_idx))

        print(f"Loaded {len(self.index_mapping)} samples from {len(self.file_paths)} files")

    def __len__(self):
        """Total number of samples across all files."""
        return len(self.index_mapping)

    @staticmethod
    def _scalar_conf(conf_row):
        """Extract one hand's confidence as a plain float.

        Robust to rows shaped ``(1,)`` or ``(1, 1)``: the previous code mixed
        ``row[0]`` and ``row[0][0]``, which cannot both be valid for any one
        array shape.
        """
        return float(np.ravel(conf_row)[0])

    def __getitem__(self, idx):
        """Load one sample and return it as a dict of tensors.

        Returns:
            dict with keys ``image`` (float32, scaled to [0, 1]),
            ``centers`` (normalized to [0, 1] by image size), ``scales``,
            ``confidences`` (one trailing singleton dim added),
            ``view_index`` and ``frame_index``.
        """
        # Map the flat index onto (file, row).
        file_idx, sample_idx = self.index_mapping[idx]
        file_path = self.file_paths[file_idx]

        with h5py.File(file_path, 'r') as f:
            # Convert uint8 pixels to float32 in [0, 1].
            image = f['images'][sample_idx].astype(np.float32) / 255.0

            centers = f['centers'][sample_idx].astype(np.float32)
            scales = f['scales'][sample_idx].astype(np.float32)
            confidences = f['confidences'][sample_idx].astype(np.float32)
            # np.asarray guards against h5py handing back a bare numpy scalar
            # for 1-D index datasets — torch.from_numpy only accepts ndarrays.
            view_index = np.asarray(f['view_indices'][sample_idx], dtype=np.int64)
            frame_index = np.asarray(f['frame_indices'][sample_idx], dtype=np.int64)

            # Normalize centers by image size; assumes image is laid out as
            # (C, H, W) — TODO confirm against the HDF5 writer.
            _, H, W = image.shape
            centers[:, 0] /= W  # normalize x coordinate
            centers[:, 1] /= H  # normalize y coordinate

        # Convert everything to PyTorch tensors.
        image = torch.from_numpy(image)
        centers = torch.from_numpy(centers)
        scales = torch.from_numpy(scales)
        confidences = torch.from_numpy(confidences).unsqueeze(1)
        view_index = torch.from_numpy(view_index)
        frame_index = torch.from_numpy(frame_index)

        # Apply the optional transform to the image only.
        if self.transform:
            image = self.transform(image)

        return {
            'image': image,
            'centers': centers,
            'scales': scales,
            'confidences': confidences,
            'view_index': view_index,
            'frame_index': frame_index
        }

    def visualize(self, idx=None, sample=None, figsize=(10, 8)):
        """Plot a sample's image together with its hand bounding boxes.

        Args:
            idx: Sample index; if given, the sample is fetched from the dataset.
            sample: A sample dict as returned by ``__getitem__``.
            figsize: Matplotlib figure size.

        Returns:
            (fig, ax) of the created matplotlib figure.

        Raises:
            ValueError: If neither ``idx`` nor ``sample`` is provided.
        """
        if sample is None and idx is not None:
            sample = self[idx]
        elif sample is None:
            raise ValueError("必须提供idx或sample参数")

        # Image to (H, W) for grayscale or (H, W, C) for color display.
        image = sample['image'].numpy()
        if image.shape[0] == 1:  # grayscale
            image = image[0]  # (1, H, W) -> (H, W)
        else:
            image = np.transpose(image, (1, 2, 0))  # (C, H, W) -> (H, W, C)

        # Copy before denormalizing in place: tensor.numpy() shares memory
        # with the tensor, so mutating it would corrupt the caller's sample.
        centers = sample['centers'].numpy().copy()
        scales = sample['scales'].numpy()
        confidences = sample['confidences'].numpy()

        # Denormalize center coordinates back to pixels.
        H, W = image.shape[:2]
        centers[:, 0] *= W
        centers[:, 1] *= H

        # Collect boxes for hands with positive confidence. The hand's center
        # is carried in the tuple so the marker below stays aligned with its
        # box even when low-confidence hands are skipped (the old code indexed
        # `centers` with the filtered bbox index).
        bboxes = []
        for i in range(len(centers)):
            conf = self._scalar_conf(confidences[i])
            if conf > 0:  # only show hands with positive confidence
                bbox_w = scales[i, 0] * W
                bbox_h = scales[i, 1] * H

                # Top-left corner from center and box size.
                x1 = centers[i, 0] - bbox_w / 2
                y1 = centers[i, 1] - bbox_h / 2

                bboxes.append((x1, y1, bbox_w, bbox_h, conf, centers[i, 0], centers[i, 1]))

        # Color cycle for the boxes.
        colors = list(mcolors.TABLEAU_COLORS.values())

        fig, ax = plt.subplots(1, 1, figsize=figsize)
        ax.imshow(image, cmap='gray' if len(image.shape) == 2 else None)

        for i, (x, y, w, h, conf, cx, cy) in enumerate(bboxes):
            # Bounding box.
            rect = patches.Rectangle(
                (x, y), w, h,
                linewidth=2,
                edgecolor=colors[i % len(colors)],
                facecolor='none'
            )
            ax.add_patch(rect)

            # Confidence label just above the box.
            ax.text(
                x, y - 5,
                f'Hand {i + 1}: {conf:.2f}',
                color=colors[i % len(colors)],
                fontsize=12,
                bbox=dict(facecolor='white', alpha=0.7)
            )

            # Hand center marker.
            ax.plot(cx, cy, 'o', color=colors[i % len(colors)], markersize=8)

        ax.set_title(f"View {sample['view_index'].item()}, Frame {sample['frame_index'].item()}")
        plt.axis('off')
        plt.tight_layout()
        plt.show()

        return fig, ax

    def get_cv_image(self, idx=None, sample=None):
        """Render a sample as an OpenCV (BGR) image with boxes and labels.

        Args:
            idx: Sample index; if given, the sample is fetched from the dataset.
            sample: A sample dict as returned by ``__getitem__``.

        Returns:
            cv_image: Annotated image in OpenCV BGR channel order.

        Raises:
            ValueError: If neither ``idx`` nor ``sample`` is provided.
        """
        if sample is None and idx is not None:
            sample = self[idx]
        elif sample is None:
            raise ValueError("必须提供idx或sample参数")

        image = sample['image'].numpy()
        if image.shape[0] == 1:  # grayscale -> replicate to 3 channels
            image = np.repeat(image, 3, axis=0)
        image = np.transpose(image, (1, 2, 0))  # (C, H, W) -> (H, W, C)
        image = (image * 255).astype(np.uint8)  # back to 0-255 range

        # OpenCV expects BGR channel order.
        if image.shape[2] == 3:
            cv_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        else:
            cv_image = image

        # Copy before denormalizing in place — tensor.numpy() shares memory
        # with the caller's tensors (same reasoning as in visualize()).
        centers = sample['centers'].numpy().copy()
        scales = sample['scales'].numpy()
        confidences = sample['confidences'].numpy()

        # Denormalize center coordinates back to pixels.
        H, W = cv_image.shape[:2]
        centers[:, 0] *= W
        centers[:, 1] *= H

        # Box colors (BGR order).
        colors = [
            (255, 0, 0),  # blue
            (0, 255, 0),  # green
            (0, 0, 255),  # red
            (0, 255, 255),  # yellow
            (255, 0, 255),  # magenta
            (255, 255, 0),  # cyan
        ]

        # Draw boxes and labels.
        for i in range(len(centers)):
            conf = self._scalar_conf(confidences[i])
            if conf > 0.2:  # only draw reasonably confident detections
                bbox_w = scales[i, 0] * W
                bbox_h = scales[i, 1] * H

                # Box corners from center and size.
                x1 = int(centers[i, 0] - bbox_w / 2)
                y1 = int(centers[i, 1] - bbox_h / 2)
                x2 = int(x1 + bbox_w)
                y2 = int(y1 + bbox_h)

                color = colors[i % len(colors)]

                cv2.rectangle(cv_image, (x1, y1), (x2, y2), color, 2)

                # Filled dot at the hand center.
                center_x = int(centers[i, 0])
                center_y = int(centers[i, 1])
                cv2.circle(cv_image, (center_x, center_y), 4, color, -1)

                # Confidence label with a filled background for legibility.
                label = f'Hand {i + 1}: {conf:.2f}'

                (text_width, text_height), baseline = cv2.getTextSize(
                    label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)

                cv2.rectangle(
                    cv_image,
                    (x1, y1 - text_height - 5),
                    (x1 + text_width, y1),
                    color,
                    -1
                )

                cv2.putText(
                    cv_image,
                    label,
                    (x1, y1 - 5),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5,
                    (255, 255, 255),  # white text
                    1
                )

        # View/frame info in the top-left corner.
        info_text = f"View {sample['view_index'].item()}, Frame {sample['frame_index'].item()}"
        cv2.putText(
            cv_image,
            info_text,
            (10, 20),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.6,
            (0, 0, 0),  # black text
            2
        )

        return cv_image


# Example usage / smoke test
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    # Root directory of the converted dataset (adjust to your setup).
    data_root = r"C:\workspace\umetrack_data\torch_data"

    dataset = HandDetectionDataset(data_root, usage="training")

    # Show one sample with its detection boxes, then pause so the window
    # can be inspected.
    dataset.visualize(1)
    time.sleep(10)

    loader = DataLoader(
        dataset,
        batch_size=32,
        shuffle=True,
        num_workers=4,
        pin_memory=True,  # speeds up host-to-GPU transfers
    )

    # Iterate a few batches to sanity-check tensor shapes.
    for batch_idx, batch in enumerate(loader):
        print(f"Batch {batch_idx}:")
        print(f"  Images shape: {batch['image'].shape}")
        print(f"  Centers shape: {batch['centers'].shape}")
        print(f"  Scales shape: {batch['scales'].shape}")
        print(f"  Confidences shape: {batch['confidences'].shape}")

        if batch_idx >= 2:  # only the first few batches
            break
