import torch
from torchvision import datasets, transforms
from collections import defaultdict

# Preprocessing pipeline: convert each PIL image into a torch tensor.
to_tensor = transforms.ToTensor()
transform = transforms.Compose([to_tensor])

# Load the PASCAL VOC 2012 detection dataset (trainval split).
voc_dataset = datasets.VOCDetection(
    root='./data',  # root directory where the dataset is stored
    year='2012',
    image_set='trainval',  # use the combined train + validation split
    download=False,  # do not download if the dataset is missing locally
    transform=transform
)

# The 20 PASCAL VOC object-category names, in their conventional order.
# Each name is mapped to its position (0-19) to serve as a class index.
voc_classes = [
    "aeroplane", "bicycle", "bird", "boat",
    "bottle", "bus", "car", "cat",
    "chair", "cow", "diningtable", "dog",
    "horse", "motorbike", "person", "pottedplant",
    "sheep", "sofa", "train", "tvmonitor"
]
class_to_idx = dict(zip(voc_classes, range(len(voc_classes))))

# Collect every distinct label index observed over the first n samples.
all_label_indices = set()

# Number of samples to inspect.
n = 10

# Iterate over the first n samples, printing per-sample stats and
# accumulating the class indices of all annotated objects.
for i in range(n):
    image, target = voc_dataset[i]
    objects = target['annotation']['object']
    # The XML-to-dict parsing can yield a single dict instead of a list
    # when an image has exactly one annotated object; normalize to a list
    # once so the code below needs only one path (the original duplicated
    # the list/dict branching twice).
    if not isinstance(objects, list):
        objects = [objects]

    # Image size: tensor shape after the ToTensor() transform.
    image_size = image.shape
    # The number of labels equals the number of annotated objects.
    label_size = len(objects)

    print(f"第 {i + 1} 个样本 - 图像大小: {image_size}, 标签数量: {label_size}")

    # Record the class index of each recognized object name.
    for obj in objects:
        label = obj['name']
        if label in class_to_idx:
            all_label_indices.add(class_to_idx[label])

# Print the sorted range of label indices seen in the first n samples.
print("前{}个图像样本的标签分类值范围：".format(n), sorted(all_label_indices))