import os
from PIL import Image
import torch
import numpy as np
from torchvision import transforms

class VOCSegmentation(torch.utils.data.Dataset):
    """PASCAL VOC semantic-segmentation dataset.

    Loads (image, label) pairs from the standard VOC devkit layout:
    ``JPEGImages/<id>.jpg`` and ``SegmentationClass/<id>.png``, with split
    files under ``ImageSets/Segmentation/<image_set>.txt``.

    Returns:
        image: the RGB image, passed through ``transform`` if given.
        label: a (224, 224) LongTensor of class ids in [0, 20]; ids outside
            that range (e.g. the VOC ignore id 255) are mapped to background 0.
    """

    def __init__(self, root, image_set='train', transform=None, image_ids=None):
        """
        Args:
            root: VOC devkit root (directory containing ``JPEGImages/``).
            image_set: split name, e.g. ``'train'`` or ``'val'``.
            transform: optional transform applied to the RGB image only.
            image_ids: explicit list of sample ids; when ``None``, ids are read
                from ``ImageSets/Segmentation/<image_set>.txt``.
        """
        self.root = root
        self.image_set = image_set
        self.transform = transform
        # Resize labels with nearest-neighbour interpolation so class ids are
        # never blended into non-existent intermediate values.
        # NOTE: ToTensor() is deliberately NOT used on labels — it rescales
        # pixel values to floats in [0, 1], so a later .long() would collapse
        # every class id to 0 or 1 and destroy the label map.
        self.label_transform = transforms.Resize(
            (224, 224), interpolation=transforms.InterpolationMode.NEAREST)

        # Build image/label file paths.
        image_dir = os.path.join(self.root, 'JPEGImages')
        label_dir = os.path.join(self.root, 'SegmentationClass')

        if image_ids is None:
            image_ids = []
            with open(os.path.join(self.root, 'ImageSets', 'Segmentation',
                                   image_set + '.txt')) as f:
                for line in f:
                    image_ids.append(line.strip())

        self.images = []
        self.labels = []
        for image_id in image_ids:
            self.images.append(os.path.join(image_dir, image_id + '.jpg'))
            self.labels.append(os.path.join(label_dir, image_id + '.png'))

    @staticmethod
    def _sanitize_label(label):
        """Return *label* as a LongTensor with ids outside [0, 20] set to
        background (0) — e.g. the VOC boundary/ignore id 255."""
        label = label.long()
        label[(label < 0) | (label > 20)] = 0
        return label

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        image = Image.open(self.images[index]).convert('RGB')
        # Keep the label in its native palette ('P') mode so each pixel value
        # is the raw integer class id.
        label = Image.open(self.labels[index])

        if self.transform is not None:
            image = self.transform(image)

        label = self.label_transform(label)
        # Convert the PIL label to an integer tensor WITHOUT rescaling
        # (ToTensor would divide by 255 and lose the class ids).
        label = torch.from_numpy(np.array(label, dtype=np.int64))

        return image, self._sanitize_label(label)