import os  
import torch  
from torch.utils.data import Dataset  
from PIL import Image  
from torchvision import transforms, datasets, models
import numpy as np
from config import opt



class MultiLabelImageDatasetTest(Dataset):
    """Multi-label image dataset whose labels are encoded in the filenames.

    Each filename is a '-'-separated list of tokens; the label of a token is
    the text before its first '_' (e.g. ``"catA_1-dogB_2.jpg"`` yields the
    labels ``catA`` and ``dogB``).  Labels not present in the configured
    class vocabulary are silently ignored.
    """

    # Recognized image file extensions (compared case-insensitively).
    _IMAGE_EXTENSIONS = ('.png', '.jpg', '.jpeg', '.bmp', '.tiff')

    def __init__(self, root_dir, transform=None):
        """
        Args:
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.root_dir = root_dir
        self.transform = transform
        # Class vocabulary from the project config; assumed to be a sequence
        # of label strings -- TODO confirm the type of opt.all_sig.
        self.classes = opt.all_sig
        self.images = []
        self.filenames = []
        # Sort the directory listing: os.listdir order is platform-dependent,
        # and sorting makes the idx -> sample mapping reproducible.
        for filename in sorted(os.listdir(root_dir)):
            if filename.lower().endswith(self._IMAGE_EXTENSIONS):
                self.images.append(os.path.join(root_dir, filename))
                self.filenames.append(filename)

    def __len__(self):
        """Return the number of images found in ``root_dir``."""
        return len(self.images)

    def _labels_from_filename(self, filename):
        """Parse label names out of *filename*: '-'-separated tokens, label
        is the text before each token's first '_'."""
        return [token.split("_")[0] for token in filename.split("-")]

    def __getitem__(self, idx):
        """Return ``{"image_tensors": image, "labels": {class: 0/1, ...}}``
        for the sample at *idx*."""
        image = Image.open(self.images[idx]).convert('RGB')

        if self.transform:
            image = self.transform(image)

        # One-hot encode the labels against the class vocabulary; labels not
        # in self.classes are dropped.
        one_hot = np.zeros(len(self.classes), dtype=int)
        for label in self._labels_from_filename(self.filenames[idx]):
            if label in self.classes:
                one_hot[self.classes.index(label)] = 1

        return {
            "image_tensors": image,
            "labels": dict(zip(self.classes, one_hot)),
        }