import os
from PIL import Image
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import numpy as np


# Bidirectional mapping between contiguous class indices [0, 200) and
# WordNet IDs: 200_wnids.txt has one wnid per line, and the line order
# defines the class index used everywhere in this module.
ind_to_labelname = dict()
labelname_to_ind = dict()
current_path = os.path.dirname(__file__)
with open(os.path.join(current_path, '200_wnids.txt'), encoding='utf-8') as f:
    for ind, line in enumerate(f):
        labelname = line.rstrip('\n')
        ind_to_labelname[ind] = labelname
        labelname_to_ind[labelname] = ind

# ind_to_rowy = dict()
# rowy_to_ind = dict()
# with open(os.path.join(current_path, './map_clsloc.txt')) as f:
#     for line in f:
#         line = line.rstrip('\n')
#         row_label, row_ind, label = line.split(' ')
#         ind_to_rowy[int(row_ind)] = row_label
#         rowy_to_ind[row_label] = int(row_ind)

class TinyImageNetDataset(Dataset):
    """Tiny ImageNet (200-class) dataset of 64x64 images.

    Expects ``root_dir/<split>/<wnid>/images/*`` for both the ``train``
    and ``val`` splits, i.e. a val set restructured into per-class
    directories like the train set.

    NOTE(review): the raw tiny-imagenet-200 download keeps val images
    flat under ``val/images`` with labels in ``val_annotations.txt``
    (see the commented-out loader that used to handle that layout) —
    confirm the val split has been reorganised before using
    ``data_type='val'``.
    """

    def __init__(self, root_dir='../../datasets/tiny-imagenet-200', transform=None, data_type='train'):
        """Index every image path and its class label for one split.

        Args:
            root_dir: path to the tiny-imagenet-200 directory.
            transform: optional callable applied to each image (an HWC
                uint8 numpy array) in ``__getitem__``.
            data_type: one of ``'train'``, ``'val'``, ``'test'``. Only
                ``'train'`` and ``'val'`` are supported; ``'val'``
                doubles as the test split.

        Raises:
            AssertionError: if ``data_type`` is not a recognised split.
            ValueError: if ``data_type == 'test'``.
        """
        self.root_dir = root_dir
        self.transform = transform
        assert data_type in ['train', 'val', 'test']

        if data_type in ('train', 'val'):
            self.data_dir = os.path.join(root_dir, data_type)
            self.image_list = []  # absolute/relative paths, one per sample
            self.labels = []      # int class index, parallel to image_list
            # Sort directory listings so the index -> sample mapping is
            # deterministic across runs and machines (os.listdir order
            # is filesystem-dependent).
            for class_dir in sorted(os.listdir(self.data_dir)):
                class_path = os.path.join(self.data_dir, class_dir)
                if not os.path.isdir(class_path):
                    # Skip stray files such as *_annotations.txt.
                    continue
                class_path_imgs = os.path.join(class_path, 'images')
                # Loop-invariant: every image in this directory shares
                # the same class index.
                label = labelname_to_ind[class_dir]
                for img_file in sorted(os.listdir(class_path_imgs)):
                    img_path = os.path.join(class_path_imgs, img_file)
                    assert os.path.exists(img_path)
                    self.image_list.append(img_path)
                    self.labels.append(label)
        else:
            raise ValueError("Only supports passing in either `train' or `val', and `val' can be used as `test'")

    def __len__(self):
        """Return the number of samples in this split."""
        return len(self.image_list)

    def __getitem__(self, idx):
        """Return the ``(image, label)`` pair for sample ``idx``.

        The image is loaded as RGB and converted to an HWC uint8 numpy
        array before the optional transform is applied; ``label`` is an
        int class index in ``[0, 200)``.
        """
        img_path = self.image_list[idx]
        image = Image.open(img_path).convert('RGB')
        image = np.array(image)
        if self.transform:
            image = self.transform(image)

        label = self.labels[idx]
        return image, label

# if __name__ == '__main__':
#     # 数据增强和转换
#     transform = transforms.Compose([
#         transforms.ToTensor(),
#         transforms.ToPILImage(),
#         transforms.Resize((64, 64)),  # Tiny ImageNet 中图像的大小
#         transforms.ToTensor(),
#     ])
#
#     # 实例化训练集和验证集
#     train_dataset = TinyImageNetDataset(root_dir='D:/PycharmProjects/datasets/tiny-imagenet-200', transform=transform, data_type='train')
#     val_dataset = TinyImageNetDataset(root_dir='D:/PycharmProjects/datasets/tiny-imagenet-200', transform=transform, data_type='val')
#
#     # 创建 DataLoader
#     train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
#     val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False)
#
#     # 示例：遍历数据
#     for images, labels in train_loader:
#         print(images.shape, labels)  # 打印出 batch 的形式
#         break