import torch
import torch.utils.data as data
from PIL import Image
import PIL
import os
import os.path
import numpy as np
import pandas as pd



class SBDDataset(data.Dataset):
    """Stanford Background Dataset loader yielding ``(image, label)`` pairs.

    Expected layout under ``data_dir``:
      - ``images_normliazed/``  — used only to enumerate sample filenames
        (NOTE(review): "normliazed" is presumably the literal on-disk
        directory name — confirm before "fixing" the spelling)
      - ``images/``             — images actually opened in ``__getitem__``
      - ``labels_raw/<name>.regions.txt`` — whitespace-separated label grids
    """

    def __init__(self, data_dir="./archive", transform=None):
        """
        Args:
            data_dir: dataset root directory.
            transform: optional callable applied to each PIL image
                (typically a ``torchvision.transforms`` pipeline).
        """
        self.transform = transform
        self.data_dir: str = data_dir
        # NOTE(review): names come from "images_normliazed" but pixels are
        # read from "images" in __getitem__ — the two directories must hold
        # identically named files. Verify against the dataset layout.
        self.images = os.listdir(self.data_dir + "/images_normliazed")

    def get_img(self, img_path):
        """Open ``img_path`` as RGB and apply ``self.transform`` if set."""
        img = Image.open(img_path).convert('RGB')
        if self.transform is not None:
            img: torch.Tensor = self.transform(img)
        return img

    def get_labels(self, label_path):
        """Parse a whitespace-separated label grid into an int64 tensor.

        Every raw value is shifted by +1 (so a raw ``-1`` "unknown" class
        becomes 0 and real classes start at 1 — TODO confirm the raw files
        actually use -1 for unknown).

        BUGFIX: the original converted each row over ``len(rows[0])``
        columns, silently leaving trailing tokens of longer rows as
        uncast strings; each row is now converted over its own length.
        ``torch.tensor`` still rejects ragged grids, as before.
        """
        with open(label_path) as f:
            rows = [[int(tok) + 1 for tok in line.split()] for line in f]
        return torch.tensor(rows)

    def __getitem__(self, index):
        """Return the ``(image, label)`` pair for sample ``index``."""
        key = self.images[index]
        image_name = key.split(".")[0]
        data_dir = self.data_dir

        img_name = '%s/images/%s' % (data_dir, key)
        label_path = '%s/labels_raw/%s.regions.txt' % (data_dir, image_name)

        img: torch.Tensor = self.get_img(img_name)
        label = self.get_labels(label_path)
        return img, label

    def __len__(self):
        """Number of samples found when the directory was listed."""
        return len(self.images)


# class CoCoDataset(data.Dataset):
#     def __init__(self, data_dir, split='train', imsize=256, transform=None):
#         self.transform = transform  # main函数里在实例化TextDataSet之前有一个transform.compose，就是这个玩意
#         self.imsize: int = imsize  # 这个imsize是在main里面导入的cfg.imsize
#         self.data: list = []  # 这玩意我不知道有什么用，我先留着。
#         self.data_dir: str = data_dir  # 理解为根目录
#         split_dir: str = os.path.join(data_dir, split)  # 根目录下的split目录
#         self.split_dir = split_dir
#         self.images = os.listdir(self.split_dir + "/image")

#     def get_img(self, img_path):
#         img = Image.open(img_path).convert('RGB')
#         load_size = int(self.imsize * 300 / 256)  # 后面的transform里面有随机截取，所以这里先放大到76*76。
#         img = img.resize((load_size, load_size))  # 图像resize+插值
#         if self.transform is not None:
#             img: torch.Tensor = self.transform(img)  # transform在main里面有定义。
#         return img  # transform之后的img是一个torch.Tensor类型的3*64*64的张量。

#     # def load_embedding(self, data_dir):
#     #     embedding_path = os.path.join(data_dir, 'embeddings.pkl')
#     #     with open(embedding_path, 'rb') as f:
#     #         embeddings = pickle.load(f)
#     #         embeddings = np.array(embeddings)
#     #         print('embeddings: ', embeddings.shape)
#     #     return embeddings

#     # def load_filenames(self, data_dir):
#     #     filepath = os.path.join(data_dir, 'filenames.pkl')
#     #     with open(filepath, 'rb') as f:
#     #         filenames = pickle.load(f)
#     #     print('Load filenames from: %s (%d)' % (filepath, len(filenames)))
#     #     return filenames

#     def __getitem__(self, index):
#         key = self.images[index]
#         data_dir = self.split_dir
#         # embedding: np.ndarray = self.embeddings[index, :]  # np.ndarray，输出的embedding是一维的。
#         img_name = '%s/image/%s' % (data_dir, key)
#         # print(img_name)
#         img: torch.Tensor = self.get_img(img_name)  # 这个img是用的Image.open打开的，用的是原程序的方式。
#         return img, 1

#     def __len__(self):
#         return len(self.images)