import os
import torchvision.transforms as transforms
from PIL import Image
from torch.utils import data
from data import custom_transform as tr


class Refuge(data.Dataset):
    """REFUGE fundus-image segmentation dataset.

    Expected on-disk layout::

        root/<split>/...             -> input images (.jpg / .bmp)
        root/Annotation_<split>/...  -> .bmp label maps, mirroring the
                                        image sub-directory names

    Label maps use grey values 128 (optic disc), 0 (optic cup) and
    255 (invalid region).
    # NOTE(review): the disc/cup/invalid mapping is taken from the
    # original inline comment — confirm against the annotation files.
    """

    def __init__(self, root, split='Training'):
        """
        Args:
            root:  dataset root directory.
            split: sub-directory name of the split (default 'Training').
        """
        self.root = root
        self.split = split
        train_dir = os.path.join(self.root, self.split)
        self.annotation = os.path.join(self.root, "Annotation_" + self.split)
        # Image paths keyed by split name, gathered eagerly at init time.
        self.file = {self.split: self.get_data(train_dir)}
        self.train_transforms = transforms.Compose([
            tr.RandomCrop(700, 700),
            tr.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
            tr.ToTensor(),
        ])

    def get_data(self, root):
        """Recursively collect all .jpg/.bmp file paths under *root*."""
        # os.path.join(root) with one argument was a no-op and is dropped;
        # 'file' renamed to avoid shadowing the builtin.
        return [os.path.join(cur_dir, fname)
                for cur_dir, _, files in os.walk(root)
                for fname in files
                if fname.endswith(('.jpg', '.bmp'))]

    def encode_seg(self, label):
        """Map raw label grey values to class codes, in place.

        128 -> 100, 255 -> 200; all other values (including 0) pass
        through unchanged.  Returns the mutated *label* for convenience.
        """
        # The original 'label[label == 0] = 0' was a no-op and is dropped.
        label[label == 128] = 100
        label[label == 255] = 200
        return label

    def __len__(self):
        return len(self.file[self.split])

    def __getitem__(self, index):
        img_path = self.file[self.split][index].rstrip()
        # Label lives at Annotation_<split>/<same sub-dir>/<stem>.bmp.
        sub_dir = os.path.basename(os.path.dirname(img_path))
        # splitext (not split('.')) keeps stems with embedded dots intact.
        stem = os.path.splitext(os.path.basename(img_path))[0]
        label_path = os.path.join(self.annotation, sub_dir, stem + '.bmp')

        # NOTE(review): images are opened without .convert('RGB'); the
        # custom transforms presumably handle mode — confirm.
        img = Image.open(img_path)
        label = Image.open(label_path)
        # Label grey values: 128 = optic disc, 0 = optic cup, 255 = invalid.
        train_set = {'image': img, 'label': label}
        train_set = self.train_transforms(train_set)

        return train_set


