import os
from random import shuffle

import numpy as np
from PIL import Image
from torch.utils.data.dataset import Dataset

from utils.train.imageUtils import letterbox_image_mask, get_random_data


class Dataset(Dataset):
    """Semantic-segmentation dataset over a VOC-style directory layout.

    Each annotation line's first whitespace-separated token is a sample
    name; the image is read from ``<dataset_path>/JPEGImages/<name>.jpg``
    and the label mask from ``<dataset_path>/SegmentationClass/<name>.png``.
    ``__getitem__`` returns ``(image_chw, mask_hw, one_hot_hwc)``.

    NOTE(review): this class shadows the imported
    ``torch.utils.data.Dataset``; renaming it would change the public
    interface, so the name is kept as-is.
    """

    def __init__(self, train_lines, image_size, num_classes, random_data, dataset_path):
        """
        Args:
            train_lines: annotation lines, one sample per line; the first
                token of each line is the sample's file stem.
            image_size: 2-sequence of target dimensions; the code passes
                ``(image_size[1], image_size[0])`` to the resize helpers
                (presumably (width, height) -> (height, width) — TODO confirm
                against ``letterbox_image_mask``/``get_random_data``).
            num_classes: number of segmentation classes (one-hot depth).
            random_data: if truthy, apply random augmentation via
                ``get_random_data``; otherwise deterministic letterboxing.
            dataset_path: root directory containing ``JPEGImages`` and
                ``SegmentationClass``.
        """
        super(Dataset, self).__init__()

        self.train_lines = train_lines
        self.train_batches = len(train_lines)
        self.image_size = image_size
        self.num_classes = num_classes
        self.random_data = random_data
        self.dataset_path = dataset_path

    def __len__(self):
        """Return the number of annotation lines (samples)."""
        return self.train_batches

    @staticmethod
    def _load_image(path):
        """Open *path*, force pixel data into memory, and close the file.

        Fixes the file-descriptor leak of a bare ``Image.open``: PIL opens
        lazily and otherwise keeps the underlying file handle alive for the
        lifetime of the image object.
        """
        with Image.open(path) as img:
            img.load()
        return img

    def __getitem__(self, index):
        """Load, resize, and encode the sample at *index*.

        Returns:
            jpg: image as a ``(C, H, W)`` uint8-ish array (HWC -> CHW).
            png: integer class mask, values clamped to ``num_classes - 1``.
            seg_labels: one-hot mask of shape ``(H, W, num_classes)``.
        """
        # NOTE(review): in-place shuffling here only produces a full shuffle
        # with a sequential sampler that visits index 0 first; under
        # DataLoader(shuffle=True) or multiple workers this is unreliable.
        # Kept as-is for backward compatibility.
        if index == 0:
            shuffle(self.train_lines)

        annotation_line = self.train_lines[index]
        name = annotation_line.split()[0]

        # Read the image and its label mask from disk.
        jpg = self._load_image(os.path.join(self.dataset_path, "JPEGImages", name + ".jpg"))
        png = self._load_image(os.path.join(self.dataset_path, "SegmentationClass", name + ".png"))

        # Resize (with optional random augmentation) to the target size.
        target_size = (int(self.image_size[1]), int(self.image_size[0]))
        if self.random_data:
            jpg, png = get_random_data(jpg, png, target_size)
        else:
            jpg, png = letterbox_image_mask(jpg, png, target_size)

        # Clamp out-of-range label ids (e.g. the 255 "ignore" white border in
        # VOC masks) down to the last valid class index, num_classes - 1.
        # (The previous comment described a num_classes + 1 one-hot scheme;
        # the code has never done that — it folds ignore pixels into the
        # last class instead.)
        png = np.array(png)
        png[png >= self.num_classes - 1] = self.num_classes - 1

        # One-hot encode the mask: (H*W,) -> (H, W, num_classes).
        seg_labels = np.eye(self.num_classes)[png.reshape([-1])]
        seg_labels = seg_labels.reshape((int(self.image_size[1]), int(self.image_size[0]), self.num_classes))

        # HWC -> CHW, the channel layout PyTorch expects.
        jpg = np.transpose(np.array(jpg), [2, 0, 1])

        return jpg, png, seg_labels
