import math
import os
from random import shuffle

import cv2
import numpy as np

from tensorflow import keras

from utils.utils import cvtColor, preprocess_input
from osgeo import gdal


class UnetDataset(keras.utils.Sequence):
    """Keras Sequence yielding (image, one-hot label) batches for U-Net training.

    Images are GeoTIFFs read with GDAL from ``VOC2007/JPEGImages``; labels are
    single-channel PNGs from ``VOC2007/SegmentationClass`` under *dataset_path*.
    """

    def __init__(self, annotation_lines, input_shape, batch_size, num_classes, train, dataset_path):
        # annotation_lines: list of strings whose first whitespace-separated
        # token is the sample name (file stem, no extension).
        self.annotation_lines = annotation_lines
        self.length = len(self.annotation_lines)
        # input_shape: (height, width) used to reshape the one-hot label map.
        self.input_shape = input_shape
        self.batch_size = batch_size
        self.num_classes = num_classes
        self.train = train
        # dataset_path: directory containing the VOC2007 folder.
        self.dataset_path = dataset_path

    def __len__(self):
        """Number of batches per epoch (partial last batch counts as one)."""
        return math.ceil(len(self.annotation_lines) / float(self.batch_size))

    def _load_pair(self, name):
        """Read and encode one (image, one-hot label) pair for sample *name*.

        Returns ``(jpg, seg_labels)`` where ``jpg`` is the preprocessed
        float64 image in HWC layout and ``seg_labels`` has shape
        ``(input_shape[0], input_shape[1], num_classes + 1)``; the extra
        channel absorbs VOC boundary/void pixels (values >= num_classes)
        so they can be ignored by the loss.
        """
        # ---- image: GeoTIFF via GDAL; ReadAsArray gives bands-first (CHW),
        #      transpose to HWC for the network. ----
        dataset_img = gdal.Open(os.path.join(self.dataset_path, "VOC2007/JPEGImages", name + ".tif"))
        width = dataset_img.RasterXSize
        height = dataset_img.RasterYSize
        jpg = dataset_img.ReadAsArray(0, 0, width, height).transpose([1, 2, 0])
        # Cast to float64 before normalization so both the Sequence and the
        # legacy generator paths preprocess identically.
        jpg = preprocess_input(np.array(jpg, np.float64))

        # ---- label: grayscale PNG read with OpenCV (flag 0 = grayscale). ----
        png = cv2.imread(os.path.join(self.dataset_path, "VOC2007/SegmentationClass", name + ".png"), 0)
        # Clamp boundary/void pixels into the extra "ignore" class index.
        png[png >= self.num_classes] = self.num_classes
        # One-hot encode; the +1 channel is the ignored boundary class.
        seg_labels = np.eye(self.num_classes + 1)[png.reshape([-1])]
        seg_labels = seg_labels.reshape((int(self.input_shape[0]), int(self.input_shape[1]), self.num_classes + 1))
        return jpg, seg_labels

    def __getitem__(self, index):
        """Return batch *index* as a pair of stacked numpy arrays."""
        images = []
        targets = []
        for i in range(index * self.batch_size, (index + 1) * self.batch_size):
            # Wrap around so the final batch is always full.
            i = i % self.length
            name = self.annotation_lines[i].split()[0]
            jpg, seg_labels = self._load_pair(name)
            images.append(jpg)
            targets.append(seg_labels)
        return np.array(images), np.array(targets)

    def generate(self):
        """Infinite batch generator (legacy ``fit_generator``-style API)."""
        i = 0
        while True:
            images = []
            targets = []
            for _ in range(self.batch_size):
                if i == 0:
                    # Reshuffle at the start of every pass over the dataset.
                    np.random.shuffle(self.annotation_lines)
                name = self.annotation_lines[i].split()[0]
                jpg, seg_labels = self._load_pair(name)
                images.append(jpg)
                targets.append(seg_labels)
                i = (i + 1) % self.length
            yield np.array(images), np.array(targets)

    def on_epoch_end(self):
        """Shuffle sample order between epochs (invoked by Keras)."""
        shuffle(self.annotation_lines)
