import matplotlib.pyplot as plt
import numpy as np
import random
import os

from PIL.Image import Image

os.environ["TF_CPP_MIN_LOG_LEVEL"]='2' # suppress TF info logs; show only warnings and errors

from tensorflow.python.keras.utils import Sequence
import string
from tensorflow.python.keras.models import *
from tensorflow.python.keras.layers import *



class BaseSequence(Sequence):
    """Batch-wise data generator for Keras training.

    Each iteration yields one batch of (images, labels). A BaseSequence can be
    passed directly as the `generator` argument of `fit_generator`; Keras wraps
    it in a multi-process data stream and guarantees that, within one epoch,
    no sample is drawn twice even with multiple workers.
    """

    def __init__(self, img_paths, labels, batch_size, img_size):
        """
        Args:
            img_paths: sequence of image file paths, length N.
            labels: per-image label rows (e.g. one-hot vectors), shape (N, C).
            batch_size: number of samples per batch.
            img_size: target size; only img_size[0] is used (square resize).
        """
        # Stack paths and labels side by side (np.hstack concatenates along
        # axis 1) so that shuffling in on_epoch_end keeps each path aligned
        # with its label row.
        self.x_y = np.hstack((np.array(img_paths).reshape(len(img_paths), 1),
                              np.array(labels)))
        self.batch_size = batch_size
        self.img_size = img_size

    def __len__(self):
        # Number of batches per epoch; ceil so the final partial batch counts.
        # int(np.ceil(...)) replaces the deprecated `np.math.ceil` alias
        # (removed in NumPy >= 1.25) and guarantees an int return.
        return int(np.ceil(len(self.x_y) / self.batch_size))

    def preprocess_img(self, img_path):
        """Load one image, resize to (img_size[0], img_size[0]), and return a
        float32 RGB array with pixel values normalized to [0, 1]."""
        # Local import: the module-level `from PIL.Image import Image` binds
        # the Image *class*, which has no `open`; we need the PIL.Image module.
        from PIL import Image

        img = Image.open(img_path)
        # NOTE(review): this squashes the aspect ratio to a square. The
        # original computed (and discarded) a scale factor, hinting at an
        # aspect-preserving resize that was never implemented — confirm intent.
        img = img.resize((self.img_size[0], self.img_size[0]))
        img = img.convert('RGB')

        # Normalize pixel values to [0, 1].
        img = np.asarray(img, np.float32) / 255.0
        return img

    def __getitem__(self, idx):
        """Return batch number `idx` as (batch_x, batch_y).

        batch_x: float32 array of preprocessed images, shape (B, H, W, 3).
        batch_y: float32 array of label rows for the same samples.
        """
        batch_x = self.x_y[idx * self.batch_size: (idx + 1) * self.batch_size, 0]
        batch_y = self.x_y[idx * self.batch_size: (idx + 1) * self.batch_size, 1:]
        batch_x = np.array([self.preprocess_img(path) for path in batch_x])
        batch_y = np.array(batch_y).astype(np.float32)
        return batch_x, batch_y

    def on_epoch_end(self):
        # Called by Keras after each epoch: reshuffle rows (paths stay aligned
        # with their labels) so every epoch sees the samples in a new order.
        np.random.shuffle(self.x_y)

# animals = BaseSequence(img_paths, labels, batch_size, img_size)
# for i in range(10):
#     print(animals.__getitem__(i))