"""数据生成器"""
import numpy as np
import math
from keras.utils import Sequence
from keras.utils import np_utils
from PIL import Image
from keras.preprocessing.image import ImageDataGenerator
from keras.applications import backend
from keras.applications import layers
from keras.applications import models
from keras.applications import utils
from sklearn.model_selection import train_test_split

import os
from glob import glob
import random
import codecs

import cv2

# Default keras submodules; callers may override them per call via the
# **kwargs of the preprocessing helpers below.
_KERAS_BACKEND = backend
_KERAS_LAYERS = layers
_KERAS_MODELS = models
_KERAS_UTILS = utils
# Global tensor holding the ImageNet channel means, used when preprocessing
# symbolic inputs; lazily created in _preprocess_symbolic_input.
_IMAGENET_MEAN = None

def get_submodules_from_kwargs(kwargs):
    """Resolve the keras submodules to use for a preprocessing call.

    Args:
        kwargs: dict that may contain 'backend', 'layers', 'models' and/or
            'utils' overrides; missing entries fall back to the module-level
            defaults.

    Returns:
        Tuple ``(backend, layers, models, utils)``.

    Raises:
        TypeError: if ``kwargs`` contains any other key.
    """
    backend = kwargs.get('backend', _KERAS_BACKEND)
    layers = kwargs.get('layers', _KERAS_LAYERS)
    models = kwargs.get('models', _KERAS_MODELS)
    utils = kwargs.get('utils', _KERAS_UTILS)
    for key in kwargs:
        if key not in ('backend', 'layers', 'models', 'utils'):
            # Bug fix: format the key into the message with %; passing it as
            # a second exception argument produced a tuple-shaped message.
            raise TypeError('Invalid keyword argument: %s' % key)
    return backend, layers, models, utils

def _preprocess_numpy_input(x, data_format, mode, **kwargs):
    """Preprocess a Numpy array encoding a batch of images.

    Args:
        x: 3D (single image) or 4D (batch) array with RGB channels.
        data_format: 'channels_first' or 'channels_last'.
        mode: 'tf' scales pixels to [-1, 1]; 'torch' scales to [0, 1] then
            normalizes by the ImageNet mean/std; any other value takes the
            'caffe' path (RGB->BGR, zero-center by ImageNet mean, no scaling).
        **kwargs: optional keras submodule overrides
            (see ``get_submodules_from_kwargs``).

    Returns:
        The preprocessed array. NOTE: float inputs are modified in place
        (the cast below uses copy=False).
    """
    backend, _, _, _ = get_submodules_from_kwargs(kwargs)
    if not issubclass(x.dtype.type, np.floating):
        # Cast integer images to the backend float type; copy=False means a
        # float input is mutated in place by the ops below.
        x = x.astype(backend.floatx(), copy=False)
    if mode == 'tf':
        # Scale [0, 255] -> [-1, 1].
        x /= 127.5
        x -= 1.
        return x
    if mode == 'torch':
        # Scale to [0, 1]; ImageNet per-channel mean/std in RGB order.
        x /= 255.
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    else:
        if data_format == 'channels_first':
            # 'RGB'->'BGR' (channel axis is 0 for a single image, 1 for a batch).
            if x.ndim == 3:
                x = x[::-1, ...]
            else:
                x = x[:, ::-1, ...]
        else:
            # 'RGB'->'BGR'
            x = x[..., ::-1]
        # ImageNet per-channel mean in BGR order, on the [0, 255] scale.
        mean = [103.939, 116.779, 123.68]
        std = None
    # Zero-center by mean pixel (and scale by std for 'torch' mode).
    if data_format == 'channels_first':
        if x.ndim == 3:
            x[0, :, :] -= mean[0]
            x[1, :, :] -= mean[1]
            x[2, :, :] -= mean[2]
            if std is not None:
                x[0, :, :] /= std[0]
                x[1, :, :] /= std[1]
                x[2, :, :] /= std[2]
        else:
            x[:, 0, :, :] -= mean[0]
            x[:, 1, :, :] -= mean[1]
            x[:, 2, :, :] -= mean[2]
            if std is not None:
                x[:, 0, :, :] /= std[0]
                x[:, 1, :, :] /= std[1]
                x[:, 2, :, :] /= std[2]
    else:
        x[..., 0] -= mean[0]
        x[..., 1] -= mean[1]
        x[..., 2] -= mean[2]
        if std is not None:
            x[..., 0] /= std[0]
            x[..., 1] /= std[1]
            x[..., 2] /= std[2]
    return x

def _preprocess_symbolic_input(x, data_format, mode, **kwargs):
    """Preprocess a symbolic tensor encoding a batch of images.

    Same modes as ``_preprocess_numpy_input`` ('tf', 'torch', default
    'caffe'-style), implemented with backend ops so it works on graph
    tensors.

    NOTE(review): _IMAGENET_MEAN is cached from the FIRST call's mode; a
    later call with a different mode ('torch' vs 'caffe') silently reuses
    the wrong mean constant — confirm callers only ever use one mode.
    """
    global _IMAGENET_MEAN
    backend, _, _, _ = get_submodules_from_kwargs(kwargs)
    if mode == 'tf':
        # Scale [0, 255] -> [-1, 1].
        x /= 127.5
        x -= 1.
        return x
    if mode == 'torch':
        x /= 255.
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    else:
        if data_format == 'channels_first':
            # 'RGB'->'BGR'
            if backend.ndim(x) == 3:
                x = x[::-1, ...]
            else:
                x = x[:, ::-1, ...]
        else:
            # 'RGB'->'BGR'
            x = x[..., ::-1]
        mean = [103.939, 116.779, 123.68]
        std = None
    if _IMAGENET_MEAN is None:
        # Negated so bias_add (which adds) performs the subtraction.
        _IMAGENET_MEAN = backend.constant(-np.array(mean))
    # Zero-center by mean pixel, casting the constant to x's dtype if needed.
    if backend.dtype(x) != backend.dtype(_IMAGENET_MEAN):
        x = backend.bias_add(
            x, backend.cast(_IMAGENET_MEAN, backend.dtype(x)),
            data_format=data_format)
    else:
        x = backend.bias_add(x, _IMAGENET_MEAN, data_format)
    if std is not None:
        # NOTE(review): dividing by a Python list presumably broadcasts over
        # the last axis only — verify for channels_first inputs.
        x /= std
    return x

def preprocess_input(x, data_format=None, mode='caffe', **kwargs):
    """Preprocess a tensor or Numpy array encoding a batch of images.

    Dispatches to the numpy or the symbolic implementation depending on the
    type of ``x``; see those helpers for the meaning of ``mode``.
    """
    backend, _, _, _ = get_submodules_from_kwargs(kwargs)
    if data_format is None:
        data_format = backend.image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ' + str(data_format))
    # Concrete arrays are handled eagerly, everything else symbolically.
    handler = (_preprocess_numpy_input if isinstance(x, np.ndarray)
               else _preprocess_symbolic_input)
    return handler(x, data_format=data_format, mode=mode, **kwargs)

class Cutout(object):
    """Randomly mask out one or more square patches from an image.

    Args:
        n_holes (int): number of patches to cut out of each image.
        length (int): side length (in pixels) of each square patch.
    """

    def __init__(self, n_holes, length):
        self.n_holes = n_holes
        self.length = length

    def __call__(self, img):
        height = img.shape[0]
        width = img.shape[1]
        channels = img.shape[2]
        mask = np.ones((height, width, channels), np.float32)
        half = self.length // 2
        for _ in range(self.n_holes):
            center_y = np.random.randint(height)
            center_x = np.random.randint(width)
            # Clamp the patch so it never extends past the image bounds.
            top = np.clip(center_y - half, 0, height)
            bottom = np.clip(center_y + half, 0, height)
            left = np.clip(center_x - half, 0, width)
            right = np.clip(center_x + half, 0, width)
            mask[top:bottom, left:right, :] = 0.
        # Zero out the masked pixels (result is float because mask is float32).
        return img * mask

class DataGenerator(Sequence):
    """Batch data generator.

    Each iteration yields one (images, labels) batch; instances can be used
    directly as the ``generator`` argument of ``fit_generator``.
    """
    def __init__(self, img_paths, labels, batch_size, img_size, is_train):
        assert len(img_paths) == len(labels), "len(img_paths) must equal to len(labels)"
        assert img_size[0] == img_size[1], "img_size[0] must equal to img_size[1]"
        # Paths and one-hot labels stacked side by side so they shuffle
        # together; shape (num_samples, 1 + num_classes), dtype str.
        self.x_y = np.hstack((np.array(img_paths).reshape(len(img_paths), 1), np.array(labels)))
        self.batch_size = batch_size
        self.img_size = img_size
        self.is_train = is_train
        if self.is_train:
            # Augmentation pipeline applied only to training batches.
            self.train_datagen = ImageDataGenerator(
                rotation_range=30,      # random rotation range (degrees)
                width_shift_range=0.2,  # horizontal shift, fraction of width
                height_shift_range=0.2, # vertical shift, fraction of height
                shear_range=0.2,        # shear intensity (counter-clockwise angle)
                zoom_range=0.2,         # random zoom range
                horizontal_flip=True,   # random horizontal flips
                vertical_flip=True,     # random vertical flips
                fill_mode='nearest'
            )

    def __len__(self):
        """Number of batches per epoch (last batch may be short)."""
        return math.ceil(len(self.x_y) / self.batch_size)

    @staticmethod
    def center_img(img, size=None, fill_value=255):
        """Center ``img`` on a square background filled with ``fill_value``."""
        h, w = img.shape[:2]
        if size is None:
            size = max(h, w)
        # (size, size) plus the channel dims, if any.
        shape = (size, size) + img.shape[2:]
        background = np.full(shape, fill_value, np.uint8)
        center_x = (size - w) // 2
        center_y = (size - h) // 2
        background[center_y:center_y + h, center_x:center_x + w] = img
        return background

    def preprocess_img(self, img_path):
        """Load an image, resize to 256x256 RGB and center-crop to 224x224."""
        img = Image.open(img_path)
        img = img.resize((256, 256))
        img = img.convert('RGB')
        img = np.array(img)
        img = img[16:16 + 224, 16:16 + 224]
        return img

    def cutout_img(self, img):
        """Apply random cutout: one 40x40 hole."""
        cut_out = Cutout(n_holes=1, length=40)
        return cut_out(img)

    def __getitem__(self, idx):
        # Image paths for this batch.
        batch_x = self.x_y[idx * self.batch_size: (idx + 1) * self.batch_size, 0]
        # One-hot labels for this batch (stored as strings in x_y).
        batch_y = self.x_y[idx * self.batch_size: (idx + 1) * self.batch_size, 1:]
        # Decode images into a (batch, 224, 224, 3) pixel array.
        batch_x = np.array([self.preprocess_img(img_path) for img_path in batch_x])
        # Label smoothing (eps=0.05). The hard-coded 40 is presumably the
        # number of classes — TODO confirm it matches num_classes upstream.
        batch_y = np.array(batch_y).astype(np.float32) * (1 - 0.05) + 0.05 / 40
        # Training-set augmentation.
        if self.is_train:
            # Per-sample choice: 0 = keep, 1 = cutout, 2 = keras augmentation.
            indexs = np.random.choice([0, 1, 2], batch_x.shape[0], replace=True, p=[0.4, 0.4, 0.2])
            mask_indexs = np.where(indexs == 1)
            multi_indexs = np.where(indexs == 2)
            # Bug fix: np.where returns a 1-tuple, so len(multi_indexs) was
            # always 1 and the branch ran even for an empty selection; test
            # the selected index array instead (as the cutout branch does).
            if len(multi_indexs[0]):
                # Random affine augmentation via ImageDataGenerator.
                multipy_batch_x = batch_x[multi_indexs]
                multipy_batch_y = batch_y[multi_indexs]
                train_datagenerator = self.train_datagen.flow(multipy_batch_x, multipy_batch_y,
                                                              batch_size=self.batch_size)
                # Py3-compatible: use next() instead of the .next() method.
                (multipy_batch_x, multipy_batch_y) = next(train_datagenerator)
                batch_x[multi_indexs] = multipy_batch_x
                batch_y[multi_indexs] = multipy_batch_y
            if len(mask_indexs[0]):
                # Random occlusion (cutout); labels are unchanged.
                mask_batch_x = batch_x[mask_indexs]
                mask_batch_x = np.array([self.cutout_img(img) for img in mask_batch_x])
                batch_x[mask_indexs] = mask_batch_x
        # Channel-wise preprocessing ('caffe' mode by default).
        batch_x = np.array([preprocess_input(img) for img in batch_x])

        return batch_x, batch_y

    def on_epoch_end(self):
        """Shuffle samples (paths together with their labels) each epoch."""
        np.random.shuffle(self.x_y)


# Load the training label files and build the model's data sequences.
def gendata(train_data_dir, batch_size, num_classes, input_size):
    """Scan ``train_data_dir`` for label .txt files and build the training
    and validation generators (stratified 85%/15% split)."""
    label_files = glob(os.path.join(train_data_dir, '*.txt'))
    random.shuffle(label_files)
    img_paths = []
    labels = []
    for file_path in label_files:
        with codecs.open(file_path, 'r', 'utf-8') as f:
            line = f.readline()
        parts = line.strip().split(', ')
        if len(parts) != 2:
            print('%s contain error lable' % os.path.basename(file_path))
            continue
        img_name, label_str = parts
        img_paths.append(os.path.join(train_data_dir, img_name))
        labels.append(int(label_str))

    labels = np_utils.to_categorical(labels, num_classes)
    split = train_test_split(img_paths, labels, stratify=labels,
                             test_size=0.15, random_state=0)
    train_img_paths, validation_img_paths, train_labels, validation_labels = split
    print('total samples: %d, training samples: %d, validation samples: %d' % (
    len(img_paths), len(train_img_paths), len(validation_img_paths)))
    # The training generator applies random augmentation; validation does not.
    train_sequence = DataGenerator(train_img_paths, train_labels, batch_size,
                                   [input_size, input_size], is_train=True)
    validation_sequence = DataGenerator(validation_img_paths, validation_labels, batch_size,
                                        [input_size, input_size], is_train=False)
    return train_sequence, validation_sequence