import codecs
import math
import os
import random
from collections import Counter
from glob import glob
from io import BytesIO

import numpy as np
import requests as req
from PIL import Image
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils, Sequence
from keras.applications.resnet50 import preprocess_input
from sklearn.model_selection import train_test_split
from tools.utils import Cutout


class BaseSequence(Sequence):
    """
    Basic batched data generator; each iteration yields one batch.

    A BaseSequence can be passed directly as the ``generator`` argument of
    ``fit_generator``, which wraps it into a multi-process data stream and
    guarantees that within one epoch no sample is drawn twice across workers.
    """

    def __init__(self, img_paths, labels, batch_size, img_size, is_train):
        """
        :param img_paths: sequence of image file paths
        :param labels: per-sample label rows (e.g. one-hot), same length as img_paths
        :param batch_size: number of samples per batch
        :param img_size: [height, width]; must be square
        :param is_train: when True, apply random augmentation per batch
        """
        assert len(img_paths) == len(labels), "len(img_paths) must equal to len(labels)"
        assert img_size[0] == img_size[1], "img_size[0] must equal to img_size[1]"
        # Stack paths and labels side by side so they can be shuffled together:
        # shape (num_samples, 1 + num_classes). np.hstack promotes everything
        # to a string dtype; labels are cast back to float in __getitem__.
        self.x_y = np.hstack((np.array(img_paths).reshape(len(img_paths), 1), np.array(labels)))
        self.batch_size = batch_size
        self.img_size = img_size
        self.is_train = is_train
        if self.is_train:
            # Real-time augmentation generator, applied to a random subset of
            # each training batch in __getitem__.
            self.train_datagen = ImageDataGenerator(
                rotation_range=30,       # random rotation range, degrees
                width_shift_range=0.2,   # horizontal shift, fraction of width
                height_shift_range=0.2,  # vertical shift, fraction of height
                shear_range=0.2,         # shear intensity (counter-clockwise shear angle)
                zoom_range=0.2,          # random zoom range
                horizontal_flip=True,    # random horizontal flips
                vertical_flip=True,      # random vertical flips
                fill_mode='nearest'
            )

    def __len__(self):
        """Number of batches per epoch (the last batch may be smaller)."""
        return math.ceil(len(self.x_y) / self.batch_size)

    @staticmethod
    def center_img(img, size=None, fill_value=255):
        """
        Center ``img`` on a square background of side ``size`` (default:
        max of height/width) filled with ``fill_value``.
        """
        h, w = img.shape[:2]
        if size is None:
            size = max(h, w)
        shape = (size, size) + img.shape[2:]  # (size, size[, channels])
        background = np.full(shape, fill_value, np.uint8)
        center_x = (size - w) // 2
        center_y = (size - h) // 2
        background[center_y:center_y + h, center_x:center_x + w] = img
        return background

    def preprocess_img(self, img_path):
        """
        Load an image, resize it to 256x256 RGB and center-crop to 224x224.
        Add any extra per-image preprocessing here.
        """
        img = Image.open(img_path)
        img = img.resize((256, 256))
        img = img.convert('RGB')
        img = np.array(img)
        img = img[16:16 + 224, 16:16 + 224]  # central 224x224 crop of the 256x256 image
        return img

    def cutout_img(self, img):
        """
        Cutout regularization: randomly mask one 40x40 patch of the image,
        which improves model robustness during training.
        """
        cut_out = Cutout(n_holes=1, length=40)
        img = cut_out(img)
        return img

    def __getitem__(self, idx):
        """Build and return batch ``idx`` as ``(batch_x, batch_y)``."""
        # Image paths for this batch.
        batch_x = self.x_y[idx * self.batch_size: (idx + 1) * self.batch_size, 0]
        # Corresponding label rows.
        batch_y = self.x_y[idx * self.batch_size: (idx + 1) * self.batch_size, 1:]

        # Decode images into a (batch, 224, 224, 3) uint8 pixel array.
        batch_x = np.array([self.preprocess_img(img_path) for img_path in batch_x])
        # Label smoothing: y * (1 - eps) + eps / num_classes. The class count
        # is taken from the label width instead of being hard-coded to 40.
        num_classes = batch_y.shape[1]
        batch_y = np.array(batch_y).astype(np.float32) * (1 - 0.05) + 0.05 / num_classes

        # Training-set augmentation.
        if self.is_train:
            # Per sample choose: 0 = untouched (40%), 1 = cutout (40%),
            # 2 = ImageDataGenerator augmentation (20%).
            indexs = np.random.choice([0, 1, 2], batch_x.shape[0], replace=True, p=[0.4, 0.4, 0.2])
            mask_indexs = np.where(indexs == 1)
            multi_indexs = np.where(indexs == 2)

            # BUGFIX: np.where returns a tuple, so len(multi_indexs) was always
            # 1 (truthy) and this branch ran even for an empty selection;
            # check the selected index array itself, as the mask branch does.
            if len(multi_indexs[0]):
                multipy_batch_x = batch_x[multi_indexs]
                multipy_batch_y = batch_y[multi_indexs]

                train_datagenerator = self.train_datagen.flow(multipy_batch_x, multipy_batch_y,
                                                              batch_size=self.batch_size)
                # Builtin next() instead of the deprecated .next() method.
                (multipy_batch_x, multipy_batch_y) = next(train_datagenerator)

                # NOTE(review): batch_x is uint8, so augmented float pixels are
                # truncated on assignment — confirm this is intended.
                batch_x[multi_indexs] = multipy_batch_x
                batch_y[multi_indexs] = multipy_batch_y

            if len(mask_indexs[0]):
                # Random occlusion (cutout); labels are unchanged.
                mask_batch_x = batch_x[mask_indexs]
                mask_batch_x = np.array([self.cutout_img(img) for img in mask_batch_x])
                batch_x[mask_indexs] = mask_batch_x

        # ResNet50 channel-wise preprocessing.
        batch_x = np.array([preprocess_input(img) for img in batch_x])

        return batch_x, batch_y

    def on_epoch_end(self):
        """Method called at the end of every epoch: reshuffle the samples."""
        np.random.shuffle(self.x_y)


def data_flow(train_data_dir, batch_size, num_classes, input_size):
    """
    Scan ``train_data_dir`` for per-image ``*.txt`` label files, split the
    dataset into train/validation/test and build the data Sequences.

    Each label file is expected to contain one line ``"<img_name>, <label>"``;
    malformed files are reported and skipped.

    :param train_data_dir: directory containing images and their .txt label files
    :param batch_size: batch size handed to BaseSequence
    :param num_classes: number of classes for one-hot encoding
    :param input_size: square image side length handed to BaseSequence
    :return: (train_sequence, validation_sequence, test_img_paths, test_labels)
    """
    img_paths = []
    labels = []
    label_files = glob(os.path.join(train_data_dir, '*.txt'))
    random.shuffle(label_files)
    for file_path in label_files:
        # Only the first line of each label file is meaningful.
        with codecs.open(file_path, 'r', 'utf-8') as f:
            line = f.readline()
        line_split = line.strip().split(', ')
        if len(line_split) != 2:
            # BUGFIX: typo "contain error lable" in the original message.
            print('%s contains an error label' % os.path.basename(file_path))
            continue
        img_name = line_split[0]
        label = int(line_split[1])
        img_paths.append(os.path.join(train_data_dir, img_name))
        labels.append(label)

    # Split off a fixed 40-sample stratified test set, then 15% for validation.
    train_img_paths, test_img_paths, train_labels, test_labels = \
        train_test_split(img_paths, labels, stratify=labels, test_size=40, random_state=0)
    train_img_paths, validation_img_paths, train_labels, validation_labels = \
        train_test_split(train_img_paths, train_labels, stratify=train_labels, test_size=0.15, random_state=0)
    print('total samples: %d, training samples: %d, validation samples: %d, test samples: %d' % (
        len(img_paths), len(train_img_paths), len(validation_img_paths), len(test_img_paths)))

    # Convert integer class vectors to binary (one-hot) class matrices.
    train_labels = np_utils.to_categorical(train_labels, num_classes)
    validation_labels = np_utils.to_categorical(validation_labels, num_classes)

    # Sequence is the safer option for multiprocessing: it guarantees that the
    # network trains on each sample only once per epoch.
    train_sequence = BaseSequence(train_img_paths, train_labels, batch_size, [input_size, input_size], is_train=True)
    # NOTE(review): the validation sequence is built with is_train=True, so
    # validation batches are also randomly augmented — confirm this is
    # intended (is_train=False is the usual choice for validation).
    validation_sequence = BaseSequence(validation_img_paths, validation_labels, batch_size, [input_size, input_size],
                                       is_train=True)
    return train_sequence, validation_sequence, test_img_paths, test_labels


# 本地路径获取图片信息
# Load an image from a local path and build its test-time augmentation views.
def preprocess_img(img_path, img_size):
    """
    Read the image at ``img_path``, resize it to 256x256 RGB, then take 10
    random 224x224 crops and run each through ResNet50 ``preprocess_input``.

    :param img_path: local filesystem path of the image
    :param img_size: kept for interface compatibility; the resize/crop sizes
        are currently fixed at 256/224 regardless of this value
    :return: list of 10 preprocessed arrays of shape (224, 224, 3),
        or 0 on any failure (legacy sentinel kept for existing callers)
    """
    resize_side, crop_side = 256, 224
    # random.randint is inclusive at both ends, so offsets span 0..32.
    max_offset = resize_side - crop_side
    try:
        img = Image.open(img_path)
        img = img.resize((resize_side, resize_side))
        img = img.convert('RGB')
        img = np.array(img)
        imgs = []
        for _ in range(10):
            top = random.randint(0, max_offset)
            left = random.randint(0, max_offset)
            crop = img[top:top + crop_side, left:left + crop_side]
            imgs.append(preprocess_input(crop))
        return imgs
    except Exception as e:
        # Best-effort: report the error and return the legacy 0 sentinel
        # instead of raising.
        print('发生了异常data_process：', e)
        return 0


# url获取图片数组信息
# Fetch an image from a URL and build its test-time augmentation views.
def preprocess_img_from_Url(img_path, img_size):
    """
    Download the image at URL ``img_path``, resize it to 256x256 RGB, then
    take 10 random 224x224 crops and run each through ResNet50
    ``preprocess_input``.

    :param img_path: HTTP(S) URL of the image
    :param img_size: kept for interface compatibility; the resize/crop sizes
        are currently fixed at 256/224 regardless of this value
    :return: list of 10 preprocessed arrays of shape (224, 224, 3),
        or 0 on any failure (legacy sentinel kept for existing callers)
    """
    resize_side, crop_side = 256, 224
    # random.randint is inclusive at both ends, so offsets span 0..32.
    max_offset = resize_side - crop_side
    try:
        # BUGFIX: without a timeout the request could hang forever; HTTP error
        # statuses now fail fast instead of surfacing as an image-decode error.
        # Either failure is caught below and returns the legacy 0 sentinel.
        response = req.get(img_path, timeout=10)
        response.raise_for_status()
        img = Image.open(BytesIO(response.content))
        img = img.resize((resize_side, resize_side))
        img = img.convert('RGB')
        img = np.array(img)
        imgs = []
        for _ in range(10):
            top = random.randint(0, max_offset)
            left = random.randint(0, max_offset)
            crop = img[top:top + crop_side, left:left + crop_side]
            imgs.append(preprocess_input(crop))
        return imgs
    except Exception as e:
        # Best-effort: report the error and return the legacy 0 sentinel
        # instead of raising.
        print('发生了异常data_process：', e)
        return 0


# 本地路径获取图片信息
# NOTE(review): this is an exact duplicate of preprocess_img defined earlier
# in this file; since Python keeps the last definition, this copy is the one
# actually used at runtime. One of the two copies should be deleted.
# Load an image from a local path and build its test-time augmentation views.
def preprocess_img(img_path, img_size):
    """
    Read the image at ``img_path``, resize it to 256x256 RGB, then take 10
    random 224x224 crops and run each through ResNet50 ``preprocess_input``.

    :param img_path: local filesystem path of the image
    :param img_size: kept for interface compatibility; the resize/crop sizes
        are currently fixed at 256/224 regardless of this value
    :return: list of 10 preprocessed arrays of shape (224, 224, 3),
        or 0 on any failure (legacy sentinel kept for existing callers)
    """
    resize_side, crop_side = 256, 224
    # random.randint is inclusive at both ends, so offsets span 0..32.
    max_offset = resize_side - crop_side
    try:
        img = Image.open(img_path)
        img = img.resize((resize_side, resize_side))
        img = img.convert('RGB')
        img = np.array(img)
        imgs = []
        for _ in range(10):
            top = random.randint(0, max_offset)
            left = random.randint(0, max_offset)
            crop = img[top:top + crop_side, left:left + crop_side]
            imgs.append(preprocess_input(crop))
        return imgs
    except Exception as e:
        # Best-effort: report the error and return the legacy 0 sentinel
        # instead of raising.
        print('发生了异常data_process：', e)
        return 0