# -*- encoding:utf-8 -*-
'''
Author:Alexxrhuang
Date:2019.07.12
'''
import sys
import random
import threading
import numpy as np
import imgaug.augmenters as iaa
from utils.common import logger, show_melspectrogram
from utils.prepare_data import prepare_data
from utils.feature import extract_delta, load_train_data, preprocess_data
import numpy.linalg as nl
from scipy import interpolate
from scipy.spatial.distance import pdist, cdist, squareform
np.set_printoptions(threshold=sys.maxsize)


# Source: https://www.kaggle.com/davids1992/specaugment-quick-implementation
def spec_augment_core(spec,
                      num_mask=2,
                      freq_masking=0.15,
                      time_masking=0.20,
                      value=0):
    spec = spec.copy()
    num_mask = random.randint(1, num_mask)
    for i in range(num_mask):
        all_freqs_num, all_frames_num = spec.shape
        freq_percentage = random.uniform(0.0, freq_masking)

        num_freqs_to_mask = int(freq_percentage * all_freqs_num)
        f0 = np.random.uniform(low=0.0, high=all_freqs_num - num_freqs_to_mask)
        f0 = int(f0)
        spec[f0:f0 + num_freqs_to_mask, :] = value

        time_percentage = random.uniform(0.0, time_masking)
        num_frames_to_mask = int(time_percentage * all_frames_num)
        t0 = np.random.uniform(low=0.0, high=all_frames_num - num_frames_to_mask)
        t0 = int(t0)
        spec[:, t0:t0 + num_frames_to_mask] = value
    return spec


def mixup_core(batch_size, alpha, X1, y1, min_value=0.5):
    '''Mix every sample with its mirror-position sample in the batch.

    One interpolation factor per sample is drawn from Beta(alpha, alpha).
    When alpha <= 0 the batch is returned unchanged. `min_value` is accepted
    for interface compatibility but not applied (factor clamping is disabled).

    Args:
        batch_size: number of samples in the batch
        alpha: Beta distribution parameter; <= 0 disables mixup
        X1: 4-D input batch
        y1: labels; 3-D for frame-level targets, 2-D for chunk-level targets
        min_value: unused (kept for backward compatibility)
    '''
    if alpha <= 0:
        return X1, y1

    # One interpolation factor per sample.
    lam = np.random.beta(alpha, alpha, batch_size)

    # Broadcast shapes: 4-D against inputs; 3-D or 2-D against labels.
    lam_x = lam.reshape(batch_size, 1, 1, 1)
    lam_y = lam.reshape(batch_size, 1, 1) if y1.ndim == 3 else lam.reshape(batch_size, 1)

    mixed_x = X1 * lam_x + X1[::-1] * (1.0 - lam_x)
    mixed_y = y1 * lam_y + y1[::-1] * (1.0 - lam_y)
    return mixed_x, mixed_y


def imgaug_setup(crop_shape=(230, 115)):
    '''Build the imgaug augmentation pipeline: random horizontal flip plus a
    fixed-size crop, with the result interpolated back to the original size.

    See examples: https://github.com/aleju/imgaug
    '''
    crop_w, crop_h = crop_shape
    children = [
        iaa.Fliplr(0.5),                                        # horizontal flip with probability 0.5
        iaa.CropToFixedSize(width=crop_w, height=crop_h),       # position='center-center'
    ]
    # KeepSizeByResize resizes the cropped output back to the input size.
    return iaa.Sequential(iaa.size.KeepSizeByResize(children=children),
                          random_order=True)


class RatioDataGenerator(object):
    '''Class-balancing batch generator with optional mixup.

    Rare classes are oversampled: get_lb_list maps per-class sample ratios to
    repeat counts, producing a "miniature dataset" whose shuffled copies feed
    a queue from which each batch's class assignments are drawn.
    '''

    def __init__(self, X, Y, batch_size, alpha=0.2, image_data_format='channels_first', te_max_iter=sys.maxsize, verbose=1):
        # NOTE(review): image_data_format is accepted but never used in this class.
        self.X = X                          # feature chunks, indexed along axis 0
        self.Y = Y                          # labels; generate() expects shape (n_samples, samples_len, n_labels)
        self.batch_size = batch_size
        self.alpha = alpha                  # Beta(alpha, alpha) parameter forwarded to mixup_core
        self._te_max_iter_ = te_max_iter    # maximum number of batches to yield
        self._verbose_ = verbose

    def __iter__(self):
        return self

    def get_lb_list(self, n_samples_list):
        '''Map per-class frequencies to oversampling repeat counts.

        Args:
            n_samples_list: number of samples of each class

        Returns:
            lb_list: the ratio-mapped "miniature dataset" holding each class's
                     chunk count, e.g. [0, 0, 1, 2, 2, 2] means the miniature
                     dataset has 2 chunks of class 0, 1 of class 1, 3 of class 2
            ratio_percent: the normalized repeat counts (class mix after mapping)
        '''
        lb_list = []
        ratio_percent = []
        ratio_list = n_samples_list/np.sum(n_samples_list)
        for idx in range(len(n_samples_list)):
            ratio = ratio_list[idx]
            if ratio <= 0:
                logger.warning("Class %d dont have any samples" % (idx), extra={"verbosity": 0})
                continue
            # Rarer classes receive more repetitions (1 up to 14).
            if ratio < 0.005:
                lb_list += [idx]
                ratio_percent.append(1)
            elif ratio < 0.01:
                lb_list += [idx] * 2
                ratio_percent.append(2)
            elif ratio < 0.05:
                lb_list += [idx] * 4
                ratio_percent.append(4)
            elif ratio < 0.1:
                lb_list += [idx] * 6
                ratio_percent.append(6)
            elif ratio < 0.2:
                lb_list += [idx] * 8
                ratio_percent.append(8)
            elif ratio < 0.3:
                lb_list += [idx] * 10
                ratio_percent.append(10)
            elif ratio < 0.4:
                lb_list += [idx] * 12
                ratio_percent.append(12)
            else:
                lb_list += [idx] * 14
                ratio_percent.append(14)
        ratio_percent /= np.sum(ratio_percent)      # numpy broadcast converts the list into a normalized ndarray
        return lb_list, ratio_percent

    def __call__(self):
        X = self.X
        Y = self.Y
        return self.generate(X, Y)

    def generate(self, X, Y, mixup_prob=0.5):
        '''Yield class-balanced (batch_x, batch_y) batches until te_max_iter.

        With probability mixup_prob each batch is passed through mixup_core.
        '''
        batch_size = self.batch_size
        n_samples, samples_len, n_labels = Y.shape
        n_samples_list = np.sum(Y[:, 0, :], axis=0)
        lb_list, ratio_percent = self.get_lb_list(n_samples_list)
        if self._verbose_ > 0:
            logger.info('label percent: %s' % (n_samples_list/np.sum(n_samples_list)), extra={"verbosity": 0})
            logger.info('After ratio, label percent: %s' % (ratio_percent), extra={"verbosity": 0})

        index_list = []                         # per class: the indices in Y belonging to that class
        for i1 in range(n_labels):
            # Use each chunk's first frame as the chunk-level label:
            # preprocessing already tries to keep identical frames inside one chunk.
            index_list.append(np.where(Y[:, 0, i1] == 1)[0])
            np.random.shuffle(index_list[i1])   # shuffle the chunks of each class

        queue = []
        pointer_list = [0] * n_labels
        len_list = [len(i) for i in index_list]     # chunk count per class; the real-data index pools are fixed from here on
        iter = 0                                    # NOTE(review): shadows the builtin 'iter'
        while True:
            if iter == self._te_max_iter_:
                break
            iter += 1
            batch_x = []
            batch_y = []

            while len(queue) < batch_size:          # refill: append one shuffled miniature dataset whenever the queue runs low
                random.shuffle(lb_list)
                queue += lb_list
            batch_idx = queue[: batch_size]        # pop one batch; batch_idx lists the class (and order) of each chunk to draw
            queue[: batch_size] = []

            # Number of chunks of each class in the current batch.
            n_per_class_list = [batch_idx.count(idx) for idx in range(n_labels)]
            for i1 in range(n_labels):
                # Draw n_per_class_list[i1] chunks from this class's real-data index pool.
                end_index = pointer_list[i1] + n_per_class_list[i1]
                if end_index <= len_list[i1]:
                    per_class_batch_idx = index_list[i1][pointer_list[i1]: end_index]
                    pointer_list[i1] += n_per_class_list[i1]
                else:
                    # Pool exhausted: wrap around (possibly several times) until the batch is filled.
                    np.random.shuffle(index_list[i1])     # reshuffle all chunks of this class; index_list holds real-data indices
                    per_class_batch_idx = index_list[i1][pointer_list[i1]:]
                    end_index = n_per_class_list[i1] - (len_list[i1] - pointer_list[i1])
                    # print(end_index, n_per_class_list[i1], len_list[i1], pointer_list[i1])
                    circle_num = end_index // len_list[i1]
                    end_index = end_index % len_list[i1]
                    # print(circle_num, end_index)
                    tmp = np.array([])
                    if circle_num > 0:
                        tmp = np.vstack([index_list[i1]] * circle_num)      # strictly speaking each repetition should be reshuffled
                        tmp = tmp.flatten()
                    per_class_batch_idx = np.concatenate((per_class_batch_idx, tmp, index_list[i1][:end_index]), axis=0)
                    pointer_list[i1] = end_index
                batch_x.append(X[per_class_batch_idx.astype('int64')])
                batch_y.append(Y[per_class_batch_idx.astype('int64')])
            batch_x = np.concatenate(batch_x, axis=0)       # concatenate the chunks of all classes
            batch_y = np.concatenate(batch_y, axis=0)
            # print(np.sum(np.squeeze(batch_y[:, 0, :]), axis=0)/np.sum(np.squeeze(batch_y[:, 0, :])))      # prints the actual class ratio inside the batch

            if np.random.uniform(low=0, high=1) > mixup_prob:
                yield batch_x, batch_y
            else:
                yield mixup_core(self.batch_size, self.alpha, batch_x, batch_y)


class ComplicateDataGenerator():
    '''Batch generator combining mixup, SpecAugment, imgaug image augmentation
    and optional delta-feature extraction.'''

    def __init__(self, X_train, y_train, batch_size=2, alpha=0.275, is_delta=True, image_data_format='channels_first',
                 mixup_prob=0.5, spec_aug_prob=0.5, imgaug_prob=0.35, shuffle=True):
        '''
        Args:
            X_train: feature chunks; assumed (n_samples, 1, freq, time) -- TODO confirm with caller
            y_train: labels aligned with X_train
            batch_size: samples per yielded batch
            alpha: Beta(alpha, alpha) parameter forwarded to mixup_core
            is_delta: if True, extend features with deltas via extract_delta
            image_data_format: output layout; 'channels_last' transposes to NHWC
            mixup_prob, spec_aug_prob, imgaug_prob: per-batch probabilities of
                applying mixup / SpecAugment / imgaug respectively
            shuffle: reshuffle at the start of every epoch; required when
                     model.fit is given a non-empty steps_per_epoch, since
                     shuffling must then be done manually each round
        '''
        self.X_train = X_train
        self.y_train = y_train
        self.batch_size = batch_size
        self.alpha = alpha
        self.is_delta = is_delta
        self.image_data_format = image_data_format
        self.mixup_prob = mixup_prob
        self.spec_aug_prob = spec_aug_prob
        self.imgaug_prob = imgaug_prob
        self.shuffle = shuffle
        self.sample_num = len(X_train)
        self.lock = threading.Lock()
        self.fix_indexes = np.arange(self.sample_num)
        self.imgaug_seq = imgaug_setup()
        np.random.shuffle(self.fix_indexes)        # shuffle once and keep this fixed order

    def __iter__(self):
        return self

    # @threadsafe_generator
    def __call__(self):
        '''Infinite generator of augmented (X, y) batches.'''
        # with self.lock:
        while True:
            indexes = self.__get_exploration_order()
            itr_num = int(len(indexes) // self.batch_size)
            if itr_num == 0:
                logger.warning("the batch size is too large", extra={"verbosity": 0})
                break

            for i in range(itr_num):
                batch_ids = indexes[i * self.batch_size:(i + 1) * self.batch_size]      # sample indices of this batch
                X, y = self.__data_generation(batch_ids)
                yield X, y

    def __get_exploration_order(self):
        '''Return the global sample index order, reshuffled when enabled.'''
        indexes = self.fix_indexes
        if self.shuffle:
            np.random.shuffle(indexes)
        return indexes

    def __data_generation(self, batch_ids):
        '''Build one augmented batch.

        Args:
            batch_ids: sample indices of this batch
        '''
        X = self.X_train[batch_ids[:]]
        y = self.y_train[batch_ids[:]]

        if np.random.uniform(low=0, high=1) < self.mixup_prob:
            X, y = mixup_core(self.batch_size, self.alpha, X, y)

        if np.random.uniform(low=0, high=1) < self.spec_aug_prob:
            # Drop only the channel axis: a bare np.squeeze(X) would also drop
            # the batch axis when batch_size == 1 and break the loop below.
            X = np.squeeze(X, axis=1)
            for i in range(len(X)):
                X[i, :, :] = spec_augment_core(X[i, :, :])
            X = np.expand_dims(X, 1)

        # Image-style augmentation (flip + crop-and-resize).
        if np.random.uniform(low=0, high=1) < self.imgaug_prob:
            X = X.astype(np.float32)
            for i in range(len(X)):
                X[i, :, :, :] = self.imgaug_seq.augment_images(X[i, :, :, :])

        # Delta feature extraction.
        if self.is_delta:
            X = extract_delta(X)

        if self.image_data_format == 'channels_last':
            X = X.transpose(0, 2, 3, 1)
        return X, y


class DeltaDataGenerator():
    def __init__(self, X_train, y_train, batch_size=2, shuffle=True):
        '''Test generator that triples the (single) channel axis and yields
        channels-last batches.

        Args:
            X_train: sample array; assumed (n_samples, 1, freq, time) -- TODO confirm with caller
            y_train: label array aligned with X_train
            batch_size: samples per yielded batch
            shuffle: reshuffle the sample order at the start of every epoch
        '''
        self.X_train = X_train
        self.y_train = y_train
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.sample_num = len(X_train)
        self.lock = threading.Lock()
        self.fix_indexes = np.arange(self.sample_num)
        np.random.shuffle(self.fix_indexes)        # shuffle once and keep this fixed order

    def __iter__(self):
        return self

    # @threadsafe_generator
    def __call__(self):
        '''Infinite batch generator; stops only if batch_size exceeds the dataset.'''
        # with self.lock:
        while True:
            indexes = self.__get_exploration_order()
            itr_num = int(len(indexes) // self.batch_size)
            if itr_num == 0:
                logger.warning("the batch size is too large", extra={"verbosity": 0})
                break

            for i in range(itr_num):
                batch_ids = indexes[i * self.batch_size:(i + 1) * self.batch_size]      # sample indices of this batch
                X, y = self.__data_generation(batch_ids)
                yield X, y

    def __get_exploration_order(self):
        '''Return the global sample index order, reshuffled when enabled.'''
        indexes = self.fix_indexes
        if self.shuffle:
            np.random.shuffle(indexes)
        return indexes

    def __data_generation(self, batch_ids):
        '''Build one batch: repeat the channel axis three times and move it last.

        Args:
            batch_ids: sample indices of this batch

        Returns:
            (X, y) with X shaped (batch, freq, time, 3)
        '''
        chunk = self.X_train[batch_ids[:]]
        y = self.y_train[batch_ids[:]]
        # Duplicate the channel axis three times (e.g. to feed RGB-style CNNs),
        # then switch from channels-first to channels-last.
        # (Debug print of X.shape removed: it spammed stdout on every batch.)
        X = np.concatenate([chunk, chunk, chunk], axis=1).transpose(0, 2, 3, 1)
        return X, y


class SimpleDataGenerator():
    def __init__(self, X_train, y_train, batch_size=2, image_data_format='channels_first', shuffle=True):
        '''Plain batch generator with optional channels-last transposition.

        Args:
            shuffle: reshuffle at the start of every epoch; required when
                     model.fit is given a non-empty steps_per_epoch, since
                     shuffling must then be done manually each round
        '''
        self.X_train = X_train
        self.y_train = y_train
        self.batch_size = batch_size
        self.image_data_format = image_data_format
        self.shuffle = shuffle
        self.sample_num = len(X_train)
        self.lock = threading.Lock()
        self.fix_indexes = np.arange(self.sample_num)
        np.random.shuffle(self.fix_indexes)        # shuffle once; this order stays fixed

    def __iter__(self):
        return self

    # @threadsafe_generator
    def __call__(self):
        '''Infinite generator of (X, y) batches.'''
        # with self.lock:
        while True:
            order = self.__get_exploration_order()
            n_batches = int(len(order) // self.batch_size)
            if n_batches == 0:
                logger.warning("the batch size is too large", extra={"verbosity": 0})
                break

            for b in range(n_batches):
                start = b * self.batch_size
                chunk_ids = order[start:start + self.batch_size]    # sample indices of this batch
                yield self.__data_generation(chunk_ids)

    def __get_exploration_order(self):
        '''Return the global sample index order, reshuffled when enabled.'''
        order = self.fix_indexes
        if self.shuffle:
            np.random.shuffle(order)
        return order

    def __data_generation(self, chunk_ids):
        '''Slice one batch out of the training arrays.

        Args:
            chunk_ids: sample indices of this batch
        '''
        X = self.X_train[chunk_ids[:]]
        y = self.y_train[chunk_ids[:]]
        if self.image_data_format == 'channels_last':
            X = X.transpose(0, 2, 3, 1)
        return X, y


if __name__ == '__main__':
    # Smoke test: pull one batch from ComplicateDataGenerator on real features.
    # X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [1, 2], [3, 4], [5, 6], [7, 8]])
    # Y = np.array([1, 2, 3, 4, 1, 2, 3, 4])
    batch_size = 2
    # feat_dir = '/home/xiaorong/workstation/sed-crnn-master/exp/Urban_Sound/feat_Test2_sp/'
    feat_dir = '/home/xiaorong/workstation/sed-crnn-master/exp/SED_SMALL/feat_sp/'
    firgure_dir = '/home/xiaorong/workstation/sed-crnn-master'

    X, Y, X_dev, Y_dev = load_train_data(feat_dir, 256, 1)

    generator = ComplicateDataGenerator(X, Y, batch_size=batch_size, alpha=0.2, mixup_prob=0.0, spec_aug_prob=0.0)
    generator_fun = generator()
    # The generator yields (X, y) pairs; unpacking four values raised ValueError.
    X, y = next(generator_fun)

    # show_melspectrogram(firgure_dir + '/spec.jpg', X[0, 0, :, :], title='Log-frequency power spectrogram')
