# -*- coding: utf-8 -*-

from keras.callbacks import TensorBoard, ModelCheckpoint, LearningRateScheduler
import tensorflow as tf
import keras.backend as K
from keras.utils import to_categorical
from lr_scheduler import WarmUpCosineDecayScheduler

import esc10_input
import numpy as np
# import models
import models
import os
import multiprocessing

import matplotlib.pyplot as plt
import datetime

# Configure matplotlib so Chinese characters render in plot labels
from pylab import *

mpl.rcParams['font.sans-serif'] = ['SimHei']

from keras.callbacks import EarlyStopping


def use_gpu():
    """Pin training to the first GPU and cap its memory usage at 50%."""
    from keras.backend import set_session  # NOTE(review): older keras exposes this as keras.backend.tensorflow_backend.set_session — confirm against installed version
    os.environ['CUDA_VISIBLE_DEVICES'] = str(0)  # expose only GPU 0 to this process
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.per_process_gpu_memory_fraction = 0.5  # at most 50% of GPU memory
    gpu_config.gpu_options.allow_growth = True  # allocate memory lazily as needed
    set_session(tf.InteractiveSession(config=gpu_config))


def CNN_train(test_fold, feat):
    """
    Train the CNN on pre-extracted features for one cross-validation fold.

    :param test_fold: test fold of 5-fold cross validation (1-5)
    :param feat: which feature to use (e.g. 'logmel' or 'mfcc')
    :return: test accuracy on the held-out fold
    """

    # Step-decay alternative schedule: multiply lr by 0.1 at epochs 30 and 60.
    # Kept for experimentation; the warmup + cosine-decay schedule below is
    # what is actually passed to fit().
    def scheduler(epoch):
        if epoch in [30, 60]:
            lr = K.get_value(model.optimizer.lr)
            K.set_value(model.optimizer.lr, lr * 0.1)
            print("lr changed to {}".format(lr * 0.1))
        return K.get_value(model.optimizer.lr)

    # Load the feature data for this fold.
    train_features, train_labels, test_features, test_labels = esc10_input.get_data(test_fold, feat)

    # Hyper-parameters.
    epoch = 70
    num_class = 10
    batch_size = 32
    input_shape = (60, 65, 1)  # (freq bins, time frames, channels) — TODO confirm against esc10_input

    # Build the CNN model.
    model = models.CNN(input_shape, num_class)

    # Callbacks.
    reduce_lr = LearningRateScheduler(scheduler)  # unused alternative to warm_lr below
    logs = TensorBoard(log_dir='./log/fold{}/'.format(test_fold))  # training logs
    # Keep only the weights with the best validation accuracy.
    checkpoint = ModelCheckpoint('./saved_model/cnn_{}_fold{}_best.h5'.format(feat, test_fold),
                                 monitor='val_acc', verbose=1, save_best_only=True, mode='max', period=1)

    # Warmup + cosine-decay learning-rate schedule.
    sample_count = len(train_features)
    learning_rate_base = 0.001
    # BUG FIX: the original computed `int(epoch * sample_count) / batch_size`,
    # which applies true division last and hands the scheduler a FLOAT step
    # count. Use integer step counts instead.
    total_steps = int(epoch * sample_count / batch_size)

    # warmup_epoch tuning history (mean accuracy over 5 folds):
    # 10 {'fold1': 0.775, 'fold2': 0.8125, 'fold3': 0.7375, 'fold4': 0.825, 'fold5': 0.925, 'mean': 0.8150000000000001}
    # 20 {'fold1': 0.7375, 'fold2': 0.8125, 'fold3': 0.75, 'fold4': 0.8125, 'fold5': 0.8625, 'mean': 0.7949999999999999}
    # 30 {'fold1': 0.7625, 'fold2': 0.8125, 'fold3': 0.7875, 'fold4': 0.825, 'fold5': 0.8875, 'mean': 0.8150000000000001}
    # 50 {'fold1': 0.7875, 'fold2': 0.825, 'fold3': 0.8125, 'fold4': 0.8625, 'fold5': 0.8875, 'mean': 0.835}
    # 60 {'fold1': 0.775, 'fold2': 0.6875, 'fold3': 0.7875, 'fold4': 0.825, 'fold5': 0.8375, 'mean': 0.7825}
    # 40 {'fold1': 0.775, 'fold2': 0.7875, 'fold3': 0.75, 'fold4': 0.8375, 'fold5': 0.9, 'mean': 0.8099999999999999}
    warmup_epoch = 10
    # Same float-division fix as total_steps above.
    warmup_steps = int(warmup_epoch * sample_count / batch_size)

    warm_lr = WarmUpCosineDecayScheduler(learning_rate_base=learning_rate_base,
                                         total_steps=total_steps,
                                         warmup_learning_rate=0,
                                         warmup_steps=warmup_steps,
                                         hold_base_rate_steps=0)

    # Train the model; 10% of the training split is held out for validation.
    history = model.fit(train_features,
                        train_labels,
                        batch_size=batch_size,
                        epochs=epoch,
                        verbose=1,
                        validation_split=0.1,
                        callbacks=[checkpoint, warm_lr, logs])

    # Plot training and validation loss.
    history_dict = history.history
    loss_values = history_dict['loss']
    val_loss_values = history_dict['val_loss']

    epochs = range(1, len(loss_values) + 1)

    plt.plot(epochs, loss_values, 'bo', label='训练损失')
    plt.plot(epochs, val_loss_values, 'b', label='验证损失')
    plt.title('训练损失和验证损失')
    plt.xlabel('迭代')
    plt.ylabel('损失')
    plt.legend()

    plt.show()

    # Plot training and validation accuracy.
    plt.clf()

    # NOTE(review): 'acc'/'val_acc' keys are Keras<2.3 names; newer versions
    # use 'accuracy'/'val_accuracy' — confirm against installed version.
    acc = history_dict['acc']
    val_acc = history_dict['val_acc']

    plt.plot(epochs, acc, 'bo', label='训练精度')
    plt.plot(epochs, val_acc, 'b', label='验证精度')
    plt.title('训练精度和验证精度')
    plt.xlabel('迭代')
    plt.ylabel('精度')
    plt.legend()

    plt.show()

    # Save the final-epoch model (best-val model is saved by the checkpoint).
    model.save('./saved_model/cnn_{}_fold{}.h5'.format(feat, test_fold))

    # Evaluate on the held-out test fold.
    score = model.evaluate(test_features, test_labels)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])

    return score[1]


if __name__ == '__main__':

    curr_time = datetime.datetime.now()
    print("程序开始时间：", curr_time)

    use_gpu()  # configure GPU before building any model
    dict_acc = {}  # per-fold test accuracies
    # 5-fold cross validation over the ESC10 dataset.
    print('### [Start] Test models for ESC10 dataset #####')
    for fold in range(1, 6):
        print("## Start test fold{} models #####".format(fold))
        dict_acc['fold{}'.format(fold)] = CNN_train(fold, 'logmel')
        print("## Finish test fold{} models #####".format(fold))
    dict_acc['mean'] = np.mean(list(dict_acc.values()))
    print(dict_acc)
    print('### [Finish] Test models finished for ESC10 dataset #####')

    last_time = datetime.datetime.now()
    print("程序结束时间：", last_time)

################################################################################
# 4+4  {'fold1': 0.6875, 'fold2': 0.775, 'fold3': 0.7625, 'fold4': 0.725, 'fold5': 0.7375, 'mean': 0.7375}
# 4+3  {'fold1': 0.7, 'fold2': 0.8, 'fold3': 0.7375, 'fold4': 0.8375, 'fold5': 0.825, 'mean': 0.7799999999999999}
# 4+2  {'fold1': 0.775, 'fold2': 0.7875, 'fold3': 0.775, 'fold4': 0.8375, 'fold5': 0.7625, 'mean': 0.7875}

# 4+4
# 10次衰减
# {'fold1': 0.6, 'fold2': 0.625, 'fold3': 0.5875, 'fold4': 0.7, 'fold5': 0.5375, 'mean': 0.6100000000000001}
# 30
# {'fold1': 0.7375, 'fold2': 0.7875, 'fold3': 0.7875, 'fold4': 0.775, 'fold5': 0.75, 'mean': 0.7675}

#4+3
#30
#