# coding=utf-8
import os
import time
import tensorflow as tf
from Util.utils import get_data, data_hparams, GetEditDistance, decode_ctc
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping, ReduceLROnPlateau, LambdaCallback
from sklearn.metrics import roc_auc_score
import numpy as np
import matplotlib.pyplot as plt
import warnings

"""
 实验结果：
 GetMfccFeature > compute_mfcc_result > compute_fbank_result
 GetMfccFeature 不存在过拟合，准确率在100%，并且val_loss 和val_mean迭代趋势较好；
 compute_mfcc_result 存在一定的过拟合，但是总体准确率在96%；
 compute_fbank_result 存在严重过拟合，总体准确率在
"""
# Parameter settings:
"""
loadmodel: filename of the acoustic-model weights to resume from / save to
dirPath: acoustic-model log directory
isTraining: whether the training step runs
am_epochs: acoustic-model epochs per round
lm_epochs: language-model epochs (used only by the commented-out LM section)
gpuNum: number of GPUs
logTxt: basename of the validation log file
"""
# NOTE(review): `sum` shadows the builtin and is never read anywhere in this
# file — presumably a leftover total-rounds constant; the loop hard-codes 100.
sum = 100
# Outer training loop: each round rebuilds the data pipelines and the model,
# resumes from the weights saved by the previous round, and runs a WER pass.
for i in range(100):
    # Per-round configuration (re-assigned identically every iteration).
    loadmodel = "model_bast2.h5"    # weights file to resume from / save to
    dirPath = "./LogIDCNN/log_am2/"  # acoustic-model log directory
    isTraining = True                # run the fit step this round
    am_epochs = 1000                 # acoustic-model epochs (early stopping applies)
    lm_epochs = 50                   # language-model epochs (LM section is commented out)
    gpuNum = 1                       # number of GPUs handed to the model
    logTxt = "compute_fbank"         # basename of the validation log file
    # Corpus toggles — only the "ocean" corpus is enabled.
    Make_ocean = True
    Make_hai = False
    Make_Thchs30 = False
    Make_mmcs = False
    Make_aishell = False
    Make_prime = False
    Make_stcmd = False
    TrainBatchSize = 5               # training batch size
    DevBatchSize = 1                 # dev batch size (keep at 1 — the WER loop
                                     # pairs one label per batch; see below)

    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
    warnings.filterwarnings('ignore')
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)
    # sess = tf.Session(configs=tf.ConfigProto(gpu_options=gpu_options))

    # 0.准备训练所需数据------------------------------
    data_args = data_hparams()
    data_args.data_type = 'train'
    data_args.data_path = ''
    data_args.mmcs = Make_mmcs
    data_args.thchs30 = Make_Thchs30
    data_args.aishell = Make_aishell
    data_args.prime = Make_prime
    data_args.stcmd = Make_stcmd
    data_args.ocean = Make_ocean
    data_args.hai = Make_hai
    data_args.batch_size = TrainBatchSize
    # data_args.data_length = 10000
    data_args.data_length = None
    data_args.shuffle = True
    data_args.training = True
    train_data = get_data(data_args)

    # count_length = train_data.countLength

    # 0. Prepare the validation (dev) data ------------------------------
    data_args = data_hparams()
    data_args.data_type = 'dev'
    data_args.data_path = ''
    data_args.hai = Make_hai
    data_args.mmcs = Make_mmcs
    data_args.thchs30 = Make_Thchs30
    data_args.aishell = Make_aishell
    data_args.prime = Make_prime
    data_args.ocean = Make_ocean
    data_args.stcmd = Make_stcmd
    data_args.batch_size = DevBatchSize
    # max 893
    data_args.data_length = None     # None = use the full dev set
    # data_args.data_length = 2000
    data_args.shuffle = False        # keep order so pny_lst[j] lines up in the WER loop
    data_args.training=True
    dev_data = get_data(data_args)



    # start = i * dataLength
    # end = start + dataLength - 1
    # train_data.starItem = start
    # train_data.endItem = end
    # Re-fetch the data
    # train_data.adjustDataList()
    # print("training data round:", str(i + 1))
    # Start training
    # 1. Acoustic-model training -----------------------------------
    # Imported inside the loop; Python caches modules, so the re-import is a
    # no-op after the first round.
    from model_speech.DFIDCNN_CRF_CTC import Am, am_hparams

    # from model_speech.gru_ctc import Am, am_hparams

    am_args = am_hparams()
    am_args.vocab_size = len(train_data.am_vocab)  # output vocabulary size
    am_args.gpu_nums = gpuNum
    am_args.lr = 0.001
    am_args.is_training = True
    am = Am(am_args)

    # Number of full batches per epoch.
    batch_num = len(train_data.wav_lst) // train_data.batch_size

    # Resume from the weights saved by the previous round, if any.
    if os.path.exists(dirPath+loadmodel):
        print('loading acoustic model...')
        am.ctc_model.load_weights(dirPath+loadmodel)

    # Prepare the batch generators.
    batch = train_data.get_am_batch()
    dev_batch = dev_data.get_am_batch()
    # Callbacks -----------------------------------------------------------
    # TensorBoard run directory keyed by wall-clock start time.
    # NOTE(review): write_grads has no effect while histogram_freq == 0 and is
    # deprecated in newer Keras — confirm against the installed version.
    tensorBoard = TensorBoard(log_dir=dirPath + "tensorboard/" + str(int(time.time())), write_grads=True,
                              histogram_freq=0, update_freq="epoch")
    tensorBoard.set_model(am.ctc_model)

    # Stop once training loss has not improved by 1e-5 for 10 epochs.
    earlyStopping = EarlyStopping(
        monitor='loss', min_delta=1e-5, patience=10, verbose=1
    )
    # Shrink the learning rate to 80% after 3 stagnant epochs, floored at 1e-5.
    reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.8,
                                  patience=3, min_lr=0.00001)

    # Plot the training-loss history after each epoch.
    # Fixed: the old lambda called plt.plot(np.arange(epoch), logs['loss']),
    # pairing a length-`epoch` x-array with a scalar y — a shape-mismatch error
    # from the second epoch on.  Accumulate the losses and plot the history.
    _epoch_losses = []

    def _record_and_plot_loss(epoch, logs):
        """Append this epoch's loss and redraw the running curve."""
        _epoch_losses.append(logs['loss'])
        plt.plot(_epoch_losses)

    plot_loss_callback = LambdaCallback(on_epoch_end=_record_and_plot_loss)

    # Run a forward pass over the validation data after every epoch.
    # Fixed: the old LambdaCallback lambda was declared as (self, batch, logs),
    # but Keras invokes on_epoch_end(epoch, logs) — it would raise TypeError if
    # it were ever attached to fit().  A Callback subclass gets `self.model`.
    class _PredictOnEpochEnd(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs=None):
            # NOTE(review): self.validation_data is only populated on older
            # Keras versions — confirm before wiring this into fit().
            data = getattr(self, 'validation_data', None)
            if data is not None:
                self.model.predict(data)

    myCallBack = _PredictOnEpochEnd()
    # checkpoint ----------------------------------------------------------

    # One "best so far" weights file per outer round, selected on training loss.
    ckptbast = "best_weights2_" + str(i) + ".h5"
    # ckpt_pi = "model_{epoch:02d}-{loss:.2f}.h5"
    # Create the checkpoint directory if missing (exist_ok avoids the
    # check-then-create race of the old os.path.exists()/os.mkdir() pair).
    os.makedirs(dirPath + "checkpoint", exist_ok=True)
    checkpointPi = ModelCheckpoint(dirPath + "checkpoint/" + ckptbast, monitor='loss',
                                   save_weights_only=True,
                                   verbose=0,
                                   save_best_only=True,
                                   mode='min')
    # Start training ------------------------------------------------------
    if isTraining:
        # NOTE(review): fit_generator is deprecated in newer Keras in favour
        # of fit(); confirm against the installed version before upgrading.
        am.ctc_model.fit_generator(batch, steps_per_epoch=batch_num, initial_epoch=0, epochs=am_epochs,
                                   callbacks=[tensorBoard, earlyStopping, reduce_lr, checkpointPi],
                                   workers=1,
                                   use_multiprocessing=False, validation_data=dev_batch, validation_steps=10, verbose=1)
    # Persist the weights so the next outer round resumes from them.
    am.ctc_model.save_weights(dirPath + loadmodel)
    # Running counters for the word (pinyin) error rate computed below.
    word_error_num = 0
    word_num = 0
    # Open a new section in the evaluation log.  (Fixed: dropped the redundant
    # file.close() that followed the with-block — the context manager already
    # closed the handle.)
    with open("./" + dirPath + "/" + logTxt + ".txt", "a") as file:
        file.write("=" * 20 + "\n")
    j = 0
    # Re-create the dev generator so validation starts from the first sample.
    dev_batch = dev_data.get_am_batch()
    # Validate on 10 dev batches, accumulating a running word (pinyin) error
    # rate.  (Fixed: removed the two redundant file.close() calls that followed
    # with-blocks — the context manager already closed the handle each time.)
    for item in range(10):
        inputs, _ = next(dev_batch)
        x = inputs['the_inputs']
        # Raw per-frame posteriors from the acoustic model.
        result = am.model.predict(x)
        # CTC-decode into a pinyin sequence.  The model's output indices were
        # built from the training vocabulary, so decode with train_data.am_vocab.
        _, result = decode_ctc(result, train_data.am_vocab)
        # dev data is unshuffled and DevBatchSize == 1, so pny_lst[j] is the
        # reference for the j-th batch.
        label = dev_data.pny_lst[j]
        j += 1
        with open("./" + dirPath + "/" + logTxt + ".txt", "a") as file:
            file.write("预测：" + ','.join(result) + "\n")
            file.write("实际：" + ','.join(label) + "\n")
        # Edit distance between prediction and reference, capped at the
        # reference length so a single utterance never exceeds 100% WER.
        word_error_num += min(len(label), GetEditDistance(label, result))
        word_num += len(label)
        print('词错误率：', (word_error_num / word_num))
        strLine = '【第' + str(i) + '轮】词错误率：' + str((word_error_num / word_num))
        # Append the running WER after every utterance.
        with open("./" + dirPath + "/" + logTxt + ".txt", "a") as file:
            file.write(strLine + "\n")
            file.write("=" * 20 + "\n")

print("=================================")
print("=================================")
print("=================================")
print("=================================")
print("=================================")
print("=================================")
print("=================================")
print("声学模型学习完毕")
print("=================================")
print("=================================")
print("=================================")
print("=================================")
print("=================================")
print("=================================")
print("=================================")
print("=================================")
#
# # 开始训练
# # 2.语言模型训练-------------------------------------------
# from model_language.transformer import Lm, lm_hparams
#
# lm_args = lm_hparams()
# lm_args.num_heads = 4
# lm_args.num_blocks = 6
# lm_args.input_vocab_size = len(train_data.pny_vocab)
# lm_args.label_vocab_size = len(train_data.han_vocab)
# # print(train_data.pny_vocab)
# # print(train_data.han_vocab)
# lm_args.max_length = 50
# lm_args.hidden_units = 512
# lm_args.dropout_rate = 0.1
# lm_args.lr = 0.0001
# lm_args.is_training = True
# lm = Lm(lm_args)
#
# epochs = lm_epochs
# with lm.graph.as_default():
#     saver = tf.train.Saver()
# with tf.Session(graph=lm.graph) as sess:
#     merged = tf.summary.merge_all()
#     sess.run(tf.global_variables_initializer())
#     add_num = 0
#     # if os.path.exists('logDfcnnCtc_ocean/log_lm/1615637285_time/checkpoint'):
#     #     print('loading language model...')
#     #     latest = tf.train.latest_checkpoint('logDfcnnCtc_ocean/log_lm/1615637285_time')
#     #     add_num = int(latest.split('_')[-1])
#     #     saver.restore(sess, latest)
#     writer = tf.summary.FileWriter('./' + dirFile + '/log_lm/tensorboard', tf.get_default_graph())
#
#     for k in range(epochs):
#         total_loss = 0
#         batch = train_data.get_lm_batch()
#         for i in range(batch_num):
#             input_batch, label_batch = next(batch)
#             # print(input_batch)
#             # print(label_batch)
#             if len(np.shape(label_batch)) < 2:
#                 print(label_batch)
#                 continue
#             feed = {lm.x: input_batch, lm.y: label_batch}
#             cost, _ = sess.run([lm.mean_loss, lm.train_op], feed_dict=feed)
#             # print("cost=>", cost)
#             total_loss += cost
#             if (k * batch_num + i) % 10 == 0:
#                 rs = sess.run(merged, feed_dict=feed)
#                 writer.add_summary(rs, k * batch_num + i)
#         print('epochs', k + 1, ': average loss = ', total_loss / batch_num)
#     saver.save(sess, './' + dirFile + '/log_lm/%d_time/model20210129_%d' % (time.time(), (epochs + add_num)))
#     writer.close()
