"""
该文件仅用于通过LSTM对数据集进行训练
"""
import os
import time
import config
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import LSTM.GRU as GRU
from LSTM import data_manage as dm


os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'    # suppress TensorFlow INFO/WARNING logs


# Hyper-parameters and data paths, pulled from the project-level config module.
SEQ_MAX_LEN = config.SEQ_MAX_LEN
N_HIDDEN_UNITS = config.N_HIDDEN_UNITS
BATCH_SIZE = config.LSTM_BATCH_SIZE
LEARNING_RATE = config.LEARNING_RATE
EPOCH_SIZE = config.LSTM_EPOCH_SIZE
CLASS_NUMBERS = config.CLASS_NUMBERS
FILE_DATA_TRAIN = config.FILE_DATA_TRAIN
FILE_DATA_TEST = config.FILE_DATA_TEST
FILE_SAVE = config.FILE_SAVE    # directory for run artifacts (results, model)
MODEL_PATH = FILE_SAVE + '/Model/model.ckpt'    # checkpoint *prefix*, not a directory
# Ensure the checkpoint's parent directory exists.  The original code called
# os.makedirs(MODEL_PATH), which created a directory literally named
# 'model.ckpt'; only '<FILE_SAVE>/Model' should be created.  exist_ok avoids
# the racy exists()-then-makedirs pattern.
os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)


# Convert integer class labels to a one-hot matrix.
def one_hot(label, n_values=None):
    """Encode integer labels as a one-hot float matrix.

    Parameters
    ----------
    label : array-like of ints
        Class labels; any shape, flattened to 1-D row-by-row.
    n_values : int, optional
        Number of classes (columns).  Defaults to ``max(label) + 1``,
        matching the original behavior.

    Returns
    -------
    np.ndarray of shape ``(label.size, n_values)`` with 0.0/1.0 entries.
    """
    flat = np.asarray(label).reshape(-1)
    if n_values is None:
        if flat.size == 0:
            # np.max raises on an empty array; no labels -> no rows.
            return np.zeros((0, 0))
        n_values = int(np.max(flat)) + 1    # infer class count from the data
    return np.eye(n_values)[flat.astype(np.int32)]


if __name__ == '__main__':

    # Load the training set.  dm.read_data_txt returns a dict; presumably
    # 'x' = padded feature sequences, 'y' = integer labels, 'length' = true
    # (unpadded) sequence lengths, 'number' = sample count
    # -- TODO confirm against LSTM/data_manage.py.
    train_dic = dm.read_data_txt(FILE_DATA_TRAIN)
    train_x = train_dic['x']
    train_y = train_dic['y']
    train_len = train_dic['length']
    train_num = train_dic['number']
    train_y_hot = one_hot(train_y)
    # NOTE(review): leftover debug prints; consider removing for quiet runs.
    print(train_x)
    print(train_y_hot)

    # Load the test set (same dict layout as the training set).
    test_dic = dm.read_data_txt(FILE_DATA_TEST)
    test_x = test_dic['x']
    test_y = test_dic['y']
    test_len = test_dic['length']
    test_num = test_dic['number']
    test_y_hot = one_hot(test_y)

    # Placeholders for the graph inputs.
    with tf.name_scope('input'):
        # Third dimension (5) is the per-timestep feature size.
        X = tf.placeholder("float", [None, SEQ_MAX_LEN, 5], name='x_input')
        Y = tf.placeholder("float", [None, CLASS_NUMBERS], name='y_input')
        # Actual (unpadded) length of each sequence in the batch.
        true_length = tf.placeholder(tf.int32, [None], name='length_input')

    # Weight and bias variables for the input projection and the output layer.
    weights = {
        'in': tf.Variable(tf.truncated_normal([5, N_HIDDEN_UNITS]), name='w_in'),   # 5 = feature dimension
        'out': tf.Variable(tf.truncated_normal([N_HIDDEN_UNITS, CLASS_NUMBERS]), name='w_out')
    }
    biases = {
        'in': tf.Variable(tf.constant(0.1, shape=[N_HIDDEN_UNITS, ]), name='b_in'),
        'out': tf.Variable(tf.constant(0.1, shape=[CLASS_NUMBERS, ]), name='b_out')
    }

    # Unnormalized class scores from the GRU network; softmax turns them
    # into per-class probabilities.
    logits = GRU.GRU(X, weights, biases, true_length)
    prediction = tf.nn.softmax(logits)

    # Loss and optimizer.
    with tf.name_scope('loss_function'):
        # Softmax cross-entropy between the logits and the one-hot labels.
        loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
        # Adam (adaptive moment estimation): gradient descent with
        # per-parameter adaptive step sizes.
        optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
        train_op = optimizer.minimize(loss_op)   # minimize the mean loss
        tf.summary.scalar('loss', loss_op)

    # A prediction is correct when the argmax of the softmax output matches
    # the argmax of the one-hot ground-truth row (element-wise tf.equal
    # yields a bool tensor).
    pre_num = tf.argmax(prediction, 1, name="output")
    correct_pred = tf.equal(pre_num, tf.argmax(Y, 1))
    # Cast bools to 0./1. floats and average -> batch accuracy.
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    saver = tf.train.Saver()    # for checkpointing the model
    time_start = time.time()    # start wall-clock timer

    # Training.
    with tf.Session() as sess:
        # History buffers for the loss/accuracy curves plotted later.
        train_iterations = []
        train_loss = []
        train_accuracy = []
        tf.global_variables_initializer().run()
        iterator = 1

        # Shuffle the whole training set once, up front.
        # NOTE(review): the data is NOT reshuffled between epochs, so every
        # epoch visits the batches in the same order.
        permutation = np.random.permutation(train_y.shape[0])
        shuffled_dataset = train_x[permutation, :, :]
        shuffled_labels = train_y_hot[permutation]
        shuffled_tr_length = np.array(train_len)[permutation]
        for i in range(EPOCH_SIZE):
            # Slide a BATCH_SIZE window over the shuffled data; a trailing
            # partial batch (train_num % BATCH_SIZE samples) is dropped.
            for start, end in zip(range(0, train_num, BATCH_SIZE), range(BATCH_SIZE, train_num + 1, BATCH_SIZE)):
                # One optimization step (backprop) plus loss/accuracy readout.
                _, loss, acc = sess.run([train_op, loss_op, accuracy], feed_dict={
                    X: shuffled_dataset[start:end],
                    Y: shuffled_labels[start:end],
                    true_length: shuffled_tr_length[start:end]})
                iterator += 1
                if iterator % 10 == 0:
                    train_iterations.append(int(iterator))
                    train_loss.append(float(loss))
                    train_accuracy.append(float(acc))
                    print('Iter:{}, Loss:{}, Acc:{}'.format(iterator, loss, acc))
                # Checkpoint every 200 iterations once past the 400-step warm-up.
                if iterator >= 400 and iterator % 200 == 0:
                    saver.save(sess, MODEL_PATH, global_step=iterator)
        saver.save(sess, MODEL_PATH)    # final checkpoint (ckpt format)
        time_end = time.time()  # stop wall-clock timer
        print('Optimization Finished!')

        # Evaluate on ONE random batch of the test set (not the full set).
        test_indices = np.arange(len(test_x))  # indices of all test samples
        np.random.shuffle(test_indices)    # randomize the order in place
        test_indices = test_indices[0: BATCH_SIZE]     # keep one batch's worth
        input_accuracy = sess.run(accuracy, feed_dict={
            X: np.array(test_x)[test_indices],
            Y: np.array(test_y_hot)[test_indices],
            true_length: np.array(test_len)[test_indices]})
        print('Testing Accuracy:', input_accuracy)

        # Total wall-clock run time.
        input_time = time_end - time_start
        print('Totally time:', input_time)

        # Append the run's results to <FILE_SAVE>/output.txt.
        # os.path.join fixes the missing path separator of the original
        # FILE_SAVE + 'output.txt' concatenation (MODEL_PATH above shows
        # FILE_SAVE carries no trailing slash).
        with open(os.path.join(FILE_SAVE, 'output.txt'), 'a') as file_object:
            file_object.write('\nOriginal:\n')
            file_object.write('Testing Accuracy: ' + repr(input_accuracy) + '\n')
            file_object.write('Totally time: ' + repr(input_time) + '\n')
        # NOTE: the 'with' block already closes the file; the original's
        # explicit close() afterwards was redundant and has been removed.

        # Plot loss (left y-axis) and accuracy (right y-axis) against the
        # recorded training iteration numbers on a single figure.
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        # Use the actual iteration numbers on the x-axis (not 0..len-1) so the
        # axis matches the printed 'Iter:...' log lines.
        ax1.plot(train_iterations, train_loss, 'b', label='Loss function')
        ax1.set_ylabel(u'Loss function')    # left y-axis label
        ax1.set_xlabel(u'Iterator')     # x label goes on ax1: twinx() shares x
        ax1.set_title(u"Original")
        ax2 = ax1.twinx()  # second y-axis sharing the same x-axis
        ax2.plot(train_iterations, train_accuracy, 'r', label='Accuracy')
        ax2.set_ylabel(u'Accuracy')     # right y-axis label
        fig.legend(loc='upper left')    # legend position
        plt.grid()  # background grid
        # os.path.join fixes the missing separator of FILE_SAVE + 'figure.png'.
        fig.savefig(os.path.join(FILE_SAVE, 'figure.png'))
