# -*- coding: UTF-8 -*-
# 模型训练：LSTM网络模型，针对时序的多变量数据训练。
# （由于是多变量，所以训练模型输出直接为要预测的步长，而不是像单变量训练模型时每次输出一个步长值 然后在预测的过程中循环调用模型）

import numpy as np
import tensorflow as tf
import os
import sys
import argparse
import shutil
import time
from Utils import data_process
import matplotlib as mpl

mpl.use("Agg")
import matplotlib.pyplot as plt

# reload(sys)
# sys.setdefaultencoding('utf8')

BASE_DIR = os.path.dirname(os.path.abspath(__file__))  # /home/zxl/zy/Predict_Module/Models_Train
sys.path.append(BASE_DIR)

parser = argparse.ArgumentParser()  # build the CLI parser
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--data_name', type=str, default='pro_multi_variable', help='Name of time series data [default: ]')
# parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--max_epoch', type=int, default=1000, help='Epoch to run [default: 200]')
parser.add_argument('--batch_size', type=int, default=10, help='Batch Size during training [default:]')
parser.add_argument('--learning_rate', type=float, default=0.05, help='Initial learning rate [default: 0.001]')
parser.add_argument('--optimizer', default='adam', help='adam or Adagrad [default: adam]')
parser.add_argument('--decay_step', type=int, default=100, help='Decay step for lr decay [default: ]')
parser.add_argument('--decay_rate', type=float, default=0.8, help='Decay rate for lr decay [default: 0.5]')
parser.add_argument('--drop_rate', type=float, default=0.1, help='Drop out rate [default: 0.1]')
parser.add_argument('--hidden_size', type=int, default=128, help='LSTM hidden size [default: 8]')
parser.add_argument('--num_layer', type=int, default=2, help='LSTM layers [default: 1]')
parser.add_argument('--timestep', type=int, default=15, help='Training step [default: ]')
FLAGS = parser.parse_args()

# Hyper-parameter setup
GPU_INDEX = FLAGS.gpu  # index of the GPU to run on
LOG_DIR = 'LOG_multi-LSTM_' + FLAGS.data_name  # per-run log directory
MAX_EPOCH = FLAGS.max_epoch  # number of training epochs
BATCH_SIZE = FLAGS.batch_size  # training batch size
BASE_LEARNING_RATE = FLAGS.learning_rate  # initial learning rate
OPTIMIZER = FLAGS.optimizer  # optimizer name; NOTE(review): currently unused — Adam is hard-coded in main()
DECAY_STEP = FLAGS.decay_step  # global steps between learning-rate decays
DECAY_RATE = FLAGS.decay_rate  # multiplicative learning-rate decay factor
DATA_NAME = FLAGS.data_name  # dataset file name
HIDDEN_SIZE = FLAGS.hidden_size  # LSTM hidden units per layer
NUM_LAYER = FLAGS.num_layer  # number of stacked LSTM layers
TIME_STEP = FLAGS.timestep  # history window length (sliding window) used as model input
LEN_TEST = 120  # length of the held-out test split
write_result = True  # whether to save model checkpoints
FEATURE_NUM = 3  # number of input features per time step
SHEET_NAME = 'day'  # sheet name inside the Excel data file
use_normal = True  # whether to normalize the data
PREDICT_UNIT = 5  # prediction horizon (number of future steps predicted at once)

# Logging setup
name_file = sys.argv[0]  # path of this script, copied into the log dir for reproducibility
# DATA_ROOT = '/home/zxl/zy/Predict_Module/Data'  # absolute dataset directory
DATA_ROOT = '../Data'
if os.path.exists(LOG_DIR):
    shutil.rmtree(LOG_DIR)  # wipe any previous run's logs
os.mkdir(LOG_DIR)  # create a fresh log directory
# shutil.copy is portable; the original shelled out with os.system('cp ...'),
# which fails on non-POSIX systems and ignores errors.
shutil.copy(name_file, LOG_DIR)
LOG_FOUT = open(os.path.join(LOG_DIR, str(LOG_DIR) + '.txt'), 'w')  # truncate and open the run log
LOG_FOUT.write(str(FLAGS) + '\n')  # record every hyper-parameter at the top of the log


def log_string(out_str):
    """Append one line to the run log, flush it, and echo it to stdout."""
    LOG_FOUT.write("%s\n" % out_str)
    LOG_FOUT.flush()  # keep the on-disk log current even if training crashes
    print(out_str)


# Total length of the time-series data (the original recomputed this a second
# time below with identical arguments; the redundant call was removed).
LEN_DATA = data_process.getlen_data(DATA_ROOT, DATA_NAME, sheetname=SHEET_NAME)
# Record the dataset split sizes in the log.
log_string('Name of data: %s' % DATA_NAME)
log_string('Length of input = %d' % LEN_DATA)
log_string('Length of train = %d' % (LEN_DATA - LEN_TEST))
log_string('Length of test = %d' % LEN_TEST)

# Load and split the data; normalization is applied when use_normal is True.
train_data, test_data = data_process.load_data(DATA_ROOT, DATA_NAME, LEN_DATA, LEN_TEST, sheetname=SHEET_NAME,
                                               feature=FEATURE_NUM, normal=use_normal)

# Many-to-many samples: sliding windows of TIME_STEP history steps paired with
# PREDICT_UNIT-step targets, e.g. x: (N, TIME_STEP, FEATURE_NUM), y: (N, PREDICT_UNIT).
train_x, train_y = data_process.generate_multidata(train_data, TIME_STEP, PREDICT_UNIT)
test_x, test_y = data_process.generate_multidata(test_data, TIME_STEP, PREDICT_UNIT)
print(test_x.shape, test_y.shape)


# ----------------------------------------------------------------------------------------------------------------------


def count_trainable_params():
    """Log the total number of trainable parameters in the current graph.

    Assumes every trainable variable has a fully-defined static shape
    (true here: all weights are created with concrete sizes).
    """
    total_parameters = 0
    for variable in tf.trainable_variables():
        # TensorShape.num_elements() multiplies all dimensions, replacing the
        # original hand-rolled (and misspelled) per-dim product loop.
        total_parameters += variable.get_shape().num_elements()
    log_string("Total training params: %.1fk" % (total_parameters / 1e3))


def get_learning_rate(batch):
    """Return the staircase-decayed learning rate, clipped below.

    Decays BASE_LEARNING_RATE by DECAY_RATE every DECAY_STEP global steps,
    but never lets it fall under 1% of the base rate.
    """
    decayed = tf.train.exponential_decay(
        BASE_LEARNING_RATE, batch, DECAY_STEP, DECAY_RATE, staircase=True)
    # Clip so the rate cannot decay to a uselessly small value.
    return tf.maximum(decayed, BASE_LEARNING_RATE / 100)


def get_lstm(input_x, input_y, scope, is_training):
    """Build a stacked-LSTM regressor over a window of multivariate input.

    Args:
        input_x: float32 tensor (batch, TIME_STEP, FEATURE_NUM), the history window.
        input_y: unused here (loss is computed by the caller); kept for interface
            compatibility.
        scope: variable-scope name for the model's weights.
        is_training: unused placeholder flag; kept for interface compatibility.

    Returns:
        predictions: float32 tensor (batch, PREDICT_UNIT) — all future steps
        are emitted in one shot (multi-step direct forecasting).
    """
    with tf.variable_scope(scope):
        # BUG FIX: honor the --hidden_size / --num_layer flags. The original
        # hard-coded 64 units and 2 layers, silently ignoring both flags.
        cell = tf.nn.rnn_cell.MultiRNNCell([
            tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE)
            for _ in range(NUM_LAYER)
        ])

        outputs, _ = tf.nn.dynamic_rnn(cell, input_x, dtype=tf.float32)
        # Use only the last time step's hidden state for the regression head.
        last_output = outputs[:, -1, :]
        predictions = tf.contrib.layers.fully_connected(
            last_output, PREDICT_UNIT, activation_fn=None)  # shape=(?, PREDICT_UNIT)

    return predictions


def main():
    """Build the graph, train the LSTM, evaluate the best checkpoint, and plot.

    Side effects: writes TensorBoard summaries, the best-loss checkpoint, and
    a prediction-vs-label plot into LOG_DIR; logs progress via log_string.
    """
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            is_training_pl = tf.placeholder(tf.bool, shape=())
            input_x = tf.placeholder(tf.float32, shape=(None, TIME_STEP, FEATURE_NUM))
            input_y = tf.placeholder(tf.float32, shape=(None, PREDICT_UNIT))

            # Build the forward model.
            predictions = get_lstm(input_x, input_y, scope='lstm', is_training=is_training_pl)  # (?, 15)

            # test_y3 = []
            # for i in range(predictions.shape[0]):
            #     test_y2 = []
            #     for j in range(predictions.shape[1]/FEATURE_NUM):
            #         for p in range(FEATURE_NUM):
            #             test_y1 = []
            #             test_y1.append(predictions[i, j * FEATURE_NUM + p])
            #         test_y2.append(test_y1)
            #     test_y3.append(test_y2)
            # print test_y3

            # print input_x, predictions, input_y
            # Mean-squared-error loss over all PREDICT_UNIT output steps.
            loss = tf.losses.mean_squared_error(labels=input_y, predictions=predictions)
            tf.summary.scalar('loss', loss)

            # `batch` is the global step driving the learning-rate decay.
            batch = tf.Variable(0, trainable=False)
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)  # NOTE(review): OPTIMIZER flag is ignored; Adam is hard-coded
            train_op = optimizer.minimize(loss, global_step=batch)

            saver = tf.train.Saver()  # by default keeps the 5 most recent checkpoints

        config = tf.ConfigProto()  # session configuration
        config.gpu_options.allow_growth = True  # grow GPU memory usage on demand instead of grabbing it all
        config.allow_soft_placement = True  # fall back to an available device if the requested one is missing
        config.log_device_placement = False  # do not print device-placement logs
        sess = tf.Session(config=config)

        count_trainable_params()  # log "Total training params"

        merged = tf.summary.merge_all()  # single op producing all summaries for TensorBoard
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)  # writes the graph + summaries
        init = tf.global_variables_initializer()
        sess.run(init)

        # Training loop
        start = time.time()
        min_loss = np.inf
        for epoch_idx in range(MAX_EPOCH):
            # NOTE(review): assumes train_x has at least BATCH_SIZE samples;
            # otherwise `summary`/`step`/`current_loss` below are undefined.
            for batch_idx in range(int(train_x.shape[0] / BATCH_SIZE)):
                start_idx = batch_idx * BATCH_SIZE
                end_idx = (batch_idx + 1) * BATCH_SIZE
                feed_x = train_x[start_idx:end_idx, :, :]
                feed_y = train_y[start_idx:end_idx, :]
                summary, step, _, current_loss = sess.run([merged, batch, train_op, loss],
                                                          feed_dict={is_training_pl: True,
                                                                     input_x: feed_x, input_y: feed_y})
            # Only the last batch's summary of each epoch is written.
            train_writer.add_summary(summary, step)

            # Evaluate on the test split every epoch (the % 1 is always true).
            if epoch_idx % 1 == 0:
                loss_sum = 0
                for test_idx in range(int(test_x.shape[0] / BATCH_SIZE)):
                    start_idx = test_idx * BATCH_SIZE
                    end_idx = (test_idx + 1) * BATCH_SIZE
                    feed_data = test_x[start_idx:end_idx, :, :]
                    feed_label = test_y[start_idx:end_idx, :]
                    test_predict, test_loss = sess.run([predictions, loss],
                                                       feed_dict={is_training_pl: False,
                                                                  input_x: feed_data, input_y: feed_label})
                    loss_sum += test_loss
                loss_mean = loss_sum / float(test_x.shape[0] / BATCH_SIZE)
                # Checkpoint the best model (lowest mean test loss) so far.
                if loss_mean <= min_loss:
                    min_loss = loss_mean
                    if write_result == True:
                        save_path_loss = saver.save(sess, os.path.join(LOG_DIR,
                                                                       "MODEL_" + DATA_NAME + "_LSTM_LOSS.ckpt"))
            # print min_loss, min_rmse
            if epoch_idx % 50 == 0:
                # print "train step: " + str(epoch_idx) + ", loss: " + str(current_loss) + ", test_loss: " + str(min_loss)
                log_string(
                    "train step: " + str(epoch_idx) + ", loss: " + str(current_loss) + ", test_loss: " + str(loss_mean))
        # Training wall-clock time.
        end = time.time()
        running_time = end - start
        # print "time cost : %.5f sec" % running_time
        # print "(测试集)最小loss：" + str(min_loss)
        log_string("-----------------------------------------------------------------------")
        log_string("Time Cost : %.5f sec" % running_time)
        log_string("Minest Test Loss：" + str(min_loss))

        # Test pass with the best checkpoint (results shown on the front end).
        # NOTE(review): restore assumes write_result was True so a checkpoint exists.
        Pred = []
        saver.restore(sess, os.path.join(LOG_DIR, "MODEL_" + DATA_NAME + "_LSTM_LOSS.ckpt"))
        for i in range(int(test_x.shape[0] / BATCH_SIZE)):
            start_idx = i * BATCH_SIZE
            end_idx = (i + 1) * BATCH_SIZE
            feed_x = test_x[start_idx:end_idx, :, :]
            feed_y = test_y[start_idx:end_idx, :]
            predict = sess.run(predictions, feed_dict={is_training_pl: False,
                                                       input_x: feed_x, input_y: feed_y})
            # print predict
            for j in predict:
                Pred.append(j)
        Pred = np.mat(Pred)

        # Compute the test error.
        # NOTE(review): if test_x is not a multiple of BATCH_SIZE, `label` has
        # more rows than `prediction` and broadcasting below would fail.
        prediction = np.array(Pred).squeeze()
        label = np.array(test_y).squeeze()
        # print prediction, label
        MSE = ((prediction - label) ** 2).mean(axis=1)
        MSE = np.array(MSE)
        MSE = MSE.mean(axis=0)
        log_string("MSE：" + str(MSE))

        sess.close()

        # Data returned to the front end.
        # print running_time
        # print MSE
        # print prediction[:, 0]
        # print label[:, 0]

        # Plot the first predicted step against the ground truth.
        plt.plot(prediction[:, 0])
        plt.plot(label[:, 0])
        plt.savefig(os.path.join(LOG_DIR, "res_train.png"))


if __name__ == "__main__":
    try:
        main()
    finally:
        # Close the run log even if training aborts with an exception,
        # so partial logs are flushed to disk.
        LOG_FOUT.close()
