# -*- coding: UTF-8 -*-
# Model training: mWDN-LSTM network for univariate time-series data.

import numpy as np
import tensorflow as tf
import os
import sys
# from utils import *
# from Utils import *
import argparse
import shutil
# from models import *
import time
from Utils import data_process, mWDN
import matplotlib as mpl

mpl.use("Agg")
import matplotlib.pyplot as plt

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)

parser = argparse.ArgumentParser()  # create the command-line argument parser
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--data_name', type=str, default='pro_history',
                    help='Name of time series data [default: production_history]')
parser.add_argument('--max_epoch', type=int, default=1000, help='Epoch to run [default: 10000]')
parser.add_argument('--batch_size', type=int, default=10, help='Batch Size during training [default: 14]')
parser.add_argument('--learning_rate', type=float, default=0.05, help='Initial learning rate [default: ]')
parser.add_argument('--optimizer', default='adam', help='adam or Adagrad [default: adam]')
parser.add_argument('--decay_step', type=int, default=100, help='Decay step for lr decay [default: ]')
parser.add_argument('--decay_rate', type=float, default=0.8, help='Decay rate for lr decay [default: 0.5]')
# parser.add_argument('--drop_rate', type=float, default=0.1, help='Drop out rate [default: 0.1]')  #
parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay rate [default: 0.0]')  #
parser.add_argument('--wavelet_reg', type=float, default=0.0,
                    help='Regularization term on the wavelet layers [default: 0.0]')
parser.add_argument('--hidden_size', type=int, default=64, help='LSTM hidden size [default: 16]')
parser.add_argument('--num_layer', type=int, default=2, help='LSTM layers [default: 2]')
parser.add_argument('--timestep', type=int, default=40, help='Training step [default: 36]')
FLAGS = parser.parse_args()  # parse all command-line arguments

# Copy flags into module-level constants used throughout the script.
GPU_INDEX = FLAGS.gpu
LOG_DIR = 'LOG_mWDN-LSTM_' + FLAGS.data_name  # log directory (recreated on every run)
MAX_EPOCH = FLAGS.max_epoch
BATCH_SIZE = FLAGS.batch_size
BASE_LEARNING_RATE = FLAGS.learning_rate
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
DATA_NAME = FLAGS.data_name  # name of the data file
WEIGHT_DECAY_LSTM = FLAGS.weight_decay
SIM_REG = FLAGS.wavelet_reg
HIDDEN_SIZE = FLAGS.hidden_size
NUM_LAYER = FLAGS.num_layer
TIME_STEP = FLAGS.timestep
LEN_TEST = 90  # length of the held-out test split (in time steps)
l1_value = 0.000
# use_bn = True  # whether to use batch normalization
write_result = True
FEATURE_NUM = 1  # number of input features (univariate series)
SHEET_NAME = 'day'  # sheet name inside the Excel data file
use_normal = True  # whether to normalize the data

name_file = sys.argv[0]  # this script's filename, copied into the log dir for provenance
DATA_ROOT = '/home/zxl/zy/Predict_Module/Data'  # dataset root directory
if os.path.exists(LOG_DIR):
    shutil.rmtree(LOG_DIR)  # recursively delete any previous run's log directory
os.mkdir(LOG_DIR)  # create a fresh log directory
os.system('cp %s %s' % (name_file, LOG_DIR))  # copy this script into the log dir
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')  # open (truncate) the training log
LOG_FOUT.write(str(FLAGS) + '\n')  # record all hyper-parameters at the top of the log


def log_string(out_str):
    """Write *out_str* as one line to the run log and echo it to stdout."""
    LOG_FOUT.write('%s\n' % out_str)
    LOG_FOUT.flush()  # push through the buffer so the log file is always current
    print(out_str)


LEN_DATA = data_process.getlen_data(DATA_ROOT, DATA_NAME, sheetname=SHEET_NAME)  # total length of the time series
# Record dataset statistics in the log.
log_string('Name of data: %s' % DATA_NAME)
log_string('Length of input = %d' % LEN_DATA)
log_string('Length of train = %d' % (LEN_DATA - LEN_TEST))
log_string('Length of test = %d' % LEN_TEST)

# Load the series and window it into (input, target) pairs.
train_data, test_data = data_process.load_data(DATA_ROOT, DATA_NAME, LEN_DATA, LEN_TEST, sheetname=SHEET_NAME,
                                               feature=FEATURE_NUM, normal=use_normal)  # 1-D arrays
train_x, train_y = data_process.generate_data(train_data, TIME_STEP)  # (len of train, 1, timestep); (len of train, 1)
# Flatten the singleton feature axis: (N, 1, timestep) -> (N, timestep), (N, 1) targets.
train_x = np.reshape(train_x, [train_x.shape[0], train_x.shape[1]])
train_y = np.reshape(train_y, [train_y.shape[0], 1])
test_x, test_y = data_process.generate_data(test_data, TIME_STEP)  # (len of test, 1, timestep); (len of test, 1)
test_x = np.reshape(test_x, [test_x.shape[0], test_x.shape[1]])
test_y = np.reshape(test_y, [test_y.shape[0], 1])

print(train_x.shape, test_x.shape)


# ----------------------------------------------------------------------------------------------------------------------


def get_learning_rate(batch):
    """Return the exponentially-decayed learning rate for the given step.

    Args:
        batch: scalar variable/tensor holding the current global step.

    Returns:
        Scalar float tensor: staircase-decayed rate, clipped from below.
    """
    decayed = tf.train.exponential_decay(
        BASE_LEARNING_RATE,  # base learning rate
        batch,               # current global step
        DECAY_STEP,          # steps between decays
        DECAY_RATE,          # multiplicative decay factor
        staircase=True)
    # Never let the rate fall below 1% of the base learning rate.
    return tf.maximum(decayed, BASE_LEARNING_RATE / 100)


def count_trainable_params():
    """Log the total number of trainable parameters in the default graph."""
    total = 0
    for var in tf.trainable_variables():
        size = 1
        for dim in var.get_shape():
            size *= dim.value  # TF1 Dimension -> int
        total += size
    log_string("Total training params: %.1fk" % (total / 1e3))


def count_wave_params(name):
    """Log the combined parameter count of the wavelet-layer variables in *name*.

    Args:
        name: iterable of TF variables (the wavelet-filter weights).
    """
    total = 0
    for var in name:
        size = 1
        for dim in var.get_shape():
            size *= dim.value  # TF1 Dimension -> int
        total += size
    log_string("Total mWDN params: %.1fk" % (total / 1e3))


def get_lstm(input_x, input_y, scope, is_training):
    """Run a stacked-LSTM regressor over *input_x* and return its predictions.

    Args:
        input_x: float32 tensor fed to dynamic_rnn — assumed
            (batch, time, dim), as produced by mWDN.wave_op; TODO confirm.
        input_y: unused here; kept for interface symmetry with callers.
        scope: variable-scope name (reused across calls via AUTO_REUSE).
        is_training: unused; kept for interface compatibility.

    Returns:
        (batch, 1) tensor from a linear read-out over the last time step.
    """
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        layers = [tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE) for _ in range(NUM_LAYER)]
        cell = tf.nn.rnn_cell.MultiRNNCell(layers)
        outputs, _ = tf.nn.dynamic_rnn(cell, input_x, dtype=tf.float32)
        # Keep only the last time step's hidden state for every batch element.
        last_state = outputs[:, -1, :]
        predictions = tf.contrib.layers.fully_connected(last_state, 1, activation_fn=None)

    return predictions


def wave_block_lstm(input_x, input_y, len_input, is_training, scope, scope_lstm, l1_value):
    """One mWDN level: wavelet-decompose *input_x*, regress on the high-pass part.

    Returns:
        (lp_coe, hp_coe, predictions): low-pass coefficients (input for the
        next level), high-pass coefficients, and the LSTM prediction on hp_coe.
    """
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        lp_coe, hp_coe = mWDN.wave_op(
            input_x, len_input,
            scope='wave_func',
            l1_value=l1_value,
            weight_decay=WEIGHT_DECAY_LSTM,
            sim_reg=SIM_REG,
        )
        predictions = get_lstm(hp_coe, input_y, scope=scope_lstm, is_training=is_training)

    return lp_coe, hp_coe, predictions


def get_model(input_x, input_y, is_training):
    """Build the full mWDN-LSTM network.

    Three cascaded wavelet-decomposition levels each feed their high-frequency
    coefficients to an LSTM; the remaining low-frequency approximation gets a
    fourth LSTM.  The four branch predictions are concatenated and fused by a
    single dense layer into one regression output.

    Args:
        input_x: float32 tensor (batch, TIME_STEP).
        input_y: target tensor, passed through to the sub-blocks.
        is_training: bool placeholder, passed through for interface symmetry.

    Returns:
        (batch, 1) tensor of fused predictions.
    """
    # Level 1: full-resolution input.
    lp_1, hp_1, predict_1 = wave_block_lstm(input_x, input_y, TIME_STEP, is_training, scope='wave_level_1',
                                            scope_lstm='lstm_1', l1_value=l1_value)

    # Level 2 halves the sequence length.  Use floor division so the wavelet
    # layer receives an int — `/` is true division under Python 3 and would
    # hand mWDN.wave_op a float length.
    lp_2, hp_2, predict_2 = wave_block_lstm(lp_1, input_y, TIME_STEP // 2, is_training, scope='wave_level_2',
                                            scope_lstm='lstm_2', l1_value=l1_value)

    # Level 3: quarter resolution.
    lp_3, hp_3, predict_3 = wave_block_lstm(lp_2, input_y, TIME_STEP // 4, is_training, scope='wave_level_3',
                                            scope_lstm='lstm_3', l1_value=l1_value)

    # Final LSTM on the remaining low-frequency approximation.
    predict_4 = get_lstm(lp_3, input_y, scope='lstm_4', is_training=is_training)

    # Fuse the four branch predictions with one linear layer.
    predict = mWDN.tf_concat(-1, [predict_1, predict_2, predict_3, predict_4])
    predictions = tf.layers.dense(predict, 1, activation=None)

    return predictions


def main():
    """Build, train, and evaluate the mWDN-LSTM model, then plot predictions.

    Side effects: writes TensorBoard summaries and the best checkpoint into
    LOG_DIR, logs progress via log_string, and saves a prediction plot.
    """
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            is_training_pl = tf.placeholder(tf.bool, shape=())
            # Batch dimension is None so the same graph handles any batch size.
            input_x = tf.placeholder(tf.float32, shape=(None, TIME_STEP))
            input_y = tf.placeholder(tf.float32, shape=(None, 1))

            # Forward pass and MSE training objective.
            predictions = get_model(input_x, input_y, is_training=is_training_pl)
            loss = tf.losses.mean_squared_error(labels=input_y, predictions=predictions)
            tf.summary.scalar('loss(MSE)', loss)

            batch = tf.Variable(0, trainable=False)  # global-step counter
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)

            # Two optimizers: one for the wavelet-filter weights, one for the
            # rest of the network.  NOTE(review): both minimize() calls pass
            # global_step=batch, so the step advances twice per training batch.
            optimizer_1 = tf.train.AdamOptimizer(learning_rate=learning_rate)
            optimizer_2 = tf.train.AdamOptimizer(learning_rate=learning_rate)

            var_list_wave = [t for t in tf.trainable_variables() if t.name.split('/')[1] == 'wave_func']
            var_list_rand = [t for t in tf.trainable_variables() if not t.name.split('/')[1] == 'wave_func']

            train_op_1 = optimizer_1.minimize(loss, global_step=batch, var_list=var_list_rand)
            train_op_2 = optimizer_2.minimize(loss, global_step=batch, var_list=var_list_wave)
            train_op = tf.group(train_op_1, train_op_2)  # run both updates together

            saver = tf.train.Saver()  # default: keep the 5 most recent checkpoints

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True  # claim GPU memory on demand
        config.allow_soft_placement = True  # fall back if the requested device is unavailable
        config.log_device_placement = False  # don't log device placement
        sess = tf.Session(config=config)

        count_trainable_params()  # log total trainable parameter count
        count_wave_params(var_list_wave)  # log mWDN parameter count

        merged = tf.summary.merge_all()  # single op producing all summaries
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)
        init = tf.global_variables_initializer()
        sess.run(init)

        # ----------------------------- training ------------------------------
        start = time.time()
        min_loss = np.inf
        # Floor division: range() needs an int (true division `/` raises
        # TypeError under Python 3).  Partial trailing batches are dropped,
        # matching the int() truncation used elsewhere in this file.
        num_train_batches = train_x.shape[0] // BATCH_SIZE
        num_test_batches = test_x.shape[0] // BATCH_SIZE
        for epoch_idx in range(MAX_EPOCH):
            for batch_idx in range(num_train_batches):
                start_idx = batch_idx * BATCH_SIZE
                end_idx = (batch_idx + 1) * BATCH_SIZE
                feed_x = train_x[start_idx:end_idx, :]
                feed_y = train_y[start_idx:end_idx, :]
                summary, step, _, current_loss = sess.run(
                    [merged, batch, train_op, loss],
                    feed_dict={is_training_pl: True, input_x: feed_x, input_y: feed_y}
                )
            train_writer.add_summary(summary, step)  # summary of the epoch's last batch

            # Evaluate on the test split after every epoch.
            loss_sum = 0
            for test_idx in range(num_test_batches):
                start_idx = test_idx * BATCH_SIZE
                end_idx = (test_idx + 1) * BATCH_SIZE
                feed_data = test_x[start_idx:end_idx, :]
                feed_label = test_y[start_idx:end_idx, :]
                test_predict, test_loss = sess.run(
                    [predictions, loss],
                    feed_dict={is_training_pl: False, input_x: feed_data, input_y: feed_label}
                )
                loss_sum += test_loss
            # Average over the batches actually evaluated (the old code divided
            # by a fractional batch count when the test size was not a
            # multiple of BATCH_SIZE, underestimating the mean).
            loss_mean = loss_sum / float(num_test_batches)
            # Checkpoint whenever the test loss improves.
            if loss_mean <= min_loss:
                min_loss = loss_mean
                if write_result:
                    save_path_loss = saver.save(sess, os.path.join(LOG_DIR,
                                                                   "MODEL_" + DATA_NAME + "_mWDN-LSTM_LOSS.ckpt"))
            if epoch_idx % 50 == 0:
                log_string(
                    "train step: " + str(epoch_idx) + ", loss: " + str(current_loss) + ", test_loss: " + str(loss_mean))

        # Training time.
        end = time.time()
        running_time = end - start
        log_string("-----------------------------------------------------------------------")
        log_string("Time Cost : %.5f sec" % running_time)
        log_string("Minest Test Loss：" + str(min_loss))

        # ------------------------------ testing ------------------------------
        # Restore the best checkpoint and predict over the whole test split.
        Pred = []
        saver.restore(sess, os.path.join(LOG_DIR, "MODEL_" + DATA_NAME + "_mWDN-LSTM_LOSS.ckpt"))
        for i in range(num_test_batches):
            start_i = i * BATCH_SIZE
            end_i = (i + 1) * BATCH_SIZE
            feed_x = test_x[start_i:end_i, :]
            feed_y = test_y[start_i:end_i, :]
            predict = sess.run(predictions, feed_dict={is_training_pl: False,
                                                       input_x: feed_x, input_y: feed_y})
            Pred.extend(predict)

        # Final test error.  NOTE(review): assumes the number of test samples
        # is a multiple of BATCH_SIZE; otherwise `prediction` and `label`
        # would have different lengths and broadcasting would fail.
        prediction = np.array(Pred).squeeze()
        label = np.array(test_y).squeeze()
        MSE = ((prediction - label) ** 2).mean(axis=0)
        log_string("MSE：" + str(MSE))

        sess.close()

        # Plot predictions against ground truth.
        plt.plot(prediction)
        plt.plot(label)
        plt.savefig(os.path.join(LOG_DIR, "res_train.png"))


if __name__ == "__main__":
    try:
        main()
    finally:
        LOG_FOUT.close()  # close the log even if training raised an exception
