# coding: utf-8

# 根据sin预测模型先弄一个简单的模型


import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from more_itertools import flatten
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat

learn = tf.contrib.learn

# 1. Neural-network hyper-parameters.

# Battery-cycle CSV used for both the training and the test split.
FILE_PATH = "dataset/B0005_1.csv"
# FILE_PATH = "E:/Python/tensorflow/battery-master/10001737_output.csv"

# Hidden units per LSTM cell.
HIDDEN_SIZE = 30
# Number of stacked LSTM layers.
NUM_LAYERS = 2

# Length of the input window fed to the LSTM for each sample.
TIMESTEPS = 10
# Optimizer steps to run during fitting.
TRAINING_STEPS = 100
# Mini-batch size used by the estimator.
BATCH_SIZE = 40

# NOTE(review): appears unused in this file — likely a leftover from the
# sine-wave demo this script was adapted from.
SAMPLE_GAP = 0.01

# Number of trailing rows held out as the *test* split (despite the name,
# this is the test size: the split functions below return the last
# TRAIN_SIZE values when is_test is True).
TRAIN_SIZE = 100

# #### 2. 定义生成正弦数据的函数。

# 这个函数需要修改，改为从文件中获取数据，可以参考彭伟的bp

def generate_data(seq, timesteps=None):
    """Slice a 1-D sequence into rolling-window samples for the LSTM.

    Each sample X[i] is the window seq[i : i + timesteps], wrapped in one
    extra list so the result has shape [n_samples, 1, timesteps]; the
    label y[i] is the single value seq[i + timesteps] that follows it.

    Args:
        seq: 1-D sequence (list or array) of numeric values.
        timesteps: window length; defaults to the module-level TIMESTEPS.

    Returns:
        (X, y) as float32 numpy arrays with shapes
        [n_samples, 1, timesteps] and [n_samples, 1].
    """
    if timesteps is None:
        timesteps = TIMESTEPS
    # NOTE(review): the "- 1" drops the last valid window (off-by-one);
    # preserved on purpose because the plotting section relies on the
    # resulting hard-coded sample counts (221 / 89).
    n_samples = len(seq) - timesteps - 1
    X = [[seq[i: i + timesteps]] for i in range(n_samples)]
    y = [[seq[i + timesteps]] for i in range(n_samples)]
    return np.array(X, dtype=np.float32), np.array(y, dtype=np.float32)


def handle_x_data(file_path=None):
    """Load the x-axis values (column index 1) from the CSV.

    Args:
        file_path: CSV to read; defaults to the module-level FILE_PATH.

    Returns:
        The column as a flat Python list of floats.
    """
    if file_path is None:
        file_path = FILE_PATH
    # usecols=[1]: the second CSV column carries the x-axis values.
    data_frame = pd.read_csv(file_path, usecols=[1])
    # Flatten the (n, 1) column matrix into a 1-D vector.
    return np.asarray(data_frame.values, dtype=np.float32).ravel().tolist()


def handle_soh_data(is_test, file_path=None, train_size=None):
    """Load the SOH column (column index 14) split into train/test parts.

    The last `train_size` values form the test split; everything before
    them is the training split.  (Despite its name, the TRAIN_SIZE
    default is the size of the *test* portion.)

    Args:
        is_test: True for the trailing test split, False for training.
        file_path: CSV to read; defaults to FILE_PATH.
        train_size: trailing rows held out; defaults to TRAIN_SIZE.

    Returns:
        1-D float32 numpy array with the requested split.
    """
    if file_path is None:
        file_path = FILE_PATH
    if train_size is None:
        train_size = TRAIN_SIZE
    data_frame = pd.read_csv(file_path, usecols=[14])
    # Flatten the (n, 1) column matrix into a 1-D vector.
    values = np.asarray(data_frame.values, dtype=np.float32).ravel()
    middle = len(values) - train_size
    # Training data runs up to the held-out tail; the tail is the test set.
    return values[middle:] if is_test else values[:middle]


def handle_soc_data(is_test, file_path=None, train_size=None):
    """Load per-row delta-SOC values split into train/test parts.

    Reads columns 3, 4 and 5 (usage duration plus the two SOC readings),
    sorts the rows, and takes |col4 - col5| as the delta for each row.

    Bug fix: the original returned the *leading* slice for the test set
    and the *trailing* slice for training — inverted relative to
    handle_soh_data and to its own "training data runs up to the last
    100" comment.  Now, like handle_soh_data, the last `train_size`
    values form the test split.

    Args:
        is_test: True for the trailing test split, False for training.
        file_path: CSV to read; defaults to FILE_PATH.
        train_size: trailing values held out; defaults to TRAIN_SIZE.

    Returns:
        List of float deltas for the requested split.
    """
    if file_path is None:
        file_path = FILE_PATH
    if train_size is None:
        train_size = TRAIN_SIZE
    data_frame = pd.read_csv(file_path, usecols=[3, 4, 5])
    data_values = np.asarray(data_frame.values, dtype=np.float32)
    # NOTE(review): the original comment said "sort by duration" (index 0
    # of the selected columns) but the code sorts by index 1 — kept as-is;
    # confirm which column is intended.
    data_values = data_values[data_values[:, 1].argsort()]
    delta_soc = [abs(row[1] - row[2]) for row in data_values]
    middle = len(delta_soc) - train_size
    # Training data runs up to the held-out tail; the tail is the test set.
    return delta_soc[middle:] if is_test else delta_soc[:middle]


# #### 3. 定义lstm模型。
# 为了解决tensorflow的bug
def get_a_cell(lstm_size, keep_prob):
    """Build a single LSTM cell wrapped with output dropout.

    A fresh cell is constructed per call (rather than reusing one
    instance via list multiplication), which — per the original author's
    note — avoids variable-sharing problems in MultiRNNCell.
    """
    cell = tf.nn.rnn_cell.BasicLSTMCell(lstm_size)
    return tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=keep_prob)


def lstm_model(X, y):
    """Model function for learn.Estimator: stacked LSTM + linear head.

    Args:
        X: input batch produced by generate_data, float32.
        y: target batch, one label per sample.

    Returns:
        (predictions, loss, train_op) as expected by learn.Estimator.
    """
    with tf.name_scope('lstm'):
        # keep_prob=1.0 means dropout is effectively disabled here.
        cell = tf.nn.rnn_cell.MultiRNNCell(
            [get_a_cell(HIDDEN_SIZE, 1.0) for _ in range(NUM_LAYERS)]
        )

    output, _ = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
    output = tf.reshape(output, [-1, HIDDEN_SIZE])

    # Linear regression head: fully-connected layer with no activation
    # function, producing one scalar per RNN output.
    predictions = tf.contrib.layers.fully_connected(output, 1, None)

    # Flatten both tensors to the same 1-D shape for the loss.
    labels = tf.reshape(y, [-1])
    predictions = tf.reshape(predictions, [-1])

    # Mean squared error (L2) loss.  Fixed argument order: the tf.losses
    # signature is (labels, predictions); the original passed them
    # swapped.  The value is identical for unweighted MSE, but the
    # documented order keeps weights/reduction semantics correct.
    loss = tf.losses.mean_squared_error(labels, predictions)

    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(),
        optimizer="Adagrad", learning_rate=0.1)

    return predictions, loss, train_op


# #### 4. Training.


# Wrap the lstm model defined above in an SKCompat estimator.
regressor = SKCompat(learn.Estimator(model_fn=lstm_model))

# TODO(review): train and test both come from a split of the same file;
# consider a different way of producing held-out test data.
train_X, train_y = generate_data(handle_soh_data(False))
test_X, test_y = generate_data(handle_soh_data(True))
x = handle_x_data()
# Fit the model.
regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)

# Predictions on the held-out split and on the training split.
predicted = [[pred] for pred in regressor.predict(test_X)]
fitted = [[pred] for pred in regressor.predict(train_X)]

# Root-mean-squared error.  NOTE(review): the printed label says "Mean
# Square Error" but this value is actually the RMSE (sqrt applied).
rmse = np.sqrt(((predicted - test_y) ** 2).mean(axis=0))

print("Mean Square Error is: %f" % rmse[0])

# #### 5. Plot predicted vs. real curves.

# Relative error of each prediction against its matching test label.
err = np.abs(predicted - test_y[:len(predicted)]) / test_y[:len(predicted)]
# data = pd.read_csv('dataset/B0005_1.csv', header=0)
total = np.array(list(train_y.tolist()) + list(predicted))

# NOTE(review): 221 / 310 are hard-coded sample counts tied to the
# dataset length, TIMESTEPS and TRAIN_SIZE — they break if any change.
# The fitted (training) part gets a flat 5% error band.
err = np.append(np.zeros(221) + 0.05, err)
# 95% confidence band (1.96 sigma) around the combined curve.
up, down = total * (1 + 1.96 * err), total * (1 - 1.96 * err)

plot_predicted, = plt.plot(x[221:310], predicted, label='predicted')
# NOTE(review): this line's label duplicates 'predicted'; it plots the
# fitted training curve and was presumably meant to be labelled 'fitted'.
plot_fitted, = plt.plot(x[:221], fitted, label='predicted')
plot_test, = plt.plot(x[:310], np.append(train_y, test_y), label='real_soc')
plt.legend([plot_predicted, plot_test], ['predicted', 'real_soc'])
plt.fill_between(x[:310], up, down, color='red', alpha=0.25)
plt.show()
