#!/usr/bin/env python 
# encoding: utf-8 
"""
 @Author : hanxiaopeng
 @Time : 2020/8/27 
"""
import os
import time

import matplotlib
import numpy as np
import pandas as pd
import tensorflow as tf
from matplotlib import pyplot as plt
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.preprocessing import StandardScaler, MinMaxScaler

# Global plotting configuration: bigger fonts, a Kai-style CJK font so the
# Chinese labels render, and explicit minus-sign handling for CJK fonts.
matplotlib.rcParams.update({
    'font.size': 20,
    'figure.titlesize': 20,
    'figure.figsize': [9, 7],
    'font.family': ['STKaiTi'],
    'axes.unicode_minus': False,
})

# Silence TensorFlow's C++ INFO/WARNING log spam.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
print(tf.__version__)


# `length` is the prediction horizon; `look_back` is the rolling window size.
length = 30
look_back = 30

starttime = time.time()

# Load the series and index it by date.
df = pd.read_csv("../data.csv", header=0).set_index('date')

print(len(df))

# Inspect the raw curves if needed:
# df[:].plot()
# plt.show()

# Scale feature-wise: min-max to [0, 1] first, then standardization.
# Both fitted scalers are kept so predictions can be mapped back later.
mm = MinMaxScaler()
ss = StandardScaler()
np_data = ss.fit_transform(mm.fit_transform(df.to_numpy()))


# Build supervised samples with a sliding window: each x is `look_back`
# consecutive rows; each y is the row one step past the window's end.
# NOTE(review): y is taken at i + look_back + 1, i.e. one timestep is skipped
# after the window — confirm the one-step gap is intentional.
lis = []
x_list = []
y_list = []
# Explicit bound instead of break-on-overrun; this also yields empty lists
# (rather than an IndexError) when the series is shorter than the window.
for i in range(max(0, len(np_data) - look_back - 1)):
    x = np_data[i: i + look_back].tolist()
    y = np_data[i + look_back + 1].tolist()
    x_list.append(x)
    y_list.append(y)
    lis.append([x, y])

# Sample layout that would be saved:
# print(lis)

x_array = np.array(x_list)
y_array = np.array(y_list)



# Chronological split: the first half trains, the next `length` samples are
# held out for testing, and the remainder is used for validation.
len_train = int(len(x_array) * 0.5)
test_end = len_train + length

x_train, y_train = x_array[:len_train], y_array[:len_train]

x_test, y_test = x_array[len_train:test_end], y_array[len_train:test_end]

x_valid, y_valid = x_array[test_end:], y_array[test_end:]


# # Save
# np.savez('data.npz', x_array=x_array, y_array=y_array)
# # Load
# loaddata = np.load('data.npz')
#
# x_array = loaddata['x_array']
# y_array = loaddata['y_array']


# 数据预处理
def preprocess(x, y):  # custom preprocessing hook for Dataset.map
    """Cast a batch to float32 and flatten each input window to a vector.

    Args:
        x: (batch, look_back, n_features) window tensor.
        y: (batch, n_features) target tensor.

    Returns:
        (x, y) as float32, with x flattened to (batch, look_back * n_features).

    The flattened width is derived from the tensors' own static shapes, so the
    pipeline no longer hard-codes 6 features or reads the global `look_back`.
    """
    x = tf.cast(x, dtype=tf.float32)
    x = tf.reshape(x, [-1, x.shape[-2] * x.shape[-1]])
    y = tf.cast(y, dtype=tf.float32)
    y = tf.reshape(y, [-1, y.shape[-1]])
    return x, y


batchsz = 233

# Mini-batch pipeline: batch first, then flatten/cast each batch, then repeat
# the training stream for many passes.
train_db = (tf.data.Dataset.from_tensor_slices((x_train, y_train))
            .batch(batchsz)
            .map(preprocess)
            .repeat(10000))
x, y = next(iter(train_db))
print('train sample:', x.shape, y.shape)

valid_db = (tf.data.Dataset.from_tensor_slices((x_valid, y_valid))
            .batch(batchsz)
            .map(preprocess))



lr = 1e-1
accs, losses = [], []

# Network: (look_back * n_features) -> 128 -> 64 -> n_features.
# The input width is derived from the window settings instead of a magic 180
# (look_back=30 windows of 6 features), keeping it in sync with `preprocess`.
n_features = 6
w1 = tf.Variable(tf.random.normal([look_back * n_features, 128], stddev=0.1, seed=1))
b1 = tf.Variable(tf.zeros([128]))  # stddev: standard deviation of the normal init
w2 = tf.Variable(tf.random.normal([128, 64], stddev=0.1, seed=1))
b2 = tf.Variable(tf.zeros([64]))
w3 = tf.Variable(tf.random.normal([64, n_features], stddev=0.1, seed=1))
b3 = tf.Variable(tf.zeros([n_features]))

for step, (x, y) in enumerate(train_db):
    with tf.GradientTape() as tape:
        # Forward pass: two ReLU hidden layers and a linear output layer.
        h1 = x @ w1 + b1
        h1 = tf.nn.relu(h1)
        h2 = h1 @ w2 + b2
        h2 = tf.nn.relu(h2)
        out = h2 @ w3 + b3

        # Mean-squared-error loss.
        loss = tf.square(y - out)
        loss = tf.reduce_mean(loss)

    # Back-propagate via TensorFlow automatic differentiation.
    grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])

    # Plain SGD parameter update.
    for p, g in zip([w1, b1, w2, b2, w3, b3], grads):
        p.assign_sub(lr * g)

    # Every 80 steps: log the training loss and evaluate on the validation set.
    if step % 80 == 0:
        print(step, 'loss:', float(loss))
        losses.append(float(loss))

        # BUG FIX: the validation loop used to shadow the outer `step` (so the
        # "Evaluate MAE" line printed the batch index, not the training step)
        # and only the *last* batch's MAE was recorded. Use distinct names and
        # average the MAE over all validation batches.
        batch_maes = []
        for vx, vy in valid_db:
            vh1 = tf.nn.relu(vx @ w1 + b1)
            vh2 = tf.nn.relu(vh1 @ w2 + b2)
            vout = vh2 @ w3 + b3
            batch_maes.append(mean_absolute_error(vy, vout))
        correct = float(np.mean(batch_maes))

        print(step, 'Evaluate MAE:', correct)
        accs.append(correct)
        # Early stopping (disabled): stop when the latest validation MAE
        # exceeds the mean of the recent results, i.e. the curve turned up.
        # if correct > np.mean(accs[-10:]):
        #     break

# Training-loss curve.
plt.figure()
x_ = [i * 80 for i in range(len(losses))]
min_indx = np.argmin(losses)
plt.plot(x_, losses, color='C0', marker='s', label='训练')
plt.ylabel('线性回归 Loss')
plt.xlabel('Step')
# BUG FIX: the minimum marker was plotted at x = min_indx (the list index)
# while the curve uses x = index * 80; convert to the same x scale.
plt.plot(x_[min_indx], losses[min_indx], 'gs')
plt.legend()

# Validation-MAE curve.
plt.figure()
x0 = accs.index(min(accs)) * 80
y0 = min(accs)
# Mark the lowest point; the annotation text is offset by (x-500, y+0.1) so it
# does not cover the curve.
plt.annotate('最低点: %s' % round(y0, 3), xy=(x0, y0), xytext=(x0 - 500, y0 + 0.1),
             arrowprops=dict(facecolor='black', shrink=0.00001))
plt.plot(x_, accs, color='C1', marker='s', label='测试')
plt.ylabel('线性回归 valid mae')
plt.xlabel('Step')
plt.legend()
plt.show()

# One-shot prediction on the held-out test windows.
# Consistency fix: use `look_back * 6` instead of the magic 30 * 6.
x_test = x_test.reshape(-1, look_back * 6)
h1 = tf.nn.relu(x_test @ w1 + b1)
h2 = tf.nn.relu(h1 @ w2 + b2)
out = h2 @ w3 + b3
y_hat_lis = out

# Rolling prediction (alternative): feed each prediction back as history.
# x_history = x_train.reshape(-1, 6)
# y_hat_lis = []
# for i in range(length):
#     x = x_history[-length:, ].reshape(-1, 30*6)
#     h1 = x @ w1 + b1
#     h1 = tf.nn.relu(h1)
#     h2 = h1 @ w2 + b2
#     h2 = tf.nn.relu(h2)
#     out = h2 @ w3 + b3
#     y_hat = out
#     x_history = np.concatenate((x_history, y_hat), axis=0)
#     y_hat_lis.append(y_hat)

y_hat = np.array(y_hat_lis).reshape(-1, 6)

# Undo the scaling in reverse order of fitting: invert the standardization
# first, then the min-max normalization.
y_hat = ss.inverse_transform(y_hat)
y_hat = mm.inverse_transform(y_hat)

y_test = ss.inverse_transform(y_test)
y_test = mm.inverse_transform(y_test)


# 绘制结果
# Plot predicted vs. true curves, one subplot per feature.
figure = plt.figure(figsize=(20, 8))   # wide canvas for the 2x3 subplot grid
columns_list = df.columns              # feature names for the y-axis labels
for i in range(y_hat.shape[1]):
    plt.subplot(2, 3, i + 1)
    # Consistency fix: slice by `length` instead of the magic 30, so the plots
    # follow the configured prediction horizon.
    plt.plot(y_hat[:, i][:length], label='pred')
    plt.plot(y_test[:, i][:length], label='true')
    plt.ylabel(columns_list[i])
    plt.legend()
plt.show()


def MAPE(true, pred):
    """Mean absolute percentage error of `pred` against `true`.

    Args:
        true: array-like of ground-truth values (must not contain zeros).
        pred: array-like of predictions, same shape as `true`.

    Returns:
        float: mean of |true - pred| / |true|.

    BUG FIX: the denominator is now |true| rather than the signed values, so
    negative ground truths no longer contribute negative "percentages" that
    cancel real error in the average.
    """
    true = np.asarray(true, dtype=float)
    pred = np.asarray(pred, dtype=float)
    return float(np.mean(np.abs(true - pred) / np.abs(true)))


# Overall metrics over the prediction horizon.
r2_test = r2_score(y_test[:length], y_hat)
mape = MAPE(y_test[:length], y_hat)
mse = mean_squared_error(y_test[:length], y_hat)

# Per-feature metrics.
r2_list = []
mape_list = []
mse_list = []
for col in range(y_hat.shape[1]):
    truth = y_test[:length, col]
    pred = y_hat[:length, col]
    r2_list.append(r2_score(truth, pred))
    mape_list.append(MAPE(truth, pred))
    mse_list.append(mean_squared_error(truth, pred))


print('\n\n')
print('use time: ', round(time.time() - starttime, 3), 's')
print('#' * 30)
# Overall results, rounded to three decimal places.
print('mape', round(mape, 3))
print('mse', round(mse, 3))
print('r2_score : ', round(r2_test, 3))
# Per-feature breakdown.
print('*' * 30)
print('r2_list:', [round(v, 3) for v in r2_list])
print('mape_list', [round(v, 3) for v in mape_list])
print('mse_list', [round(v, 3) for v in mse_list])

"""
控制台结果：

use time:  67.162 s
##############################
mape 0.141
mse 87.731
r2_score :  0.242
******************************
r2_list: [0.195, 0.222, -0.01, 0.025, 0.656, 0.363]
mape_list [0.091, 0.058, 0.104, 0.089, 0.241, 0.266]
mse_list [2.436, 428.1, 0.013, 15.364, 0.077, 80.396]

"""