from python_ai.common.xcommon import *
import tensorflow.compat.v1 as tf
import tensorflow as tsf
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt

# Fix RNG seeds so runs are reproducible.
np.random.seed(777)
tf.set_random_seed(777)

ver = 'v1.0'              # run tag, appended to the TensorBoard log dir name
alpha = 0.01              # Adam learning rate
training_size_rate = 0.7  # fraction of samples used for training
n_steps = 7               # sequence length: days per input window
n_hidden = 15             # RNN hidden-state size
n_iters = 100             # training iterations (full-batch per iteration)

# Load the daily stock CSV, flip it into chronological order (the file is
# newest-first), then min-max scale every column into [0, 1].
sep('load data')
df = pd.read_csv(r'../../../../../large_data/DL1/stock/data-02-stock_daily.csv', header=1)
df = df.iloc[::-1]  # oldest row first; shape (732, 5)
m, n_features = df.shape
print('df', df.shape)

# Per-column min-max scaling.
sep('scale')
xmin = df.min(axis=0)
xmax = df.max(axis=0)
check_shape(xmin, 'xmin')
check_shape(xmax, 'xmax')
col_range = xmax - xmin + 1e-8  # epsilon guards against a constant column
df = (df - xmin) / col_range

# Build supervised samples from the scaled frame: each input is a window of
# n_steps consecutive days (all n_features columns) and the target is the
# NEXT day's closing price (last column).
#
# NOTE(review): the original used the window's own last close
# (y = x[:, -1, [-1]]) as the target, which leaks the label into the input —
# the network could trivially echo its final input value. Predicting the
# close one step past the window removes the leakage while keeping every
# shape identical: x is (m - n_steps, n_steps, n_features), y is
# (m - n_steps, 1), and i + n_steps <= m - 1 holds for every window.
sep('arrange data')

x, y = [], []
for i in range(0, m - n_steps):
    x.append(df.iloc[i:i + n_steps].to_numpy().tolist())
    y.append([df.iloc[i + n_steps, -1]])  # close of the day after the window
x = np.array(x)
y = np.array(y)
print('x', x.shape)  # (725, 7, 5)
print('y', y.shape)  # (725, 1)
m = x.shape[0]  # m now counts samples, not raw rows

# Chronological train/test split — no shuffling, so the test set is the
# most recent ~30% of windows.
sep('split')
m_train = int(np.ceil(m * training_size_rate))
m_test = m - m_train
x_train, x_test = x[:m_train], x[m_train:]
y_train, y_test = y[:m_train], y[m_train:]
for tag, arr in (('x_train', x_train), ('x_test', x_test),
                 ('y_train', y_train), ('y_test', y_test)):
    print(tag, arr.shape)

# placeholders
with tf.variable_scope('Input'):
    # ph_x: batch of input windows, shape (batch, n_steps, n_features)
    ph_x = tf.placeholder(tf.float32, [None, n_steps, n_features], 'ph_x')
    # ph_y: scaled target close price, shape (batch, 1)
    ph_y = tf.placeholder(tf.float32, [None, 1], 'ph_y')

# rnn: unroll a basic RNN over the window, keep only the final hidden state
with tf.variable_scope('RNN'):
    cell = tf.nn.rnn_cell.BasicRNNCell(n_hidden)
    outputs, states = tf.nn.dynamic_rnn(cell, ph_x, dtype=tf.float32)
    outputs = outputs[:, -1]  # last time step only: (?, n_hidden)

# fc: linear read-out to one scalar prediction per window
with tf.variable_scope('FC'):
    # NOTE(review): the original called tsf.contrib.layers.fully_connected,
    # but tf.contrib was removed in TensorFlow 2.x, so that line raises
    # AttributeError under the plain `tensorflow` package this file imports.
    # tf.layers.dense is the compat.v1 replacement; with no activation
    # argument it is linear, matching activation_fn=None.
    h = tf.layers.dense(outputs, 1)  # (?, 1)

with tf.variable_scope('Cost'):
    # Half mean-squared-error between prediction and target.
    err = h - ph_y
    cost = 0.5 * tf.reduce_mean(tf.square(err))
    tf.summary.scalar('cost', cost)

with tf.variable_scope('Train'):
    # Single Adam step op; minimizes the half-MSE above.
    train = tf.train.AdamOptimizer(learning_rate=alpha).minimize(cost)

with tf.variable_scope('Metrics'):
    # Root-mean-square error.
    # NOTE(review): the original computed sqrt(reduce_SUM(sq_err)) — a
    # root-sum-square that grows with batch size, not RMSE — and the
    # comment misnamed it "root mean square exception". True RMSE takes
    # the mean before the square root.
    rmse = tf.sqrt(tf.reduce_mean(tf.square(h - ph_y)))
    # Coefficient of determination: 1 - SS_res / SS_tot.
    r2 = 1 - tf.reduce_mean(tf.square(h - ph_y)) / tf.reduce_mean(tf.square(ph_y - tf.reduce_mean(ph_y)))
    tf.summary.scalar('rmse', rmse)
    tf.summary.scalar('r2', r2)
    summary = tf.summary.merge_all()

with tf.Session() as sess:
    # Write the graph and scalar summaries under ./_log/<script>_<ver>
    # so the run can be inspected in TensorBoard.
    with tf.summary.FileWriter('./_log/' + os.path.basename(__file__) + '_' + ver, sess.graph) as fw:
        sess.run(tf.global_variables_initializer())

        # Print/flush roughly 20 times over the whole run.
        group = int(np.ceil(n_iters / 20))
        for i in range(n_iters):
            # Full-batch training step; also fetch metrics and the merged summary.
            _, costv, rmsev, r2v, sv = sess.run([train, cost, rmse, r2, summary], feed_dict={ph_x: x_train, ph_y: y_train})
            fw.add_summary(sv, i)
            if i % group == 0:
                print(f'#{i + 1}: cost = {costv}, rmse = {rmsev}, r2 = {r2v}')
                fw.flush()
        # Report the final iteration unless the loop above just printed it
        # (relies on `i` leaking out of the for-loop; n_iters > 0).
        if i % group != 0:
            print(f'#{i + 1}: cost = {costv}, rmse = {rmsev}, r2 = {r2v}')
            fw.flush()

        # Evaluate on the held-out (most recent) windows.
        r2v, hv = sess.run([r2, h], feed_dict={ph_x: x_test, ph_y: y_test})
        print(f'Testing R2 = {r2v}')

        # Plot predicted vs. actual scaled close ([:-1] drops the last point).
        plt.plot(hv[:-1], label='hypothesis')
        plt.plot(y_test[:-1], label='target')
        plt.legend()
        plt.show()