# coding:utf-8
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt

from utils import util

# Load the dataset; assumes util.split_data() returns three DataFrames and the
# third is the one used here — TODO confirm against utils/util.py.
_, _, data = util.split_data()

# Sort chronologically and keep only the most recent rows.
data = data.sort_index(ascending=True, axis=0)
data = data[9600:]

# Target labels: values in {0, 1}, one-hot encoded to shape (n_samples, 2).
# NOTE: tf.one_hot builds a graph tensor; it is evaluated once inside the
# session further below.
data_label = data['label'].values
data_label = tf.one_hot(data_label, 2, 1, 0)

# Drop the label column from the feature matrix so the prediction target does
# not leak into the network input (previously the label was fed as a feature).
del data['label']

# Scale features to [-1, 1] and APPLY the transform (previously the scaler was
# fitted but the raw, unscaled data was fed to the network).
scaler = MinMaxScaler(feature_range=(-1, 1))
data = scaler.fit_transform(data)

# Number of input features (columns remaining after the label was dropped)
n_stocks = data.shape[1]

# Neurons
# Layer widths of the 4-layer fully connected net: 1024 -> 512 -> 256 -> 128.
n_neurons_1 = 1024
n_neurons_2 = 512
n_neurons_3 = 256
n_neurons_4 = 128
learning_rate = 0.001
# Placeholder
# X: feature batch of shape (batch, n_stocks); Y: one-hot labels (batch, 2).
X = tf.placeholder(dtype=tf.float32, shape=[None, n_stocks])
Y = tf.placeholder(dtype=tf.float32, shape=[None, 2])

# Initializers
# Variance-scaling ("fan_avg" + uniform) is Glorot/Xavier-style init, which
# keeps activation variance roughly constant across layers.
sigma = 1
weight_initializer = tf.variance_scaling_initializer(mode="fan_avg", distribution="uniform", scale=sigma)
bias_initializer = tf.zeros_initializer()

# Hidden weights
# One (W, bias) pair per hidden layer; biases start at zero.
W_hidden_1 = tf.Variable(weight_initializer([n_stocks, n_neurons_1]))
bias_hidden_1 = tf.Variable(bias_initializer([n_neurons_1]))
W_hidden_2 = tf.Variable(weight_initializer([n_neurons_1, n_neurons_2]))
bias_hidden_2 = tf.Variable(bias_initializer([n_neurons_2]))
W_hidden_3 = tf.Variable(weight_initializer([n_neurons_2, n_neurons_3]))
bias_hidden_3 = tf.Variable(bias_initializer([n_neurons_3]))
W_hidden_4 = tf.Variable(weight_initializer([n_neurons_3, n_neurons_4]))
bias_hidden_4 = tf.Variable(bias_initializer([n_neurons_4]))

# Output weights
# Final projection to 2 logits (binary classification via softmax).
W_out = tf.Variable(weight_initializer([n_neurons_4, 2]))
bias_out = tf.Variable(bias_initializer([2]))

# Hidden layers: four fully connected ReLU layers.
hidden_1 = tf.nn.relu(tf.add(tf.matmul(X, W_hidden_1), bias_hidden_1))
hidden_2 = tf.nn.relu(tf.add(tf.matmul(hidden_1, W_hidden_2), bias_hidden_2))
hidden_3 = tf.nn.relu(tf.add(tf.matmul(hidden_2, W_hidden_3), bias_hidden_3))
hidden_4 = tf.nn.relu(tf.add(tf.matmul(hidden_3, W_hidden_4), bias_hidden_4))

# Output layer: raw logits of shape (batch, 2).
out_pre = tf.add(tf.matmul(hidden_4, W_out), bias_out)

# Class probabilities, used for prediction and accuracy only; the loss below
# consumes the raw logits directly for numerical stability.
y_pre = tf.nn.softmax(out_pre)

# Accuracy: fraction of samples whose predicted class matches the label.
acc = tf.equal(tf.argmax(Y, 1), tf.argmax(y_pre, 1))
accuracy = tf.reduce_mean(tf.cast(acc, tf.float32))

# Cost function: softmax cross-entropy over the logits.
# (Renamed from `mse` — this is not a mean squared error; nothing downstream
# read the old name.) Averaging over the batch instead of summing keeps the
# effective learning rate independent of batch size.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=out_pre)
loss = tf.reduce_mean(cross_entropy)

# Optimizer: Adam on the mean cross-entropy.
opt = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

batch_size = 30
# loss_train: sampled training losses (for the plot below);
# pre_test: 0/1 correctness of each walk-forward prediction;
# acc_train: currently unused, kept for interface compatibility.
loss_train, pre_test, acc_train = [], [], []
# Run
init = tf.global_variables_initializer()
epochs = 3  # outer loop
with tf.Session() as sess:
    sess.run(init)
    # Materialize the one-hot label tensor once as a numpy array.
    data_label = sess.run(data_label)
    for e in range(epochs):
        # Walk-forward minibatch training: the window of `batch_size` rows
        # slides one row per step, and the row immediately after the window
        # is then predicted out-of-sample.
        for i in range(300):
            start = i
            batch_x = data[start:start + batch_size]
            batch_y = data_label[start:start + batch_size]
            # Run one optimization step on the current window.
            # (Unused fetches y_pre/out_pre and the ignored optimizer result
            # were dropped from the original fetch list.)
            _, loss_, aa = sess.run([opt, loss, accuracy],
                                    feed_dict={X: batch_x, Y: batch_y})
            if i % 50 == 0 and i > 0:
                loss_train.append(loss_)
                print("step: %s, loss: %s, acc: %s" % (i, loss_, aa))
            # Predict the single row just past the training window.
            pred_index = start + batch_size
            pred = sess.run(y_pre, feed_dict={X: data[pred_index: pred_index + 1]})
            # Compare in numpy rather than via sess.run(tf.equal(...)):
            # creating new graph ops on every iteration grows the graph
            # unboundedly and makes the loop progressively slower (TF1
            # anti-pattern).
            correct = np.argmax(pred, 1) == np.argmax(data_label[pred_index: pred_index + 1], 1)
            pre_test.append(int(correct[0]))
        # NOTE(review): pre_test accumulates across epochs, so this is a
        # running accuracy over ALL predictions so far, not a per-epoch
        # figure — confirm that is intended.
        pred_acc = np.mean(np.asarray(pre_test, dtype=np.float32))
        print("epoch: %s, pred acc: %s" % (e, pred_acc))

    fig = plt.figure()
    plt.plot(loss_train)
    plt.title('train loss')
    plt.show()