import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import tqdm
from initializer import get_zero_shape, get_normal_shape
from lstm import BNLSTMCell
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt

#-------------------------------------------------------------------
# params
#-------------------------------------------------------------------
batch_size = 100     # examples per training/validation batch
n_layer = 2          # number of stacked LSTM layers
n_step = 28          # sequence length: one MNIST image row per time step
n_input = 28         # features per time step (28 pixels per row)
hidden_size = 100    # LSTM hidden-state size
n_class = 10         # ten digit classes
n_iter = 1001        # training iterations per mode
display_step = 10    # record validation accuracy/loss every N steps
lr = 0.05            # RMSProp learning rate
# MNIST with one-hot labels; downloaded into data/ on first run.
mnist = input_data.read_data_sets("data/", one_hot=True)
# Reshape flat 784-pixel test images into (batch, time, features) sequences.
test_x = mnist.test.images.reshape((-1, n_step, n_input))
test_y = mnist.test.labels
# Per-mode validation histories, keyed by mode name ("std"/"bn"/"bren").
acc = {}
loss = {}

#-------------------------------------------------------------------
# models
#-------------------------------------------------------------------
for mode in ["std", "bn", "bren"]:
    #---------------------------------------------------------------
    # build graph
    #---------------------------------------------------------------
    # Fresh graph per mode so the three variants train independently.
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, [None, n_step, n_input])  # (batch, time, features)
    y = tf.placeholder(tf.float32, [None, n_class])          # one-hot labels
    training = tf.placeholder(tf.bool)                       # train vs. eval statistics flag

    # Stack of LSTM cells; `mode` selects the normalization variant
    # implemented by BNLSTMCell (defined in lstm.py).
    lstm = tf.contrib.rnn.MultiRNNCell(
                [BNLSTMCell(hidden_size, n_step, training, mode=mode)
                    for _ in range(n_layer)],
                state_is_tuple=True)

    # Per-layer initial state: (step counter, zero-init part, normal-init
    # part) — presumably the state layout BNLSTMCell expects; defined in
    # initializer.py / lstm.py (not visible here).
    initial_state = tuple((tf.constant(0, tf.int32),
        get_zero_shape(hidden_size, tf.shape(x)[0], tf.float32),
        get_normal_shape(hidden_size, tf.shape(x)[0], tf.float32))
        for _ in range(n_layer))

    outputs, states = tf.nn.dynamic_rnn(lstm, x,
        initial_state=initial_state, dtype=tf.float32)
    # Final hidden output of the top layer feeds the classifier head.
    _, __, output = states[-1]

    Wy = tf.get_variable('Wy', [hidden_size, n_class],
        initializer=tf.contrib.keras.initializers.glorot_uniform())
    by = tf.get_variable('by', [n_class], initializer=tf.zeros_initializer())
    logits = tf.matmul(output, Wy) + by
    pred = tf.nn.softmax(logits)
    # FIX: compute the loss from logits with the fused, numerically stable
    # op instead of mean(-sum(y * log(softmax(logits)))) — the manual form
    # yields NaN/-Inf as soon as any softmax probability underflows to 0.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
    optimizer = tf.train.RMSPropOptimizer(lr, decay=0.5)
    # Clip gradients element-wise to [-1, 1]; leave None gradients untouched.
    gvs = optimizer.compute_gradients(cross_entropy)
    capped_gvs = [(None if grad is None else tf.clip_by_value(grad, -1., 1.), var)
                  for grad, var in gvs]
    train_step = optimizer.apply_gradients(capped_gvs)
    # argmax(softmax(z)) == argmax(z), so accuracy is unchanged by the fix.
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    #---------------------------------------------------------------
    # run epoch
    #---------------------------------------------------------------
    acc[mode] = []
    loss[mode] = []
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for step in tqdm.tqdm(range(n_iter)):
            # Periodically record accuracy/loss on a validation batch
            # (training=False freezes the normalization statistics).
            if step % display_step == 0:
                batch_x, batch_y = mnist.validation.next_batch(batch_size)
                batch_x = batch_x.reshape((batch_size, n_step, n_input))
                res = sess.run([accuracy, cross_entropy],
                    feed_dict={x: batch_x, y: batch_y, training: False})
                acc[mode].append(res[0])
                loss[mode].append(res[1])
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            batch_x = batch_x.reshape((batch_size, n_step, n_input))
            sess.run([train_step],
                feed_dict={x: batch_x, y: batch_y, training: True})

        # Final held-out evaluation on the full test set.
        res = sess.run([accuracy, cross_entropy],
            feed_dict={x: test_x, y: test_y, training: False})
        print(mode, "\nAccuracy: ", res[0], "\tLoss: ", res[1])
        print("Optimization Finished!")

#-------------------------------------------------------------------
# Performance
#-------------------------------------------------------------------
# Plot each variant's validation-accuracy history on a single figure.
fig, ax = plt.subplots()
for name, history in acc.items():
    # One sample was recorded every `display_step` training steps.
    steps = [i * display_step for i in range(len(history))]
    ax.plot(steps, history, label='%s LSTM' % (name))
ax.set_xlabel('Training steps')
ax.set_ylabel('Accuracy')
ax.set_title('Accuracy')
ax.legend(loc=4)  # lower-right corner
fig.savefig('../../files/acc.png')
# plt.show()