import tensorflow.compat.v1 as tf
import tensorflow as tsf
import numpy as np
import os
import sys
from tensorflow.examples.tutorials.mnist import input_data

# Reproducibility: seed both NumPy and the TF graph-level RNG.
np.random.seed(777)
tf.set_random_seed(777)
# This script builds a TF1-style static graph (tf.placeholder, tf.Session).
# compat.v1 alone is not enough when running under TF2 — v2 eager behavior
# must be disabled or tf.placeholder raises at graph-construction time.
tf.disable_v2_behavior()
file_name = os.path.basename(__file__)

ver = 'v1.0'
alpha = 0.001        # Adam learning rate
n_epoch = 10         # maximum passes over the training set
batch_size = 100     # examples per gradient step
n_rnn_hidden = 128   # RNN cell state size
n_fc_hidden = 50     # units in the first fully-connected layer

# Locate the on-disk MNIST cache; bail out early if it is missing so the
# script fails fast instead of downloading into the wrong place.
data_path = r'../../../../../large_data/DL1/mnist'
if not os.path.exists(data_path):
    print('WRONG DATA DIR!')
    sys.exit(1)

# Load MNIST with one-hot labels and derive the image geometry from the
# flattened feature count rather than hard-coding 28x28.
mnist = input_data.read_data_sets(data_path, one_hot=True)
m_train, n = mnist.train.images.shape   # (num examples, flattened pixels)
side = np.sqrt(n)
h = int(np.ceil(side))                  # image height (28 for MNIST)
w = int(np.ceil(n / h))                 # image width  (28 for MNIST)
n_cls = mnist.train.labels.shape[1]     # number of classes (10)

with tf.variable_scope('Input'):
    # Flattened pixel input. input_data.read_data_sets already returns
    # float32 images scaled into [0, 1], so the original `ph_x /= 255.0`
    # double-normalized — and, because the rebound `ph_x` (the div op) was
    # then used as the feed_dict key, the division was silently bypassed at
    # run time anyway. It is removed here.
    ph_x = tf.placeholder(tf.float32, [None, n], 'ph_x')
    # One-hot labels. float32 (not int32): tf.nn.softmax_cross_entropy_with_logits
    # requires labels and logits to share a dtype.
    ph_y = tf.placeholder(tf.float32, [None, n_cls], 'ph_y')
    # Treat each image row as one RNN time step: (batch, h time steps, w features).
    x_reshaped = tf.reshape(ph_x, [-1, h, w], name='x_reshaped')

with tf.variable_scope('RNN'):
    # A single vanilla RNN cell, unrolled by dynamic_rnn over the h time
    # steps of x_reshaped; `outputs` is (batch, h, n_rnn_hidden).
    rnn_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=n_rnn_hidden)
    outputs, states = tf.nn.dynamic_rnn(rnn_cell, x_reshaped, dtype=tf.float32)

with tf.variable_scope('FC'):
    # Classify from the RNN output at the last time step only.
    # tf.layers.dense replaces tsf.contrib.layers.fully_connected: tf.contrib
    # was removed in TF2 (so the `tsf` alias path breaks there), and the rest
    # of the file consistently uses the compat.v1 alias `tf`.
    last_output = outputs[:, -1]
    fc1_out = tf.layers.dense(last_output, n_fc_hidden, activation=tf.nn.sigmoid)
    logits = tf.layers.dense(fc1_out, n_cls, activation=None)

with tf.variable_scope('Cost'):
    # Per-example softmax cross-entropy on the raw logits, averaged over
    # the batch to give a scalar training objective.
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=ph_y)
    cost = tf.reduce_mean(xent)

with tf.variable_scope('Train'):
    # One Adam step per sess.run(train, ...) minimizing the mean cross-entropy.
    optimizer = tf.train.AdamOptimizer(learning_rate=alpha)
    train = optimizer.minimize(cost)

with tf.variable_scope('Metrics'):
    # Predicted class = arg-max over logits; accuracy = fraction of
    # predictions matching the one-hot ground truth.
    predict = tf.argmax(logits, axis=1)
    hits = tf.equal(predict, tf.argmax(ph_y, axis=1))
    acc = tf.reduce_mean(tf.cast(hits, tf.float32))

with tf.variable_scope('Summary'):
    # Scalar summaries for TensorBoard; merge_all folds them into a single
    # fetchable op (these are the only summaries defined in this graph).
    for tag, tensor in (('cost', cost), ('acc', acc)):
        tf.summary.scalar(tag, tensor)
    summary = tf.summary.merge_all()

with tf.Session() as sess:
    # FileWriter as a context manager guarantees the event file is flushed
    # and closed even if training raises.
    with tf.summary.FileWriter('./_log/' + file_name, sess.graph) as fw:
        sess.run(tf.global_variables_initializer())

        g_step = 0  # completed global steps; 1-based after the increment below
        print('TRAINING STARTED!')
        for epoch in range(n_epoch):
            total_batch = int(np.ceil(m_train / batch_size))
            group = int(np.ceil(total_batch / 10))  # log roughly 10x per epoch
            for i in range(total_batch):
                bx, by = mnist.train.next_batch(batch_size)
                _, costv, accv, sv = sess.run([train, cost, acc, summary], feed_dict={ph_x: bx, ph_y: by})
                fw.add_summary(sv, g_step)
                g_step += 1
                if i % group == 0:
                    # g_step was just incremented, so it already holds the
                    # 1-based step number; the original printed `g_step + 1`
                    # and over-counted every logged step by one.
                    print(f'g_step#{g_step}: epoch#{epoch + 1}: batch#{i + 1}: cost = {costv}, acc = {accv}')
                if np.isclose(1.0, accv):
                    break
            # Emit a closing line for the epoch unless the last batch logged.
            if i % group != 0:
                print(f'g_step#{g_step}: epoch#{epoch + 1}: batch#{i + 1}: cost = {costv}, acc = {accv}')
            # Early-stop once a batch hits (approximately) perfect accuracy.
            if np.isclose(1.0, accv):
                print('TRAINING CONVERGED!')
                break
        print('TRAINING OVER!')

        print('Testing acc:')
        accv = sess.run(acc, feed_dict={ph_x: mnist.test.images, ph_y: mnist.test.labels})
        print(accv)
