import os
import tensorflow.compat.v1 as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np

# Reproducibility: seed NumPy (data shuffling) and TensorFlow (weight init).
np.random.seed(777)
tf.set_random_seed(777)

epsilon = 1e-5  # added inside log() to keep the cross-entropy finite at 0
alpha = 0.0001  # learning rate for the manual gradient-descent updates
n_epoch = 10    # passes over the training set
batch = 1000    # mini-batch size

# Widths of the four hidden layers of the fully-connected network.
L1 = 512
L2 = 1000
L3 = 400
L4 = 400
mnist = input_data.read_data_sets("MNIST_data_bak", one_hot=True)


def _min_max_scale(a):
    """Scale array *a* in place to the [0, 1] range (min-max normalization).

    Replaces the original inline code, which bound the names ``min``/``max``
    at module scope and shadowed the Python builtins.
    """
    lo = a.min()
    hi = a.max()
    a -= lo
    a /= hi - lo
    return a


# train data: pixel values min-max scaled to [0, 1]
x_train = _min_max_scale(np.array(mnist.train.images, dtype=np.float32))
y_train = mnist.train.labels
m, n = x_train.shape      # m = number of samples, n = features per image
_, n_cls = y_train.shape  # number of classes

# test data
# NOTE(review): scaled with the *test* split's own min/max rather than the
# training statistics; harmless here because MNIST pixels span the same
# range in both splits, but confirm if this is reused on other data.
x_test = _min_max_scale(np.array(mnist.test.images, dtype=np.float32))
y_test = mnist.test.labels

# shuffle both splits once up front (training is re-shuffled every epoch)
idx = np.random.permutation(m)
x_train = x_train[idx]
y_train = y_train[idx]
m_test = len(x_test)
idx = np.random.permutation(m_test)
x_test = x_test[idx]
y_test = y_test[idx]

x = tf.placeholder(tf.float32, [None, n], 'placeholder_x')
y = tf.placeholder(tf.float32, [None, n_cls], 'placeholder_y')


def _affine(inputs, fan_in, fan_out, tag):
    """Build one fully-connected layer's parameters and pre-activation.

    Creates the weight/bias variables (named ``w<tag>``/``b<tag>``, randomly
    initialized) in the same order as the original inline code, and returns
    (w, b, inputs @ w + b).
    """
    w = tf.Variable(tf.random.normal([fan_in, fan_out]), dtype=tf.float32, name='w' + tag)
    b = tf.Variable(tf.random.normal([1, fan_out]), dtype=tf.float32, name='b' + tag)
    return w, b, tf.matmul(inputs, w) + b


# Hidden layer 1 (ReLU; sigmoid alternative: tf.nn.sigmoid(z1, name='a1'))
w1, b1, z1 = _affine(x, n, L1, '1')
a1 = tf.nn.relu(z1, name='a1')

# Hidden layer 2 (sigmoid alternative: tf.nn.sigmoid(z2, name='a2'))
w2, b2, z2 = _affine(a1, L1, L2, '2')
a2 = tf.nn.relu(z2, name='a2')

# Hidden layer 3 (sigmoid alternative: tf.nn.sigmoid(z3, name='a3'))
w3, b3, z3 = _affine(a2, L2, L3, '3')
a3 = tf.nn.relu(z3, name='a3')

# Hidden layer 4 (sigmoid alternative: tf.nn.sigmoid(z4, name='a4'))
w4, b4, z4 = _affine(a3, L3, L4, '4')
a4 = tf.nn.relu(z4, name='a4')

# Output layer: softmax probabilities over the n_cls classes.
w5, b5, z5 = _affine(a4, L4, n_cls, '5')
a5 = tf.nn.softmax(z5, name='a5')

# Cross-entropy cost; epsilon prevents log(0) when a softmax output is exactly 0.
cost = tf.reduce_mean(- tf.reduce_sum(y * tf.math.log(a5 + epsilon), axis=1), name='cost')

# ---- Manual backpropagation ('?' in shape comments is the batch size) ----

# Batch size as a float scalar; every dW term below averages over the batch
# with it (the original divided by tf.shape(a2)[0] / tf.shape(a1)[0]
# inconsistently — same value, now hoisted to one tensor).
m_batch = tf.cast(tf.shape(x)[0], tf.float32)

# Gradient of softmax + cross-entropy w.r.t. the logits collapses to (a5 - y).
dz5 = a5 - y  # ? x n_cls

da4 = tf.matmul(dz5, tf.transpose(w5), name='da4')  # ? x n_cls, n_cls x L4 => ? x L4

# ReLU derivative: pass gradient only where the pre-activation was positive.
# (sigmoid alternative: dz4 = da4 * a4 * (1 - a4))
dz4 = da4 * tf.cast(z4 > 0, tf.float32)  # ? x L4

# BUGFIX: this op was named 'da2' (copy-paste), colliding with the real da2 below.
da3 = tf.matmul(dz4, tf.transpose(w4), name='da3')  # ? x L4, L4 x L3 => ? x L3

# sigmoid alternative: dz3 = da3 * a3 * (1 - a3)
dz3 = da3 * tf.cast(z3 > 0, tf.float32)  # ? x L3

da2 = tf.matmul(dz3, tf.transpose(w3), name='da2')  # ? x L3, L3 x L2 => ? x L2

# sigmoid alternative: dz2 = da2 * a2 * (1 - a2)
dz2 = da2 * tf.cast(z2 > 0, tf.float32)  # ? x L2

da1 = tf.matmul(dz2, tf.transpose(w2), name='da1')  # ? x L2, L2 x L1 => ? x L1

# sigmoid alternative: dz1 = da1 * a1 * (1 - a1)
dz1 = da1 * tf.cast(z1 > 0, tf.float32)  # ? x L1

# Parameter gradients, averaged over the batch.
dw5 = tf.matmul(tf.transpose(a4), dz5) / m_batch  # L4 x ?, ? x n_cls => L4 x n_cls
db5 = tf.reduce_mean(dz5, axis=0, name='db5')     # 1 x n_cls
dw4 = tf.matmul(tf.transpose(a3), dz4) / m_batch  # L3 x ?, ? x L4 => L3 x L4
db4 = tf.reduce_mean(dz4, axis=0, name='db4')     # 1 x L4
dw3 = tf.matmul(tf.transpose(a2), dz3) / m_batch  # L2 x ?, ? x L3 => L2 x L3
db3 = tf.reduce_mean(dz3, axis=0, name='db3')     # 1 x L3
dw2 = tf.matmul(tf.transpose(a1), dz2) / m_batch  # L1 x ?, ? x L2 => L1 x L2
db2 = tf.reduce_mean(dz2, axis=0, name='db2')     # 1 x L2
dw1 = tf.matmul(tf.transpose(x), dz1) / m_batch   # n x ?, ? x L1 => n x L1
db1 = tf.reduce_mean(dz1, axis=0, name='db1')     # 1 x L1

# One gradient-descent assign per parameter; running `update` in a session
# applies a single SGD step with learning rate `alpha`.
_param_grads = [
    (w5, dw5), (b5, db5),
    (w4, dw4), (b4, db4),
    (w3, dw3), (b3, db3),
    (w2, dw2), (b2, db2),
    (w1, dw1), (b1, db1),
]
update = [tf.assign(var, var - alpha * grad) for var, grad in _param_grads]

# Accuracy: fraction of samples whose arg-max prediction matches the
# arg-max of the one-hot label.
_correct = tf.equal(tf.argmax(a5, axis=1), tf.argmax(y, axis=1))
score = tf.reduce_mean(tf.cast(_correct, dtype=tf.float32))

with tf.Session() as sess:
    # Dump the graph for TensorBoard; the writer is only needed long enough
    # to record the graph, hence the immediately-closed `with`.
    with tf.summary.FileWriter('./log/' + os.path.basename(__file__), sess.graph) as fw:
        pass
    sess.run(tf.global_variables_initializer())

    for epoch in range(n_epoch):
        print(f'epoch#{epoch + 1}:')
        n_batch = m // batch
        # Print progress roughly 10 times per epoch. BUGFIX: max(1, ...)
        # guards against ZeroDivisionError in `i % group` when n_batch < 10
        # (e.g. if `batch` is raised above m // 10).
        group = max(1, n_batch // 10)
        costv_avg = 0
        scorev_avg = 0
        # Re-shuffle the training set at the start of every epoch.
        rand_idx = np.random.permutation(m)
        x_train = x_train[rand_idx]
        y_train = y_train[rand_idx]
        for i in range(n_batch):
            _, costv, scorev = sess.run([update, cost, score],
                                        feed_dict={x: x_train[i*batch:(i+1)*batch], y: y_train[i*batch:(i+1)*batch]})
            costv_avg += costv
            scorev_avg += scorev
            if i % group == 0:
                print(f'batch#{i + 1}, cost = {costv}, acc = {scorev}')
        costv_avg /= n_batch
        scorev_avg /= n_batch
        print(f'epoch#{epoch + 1}, cost avg = {costv_avg}, acc avg = {scorev_avg}')

    # Final evaluation on the (already shuffled) full test split.
    print(f'Test score = {sess.run(score, feed_dict={x: x_test, y: y_test})}')
