import sys
import warnings
import numpy as np
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

warnings.simplefilter('error', UserWarning)

# Reproducibility: seed NumPy (shuffling/subsampling) and the TF graph-level
# RNG (weight initialisation).  tf.compat.v1 is used here for consistency
# with the rest of this file; the bare tf.set_random_seed alias was removed
# in TF2, while tf.compat.v1.set_random_seed works on both 1.x and 2.x.
np.random.seed(1)
tf.compat.v1.set_random_seed(777)
plt.figure(figsize=[16, 8])
spr = 3  # subplot row
spc = 8  # subplot column
spn = 0  # current subplot slot; incremented before each plt.subplot call

# load data
# NOTE(review): tensorflow.examples.tutorials was removed in TF2 — this
# script requires TF 1.x (or an environment that still ships the tutorials
# package).
mnist = input_data.read_data_sets('./data/mnist', one_hot=True)
data_train = mnist.train.images
m, n = data_train.shape  # m samples, n pixels per flattened image (784 for MNIST)
print(m, n)
nn = int(np.sqrt(n))  # image side length (28 for MNIST)
LL1, LL2 = int(nn * 1.2), nn // 2  # side lengths used to draw a1/a2 as square images
L1, L2, L3 = LL1 ** 2, LL2 ** 2, 10  # layer widths; L3 = 10 digit classes
y_train = mnist.train.labels
data_test = mnist.test.images
y_test = mnist.test.labels
y_test_ohdc = np.argmax(y_test, axis=1)  # one-hot-decoded test labels

# scale data
def scale_date(data):
    """Standardize ``data`` in place to zero mean and unit variance.

    Pools all elements for a single global mean/std (no per-feature
    scaling).  ``data`` must be a float ndarray: the in-place ops would
    raise on an integer array.  Mutates its argument and returns None,
    per stdlib convention for in-place operations.

    NOTE(review): train and test sets are each scaled with their own
    statistics below — consider reusing the training mu/sigma for the
    test set to avoid leakage/skew.
    """
    mu = data.mean()
    sigma = data.std()
    data -= mu
    if sigma > 0:  # constant input: centering suffices; avoid 0/0 -> NaN
        data /= sigma


# Standardize both sets in place (each with its own global statistics).
scale_date(data_train)
scale_date(data_test)

# shuffle data
shuffle_order = np.random.permutation(m)
data_train = data_train[shuffle_order]
y_train = y_train[shuffle_order]

# Keep every `multiple`-th example of the shuffled set for a quick run.
multiple = 20
keep = np.arange(m) % multiple == 0
data_train = data_train[keep]  # compress, tmp
y_train = y_train[keep]  # compress, tmp
print(f'[[[[[[[ Sampling by 1/{multiple} to let it fast! ]]]]]]]]', file=sys.stderr)

with tf.name_scope('Input'):
    # Feeds: flattened images (batch x n) and one-hot labels (batch x L3).
    ph_x = tf.compat.v1.placeholder(tf.float32, [None, n], 'ph_x')
    ph_y = tf.compat.v1.placeholder(tf.float32, [None, L3], 'ph_y')

with tf.name_scope('FP'):
    # Forward pass of a 3-layer MLP: n -> L1 -> L2 -> L3 with sigmoid
    # hidden activations and a softmax output.
    # NOTE(review): weights come from an unscaled unit normal (no
    # 1/sqrt(fan_in) factor); with only the graph-level seed set, the
    # initial values also depend on op creation order — do not reorder.
    w1 = tf.Variable(tf.random.normal([n, L1]), dtype=tf.float32, name='w1')
    w2 = tf.Variable(tf.random.normal([L1, L2]), dtype=tf.float32, name='w2')
    w3 = tf.Variable(tf.random.normal([L2, L3]), dtype=tf.float32, name='w3')
    b1 = tf.Variable(tf.random.normal([1, L1]), dtype=tf.float32, name='b1')
    b2 = tf.Variable(tf.random.normal([1, L2]), dtype=tf.float32, name='b2')
    b3 = tf.Variable(tf.random.normal([1, L3]), dtype=tf.float32, name='b3')
    # tf.identity calls only attach readable names for the TensorBoard graph.
    z1 = tf.matmul(ph_x, w1) + b1
    z1 = tf.identity(z1, 'z1')
    a1 = tf.sigmoid(z1)
    a1 = tf.identity(a1, 'a1')
    z2 = tf.matmul(a1, w2) + b2
    z2 = tf.identity(z2, 'z2')
    a2 = tf.sigmoid(z2, name='a2')
    z3 = tf.matmul(a2, w3) + b3
    z3 = tf.identity(z3, 'z3')
    a3 = tf.nn.softmax(z3, name='a3')

with tf.name_scope('cost'):
    # Cross-entropy cost: J = -mean_i( sum_k y_ik * log(a3_ik) ).
    # a3 is clipped away from 0 so log() cannot yield -inf/NaN when the
    # softmax underflows.  This only affects the reported cost value: the
    # manual backprop in the BP scope uses a3 directly, so gradients are
    # unchanged.
    cost = tf.math.negative(
        tf.reduce_mean(
            tf.reduce_sum(ph_y * tf.math.log(tf.clip_by_value(a3, 1e-10, 1.0)), axis=1)
        ),
        name='cost'
    )  # ATTENTION with only mean, alpha > 0.1; with only sum, alpha = 0.0001
	
with tf.name_scope('BP'):
    # Manual backpropagation (no tf.gradients).  The tensors below follow
    # the chain rule for sigmoid hidden layers and a softmax +
    # cross-entropy output, whose combined derivative collapses to a3 - y.
    # dz3 = ph_y * (a3 - 1) + (1 - ph_y) * a3  # m x L3  # ATTENTION derivative of softmax
    dz3 = a3 - ph_y  # ATTENTION derivative of softmax — it really is just this!
    da2 = tf.matmul(dz3, tf.transpose(w3))  # m x L2
    dz2 = da2 * a2 * (1 - a2)  # m x L2  (sigmoid' = a * (1 - a))
    da1 = tf.matmul(dz2, tf.transpose(w2))  # m x L1
    dz1 = da1 * a1 * (1 - a1)  # m x L1
    dw3 = tf.matmul(tf.transpose(a2), dz3) / tf.cast(tf.shape(a2)[0], tf.float32)  # L2 x L3  # ATTENTION the only way for 1/m
    db3 = tf.reduce_mean(dz3, axis=0)  # 1 x L3
    dw2 = tf.matmul(tf.transpose(a1), dz2) / tf.cast(tf.shape(a1)[0], tf.float32)  # L1 x L2
    db2 = tf.reduce_mean(dz2, axis=0)  # 1 x L2
    dw1 = tf.matmul(tf.transpose(ph_x), dz1) / tf.cast(tf.shape(ph_x)[0], tf.float32)  # n x L1
    db1 = tf.reduce_mean(dz1, axis=0)  # 1 x L1

with tf.name_scope('score'):
    # Accuracy: fraction of samples whose argmax prediction matches the
    # argmax of the one-hot label.
    pred_cls = tf.argmax(a3, axis=1)
    true_cls = tf.argmax(ph_y, axis=1)
    hits = tf.cast(tf.equal(pred_cls, true_cls), dtype=tf.float32)
    score = tf.reduce_mean(hits)

# train
useCache = True  # reuse a saved checkpoint instead of retraining, if one exists
ver = '1.21'  # version tag appended to the checkpoint file name
alpha = 0.1  # learning rate (see the cost-scope comment on mean vs sum reduction)
iter0 = 400  # number of full-batch gradient-descent iterations
group = 20  # print progress every `group` iterations
with tf.name_scope('train'):
    # One plain gradient-descent step: theta <- theta - alpha * dtheta.
    # NOTE(review): all six assigns execute inside one sess.run(update);
    # TF1 gives no ordering guarantee between sibling ops, so a gradient
    # tensor could in principle read a weight that another assign has
    # already updated in the same step — worth confirming this is benign.
    update = [
        tf.compat.v1.assign(w3, w3 - alpha * dw3),
        tf.compat.v1.assign(w2, w2 - alpha * dw2),
        tf.compat.v1.assign(w1, w1 - alpha * dw1),
        tf.compat.v1.assign(b3, b3 - alpha * db3),
        tf.compat.v1.assign(b2, b2 - alpha * db2),
        tf.compat.v1.assign(b1, b1 - alpha * db1),
    ]

with tf.compat.v1.Session() as sess:
    # Dump the graph for TensorBoard.  NOTE(review): the FileWriter is
    # never closed or flushed explicitly — relies on process exit.
    tf.compat.v1.summary.FileWriter('./log/' + os.path.basename(__file__), sess.graph)
    save_path = './save/' + os.path.basename(__file__) + '_' + ver
    saver = tf.compat.v1.train.Saver()
    if useCache and os.path.exists(save_path + '.meta'):
        # A checkpoint exists: restore trained parameters, skip training.
        print('Cache file exists, will read thetas in cache file...')
        saver.restore(sess, save_path)
    else:
        sess.run(tf.compat.v1.global_variables_initializer())
        j_his = np.zeros(iter0)  # cost history, one entry per iteration
        # Full-batch gradient descent: every step feeds the entire
        # (subsampled) training set.
        for i in range(iter0):
            _, j, score_v = sess.run([update, cost, score], feed_dict={ph_x: data_train, ph_y: y_train})
            j_his[i] = j
            if 0 == i % group:
                print(f'#{i + 1} cost func value = {j}, score = {score_v}')
        # Report the final iteration unless the loop just printed it.
        # NOTE(review): relies on `i`/`j`/`score_v` leaking out of the for
        # loop — this would raise NameError if iter0 were 0.
        if 0 != i % group:
            print(f'#{i + 1} cost func value = {j}, score = {score_v}')
        saver.save(sess, save_path)
        # First subplot: the training-cost curve.
        spn += 1
        plt.subplot(spr, spc, spn)
        plt.plot(j_his, label='cost function value')
        plt.grid()
        plt.legend()

    # test
    # Evaluate accuracy on the test set and keep the activations for the
    # per-sample visualisations below.
    score_v, a3v, a2v, a1v = sess.run([score, a3, a2, a1], feed_dict={ph_x: data_test, ph_y: y_test})
    print(f'Testing score = {score_v}')

    # show detail
    def output(tgt, num_all):
        """Plot up to ``num_all`` test samples of digit ``tgt``.

        For each matching sample, up to four panels are drawn on the shared
        spr x spc grid: the input image titled "label => prediction", the
        hidden activations a1 and a2 reshaped to squares, and the output
        distribution a3 as a single column.  Stops early when the grid is
        full or the test set runs out of samples of ``tgt`` (the original
        unbounded scan could IndexError for a rare target, and could
        overflow the grid on the first two panels).
        """
        global spn

        def _next_panel():
            # Advance to the next subplot slot; False when the grid is full.
            global spn
            spn += 1
            if spn > spr * spc:
                return False
            plt.subplot(spr, spc, spn)
            return True

        shown = 0
        for idx in range(len(y_test_ohdc)):  # bounded scan: no IndexError
            if y_test_ohdc[idx] != tgt:
                continue
            shown += 1
            if shown > num_all:
                break
            if not _next_panel():
                break
            plt.title(str(y_test_ohdc[idx]) + ' => ' + str(a3v[idx].argmax()))
            plt.imshow(data_test[idx].reshape(nn, nn))
            if not _next_panel():
                break
            plt.imshow(a1v[idx].reshape(LL1, LL1))
            if not _next_panel():
                break
            plt.imshow(a2v[idx].reshape(LL2, LL2))
            if not _next_panel():
                break
            plt.imshow(a3v[idx].reshape(-1, 1))

    # Visualise three test examples each for the digits 8 and 5
    # (input image plus hidden/output activations).
    output(8, 3)
    output(5, 3)

    plt.show()
