import sys
import warnings
import numpy as np
import matplotlib.pyplot as plt
import os
import tensorflow as tsf
import tensorflow.compat.v1 as tf
from tensorflow.examples.tutorials.mnist import input_data

# Escalate UserWarning to an exception so library warnings fail fast.
warnings.simplefilter('error', UserWarning)

# Fix both RNG seeds for reproducible shuffles (numpy) and weight init (TF).
np.random.seed(1)
tf.set_random_seed(777)
figsize = [16, 8]  # matplotlib figure size in inches
spr = 3  # subplot row
spc = 8  # subplot column
spn = 0  # current subplot slot, advanced as panels are drawn
plt_started = False  # True once a matplotlib figure has been created

useCache = True  # restore weights from a saved checkpoint when one exists
ver = '1.24'  # version tag appended to the checkpoint path
alpha = 0.01  # Adam learning rate
training_epochs = 2
batch_size = 100

# load data
# MNIST via the deprecated tf.examples tutorial loader; labels come back one-hot.
mnist = input_data.read_data_sets('./data/mnist', one_hot=True)
x_train = mnist.train.images  # (m, n) float32 — presumably already in [0, 1]; verify
m, n = x_train.shape  # m = number of training examples, n = pixels per image
print(m, n)
nn = int(np.sqrt(n))  # image side length (images are square, nn x nn)
L1, L2, L3 = 32, 64, 10  # conv1 filters, conv2 filters, output classes
y_train = mnist.train.labels
# Shuffle the training set once here (it is permuted a second time further down).
rand_idx = np.random.permutation(m)
x_train = x_train[rand_idx]
y_train = y_train[rand_idx]
x_test = mnist.test.images
y_test = mnist.test.labels
y_test_ohdc = np.argmax(y_test, axis=1)  # one-hot decoded test labels (class ids)


# scale data
def scale_date(data):
    """Min-max scale *data* to the range [0, 1] in place.

    NOTE(review): the name looks like a typo for ``scale_data``; kept
    unchanged for backward compatibility with existing call sites.

    :param data: numpy float array; modified in place, nothing returned.
                 Constant-valued input is mapped to all zeros instead of
                 dividing by zero.
    """
    # Local names chosen so the builtins min/max are not shadowed.
    lo = data.min()
    hi = data.max()
    data -= lo
    span = hi - lo
    if span != 0:  # guard: constant input would otherwise divide by zero
        data /= span


# Min-max scale both splits in place.
scale_date(x_train)
scale_date(x_test)

# shuffle data
# NOTE(review): the training set was already permuted once above, so this
# second shuffle is redundant but harmless.
rnd_idx = np.random.permutation(m)
x_train = x_train[rnd_idx]
y_train = y_train[rnd_idx]
idx = np.arange(m)  # only used by the commented-out subsampling below
# multiple = 20
# data_train = data_train[idx % multiple == 0]  # compress, tmp
# y_train = y_train[idx % multiple == 0]  # compress, tmp
# print(f'[[[[[[[ Sampling by 1/{multiple} to let it fast! ]]]]]]]]', file=sys.stderr)

with tf.name_scope('Input'):
    # Placeholders for a batch of flattened images and their one-hot labels.
    ph_x = tf.placeholder(tf.float32, [None, n], 'ph_x')
    ph_y = tf.placeholder(tf.float32, [None, L3], 'ph_y')
    # Restore the NHWC layout (batch, nn, nn, 1) expected by conv2d.
    pic_x = tf.reshape(ph_x, [-1, nn, nn, 1])

with tf.name_scope('FP'):
    # --- Conv layer 1: 3x3 filters, 1 input channel -> L1 feature maps ---
    w1 = tf.Variable(tf.random.normal([3, 3, 1, L1]), dtype=tf.float32, name='w1')

    a1c = tf.nn.conv2d(pic_x, w1, strides=[1, 1, 1, 1], padding='SAME')
    # shape_b is the broadcastable per-feature-map bias shape; only needed by
    # the commented-out bias terms below.
    shape = a1c.shape.as_list()
    shape_b = shape.copy()
    shape_b[0] = 1
    # b1 = tf.Variable(tf.random.normal(shape_b), dtype=tf.float32, name='b1')
    # a1 += b1
    a1 = tf.nn.relu(a1c)
    # 2x2 max pooling with stride 2 halves the spatial resolution.
    a1 = tf.nn.max_pool(a1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # --- Conv layer 2: 3x3 filters, L1 -> L2 feature maps ---
    w2 = tf.Variable(tf.random.normal([3, 3, L1, L2]), dtype=tf.float32, name='w2')
    a2c = tf.nn.conv2d(a1, w2, strides=[1, 1, 1, 1], padding='SAME')
    shape = a2c.shape.as_list()
    shape_b = shape.copy()
    shape_b[0] = 1
    # b2 = tf.Variable(tf.random.normal(shape_b), dtype=tf.float32, name='b2')
    # a2 += b2
    a2 = tf.nn.relu(a2c)
    a2 = tf.nn.max_pool(a2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # --- Fully connected output layer ---
    # shape_a = product of all non-batch dims of a2 (flattened feature count);
    # the batch dim (None) is replaced by 1 so the product is well-defined.
    shape = a2.shape.as_list()
    shape_b = shape.copy()
    shape_b[0] = 1
    shape_a = 1
    for i in shape_b:
        shape_a *= i

    a2 = tf.reshape(a2, [-1, shape_a])
    # w3 = tf.Variable(tf.random.normal([shape_a, L3]), dtype=tf.float32, name='w3')
    w3 = tf.get_variable('w3', shape=[shape_a, L3], initializer=tsf.contrib.layers.xavier_initializer())  # much better
    b3 = tf.Variable(tf.random.normal([1, L3]), dtype=tf.float32, name='b3')
    z3 = tf.matmul(a2, w3) + b3  # logits
    z3 = tf.identity(z3, 'z3')  # named alias so the logits tensor is addressable
    a3 = tf.nn.softmax(z3, name='a3')  # class probabilities

with tf.name_scope('cost'):
    epsilon = 1e-5  # only used by the hand-rolled cross-entropy kept below for reference
    # cost = tf.math.negative(
    #     tf.reduce_mean(
    #         tf.reduce_sum(ph_y * tf.math.log(a3 + epsilon), axis=1)
    #     ),
    #     name='cost'
    # )  # ATTENTION with only mean, alpha > 0.1; with only sum, alpha = 0.0001

    # Cross-entropy computed directly from the logits (numerically stable).
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=z3, labels=ph_y))  # much better

with tf.name_scope('score'):
    # Accuracy: fraction of examples whose argmax prediction matches the label.
    score = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(z3, axis=1), tf.argmax(ph_y, axis=1)), dtype=tf.float32))

# train
with tf.name_scope('train'):
    # One Adam step on the cross-entropy; also creates the optimizer's slot variables.
    update = tf.train.AdamOptimizer(learning_rate=alpha).minimize(cost)

with tf.Session() as sess:
    # Dump the graph for TensorBoard; log/checkpoint paths are keyed by script name.
    tf.summary.FileWriter('./log/' + os.path.basename(__file__), sess.graph)
    save_path = './save/' + os.path.basename(__file__) + '_' + ver
    saver = tf.train.Saver()
    if useCache and os.path.exists(save_path + '.meta'):
        # A checkpoint exists: restore the trained weights instead of retraining.
        print('Cache file exists, will read thetas in cache file...')
        saver.restore(sess, save_path)
    else:
        sess.run(tf.global_variables_initializer())
        j_his = np.zeros(training_epochs)  # per-epoch average cost history
        for epoch in range(training_epochs):
            total_batch = mnist.train.num_examples // batch_size
            j_avg = 0
            score_avg = 0
            group = total_batch // 10  # report progress roughly 10 times per epoch
            # NOTE(review): if total_batch < 10 then group == 0 and `i % group`
            # raises ZeroDivisionError; fine for full MNIST (550 batches).
            for i in range(total_batch):
                # use scaled data
                j, _, score_v = sess.run([cost, update, score],
                                         feed_dict={ph_x: x_train[i*batch_size:(i+1)*batch_size],
                                                    ph_y: y_train[i*batch_size:(i+1)*batch_size]})

                # use not scaled data
                # batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                # j, _, score_v = sess.run([cost, update, score],
                #                          feed_dict={ph_x: batch_xs,
                #                                     ph_y: batch_ys})
                if i % group == 0:
                    print(f'epoch #{epoch + 1}, batch #{i + 1}: cost = {j}, acc = {score_v} [my]')
                j_avg += j
                score_avg += score_v
            # Also report the final batch when the loop didn't end on a report
            # step (relies on the loop variable i leaking out of the for loop).
            if i % group != 0:
                print(f'epoch #{epoch + 1}, batch #{i + 1}: cost = {j}, acc = {score_v} [my]')
            j_avg /= total_batch
            score_avg /= total_batch
            j_his[epoch] = j_avg
            print(f'epoch #{epoch + 1}: avg cost = {j_avg}, avg score = {score_avg}')
        saver.save(sess, save_path)
        # Plot the training cost curve in the first subplot slot.
        plt.figure(figsize=figsize)
        plt_started = True
        spn += 1
        plt.subplot(spr, spc, spn)
        plt.plot(j_his, label='cost function value')
        plt.grid()
        plt.legend()

    # test
    # Evaluate on the full test set and also fetch the intermediate
    # activations (conv outputs and softmax) for the visualizations below.
    score_v, a1v, a2v, a3v = sess.run([score, a1c, a2c, a3], feed_dict={ph_x: x_test, ph_y: y_test})
    print(f'Testing score = {score_v}')

    if not plt_started:
        # Training was skipped (restored from cache): create the figure now.
        plt.figure(figsize=figsize)

    # show detail
    def output(tgt, num_all):
        """Plot up to *num_all* test samples of digit *tgt* into the shared grid.

        For each matching test sample four panels are drawn: the input image
        (titled "truth => prediction"), one slice of the conv1 activation, one
        slice of the conv2 activation, and the softmax output as a column.

        Reads the module globals x_test / y_test_ohdc and the fetched
        activations a1v / a2v / a3v; advances the global subplot counter spn
        and stops as soon as the spr x spc grid is full.

        :param tgt: digit class (0-9) to look for in the test set.
        :param num_all: maximum number of samples to visualize.
        """
        global spn
        num = 0
        idx = -1
        while True:
            idx += 1
            if idx >= len(y_test_ohdc):  # ran out of test samples before num_all matches
                break
            if y_test_ohdc[idx] != tgt:
                continue
            num += 1
            if num > num_all:
                break
            # Panel 1: input image, titled "true label => predicted label".
            # (The original only bounds-checked panels 3 and 4; checking before
            # every subplot call prevents a ValueError when the grid overflows.)
            spn += 1
            if spn > spr * spc:
                break
            plt.subplot(spr, spc, spn)
            plt.title(str(y_test_ohdc[idx]) + ' => ' + str(a3v[idx].argmax()))
            plt.imshow(x_test[idx].reshape(nn, nn))
            # Panel 2: one (nn/2 x nn/2) slice of the conv1 activation volume.
            spn += 1
            if spn > spr * spc:
                break
            plt.subplot(spr, spc, spn)
            plt.imshow(a1v[idx].reshape(-1, nn // 2, nn // 2)[20])
            # Panel 3: one slice of the conv2 activation volume.
            spn += 1
            if spn > spr * spc:
                break
            plt.subplot(spr, spc, spn)
            plt.imshow(a2v[idx].reshape(-1, nn // 2, nn // 2)[2])
            # Panel 4: the 10-way softmax output rendered as a column image.
            spn += 1
            if spn > spr * spc:
                break
            plt.subplot(spr, spc, spn)
            plt.imshow(a3v[idx].reshape(-1, 1))


    # Visualize a few test samples for digits 8 and 5, then display everything.
    output(8, 3)
    output(5, 3)

    plt.show()
