import sys
import warnings
import numpy as np
import matplotlib.pyplot as plt
import os
import tensorflow as tsf
import tensorflow.compat.v1 as tf
from tensorflow.examples.tutorials.mnist import input_data
from python_ai.DL.tensorflow.common.xsummary import *

# promote UserWarnings to hard errors so they are not silently ignored
warnings.simplefilter('error', UserWarning)

# fix both RNGs so the shuffles below and TF weight initialization are reproducible
np.random.seed(1)
tf.set_random_seed(777)
# matplotlib figure bookkeeping (spn is mutated by the plotting code at the bottom)
figsize = [16, 8]
spr = 3  # subplot row
spc = 8  # subplot column
spn = 0  # current subplot index (incremented before each plt.subplot call)
plt_started = False  # True once the cost-history figure has been created

useCache = True  # reuse a saved checkpoint instead of retraining when one exists
ver = '1.28'  # version tag appended to the checkpoint path
alpha = 0.01  # Adam learning rate
training_epochs = 2
batch_size = 100

# load data (downloads MNIST to ./data/mnist on first run)
mnist = input_data.read_data_sets('./data/mnist', one_hot=True)
x_train = mnist.train.images
m, n = x_train.shape  # m = number of training examples, n = 784 flattened pixels
print(m, n)
nn = int(np.sqrt(n))  # image side length (28)
L1, L2, L3 = 32, 64, 10  # NOTE(review): layer-size constants, apparently unused below — confirm
y_train = mnist.train.labels
# shuffle the training set (note: it is permuted a second time after scaling below)
rand_idx = np.random.permutation(m)
x_train = x_train[rand_idx]
y_train = y_train[rand_idx]
x_test = mnist.test.images
y_test = mnist.test.labels
y_test_ohdc = np.argmax(y_test, axis=1)  # one-hot-decoded test labels (class indices)


# scale data
def scale_date(data):
    """Min-max scale `data` in place to the range [0, 1].

    Args:
        data: mutable float numpy array; modified in place.

    Returns:
        None — mutation happens in place, matching the original contract.

    Note:
        The (misspelled) name is kept because the call sites below use it.
        A constant-valued array is left at all zeros instead of dividing
        by zero (the original produced inf/nan via ``data /= 0``).
    """
    lo = data.min()  # renamed from min/max to avoid shadowing the builtins
    hi = data.max()
    data -= lo
    span = hi - lo
    if span != 0:  # guard: constant input would otherwise divide by zero
        data /= span


# scale both splits in place to [0, 1]
scale_date(x_train)
scale_date(x_test)

# shuffle data (second shuffle — the training set was already permuted above)
rnd_idx = np.random.permutation(m)
x_train = x_train[rnd_idx]
y_train = y_train[rnd_idx]
idx = np.arange(m)  # NOTE(review): only used by the commented-out subsampling below
# multiple = 20
# data_train = data_train[idx % multiple == 0]  # compress, tmp
# y_train = y_train[idx % multiple == 0]  # compress, tmp
# print(f'[[[[[[[ Sampling by 1/{multiple} to let it fast! ]]]]]]]]', file=sys.stderr)

# graph inputs: flattened 28x28 images and one-hot labels
with tf.variable_scope('Input'):
    ph_x = tf.placeholder(tf.float32, [None, 784], name='ph_x')
    ph_y = tf.placeholder(tf.float32, [None, 10], name='ph_y')

# forward pass: two conv/pool blocks followed by two dense layers
with tf.variable_scope('FP'):
    # reshape flat pixels back into NHWC images for the conv layers
    pic_x = tf.reshape(ph_x, [-1, 28, 28, 1], name='pic_x')
    tf.summary.image('pic_x', pic_x)

    # conv block 1: 3x3 conv (VALID) -> ReLU -> 2x2 max pool
    w1 = tf.Variable(tf.random.normal([3, 3, 1, 64]), dtype=tf.float32, name='w1')
    conv1 = tf.nn.conv2d(pic_x, w1, strides=[1, 1, 1, 1], padding='VALID', name='conv1')  # ?x26x26x64
    x_summary_image('conv1_0', conv1, 64)
    relu1 = tf.nn.relu(conv1, name='relu1')
    x_summary_image('relu1_0', relu1, 64)
    a1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # ?x13x13x64

    # conv block 2: 3x3 conv (VALID) -> ReLU -> 2x2 max pool
    w2 = tf.Variable(tf.random.normal([3, 3, 64, 32]), dtype=tf.float32, name='w2')
    conv2 = tf.nn.conv2d(a1, w2, strides=[1, 1, 1, 1], padding='VALID', name='conv2')  # ?x11x11x32
    # NOTE(review): conv2/relu2 have 32 channels but 64 is requested here — confirm
    # that x_summary_image tolerates a count larger than the channel dimension
    x_summary_image('conv2_0', conv2, 64)
    relu2 = tf.nn.relu(conv2, name='relu2')
    x_summary_image('relu2_0', relu2, 64)  # fixed: previously summarized conv2 under the relu2_0 tag
    a2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # ?x6x6x32

    flat = tf.reshape(a2, [-1, 1152])  # flatten 6x6x32 for the dense layers

    # dense layers: 1152 -> 100 (sigmoid) -> 10
    w3 = tf.get_variable('w3', [1152, 100], initializer=tsf.contrib.layers.xavier_initializer())  # ATTENTION xavier
    b3 = tf.Variable(tf.random.normal([1, 100]), name='b3')
    w4 = tf.get_variable('w4', [100, 10], initializer=tsf.contrib.layers.xavier_initializer())
    b4 = tf.Variable(tf.random.normal([1, 10]), name='b4')

    z3 = tf.matmul(flat, w3) + b3
    a3 = tf.nn.sigmoid(z3, name='a3')
    z4 = tf.matmul(a3, w4) + b4  # raw class scores (the true logits)
    # NOTE(review): despite its name, this tensor holds softmax *probabilities*,
    # not raw logits; z4 is what loss functions expecting logits should consume
    logits = tf.nn.softmax(z4, name='logist')

with tf.variable_scope('cost'):
    # softmax_cross_entropy_with_logits applies softmax internally, so it must be
    # fed the raw scores z4. The original passed the already-softmaxed `logits`
    # tensor, applying softmax twice, which flattens gradients and slows learning.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=z4, labels=ph_y))
    tf.summary.scalar('cost', cost)

with tf.variable_scope('score'):
    # mean accuracy: fraction of examples whose argmax prediction matches the label
    predictions = tf.argmax(logits, axis=1)
    targets = tf.argmax(ph_y, axis=1)
    hits = tf.cast(tf.equal(predictions, targets), tf.float32)
    score = tf.reduce_mean(hits)
    tf.summary.scalar('score', score)

# train
with tf.variable_scope('train'):
    # Adam on the cross-entropy cost; alpha is defined at the top of the file
    train = tf.train.AdamOptimizer(learning_rate=alpha).minimize(cost)

with tf.variable_scope('summary'):
    # single op that evaluates every tf.summary.* registered above
    summary = tf.summary.merge_all()

with tf.Session() as sess:
    with tf.summary.FileWriter('./log/' + os.path.basename(__file__), sess.graph) as fw:
        save_path = './save/' + os.path.basename(__file__) + '_' + ver
        saver = tf.train.Saver()
        # restore trained weights when a checkpoint for this version already exists
        if useCache and os.path.exists(save_path + '.meta'):
            print('Cache file exists, will read thetas in cache file...')
            saver.restore(sess, save_path)
        else:
            # otherwise train from scratch
            sess.run(tf.global_variables_initializer())
            j_his = np.zeros(training_epochs)  # per-epoch average cost, for plotting
            g_step = 0  # global step counter for TensorBoard summaries
            for epoch in range(training_epochs):
                total_batch = mnist.train.num_examples // batch_size
                j_avg = 0
                score_avg = 0
                # report progress roughly 10 times per epoch
                # NOTE(review): group is 0 when total_batch < 10, making `i % group`
                # raise ZeroDivisionError — confirm batch sizes keep total_batch >= 10
                group = total_batch // 10
                for i in range(total_batch):
                    # use scaled data
                    j, _, score_v, sv = sess.run([cost, train, score, summary], feed_dict={ph_x: x_train[i*batch_size:(i+1)*batch_size],
                                                              ph_y: y_train[i*batch_size:(i+1)*batch_size]})
                    fw.add_summary(sv, g_step)
                    g_step += 1

                    # use not scaled data
                    # batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                    # j, _, score_v = sess.run([cost, update, score],
                    #                          feed_dict={ph_x: batch_xs,
                    #                                     ph_y: batch_ys})
                    if i % group == 0:
                        print(f'epoch #{epoch + 1}, batch #{i + 1}: cost = {j}, acc = {score_v} [my]')
                    j_avg += j
                    score_avg += score_v
                # also report the final batch unless the loop ended on a report
                if i % group != 0:
                    print(f'epoch #{epoch + 1}, batch #{i + 1}: cost = {j}, acc = {score_v} [my]')
                j_avg /= total_batch
                score_avg /= total_batch
                j_his[epoch] = j_avg
                print(f'epoch #{epoch + 1}: avg cost = {j_avg}, avg score = {score_avg}')
            saver.save(sess, save_path)
            # plot the cost history in the first cell of the shared subplot grid
            plt.figure(figsize=figsize)
            plt_started = True
            spn += 1
            plt.subplot(spr, spc, spn)
            plt.plot(j_his, label='cost function value')
            plt.grid()
            plt.legend()

    # test
    # evaluate accuracy on the full test set, keeping activations for the plots below
    score_v, a1v, a2v, a3v = sess.run([score, conv1, conv2, logits], feed_dict={ph_x: x_test, ph_y: y_test})
    print(f'Testing score = {score_v}')

    # make sure a figure exists even when training was skipped via the cache
    if not plt_started:
        plt.figure(figsize=figsize)

    # show detail
    # show detail
    def output(tgt, num_all):
        """Plot up to `num_all` test examples of digit `tgt` with their activations.

        For each matching test image, draws up to four cells in the shared
        subplot grid: the input image (titled "label => prediction"), the
        first feature map of conv1, the first feature map of conv2, and the
        network output as a 10x1 column. Stops early when the grid is full.

        NOTE(review): scans y_test_ohdc with no bounds check — raises
        IndexError if fewer than num_all examples of tgt exist; confirm that
        is acceptable for the MNIST test set.
        """
        global spn  # advances the shared subplot counter defined at file top
        num = 0
        idx = -1
        while True:
            idx += 1
            if y_test_ohdc[idx] != tgt:
                continue
            num += 1
            if num > num_all:
                break
            # input image, titled "true label => predicted label"
            spn += 1
            plt.subplot(spr, spc, spn)
            plt.title(str(y_test_ohdc[idx]) + ' => ' + str(a3v[idx].argmax()))
            plt.imshow(x_test[idx].reshape(nn, nn))
            # first feature map of conv layer 1 (channels moved to the front)
            spn += 1
            plt.subplot(spr, spc, spn)
            plt.imshow(a1v[idx].transpose([2, 0, 1])[0])
            spn += 1
            if spn > spr * spc:
                break
            # first feature map of conv layer 2
            plt.subplot(spr, spc, spn)
            plt.imshow(a2v[idx].transpose([2, 0, 1])[0])
            spn += 1
            if spn > spr * spc:
                break
            # network output rendered as a 10x1 column image
            plt.subplot(spr, spc, spn)
            plt.imshow(a3v[idx].reshape(-1, 1))


    # visualize a few examples for the digits 8 and 5, then display the figure
    output(8, 3)
    output(5, 3)

    plt.show()
