import warnings
import numpy as np
import matplotlib.pyplot as plt
import os
import tensorflow as tf

# Escalate UserWarnings to hard errors so silent issues fail fast.
warnings.simplefilter('error', UserWarning)

# Fix RNG seeds so data shuffling and weight init are reproducible.
np.random.seed(1)
# Use the compat.v1 seed API for consistency with the rest of this file:
# tf.set_random_seed was removed from the top-level namespace in TF2, while
# every other TF1 call here already goes through tf.compat.v1.
tf.compat.v1.set_random_seed(777)
plt.figure(figsize=[16, 8])
spr = 3  # subplot grid rows of the summary figure
spc = 8  # subplot grid columns
spn = 0  # current subplot slot (incremented to 1-based before each use)

# load data
path = r'../../../../large_data/ML1/hand_writing/'
data = np.loadtxt(path + 'imgX.txt', delimiter=',')  # one flattened image per row
y = np.loadtxt(path + 'labely.txt', delimiter=',')   # one numeric label per row
m, n = data.shape   # m samples, n pixels per image
nn = int(np.sqrt(n))  # assumes square images (nn x nn) — TODO confirm against the data file
LL1, LL2 = 10, 8    # side lengths for displaying hidden layers as square images
L1, L2, L3 = LL1 ** 2, LL2 ** 2, 10  # layer widths: 100, 64, and 10 output classes
y[y == 10] = 0  # ATTENTION: remap label 10 -> 0 (presumably 1-based MATLAB-style labels — verify)

# Standardize the pixel data with a single global mean / std
# (scalar statistics over the whole matrix, not per-feature).
mu = data.mean()
sigma = data.std()
data = (data - mu) / sigma

# Shuffle all rows so the 70/30 split below is random.
perm = np.random.permutation(m)
data = data[perm]
y = y[perm]

# Prepend a bias column of ones. NOTE(review): the training loop below feeds
# the raw `data` split, so this bias-augmented matrix looks unused — confirm.
X = np.hstack([np.ones((m, 1)), data])

# One-hot encode the labels: row k gets a 1 in column y[k].
# Vectorized fancy-index assignment replaces the original Python loop
# over all m rows — one C-level pass instead of m iterations.
y_onehot = np.zeros([m, 10])
y_onehot[np.arange(m), y.astype(int)] = 1

# Split every array 70% train / 30% test along axis 0.
m_train = int(0.7 * m)
m_test = m - m_train
XX_train, XX_test = X[:m_train], X[m_train:]
x_train, x_test = data[:m_train], data[m_train:]
y_train, y_test = y[:m_train], y[m_train:]
y_onehot_train, y_onehot_test = y_onehot[:m_train], y_onehot[m_train:]

with tf.name_scope('Input'):
    # ph_x: flattened images, shape (batch, n); ph_y: one-hot labels, shape (batch, 10).
    ph_x = tf.compat.v1.placeholder(tf.float32, [None, n], 'ph_x')
    ph_y = tf.compat.v1.placeholder(tf.float32, [None, L3], 'ph_y')

with tf.name_scope('FP'):
    # Forward pass of a fully connected net:
    # n inputs -> L1 (sigmoid) -> L2 (sigmoid) -> L3 (softmax).
    # Parameters start from standard-normal draws (no variance scaling).
    w1 = tf.Variable(tf.random.normal([n, L1]), dtype=tf.float32, name='w1')
    w2 = tf.Variable(tf.random.normal([L1, L2]), dtype=tf.float32, name='w2')
    w3 = tf.Variable(tf.random.normal([L2, L3]), dtype=tf.float32, name='w3')
    b1 = tf.Variable(tf.random.normal([1, L1]), dtype=tf.float32, name='b1')
    b2 = tf.Variable(tf.random.normal([1, L2]), dtype=tf.float32, name='b2')
    b3 = tf.Variable(tf.random.normal([1, L3]), dtype=tf.float32, name='b3')
    z1 = tf.matmul(ph_x, w1) + b1
    z1 = tf.identity(z1, 'z1')  # identity only attaches a stable graph name
    a1 = tf.sigmoid(z1)
    a1 = tf.identity(a1, 'a1')
    z2 = tf.matmul(a1, w2) + b2
    z2 = tf.identity(z2, 'z2')
    a2 = tf.sigmoid(z2, name='a2')
    z3 = tf.matmul(a2, w3) + b3
    z3 = tf.identity(z3, 'z3')
    a3 = tf.nn.softmax(z3, name='a3')  # class probabilities; each row sums to 1

with tf.name_scope('cost'):
    # Cross-entropy: J = -mean_i sum_k y_ik * log(a3_ik).
    # Clip the softmax output away from zero: in float32 a3 can underflow
    # to exactly 0, and log(0) = -inf turns the cost and gradients to NaN.
    cost = tf.math.negative(
        tf.reduce_mean(
            tf.reduce_sum(ph_y * tf.math.log(tf.clip_by_value(a3, 1e-10, 1.0)), axis=1)
        ),
        name='cost'
    )  # ATTENTION with only mean, alpha > 0.1; with only sum, alpha = 0.0001

with tf.name_scope('score'):
    # Accuracy: fraction of rows whose argmax prediction matches the true label.
    score = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(a3, axis=1), tf.argmax(ph_y, axis=1)), dtype=tf.float32))

# train
useCache = False  # when True, restore weights from the last checkpoint instead of training
ver = '1.21'      # version tag baked into the checkpoint path
alpha = 0.5       # learning rate (pairs with the mean-reduced cost, per the note above)
iter0 = 2000      # full-batch gradient-descent iterations
group = iter0 // 20  # progress is printed every `group` iterations
with tf.name_scope('train'):
    train = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=alpha)\
        .minimize(cost)

with tf.compat.v1.Session() as sess:
    # Dump the graph for TensorBoard. NOTE(review): the FileWriter is never
    # closed/flushed explicitly.
    tf.compat.v1.summary.FileWriter('./log/' + os.path.basename(__file__), sess.graph)
    save_path = './save/' + os.path.basename(__file__) + '_' + ver
    saver = tf.compat.v1.train.Saver()
    if useCache and os.path.exists(save_path + '.meta'):
        print('Cache file exists, will read thetas in cache file...')
        saver.restore(sess, save_path)
    else:
        sess.run(tf.compat.v1.global_variables_initializer())
        j_his = np.zeros(iter0)  # cost history for the convergence plot
        for i in range(iter0):
            # Full-batch gradient descent. Note the net is fed the raw
            # (bias-free) x_train; the bias-augmented XX_train is not used here.
            _, j, score_v = sess.run([train, cost, score], feed_dict={ph_x: x_train, ph_y: y_onehot_train})
            j_his[i] = j
            if 0 == i % group:
                print(f'#{i + 1} cost func value = {j}, score = {score_v}')
        # Print the final iteration unless the periodic print just covered it.
        if 0 != i % group:
            print(f'#{i + 1} cost func value = {j}, score = {score_v}')
        saver.save(sess, save_path)
        spn += 1
        plt.subplot(spr, spc, spn)
        plt.plot(j_his, label='cost function value')
        plt.grid()
        plt.legend()

    # test: evaluate accuracy and capture activations for the detail plots below
    score_v, a3v, a2v, a1v = sess.run([score, a3, a2, a1], feed_dict={ph_x: x_test, ph_y: y_onehot_test})
    print(f'Testing score = {score_v}')
    # show detail
    def output(tgt, num_all):
        """Plot up to num_all test samples with true label `tgt`.

        For each sample, four panels are drawn into the shared subplot grid:
        the input image (titled "truth => prediction"), the hidden
        activations a1 and a2 as squares, and the output distribution a3.
        Stops as soon as the grid (spr x spc slots) is full.

        Fixes over the original loop: the index scan is bounded (the old
        `while True: idx += 1` raised IndexError when y_test held fewer than
        num_all matches), and the grid-overflow guard now runs before every
        panel (the old code skipped it for the first two panels per sample,
        so plt.subplot could be called with an out-of-range slot).
        """
        global spn
        # Bounded list of matching test indices — at most num_all of them.
        for idx in np.flatnonzero(y_test == tgt)[:num_all]:
            panels = [
                (x_test[idx].reshape(nn, nn).T,
                 str(y_test[idx]) + ' => ' + str(a3v[idx].argmax())),
                (a1v[idx].reshape(LL1, LL1).T, None),
                (a2v[idx].reshape(LL2, LL2).T, None),
                (a3v[idx].reshape(-1, 1), None),
            ]
            for img, title in panels:
                spn += 1
                if spn > spr * spc:  # grid full — stop before an invalid subplot call
                    return
                plt.subplot(spr, spc, spn)
                if title is not None:
                    plt.title(title)
                plt.imshow(img)

    # Visualize three test samples each of digits 8 and 5, then show the figure.
    output(8, 3)
    output(5, 3)

    plt.show()
