import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
from tensorflow.contrib.layers import xavier_initializer
from tensorflow.contrib.layers import variance_scaling_initializer

# Download (if needed) and load MNIST into train/validation/test splits.
# one_hot is left at its default, so labels come back as integer class ids.
mnist = input_data.read_data_sets('./')

# Sanity check: print the image and label array shapes of every split,
# in the same train -> validation -> test order as before.
for split in (mnist.train, mnist.validation, mnist.test):
    print(split.images.shape)
    print(split.labels.shape)

# Preview the first 16 training digits in a 4x4 grid, each titled with
# its integer label.
plt.figure(figsize=(8, 8))
for idx in range(16):
    ax = plt.subplot(4, 4, idx + 1)
    ax.axis('off')
    ax.set_title('[{}]'.format(mnist.train.labels[idx]))
    ax.imshow(mnist.train.images[idx].reshape(28, 28))


# Graph inputs: flattened 28x28 images and integer class labels.
x = tf.placeholder('float',[None,784])
y = tf.placeholder('int64',[None])
# NOTE(review): exponential_decay's second positional argument is
# `global_step`; here it is the constant 8000, so this expression evaluates
# once to the fixed value 0.95 * 0.98**(8000/100) and never actually decays
# during training. To get a decaying rate, pass a global_step Variable here
# and also hand it to optimizer.minimize(..., global_step=...).
learning_rate = tf.train.exponential_decay(0.95,8000,decay_steps=100,decay_rate=0.98,)
# NOTE(review): this placeholder is never wired into the tf.nn.dropout calls
# below (they hard-code keep_prob=1), so dropout is effectively disabled;
# feeding it in the eval runs later is a no-op.
keep_prob = tf.placeholder('float')

def initialize(shape, stddev=0.1):
    """Return a truncated-normal tensor for initializing bias variables.

    Args:
        shape: list/tuple giving the shape of the tensor to create.
        stddev: standard deviation of the truncated normal (default 0.1).

    Bug fix: the original body hard-coded stddev=0.1 and silently ignored
    the `stddev` parameter; it is now honored (default unchanged, so all
    existing call sites behave identically).
    """
    return tf.truncated_normal(shape, stddev=stddev)

# Hidden layer 1: fully-connected 784 -> 1000 with ReLU.
L1 = 1000
# Xavier/Glorot (normal) initialization; the variable name 'w1' is part of
# the checkpoint, so it must not change or restore below will fail.
w1 = tf.get_variable('w1',[784,L1],'float',xavier_initializer(uniform=False))
b1 = tf.Variable(initialize([L1]))
logits_1 = tf.matmul(x,w1) + b1
outputs_1 = tf.nn.relu(logits_1)
# NOTE(review): keep_prob=1 makes this dropout a no-op; the keep_prob
# placeholder defined above was presumably meant to be used here — confirm
# and feed it from the training loop if dropout is actually wanted.
dropout_L1 = tf.nn.dropout(outputs_1,keep_prob=1)

# Hidden layer 2: fully-connected 1000 -> 800 with ReLU.
L2 = 800
w2 = tf.get_variable('w2',[L1,L2],'float',xavier_initializer(uniform=False))
b2 = tf.Variable(initialize([L2]))
logits_2 = tf.matmul(dropout_L1,w2) + b2
outputs_2 = tf.nn.relu(logits_2)
# NOTE(review): keep_prob=1 disables dropout here as well (see layer 1).
dropout_2 = tf.nn.dropout(outputs_2,keep_prob=1)

# Hidden layer 3: fully-connected 800 -> 500 with ReLU.
L3 = 500
w3 = tf.get_variable('w3',[L2,L3],'float',xavier_initializer(uniform=False))
b3 = tf.Variable(initialize([L3]))
logits_3 = tf.matmul(dropout_2,w3) + b3
outputs_3 = tf.nn.relu(logits_3)
# NOTE(review): keep_prob=1 disables dropout here as well (see layer 1).
dropout_3 = tf.nn.dropout(outputs_3,keep_prob=1)

# Output layer: fully-connected 500 -> 10 class logits. No activation here;
# the softmax is applied inside the loss (and again in `pred` below).
L5_units_counts = 10
# NOTE(review): the hard-coded 10s below should use L5_units_counts, which
# is otherwise unused. This layer uses variance-scaling (He-style) init,
# unlike the Xavier init of the hidden layers — confirm that is intentional.
w5 = tf.get_variable('w5',[L3,10],'float',variance_scaling_initializer())
b5 = tf.Variable(initialize([10]))
logit_5 = tf.matmul(dropout_3,w5) + b5
logits = logit_5

# Sparse softmax cross-entropy: takes integer class labels directly, so no
# one-hot encoding of `y` is needed. (The "enptropy" typo in the name is
# kept because the training loop below references it.)
cross_enptropy_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,logits=logits))
# Plain SGD; `learning_rate` is a fixed tensor here (see the NOTE where it
# is defined), so no global_step is passed to minimize().
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_enptropy_loss)

# Class-probability predictions and accuracy against the integer labels.
pred = tf.nn.softmax(logits)
accuracy_pred = tf.equal(tf.argmax(pred,1),y)
accuracy = tf.reduce_mean(tf.cast(accuracy_pred,tf.float32))

batch_size = 32       # examples per SGD update
training_step = 9000  # total number of mini-batch updates
# Saver for checkpointing the variables defined above.
saver = tf.train.Saver()

# Train the network, reporting validation accuracy every 100 steps, then
# evaluate on the test set and checkpoint the final weights.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Build the evaluation feed dicts once and reuse them. (Fix: the original
    # created these dicts but never used them, rebuilding identical dicts
    # inline at every evaluation; keep_prob is included to match the original
    # eval feeds — it is unconnected in the graph, so its value is inert.)
    validate_data = {x: mnist.validation.images, y: mnist.validation.labels, keep_prob: 1}
    test_data = {x: mnist.test.images, y: mnist.test.labels, keep_prob: 1}

    for i in range(training_step):
        xs, ys = mnist.train.next_batch(batch_size)
        _, loss = sess.run([optimizer, cross_enptropy_loss], feed_dict={x: xs, y: ys})

        # Periodic progress report (skips step 0 to avoid an untrained eval).
        if i > 0 and i % 100 == 0:
            validate_accuracy = sess.run(accuracy, feed_dict=validate_data)
            print("after %d training steps,the loss is %g,the validation accuracy is %g" % (i, loss, validate_accuracy))
    acc = sess.run(accuracy, feed_dict=test_data)
    print('the test accuracy is : ', acc)
    # Persist the trained model so a later session can restore it.
    saver.save(sess, 'D:/tensorflow-3.6/model_total/model')



# Restore the latest checkpoint and visualize predictions on the first 16
# test digits: each subplot is titled [true label]-[predicted]-[confidence%].
with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state('D:/tensorflow-3.6/model_total')
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)

        # keep_prob need not be fed: it is unconnected in the graph.
        final_pred, acc = sess.run(
            [pred, accuracy],
            feed_dict={x: mnist.test.images[:16], y: mnist.test.labels[:16]})
        print('test accuracy is :', acc)

        # Top predicted class per example. (Fix: replaces the original
        # argsort-then-take-last, which computed the same index with extra
        # work.)
        top_classes = np.argmax(final_pred, axis=1)
        plt.figure(figsize=(8, 8))

        for idx in range(16):
            top = top_classes[idx]
            prob = final_pred[idx, top]  # softmax probability of the top class
            plt.subplot(4, 4, idx + 1)
            plt.axis('off')
            plt.title('[{}]-[{}]-[{:.1f}%]'.format(mnist.test.labels[idx], top, prob * 100))
            plt.imshow(mnist.test.images[idx].reshape(28, 28))
        plt.show()
    else:
        # Fix: the original silently passed here, hiding a missing/corrupt
        # checkpoint; surface the problem instead.
        print('no checkpoint found in D:/tensorflow-3.6/model_total')