import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
# Download/load MNIST into ./ and report the array shapes of each split
# (train / validation / test), images first, then labels.
mnist = input_data.read_data_sets('./')
for split in (mnist.train, mnist.validation, mnist.test):
    print(split.images.shape)
    print(split.labels.shape)

# Preview the first 16 training digits in a 4x4 grid, titled with their labels.
plt.figure(figsize=(8, 8))
for i, (img, label) in enumerate(zip(mnist.train.images[:16],
                                     mnist.train.labels[:16])):
    plt.subplot(4, 4, i + 1)
    plt.axis('off')
    plt.title('[{}]'.format(label))
    plt.imshow(img.reshape(28, 28))
plt.show()

# Graph inputs: flattened 28x28 images, integer class labels, and a
# feedable learning rate so it can be varied per training step.
# (tf.float32 / tf.int64 are what the string dtypes 'float' / 'int64'
# resolve to, so the graph is unchanged.)
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.int64, [None])
learning_rate = tf.placeholder(tf.float32)

def initialize(shape, stddev=0.1):
    """Return a truncated-normal tensor for weight/bias initialization.

    Args:
        shape: list/tuple giving the shape of the tensor to create.
        stddev: standard deviation of the truncated normal (default 0.1).

    Returns:
        A tf.Tensor of the given shape drawn from a truncated normal.

    Bug fix: the original body hardcoded ``stddev=0.1`` and silently
    ignored the ``stddev`` argument; it is now passed through, which is
    backward-compatible since the default is unchanged.
    """
    return tf.truncated_normal(shape, stddev=stddev)

# Two-layer fully connected network: 784 -> 3000 (ReLU) -> 10 logits.
L1_units_count = 3000
w1 = tf.Variable(initialize([784, L1_units_count]))
b1 = tf.Variable(initialize([L1_units_count]))
hidden = tf.nn.relu(tf.matmul(x, w1) + b1)

L2_unit_count = 10
w2 = tf.Variable(initialize([L1_units_count, L2_unit_count]))
b2 = tf.Variable(initialize([L2_unit_count]))
# `logits` is consumed below by the loss and the softmax prediction.
logits = tf.matmul(hidden, w2) + b2

# Mean softmax cross-entropy over integer labels, minimized with plain SGD
# whose learning rate is fed in at run time.
cross_entropy_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))
optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate).minimize(loss=cross_entropy_loss)

pred = tf.nn.softmax(logits)  # per-class probabilities only
accuracy_pred = tf.equal(tf.argmax(pred, axis=1), y)  # boolean match per example
accuracy = tf.reduce_mean(tf.cast(accuracy_pred, tf.float32))
print(logits, pred, accuracy_pred, accuracy)

batch_size = 32  # examples per SGD mini-batch
training_step = 1000  # total number of mini-batch updates
# Saver over all variables created above (w1/b1/w2/b2), used to write the
# checkpoint after training and to restore it in the second session.
saver = tf.train.Saver()


with tf.Session() as sess:
    # Train from scratch, evaluating on the validation split every 100
    # steps, then report test accuracy and checkpoint the model.
    sess.run(tf.global_variables_initializer())

    validation_data = {x: mnist.validation.images, y: mnist.validation.labels}
    test_data = {x: mnist.test.images, y: mnist.test.labels}

    for step in range(training_step):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        _, loss = sess.run(
            [optimizer, cross_entropy_loss],
            feed_dict={x: batch_x, y: batch_y, learning_rate: 0.3})

        if step > 0 and step % 100 == 0:
            validate_accuracy = sess.run(accuracy, feed_dict=validation_data)
            print("after %d training steps,the loss is %g,the validation accuracy is %g" % (step, loss, validate_accuracy))

    acc = sess.run(accuracy, feed_dict=test_data)
    print('the test accuracy is : ', acc)
    saver.save(sess, save_path='D:/tensorflow-3.6/model_1/model.ckpt')


with tf.Session() as sess:
    # Reload the checkpoint written above and visualise the model's
    # predictions on the first 16 test digits.
    ckpt = tf.train.get_checkpoint_state('D:/tensorflow-3.6/model_1')
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        sample_feed = {x: mnist.test.images[:16], y: mnist.test.labels[:16]}
        final_pred, acc = sess.run([pred, accuracy], feed_dict=sample_feed)
        # argsort ascending: the last column of each row is the argmax class.
        orders = np.argsort(final_pred)
        print(orders)
        plt.figure(figsize=(8, 8))
        for i in range(16):
            top_class = orders[i, :][-1]
            top_prob = final_pred[i, :][top_class]
            plt.subplot(4, 4, i + 1)
            plt.axis('off')
            # Title: [true label]:[predicted class]-[confidence %].
            plt.title('[{}]:[{}]-[{:.1f}%]'.format(mnist.test.labels[i], top_class, top_prob * 100))
            plt.imshow(mnist.test.images[i].reshape((28, 28)))
        plt.show()