import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from matplotlib import pyplot as plt 

# Load the MNIST dataset (downloaded/cached under './第七周').
# With the default one_hot=False the labels are 1-D integer class ids.
mnist = input_data.read_data_sets('./第七周')

# Inspect each split: images are flattened 784-vectors, labels are
# arrays of class indices. Printed in train/validation/test order.
for split in (mnist.train, mnist.validation, mnist.test):
    print(split.images.shape)
    print(split.labels.shape)

# Restore the flat 784-vectors to 28x28 grids and show the first
# 20 training digits in a 5x4 grid, each titled with its label.
plt.figure(figsize=(8,8))
for position, (image, label) in enumerate(
        zip(mnist.train.images[:20], mnist.train.labels[:20]), start=1):
    plt.subplot(5, 4, position)
    plt.axis('off')
    plt.title('[{}]'.format(label))
    plt.imshow(image.reshape(28,28))

plt.show()

# Network definition: a 784 -> 100 -> 10 fully-connected classifier.
# x feeds flattened 28x28 images; y feeds integer class labels;
# learning_rate is fed per-step so it could be scheduled.
x = tf.placeholder(tf.float32, [None, 784], name='x')
y = tf.placeholder(tf.int64, [None], name='y')
learning_rate = tf.placeholder(tf.float32)

# First hidden layer (784 -> 100) with ReLU activation.
# FIX: the original initialized weights AND biases with
# tf.truncated_normal at its default stddev=1.0, which produces very
# large initial logits and slow/unstable training. Use a small stddev
# for the weights and zeros for the biases instead.
L1_units_count = 100
W_1 = tf.Variable(tf.truncated_normal([784, L1_units_count], stddev=0.1))
b_1 = tf.Variable(tf.zeros([L1_units_count]))
logits_1 = tf.matmul(x, W_1) + b_1
output_1 = tf.nn.relu(logits_1)

print(logits_1)
print(output_1)

# Output layer (100 -> 10): one logit per digit class. No activation
# here — softmax is applied by the loss / prediction ops downstream.
L2_units_count = 10
W_2 = tf.Variable(tf.truncated_normal([L1_units_count, L2_units_count], stddev=0.1))
b_2 = tf.Variable(tf.zeros([L2_units_count]))
logits_2 = tf.matmul(output_1, W_2) + b_2

logits = logits_2

# Loss: softmax cross-entropy against the integer labels, averaged
# over the batch; optimized with plain gradient descent whose learning
# rate comes from the learning_rate placeholder.
per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=y)
cross_entropy_loss = tf.reduce_mean(per_example_loss)
sgd = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
optimizer = sgd.minimize(cross_entropy_loss)

# Accuracy: softmax probabilities -> predicted class per example ->
# fraction of predictions that match the labels.
pred = tf.nn.softmax(logits)
predicted_class = tf.argmax(pred, 1)
correct_pred = tf.equal(predicted_class, y)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Training hyperparameters.
batch_size = 32
training_step = 1000

# Create a session, train for `training_step` mini-batches, report
# validation accuracy every 100 steps, and finish with test accuracy.
with tf.Session() as sess:
    # Initialize all graph variables.
    sess.run(tf.global_variables_initializer())

    # Fixed feed dicts for the validation and test splits.
    validate_data = {
        x: mnist.validation.images,
        y: mnist.validation.labels
    }
    test_data = {
        x: mnist.test.images,
        y: mnist.test.labels
    }

    for i in range(training_step):
        xs, ys = mnist.train.next_batch(batch_size)
        _, loss = sess.run(
            [optimizer, cross_entropy_loss],
            feed_dict = {
                x: xs,
                y: ys,
                learning_rate: 0.3
            }
        )
        # Periodic progress report (deliberately skips step 0).
        if i > 0 and i % 100 == 0:
            validate_accuracy = sess.run(accuracy, feed_dict=validate_data)
            # FIX: corrected the misspelling 'accuaracy' in this message.
            print(
                'after {} training steps, the loss is {}, the validation accuracy is {}.'.format(
                    i, loss, validate_accuracy
                )
            )

    print('the training finished!')
    # Final accuracy on the held-out test set.
    # FIX: corrected the misspelling 'accuarcy' in this message.
    acc = sess.run(accuracy, feed_dict=test_data)
    print('the test accuracy is: ', acc)