import numpy as np
import tensorflow as tf

# Bare expression: in a notebook this cell displays the TF version.
# The TF 1.x API (placeholders, sessions, tf.train.Saver) is used throughout.
tf.__version__
from matplotlib import pyplot as plt
# IPython cell magic — this file is a notebook export, not a plain .py script.
%matplotlib inline
tf.logging.set_verbosity(tf.logging.INFO)
from tensorflow.examples.tutorials.mnist import input_data
# Download (if needed) and load MNIST; labels here are plain integer class
# ids (no one_hot flag), which matches the sparse cross-entropy loss below.
mnist = input_data.read_data_sets("./MNIST_data/")
# Sanity-check the sizes of the train / validation / test splits.
print(mnist.train.images.shape)
print(mnist.train.labels.shape)
print(mnist.validation.images.shape)
print(mnist.validation.labels.shape)

print(mnist.test.images.shape)
print(mnist.test.labels.shape)
plt.figure(figsize=(8,8))

# Preview the first 16 training digits in a 4x4 grid, titled with their labels.
for position in range(1, 17):
    sample = position - 1
    plt.subplot(4, 4, position)
    plt.axis('off')
    plt.title('[{}]'.format(mnist.train.labels[sample]))
    plt.imshow(mnist.train.images[sample].reshape((28,28)))
	
	##定义神经网络
##定义两个placeholder分别用于图像和lable数据，另外，定义一个float类型的变量用于设置学习率
x = tf.placeholder("float", [None, 784])
y = tf.placeholder("int64", [None])
learning_rate = tf.placeholder("float")
##为了让网络更高效的运行，多个数据会被组织成一个batch送入网络，两个placeholder的第一个维度就是batchsize，
##因为我们这里还没有确定batchsize，所以第一个维度留空
x = tf.placeholder("float", [None, 784])
y = tf.placeholder("int64", [None])
learning_rate = tf.placeholder("float")
def initialize(shape, stddev=0.1):
  """Return a truncated-normal init tensor of `shape`.

  Bug fix: the original ignored the `stddev` argument and always passed the
  hard-coded 0.1; the parameter is now honoured (default behaviour unchanged).
  """
  return tf.truncated_normal(shape, stddev=stddev)

# Hidden layer: 784 -> 100 units with ReLU activation.
L1_units_count = 100

W_1 = tf.Variable(initialize([784, L1_units_count]))
b_1 = tf.Variable(initialize([L1_units_count]))
logits_1 = tf.matmul(x, W_1) + b_1
output_1 = tf.nn.relu(logits_1)

# Output layer: 100 -> 10 raw class scores. No activation here — softmax is
# applied later by the loss op and the prediction op.
L2_units_count = 10 
W_2 = tf.Variable(initialize([L1_units_count, L2_units_count]))
b_2 = tf.Variable(initialize([L2_units_count]))
logits_2 = tf.matmul(output_1, W_2) + b_2  

logits = logits_2

## Loss and optimizer.
## sparse_softmax_cross_entropy_with_logits consumes integer labels directly,
## so no manual one-hot encoding is needed. Plain SGD is used; the learning
## rate is fed at run time through the placeholder — try changing the rate
## or swapping the optimizer and re-training to see the effect.
cross_entropy_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y))

optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate).minimize(cross_entropy_loss)
## The network outputs raw logits, not a probability distribution; apply
## softmax to get probabilities, then compare the argmax against the labels
## to compute accuracy.
pred = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(pred, 1), y)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
## Training hyper-parameters. `trainig_step` (sic) keeps its misspelled name
## because the training loop below references it.
batch_size = 32
trainig_step = 1000

## Saver persists / restores model checkpoints.
saver = tf.train.Saver()
## Everything defined above only builds the computation graph; to actually
## run it we need a Session and must feed data into the placeholders.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Fixed feed dicts for the validation and test splits.
    validate_data = {
        x: mnist.validation.images,
        y: mnist.validation.labels,
    }
    test_data = {x: mnist.test.images, y: mnist.test.labels}

    for i in range(trainig_step):
        xs, ys = mnist.train.next_batch(batch_size)
        _, loss = sess.run(
            [optimizer, cross_entropy_loss],
            feed_dict={
                x: xs,
                y: ys,
                learning_rate: 0.3
            })

        # Every 100 steps, report the last mini-batch loss and the validation
        # accuracy, and checkpoint the model.
        if i > 0 and i % 100 == 0:
            validate_accuracy = sess.run(accuracy, feed_dict=validate_data)
            print(
                "after %d training steps, the loss is %g, the validation accuracy is %g"
                % (i, loss, validate_accuracy))
            saver.save(sess, './model.ckpt', global_step=i)

    print("the training is finished!")
    # Final accuracy on the held-out test set.
    acc = sess.run(accuracy, feed_dict=test_data)
    # Fixed typo in the original message ("accuarcy").
    print("the test accuracy is:", acc)
	
	## 下面，用我们训练的模型做一个测试。
with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state('./')
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        final_pred, acc = sess.run(
            [pred, accuracy],
            feed_dict={
                x: mnist.test.images[:16],
                y: mnist.test.labels[:16]
            })
        orders = np.argsort(final_pred)
        plt.figure(figsize=(8, 8))
        print(acc)
        for idx in range(16):
            order = orders[idx, :][-1]
            prob = final_pred[idx, :][order]
            plt.subplot(4, 4, idx + 1)
            plt.axis('off')
            plt.title('{}: [{}]-[{:.1f}%]'.format(mnist.test.labels[idx],
                                                  order, prob * 100))
            plt.imshow(mnist.test.images[idx].reshape((28, 28)))

    else:
        pass
## With the settings above the digits shown look mostly right, but the
## overall test accuracy is not great.

### Improved model
# Reload MNIST, this time with one-hot labels (required by the dense
# softmax_cross_entropy_with_logits loss used below).
mnist_new = input_data.read_data_sets("./MNIST_data/",one_hot=True)
# New input/label placeholders; y is now a one-hot matrix of shape [None, 10].
# NOTE(review): this rebinds x and y, so the first model's ops above keep
# referencing the old placeholders. Also, one-hot labels for the dense
# cross-entropy are typically declared float; int64 here may cause a dtype
# mismatch in softmax_cross_entropy_with_logits — verify.
x = tf.placeholder("float", [None, 784])
y = tf.placeholder("int64", [None, 10])
# Hidden layer widths.
L1_units_count = 256
L2_units_count = 64
# Output layer width (one unit per digit class).
L3_units_count = 10
# Optimizer hyper-parameters.
learning_rate_base = 0.3  # base learning rate
learning_rate_decay = 0.99  # decay factor for exponential decay
regularization_rate = 0.001  # weight of the L2 term in the total loss
# Training schedule (note `trainig_step` keeps its original misspelled name).
batch_size = 256
trainig_step = 20000

# Global step counter; excluded from training, incremented by the optimizer.
global_step = tf.Variable(0, trainable=False)

def initialize(shape, stddev=0.1):
  """Return a truncated-normal init tensor of `shape`.

  Bug fix: honour the `stddev` argument instead of the hard-coded 0.1
  (default behaviour unchanged; same fix as the first definition above).
  """
  return tf.truncated_normal(shape, stddev=stddev)
# Hidden layer 1: 784 -> 256 units.
W_1 = tf.Variable(initialize([784, L1_units_count]))
b_1 = tf.Variable(initialize([L1_units_count]))
logits_1 = tf.matmul(x, W_1) + b_1
output_1 = tf.nn.relu(logits_1)  # ReLU activation

# Hidden layer 2: 256 -> 64 units.
W_2 = tf.Variable(initialize([L1_units_count, L2_units_count]))
b_2 = tf.Variable(initialize([L2_units_count]))
logits_2 = tf.matmul(output_1, W_2) + b_2
output_2 = tf.nn.relu(logits_2)  # ReLU activation


# Output layer: 64 -> 10 raw class scores (softmax applied later).
W_3 = tf.Variable(initialize([L2_units_count, L3_units_count]))
b_3 = tf.Variable(initialize([L3_units_count]))
logits_3 = tf.matmul(output_2, W_3) + b_3

logits_new = logits_3

# Loss and optimizer: softmax + cross-entropy + L2 regularization,
# with an exponentially decaying learning rate and SGD.
# L2 regularization is applied to the weight matrices only (not biases).
regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)  # L2 regularization loss function
regularization = regularizer(W_1) + regularizer(W_2) + regularizer(W_3)  # model's total regularization loss

# Exponential learning-rate decay: one decay interval corresponds to one
# epoch (num_examples / batch_size training steps).
learning_rate = tf.train.exponential_decay(learning_rate_base, 
                                           global_step, 
                                           mnist_new.train.num_examples / batch_size,
                                           learning_rate_decay)

# Cross-entropy against the dense one-hot labels.
# NOTE(review): this op generally expects float labels; y is declared int64
# above — confirm the dtype is accepted.
cross_entropy_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits_new, labels=y))

# Total loss = cross-entropy loss + regularization loss.
loss_sum = cross_entropy_loss + regularization
# SGD optimizer; passing global_step makes minimize() increment it, which
# drives the learning-rate decay schedule above.
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_sum, global_step=global_step)

## Evaluate performance: compare predicted class vs. one-hot label argmax.
pred_new = tf.nn.softmax(logits_new)
correct_pred_new = tf.equal(tf.argmax(pred_new, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred_new, tf.float32))

# Saver for checkpointing the improved model.
saver = tf.train.Saver()

# Train the improved model.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Fixed feed dicts for the validation and test splits.
    validate_data = {
        x: mnist_new.validation.images,
        y: mnist_new.validation.labels,
    }
    test_data = {
        x: mnist_new.test.images, 
        y: mnist_new.test.labels
    }
    for i in range(trainig_step):
        xs, ys = mnist_new.train.next_batch(batch_size)
        # No learning_rate feed here: the decayed rate is a graph tensor.
        _, loss = sess.run(
            [optimizer, loss_sum],
            feed_dict={
                x: xs,
                y: ys
            })

        # Every 2500 steps, report the total loss and validation accuracy,
        # and checkpoint the model.
        if i > 0 and i % 2500 == 0:
            validate_accuracy = sess.run(accuracy, feed_dict=validate_data)
            print(
                "after %d training steps, the loss is %g, the validation accuracy is %g"
                % (i, loss, validate_accuracy))
            saver.save(sess, './mode2.ckpt', global_step=i)

    print("the training is finished!")
    
    # Final accuracy on the held-out test set.
    acc = sess.run(accuracy, feed_dict=test_data)
    # Fixed typo in the original message ("accuarcy").
    print("the test accuracy is:", acc)