import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt

# Load the MNIST dataset.
# NOTE(review): tensorflow.examples.tutorials was removed in TF 2.x — this
# script requires TensorFlow 1.x.
DATA_DIR = "E:/VC_project/data/Mnist"  # hoisted: the same path was hard-coded twice below
mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)  # labels one-hot encoded
mnist1 = input_data.read_data_sets(DATA_DIR)  # labels kept as integer class ids (for plot titles)


# Inspect the dataset: print images/labels shapes for the train,
# test and validation splits (same order as the original prints).
for split in (mnist.train, mnist.test, mnist.validation):
    print(split.images.shape)
    print(split.labels.shape)

# Peek at the first two (one-hot) training labels.
print(mnist.train.labels[:2])

# Show the first 16 training images in a 4x4 grid, each titled with its
# integer label (mnist1 holds non-one-hot labels).
for plot_pos in range(1, 17):
    sample = plot_pos - 1  # subplot positions are 1-based, image indices 0-based
    plt.subplot(4, 4, plot_pos)
    plt.title("[{}]".format(mnist1.train.labels[sample]))
    plt.imshow(mnist1.train.images[sample].reshape((28, 28)))
plt.show()

# Build and train a single-layer (softmax-regression) network with TF 1.x.

# --- 1. Computation graph: define the network structure -------------------
learning_rate = tf.placeholder(dtype=tf.float32)  # fed per step so the rate can decay
X = tf.placeholder(dtype=tf.float32, shape=[None, 784], name="X")  # flattened 28x28 input images
W = tf.Variable(tf.truncated_normal([784, 10]), name="weight")  # weight matrix
b = tf.Variable(tf.zeros([10]), name="bias")  # bias vector

logits = tf.matmul(X, W) + b  # raw (pre-softmax) class scores

y = tf.placeholder(dtype=tf.float32, shape=[None, 10], name="y")  # one-hot ground-truth labels

# Mean softmax cross-entropy over the batch.
# NOTE(review): softmax_cross_entropy_with_logits is deprecated in late TF 1.x
# in favor of the _v2 variant; kept as-is for compatibility with older installs.
cross_entropy_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))

# Plain SGD; the learning rate flows in through the placeholder above.
# Fixed typo: was "trian_step".
train_step = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate).minimize(cross_entropy_loss)

# Compare predictions against ground truth and compute the accuracy.
# Fixed: tf.arg_max is a deprecated alias — use tf.argmax.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(logits, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # tf.cast: bool -> float before averaging

# --- 2. Train the model ---------------------------------------------------
# Context manager guarantees the session is closed (the original leaked it).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize variables
    lr = 1.0  # initial learning rate; decayed by the schedule below

    for step in range(3000):
        # Simple step-wise learning-rate decay.
        if step > 1000:
            lr = 0.3
        if step > 2000:
            lr = 0.1

        batch_x, batch_y = mnist.train.next_batch(32)  # train on mini-batches of 32
        _, loss = sess.run([train_step, cross_entropy_loss],
                           feed_dict={
                               X: batch_x,
                               y: batch_y,
                               learning_rate: lr,
                           })

        # Every 100 steps report the loss plus train/test accuracy.
        if (step + 1) % 100 == 0:
            print("#" * 20)
            print("step [{}], entropy loss:[{}]".format(step + 1, loss))
            # Fixed typo in message: was "trian accuracy".
            train_accuracy = sess.run(accuracy, feed_dict={X: batch_x, y: batch_y})
            print("train accuracy: {}".format(train_accuracy))

            test_accuracy = sess.run(accuracy,  # accuracy on the full test set
                                     feed_dict={
                                         X: mnist.test.images,
                                         y: mnist.test.labels,
                                     })
            print("test accuracy: {}".format(test_accuracy))


