import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data

# Softmax-regression classifier for MNIST, trained with mini-batch
# gradient descent (TensorFlow 1.x graph/session API).

mnist = input_data.read_data_sets("data_mnist/", one_hot=True)

# Input: flattened 28*28 = 784 pixel images; None = any batch size.
x = tf.placeholder(tf.float32, [None, 784])

# Weights: one 784-dimensional weight vector per digit class (shape [784, 10]).
W = tf.Variable(tf.zeros([784, 10]))
# Biases: one scalar offset per digit class (shape [10]).
b = tf.Variable(tf.zeros([10]))

# Predicted class-probability distribution for each image in the batch.
y = tf.nn.softmax(tf.matmul(x, W) + b)

# Ground-truth labels, one-hot encoded.
y_ = tf.placeholder(tf.float32, [None, 10])

# Cross-entropy loss, summed over the batch.
# Clip the softmax output away from 0 so tf.log never produces -inf
# (and hence NaN gradients) when a probability underflows to exactly 0.
cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))

# Minimize the loss with plain gradient descent, learning rate 0.01.
# (With the batch-summed loss above, this is an effective per-example
# learning rate of ~1.0 for batches of 100.)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

# The context manager guarantees the session is closed even if an
# exception is raised during training or evaluation.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # 1000 steps of mini-batch SGD, 100 examples per batch.
    for _ in range(1000):
        batch_images, batch_labels = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_images, y_: batch_labels})

    # Evaluate accuracy on the held-out test set: the fraction of images
    # whose highest-probability prediction matches the true label.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    print(sess.run(accuracy,
                   feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
