import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data

# Download (first run only) and load MNIST with one-hot encoded labels.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Report the shape of each split; padding strings align the columns.
for images_title, labels_title, split in (
        ('train images :', 'labels:      ', mnist.train),
        ('validation images:', 'labels:           ', mnist.validation),
        ('test images:', 'labels:           ', mnist.test)):
    print(images_title, split.images.shape,
          labels_title, split.labels.shape)


def layer(output_dim, input_dim, inputs, activation=None):
    """Build one fully connected layer: inputs @ W + b, optionally activated.

    Args:
        output_dim: number of output units.
        input_dim: number of input units (must match inputs' last dimension).
        inputs: input tensor of shape (batch, input_dim).
        activation: optional element-wise activation (e.g. tf.nn.relu);
            when None the raw affine output (logits) is returned.

    Returns:
        Tensor of shape (batch, output_dim).
    """
    # Weights and bias are both drawn from a standard normal distribution.
    weights = tf.Variable(tf.random_normal([input_dim, output_dim]))
    bias = tf.Variable(tf.random_normal([1, output_dim]))
    affine = tf.matmul(inputs, weights) + bias
    return affine if activation is None else activation(affine)


# Input placeholder: flattened 28x28 MNIST images, shape (batch, 784).
x = tf.placeholder("float", [None, 784])
# Hidden layer: 784 -> 256 units with ReLU activation.
h1 = layer(output_dim=256, input_dim=784, inputs=x, activation=tf.nn.relu)
# Output layer: 256 -> 10 raw logits (softmax is applied inside the loss op).
y_predict = layer(output_dim=10, input_dim=256, inputs=h1, activation=None)

# Placeholder for the one-hot ground-truth labels, shape (batch, 10).
y_label = tf.placeholder("float", [None, 10])
# reduce_mean averages the per-example cross-entropy over the batch.
loss_function = tf.reduce_mean(
    # Softmax cross-entropy between the logits (y_predict) and the
    # one-hot labels (y_label).
    # NOTE(review): later TF 1.x releases deprecate this op in favor of
    # softmax_cross_entropy_with_logits_v2 — confirm against the TF version.
    tf.nn.softmax_cross_entropy_with_logits(logits=y_predict,
                                            labels=y_label)

)
# Adam optimizer minimizing the mean cross-entropy loss.
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss_function)

# A prediction is correct when the argmax of the logits matches the
# argmax of the one-hot label (boolean tensor per example).
correct_prediction = tf.equal(tf.argmax(y_label, 1),
                              tf.argmax(y_predict, 1))

# Accuracy = mean of the boolean correctness vector cast to float.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# Training hyper-parameters.
trainEpochs = 15    # full passes over the training set
batchSize = 100     # examples per gradient step
totalBatchs = int(mnist.train.num_examples / batchSize)

# Per-epoch history, collected for the plots below.
epoch_list = []
loss_list = []
accuracy_list = []

from time import time

startTime = time()


# BUG FIX: the original `sess = tf.Session` bound the Session CLASS itself
# (no parentheses), so the very next `sess.run(...)` would raise a TypeError.
# The session must be instantiated. Not using a `with` block because `sess`
# is still needed for the final test-set evaluation at the end of the script.
sess = tf.Session()
sess.run(tf.global_variables_initializer())

for epoch in range(trainEpochs):
    # One full pass over the training set in mini-batches.
    for i in range(totalBatchs):
        batch_x, batch_y = mnist.train.next_batch(batchSize)
        sess.run(optimizer, feed_dict={x: batch_x, y_label: batch_y})

    # Evaluate loss and accuracy on the validation split once per epoch.
    loss, acc = sess.run([loss_function, accuracy],
                         feed_dict={x: mnist.validation.images,
                                    y_label: mnist.validation.labels})

    # Record history for the plots below.
    epoch_list.append(epoch)
    loss_list.append(loss)
    accuracy_list.append(acc)
    print("Train Epoch:", '%02d' % (epoch + 1), "Loss=",
          "{:.9f}".format(loss), " Accuracy=", acc)

duration = time() - startTime
print("Train Finished takes:", duration)



import matplotlib.pyplot as plt

# Loss curve over the training epochs.
figure = plt.gcf()
figure.set_size_inches(4, 2)
plt.plot(epoch_list, loss_list, label='loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['loss'], loc='upper left')

# Accuracy curve, drawn on the same current axes.
plt.plot(epoch_list, accuracy_list, label="accuracy")
figure = plt.gcf()
figure.set_size_inches(4, 2)
plt.ylim(0.8, 1)
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()

# Final generalization score on the held-out test split.
test_accuracy = sess.run(accuracy,
                         feed_dict={x: mnist.test.images,
                                    y_label: mnist.test.labels})
print("Accuracy:", test_accuracy)