# MNIST classifier: a 5-layer fully-connected network (784-512-512-512-512-10)
# with Xavier initialization, ReLU + dropout, softmax cross-entropy loss,
# Adam optimizer, and TensorBoard summaries. Uses TensorFlow 1.x APIs.
# NOTE(review): the whole script is intentionally kept commented out, as in
# the original file; latent bugs have been fixed so it runs if uncommented.
#
# import tensorflow as tf
# import random
# import os
# # Environment variable values must be strings (was: = 2, which raises TypeError).
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#
# # Deprecated TF1.x MNIST input pipeline (downloads/loads the dataset).
# from tensorflow.examples.tutorials.mnist import input_data
# # Fix the graph-level random seed for reproducibility.
# tf.set_random_seed(777)
#
# # Raw string: a plain "D:\..." literal contains invalid/deprecated escapes.
# mnist = input_data.read_data_sets(r"D:\Deep learning_code\MNIST_data", one_hot=True)
#
# # Hyper-parameters.
# learning_rate = 0.001
# training_epochs = 15
# batch_size = 100
#
# # TensorBoard log directory (was ' ', a single space — not a usable path).
# TB_SUMMARY_DIR = './logs/mnist'
#
# # Placeholders: flattened 28x28 grayscale images and one-hot digit labels.
# X = tf.placeholder(tf.float32, [None, 784])
# Y = tf.placeholder(tf.float32, [None, 10])
#
# # Reshape to NHWC so a few sample input images can be logged to TensorBoard.
# x_image = tf.reshape(X, [-1, 28, 28, 1])
# tf.summary.image('input', x_image, 3)
#
# # Dropout keep probability: fed as 0.7 during training, 1.0 at evaluation.
# keep_prob = tf.placeholder(tf.float32)
#
# # Layer 1: 784 -> 512, ReLU + dropout.
# with tf.variable_scope('layer1') as scope:
#     W1 = tf.get_variable("W", shape=[784, 512],
#                          initializer=tf.contrib.layers.xavier_initializer())
#     # Fixed: the original line was missing its closing parenthesis.
#     b1 = tf.Variable(tf.random_normal([512]))
#     L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
#     L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
#     tf.summary.histogram("X", X)
#     tf.summary.histogram("weights", W1)
#     tf.summary.histogram("bias", b1)
#     tf.summary.histogram("layer", L1)
#
# # Layer 2: 512 -> 512.
# with tf.variable_scope('layer2') as scope:
#     W2 = tf.get_variable("W", shape=[512, 512],
#                          initializer=tf.contrib.layers.xavier_initializer())
#     b2 = tf.Variable(tf.random_normal([512]))
#     L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
#     L2 = tf.nn.dropout(L2, keep_prob=keep_prob)
#
#     tf.summary.histogram("weights", W2)
#     tf.summary.histogram("bias", b2)
#     tf.summary.histogram("layer", L2)
#
# # Layer 3: 512 -> 512.
# with tf.variable_scope('layer3') as scope:
#     W3 = tf.get_variable("W", shape=[512, 512],
#                          initializer=tf.contrib.layers.xavier_initializer())
#     b3 = tf.Variable(tf.random_normal([512]))
#     # Fixed: relu/dropout live under tf.nn (original used tf.relu / tf.dropout,
#     # which do not exist and would raise AttributeError).
#     L3 = tf.nn.relu(tf.matmul(L2, W3) + b3)
#     L3 = tf.nn.dropout(L3, keep_prob=keep_prob)
#
#     tf.summary.histogram("weights", W3)
#     tf.summary.histogram("bias", b3)
#     tf.summary.histogram("layer", L3)
#
# # Layer 4: 512 -> 512.
# with tf.variable_scope('layer4') as scope:
#     W4 = tf.get_variable("W", shape=[512, 512],
#                          initializer=tf.contrib.layers.xavier_initializer())
#     b4 = tf.Variable(tf.random_normal([512]))
#     L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
#     L4 = tf.nn.dropout(L4, keep_prob=keep_prob)
#
#     tf.summary.histogram("weights", W4)
#     tf.summary.histogram("bias", b4)
#     tf.summary.histogram("layer", L4)
#
# # Output layer: 512 -> 10 raw logits (softmax is applied inside the loss op).
# with tf.variable_scope('layer5') as scope:
#     W5 = tf.get_variable("W", shape=[512, 10],
#                          initializer=tf.contrib.layers.xavier_initializer())
#     b5 = tf.Variable(tf.random_normal([10]))
#     hypothesis = tf.matmul(L4, W5) + b5
#
#     tf.summary.histogram("weights", W5)
#     tf.summary.histogram("bias", b5)
#     tf.summary.histogram("hypothesis", hypothesis)
#
#
# # Softmax cross-entropy loss averaged over the batch.
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
#     logits=hypothesis, labels=Y))
# # Adam optimizer minimizing the loss.
# optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# # Scalar summary so the loss curve shows up in TensorBoard.
# tf.summary.scalar("loss", cost)
#
# # Merge every summary op declared above into a single fetchable tensor.
# summary = tf.summary.merge_all()
#
# # Fixed: the session creation itself was commented out, leaving `sess`
# # undefined on the next line.
# sess = tf.Session()
# sess.run(tf.global_variables_initializer())
#
# # Fixed: FileWriter lives on the tf.summary module, not on the merged
# # summary tensor (original: summary.FileWriter(...)).
# writer = tf.summary.FileWriter(TB_SUMMARY_DIR)
# writer.add_graph(sess.graph)
# global_step = 0
#
# print('Start learning!')
#
# # Training loop: one pass over the training set per epoch.
# for epoch in range(training_epochs):
#     avg_cost = 0
#     total_batch = int(mnist.train.num_examples / batch_size)
#
#     for i in range(total_batch):
#         batch_xs, batch_ys = mnist.train.next_batch(batch_size)
#         feed_dict = {X: batch_xs, Y: batch_ys, keep_prob: 0.7}
#         s, _ = sess.run([summary, optimizer], feed_dict=feed_dict)
#         writer.add_summary(s, global_step=global_step)
#         global_step += 1
#
#         # NOTE(review): this reruns the forward pass just to read the loss;
#         # fetching cost alongside summary/optimizer above would be cheaper.
#         avg_cost += sess.run(cost, feed_dict=feed_dict) / total_batch
#
#     print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
#
# print('Learning Finished!')
#
# # Per-example correctness: argmax of logits vs. argmax of one-hot label.
# correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
# # Test-set accuracy; dropout disabled via keep_prob=1.
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# print('Accuracy:', sess.run(accuracy, feed_dict={
#       X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1}))
#
# # Predict one randomly chosen test image and show it against its label.
# r = random.randint(0, mnist.test.num_examples - 1)
# print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
# # Fixed: the feed_dict used ';' instead of ',' between entries (syntax error).
# print("Prediction: ", sess.run(
#     tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1], keep_prob: 1}))