import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# input_data: helper API for downloading and parsing the MNIST dataset

# Load the MNIST dataset into memory.  `one_hot=True` encodes each label as a
# 10-dim one-hot vector, which matches the softmax cross-entropy loss below.
mnist = input_data.read_data_sets('.', one_hot=True)
# NOTE(review): a second, never-used copy (`mnist2`, with one_hot=False) was
# loaded here originally; the redundant dataset read has been removed.

# The learning rate is a placeholder so it can be decayed manually at run time.
learning_rate = tf.placeholder(tf.float32)
# Input images: `None` is the batch dimension, 784 = 28*28 flattened pixels.
# `name` lets the tensor be looked up in the graph by name.
x = tf.placeholder(tf.float32, [None, 784], name='x')

# --- Two-layer MLP ------------------------------------
# Glorot/Xavier uniform init keeps activation variance stable across layers.
# FIX: tf.glorot_uniform_initializer is the core-TF equivalent of the
# deprecated tf.contrib.layers.xavier_initializer (tf.contrib was removed
# in TF 2.x); the sampled distribution is identical.
W1 = tf.get_variable('weight1', [784, 784], tf.float32, tf.glorot_uniform_initializer())
b1 = tf.Variable(tf.zeros([784]), name='bias1')  # biases may start at zero
W2 = tf.get_variable('weight2', [784, 10], tf.float32, tf.glorot_uniform_initializer())
b2 = tf.Variable(tf.zeros([10]), name='bias2')
layer_1_logits = tf.matmul(x, W1) + b1
# layer_1_outputs = tf.sigmoid(layer_1_logits)  # sigmoid alternative (unused)
layer_1_outputs = tf.nn.relu(layer_1_logits)
layer_2_logits = tf.matmul(layer_1_outputs, W2) + b2  # raw pre-softmax scores
# --- Two-layer MLP ------------------------------------

# # --- Three-layer MLP (alternative, disabled) ------------------------------
# W1 = tf.get_variable('weight1', [784, 784], tf.float32, tf.contrib.layers.xavier_initializer())
# b1 = tf.Variable(tf.zeros([784]), name='bias1')
# W2 = tf.get_variable('weight2', [784, 512], tf.float32, tf.contrib.layers.xavier_initializer())
# b2 = tf.Variable(tf.zeros([512]), name='bias2')
# W3 = tf.get_variable('weight3', [512, 10], tf.float32, tf.contrib.layers.xavier_initializer())
# b3 = tf.Variable(tf.zeros([10]), name='bias3')
# layer_1_logits = tf.matmul(x, W1) + b1
# layer_1_outputs = tf.nn.relu(layer_1_logits)
# layer_2_logits = tf.matmul(layer_1_outputs, W2) + b2
# layer_2_outputs = tf.nn.relu(layer_2_logits)
# layer_3_logits = tf.matmul(layer_2_outputs, W3) + b3
# # --- Three-layer MLP ------------------------------------------------------


# --- Single-layer (softmax regression) alternative, disabled ----------------
# # truncated_normal draws from a truncated normal distribution to produce
# # random initial weights of shape [784, 10]
# # W = tf.Variable(tf.truncated_normal([784, 10]), name='weight')
# W = tf.get_variable('weight', [784, 10], tf.float32, tf.contrib.layers.xavier_initializer())
# # the bias can be initialized to all zeros
# b = tf.Variable(tf.zeros([10]), name='bias')
# # logits: the network's raw, un-activated output
# logits = tf.matmul(x, W) + b
# --- Single-layer -----------------------------------------------------------

# Ground-truth labels as one-hot vectors (batch x 10).
y = tf.placeholder(tf.float32, [None, 10], name='y')
# Apply softmax to the raw logits and compute cross-entropy in one fused,
# numerically stable op, then average the per-example losses over the batch.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=layer_2_logits))
# Optimizer: each run of `train_step` performs one forward pass, one backward
# pass, and one gradient-descent update of the trainable weights.
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
# train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
# # ------------------ optional weight regularization ------------------
# # J = cross_entropy + 0.0005 * (tf.reduce_sum(tf.abs(W1)) + tf.reduce_sum(tf.abs(W2)))  # L1 regularization
# # NOTE(review): the L2 line below references W3, so it only works with the
# # (commented-out) three-layer network variant.
# J = cross_entropy + 0.0001 * (tf.reduce_sum(tf.square(W1)) + tf.reduce_sum(tf.square(W2)) + tf.reduce_sum(tf.square(W3)))  # L2 regularization
# train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(J)
# # --------------------------------------------------------------------
# softmax is monotonic, so argmax over the raw logits equals argmax over the
# softmax probabilities -- compare predicted vs. true class indices directly.
# FIX: tf.arg_max is a deprecated alias; use tf.argmax.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(layer_2_logits, 1))
# Cast the boolean matches to float32 and average -> fraction classified right.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Everything defined above is only the computation graph; nothing has executed.
# tf.get_default_graph() returns the graph that holds these definitions.
# graph = tf.get_default_graph()
# graph.as_graph_def()  # dump the graph's contents as a GraphDef proto


# Everything above only *builds* the graph; a Session actually executes it.
# FIX: use the session as a context manager so its resources are released
# even if the loop raises (the original never called sess.close()).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize all variables
    # lr = 1.0
    lr = 0.5  # initial learning rate, decayed step-wise below
    for step in range(5000):
        # Simple manual step-wise learning-rate decay schedule.
        if step > 1000:
            lr = 0.2
        if step > 2000:
            lr = 0.1
        batch_x, batch_y = mnist.train.next_batch(32)  # batch size = 32
        # One run of `train_step` = one forward + backward pass on the batch.
        _, loss = sess.run(
            [train_step, cross_entropy],
            feed_dict={
                x: batch_x,
                y: batch_y,
                learning_rate: lr
            })
        if (step + 1) % 100 == 0:
            print('#' * 10)
            print('step [{}], entropy loss: [{}]'.format(step + 1, loss))
            # Accuracy on the current training batch.
            print(sess.run(accuracy, feed_dict={x: batch_x, y: batch_y}))
            # Accuracy on the full held-out test set.
            print(
                sess.run(
                    accuracy,
                    feed_dict={
                        x: mnist.test.images,
                        y: mnist.test.labels
                    }))































