import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os
import datetime
import emoji
from tensorflow.python.framework import graph_util

# Load the MNIST dataset; TensorFlow downloads it into this directory
# automatically when the files are not already present.
mnist = input_data.read_data_sets("../mnist")
print("Training data size:", mnist.train.num_examples)
print("Validation data size:", mnist.validation.num_examples)
print("Testing data size:", mnist.test.num_examples)

batch_size = 100
# Peek at one mini-batch just to show the tensor shapes.
demo_x, demo_y = mnist.train.next_batch(batch_size)
print("X shape:", demo_x.shape)
print("Y shape:", demo_y.shape)

# Network geometry: 28x28 grayscale images flattened to 784 inputs,
# ten digit classes, one hidden layer of 500 units.
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

# Optimization hyper-parameters.
LEARNING_RATE_BASE = 0.8      # initial learning rate
LEARNING_RATE_DECAY = 0.99    # multiplicative learning-rate decay factor
REGULARIZATION_RATE = 0.0001  # L2 penalty weight on the layer weights
TRAINING_STEPS = 20000        # total number of mini-batch iterations
MOVING_AVERAGE_DECAY = 0.99   # decay for the weight shadow (moving-average) variables


def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    """Forward pass of the two-layer network; returns output-layer logits.

    When ``avg_class`` is None the raw variables are used directly;
    otherwise each parameter is replaced by its exponential-moving-average
    shadow via ``avg_class.average``. No softmax is applied here.
    """
    # Resolve each parameter either to itself or to its moving average.
    lookup = (lambda v: v) if avg_class is None else avg_class.average
    # Hidden layer with ReLU activation.
    hidden = tf.nn.relu(tf.matmul(input_tensor, lookup(weights1)) + lookup(biases1))
    # Output layer (raw logits).
    return tf.matmul(hidden, lookup(weights2)) + lookup(biases2)


def train(mnist):
    """Build and train a two-layer MLP on MNIST, then freeze it to mnistNet.pb.

    Args:
        mnist: dataset object with ``train``/``validation``/``test`` splits and
            one-hot labels, as returned by
            ``input_data.read_data_sets(..., one_hot=True)``.

    Side effects: prints periodic validation accuracy and final test accuracy,
    and writes the frozen inference graph to ``mnistNet.pb``.
    """
    # Placeholders reserve graph nodes for the input data; concrete batches
    # are supplied through feed_dict at session run time.
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')

    # Hidden-layer parameters: truncated-normal weights, constant 0.1 biases.
    weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))
    # Output-layer parameters.
    weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
    biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))

    # Forward pass with the raw (non-averaged) weights; used for the training
    # loss and for the exported inference graph.
    y = inference(x, None, weights1, biases1, weights2, biases2)
    # Class probabilities; named "softmax" so it can be frozen/exported below.
    out = tf.nn.softmax(y, name="softmax")

    # Step counter. trainable=False keeps it out of the gradient updates and
    # out of the moving average applied to tf.trainable_variables() below.
    global_step = tf.Variable(0, trainable=False)

    # Exponential moving average over all trainable variables. The shadow
    # update is: shadow = decay * shadow + (1 - decay) * value, which smooths
    # the weights used at evaluation time against per-step noise.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    # Forward pass using the shadow (averaged) weights; used for evaluation.
    average_y = inference(x, variable_averages, weights1, biases1, weights2, biases2)

    # sparse_softmax_cross_entropy expects integer class ids, so convert the
    # one-hot labels back with argmax over axis 1 (per-row maximum index).
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, axis=1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)  # mean over the batch
    # L2 regularization on the weight matrices only (biases are excluded).
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    regularization = regularizer(weights1) + regularizer(weights2)
    loss = cross_entropy_mean + regularization

    # Decay the learning rate once per epoch (num_examples / batch_size steps).
    # staircase=True makes the decay step-wise — without it the rate decays
    # continuously on every step, which contradicted the intended schedule.
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                               mnist.train.num_examples / batch_size,
                                               LEARNING_RATE_DECAY, staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step)

    # Group the gradient step and the moving-average update: running train_op
    # (a no-op) forces both control dependencies to run first.
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    # BUG FIX: these evaluation ops were previously created INSIDE the
    # control_dependencies scope above, so every sess.run(accuracy) silently
    # executed a training step — on validation/test data. They must be
    # created outside the scope, independent of train_step. Evaluation uses
    # average_y, i.e. the moving-averaged weights.
    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())  # initialize all variables
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        test_feed = {x: mnist.test.images, y_: mnist.test.labels}
        for i in range(TRAINING_STEPS):
            # Report validation accuracy every 1000 steps.
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training step(s), validation accuracy using average model is %g" % (i, validate_acc))
            xs, ys = mnist.train.next_batch(batch_size)
            # One combined step: gradient update + moving-average update.
            sess.run(train_op, feed_dict={x: xs, y_: ys})
        test_acc = sess.run(accuracy, feed_dict=test_feed)
        print("After %d training step(s), test accuracy using average model is %g" % (TRAINING_STEPS, test_acc))
        print(datetime.datetime.now(), emoji.emojize(":thumbs_up:"))
        # Freeze variables into constants and export the inference graph
        # (everything needed to compute the "softmax" output).
        constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ['softmax'])
        with tf.gfile.GFile("mnistNet.pb", "wb") as f:
            f.write(constant_graph.SerializeToString())  # serialize to disk


if __name__ == "__main__":
    mnist = input_data.read_data_sets("../mnist", one_hot=True)  # 这里是tensorflow提供给mnist数据的专用接口，如果是其他自己的数据需要自己实现数据读取
    train(mnist)
