#!/usr/bin/python
# coding:utf-8
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.examples.tutorials.mnist import input_data

""" 快速定义网络结构的工具  -- >>> Slim """


def lenet5(inputs):
    """Build a LeNet-5-style network with TF-Slim.

    Args:
        inputs: float tensor of shape [batch, 784] (flattened 28x28 MNIST images).

    Returns:
        Logits tensor of shape [batch, 10] (no activation applied — suitable
        for softmax cross-entropy losses).
    """
    inputs = tf.reshape(inputs, [-1, 28, 28, 1])
    # Layer 1: convolution with 32 filters of size 5x5.
    net = slim.conv2d(inputs, 32, [5, 5], padding="SAME", scope="layer1-conv")
    # Layer 2: 2x2 max pooling with stride 2.
    net = slim.max_pool2d(net, 2, stride=2, scope="layer2-max-pool")

    # Layer 3: convolution with 64 filters of size 5x5.
    # (Scope renamed from "layer2-conv": it collided with the layer-numbering
    # scheme used by the surrounding scopes.)
    net = slim.conv2d(net, 64, [5, 5], padding="SAME", scope="layer3-conv")
    # Layer 4: 2x2 max pooling with stride 2.
    net = slim.max_pool2d(net, 2, stride=2, scope="layer4-max-pool")

    net = slim.flatten(net, scope="flatten")  # flatten 4-D tensor to 2-D

    # Layer 5: fully connected hidden layer (slim's default ReLU activation).
    net = slim.fully_connected(net, 500, scope="layer5")
    # Output layer: must produce raw logits. slim.fully_connected defaults to
    # activation_fn=tf.nn.relu, which would clamp negative logits to zero
    # before the softmax cross-entropy loss — so disable the activation here.
    net = slim.fully_connected(net, 10, activation_fn=None, scope="output")
    return net


# 通过slim定义网络结构, 并使用之前章节中给出的方式训练定义好的模型
def train(mnist):
    """Train the slim-defined LeNet-5 on MNIST with plain SGD.

    Builds placeholders, the loss, and a GradientDescentOptimizer train op,
    runs 10000 mini-batch steps (batch size 100, lr 0.01), prints the batch
    loss every 1000 steps, and finally dumps the graph for TensorBoard.
    Device placement is logged to check whether a GPU is picked automatically.
    """
    x = tf.placeholder(tf.float32, [None, 784], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, 10], name="y-input")

    # Forward pass through the slim-built network.
    y = lenet5(x)

    # One-hot labels are converted to sparse class indices via argmax.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    loss = tf.reduce_mean(cross_entropy)
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

    # Training loop; log_device_placement shows which device each op runs on.
    session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
    with tf.Session(config=session_config) as sess:
        tf.global_variables_initializer().run()
        for step in range(10000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            _, batch_loss = sess.run([train_op, loss], feed_dict={x: batch_xs, y_: batch_ys})
            if step % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g" % (step, batch_loss))

    # Write the graph so it can be inspected in TensorBoard.
    writer = tf.summary.FileWriter("../log/slim", tf.get_default_graph())
    writer.close()


if __name__ == '__main__':
    # TODO: point this at the local MNIST data directory.
    mnist = input_data.read_data_sets("../le_datasets/mnist/input_data", one_hot=True)
    train(mnist)
