import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import os

# Hyperparameters and filesystem locations.
# Raw strings guard the Windows paths: the originals only worked by luck
# (\l, \s are not escape sequences), and a path containing \t or \n would
# have been silently corrupted.
data_dir = r'E:\learning\MNIST_data'    # MNIST dataset directory
save_dir = r'E:\learning\save'          # checkpoint directory
log_dir = r'E:\learning\log'            # TensorBoard summary directory
learing_rate = 1e-4                     # Adam learning rate (name kept as-is: referenced below)

# Load the MNIST dataset (downloaded into data_dir on first run).
# one_hot=True makes labels 10-dimensional one-hot vectors, matching y_ below.
mnist = input_data.read_data_sets(data_dir, one_hot=True)

# Input placeholders: flattened 28x28 grayscale images and one-hot labels.
with tf.name_scope("input"):
    x = tf.placeholder(tf.float32, [None, 784], name='input_x')
    y_ = tf.placeholder(tf.float32, [None, 10], name='input_y_')

# Reshape the flat input into NHWC image tensors [batch, 28, 28, 1] for the
# conv layers, and log the first 10 images of each batch to TensorBoard.
with tf.name_scope("input_reshape"):
    image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
    tf.summary.image('input', image_shaped_input, 10)

# First conv block: 5x5 conv, 32 filters, stride 1, SAME padding keeps
# 28x28 spatial size; 2x2 max-pool halves it to 14x14.
with tf.name_scope("layer1_conv"):
    conv_1 = tf.layers.conv2d(image_shaped_input, filters=32, kernel_size=5, strides=1, padding='SAME',
                              activation=tf.nn.relu, name='conv_1')
    pool_1 = tf.layers.max_pooling2d(conv_1, pool_size=2, strides=2, name='pol_1')

# Second conv block: 5x5 conv, 64 filters, SAME padding keeps 14x14;
# 2x2 max-pool halves it to 7x7 (so features are [batch, 7, 7, 64]).
with tf.name_scope("layer2_conv"):
    conv_2 = tf.layers.conv2d(pool_1, filters=64, kernel_size=5, strides=1, padding='SAME',
                              activation=tf.nn.relu, name='conv_2')
    pool_2 = tf.layers.max_pooling2d(conv_2, pool_size=2, strides=2, name='pol_2')

# Fully connected layer over the flattened conv features, with dropout.
with tf.name_scope("layer3_dense"):
    flatten = tf.layers.flatten(pool_2, name='flatten')  # -> [batch, 7*7*64]
    # NOTE(review): no activation is passed, so dense_1 is linear; a ReLU
    # here would be conventional — confirm whether this is intentional.
    dense_1 = tf.layers.dense(flatten, units=1024, name='dense_1')
    keep_prob = tf.placeholder(tf.float32)  # dropout keep probability, fed at run time
    dropped = tf.nn.dropout(dense_1, keep_prob)

# Output layer: raw (unnormalized) logits for the 10 digit classes.
with tf.name_scope("layer4_dense"):
    dense_2 = tf.layers.dense(dropped, units=10, name='out_label')
    tf.summary.histogram('dense_2', dense_2)

# Loss: softmax cross-entropy between one-hot labels and logits
# (softmax is applied internally, so dense_2 must be raw logits).
with tf.name_scope("loss"):
    loss = tf.losses.softmax_cross_entropy(y_, dense_2, weights=1.0)
    tf.summary.scalar('loss', loss)

# Training op: Adam on the cross-entropy loss.
with tf.name_scope("train"):
    train_step = tf.train.AdamOptimizer(learning_rate=learing_rate).minimize(loss)

# Accuracy: fraction of examples whose argmax prediction matches the label.
with tf.name_scope("accuracy"):
    correct_pred = tf.equal(tf.argmax(dense_2, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    tf.summary.scalar('accuracy', accuracy)

# Merge all summary ops into a single fetchable tensor.
merged = tf.summary.merge_all()

def loader(saver, session, load_dir):
    """Restore the latest checkpoint from load_dir into session, if one exists.

    Returns the global step recorded in the restored checkpoint's filename,
    or 0 when there is nothing to restore (in which case load_dir is
    (re)created empty so training starts from scratch).
    """
    def extract_step(path):
        # Checkpoint files are named "<prefix>-<step>"; pull out the step.
        return int(os.path.basename(path).split('-')[-1])

    if not tf.gfile.Exists(load_dir):
        tf.gfile.MakeDirs(load_dir)
        return 0

    ckpt = tf.train.get_checkpoint_state(load_dir)
    if not (ckpt and ckpt.model_checkpoint_path):
        # Directory exists but holds no usable checkpoint: wipe and recreate
        # it so stale files don't accumulate, then start fresh.
        tf.gfile.DeleteRecursively(load_dir)
        tf.gfile.MakeDirs(load_dir)
        return 0

    saver.restore(session, ckpt.model_checkpoint_path)
    return extract_step(ckpt.model_checkpoint_path)



with tf.Session() as sess:
    # TensorBoard writers for training and test summaries.
    train_writer = tf.summary.FileWriter(log_dir + '/train', sess.graph)
    test_writer = tf.summary.FileWriter(log_dir + '/test')
    # Checkpoint saver; keep only the two most recent checkpoints.
    saver = tf.train.Saver(max_to_keep=2)
    # BUG FIX: variables must be initialized BEFORE restoring a checkpoint.
    # The original ran the initializer after loader()'s saver.restore(),
    # which overwrote the restored weights with fresh random values — so
    # "resuming" from last_step actually trained from scratch.
    sess.run(tf.global_variables_initializer())
    last_step = loader(saver, sess, save_dir)
    for i in range(last_step, 20000):
        batch_xs, batch_ys = mnist.train.next_batch(50)
        # Train with 50% dropout.
        summary, _ = sess.run([merged, train_step],
                              feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
        train_writer.add_summary(summary, i)
        if i % 10 == 0:
            # Evaluate on the full test set with dropout disabled (keep_prob=1).
            summary, acc = sess.run([merged, accuracy],
                                    feed_dict={x: mnist.test.images,
                                               y_: mnist.test.labels,
                                               keep_prob: 1.0})
            test_writer.add_summary(summary, i)
            print('Accuracy at step %s: %s' % (i, acc))
        if i % 1000 == 0:
            # Save a checkpoint tagged with the current global step.
            saver.save(sess, save_dir + "/model.ckpt", i)
            print("Save: {}".format(i))
    train_writer.close()
    test_writer.close()
