import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib import slim
import os
# "2" suppresses TensorFlow's native (C++) INFO and WARNING log output,
# leaving only errors on the console.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
# Load the MNIST dataset (downloaded into the current directory if absent);
# one_hot=True makes labels 10-element one-hot vectors.
mnist = input_data.read_data_sets("./", one_hot=True)


def main():
    """Build, train, and evaluate a LeNet-style CNN on MNIST.

    Trains with mini-batch SGD for a fixed number of steps, prints
    validation accuracy and checkpoints the model every 100 steps,
    then reports accuracy on the test set.

    :return: None
    """
    # Hyperparameters and graph placeholders.
    batch_size = 128
    train_step = 1000
    value_num = 784   # 28*28 flattened pixels per image
    label_num = 10    # ten one-hot digit classes
    keep_prob = tf.placeholder(dtype=tf.float32, name="keep_prob")
    X = tf.placeholder(dtype=tf.float32, shape=[None, value_num], name="x")
    Y = tf.placeholder(dtype=tf.float32, shape=[None, label_num], name="y")
    # Reshape flat 784-vectors into NHWC image tensors for the conv layers.
    x_image = tf.reshape(X, [-1, 28, 28, 1])
    # Build the convolutional network.
    # 1. Convolution layer (6 feature maps, 5x5 kernels, ReLU).
    with tf.name_scope("conv1"):
        conv1 = slim.conv2d(x_image, num_outputs=6, kernel_size=[5, 5],
                            padding="VALID", activation_fn=tf.nn.relu)
    # 2. Max-pooling layer (2x2 window, stride 2).
    with tf.name_scope("pool1"):
        pool1 = slim.max_pool2d(conv1, kernel_size=[2, 2],
                                stride=[2, 2], padding="VALID")
    # 3. Convolution layer (16 feature maps, 5x5 kernels, ReLU).
    with tf.name_scope("conv2"):
        conv2 = slim.conv2d(pool1, num_outputs=16, kernel_size=[5, 5],
                            padding="VALID", activation_fn=tf.nn.relu)
    # 4. Max-pooling layer (2x2 window, stride 2).
    with tf.name_scope("pool2"):
        pool2 = slim.max_pool2d(conv2, kernel_size=[2, 2],
                                stride=[2, 2], padding="VALID")
    # 5. Fully connected layer (120 units).
    with tf.name_scope("fc1"):
        flat = slim.flatten(pool2)
        fc1 = slim.fully_connected(flat, num_outputs=120, activation_fn=tf.nn.relu)
    # 6. Fully connected layer (84 units).
    with tf.name_scope("fc2"):
        fc2 = slim.fully_connected(fc1, num_outputs=84, activation_fn=tf.nn.relu)
    # 7. Dropout; keep_prob is fed at run time (0.6 train, 1.0 eval).
    with tf.name_scope("dropout"):
        dropout = slim.dropout(fc2, keep_prob=keep_prob)
    # 8. Output layer: raw logits; softmax is applied inside the loss op.
    with tf.name_scope("fc3"):
        logits = slim.fully_connected(dropout, num_outputs=10, activation_fn=None)
    # Cross-entropy loss. The _v2 op replaces the deprecated v1 version;
    # since labels come from a fed placeholder (no gradient w.r.t. labels
    # is used), the two are equivalent here.
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
    # Plain SGD optimizer (the original comment mislabeled this as "SAG").
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.3)
    # Alternative: Adam optimizer.
    # optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
    train_op = optimizer.minimize(loss=loss)
    # Accuracy: fraction of samples whose argmax prediction matches the
    # one-hot label. tf.argmax replaces the deprecated tf.arg_max alias.
    acc_pre = tf.equal(tf.argmax(Y, 1), tf.argmax(logits, 1))
    accuracy = tf.reduce_mean(tf.cast(acc_pre, tf.float32))
    # Checkpoint writer for model variables.
    saver = tf.train.Saver()
    # Variable initializer op.
    init_op = tf.global_variables_initializer()
    # Run the training session.
    with tf.Session() as sess:
        sess.run(init_op)
        # Evaluation feeds disable dropout via keep_prob=1.0.
        validation_set = {X: mnist.validation.images, Y: mnist.validation.labels, keep_prob: 1.0}
        test_set = {X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1.0}
        for i in range(train_step):
            x_train, y_train = mnist.train.next_batch(batch_size)
            _, losses = sess.run([train_op, loss],
                                 feed_dict={X: x_train, Y: y_train, keep_prob: 0.6})
            # Every 100 steps: report validation accuracy and checkpoint.
            if i > 0 and i % 100 == 0:
                validation_acc = sess.run(accuracy, feed_dict=validation_set)
                print("经过{}次迭代,loss={},accuracy={}".format(i, losses, validation_acc))
                saver.save(sess=sess, save_path="./model.ckpt", global_step=i)
        print("finished!!!")
        # Final accuracy on the held-out test set.
        test_acc = sess.run(accuracy, feed_dict=test_set)
        print("测试集上的accuracy={}".format(test_acc))
    return None


if __name__ == "__main__":
    main()
