# -*- coding:utf-8 -*-

# @Time    : 2018/11/19 3:58 PM

# @Author  : Swing


from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf

from tensorflow import layers

# Location of the MNIST data; downloaded on first run if absent.
data_dir = '/Users/zhubin/Documents/ai/data/mnist/'
mnist = input_data.read_data_sets(data_dir, one_hot=True)

# Inputs: flattened 28x28 grayscale images and one-hot digit labels.
x = tf.placeholder(tf.float32, [None, 784], name='x')
y_ = tf.placeholder(tf.float32, [None, 10])
learning_rate = tf.placeholder(tf.float32)

with tf.name_scope('reshape'):
    # Restore the 2-D image layout (NHWC) expected by the conv layers.
    x_image = tf.reshape(x, [-1, 28, 28, 1])

with tf.name_scope('conv1'):
    # 5x5 conv, 32 feature maps: (N, 28, 28, 1) -> (N, 28, 28, 32).
    h_conv1 = layers.conv2d(x_image, 32, [5, 5], padding='SAME', activation=tf.nn.relu)

with tf.name_scope('pool1'):
    # 2x2 max-pool halves the spatial size: -> (N, 14, 14, 32).
    h_pool1 = layers.max_pooling2d(h_conv1, [2, 2], strides=2)

with tf.name_scope('conv2'):
    # 5x5 conv, 64 feature maps: -> (N, 14, 14, 64).
    h_conv2 = layers.conv2d(h_pool1, 64, [5, 5], padding='SAME', activation=tf.nn.relu)

with tf.name_scope('pool2'):
    # Second 2x2 max-pool: -> (N, 7, 7, 64).
    h_pool2 = layers.max_pooling2d(h_conv2, [2, 2], strides=2)

with tf.name_scope('fc1'):
    # Flatten to (N, 7*7*64) and project to a 1024-unit hidden layer.
    h_pool2_flat = layers.flatten(h_pool2)
    h_fc1 = layers.dense(h_pool2_flat, 1024, activation=tf.nn.relu)

with tf.name_scope('dropout'):
    # Probability of KEEPING a unit: feed 0.5 during training,
    # 1.0 during evaluation.
    keep_prob = tf.placeholder(tf.float32)
    # BUG FIX: tf.layers.dropout interprets its second positional
    # argument as the DROP rate and is a no-op unless training=True,
    # so the original call silently disabled dropout for the whole
    # run.  tf.nn.dropout uses keep-probability semantics, which is
    # what the feed_dict values in the training loop assume.
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

with tf.name_scope('fc2'):
    # Final linear layer producing the 10 class logits (no activation;
    # softmax is folded into the loss below).
    y = layers.dense(h_fc1_drop, 10, activation=None)

# Mean softmax cross-entropy over the batch.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))

# L2 weight decay over all trainable variables (NOTE: this includes
# biases as well as kernels), scaled by a small fixed coefficient.
l2_loss = tf.add_n([tf.nn.l2_loss(w) for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)])
total_loss = cross_entropy + 7e-5 * l2_loss
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)

# Evaluation ops are built ONCE, outside the training loop.  The
# original re-created tf.equal/tf.reduce_mean every 100 steps, adding
# new nodes to the graph for the whole run, and `accuracy` at the
# final-test branch was only bound because 1000 happened to be a
# multiple of 100.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Train: 3000 steps of SGD on mini-batches of 100 images.
for step in range(3000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    lr = 0.01  # constant learning rate, fed through the placeholder
    _, loss, l2_loss_value, total_loss_value = sess.run(
        [train_step, cross_entropy, l2_loss, total_loss],
        feed_dict={x: batch_xs, y_: batch_ys, learning_rate: lr, keep_prob: 0.5})

    if (step + 1) % 100 == 0:
        print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f' %
              (step + 1, loss, l2_loss_value, total_loss_value))
        # Accuracy on the current training batch.  BUG FIX: dropout is
        # disabled for evaluation (keep_prob=1.0); the original fed 0.5.
        print(sess.run(accuracy,
                       feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 1.0}))
    if (step + 1) % 1000 == 0:
        # Accuracy on the full held-out test set, dropout disabled.
        print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                            y_: mnist.test.labels,
                                            keep_prob: 1.0}))

# Release the session's resources explicitly.
sess.close()