# -*- coding:utf-8 -*-

# @Time    : 2018/11/22 4:00 PM

# @Author  : Swing


import tensorflow.contrib.slim as slim
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# --- Input pipeline and graph inputs ---------------------------------------

# Location of the MNIST data set (downloaded automatically if absent).
data_dir = 'data/mnist/'
mnist = input_data.read_data_sets(data_dir, one_hot=True)

# Switches batch-norm and dropout between training and inference behavior.
is_training = tf.placeholder(tf.bool, name='MODE')
# Moving-average decay used by batch normalization.
decay = tf.constant(0.9, name='decay')

# Flattened 28x28 grayscale images and their one-hot digit labels.
x = tf.placeholder(tf.float32, [None, 784], name='x')
y_ = tf.placeholder(tf.float32, [None, 10])

with tf.name_scope('reshape'):
    # NHWC image layout expected by slim.conv2d.
    x_image = tf.reshape(x, [-1, 28, 28, 1], name='x_image')

# Initial learning rate.
learning_rate = tf.constant(0.9, name='lr')

# Keyword arguments forwarded to slim.batch_norm through arg_scope below.
bn_params = {'is_training': is_training, 'decay': decay}

# Previous epoch's test cross-entropy; 0.0 means "no epoch finished yet".
loss = 0.0

# Shared layer settings: batch normalization after every conv / FC layer,
# Xavier weight initialization throughout.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    normalizer_fn=slim.batch_norm,
                    normalizer_params=bn_params,
                    weights_initializer=slim.xavier_initializer()):
    # Two conv+pool stages followed by a small fully-connected classifier.
    h = slim.conv2d(x_image, 128, [5, 5], scope='conv1')
    h = slim.max_pool2d(h, [2, 2], scope='pool1')
    h = slim.conv2d(h, 256, [5, 5], scope='conv2')
    h = slim.max_pool2d(h, [2, 2], scope='pool2')
    h = slim.flatten(h, scope='flatten')
    h = slim.fully_connected(h, 100, scope='fc1')
    h = slim.dropout(h, is_training=is_training, scope="dropout")
    # Output layer emits raw logits: no activation, no batch-norm.
    net = slim.fully_connected(h, 10, activation_fn=None, normalizer_fn=None, scope='fc2')

# --- Loss, optimizer, evaluation, and training loop ------------------------

# Mean softmax cross-entropy between the logits and the one-hot labels.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=y_)
)
# L2 penalty over every trainable variable.
# NOTE(review): this also penalizes batch-norm beta/gamma and biases;
# consider restricting it to the weight matrices.
l2_loss = tf.add_n([tf.nn.l2_loss(w) for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)])
total_loss = cross_entropy + 0.00005 * l2_loss

# BUGFIX: the learning rate must live in a (non-trainable) variable so it can
# actually be decayed during training. Rebinding the Python name
# `learning_rate` to `learning_rate * 0.1` (as the original code did) creates
# a new tensor but cannot affect the optimizer, which keeps the constant
# tensor it was built with — the decay silently never applied.
lr = tf.Variable(0.9, trainable=False, dtype=tf.float32, name='lr_var')
lr_decay_op = tf.assign(lr, lr * 0.1)

# Run the batch-norm moving-average updates alongside each training step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # BUGFIX: minimize the regularized loss. Previously `total_loss` was
    # computed but never used — the optimizer minimized plain cross_entropy,
    # making the L2 term dead code.
    train_step = tf.train.GradientDescentOptimizer(lr).minimize(total_loss)

# Fraction of examples whose arg-max prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(net, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

batch_size = 100
n_batch = mnist.train.num_examples // batch_size

init_op = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_op)

# Training: one pass over the training set per epoch, then evaluate on the
# full test set. The learning rate is divided by 10 whenever the test
# cross-entropy fails to improve from the previous epoch.
prev_ce = None
for epoch in range(100):
    for _ in range(n_batch):
        # Use batch_size consistently (was hard-coded to 100).
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(train_step,
                 feed_dict={x: batch_xs,
                            y_: batch_ys,
                            is_training: True})

    # Evaluate on the whole test set in inference mode.
    accuracy_value, lr_value, cross_entropy_value = sess.run(
        [accuracy, lr, cross_entropy],
        feed_dict={x: mnist.test.images,
                   y_: mnist.test.labels,
                   is_training: False})

    print('epoch: ', epoch + 1,
          'lr: ', lr_value,
          'accuracy: ', accuracy_value,
          'cross entropy: ', cross_entropy_value
          )

    # Decay the learning rate in the graph (effective on the next epoch).
    if prev_ce is not None and cross_entropy_value > prev_ce:
        sess.run(lr_decay_op)

    prev_ce = cross_entropy_value