# -*- coding:utf-8 -*-

# @Time    : 18-11-19 下午10:53

# @Author  : Swing


from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf

import keras

data_dir = 'data/mnist/'
# data_dir = '/home/swing/Documents/data/mnist/'
# data_dir = '/Users/zhubin/Documents/ai/data/mnist/'
# Download (if needed) and load MNIST; labels come back as one-hot 10-vectors.
mnist = input_data.read_data_sets(data_dir, one_hot=True)

# Fed True during training and False during evaluation; intended to switch
# batch-norm / dropout behaviour between the two phases.
is_training = tf.placeholder(tf.bool, name='MODE')

# Optional kernel/bias initializers; currently unused in the active graph.
truncated_normal_initializer = tf.keras.initializers.truncated_normal(stddev=0.1)
constant_initializer = tf.keras.initializers.constant(0.1)

# x: flattened 28x28 grayscale images; yt: one-hot ground-truth labels.
x = tf.placeholder(tf.float32, [None, 784], name='x')
yt = tf.placeholder(tf.float32, [None, 10], name='yt')

# learning_rate = tf.placeholder(tf.float32, name='lr')
# NOTE(review): this variable is decayed and printed by the training loop
# below, but the optimizer is built with a hard-coded 0.01 — verify which
# rate is actually intended to drive training.
learning_rate = tf.Variable(0.01, name='lr')

# Reshape the flat 784-vectors into NHWC image tensors for the conv stack.
with tf.name_scope('reshape'):
    x_image = tf.reshape(x, [-1, 28, 28, 1], name='x_image')

# L2 weight-decay factor shared by every regularized layer.
# BUG FIX: was keras.regularizers.l2('0.0005') — a *string*; l2() expects a
# float. Also switched to tf.keras.regularizers for consistency with the
# tf.keras layers used everywhere else in this file.
weight_decay = 0.0005

# First convolutional layer: 128 5x5 filters, stride 1, SAME padding.
net = tf.keras.layers.Conv2D(128, [5, 5], strides=[1, 1],
                             kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
                             padding='SAME', input_shape=[28, 28, 1], name='conv1')(x_image)
net = tf.keras.layers.Activation(tf.nn.relu)(net)
# BUG FIX: the original passed `trainable=is_training` — `trainable` is a
# constructor flag expecting a Python bool, so the tensor was just truthy and
# the is_training feed never reached batch-norm. The train/eval switch belongs
# in the call-time `training=` argument.
net = tf.keras.layers.BatchNormalization()(net, training=is_training)

# First pooling layer: 2x2 max pool, 28x28 -> 14x14.
net = tf.keras.layers.MaxPool2D([2, 2], name='pool1')(net)

# Second convolutional layer: 256 5x5 filters.
net = tf.keras.layers.Conv2D(256, [5, 5], strides=[1, 1],
                             kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
                             padding='SAME', name='conv2')(net)
net = tf.keras.layers.Activation(tf.nn.relu)(net)
net = tf.keras.layers.BatchNormalization()(net, training=is_training)

# Second pooling layer, 14x14 -> 7x7.
net = tf.keras.layers.MaxPool2D([2, 2], name='pool2')(net)

# Fully connected head: flatten(7*7*256) -> 100 -> 10.
net = tf.keras.layers.Flatten()(net)
net = tf.keras.layers.Dense(100,
                            kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
                            activation='relu')(net)
net = tf.keras.layers.BatchNormalization()(net, training=is_training)

# BUG FIX: keras Dropout takes the *drop* rate, not a keep probability, and
# must receive `training=` at call time so it is disabled during evaluation.
keep_prob = 0.5
net = tf.keras.layers.Dropout(rate=1.0 - keep_prob)(net, training=is_training)

# Output layer: class probabilities over the 10 digits.
net = tf.keras.layers.Dense(10,
                            kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
                            activation='softmax')(net)

# Mean categorical cross-entropy between one-hot targets and softmax output.
cross_entropy = tf.reduce_mean(
    tf.keras.losses.categorical_crossentropy(yt, net)
)

# Batch-norm's moving-average updates are registered in UPDATE_OPS and must
# run alongside every training step, hence the control dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # BUG FIX: the optimizer was built with a hard-coded 0.01, ignoring the
    # `learning_rate` variable that the script defines, decays, and prints.
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)

init_op = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_op)

batch_size = 100
n_batch = mnist.train.num_examples // batch_size

# Build the evaluation ops ONCE, outside the epoch loop.
# BUG FIX: these ops used to be re-created every epoch, leaking nodes into
# the graph and making each successive epoch slower.
correct_prediction = tf.equal(tf.argmax(net, 1), tf.argmax(yt, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# BUG FIX: `learning_rate = learning_rate * 0.9` only rebound the Python name
# to a new multiply op — the tf Variable itself was never updated. tf.assign
# actually writes the decayed value back into the graph.
decay_lr = tf.assign(learning_rate, learning_rate * 0.9)

# Training loop: 100 epochs of mini-batch SGD, evaluating on the full test
# set after each epoch.
for epoch in range(100):
    for batch in range(n_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(train_step,
                 feed_dict={x: batch_xs,
                            yt: batch_ys,
                            is_training: True})

    # Evaluate with is_training=False so batch-norm uses its moving
    # statistics. Results go into *_value names so the graph tensors
    # (`accuracy`, `learning_rate`) are not clobbered by numpy outputs.
    accuracy_value, learning_rate_value, cross_entropy_value = sess.run(
        [accuracy, learning_rate, cross_entropy],
        feed_dict={x: mnist.test.images,
                   yt: mnist.test.labels,
                   is_training: False})

    print('epoch: ', epoch + 1,
          'lr: ', learning_rate_value,
          'accuracy: ', accuracy_value,
          'cross entropy: ', cross_entropy_value,
          )

    # Exponential decay: lr <- lr * 0.9 after every epoch.
    sess.run(decay_lr)
