# -*- coding:utf-8 -*-

# @Time    : 2018/11/21 4:35 PM

# @Author  : Swing

from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf

import tensorflow.contrib.slim as slim

# Location of the cached MNIST dataset; machine-specific alternatives kept below.
data_dir = 'data/mnist/'
# data_dir = '/home/swing/Documents/data/mnist/'
# data_dir = '/Users/zhubin/Documents/ai/data/mnist/'
# Downloads MNIST on first run, then loads it with one-hot encoded labels.
mnist = input_data.read_data_sets(data_dir, one_hot=True)

# NOTE(review): these module-level placeholders look unused — train() builds
# its own x/y_ and never feeds this learning_rate; confirm before removing.
x = tf.placeholder(tf.float32, [None, 784], name='x')
y_ = tf.placeholder(tf.float32, [None, 10])

learning_rate = tf.placeholder(tf.float32)


def lenet_batchnorm(input, is_training):
    """Build a LeNet-style convnet with batch normalization.

    Args:
        input: 4-D image tensor in NHWC layout (28x28x1 MNIST images here).
        is_training: bool tensor toggling batch-norm / dropout training mode.

    Returns:
        A [batch, 10] tensor of un-normalized class logits.
    """
    bn_params = {"is_training": is_training, "decay": 0.9}
    # All conv/FC layers share L2 weight regularization and batch norm.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(0.0005),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=bn_params):
        h = slim.conv2d(input, 128, [5, 5], scope="conv1")
        h = slim.max_pool2d(h, [2, 2], scope="pool1")
        # NOTE(review): 265 filters looks like a typo for 256 — confirm intent.
        h = slim.conv2d(h, 265, [5, 5], scope="conv2")
        h = slim.max_pool2d(h, [2, 2], scope="pool2")
        h = slim.flatten(h, scope="flatten")
        h = slim.fully_connected(h, 100, scope="fc3")
        h = slim.dropout(h, is_training=is_training, scope="dropout")
        # Logits layer: no activation and no batch norm on the output.
        return slim.fully_connected(h, 10, activation_fn=None,
                                    normalizer_fn=None, scope="prob")


def train():
    """Train the batch-normalized LeNet on MNIST and print test accuracy per epoch.

    Builds the graph (placeholders, network, loss, optimizer, metrics), then
    runs 101 epochs of mini-batch training, evaluating on the full test set
    after each epoch. No return value; progress is printed to stdout.
    """
    is_training = tf.placeholder(tf.bool, name='MODE')
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    image = tf.reshape(x, [-1, 28, 28, 1])
    with tf.name_scope("image"):
        tf.summary.image("image", image)

    y = lenet_batchnorm(image, is_training)
    # Reduce the per-example cross entropy to a scalar. The original passed
    # the unreduced vector to minimize(), which relies on implicit gradient
    # summation and couples the effective learning rate to the batch size.
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    global_step = slim.get_or_create_global_step()
    # Exponentially decayed learning rate. It was previously computed but
    # never used — the optimizer ran at a hard-coded 0.01 instead.
    learning_rate = tf.train.exponential_decay(
        1e-4, global_step * 100, 50000, 0.95, staircase=True)
    # Batch-norm moving averages live in UPDATE_OPS; run them with each step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(learning_rate).minimize(
            loss, global_step=global_step)

    correct_prediction = tf.equal(tf.argmax(y, axis=1), tf.argmax(y_, axis=1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar("acc", accuracy)

    batch_size = 100
    n_batch = mnist.train.num_examples // batch_size

    # Context manager guarantees the session is closed even on error.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(101):
            for _ in range(n_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_op, feed_dict={x: batch_xs,
                                              y_: batch_ys,
                                              is_training: True})
            # Evaluate on the test set. Fetch into a separate name: the
            # original rebound `accuracy` to the fetched Python value, which
            # made sess.run fail with a fetch TypeError on the second epoch.
            test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images,
                                                     y_: mnist.test.labels,
                                                     is_training: False})
            print('epoch: ', epoch + 1, 'accuracy: ', test_acc)


# Script entry point: build the graph and start training when run directly.
if __name__ == "__main__":
    train()
