# coding: utf-8

"""A very simple MNIST classifier.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/beginners
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

# Log the TensorFlow version at import time; this script uses the TF 1.x
# API (tf.placeholder, tf.layers, tf.Session).
print(tf.__version__)


# print(dir(tf))

def calBestAccuracy(mnist, kernel_size=(5, 5), lr=0.01,
                    reg_param=7e-5):
    """Train a small CNN on MNIST and print losses and accuracy.

    Builds a conv-pool-conv-pool-fc-dropout-fc graph in the default TF1
    graph, trains it for 3000 SGD steps on softmax cross-entropy plus an
    L2 weight penalty, prints the losses and the mini-batch accuracy every
    100 steps, and the test-set accuracy every 1000 steps.

    Args:
        mnist: Dataset object exposing ``train.next_batch(n)``,
            ``test.images`` and ``test.labels`` (as returned by
            ``input_data.read_data_sets(..., one_hot=True)``).
        kernel_size: Spatial size of both convolution kernels, e.g. (5, 5).
            A tuple default avoids the shared-mutable-default pitfall.
        lr: SGD learning rate, fed into the graph at every step.
        reg_param: Scale factor for the L2 regularization term.
    """
    # Placeholders: flattened 28x28 images, one-hot labels, and the
    # learning rate (fed each step so it could be scheduled externally).
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])
    learning_rate = tf.placeholder(tf.float32)

    with tf.name_scope('reshape'):
        x_image = tf.reshape(x, [-1, 28, 28, 1])

    # First convolutional layer - maps one grayscale image to 32 feature maps.
    with tf.name_scope('conv1'):
        # Kernels start from a truncated normal distribution; biases use
        # the layer's default initializer (zeros).
        h_conv1 = tf.layers.conv2d(x_image, 32, kernel_size,
                                   padding='SAME',
                                   activation=tf.nn.relu,
                                   kernel_initializer=tf.truncated_normal_initializer())

    # Pooling layer - downsamples by 2X.
    with tf.name_scope('pool1'):
        h_pool1 = tf.layers.max_pooling2d(h_conv1, pool_size=[2, 2],
                                          strides=[2, 2], padding='VALID')

    # Second convolutional layer -- maps 32 feature maps to 64.
    with tf.name_scope('conv2'):
        h_conv2 = tf.layers.conv2d(h_pool1, 64, kernel_size,
                                   padding='SAME',
                                   activation=tf.nn.relu)

    # Second pooling layer.
    with tf.name_scope('pool2'):
        h_pool2 = tf.layers.max_pooling2d(h_conv2, pool_size=[2, 2],
                                          strides=[2, 2], padding='VALID')

    # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28
    # image is down to 7x7x64 feature maps -- maps this to 1024 features.
    with tf.name_scope('fc1'):
        h_pool2_flat = tf.layers.flatten(h_pool2)
        h_fc1 = tf.layers.dense(h_pool2_flat, 1024, activation=tf.nn.relu)

    # Dropout - controls the complexity of the model, prevents co-adaptation
    # of features. keep_prob is a placeholder so it can be 0.5 while
    # training and 1.0 (dropout off) while evaluating.
    with tf.name_scope('dropout'):
        keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Map the 1024 features to 10 classes, one for each digit.
    with tf.name_scope('fc2'):
        y = tf.layers.dense(h_fc1_drop, 10, activation=None)

    # The raw cross-entropy formulation
    #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)), ...))
    # can be numerically unstable, so we use
    # tf.nn.softmax_cross_entropy_with_logits on the raw logits 'y' and
    # average across the batch.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))

    # L2 penalty over every trainable variable, scaled by reg_param.
    l2_loss = tf.add_n([tf.nn.l2_loss(w) for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)])
    total_loss = cross_entropy + reg_param * l2_loss
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)

    # Build the evaluation ops ONCE, outside the training loop: the original
    # re-created them every 100 steps, growing the graph each time and
    # relying on `accuracy` leaking out of an earlier `if` branch.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Context manager guarantees the session's resources are released.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Train
        for step in range(3000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            _, loss, l2_loss_value, total_loss_value = sess.run(
                [train_step, cross_entropy, l2_loss, total_loss],
                feed_dict={x: batch_xs, y_: batch_ys,
                           learning_rate: lr, keep_prob: 0.5})

            if (step + 1) % 100 == 0:
                print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f' %
                      (step + 1, loss, l2_loss_value, total_loss_value))
                # Mini-batch accuracy, with dropout DISABLED (keep_prob=1.0);
                # the original fed 0.5 here, which underestimates accuracy.
                print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys,
                                                    keep_prob: 1.0}))
            if (step + 1) % 1000 == 0:
                # Test-set accuracy, also with dropout disabled.
                print(sess.run(accuracy,
                               feed_dict={x: mnist.test.images,
                                          y_: mnist.test.labels,
                                          keep_prob: 1.0}))


if __name__ == '__main__':
    # Load the MNIST data set (downloaded on first run) with one-hot labels.
    from tensorflow.examples.tutorials.mnist import input_data

    data_dir = '/tmp/tensorflow/mnist/input_data'
    dataset = input_data.read_data_sets(data_dir, one_hot=True)

    # Hyper-parameters come from the tinyenv experiment configuration;
    # flags() must be called before any parameter is read.
    # Available parameters include e.g.:
    #   FLAGS.learning_rate, FLAGS.kernel_size, FLAGS.reg_param
    from tinyenv.flags import flags

    FLAGS = flags()

    lr = 0.01
    kernel_size = FLAGS.kernel_size
    reg_param = FLAGS.reg_param
    print(lr, kernel_size, reg_param)

    calBestAccuracy(dataset,
                    lr=lr,
                    kernel_size=[kernel_size, kernel_size],
                    reg_param=reg_param)
