import argparse
import sys
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
FLAGS = None
import time

def main(_):
    """Train a two-layer MLP (784 -> 256 ReLU -> 10) on MNIST with L2 regularization.

    Reads MNIST from FLAGS.data_dir, trains for 16 epochs of 2000 mini-batches
    (batch size 100) with SGD (lr=0.5), prints per-epoch train/validation
    accuracy, the best validation accuracy seen, and the final test accuracy.
    Writes the graph to a TensorBoard log directory.
    """
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True,
                                      validation_size=10000)
    n_hidden1 = 256
    n_hidden2 = 10

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])
    # He-style init scale for the ReLU layer (2/sqrt(fan_in) ~= 0.071).
    # BUG FIX: this value was computed but unused; 0.071 was hard-coded below.
    stddev = 2 / np.sqrt(784)
    W_1 = tf.Variable(tf.truncated_normal([784, n_hidden1], stddev=stddev))
    b_1 = tf.Variable(tf.truncated_normal([n_hidden1]))
    a_1 = tf.nn.relu(tf.matmul(x, W_1) + b_1)

    W_2 = tf.Variable(tf.truncated_normal([n_hidden1, n_hidden2], stddev=stddev))
    b_2 = tf.Variable(tf.truncated_normal([n_hidden2]))
    z_2 = tf.matmul(a_1, W_2) + b_2  # logits; softmax is applied inside the loss

    # Define loss: softmax cross-entropy plus an L2 penalty on the weights.
    y_ = tf.placeholder(tf.float32, [None, 10])
    regularizer = tf.contrib.layers.l2_regularizer(scale=3.0 / 10000)
    tf.add_to_collection(tf.GraphKeys.WEIGHTS, W_1)
    tf.add_to_collection(tf.GraphKeys.WEIGHTS, W_2)
    reg_term = tf.contrib.layers.apply_regularization(regularizer)

    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=z_2)) + reg_term

    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

    # Accuracy ops are built once and reused for train/validation/test
    # (the original rebuilt identical ops before the test evaluation).
    correct_prediction = tf.equal(tf.argmax(z_2, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Context manager guarantees the session is released on exit.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        train_writer = tf.summary.FileWriter(
            'MNIST/logs/tf16_reg/train', sess.graph)

        # Train
        best = 0
        for epoch in range(16):
            for _ in range(2000):
                batch_xs, batch_ys = mnist.train.next_batch(100)
                sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

            # Evaluate on the full train and validation sets each epoch.
            accuracy_current_train = sess.run(
                accuracy,
                feed_dict={x: mnist.train.images, y_: mnist.train.labels})
            accuracy_current_validation = sess.run(
                accuracy,
                feed_dict={x: mnist.validation.images,
                           y_: mnist.validation.labels})

            print("Epoch %s: train: %s validation: %s"
                  % (epoch, accuracy_current_train, accuracy_current_validation))
            # Track the best validation accuracy seen so far.
            best = max(best, accuracy_current_validation)

        print("train best: %s" % best)

        # Test trained model.
        # BUG FIX: the original passed the value as a second print() argument,
        # so "%f" was printed literally and never substituted (and "accuracy"
        # was misspelled).
        test_accuracy = sess.run(accuracy,
                                 feed_dict={x: mnist.test.images,
                                            y_: mnist.test.labels})
        print("test accuracy is %f" % test_accuracy)

        train_writer.close()


if __name__ == '__main__':
    # Parse only the flags we know; forward anything unrecognized to tf.app.run
    # so TensorFlow's own flag handling still sees it.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--data_dir',
        type=str,
        default='../MNIST/',
        help='Directory for storing input data',
    )
    FLAGS, leftover = arg_parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + leftover)