import numpy as np
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data



if __name__ == '__main__':
    # Train a small LeNet-style CNN on MNIST (TensorFlow 1.x graph-mode API).
    mnist = input_data.read_data_sets('mnist_data', one_hot=True)

    # Inputs: flattened 28x28 images scaled to [0, 1], one-hot labels.
    input_x = tf.placeholder(tf.float32, [None, 28 * 28]) / 255
    output_y = tf.placeholder(tf.int32, [None, 10])
    input_x_images = tf.reshape(input_x, [-1, 28, 28, 1])

    # Dropout must only be active during training; tf.layers.dropout defaults
    # to training=False, so without this flag dropout would never be applied.
    is_training = tf.placeholder_with_default(False, shape=(), name='is_training')

    # Fixed evaluation subset.
    test_x = mnist.test.images[:3000]
    test_y = mnist.test.labels[:3000]

    conv1 = tf.layers.conv2d(
        inputs=input_x_images,
        filters=32,
        kernel_size=[5, 5],
        strides=1,
        padding='same',
        activation=tf.nn.relu
    )  # -> [batch, 28, 28, 32]

    pool1 = tf.layers.max_pooling2d(
        inputs=conv1,
        pool_size=[2, 2],
        strides=2,
    )  # -> [batch, 14, 14, 32]

    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        strides=1,
        padding='same',
        activation=tf.nn.relu
    )  # -> [batch, 14, 14, 64]

    pool2 = tf.layers.max_pooling2d(
        inputs=conv2,
        pool_size=[2, 2],
        strides=2,
    )  # -> [batch, 7, 7, 64]

    # Flatten for the dense head.
    flat = tf.reshape(pool2, [-1, 7 * 7 * 64])

    dense = tf.layers.dense(
        inputs=flat,
        units=1024,
        activation=tf.nn.relu
    )

    dropout = tf.layers.dropout(
        inputs=dense,
        rate=0.5,
        training=is_training  # active only when is_training is fed True
    )
    logits = tf.layers.dense(
        inputs=dropout,
        units=10
    )
    loss = tf.losses.softmax_cross_entropy(
        onehot_labels=output_y,
        logits=logits
    )
    train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

    # tf.metrics.accuracy returns (accuracy, update_op); run the update op to
    # fold the current batch into the streaming metric and get a fresh value.
    _, accuracy_update = tf.metrics.accuracy(
        labels=tf.argmax(output_y, axis=1),
        predictions=tf.argmax(logits, axis=1)
    )

    with tf.Session() as sess:
        # Local variables are needed for the streaming accuracy counters.
        init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        sess.run(init)

        for i in range(20000):
            batch = mnist.train.next_batch(50)
            # NOTE: fetch train_op into `_`, never back into `train_op` itself —
            # an op fetch returns None, which would poison the next sess.run.
            train_loss, _ = sess.run(
                [loss, train_op],
                {input_x: batch[0], output_y: batch[1], is_training: True}
            )
            if i % 100 == 0:
                test_accuracy = sess.run(accuracy_update, {input_x: test_x, output_y: test_y})
                print("Step = {0}, Train loss = {1}, [Test accuracy = {2}]".format(i, train_loss, test_accuracy))

        # Spot-check predictions on the first 20 test images.
        test_output = sess.run(logits, {input_x: test_x[:20]})
        inferenced_y = np.argmax(test_output, 1)
        print(inferenced_y, 'Inferenced numbers')
        print(np.argmax(test_y[:20], 1), 'Real numbers')