import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

batch_size = 100

# Layer widths: 28x28 flattened input image, two hidden layers, 10 classes.
layer0_size = 28 * 28
layer1_size = 500
layer2_size = 500
layer3_size = 10


def _make_layer(fan_in, fan_out):
    """Return a dict of randomly-initialised weight and bias variables."""
    return {
        'weight': tf.Variable(tf.random_normal([fan_in, fan_out])),
        'bias': tf.Variable(tf.random_normal([1, fan_out])),
    }


layer1 = _make_layer(layer0_size, layer1_size)
layer2 = _make_layer(layer1_size, layer2_size)
layer3 = _make_layer(layer2_size, layer3_size)

# Placeholders for a batch of flattened images and their one-hot labels.
x = tf.placeholder('float', [None, 28*28])
y = tf.placeholder('float')

# Downloads MNIST to /tmp/data/ on first run; labels are one-hot encoded.
mnist = input_data.read_data_sets('/tmp/data/', one_hot=True)


def predict(xx):
    """Build the forward pass of the 3-layer MLP.

    Two ReLU-activated hidden layers followed by a linear output layer;
    returns the raw (unsoftmaxed) logits tensor for input tensor `xx`.
    """
    hidden = tf.nn.relu(tf.matmul(xx, layer1['weight']) + layer1['bias'])
    hidden = tf.nn.relu(tf.matmul(hidden, layer2['weight']) + layer2['bias'])
    return tf.matmul(hidden, layer3['weight']) + layer3['bias']


def train(xx):
    """Train the MLP on MNIST and report per-epoch loss and test accuracy.

    Args:
        xx: Input tensor the network is built on. NOTE(review): the
            feed_dict below feeds the module-level placeholder `x`, so
            training only connects when `xx` is `x` (as the call at the
            bottom of this file does) — confirm before reusing.

    Side effects: runs a TF session, prints the summed minibatch loss per
    epoch and the final accuracy on the MNIST test split.
    """
    activation = predict(xx)
    # Mean cross-entropy over the batch; labels are one-hot via `y`.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=activation))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    epochs_count = 40

    with tf.Session() as session:
        # initialize_all_variables() is deprecated since TF 0.12 and removed
        # in TF 2.x; global_variables_initializer() is the replacement.
        session.run(tf.global_variables_initializer())

        for epoch in range(epochs_count):
            epoch_loss = 0
            # Floor division drops any final partial batch, as the
            # original int(a/b) did.
            for _ in range(mnist.train.num_examples // batch_size):
                batch_x, batch_y = mnist.train.next_batch(batch_size)
                _, batch_cost = session.run([optimizer, cost],
                                            feed_dict={x: batch_x, y: batch_y})
                epoch_loss += batch_cost
            print('Epoch', epoch, 'completed out of', epochs_count, 'loss:', epoch_loss)

        # Evaluate on the held-out test set using the trained variables.
        correct = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))


if __name__ == '__main__':
    # Only start training when executed as a script, not on import.
    train(x)
