from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf


def main():
    """Train a small two-conv-layer CNN on MNIST and report test accuracy.

    Downloads MNIST into ./data/ if needed, builds a
    conv-pool-conv-pool-fc-dropout-fc graph, trains with Adam, and stops
    early once test accuracy exceeds 0.985.
    """
    # Load the dataset (one-hot labels for softmax cross-entropy).
    data_dir = './data/'
    mnist = input_data.read_data_sets(data_dir, one_hot=True)

    # Input placeholders: flattened 28x28 images and 10-class labels.
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])

    def weight_variable(shape):
        # Small random init breaks symmetry; stddev 0.1 keeps pre-activations small.
        init = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(init)

    def bias_variable(shape):
        # Slightly positive bias helps avoid dead ReLUs early in training.
        init = tf.constant(0.1, shape=shape)
        return tf.Variable(init)

    def conv2d(x, W):
        # Stride-1 convolution; SAME padding preserves spatial size.
        return tf.nn.conv2d(
            x, W, strides=[1, 1, 1, 1], padding='SAME')

    def max_pool_2x2(x):
        # 2x2 max pooling halves each spatial dim (28->14->7; sizes are even,
        # so VALID and SAME are equivalent here).
        return tf.nn.max_pool(
            x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # Reshape flat pixels to NHWC for the conv layers.
    x_image = tf.reshape(x, [-1, 28, 28, 1])

    # conv1: 5x5x1x32 kernel; pooled output is 14x14.
    W1 = weight_variable([5, 5, 1, 32])
    b1 = bias_variable([32])
    h1 = tf.nn.relu(conv2d(x_image, W1) + b1)
    p1 = max_pool_2x2(h1)

    # conv2: 4x4x32x64 kernel (comment previously mislabeled it 4x4x1x64);
    # pooled output is 7x7.
    W2 = weight_variable([4, 4, 32, 64])
    b2 = bias_variable([64])

    h2 = tf.nn.relu(conv2d(p1, W2) + b2)
    p2 = max_pool_2x2(h2)

    # fc1: flatten 7x7x64 -> 1024.
    p_flat = tf.reshape(p2, [-1, 7 * 7 * 64])

    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])

    h_fc1 = tf.nn.relu(tf.matmul(p_flat, W_fc1) + b_fc1)
    # Dropout keep-probability is fed at runtime: <1.0 for training,
    # 1.0 for evaluation.
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # fc2: 1024 -> 10 logits.
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])

    y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

    # Softmax cross-entropy plus a small L2 penalty over all trainable
    # variables (note: this includes biases as written).
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    l2_loss = tf.add_n(
        [tf.nn.l2_loss(w) for w in
         tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)])
    total_loss = cross_entropy + 5e-5 * l2_loss

    # Learning rate is a placeholder so it could be scheduled per step
    # without rebuilding the graph.
    learning_rate = tf.placeholder(tf.float32)
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(total_loss)

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Context manager guarantees the session and its resources are released.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        acc = 0.0
        for step in range(20000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            lr = 0.001
            _, loss = sess.run(
                [train_step, cross_entropy],
                feed_dict={x: batch_xs, y_: batch_ys,
                           learning_rate: lr, keep_prob: 0.6})
            # Evaluate only every 10 steps: a full test-set pass per step is
            # wasteful. BUG FIX: keep_prob must be 1.0 at evaluation time —
            # the original fed 0.6, randomly dropping units during testing
            # and biasing the reported accuracy.
            if (step + 1) % 10 == 0:
                acc = sess.run(
                    accuracy,
                    feed_dict={x: mnist.test.images,
                               y_: mnist.test.labels,
                               keep_prob: 1.0})
                print("%4d train steps: acc: %5.4f, cross_entropy: %.4f; "
                      % ((step + 1), acc, loss))
                # Early stop once test accuracy clears the target.
                if acc > 0.985:
                    break
        print("----------final result----------")
        print("Accuracy after %d train steps: %.5f" % ((step + 1), acc))

# Run the training script only when executed directly, not on import.
if __name__ == '__main__':
    main()