# coding=utf-8


import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import pylab

# Load MNIST with integer class labels (one_hot=False) so the sparse
# softmax cross-entropy loss can consume them directly.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)

tf.reset_default_graph()

# Graph inputs.
x = tf.placeholder(dtype=tf.float32, shape=[None, 784])  # flattened 28x28 images
y = tf.placeholder(dtype=tf.int32, shape=[None])         # integer class labels
training = tf.placeholder(dtype=tf.bool)                 # True while training (enables dropout)


def _conv_pool(inputs, filters, kernel_size):
    """One conv1d(+ReLU) layer followed by a stride-2 max pool (halves the length)."""
    conv = tf.layers.conv1d(
        inputs=inputs,
        filters=filters,
        kernel_size=kernel_size,
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    return tf.layers.max_pooling1d(inputs=conv, pool_size=2, strides=2)


# Treat each 28x28 image as a length-28 sequence with 28 channels and stack
# three conv/pool stages, shrinking the sequence length 28 -> 14 -> 7 -> 3.
pool1 = _conv_pool(tf.reshape(x, [-1, 28, 28]), filters=32, kernel_size=5)   # 28 -> 14
pool2 = _conv_pool(pool1, filters=64, kernel_size=5)                          # 14 -> 7
pool3 = _conv_pool(pool2, filters=128, kernel_size=3)                         # 7 -> 3


def _fc(inputs, units, activation):
    """Fully-connected layer sharing the stack's initializer and L2 regularizer."""
    return tf.layers.dense(
        inputs=inputs,
        units=units,
        activation=activation,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))


# Classifier head: flatten the (3, 128) feature map, two hidden layers with
# dropout between them, then a linear 10-way output.
flat = tf.reshape(pool3, [-1, 3 * 128])
dense1 = _fc(flat, units=1024, activation=tf.nn.relu)
dropout1 = tf.layers.dropout(dense1, training=training)
dense2 = _fc(dropout1, units=512, activation=tf.nn.relu)
dropout2 = tf.layers.dropout(dense2, training=training)
logits = _fc(dropout2, units=10, activation=None)  # raw class scores
# Loss: softmax cross-entropy against the integer labels.
cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=logits)
# Bug fix: the dense layers register L2 kernel regularizers in the
# REGULARIZATION_LOSSES collection, but the original code never added them to
# the minimized loss, so the 0.003 L2 penalty had no effect. Include them here.
loss = cross_entropy + tf.losses.get_regularization_loss()
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
# Predicted class (argmax over logits) and batch accuracy.
pred = tf.argmax(logits, 1)
correct_prediction = tf.equal(tf.cast(pred, tf.int32), y)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

batch_size = 100

with tf.Session() as s:
    s.run(tf.global_variables_initializer())

    for epoch in range(50):  # number of training epochs
        # --- training: one pass over the training set ---
        total_batch = int(mnist.train.num_examples / batch_size)
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            feed_dict = {x: batch_x,
                         y: batch_y,
                         training: True}  # enable dropout while training
            _, loss_val, acc_val = s.run([train_op, loss, acc], feed_dict)

        # --- validation: evaluate on the held-out validation set ---
        total_batch = int(mnist.validation.num_examples / batch_size)
        val_acc, val_loss = 0.0, 0.0
        for i in range(total_batch):
            # Bug fix: the original drew batches from mnist.train here, so the
            # reported "validation" metrics were computed on training data.
            batch_x, batch_y = mnist.validation.next_batch(batch_size)
            feed_dict = {x: batch_x,
                         y: batch_y,
                         training: False}  # disable dropout for evaluation
            loss_val, acc_val = s.run([loss, acc], feed_dict=feed_dict)
            val_acc += acc_val
            val_loss += loss_val
        print("epoch:", epoch, "validation acc:", val_acc/total_batch, " loss", val_loss/total_batch)