# -*- coding: UTF-8 -*-
import numpy as np 
import tensorflow as tf 
# Silence TF info/deprecation logging; the previous verbosity is saved in
# old_v so it could be restored later.
old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)
# NOTE: this import is deliberately placed after the verbosity change so the
# tutorial loader's deprecation warnings are suppressed.
from tensorflow.examples.tutorials.mnist import input_data

# Download/read the MNIST data set into ./mnist_data
# (training set: 55000 images of 28 * 28 pixels), with labels one-hot
# encoded as 10-dimensional vectors.
mnist = input_data.read_data_sets('mnist_data', one_hot=True)

#one_hot encoding: each label is a 10-dimensional indicator vector
#0: 1000000000
#1: 0100000000
#2: 0010000000
#...


# Input placeholders. The first dimension is None so any batch size works.
# Pixel values arrive in [0, 255]; dividing by 255 rescales them to [0, 1].
input_x = tf.placeholder(tf.float32, [None, 28 * 28]) / 255
output_y = tf.placeholder(tf.int32, [None, 10])  # one-hot labels
# Reshape flat pixel rows into NHWC image batches: [batch, 28, 28, 1].
input_x_images = tf.reshape(input_x, [-1, 28, 28, 1])

# Select the first 3000 hand-written digits from the test dataset.
test_x = mnist.test.images[:3000]  # images
test_y = mnist.test.labels[:3000]  # one-hot labels
# Function-call form of print works on both Python 2 and 3 (the original
# used the Python-2-only statement form, inconsistent with the print(...)
# calls at the end of this file).
print(test_y)


# Build the convolutional neural network.
# --- Layer 1: convolution + max-pooling ---
# 32 filters of 5x5, stride 1; 'same' padding keeps the 28x28 spatial size,
# so the output is [batch, 28, 28, 32].
conv1 = tf.layers.conv2d(
    inputs=input_x_images,  # [batch, 28, 28, 1]
    filters=32,
    kernel_size=(5, 5),
    strides=1,
    padding='same',
    activation=tf.nn.relu)
# 2x2 max-pool with stride 2 halves the spatial dims: [batch, 14, 14, 32].
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=(2, 2), strides=2)

# --- Layer 2: convolution + max-pooling, then flatten ---
# 64 filters of 5x5 on the [batch, 14, 14, 32] input; 'same' padding keeps
# 14x14, so the output is [batch, 14, 14, 64].
conv2 = tf.layers.conv2d(
    inputs=pool1,
    filters=64,
    kernel_size=(5, 5),
    strides=1,
    padding='same',
    activation=tf.nn.relu)
# 2x2 max-pool with stride 2 -> [batch, 7, 7, 64].
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=(2, 2), strides=2)
# Flatten the feature maps for the dense layers: [batch, 7*7*64].
flat = tf.reshape(pool2, [-1, 7 * 7 * 64])

# Fully connected layer with 1024 neurons.
dense = tf.layers.dense(inputs=flat, units=1024, activation=tf.nn.relu)

# Dropout with rate 0.5.
# NOTE(review): tf.layers.dropout defaults to training=False, so as written
# dropout is never actually applied — confirm whether it was meant to be
# enabled during training via the `training` argument.
dropout = tf.layers.dropout(inputs=dense, rate=0.5)

# Output layer: 10 neurons (one per digit class), no activation here —
# softmax is applied inside the loss below.
logits = tf.layers.dense(inputs=dropout, units=10) # output shape [batch, 10]

# Cross-entropy loss between the one-hot labels and the logits
# (the softmax over the logits is computed internally).
loss = tf.losses.softmax_cross_entropy(onehot_labels=output_y, logits=logits)

# Minimize the loss with the Adam optimizer, learning rate 0.001.
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

# Accuracy metric: fraction of predictions matching the labels.
# Returns an (accuracy, update_op) pair and creates two LOCAL variables —
# hence the local-variables initializer required before running it.
accuracy = tf.metrics.accuracy(
    labels=tf.argmax(output_y, axis=1),
    predictions=tf.argmax(logits, axis=1)
)
# Create the session and initialize the graph.
# tf.initialize_all_variables / tf.initialize_local_variables are the
# long-deprecated names; the replacements below exist in every TF release
# that provides tf.layers / tf.metrics (already used above). The local
# initializer is required for tf.metrics.accuracy's internal counters.
sess = tf.Session()
init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init)



for step in range(2000):
    # Take the next mini-batch of 50 samples from the training set.
    batch = mnist.train.next_batch(50)
    # Run the optimizer and fetch the loss in ONE sess.run call: the
    # original issued two separate runs, performing the forward pass twice
    # per step for no benefit.
    _, train_loss = sess.run([train_op, loss],
                             {input_x: batch[0], output_y: batch[1]})
    if step % 100 == 0:
        # accuracy is the (value, update_op) pair from tf.metrics.accuracy;
        # element 0 is the (streaming, accumulated-so-far) accuracy value.
        # This restores the formatted print the original left commented out,
        # fixed so it does not crash on the tuple.
        test_accuracy = sess.run(accuracy, {input_x: test_x, output_y: test_y})
        print("Step=%d, Train loss=%.4f, [Test accuracy=%.2f]"
              % (step, train_loss, test_accuracy[0]))

# Test: run the first 20 test images through the network and compare the
# predicted digit (argmax over the logits) with the true label.
sample_logits = sess.run(logits, {input_x: test_x[:20]})
inferenced_y = np.argmax(sample_logits, axis=1)
real_y = np.argmax(test_y[:20], axis=1)
print(inferenced_y, 'Inferenced Numbers')  # predicted digits
print(real_y, 'Real Number')  # true digits
        
