#!/usr/bin/python
# -*- coding: UTF-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import time

x = tf.placeholder(tf.float32, [None, 784])  # flattened 28x28 grayscale images
y_ = tf.placeholder(tf.float32, [None, 10])  # one-hot labels for the 10 digit classes

x_image = tf.reshape(x, [-1, 28, 28, 1])  # restore NHWC image shape: [batch, 28, 28, 1]

# First convolutional layer
# Initialize the kernel and bias
# Note: tf.truncated_normal defaults to stddev=1.0; a smaller stddev
# (e.g. 0.1) is usually a better fit for sigmoid activations.
filter1 = tf.Variable(tf.truncated_normal([5, 5, 1, 6]))  # 5x5 kernel, 1 input channel, 6 output channels
bias1 = tf.Variable(tf.truncated_normal([6]))  # bias added to the convolution result
conv1 = tf.nn.conv2d(x_image, filter1, strides=[1, 1, 1, 1], padding='SAME')
h_conv1 = tf.nn.sigmoid(conv1 + bias1)  # output of the first convolutional layer
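# Optional sanity check (commented out): with SAME padding and stride 1 the
# spatial size is preserved, so h_conv1 is [batch, 28, 28, 6].
# print(h_conv1.get_shape())  # expected: (?, 28, 28, 6)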

# Max-pooling layer: take the maximum over each 2x2 window
maxPool2 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
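# 2x2 pooling with stride 2 halves each spatial dimension: [batch, 14, 14, 6]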

# Second convolutional layer: 5x5 kernel, 6 input channels, 16 output channels
filter2 = tf.Variable(tf.truncated_normal([5, 5, 6, 16]))
bias2 = tf.Variable(tf.truncated_normal([16]))
conv2 = tf.nn.conv2d(maxPool2, filter2, strides=[1, 1, 1, 1], padding='SAME')
h_conv2 = tf.nn.sigmoid(conv2 + bias2)

# Second max-pooling layer: another 2x2 reduction
maxPool3 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
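# After the second pooling the feature map is [batch, 7, 7, 16]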

# Third convolutional layer. Its input is the 7x7x16 feature map from the
# pooling layer above; with SAME padding the output is 7x7x120.
filter3 = tf.Variable(tf.truncated_normal([5, 5, 16, 120]))
bias3 = tf.Variable(tf.truncated_normal([120]))
conv3 = tf.nn.conv2d(maxPool3, filter3, strides=[1, 1, 1, 1], padding='SAME')
h_conv3 = tf.nn.sigmoid(conv3 + bias3)
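# h_conv3 is [batch, 7, 7, 120], which matches the 7 * 7 * 120 flatten size
# used by the first fully connected layer below.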

# Two fully connected layers follow. In a CNN the fully connected layers act
# as the "classifier": they map the learned distributed feature representation
# to the label space.

# Weights
W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 120, 80]))
# Biases
b_fc1 = tf.Variable(tf.truncated_normal([80]))
# Flatten the convolutional output into one vector per example
h_conv3_flat = tf.reshape(h_conv3, [-1, 7 * 7 * 120])
# Fully connected computation with a sigmoid activation
h_fc1 = tf.nn.sigmoid(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)
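# h_conv3_flat is [batch, 7 * 7 * 120] = [batch, 5880], so the matmul with the
# [5880, 80] weight matrix yields h_fc1 of shape [batch, 80].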

# Output layer: softmax over the 10 classes
W_fc2 = tf.Variable(tf.truncated_normal([80, 10]))
b_fc2 = tf.Variable(tf.truncated_normal([10]))
y_conv = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)
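# softmax turns the 10 logits z into a probability distribution:
#   softmax(z)_i = exp(z_i) / sum_j exp(z_j)
# so the outputs are non-negative and sum to 1 for every example.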

# Cross-entropy loss, trained with plain gradient descent.
# Clipping keeps tf.log away from log(0), which would produce NaNs.
cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y_conv, 1e-10, 1.0)))
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)

sess = tf.InteractiveSession()

# Accuracy: compare the predicted class (argmax of the softmax output) with the label
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
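# Example: if a prediction puts the most mass on class 3 and the one-hot label
# is also class 3, tf.equal yields True; casting the booleans to floats and
# averaging gives the fraction of correct predictions in the batch.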

# Initialize all variables (tf.initialize_all_variables is deprecated)
sess.run(tf.global_variables_initializer())

# Load the MNIST data
mnist_data_set = input_data.read_data_sets('MNIST_data', one_hot=True)
# Training loop
start_time = time.time()
for i in range(20000):
    # Fetch a training batch
    batch_xs, batch_ys = mnist_data_set.train.next_batch(200)

    # Every 100 batches, evaluate on the current batch and report accuracy
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={x: batch_xs, y_: batch_ys})
        print("step %d, training accuracy %g" % (i, train_accuracy))
        # Elapsed time since the last report
        end_time = time.time()
        print("time: %g" % (end_time - start_time))
        start_time = end_time

    # Train on the batch (every iteration, not only on reporting steps)
    train_step.run(feed_dict={x: batch_xs, y_: batch_ys})
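# A minimal sketch of a final held-out evaluation (not in the original script):
# feeding all 10,000 test images in one go works on most machines, but it is
# memory-hungry; evaluating in smaller chunks is the safer alternative.
test_accuracy = accuracy.eval(feed_dict={x: mnist_data_set.test.images,
                                         y_: mnist_data_set.test.labels})
print("test accuracy %g" % test_accuracy)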

# Close the session
sess.close()


