"""
通过mnist数据集手动实现卷积神经网络
mnist数据集特点: 图片的特征信息: 28 * 28 * 1, 目标值: one-hot编码[[0,0,0,1,...],[],]

1. 第一层卷积Filter: 32个过滤器, size 5 * 5, step: 1, padding:'SAME',扫描前后的宽度不变

2. 卷积过后的池化操作: size: 2 * 2, step:2, padding:'SAME', 扫描前后的宽度减半

3. 第二层卷积Filter: 64个过滤器, size 5 * 5, step: 1, padding:'SAME',扫描前后的宽度不变

4. 卷积过后的池化操作: size: 2 * 2, step:2, padding:'SAME', 扫描前后的宽度减半

5. 全连接层, 输出10个类型的概率, 获取优化损失训练op

6. 在会话迭代的运行op, 优化损失

"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data



def weight_variable(shape):
    """Create a trainable weight tensor initialized from a standard normal.

    :param shape: shape of the weight tensor to create
    :return: tf.Variable wrapping the randomly initialized weights
    """
    initial = tf.random_normal(dtype=tf.float32, shape=shape)
    return tf.Variable(initial)

def bias_variable(shape):
    """Create a trainable bias tensor initialized to zero.

    :param shape: shape of the bias tensor to create
    :return: tf.Variable wrapping the zero-initialized biases
    """
    initial = tf.constant(0.0, shape=shape)
    return tf.Variable(initial)

def model():
    """
    Build the CNN graph for MNIST digit recognition.

    Architecture:
        input [None, 784] -> reshape [-1, 28, 28, 1]
        conv1: 32 filters 5x5, stride 1, SAME, +bias, ReLU -> [-1, 28, 28, 32]
        max pool 2x2, stride 2, SAME                       -> [-1, 14, 14, 32]
        conv2: 64 filters 5x5, stride 1, SAME, +bias, ReLU -> [-1, 14, 14, 64]
        max pool 2x2, stride 2, SAME                       -> [-1, 7, 7, 64]
        fully connected [7*7*64, 10]                       -> logits [-1, 10]

    :return: (x_data placeholder [None, 784],
              y_true placeholder [None, 10] one-hot labels,
              y_predict logits [None, 10])
    """
    # Placeholders for the training data.
    with tf.variable_scope('data'):
        x_data = tf.placeholder(tf.float32, shape=[None, 784])
        y_true = tf.placeholder(tf.int32, shape=[None, 10])

    # First conv layer: convolution + bias + ReLU, then max pooling.
    with tf.variable_scope('conv1'):
        # 32 filters of shape 5 x 5 x 1.
        w_conv1 = weight_variable(shape=[5, 5, 1, 32])
        b_con1 = bias_variable(shape=[32])

        # conv2d needs a 4-D input [batch, height, width, channels],
        # so reshape the flat 784-pixel rows back into images.
        x_reshape = tf.reshape(x_data, shape=[-1, 28, 28, 1])
        # BUG FIX: b_con1 was created but never applied; add it to the
        # convolution output before the ReLU. Output: [-1, 28, 28, 32].
        x_relu1 = tf.nn.relu(
            tf.nn.conv2d(input=x_reshape, filter=w_conv1,
                         strides=[1, 1, 1, 1], padding="SAME") + b_con1)
        # Max pooling halves the spatial size: [-1, 14, 14, 32].
        x_pool1 = tf.nn.max_pool(value=x_relu1, ksize=[1, 2, 2, 1],
                                 strides=[1, 2, 2, 1], padding="SAME")

    # Second conv layer: takes the pooled output of conv1 as input.
    with tf.variable_scope('conv2'):
        w_conv2 = weight_variable(shape=[5, 5, 32, 64])
        b_con2 = bias_variable(shape=[64])
        # BUG FIX: b_con2 was created but never applied; add it before the
        # ReLU. Input: [-1, 14, 14, 32], output: [-1, 14, 14, 64].
        x_relu2 = tf.nn.relu(
            tf.nn.conv2d(input=x_pool1, filter=w_conv2,
                         strides=[1, 1, 1, 1], padding="SAME") + b_con2)
        # Max pooling again halves the spatial size: [-1, 7, 7, 64].
        x_pool2 = tf.nn.max_pool(value=x_relu2, ksize=[1, 2, 2, 1],
                                 strides=[1, 2, 2, 1], padding="SAME")

    # Fully connected layer: flatten, then [-1, 7*7*64] x [7*7*64, 10] -> [-1, 10].
    with tf.variable_scope('fc'):
        # Flatten the pooled feature maps into 2-D for the matmul.
        x_fc = tf.reshape(x_pool2, shape=[-1, 7 * 7 * 64])
        w_fc = weight_variable(shape=[7 * 7 * 64, 10])
        bias_fc = bias_variable(shape=[10])
        # Raw logits; softmax is applied later inside the loss.
        y_predict = tf.matmul(x_fc, w_fc) + bias_fc

    return x_data, y_true, y_predict



def compute(y_true, y_predict):
    """Compute the mean softmax cross-entropy between labels and logits.

    :param y_true: one-hot ground-truth labels, shape [batch, 10]
    :param y_predict: raw logits from the network, shape [batch, 10]
    :return: scalar op holding the mean loss over the batch
    """
    with tf.variable_scope('compute_loss'):
        per_example = tf.nn.softmax_cross_entropy_with_logits(
            labels=y_true, logits=y_predict)
        loss = tf.reduce_mean(per_example)
    return loss

def sgd(loss, y_true, y_predict, learning_rate=0.0001):
    """Build the gradient-descent training op and an accuracy op.

    :param loss: scalar loss op to minimize
    :param y_true: one-hot ground-truth labels, shape [batch, 10]
    :param y_predict: raw logits from the network, shape [batch, 10]
    :param learning_rate: step size for gradient descent; kept small because
        CNN training diverges easily with large rates (default 0.0001,
        matching the previous hard-coded value)
    :return: (train_op, accuracy) — the minimize op and a scalar op for the
        fraction of correct argmax predictions in the batch
    """
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=learning_rate).minimize(loss)
    # A prediction is correct when the argmax of the logits matches the
    # argmax of the one-hot label.
    equal_list = tf.equal(tf.argmax(y_true, 1), tf.argmax(y_predict, 1))
    # Mean of the boolean matches (cast to float) is the batch accuracy.
    accuracy = tf.reduce_mean(tf.cast(equal_list, tf.float32))
    return train_op, accuracy




def main():
    """Train the CNN on MNIST, printing training accuracy every 100 steps."""
    # Load the dataset with one-hot encoded labels.
    mnist = input_data.read_data_sets('./data/mnist/', one_hot=True)
    # Assemble the graph: network, loss, optimizer and accuracy ops.
    x_data, y_true, y_predict = model()
    loss = compute(y_true, y_predict)
    train_op, accuracy = sgd(loss, y_true, y_predict)
    # Op that initializes all variables in the graph.
    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init_op)
        # Iterate 2000 training steps over mini-batches of 100 images.
        for step in range(2000):
            batch_x, batch_y = mnist.train.next_batch(100)
            feed = {x_data: batch_x, y_true: batch_y}
            # Report the current batch accuracy every 100 steps.
            if step % 100 == 0:
                print("训练的准确率: ", sess.run(accuracy, feed_dict=feed))
            sess.run(train_op, feed_dict=feed)

if __name__ == '__main__':
    # NOTE: when using a CNN for image recognition, keep the learning rate
    # small so that training stays stable.
    main()

