# 这是一个示例 Python 脚本。

# 按 Shift+F10 执行或将其替换为您的代码。
# 按 双击 Shift 在所有地方搜索类、文件、工具窗口、操作和设置。


# -*- coding:utf-8 -*-
# !/usr/bin/env python
import tensorflow.compat.v1 as tf  # tensorflowv1和v2很多不兼容，并且不能够支持运行时计算
import os
import input_data

# Suppress TensorFlow's C++ backend logging: '3' hides INFO, WARNING, and ERROR
# messages (only FATAL is printed). Must be set before TF initializes its runtime.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'


# Helper for creating weight tensors.
def weight_variable(shape):
    """Return a trainable weight Variable of the given shape.

    Values are drawn from a truncated normal distribution with
    standard deviation 0.1 (a common small-init for conv/FC weights).
    """
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))


# Helper for creating bias tensors.
def bias_variable(shape):
    """Return a trainable bias Variable of the given shape, filled with 0.1.

    A small positive constant init keeps ReLU units initially active.
    """
    init_val = tf.constant(0.1, shape=shape)
    return tf.Variable(init_val)


# Helper for building convolution layers.
def conv2d(x, W):
    """Apply a 2-D convolution of filter W over x.

    Uses stride 1 in every dimension and SAME padding, so the spatial
    size of the output matches the input.
    """
    unit_stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_stride, padding='SAME')


# Helper for building pooling layers.
def max_pool(x):
    """2x2 max-pooling with stride 2 and SAME padding.

    Halves each spatial dimension of x (e.g. 28x28 -> 14x14).
    """
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')


def bp_neural():
    """Train and evaluate a small CNN on MNIST (TF1 graph mode).

    Architecture: two 5x5 conv + 2x2 max-pool layers, a 512-unit ReLU
    fully-connected layer with dropout, and a softmax output over the
    10 digit classes. Trains with plain SGD on cross-entropy loss and
    prints train/test error every 50 steps plus the final test accuracy.

    Side effects: reads MNIST from ./MNIST_data (downloading if absent)
    and prints progress to stdout. Returns None.
    """
    tf.disable_eager_execution()  # TF1-style graph execution (v1/v2 incompatibility)
    mnist_data = input_data.read_data_sets("./MNIST_data", one_hot=True)
    train_data = mnist_data.train
    test_data = mnist_data.test

    conv1_channels = 10  # output channels of the first conv layer
    conv2_channels = 10  # output channels of the second conv layer
    input_n = 784  # 28*28, one input per pixel
    middle_input_n = 7 * 7 * conv2_channels  # flattened size after two 2x2 poolings
    output_n = 10  # digit classes
    hidden_n = 512  # fully-connected hidden width

    # Placeholders for flattened input images and one-hot labels.
    x_ = tf.placeholder(tf.float32, shape=[None, input_n])
    y_ = tf.placeholder(tf.float32, shape=[None, output_n])

    x_image = tf.reshape(x_, [-1, 28, 28, 1])  # reshape to NHWC for the conv layers
    W_conv1 = weight_variable([5, 5, 1, conv1_channels])
    b_conv1 = bias_variable([conv1_channels])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # first conv layer
    h_pool1 = max_pool(h_conv1)  # 28x28 -> 14x14

    W_conv2 = weight_variable([5, 5, conv1_channels, conv2_channels])
    b_conv2 = bias_variable([conv2_channels])
    # FIX: was assigned back into h_conv1, shadowing the first layer's output.
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  # second conv layer
    h_pool2 = max_pool(h_conv2)  # 14x14 -> 7x7

    mid_input = tf.reshape(h_pool2, [-1, middle_input_n])  # flatten for the FC layer

    # Fully-connected hidden layer (random-normal init, stddev 0.1).
    w1 = tf.Variable(tf.random_normal([middle_input_n, hidden_n], stddev=0.1))
    b1 = tf.Variable(tf.random_normal([1, hidden_n], stddev=0.1))
    mid_opt = tf.nn.relu(tf.matmul(mid_input, w1) + b1)

    keep_prob = tf.placeholder("float")  # dropout keep probability (fed per run)
    h_fc1_drop = tf.nn.dropout(mid_opt, keep_prob)  # dropout layer

    W_fc2 = weight_variable([hidden_n, output_n])
    b_fc2 = bias_variable([output_n])
    final_opt = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)  # softmax layer

    # Cross-entropy loss. FIX: clip probabilities before log() so a saturated
    # softmax output (exact 0) cannot produce log(0) -> NaN loss.
    loss_fun = tf.reduce_mean(
        -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(final_opt, 1e-10, 1.0)),
                       reduction_indices=[1]))
    # Learning rate 0.2: too large oscillates, too small converges slowly.
    opt = tf.train.GradientDescentOptimizer(0.2).minimize(loss_fun)

    # Accuracy: fraction of samples where argmax(prediction) == argmax(label).
    correct_pred = tf.equal(tf.argmax(final_opt, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    init = tf.global_variables_initializer()

    with tf.Session() as session:
        session.run(init)
        for i in range(1000):
            # Train on a mini-batch of 500 samples per step.
            batch_xs, batch_ys = train_data.next_batch(500)
            opt.run({x_: batch_xs, y_: batch_ys, keep_prob: 0.5})
            if i % 50 == 0:
                # FIX: evaluate with keep_prob=1.0 so dropout is disabled at
                # eval time; feeding 0.5 here corrupted the reported errors.
                train_error = 1 - accuracy.eval(
                    {x_: train_data.images, y_: train_data.labels, keep_prob: 1.0})
                test_error = 1 - accuracy.eval(
                    {x_: test_data.images, y_: test_data.labels, keep_prob: 1.0})
                print("step %d, training error %g , test error %.4f" % (i, train_error, test_error))

        # Final accuracy on the test set (dropout disabled).
        print("test accuracy %.4f" % accuracy.eval(
            {x_: test_data.images, y_: test_data.labels, keep_prob: 1.0}))


def main():
    """Script entry point: run the MNIST CNN training and evaluation."""
    bp_neural()


# Run training only when executed as a script, not on import.
if __name__ == '__main__':
    main()

# 访问 https://www.jetbrains.com/help/pycharm/ 获取 PyCharm 帮助
