"""
MNIST digit classification.

Trains a single-layer softmax classifier using the TensorFlow 1.x
graph/session API (via tf.compat.v1).
"""

import tensorflow.compat.v1 as tf
import numpy as np
from tensorflow import keras

# Load MNIST: images are 28x28 uint8 arrays, labels are integer class ids 0-9.
# (The original code loaded the dataset a second time into an unused
# variable; that duplicate load has been removed.)
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# Flatten each 28x28 image into a 784-vector and scale pixels to [0, 1].
# The graph below declares xs as tf.float32 [None, 784]; feeding the raw
# [n, 28, 28] uint8 arrays would fail with a shape mismatch.
x_train = x_train.reshape(-1, 784).astype(np.float32) / 255.0
x_test = x_test.reshape(-1, 784).astype(np.float32) / 255.0

# One-hot encode the integer labels to match the [None, 10] placeholder
# and the cross-entropy formulation used below.
y_train = np.eye(10, dtype=np.float32)[y_train]
y_test = np.eye(10, dtype=np.float32)[y_test]

# In TensorFlow 2.x eager execution is on by default; this script uses the
# TF1 graph/session API, so eager execution must be disabled first.
tf.compat.v1.disable_eager_execution()


def add_layer(inputs, in_size, out_size, activation_func=None):
    """Append one fully-connected layer to the graph.

    Args:
        inputs: 2-D tensor of shape [batch, in_size].
        in_size: number of input features.
        out_size: number of output units.
        activation_func: optional activation applied to the affine output;
            when None the raw linear result is returned.

    Returns:
        The layer's output tensor of shape [batch, out_size].
    """
    # Weight matrix, initialised from a standard normal distribution.
    weight = tf.Variable(tf.random_normal([in_size, out_size]))
    # Bias row vector, started slightly positive (0.1) rather than at zero.
    bias = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    # Affine transform: inputs @ weight + bias.
    linear_out = tf.matmul(inputs, weight) + bias
    # Apply the activation when one was supplied, otherwise stay linear.
    return linear_out if activation_func is None else activation_func(linear_out)


def compute_accuracy(v_xs, v_ys):
    """Return the classifier's accuracy on a labelled batch.

    Args:
        v_xs: images, shape [n, 784].
        v_ys: one-hot labels, shape [n, 10].

    Returns:
        float: fraction of samples whose arg-max prediction matches the label.
    """
    global prediction
    # Forward pass only; sess.run returns a plain numpy array of class
    # probabilities, shape [n, 10].
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    # Compare predicted vs. true class indices in numpy. The original built
    # fresh tf.argmax/tf.equal/tf.reduce_mean ops on EVERY call, silently
    # growing the graph for the lifetime of the process (a memory leak when
    # called inside the training loop); pure numpy avoids that entirely and
    # yields the same value.
    correct = np.argmax(y_pre, axis=1) == np.argmax(v_ys, axis=1)
    return float(np.mean(correct))


# Each image is stored as a flat vector of length 784 (28 * 28 pixels).
xs = tf.placeholder(tf.float32, [None, 784])  # None: any batch size.
ys = tf.placeholder(tf.float32, [None, 10])   # 10 digit classes, one-hot.

# Single softmax layer mapping 784 inputs to 10 class probabilities.
prediction = add_layer(xs, 784, 10, activation_func=tf.nn.softmax)

# Cross-entropy loss, averaged over the batch.
# The prediction is clipped away from 0 before tf.log: when the softmax
# saturates, log(0) would produce -inf/NaN and poison the gradients.
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(ys * tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)),
                   reduction_indices=[1]))

# Plain gradient descent with learning rate 0.5 minimising the loss.
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# Standard TF1 session bootstrap. tf.initialize_all_variables() has been
# deprecated for years; global_variables_initializer is the replacement.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)


# def get_Batch(data, label, batch_size):
#     print(data.shape, label.shape)
#     input_queue = tf.train.slice_input_producer([data, label], num_epochs=1, shuffle=True, capacity=32)
#     x_batch, y_batch = tf.train.batch(input_queue, batch_size=batch_size, num_threads=1, capacity=32,
#                                       allow_smaller_final_batch=False)
#     return x_batch, y_batch


# Train with random mini-batches. The original fed all 60k training
# examples on every step (full-batch gradient descent) — extremely slow,
# and the commented-out get_Batch helper above shows mini-batching was the
# intent all along.
batch_size = 100
num_train = x_train.shape[0]
for i in range(1000):
    batch_idx = np.random.randint(0, num_train, batch_size)
    sess.run(train_step,
             feed_dict={xs: x_train[batch_idx], ys: y_train[batch_idx]})
    if i % 50 == 0:
        # Every 50 steps, report accuracy on the held-out test set
        # (note: this prints accuracy, not the loss value).
        print(compute_accuracy(x_test, y_test))
