# -*-coding:utf-8 -*-
# @Time: 2023/4/10 14:42
# @Author: cuishuohao
# @File: demo_mnist
# @Software: PyCharm

import tensorflow as tf
from tensorflow import keras
from keras import datasets
import matplotlib.pyplot as plt
import numpy as np

# Load the MNIST dataset: 60k training and 10k test 28x28 grayscale digits.
(x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()

# Visualize the first 20 training samples on a 4x5 grid.
fig, axes = plt.subplots(nrows=4, ncols=5, sharex='all', sharey='all')
axes = axes.flatten()
for idx in range(20):
    axes[idx].imshow(x_train[idx].reshape(28, 28), cmap='Greys')
# Shared axes: clearing ticks on one subplot clears them everywhere.
axes[0].set_xticks([])
axes[0].set_yticks([])
plt.tight_layout()
plt.show()

# Scale pixel values from [0, 255] down to [0, 1] as float32.
x_train = tf.convert_to_tensor(x_train, dtype=tf.float32) / 255.
y_train = tf.convert_to_tensor(y_train, dtype=tf.int32)
# Flatten each 28x28 image into a 784-element row vector.
x_train = tf.reshape(x_train, [-1, 28 * 28])
print(x_train.shape)

# One-hot encode the labels over the 10 digit classes.
y_train = tf.one_hot(y_train, depth=10)

# 5. Initialize the parameters of a 784-256-128-10 MLP.
# tf.random.truncated_normal draws from a truncated normal distribution
# whose values are restricted to [mean - 2*stddev, mean + 2*stddev].
hidden1, hidden2, n_classes = 256, 128, 10

w1 = tf.Variable(tf.random.truncated_normal([28 * 28, hidden1], stddev=0.1))
b1 = tf.Variable(tf.zeros([hidden1]))

w2 = tf.Variable(tf.random.truncated_normal([hidden1, hidden2], stddev=0.1))
b2 = tf.Variable(tf.zeros([hidden2]))

w3 = tf.Variable(tf.random.truncated_normal([hidden2, n_classes], stddev=0.1))
b3 = tf.Variable(tf.zeros([n_classes]))

# 6. One forward pass through the network.
# (1) Layer 1: (60000, 784) @ (784, 256) + (256); hidden layers use ReLU.
net1 = x_train @ w1 + b1
out1 = tf.nn.relu(net1)
print(out1.shape)

# (2) Layers 2 and 3.
# Layer 2: (60000, 256) @ (256, 128) + (128)
net2 = out1 @ w2 + b2
out2 = tf.nn.relu(net2)

# Output layer: (60000, 128) @ (128, 10) + (10)
net3 = out2 @ w3 + b3
out3 = tf.nn.softmax(net3)  # class probabilities, for inspection only

# (3) Cross-entropy between model output and the true labels.
# BUG FIX: softmax_cross_entropy_with_logits expects the *unnormalized*
# logits (net3). The original passed the softmax output (out3), which
# applies softmax twice and produces a wrong, badly scaled loss.
loss = tf.nn.softmax_cross_entropy_with_logits(labels=y_train, logits=net3)
print(loss)

# Reduce the per-sample losses to a single mean value.
loss = tf.reduce_mean(loss)
print(loss)

# One backward pass: compute the gradient of the loss w.r.t. each parameter.
with tf.GradientTape() as tape:
    tape.watch([w1, b1, w2, b2, w3, b3])
    # Forward pass up to the raw logits. Do NOT apply softmax here:
    # softmax_cross_entropy_with_logits applies softmax internally, and
    # feeding it softmaxed values (as the original did) normalizes twice.
    logits = tf.nn.relu(tf.nn.relu(x_train @ w1 + b1) @ w2 + b2) @ w3 + b3
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_train, logits=logits))
grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])

# Learning rate for plain gradient descent.
lr = 0.001
# Record the loss after every update so training can be visualized later.
All_loss = []
# Repeated gradient-descent epochs.
# for t in range(1001):  # T = 1001; reduced to 31 to keep the demo fast
for t in range(31):
    # Shuffle the dataset. BUG FIX: tf.random.shuffle returns a NEW shuffled
    # tensor (it does not shuffle in place), so the original calls discarded
    # their results; shuffling x and y in two separate calls would also
    # desynchronize the image/label pairing. Draw one permutation instead
    # and apply it to both tensors.
    perm = tf.random.shuffle(tf.range(x_train.shape[0], dtype=tf.int32))
    x_train = tf.gather(x_train, perm)
    y_train = tf.gather(y_train, perm)
    for i in range(10):  # N = 10 updates per epoch
        # Pick one training example at random (true stochastic GD).
        # BUG FIX: range starts at 0 so index 0 can be selected too.
        rand = np.random.randint(0, x_train.shape[0])
        x_sample = tf.reshape(x_train[rand, :], (1, -1))  # shape (1, 784)
        y_sample = tf.reshape(y_train[rand, :], (1, -1))  # shape (1, 10)
        with tf.GradientTape() as tape:
            tape.watch([w1, b1, w2, b2, w3, b3])
            # BUG FIX: the original ignored the sampled example and ran the
            # tape on the FULL training set (10 identical full-batch steps
            # per epoch). Use the sample, and feed raw logits to the loss —
            # softmax_cross_entropy_with_logits applies softmax itself.
            logits = (tf.nn.relu(tf.nn.relu(x_sample @ w1 + b1) @ w2 + b2)
                      @ w3 + b3)
            loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=y_sample,
                                                        logits=logits))
            All_loss.append(loss)
        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])

        # Parameter update via Variable.assign_sub(value):
        # ref = ref - value, i.e. one gradient-descent step.
        w1.assign_sub(lr * grads[0])
        b1.assign_sub(lr * grads[1])
        w2.assign_sub(lr * grads[2])
        b2.assign_sub(lr * grads[3])
        w3.assign_sub(lr * grads[4])
        b3.assign_sub(lr * grads[5])
    # Report progress every 10 epochs.
    if t % 10 == 0:
        print(t, 'loss:', float(loss))

# Plot how the loss evolved over the course of training.
plt.plot(All_loss)
plt.show()

# 9. Evaluate the model on the held-out test set.
# Preprocess the test data exactly like the training data:
# scale to [0, 1], flatten to 784, and cast labels for comparison.
x_test = tf.convert_to_tensor(x_test, dtype=tf.float32) / 255.
y_test = tf.convert_to_tensor(y_test, dtype=tf.int32)
x_test = tf.reshape(x_test, [-1, 28 * 28])
y_test = tf.cast(y_test, tf.int64)
# Forward pass with the trained parameters.
out3 = tf.nn.softmax(tf.nn.relu(tf.nn.relu(x_test @ w1 + b1) @ w2 + b2) @ w3 + b3)
# Predicted class = index of the largest probability per row.
y_predict = tf.math.argmax(out3, -1)
y_predict = tf.cast(y_predict, tf.int64)
# Element-wise comparison: True where predicted == actual...
y_equal = tf.math.equal(y_predict, y_test)
y_equal = tf.cast(y_equal, tf.int64)  # ...then converted to 0/1
# Accuracy = correct predictions / total test samples.
# (Fixes the "currect" typos, including the misspelled output message.)
correct_num = tf.math.reduce_sum(y_equal)
correct_rate = correct_num.numpy() / y_test.shape[0]
print("correct_rate:", correct_rate)

# 10. Optimized training: mini-batch gradient descent.
# Group the (already flattened / one-hot encoded) training data into
# batches of 128 samples.
train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(128)
# Reset the loss history for this training run.
All_loss = []
# Learning rate.
lr = 0.001
# Iterate 101 epochs over the whole dataset.
for epoch in range(101):
    for step, (x_batch, y_batch) in enumerate(train_db):
        # x_batch: [128, 784], y_batch: [128, 10]
        # (the last batch of each epoch may be smaller than 128)
        with tf.GradientTape() as tape:
            tape.watch([w1, b1, w2, b2, w3, b3])
            # BUG FIX: feed raw logits to the loss. The original applied
            # tf.nn.softmax first, but softmax_cross_entropy_with_logits
            # applies softmax internally — double-normalizing squashes the
            # gradients and stalls training.
            logits = (tf.nn.relu(tf.nn.relu(x_batch @ w1 + b1) @ w2 + b2)
                      @ w3 + b3)
            loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=y_batch,
                                                        logits=logits))
            All_loss.append(loss)
        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])

        # Parameter update via Variable.assign_sub(value):
        # ref = ref - value, i.e. one gradient-descent step per batch.
        w1.assign_sub(lr * grads[0])
        b1.assign_sub(lr * grads[1])
        w2.assign_sub(lr * grads[2])
        b2.assign_sub(lr * grads[3])
        w3.assign_sub(lr * grads[4])
        b3.assign_sub(lr * grads[5])

    # Report progress every 10 epochs.
    if epoch % 10 == 0:
        print(epoch, '优化loss:', float(loss))
