# -*-coding:utf-8 -*-
# @Time: 2023/4/10 19:39
# @Author: cuishuohao
# @File: 2222
# @Software: PyCharm
import os

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

# Silence TensorFlow's C++ log spam: '3' filters out INFO, WARNING and ERROR
# messages (only FATAL remains).
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'

# Trainable parameters of a 3-layer fully-connected net:
# 3072 inputs (32*32*3 flattened image) -> 256 -> 128 -> 10 class scores.
def _weight(shape):
    """Create a trainable weight initialized from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.random.truncated_normal(shape, stddev=0.1))


w1, b1 = _weight([3072, 256]), tf.Variable(tf.zeros([256]))
w2, b2 = _weight([256, 128]), tf.Variable(tf.zeros([128]))
w3, b3 = _weight([128, 10]), tf.Variable(tf.zeros([10]))

# Load CIFAR-10: 10 classes, matching the 10-unit output layer.
# BUG FIX: the original loaded CIFAR-100 (labels 0..99) but one-hot encoded
# with depth=10, so every label >= 10 became an all-zero target row.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

# Scale pixels to [0, 1] and flatten each 32x32x3 image to a 3072-vector.
x_train = tf.convert_to_tensor(x_train, dtype=tf.float32) / 255
x_train = tf.reshape(x_train, [-1, 32 * 32 * 3])

# Labels arrive with shape (N, 1); squeeze to (N,) before one-hot so the
# result is (N, 10) rather than (N, 1, 10), which would not align with the
# (N, 10) logits in the loss.
y_train = tf.convert_to_tensor(y_train, dtype=tf.int32)
y_train = tf.one_hot(tf.squeeze(y_train, axis=-1), depth=10)

# Sanity check: one forward pass of the untrained network and its loss.
# BUG FIX: tf.nn.softmax_cross_entropy_with_logits expects RAW logits and
# applies softmax internally; the original fed it softmax(net3), i.e. a
# double softmax, which distorts the loss and its gradients.
# (The gradients previously computed here were dead code — the training
# loop recomputes them before any use — so they are dropped.)
net1 = x_train @ w1 + b1          # (N, 3072) @ (3072, 256) -> (N, 256)
out1 = tf.nn.relu(net1)
net2 = out1 @ w2 + b2             # -> (N, 128)
out2 = tf.nn.relu(net2)
net3 = out2 @ w3 + b3             # raw class scores, (N, 10)
out3 = tf.nn.softmax(net3)        # probabilities (for inspection only)

loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_train, logits=net3))

lr = 0.01          # SGD learning rate
All_loss = []      # loss after every parameter update, for plotting

num_samples = x_train.shape[0]
params = [w1, b1, w2, b2, w3, b3]

for step in range(51):
    # Shuffle images and labels with ONE shared permutation so they stay
    # aligned. BUG FIX: the original called tf.random.shuffle twice and
    # discarded both return values — tf.random.shuffle does not shuffle in
    # place, and two separate calls do not guarantee the same order even
    # with the same seed.
    perm = tf.random.shuffle(tf.range(num_samples))
    x_train = tf.gather(x_train, perm)
    y_train = tf.gather(y_train, perm)

    for _ in range(3):
        # Draw one random training example (index 0 included; the original
        # randint(1, 50000) could never pick sample 0). Keep it as a
        # (1, 3072) / (1, 10) batch so the matmuls below work unchanged.
        rand = np.random.randint(0, num_samples)
        x_sample = tf.reshape(x_train[rand, :], (1, -1))
        y_sample = tf.reshape(y_train[rand, :], (1, -1))

        with tf.GradientTape() as tape:
            # BUG FIX: train on the sampled example. The original computed
            # the loss on the FULL training set, leaving the drawn sample
            # unused — full-batch descent, not the intended SGD.
            h1 = tf.nn.relu(x_sample @ w1 + b1)
            h2 = tf.nn.relu(h1 @ w2 + b2)
            logits = h2 @ w3 + b3
            # Raw logits go into the loss — softmax_cross_entropy_with_logits
            # applies softmax internally (the original double-softmaxed).
            loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=y_sample,
                                                        logits=logits))
        All_loss.append(float(loss))
        grads = tape.gradient(loss, params)

        # Vanilla SGD update: p <- p - lr * dL/dp.
        for var, g in zip(params, grads):
            var.assign_sub(lr * g)

    if step % 10 == 0:
        print(step, 'loss:', float(loss))

plt.plot(All_loss)
plt.show()

# --- Evaluation on the held-out test set ---
x_test = tf.convert_to_tensor(x_test, dtype=tf.float32) / 255
x_test = tf.reshape(x_test, [-1, 32 * 32 * 3])
# BUG FIX: test labels arrive as shape (N, 1) while argmax below yields (N,).
# tf.math.equal would broadcast (N,) vs (N, 1) into an (N, N) matrix, making
# the computed "accuracy" meaningless — flatten the labels to (N,) first.
# (The original also cast y_test to int64 twice; once is enough.)
y_test = tf.reshape(tf.convert_to_tensor(y_test, dtype=tf.int64), [-1])

# Forward pass. Softmax is monotonic, so argmax over raw logits would give
# the same predictions; kept for parity with the rest of the script.
out3 = tf.nn.softmax(tf.nn.relu(tf.nn.relu(x_test @ w1 + b1) @ w2 + b2) @ w3 + b3)
# Predicted class = index of the largest probability per row.
y_predict = tf.math.argmax(out3, -1)

# Accuracy = fraction of predictions matching the true labels.
correct = tf.cast(tf.math.equal(y_predict, y_test), tf.int64)
correct_rate = tf.math.reduce_sum(correct).numpy() / y_test.shape[0]
print("correct_rate:", correct_rate)
# NOTE: the original ended with a hard-coded print("currect_rate:0.815"),
# a fabricated result unrelated to the actual run — removed.
