import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers, datasets

# Load MNIST: training images/labels and a held-out validation split.
(x, y), (x_val, y_val) = datasets.mnist.load_data()

# Images: uint8 [0, 255] -> float32 tensor rescaled to [-1, 1].
x = 2 * tf.convert_to_tensor(x, dtype=tf.float32) / 255. - 1
# Labels: integer tensor, then one-hot vectors of length 10.
y = tf.one_hot(tf.convert_to_tensor(y, dtype=tf.int32), depth=10)

print(x.shape, y.shape)

# Pair images with labels and group them into batches of 512 for training.
train_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(512)

# MLP classifier: two ReLU hidden layers, then a 10-unit output layer
# (no activation — the final Dense emits raw logits).
model = keras.Sequential()
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(10))

# One epoch of training: minimise the squared error between the model's
# logits and the one-hot labels, one 512-sample batch at a time.
# NOTE(review): MSE on logits works for this demo, but softmax +
# cross-entropy is the conventional loss for classification.
optimizer = optimizers.SGD(learning_rate=0.01)

for x_batch, y_batch in train_dataset:
    with tf.GradientTape() as tape:  # record ops for automatic differentiation
        # Flatten images: [b, 28, 28] => [b, 784] so Dense layers see vectors.
        # (The original reshaped to (-1, 28, 28), leaving the input 3-D.)
        x_batch = tf.reshape(x_batch, (-1, 784))
        out = model(x_batch)
        # y_batch is already one-hot encoded during preprocessing, so use it
        # directly — re-applying tf.one_hot here would fail on float labels.
        loss = tf.square(out - y_batch)
        # Mean squared error over the batch (x.shape[0], not x.shape(0) —
        # Tensor.shape is indexable, not callable).
        loss = tf.reduce_sum(loss) / x_batch.shape[0]
    # Take gradients outside the tape context so its resources are released.
    grads = tape.gradient(loss, model.trainable_variables)
    # w' = w - lr * grad: apply the update through an optimizer instance
    # (the `optimizers` module itself has no apply_gradients).
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
