import os
import tensorflow as tf  # TensorFlow core library
from tensorflow import keras  # Keras high-level API
from tensorflow.keras import layers, optimizers, datasets  # common Keras submodules

if __name__ == '__main__':
    # Load the MNIST dataset: x is (60000, 28, 28) uint8 images,
    # y is (60000,) integer class labels in [0, 9].
    (x, y), (x_val, y_val) = datasets.mnist.load_data()
    # Convert to a float tensor and rescale pixels from [0, 255] to [-1, 1].
    x = 2 * tf.convert_to_tensor(x, dtype=tf.float32) / 255. - 1
    # Convert labels to an integer tensor.
    y = tf.convert_to_tensor(y, dtype=tf.int32)
    # One-hot encode the labels ONCE (10 classes). The training step below
    # must not encode again — the original re-applied tf.one_hot to an
    # already one-hot float tensor, which is a bug.
    y = tf.one_hot(y, depth=10)
    print(x.shape, y.shape)
    # Flatten images to (N, 784) up front so every batch is ready for the
    # dense network.
    x = tf.reshape(x, (-1, 28 * 28))
    train_dataset = tf.data.Dataset.from_tensor_slices((x, y))
    train_dataset = train_dataset.batch(512)

    model = keras.Sequential([  # 3-layer fully connected network
        layers.Dense(256, activation='relu'),  # hidden layer 1
        layers.Dense(128, activation='relu'),  # hidden layer 2
        layers.Dense(10)  # output layer: 10 logits, one per class
    ])
    # TF2 optimizer, created once outside the loop. The original used
    # tf.train.GradientDescentOptimizer, which is TF1-only and does not
    # exist in TF2.
    optimizer = optimizers.SGD(learning_rate=0.1)

    # One pass over the dataset, one gradient step per batch. The original
    # built train_dataset but never used it, taking a single step on the
    # full training set instead.
    for step, (x_batch, y_batch) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            out = model(x_batch)
            # Mean squared error over the batch (sum of squares / batch size).
            loss = tf.reduce_sum(tf.square(out - y_batch)) / x_batch.shape[0]
        # Gradient computation and the parameter update happen outside the
        # tape context — only the forward pass needs to be recorded.
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        if step % 100 == 0:
            print(step, 'loss:', float(loss))
