import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Lambda
# Load the MNIST dataset (downloaded by Keras on first use).
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Preprocessing: flatten each 28x28 image into a 784-vector and scale
# pixel values from [0, 255] to [0.0, 1.0].
train_images = train_images.reshape(-1, 784).astype('float32') / 255.0
test_images = test_images.reshape(-1, 784).astype('float32') / 255.0
# One-hot encode the integer class labels (10 classes).
train_labels = tf.keras.utils.to_categorical(train_labels, 10)
test_labels = tf.keras.utils.to_categorical(test_labels, 10)
# Manual softmax implementation
def manual_softmax(x, axis=1):
    """Numerically stable softmax: e^z / sum(e^z) along `axis`.

    Args:
        x: tensor of logits; with the default `axis=1` a
            (batch, classes) layout is assumed.
        axis: axis along which to normalize (default 1, preserving the
            original hard-coded behavior; pass -1 for the last axis).

    Returns:
        A tensor the same shape as `x` whose entries along `axis` are
        non-negative and sum to 1.
    """
    # Subtract the per-slice max before exponentiating so tf.exp cannot
    # overflow; softmax is invariant to a constant shift of its inputs.
    x_shifted = x - tf.reduce_max(x, axis=axis, keepdims=True)
    exps = tf.exp(x_shifted)
    return exps / tf.reduce_sum(exps, axis=axis, keepdims=True)
# Build the model: one dense layer emitting raw logits (activation=None),
# followed by the hand-written softmax wrapped in a Lambda layer.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation=None, input_shape=(784,)),
    Lambda(manual_softmax)
])
# Compile: plain SGD with learning rate 0.01. categorical_crossentropy
# here expects probabilities, which the Lambda softmax above provides.
model.compile(optimizer=tf.keras.optimizers.SGD(0.01),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train for 10 epochs with batch size 100, holding out 10% of the
# training data for validation.
history = model.fit(train_images, train_labels,
                    epochs=10,
                    batch_size=100,
                    validation_split=0.1)
# Save the trained model (HDF5 format) and the per-epoch metric history.
model.save('mnist_manual_softmax_model.h5')
np.save('train_manual_softmax_history.npy', history.history)