from tensorflow import keras
from tensorflow.keras.datasets import mnist
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras import datasets
from tensorflow.keras import models, regularizers
from tensorflow.keras import optimizers
# 加载数据
# Load the MNIST dataset (60k training / 10k test grayscale digit images).
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Add a trailing channel axis so each image is (height, width, channels) = (28, 28, 1),
# the layout Conv2D layers expect.
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)

# Convert uint8 pixels to float32 and scale into [0, 1] in one step.
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0

# One-hot encode the integer labels over the 10 digit classes.
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)

# Data-augmentation settings that are safe for MNIST digits: small rotations,
# shifts, zooms and shears keep the digit recognizable (no flips, which would
# corrupt digit semantics).
augment_config = dict(
    rotation_range=10,       # random rotation within ±10 degrees
    width_shift_range=0.1,   # horizontal shift up to 10% of image width
    height_shift_range=0.1,  # vertical shift up to 10% of image height
    zoom_range=0.1,          # random zoom in [0.9, 1.1]
    shear_range=5.,          # random shear angle up to 5 degrees
    # fill_mode='nearest'     # how newly exposed pixels are filled (the default)
)
datagen = ImageDataGenerator(**augment_config)

# No `rescale` here: the arrays were already normalized to [0, 1] above.
# fit() only computes dataset statistics for featurewise options such as
# zca_whitening — none are enabled, so this call is effectively a no-op
# and is kept only for parity with the original pipeline.
datagen.fit(x_train)

# Draw a single augmented batch of 9 samples for visualization.
x_batch, y_batch = next(datagen.flow(x_train, y_train, batch_size=9))

# Display the 9 augmented samples in a 3x3 grid, titled with the decoded label.
plt.figure(figsize=(10, 10))
for idx, (img, label) in enumerate(zip(x_batch, y_batch), start=1):
    plt.subplot(3, 3, idx)
    # Drop the channel axis: (28, 28, 1) -> (28, 28) so imshow can render it.
    plt.imshow(img.reshape(28, 28), cmap='gray')
    plt.title(f"Label: {np.argmax(label)}")
    plt.axis('off')
plt.suptitle('Augmented MNIST Samples')
plt.show()


# LeNet-5-inspired CNN adapted to MNIST's 28x28 input (the original LeNet-5
# took 32x32), extended with BatchNormalization and Dropout in the classifier.
model = tf.keras.models.Sequential(
    [
        # Conv stage 1: 6 filters of 5x5, 'same' padding keeps 28x28.
        tf.keras.layers.Conv2D(6, (5, 5), activation='relu',
                               input_shape=(28, 28, 1), padding='same'),
        tf.keras.layers.AveragePooling2D((2, 2), strides=2),
        tf.keras.layers.BatchNormalization(),
        # Conv stage 2: 16 filters of 5x5, 'valid' padding shrinks the map.
        tf.keras.layers.Conv2D(16, (5, 5), activation='relu', padding='valid'),
        tf.keras.layers.AveragePooling2D((2, 2), strides=2),
        tf.keras.layers.BatchNormalization(),
        # Conv stage 3: 120 filters of 5x5 (analogous to LeNet-5's C5 layer).
        tf.keras.layers.Conv2D(120, (5, 5), activation='relu', padding='valid'),
        tf.keras.layers.Flatten(name='Flatten'),
        # Classifier head: stacked Dense layers with light (10%) Dropout.
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.1),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.1),
        tf.keras.layers.Dense(84, activation='relu'),
        tf.keras.layers.Dropout(0.1),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Dense(84, activation='relu'),
        tf.keras.layers.Dropout(0.1),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Dense(84, activation='relu'),
        # Output: 10-way softmax over the digit classes.
        tf.keras.layers.Dense(10, activation='softmax'),
    ],
    name='kabule',
)
model.summary()

# Categorical cross-entropy matches the one-hot labels produced above.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train the model on augmented batches from the generator.
# Note: the validation set is deliberately NOT augmented.
batch_size = 32
epochs = 5

history = model.fit(
    datagen.flow(x_train, y_train, batch_size=batch_size),  # augmented training batches
    # BUG FIX: `steps_per_epoch` must be an integer; the original used `/`,
    # which yields a float and is rejected by newer Keras versions. Floor
    # division iterates over every full batch per epoch.
    steps_per_epoch=len(x_train) // batch_size,
    epochs=epochs,
    validation_data=(x_test, y_test),  # validation data is left un-augmented
    verbose=1
)

# Final metrics are computed on the ORIGINAL (un-augmented) arrays —
# augmentation is a training-time-only regularizer.
train_loss, train_acc = model.evaluate(x_train, y_train, verbose=0)
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)

print(f'训练集损失: {train_loss:.4f}, 准确率: {train_acc:.4f}')
print(f'测试集损失: {test_loss:.4f}, 准确率: {test_acc:.4f}')