import tensorflow as tf
from tensorflow.keras import datasets
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras import models, regularizers
from tensorflow.keras import optimizers

# Load the MNIST dataset: 60k training and 10k test grayscale digit images,
# returned as uint8 numpy arrays of shape (N, 28, 28) with integer labels (N,).
(x, y), (x_test, y_test) = datasets.mnist.load_data()
print('x:', x.shape, 'y:', y.shape, 'x_test:', x_test.shape, 'y_test:', y_test.shape)

# Wrap the in-memory arrays as tf.data.Dataset pipelines so preprocessing,
# shuffling and batching can be applied lazily per element.
train = tf.data.Dataset.from_tensor_slices((x, y))
test = tf.data.Dataset.from_tensor_slices((x_test, y_test))

# Shared preprocessing function — fixes the input-shape issue for the CNN
def preprocess(x, y):
    """Prepare one MNIST example for CNN training.

    Scales pixel values to [0, 1], reshapes the image to NHWC form
    (28, 28, 1), and converts the integer label to a depth-10 one-hot
    vector to match a categorical-crossentropy loss.
    """
    image = tf.reshape(tf.cast(x, dtype=tf.float32) / 255., [28, 28, 1])
    label = tf.one_hot(tf.cast(y, dtype=tf.int32), depth=10)
    return image, label


# Apply per-sample preprocessing lazily through the tf.data pipeline
train = train.map(preprocess)
test = test.map(preprocess)

batch_size = 128
# BUG FIX: shuffling must happen BEFORE batching. The original
# `.batch(batch_size).shuffle(10000)` only permutes whole 128-sample
# batches, so the composition of each batch never changes across epochs.
# Shuffle individual samples, then batch.
train = train.shuffle(10000).batch(batch_size)
test = test.batch(batch_size)  # no shuffle needed for evaluation

# Build a small sequential CNN classifier for 28x28x1 MNIST images.
model = tf.keras.models.Sequential()

# Convolutional stages: convolve -> downsample -> normalize activations
model.add(tf.keras.layers.Conv2D(64, (3, 3), input_shape=(28, 28, 1), activation='relu', padding='same'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.BatchNormalization())

model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.BatchNormalization())

model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.BatchNormalization())

# Flatten the feature maps into a vector for the dense head
model.add(tf.keras.layers.Flatten())

# Fully connected head with light dropout for regularization
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.1))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.1))
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.1))

# BUG FIX: the network previously ended at a 128-unit hidden layer, but the
# labels are one-hot vectors of depth 10 and the loss is
# categorical_crossentropy — model.fit would fail with a shape mismatch
# (output (batch, 128) vs target (batch, 10)). Add the 10-way softmax
# classification layer.
model.add(tf.keras.layers.Dense(10, activation='softmax'))


# Compile: Adam optimizer, cross-entropy over one-hot labels, accuracy metric
model.compile(
    optimizer=optimizers.Adam(learning_rate=0.001),
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)

# Train for 10 epochs, validating on the test pipeline after each epoch
history = model.fit(train, epochs=10, validation_data=test, verbose=1)

# Evaluate both splits with the same preprocessed pipelines used for training
test_loss, test_acc = model.evaluate(test, verbose=0)
train_loss, train_acc = model.evaluate(train, verbose=0)

print(f'测试集损失: {test_loss:.4f}')
print(f'测试集准确率: {test_acc:.4f}')
print(f'训练集损失: {train_loss:.4f}')
print(f'训练集准确率: {train_acc:.4f}')

# Train/test accuracy gap as a crude measure of overfitting
overfitting_degree = train_acc - test_acc
print(f'过拟合程度: {overfitting_degree:.4f}')

# Visualize the learning curves side by side
fig, (ax_acc, ax_loss) = plt.subplots(1, 2, figsize=(15, 5))
hist = history.history

# Accuracy panel
ax_acc.plot(hist['accuracy'], 'b-', label='Training Accuracy', linewidth=2)
ax_acc.plot(hist['val_accuracy'], 'r-', label='Validation Accuracy', linewidth=2)
ax_acc.set_title('Model Accuracy', fontsize=14, fontweight='bold')
ax_acc.set_xlabel('Epoch')
ax_acc.set_ylabel('Accuracy')
ax_acc.legend()
ax_acc.grid(True, alpha=0.3)
ax_acc.set_ylim(0.9, 1.0)  # zoom into the high-accuracy band

# Loss panel
ax_loss.plot(hist['loss'], 'b-', label='Training Loss', linewidth=2)
ax_loss.plot(hist['val_loss'], 'r-', label='Validation Loss', linewidth=2)
ax_loss.set_title('Model Loss', fontsize=14, fontweight='bold')
ax_loss.set_xlabel('Epoch')
ax_loss.set_ylabel('Loss')
ax_loss.legend()
ax_loss.grid(True, alpha=0.3)

fig.tight_layout()
plt.show()
