# 导入必要的库
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping

# 1. Load the MNIST dataset
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# 2. Preprocessing
# Scale pixel intensities from [0, 255] down to [0, 1]
train_images = train_images.astype("float32") / 255
test_images = test_images.astype("float32") / 255

# Add a trailing channel axis: (60000, 28, 28) -> (60000, 28, 28, 1)
train_images = train_images[..., np.newaxis]
test_images = test_images[..., np.newaxis]

# One-hot encode the integer class labels
num_classes = 10
train_labels = keras.utils.to_categorical(train_labels, num_classes)
test_labels = keras.utils.to_categorical(test_labels, num_classes)

# 3. Data augmentation (a key improvement)
datagen = ImageDataGenerator(
    validation_split=0.1,   # hold out the last 10% of samples for validation
    rotation_range=10,      # random rotation (degrees)
    width_shift_range=0.1,  # random horizontal shift
    height_shift_range=0.1, # random vertical shift
    zoom_range=0.1,         # random zoom
)

# 4. Build the enhanced model (tuned to reduce 7/9 confusion)
def build_enhanced_model(input_shape=(28, 28, 1), num_classes=10):
    """Build the enhanced CNN classifier (tuned to reduce 7/9 confusion).

    Architecture: two conv blocks (Conv-BN-Conv-Pool-Dropout) followed by
    a dense classification head with BatchNorm and heavy dropout.

    Args:
        input_shape: shape of a single input image (H, W, C).
        num_classes: number of output classes. Previously this was read
            from a module-level global, which made the builder unusable
            outside this script; the default (10) preserves old behavior.

    Returns:
        An uncompiled ``keras.Sequential`` model ending in a softmax layer.
    """
    model = keras.Sequential([
        layers.Input(shape=input_shape),

        # Conv block 1
        layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
        layers.BatchNormalization(),
        layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
        layers.MaxPooling2D((2, 2)),
        layers.Dropout(0.25),

        # Conv block 2
        layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
        layers.BatchNormalization(),
        layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
        layers.MaxPooling2D((2, 2)),
        layers.Dropout(0.25),

        # Classification head
        layers.Flatten(),
        layers.Dense(256, activation='relu'),
        layers.BatchNormalization(),
        layers.Dropout(0.5),
        layers.Dense(num_classes, activation='softmax')
    ])
    return model

model = build_enhanced_model()
model.summary()

# 5. Compile with a reduced learning rate for more stable convergence
model.compile(
    loss="categorical_crossentropy",
    optimizer=Adam(learning_rate=0.0005),
    metrics=["accuracy"],
)

# 6. Training-strategy optimization
# Shrink the learning rate when val_loss plateaus; stop early once
# val_accuracy stagnates, restoring the best weights seen so far.
lr_schedule = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, verbose=1)
early_stop = EarlyStopping(monitor='val_accuracy', patience=5, restore_best_weights=True)
callbacks = [lr_schedule, early_stop]

# 7. Train the model (with data augmentation on the training subset only)
batch_size = 128
epochs = 30

# BUG FIX: the original pulled the validation subset from the *augmenting*
# generator, so validation metrics were computed on randomly rotated /
# shifted / zoomed images. Validation must see clean data. A split-only
# generator with the same validation_split keeps the deterministic
# last-10% train/validation partition aligned between the two generators.
val_datagen = ImageDataGenerator(validation_split=0.1)

history = model.fit(
    datagen.flow(train_images, train_labels, batch_size=batch_size, subset='training'),
    validation_data=val_datagen.flow(train_images, train_labels, batch_size=batch_size, subset='validation'),
    epochs=epochs,
    callbacks=callbacks
)

# 8. Evaluate on the untouched test set
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=0)
print("\nTest accuracy: {:.2f}%".format(test_acc * 100))
print("Test loss: {:.4f}".format(test_loss))

# 9. Error analysis (focus on digits 7 and 9)
def analyze_specific_errors(model, test_images, test_labels, target_classes=(7, 9)):
    """Report and visualize misclassifications of selected digits.

    Prints, for each digit in *target_classes*, how often it was mistaken
    for every other class (count and percentage of that digit's samples),
    then shows up to 10 example images of those errors.

    Args:
        model: trained Keras classifier exposing ``predict``.
        test_images: test inputs — assumed (N, 28, 28, 1); TODO confirm.
        test_labels: one-hot encoded labels, shape (N, 10).
        target_classes: digits whose errors are reported. Default changed
            from a mutable list to a tuple (mutable default anti-pattern).
    """
    predictions = model.predict(test_images)
    pred_classes = np.argmax(predictions, axis=1)
    true_classes = np.argmax(test_labels, axis=1)

    error_indices = np.where(pred_classes != true_classes)[0]

    # Confusion counts restricted to errors, accumulated in one
    # vectorized pass instead of a Python loop.
    error_matrix = np.zeros((10, 10), dtype=int)
    np.add.at(error_matrix, (true_classes[error_indices], pred_classes[error_indices]), 1)

    # Per-class sample counts, computed once. (The original recomputed
    # len(np.where(true_classes == digit)[0]) inside the innermost loop.)
    class_counts = np.bincount(true_classes, minlength=10)

    # Print the error distribution for the target digits
    for digit in target_classes:
        print(f"\n数字{digit}的错误分布:")
        for mistaken_as, count in enumerate(error_matrix[digit]):
            if count > 0:
                print(f"  被误认为{mistaken_as}: {count}次 ({(count / class_counts[digit] * 100):.1f}%)")

    # Visualize up to 10 typical misclassified examples of the target digits
    plt.figure(figsize=(15, 6))
    display_count = 0
    for idx in error_indices:
        if display_count >= 10:
            break  # grid is full; no need to scan remaining errors
        if true_classes[idx] in target_classes:
            plt.subplot(2, 5, display_count + 1)
            plt.imshow(test_images[idx].squeeze(), cmap='gray')
            plt.title(f"True: {true_classes[idx]}\nPred: {pred_classes[idx]}")
            plt.axis('off')
            display_count += 1
    plt.suptitle('典型错误分类案例', y=1.05)
    plt.tight_layout()
    plt.show()

# Inspect how the trained model confuses the target digits on the test set
analyze_specific_errors(model, test_images, test_labels)

# 10. Save the trained model (legacy single-file HDF5 format)
model.save('mnist_enhanced_model.h5')
print("模型已保存为 mnist_enhanced_model.h5")

# 11. Visualize the training process
def plot_training_history(history):
    """Plot accuracy and loss curves (train vs. validation) side by side.

    Args:
        history: the ``History`` object returned by ``model.fit``.
    """
    plt.figure(figsize=(12, 4))

    # (train key, val key, title, y-axis label, train legend, val legend)
    panels = [
        ('accuracy', 'val_accuracy', 'Accuracy Curves', 'Accuracy',
         'Train Accuracy', 'Validation Accuracy'),
        ('loss', 'val_loss', 'Loss Curves', 'Loss',
         'Train Loss', 'Validation Loss'),
    ]
    for position, (train_key, val_key, title, y_label, train_legend, val_legend) in enumerate(panels, start=1):
        plt.subplot(1, 2, position)
        plt.plot(history.history[train_key], label=train_legend)
        plt.plot(history.history[val_key], label=val_legend)
        plt.title(title)
        plt.ylabel(y_label)
        plt.xlabel('Epoch')
        plt.legend()

    plt.tight_layout()
    plt.show()

# Plot the accuracy/loss curves recorded during training
plot_training_history(history)