"""
Demo2: 手写数字识别 - 深度学习入门
===================================

这个demo将教你：
1. 如何处理图像数据
2. 如何构建神经网络
3. 如何训练深度学习模型
4. 如何可视化训练过程
5. 如何使用卷积神经网络(CNN)

MNIST数据集包含70000张手写数字图片（0-9）
每张图片是28x28像素的灰度图
"""

# Third-party stack: numpy for array work, matplotlib/seaborn for plotting,
# TensorFlow/Keras for the models, scikit-learn for evaluation metrics.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import confusion_matrix, classification_report
import warnings
# Blanket-suppress all warnings to keep tutorial output readable.
# NOTE(review): acceptable for a demo, but this also hides deprecation
# warnings — avoid in production code.
warnings.filterwarnings('ignore')

# Configure matplotlib to render Chinese (CJK) text in figure labels.
# NOTE(review): 'SimHei' must be installed on the system; on many Linux/macOS
# setups it is missing and CJK labels render as empty boxes — verify.
plt.rcParams['font.sans-serif'] = ['SimHei']
# Use ASCII minus so negative tick labels render correctly with the CJK font.
plt.rcParams['axes.unicode_minus'] = False

print("=" * 60)
print("欢迎来到深度学习入门 - 手写数字识别")
print("=" * 60)

# ============================================
# Step 1: Load the MNIST dataset
# ============================================
print("\n【步骤1】加载MNIST数据集...")
print("首次运行会自动下载数据集，请耐心等待...")

# Keras downloads/caches MNIST and returns uint8 image arrays of shape
# (N, 28, 28) with integer labels 0-9 (60k train / 10k test).
(X_train, y_train), (X_test, y_test) = mnist.load_data()

print(f"✓ 数据加载成功！")
print(f"  - 训练集样本数: {len(X_train)}")
print(f"  - 测试集样本数: {len(X_test)}")
print(f"  - 图片尺寸: {X_train.shape[1]} x {X_train.shape[2]}")
print(f"  - 类别数量: {len(np.unique(y_train))} (数字0-9)")

# ============================================
# Step 2: Visualize the data
# ============================================
print("\n【步骤2】数据可视化...")

# Show the first 30 training digits in a 3x10 grid.
fig, axes = plt.subplots(3, 10, figsize=(15, 5))
fig.suptitle('MNIST手写数字样本展示', fontsize=16, fontweight='bold')

# axes.flat iterates the grid row-major, matching sample order 0..29.
for sample_idx, ax in enumerate(axes.flat):
    ax.imshow(X_train[sample_idx], cmap='gray')
    ax.set_title(f'标签: {y_train[sample_idx]}', fontsize=10)
    ax.axis('off')

plt.tight_layout()
plt.savefig('mnist_samples.png', dpi=300, bbox_inches='tight')
print("✓ 样本图片已保存: mnist_samples.png")

# Bar chart: number of training examples per digit class.
fig, ax = plt.subplots(figsize=(10, 6))
unique, counts = np.unique(y_train, return_counts=True)
bars = ax.bar(unique, counts, color='skyblue', edgecolor='black', alpha=0.7)
ax.set_xlabel('数字类别', fontsize=12)
ax.set_ylabel('样本数量', fontsize=12)
ax.set_title('训练集各类别分布', fontsize=14, fontweight='bold')
ax.grid(axis='y', alpha=0.3)

# Annotate each bar with its exact count, centered just above the bar top.
for bar, count in zip(bars, counts):
    x_center = bar.get_x() + bar.get_width() / 2.
    ax.text(x_center, bar.get_height(),
            f'{count}', ha='center', va='bottom', fontweight='bold')

plt.tight_layout()
plt.savefig('class_distribution.png', dpi=300, bbox_inches='tight')
print("✓ 类别分布图已保存: class_distribution.png")

# ============================================
# Step 3: Preprocess the data
# ============================================
print("\n【步骤3】数据预处理...")

# Keep the raw uint8 images so predictions can be displayed later.
X_train_original = X_train.copy()
X_test_original = X_test.copy()

# Scale pixel intensities from [0, 255] down to [0, 1].
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0

# CNN input: append a channel axis -> (samples, 28, 28, 1).
X_train_cnn = X_train[..., np.newaxis]
X_test_cnn = X_test[..., np.newaxis]

# Dense-network input: flatten each 28x28 image -> (samples, 784).
X_train_flat = X_train.reshape(-1, 784)
X_test_flat = X_test.reshape(-1, 784)

# One-hot encode the integer labels (10 classes) for categorical crossentropy.
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)

print(f"✓ 数据预处理完成")
print(f"  - CNN输入形状: {X_train_cnn.shape}")
print(f"  - 全连接网络输入形状: {X_train_flat.shape}")
print(f"  - 标签形状: {y_train_cat.shape}")

# ============================================
# Step 4: Build model 1 — a simple fully-connected network
# ============================================
print("\n【步骤4】构建模型1 - 简单全连接神经网络...")

# Architecture: 784 -> 128 -> 64 -> 10, with dropout after each hidden layer
# to reduce overfitting; softmax output gives class probabilities.
dense_stack = [
    layers.Input(shape=(784,)),
    layers.Dense(128, activation='relu', name='hidden1'),
    layers.Dropout(0.2),
    layers.Dense(64, activation='relu', name='hidden2'),
    layers.Dropout(0.2),
    layers.Dense(10, activation='softmax', name='output'),
]
model_simple = keras.Sequential(dense_stack, name='Simple_NN')

model_simple.compile(optimizer='adam',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])

print("✓ 简单神经网络构建完成")
print("\n模型结构:")
model_simple.summary()

# ============================================
# Step 5: Train model 1
# ============================================
print("\n【步骤5】训练简单神经网络...")
print("训练中，请稍候...")

# 10 epochs, batches of 128, holding out 10% of training data for validation.
history_simple = model_simple.fit(X_train_flat,
                                  y_train_cat,
                                  batch_size=128,
                                  epochs=10,
                                  validation_split=0.1,
                                  verbose=1)

print("✓ 简单神经网络训练完成")

# ============================================
# Step 6: Build model 2 — a convolutional neural network (CNN)
# ============================================
print("\n【步骤6】构建模型2 - 卷积神经网络(CNN)...")

# Three conv stages (32, 64, 64 filters of 3x3), the first two followed by
# 2x2 max-pooling, then a small dense classifier head with heavy dropout.
model_cnn = keras.Sequential(name='CNN')
model_cnn.add(layers.Input(shape=(28, 28, 1)))
model_cnn.add(layers.Conv2D(32, (3, 3), activation='relu', name='conv1'))
model_cnn.add(layers.MaxPooling2D((2, 2), name='pool1'))
model_cnn.add(layers.Conv2D(64, (3, 3), activation='relu', name='conv2'))
model_cnn.add(layers.MaxPooling2D((2, 2), name='pool2'))
model_cnn.add(layers.Conv2D(64, (3, 3), activation='relu', name='conv3'))
model_cnn.add(layers.Flatten())
model_cnn.add(layers.Dense(64, activation='relu', name='fc1'))
model_cnn.add(layers.Dropout(0.5))
model_cnn.add(layers.Dense(10, activation='softmax', name='output'))

model_cnn.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

print("✓ CNN模型构建完成")
print("\n模型结构:")
model_cnn.summary()

# ============================================
# Step 7: Train model 2
# ============================================
print("\n【步骤7】训练CNN模型...")
print("训练中，请稍候...")

# Same training regime as the dense model for a fair comparison.
history_cnn = model_cnn.fit(X_train_cnn,
                            y_train_cat,
                            batch_size=128,
                            epochs=10,
                            validation_split=0.1,
                            verbose=1)

print("✓ CNN模型训练完成")

# ============================================
# Step 8: Evaluate and compare the models
# ============================================
print("\n【步骤8】模型评估和对比...")

# Score the dense network on the held-out test set.
loss_simple, acc_simple = model_simple.evaluate(X_test_flat, y_test_cat, verbose=0)
print(f"\n简单神经网络:")
print(f"  - 测试集损失: {loss_simple:.4f}")
print(f"  - 测试集准确率: {acc_simple:.4f} ({acc_simple*100:.2f}%)")

# Score the CNN on the same test set.
loss_cnn, acc_cnn = model_cnn.evaluate(X_test_cnn, y_test_cat, verbose=0)
print(f"\nCNN模型:")
print(f"  - 测试集损失: {loss_cnn:.4f}")
print(f"  - 测试集准确率: {acc_cnn:.4f} ({acc_cnn*100:.2f}%)")

# Keep whichever model scored higher (ties go to the dense network),
# remembering which test-set layout it expects.
if acc_cnn > acc_simple:
    best_model_name, best_model = "CNN", model_cnn
    best_acc, X_test_best = acc_cnn, X_test_cnn
else:
    best_model_name, best_model = "简单神经网络", model_simple
    best_acc, X_test_best = acc_simple, X_test_flat

print(f"\n{'='*60}")
print(f"🏆 最佳模型: {best_model_name}")
print(f"🎯 准确率: {best_acc:.4f} ({best_acc*100:.2f}%)")
print(f"{'='*60}")

# ============================================
# Step 9: Visualize the training curves
# ============================================
print("\n【步骤9】可视化训练过程...")

fig, axes = plt.subplots(2, 2, figsize=(14, 10))
fig.suptitle('模型训练过程对比', fontsize=16, fontweight='bold')

# One row per model; left column = accuracy, right column = loss.
panels = [
    (axes[0, 0], history_simple, 'accuracy', '准确率',
     '简单神经网络 - 准确率', '训练准确率', '验证准确率'),
    (axes[0, 1], history_simple, 'loss', '损失',
     '简单神经网络 - 损失', '训练损失', '验证损失'),
    (axes[1, 0], history_cnn, 'accuracy', '准确率',
     'CNN模型 - 准确率', '训练准确率', '验证准确率'),
    (axes[1, 1], history_cnn, 'loss', '损失',
     'CNN模型 - 损失', '训练损失', '验证损失'),
]

for ax, history, metric, ylabel, title, train_label, val_label in panels:
    ax.plot(history.history[metric], label=train_label, linewidth=2)
    ax.plot(history.history['val_' + metric], label=val_label, linewidth=2)
    ax.set_xlabel('Epoch')
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    ax.legend()
    ax.grid(True, alpha=0.3)

plt.tight_layout()
plt.savefig('training_history.png', dpi=300, bbox_inches='tight')
print("✓ 训练过程图已保存: training_history.png")

# ============================================
# Step 10: Confusion matrix
# ============================================
print("\n【步骤10】生成混淆矩阵...")

# Probabilities and hard class predictions of the best model on the test set.
y_pred = best_model.predict(X_test_best, verbose=0)
y_pred_classes = y_pred.argmax(axis=1)

conf_mat = confusion_matrix(y_test, y_pred_classes)

# Heatmap with integer counts in each cell; rows = true, columns = predicted.
fig, ax = plt.subplots(figsize=(10, 8))
sns.heatmap(conf_mat, annot=True, fmt='d', cmap='Blues', ax=ax)
ax.set_xlabel('预测类别', fontsize=12)
ax.set_ylabel('真实类别', fontsize=12)
ax.set_title(f'{best_model_name} - 混淆矩阵', fontsize=14, fontweight='bold')
plt.tight_layout()
plt.savefig('confusion_matrix.png', dpi=300, bbox_inches='tight')
print("✓ 混淆矩阵已保存: confusion_matrix.png")

print("\n分类报告:")
print(classification_report(y_test, y_pred_classes))

# ============================================
# Step 11: Show example predictions
# ============================================
print("\n【步骤11】预测示例...")

# Pick 10 random test images (without replacement).
# NOTE(review): no RNG seed is set, so the sampled images differ between runs.
num_samples = 10
random_indices = np.random.choice(len(X_test), num_samples, replace=False)

fig, axes = plt.subplots(2, 5, figsize=(15, 6))
fig.suptitle('预测结果示例', fontsize=16, fontweight='bold')

for ax, idx in zip(axes.flat, random_indices):
    # Display the raw (un-normalized) image.
    ax.imshow(X_test_original[idx], cmap='gray')

    # Reuse the full-test-set probabilities already computed in step 10
    # instead of calling best_model.predict() once per sample inside the
    # loop — one batched predict is much faster and removes the duplicated
    # model-name branch.
    probs = y_pred[idx]
    pred_class = int(np.argmax(probs))
    true_class = y_test[idx]
    confidence = float(np.max(probs)) * 100

    # Title color: green = correct prediction, red = wrong.
    color = 'green' if pred_class == true_class else 'red'
    ax.set_title(f'真实: {true_class}\n预测: {pred_class} ({confidence:.1f}%)',
                 color=color, fontweight='bold')
    ax.axis('off')

plt.tight_layout()
plt.savefig('predictions.png', dpi=300, bbox_inches='tight')
print("✓ 预测结果已保存: predictions.png")

# ============================================
# Step 12: Save both trained models
# ============================================
print("\n【步骤12】保存模型...")

# NOTE(review): '.h5' is the legacy HDF5 format; Keras 3 prefers the native
# '.keras' format — confirm which Keras version this demo targets before
# changing the filenames.
model_simple.save('simple_nn_model.h5')
model_cnn.save('cnn_model.h5')

print("✓ 模型已保存:")
print("  - simple_nn_model.h5")
print("  - cnn_model.h5")

# ============================================
# Summary
# ============================================
print("\n" + "="*60)
print("🎉 恭喜！你已经完成了深度学习入门项目！")
print("="*60)
print("\n你学到了什么：")
print("  ✓ 如何处理图像数据")
print("  ✓ 如何构建全连接神经网络")
print("  ✓ 如何构建卷积神经网络(CNN)")
print("  ✓ 如何训练深度学习模型")
print("  ✓ 如何可视化训练过程")
print("  ✓ 如何评估模型性能")
print("  ✓ 如何保存和加载模型")

print("\n💡 扩展练习建议：")
print("  1. 尝试增加或减少网络层数，观察效果")
print("  2. 尝试调整学习率、批次大小等超参数")
print("  3. 尝试添加数据增强（旋转、缩放等）")
print("  4. 尝试使用预训练模型进行迁移学习")
print("  5. 尝试识别自己手写的数字")

print("\n📚 下一步：运行 demo3_chess/chess_piece_detection.py 学习实战应用！")
print("="*60)

# Display every figure created above; with interactive matplotlib backends
# this typically blocks until all windows are closed.
plt.show()