import tensorflow as tf
from tensorflow.keras import datasets
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras import models, regularizers
from tensorflow.keras import optimizers

# Load the MNIST dataset: (train images, train labels), (test images, test labels).
(x, y), (x_test, y_test) = datasets.mnist.load_data()
print('x:', x.shape, 'y:', y.shape, 'x_test:', x_test.shape, 'y_test:', y_test.shape)

# Wrap the raw arrays in tf.data.Dataset objects for the input pipeline below.
train = tf.data.Dataset.from_tensor_slices((x, y))
test = tf.data.Dataset.from_tensor_slices((x_test, y_test))

# # 显示前10个训练图片
# plt.figure(figsize=(10, 5))
# for i in range(10):
#     plt.subplot(2, 5, i + 1)
#     plt.imshow(x[i], cmap='gray')
#     plt.title(f"Label: {y[i]}")
#     plt.axis('off')
#
# plt.tight_layout()
# plt.show()


# 统一的预处理函数
def preprocess(x, y):
    """Normalize one MNIST sample for CNN input.

    The image is scaled from [0, 255] to [0, 1] floats and reshaped to
    (28, 28, 1) — height, width, single channel.  The label is cast to
    int32 and expanded to a depth-10 one-hot vector.
    """
    image = tf.reshape(tf.cast(x, dtype=tf.float32) / 255., [28, 28, 1])
    label = tf.one_hot(tf.cast(y, dtype=tf.int32), depth=10)
    return image, label


# Apply the preprocessing function to every sample.
train = train.map(preprocess)
test = test.map(preprocess)

# Shuffle at the *sample* level, then batch.
# BUGFIX: the original called .batch(batch_size).shuffle(10000), which only
# permutes whole 128-image batches — the sample composition of each batch
# never changes between epochs.  Shuffling before batching gives proper
# per-sample shuffling (buffer of 10000 samples).
batch_size = 128
train = train.shuffle(10000).batch(batch_size)
test = test.batch(batch_size)

# Build the LeNet-5 architecture (tanh activations, average pooling).
# The original LeNet-5 expects 32x32 input; 'same' padding on C1 adapts
# it to MNIST's 28x28 images.
model = tf.keras.models.Sequential(
    [
        # C1: 6 feature maps from 5x5 kernels.
        tf.keras.layers.Conv2D(6, (5, 5), activation='tanh',
                               input_shape=(28, 28, 1), padding='same', name='C1'),
        # S2: 2x2 average pooling, stride 2.
        tf.keras.layers.AveragePooling2D((2, 2), strides=2, name='S2'),
        # C3: 16 feature maps from 5x5 kernels, no padding.
        tf.keras.layers.Conv2D(16, (5, 5), activation='tanh',
                               padding='valid', name='C3'),
        # S4: 2x2 average pooling, stride 2.
        tf.keras.layers.AveragePooling2D((2, 2), strides=2, name='S4'),
        # C5: 120 maps from 5x5 kernels; output collapses to 1x1 spatially.
        tf.keras.layers.Conv2D(120, (5, 5), activation='tanh',
                               padding='valid', name='C5'),
        tf.keras.layers.Flatten(name='Flatten'),
        # F6: fully connected, 84 units.
        tf.keras.layers.Dense(84, activation='tanh', name='F6'),
        # Output: 10-way softmax over digit classes.
        tf.keras.layers.Dense(10, activation='softmax', name='Output'),
    ],
    name='LeNet-5',
)
model.summary()

# Compile: Adam (lr=0.001) with categorical cross-entropy, which matches
# the one-hot labels produced by preprocess().
model.compile(optimizer=optimizers.Adam(learning_rate=0.001),loss='categorical_crossentropy',metrics=['accuracy'])

# Train for 5 epochs.
# NOTE(review): the test set doubles as validation_data, so the reported
# "val_*" metrics are not an independent hold-out — confirm this is intended.
print("开始训练LeNet-5模型...")
history = model.fit(train,epochs=5, validation_data=test,verbose=1)

# Banner for the detailed performance report.
print("\n" + "=" * 50)
print("LeNet-5 详细性能评估")
print("=" * 50)

# Evaluate on the test set.
test_loss, test_acc = model.evaluate(test, verbose=0)
print(f'测试集损失: {test_loss:.4f}')
print(f'测试集准确率: {test_acc:.4f}')

# Evaluate on the training set (used below to gauge overfitting).
train_loss, train_acc = model.evaluate(train, verbose=0)
print(f'训练集损失: {train_loss:.4f}')
print(f'训练集准确率: {train_acc:.4f}')

# Overfitting gap: train accuracy minus test accuracy (larger = more overfit).
overfitting_degree = train_acc - test_acc
print(f'过拟合程度: {overfitting_degree:.4f}')

# Plot the training curves (accuracy and loss, train vs. validation).
def _plot_history_panel(position, train_key, val_key, series_name, y_limits):
    """Draw one subplot of the training history: blue = train, red = validation."""
    plt.subplot(1, 2, position)
    plt.plot(history.history[train_key], 'b-',
             label=f'Training {series_name}', linewidth=2)
    plt.plot(history.history[val_key], 'r-',
             label=f'Validation {series_name}', linewidth=2)
    plt.title(f'LeNet-5 Model {series_name}', fontsize=14, fontweight='bold')
    plt.xlabel('Epoch')
    plt.ylabel(series_name)
    plt.legend()
    plt.grid(True, alpha=0.3)
    if y_limits is not None:
        plt.ylim(*y_limits)


plt.figure(figsize=(15, 5))
# Accuracy panel; y-axis clipped to 0.8-1.0 to suit LeNet-5's performance range.
_plot_history_panel(1, 'accuracy', 'val_accuracy', 'Accuracy', (0.8, 1.0))
# Loss panel with automatic y-axis limits.
_plot_history_panel(2, 'loss', 'val_loss', 'Loss', None)
plt.tight_layout()
plt.show()

# Persist the trained model to disk in HDF5 format.
model.save('lenet5_mnist_model.h5')
print("LeNet-5模型已保存为 'lenet5_mnist_model.h5'")

# Reload the saved model to demonstrate a load-and-predict round trip.
print("\n进行LeNet-5预测演示...")
loaded_model = models.load_model('lenet5_mnist_model.h5')

# Take a single test batch and run it through the reloaded model.
demo_images, demo_labels = next(iter(test.take(1)))
demo_probs = loaded_model.predict(demo_images, verbose=0)

# Show the first 12 digits; green titles mark correct predictions, red mistakes.
plt.figure(figsize=(15, 10))
hits = 0
for idx in range(12):
    plt.subplot(3, 4, idx + 1)
    # squeeze() drops the trailing channel axis of the (28, 28, 1) image.
    plt.imshow(demo_images[idx].numpy().squeeze(), cmap='gray')
    pred_digit = np.argmax(demo_probs[idx])
    true_digit = np.argmax(demo_labels[idx].numpy())
    is_correct = pred_digit == true_digit
    hits += int(is_correct)
    plt.title(f'True: {true_digit}\nPred: {pred_digit}',
              color='green' if is_correct else 'red',
              fontsize=12, fontweight='bold')
    plt.axis('off')

plt.suptitle(f'LeNet-5 Predictions (Accuracy: {hits}/12 = {hits / 12:.2%})',
             fontsize=16, fontweight='bold')
plt.tight_layout()
plt.show()



# Collect predicted and true digit labels over the entire test set.
all_predictions = []
all_true_labels = []

print("正在计算详细分类报告...")
# Iterate batch by batch.  The enumerate() index in the original was never
# used, so plain iteration over the dataset is clearer.
for test_batch_x, test_batch_y in test:
    batch_predictions = loaded_model.predict(test_batch_x, verbose=0)
    # argmax over the class axis converts probability rows / one-hot rows
    # back into integer digits.
    all_predictions.extend(np.argmax(batch_predictions, axis=1))
    all_true_labels.extend(np.argmax(test_batch_y.numpy(), axis=1))

# Overall accuracy: fraction of positions where prediction equals truth.
total_accuracy = np.mean(np.array(all_predictions) == np.array(all_true_labels))
print(f"\n总体准确率: {total_accuracy:.4f}")

# Detailed per-class metrics.  The mid-script imports are kept in place to
# preserve the original script's execution order.
from sklearn.metrics import classification_report, confusion_matrix
import seaborn as sns

# Per-digit precision/recall/F1 table.
print("\n分类报告:")
print(classification_report(all_true_labels, all_predictions, digits=4))

# Confusion matrix heatmap: rows are true digits, columns predicted digits.
conf_mat = confusion_matrix(all_true_labels, all_predictions)
plt.figure(figsize=(10, 8))
sns.heatmap(conf_mat, annot=True, fmt='d', cmap='Blues',
            xticklabels=range(10), yticklabels=range(10))
plt.title('LeNet-5 Confusion Matrix', fontsize=14, fontweight='bold')
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.show()

# Per-digit accuracy.  PERF: the list->array conversions are hoisted out of
# the loop — the original rebuilt both full-size arrays on every one of the
# 10 iterations.
print("\n每个数字的准确率:")
class_accuracy = {}
pred_arr = np.array(all_predictions)
true_arr = np.array(all_true_labels)
for digit in range(10):
    mask = true_arr == digit  # positions whose true label is this digit
    if np.sum(mask) > 0:  # guard against a digit absent from the test set
        class_acc = np.mean(pred_arr[mask] == digit)
        class_accuracy[digit] = class_acc
        # One star per 10% accuracy, as a quick visual bar.
        accuracy_star = '★' * int(class_acc * 10)
        print(f'数字 {digit}: {class_acc:.4f} {accuracy_star}')

# ---- Final training summary ---------------------------------------------
print("\n" + "=" * 50)
print("LeNet-5 训练统计摘要")
print("=" * 50)
acc_hist = history.history['accuracy']
val_acc_hist = history.history['val_accuracy']
print(f"最终训练准确率: {acc_hist[-1]:.4f}")
print(f"最终验证准确率: {val_acc_hist[-1]:.4f}")
print(f"测试集准确率: {test_acc:.4f}")
print(f"最佳验证准确率: {max(val_acc_hist):.4f}")
print(f"训练轮次: {len(acc_hist)}")
print(f"过拟合程度: {overfitting_degree:.4f}")

# Map accuracy thresholds (checked in descending order) to a rating label;
# anything below the last band falls through to the default.
_RATING_BANDS = (
    (0.99, "优秀 - 超越原版LeNet-5!"),
    (0.98, "很好 - 接近原版LeNet-5性能"),
    (0.95, "良好 - 标准LeNet-5性能"),
    (0.90, "一般 - 可接受的性能"),
)
rating = next((label for floor, label in _RATING_BANDS if test_acc >= floor),
              "需要改进")
print(f"模型性能评级: {rating}")

# ---- Architecture recap --------------------------------------------------
# NOTE(review): LeNet-5 is usually dated to LeCun et al. 1998; the printed
# text below says 1989 and is reproduced verbatim.
print("\n" + "=" * 50)
print("LeNet-5 架构信息")
print("=" * 50)
for arch_line in (
    "原始LeNet-5架构（1989年由Yann LeCun提出）：",
    "C1: 卷积层 6@28×28 (5×5卷积核, tanh激活)",
    "S2: 平均池化层 6@14×14 (2×2池化)",
    "C3: 卷积层 16@10×10 (5×5卷积核, tanh激活)",
    "S4: 平均池化层 16@5×5 (2×2池化)",
    "C5: 卷积层 120@1×1 (5×5卷积核, tanh激活)",
    "F6: 全连接层 84单元 (tanh激活)",
    "Output: 输出层 10单元 (softmax激活)",
):
    print(arch_line)