import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt
import numpy as np

# Seed both RNGs so training runs are reproducible.
tf.random.set_seed(42)
np.random.seed(42)

# Fetch the MNIST digit dataset (downloads and caches on first use).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

# Conventional MNIST normalization constants (mean / std of the scaled pixels).
MNIST_MEAN = 0.1307
MNIST_STD = 0.3081

def _standardize(images):
    # (N, 28, 28) uint8 -> (N, 28, 28, 1) float32 in [0, 1], then standardized.
    scaled = images.reshape(-1, 28, 28, 1).astype('float32') / 255.0
    return (scaled - MNIST_MEAN) / MNIST_STD

x_train = _standardize(x_train)
x_test = _standardize(x_test)

# One-hot encode the 0-9 digit labels for categorical cross-entropy.
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)

class LeNet(tf.keras.Model):
    """LeNet-5-style CNN for 10-class MNIST classification.

    Two conv/average-pool feature stages followed by a three-layer
    dense head; the final layer emits softmax class probabilities.
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: conv -> pool -> conv -> pool.
        self.conv1 = tf.keras.layers.Conv2D(6, (5, 5), activation='relu')
        self.pool1 = tf.keras.layers.AveragePooling2D((2, 2))
        self.conv2 = tf.keras.layers.Conv2D(16, (5, 5), activation='relu')
        self.pool2 = tf.keras.layers.AveragePooling2D((2, 2))
        # Classifier head.
        self.flatten = tf.keras.layers.Flatten()
        self.fc1 = tf.keras.layers.Dense(120, activation='relu')
        self.fc2 = tf.keras.layers.Dense(84, activation='relu')
        self.out = tf.keras.layers.Dense(10, activation='softmax')

    def call(self, x):
        """Forward pass: (batch, 28, 28, 1) images -> (batch, 10) probabilities."""
        for layer in (self.conv1, self.pool1, self.conv2, self.pool2,
                      self.flatten, self.fc1, self.fc2):
            x = layer(x)
        return self.out(x)

# Instantiate the network and build its weights for 28x28x1 inputs.
model = LeNet()
model.build(input_shape=(None, 28, 28, 1))

# Print a per-layer summary of output shapes and parameter counts.
model.summary()

# Configure training: Adam optimizer, cross-entropy over one-hot labels.
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train for 5 epochs, holding out 10% of the training data for validation.
history = model.fit(x_train, y_train,
                    batch_size=64,
                    epochs=5,
                    validation_split=0.1,
                    verbose=1)

# Final evaluation on the held-out test set.
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print(f'\n测试集准确率: {test_acc:.4f}')

# Plot train-vs-validation loss and accuracy curves side by side.
# Each spec: (train key, val key, train label, val label, title, y-axis label).
hist = history.history
curve_specs = [
    ('loss', 'val_loss', '训练损失', '验证损失', '损失曲线', 'Loss'),
    ('accuracy', 'val_accuracy', '训练准确率', '验证准确率', '准确率曲线', 'Accuracy'),
]

plt.figure(figsize=(12, 4))
for idx, (train_key, val_key, train_lbl, val_lbl, title, y_lbl) in enumerate(curve_specs, start=1):
    plt.subplot(1, 2, idx)
    plt.plot(hist[train_key], label=train_lbl)
    plt.plot(hist[val_key], label=val_lbl)
    plt.title(title)
    plt.xlabel('Epoch')
    plt.ylabel(y_lbl)
    plt.legend()

plt.tight_layout()
plt.show()

# 可视化一些预测结果
def visualize_predictions(model, x_test, y_test, num_samples=5):
    # 获取随机样本
    indices = np.random.choice(len(x_test), num_samples, replace=False)
    samples = x_test[indices]
    true_labels = np.argmax(y_test[indices], axis=1)
    
    # 预测
    predictions = model.predict(samples)
    pred_labels = np.argmax(predictions, axis=1)
    
    # 绘制结果
    plt.figure(figsize=(10, 4))
    for i in range(num_samples):
        plt.subplot(1, num_samples, i+1)
        plt.imshow(samples[i].squeeze(), cmap='gray')
        plt.title(f'真实: {true_labels[i]}\n预测: {pred_labels[i]}')
        plt.axis('off')
    plt.tight_layout()
    plt.show()

# Spot-check a few random test-set predictions visually.
visualize_predictions(model, x_test, y_test)

# Persist the trained model in the native Keras (.keras) format.
model.save('fixed_lenet_mnist_tensorflow.keras')
print("模型已保存为 'fixed_lenet_mnist_tensorflow.keras'")


# --- Export pipeline: Keras -> ONNX -> OpenVINO IR ---
import tf2onnx

# FIX: removed `model.to('cpu')` — `.to()` is a PyTorch nn.Module API and
# raises AttributeError on a Keras model; Keras/tf2onnx need no device move.
# Subclassed (non-Functional) Keras models need an explicit input signature
# so tf2onnx can trace the graph with a known input shape and dtype.
input_signature = [tf.TensorSpec([None, 28, 28, 1], tf.float32, name='input')]
tf2onnx.convert.from_keras(model,
                           input_signature=input_signature,
                           output_path='lenet_mnist_tensorflow.onnx',
                           opset=13)

import openvino as ov

# Convert the ONNX graph to OpenVINO IR (.xml topology + .bin weights).
ov_model = ov.convert_model('lenet_mnist_tensorflow.onnx')
ov.save_model(ov_model, 'lenet_mnist_tensorflow.xml')
print("模型已转换并保存为 'lenet_mnist_tensorflow.xml' 和 'lenet_mnist_tensorflow.bin'")
