import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np
import matplotlib.pyplot as plt
import os

# Enable GPU memory growth so TensorFlow does not grab all GPU memory at once
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        # Memory growth must be set before any GPU has been initialized,
        # otherwise set_memory_growth raises RuntimeError (handled below)
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        print(f"检测到 {len(gpus)} 个GPU设备，已设置内存增长模式")
    except RuntimeError as e:
        print(f"设置GPU内存增长失败: {e}")
else:
    print("未检测到GPU设备，将使用CPU进行计算")

# Configure matplotlib for Chinese labels (SimHei font) and a renderable
# minus sign when a CJK font is active
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

def load_local_mnist(data_path="./mnist_data"):
    """Load the MNIST dataset, preferring a local NPZ file.

    Args:
        data_path: Either a directory expected to contain ``mnist.npz`` or a
            direct path to an ``.npz`` file with keys ``x_train``, ``y_train``,
            ``x_test``, ``y_test``.

    Returns:
        ``((x_train, y_train), (x_test, y_test))`` as NumPy arrays.

    Falls back to ``tf.keras.datasets.mnist.load_data()`` (which may download
    the data) when the local file is missing or unreadable.
    """
    print(f"尝试从本地目录 {data_path} 加载MNIST数据...")

    # If data_path is a directory, look for mnist.npz inside it; otherwise
    # treat data_path itself as the file to load.
    mnist_npz_path = os.path.join(data_path, "mnist.npz") if os.path.isdir(data_path) else data_path

    if os.path.exists(mnist_npz_path) and mnist_npz_path.endswith('.npz'):
        try:
            # The MNIST archive contains plain ndarrays only, so pickle
            # support is not needed; keeping it disabled (NumPy's default)
            # avoids executing arbitrary code from a tampered file.
            with np.load(mnist_npz_path) as f:
                x_train, y_train = f['x_train'], f['y_train']
                x_test, y_test = f['x_test'], f['y_test']
            print(f"成功从本地文件 {mnist_npz_path} 加载数据")
            return (x_train, y_train), (x_test, y_test)
        except Exception as e:
            # Best-effort: report the failure and fall through to the
            # TensorFlow loader rather than crashing.
            print(f"从本地NPZ文件加载数据失败: {e}")

    # Local load failed or no local file found: use the built-in Keras loader.
    print("使用TensorFlow内置方式加载MNIST数据...")
    return tf.keras.datasets.mnist.load_data()

def preprocess_data(x_train, y_train, x_test, y_test):
    """Resize MNIST images to 32x32, scale to [0, 1], and one-hot labels.

    Args:
        x_train, x_test: Image arrays of shape (n, 28, 28) (uint8 pixels).
        y_train, y_test: Integer label arrays of shape (n,), values 0-9.

    Returns:
        ``((x_train, y_train), (x_test, y_test))`` where images are float32
        of shape (n, 32, 32, 1) in [0, 1] and labels are one-hot (n, 10).
    """
    print("正在预处理数据...")

    # Resize each split in a single batched tf.image.resize call instead of
    # a Python loop dispatching one TF op per image — same bilinear result,
    # orders of magnitude faster on 60k/10k images.
    x_train_resized = tf.image.resize(x_train[..., np.newaxis], [32, 32]).numpy()
    x_test_resized = tf.image.resize(x_test[..., np.newaxis], [32, 32]).numpy()

    # Scale pixel values into [0, 1] as float32
    x_train_normalized = x_train_resized.astype('float32') / 255.0
    x_test_normalized = x_test_resized.astype('float32') / 255.0

    # One-hot encode the labels over the 10 digit classes
    y_train_categorical = tf.keras.utils.to_categorical(y_train, 10)
    y_test_categorical = tf.keras.utils.to_categorical(y_test, 10)

    print(f"训练集形状: {x_train_normalized.shape}")
    print(f"测试集形状: {x_test_normalized.shape}")
    print(f"训练标签形状: {y_train_categorical.shape}")
    print(f"测试标签形状: {y_test_categorical.shape}")

    return (x_train_normalized, y_train_categorical), (x_test_normalized, y_test_categorical)

def create_compatible_cnn_model():
    """Build a small sequential CNN for (32, 32, 1) inputs.

    Architecture: three 3x3 Conv2D blocks (32, 64, 64 filters) with 2x2
    max-pooling after the first two, followed by a 64-unit dense layer with
    dropout and a 10-way softmax output.
    """
    model = models.Sequential()

    # Convolutional feature extractor
    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))

    # Classifier head
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(10, activation='softmax'))

    return model

def visualize_samples(x_data, y_data, num_samples=9):
    """Display the first *num_samples* images in a 3x3 grid.

    Each tile is drawn as a 32x32 grayscale image titled with the argmax of
    its one-hot label. Blocks until the matplotlib window is closed.
    """
    plt.figure(figsize=(8, 8))
    for idx in range(num_samples):
        ax = plt.subplot(3, 3, idx + 1)
        ax.imshow(x_data[idx].reshape(32, 32), cmap='gray')
        ax.set_title(f'标签: {np.argmax(y_data[idx])}')
        ax.axis('off')
    plt.tight_layout()
    plt.show()

def plot_training_history(history):
    """Plot training-vs-validation loss and accuracy curves side by side.

    Args:
        history: Keras ``History`` object whose ``history`` dict contains the
            keys 'loss', 'val_loss', 'accuracy', and 'val_accuracy'.
    """
    fig, axes = plt.subplots(1, 2, figsize=(12, 4))

    # One tuple per panel: (axis, train key, val key, train label,
    # val label, title, y-axis label)
    panels = [
        (axes[0], 'loss', 'val_loss', '训练损失', '验证损失', '模型损失', '损失'),
        (axes[1], 'accuracy', 'val_accuracy', '训练准确率', '验证准确率', '模型准确率', '准确率'),
    ]
    for ax, train_key, val_key, train_label, val_label, title, ylabel in panels:
        ax.plot(history.history[train_key], label=train_label)
        ax.plot(history.history[val_key], label=val_label)
        ax.set_title(title)
        ax.set_xlabel('轮次')
        ax.set_ylabel(ylabel)
        ax.legend()
        ax.grid(True)

    plt.tight_layout()
    plt.show()

def main():
    """Run the end-to-end MNIST demo: load, preprocess, visualize, train,
    evaluate, save, and verify that the saved model can be reloaded.

    Side effects: opens matplotlib windows (blocking until closed), trains
    for 5 epochs, and writes model files ('compatible_cnn_model.h5',
    'compatible_cnn_model_savedmodel', and possibly
    'compatible_cnn_weights.h5') into the current working directory.
    """
    print("MNIST手写数字识别 - 兼容版CNN实现")
    print("=" * 50)

    # Load raw data (local '../MNIST_data' preferred; downloads as fallback)
    print("\n加载MNIST数据...")
    (x_train, y_train), (x_test, y_test) = load_local_mnist("../MNIST_data")

    # Resize to 32x32, scale to [0, 1], one-hot encode labels
    (x_train_processed, y_train_processed), (x_test_processed, y_test_processed) = preprocess_data(
        x_train, y_train, x_test, y_test)

    # Show a 3x3 grid of sample digits
    print("\n可视化样本数据...")
    visualize_samples(x_train_processed, y_train_processed)

    # Build the CNN
    print("\n创建兼容版CNN模型...")
    model = create_compatible_cnn_model()

    # Print the layer-by-layer architecture summary
    model.summary()

    # Compile with Adam and categorical cross-entropy (labels are one-hot)
    model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy']
    )

    # Train. NOTE(review): the test set doubles as validation data here, so
    # the reported "validation" metrics are not held out — confirm intent.
    print("\n开始训练模型...")
    history = model.fit(
        x_train_processed, y_train_processed,
        batch_size=128,
        epochs=5,
        verbose=1,
        validation_data=(x_test_processed, y_test_processed)
    )

    # Plot loss/accuracy curves for train vs. validation
    print("\n绘制训练历史...")
    plot_training_history(history)

    # Final evaluation on the test set
    print("\n评估模型...")
    test_loss, test_accuracy = model.evaluate(x_test_processed, y_test_processed, verbose=0)
    print(f"测试损失: {test_loss:.4f}")
    print(f"测试准确率: {test_accuracy:.4f}")

    # Predict on the first 10 test samples and show label + confidence
    print("\n预测示例...")
    predictions = model.predict(x_test_processed[:10])
    predicted_classes = np.argmax(predictions, axis=1)
    true_classes = np.argmax(y_test_processed[:10], axis=1)

    print("前10个测试样本的预测结果:")
    for i in range(10):
        print(f"样本 {i+1}: 真实标签={true_classes[i]}, 预测标签={predicted_classes[i]}, "
              f"置信度={np.max(predictions[i]):.4f}")

    # Save in H5 format. NOTE(review): the original comment claimed
    # "SavedModel format for compatibility", but this call writes an H5
    # file; the SavedModel save happens just below.
    print("\n保存模型...")
    model.save('compatible_cnn_model.h5')
    print("模型已保存为 'compatible_cnn_model.h5'")

    # Additionally save in SavedModel (directory) format
    model.save('compatible_cnn_model_savedmodel')
    print("模型已额外保存为SavedModel格式 'compatible_cnn_model_savedmodel'")

    # Immediately verify that both saved formats can be loaded back
    print("\n立即测试模型加载...")
    try:
        # Reload the H5 file
        print("测试H5格式模型加载...")
        loaded_model = tf.keras.models.load_model('compatible_cnn_model.h5')
        print("H5格式模型加载成功！")

        # Reload the SavedModel directory
        print("测试SavedModel格式模型加载...")
        loaded_model_savedmodel = tf.keras.models.load_model('compatible_cnn_model_savedmodel')
        print("SavedModel格式模型加载成功！")

        # Smoke-test prediction with the reloaded H5 model
        print("进行加载模型测试...")
        test_predictions = loaded_model.predict(x_test_processed[:5])
        print(f"加载的模型预测测试: {np.argmax(test_predictions, axis=1)}")

        # Re-evaluate the reloaded model; should match the in-memory model
        print("评估加载的模型...")
        test_loss, test_accuracy = loaded_model.evaluate(x_test_processed, y_test_processed, verbose=0)
        print(f"加载模型的测试损失: {test_loss:.4f}")
        print(f"加载模型的测试准确率: {test_accuracy:.4f}")

    except Exception as e:
        print(f"模型加载测试失败: {e}")
        print("尝试使用兼容模式重新保存和加载...")

        # Fallback: full-model loading failed, try the weights-only route
        try:
            # Save weights only
            model.save_weights('compatible_cnn_weights.h5')
            print("模型权重已保存为 'compatible_cnn_weights.h5'")

            # Rebuild an identical architecture and load the weights into it
            new_model = create_compatible_cnn_model()
            new_model.compile(
                optimizer='adam',
                loss='categorical_crossentropy',
                metrics=['accuracy']
            )
            new_model.load_weights('compatible_cnn_weights.h5')
            print("通过权重加载模型成功！")

            # Smoke-test the rebuilt model
            test_predictions = new_model.predict(x_test_processed[:5])
            print(f"通过权重加载的模型预测测试: {np.argmax(test_predictions, axis=1)}")

        except Exception as e2:
            print(f"通过权重加载也失败了: {e2}")

# Script entry point: run the full demo only when executed directly
if __name__ == "__main__":
    main()