import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np
import matplotlib.pyplot as plt
import os
import datetime

# Flag recording whether the TensorBoard callback could be imported.
TENSORBOARD_AVAILABLE = False
try:
    from tensorflow.keras.callbacks import TensorBoard

    TENSORBOARD_AVAILABLE = True
except ImportError:
    print("TensorBoard不可用，将跳过相关功能")

# GPU memory configuration (kept compatible with the original environment).
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)  # grow GPU memory on demand instead of preallocating
        print(f"检测到 {len(gpus)} 个GPU设备，已设置内存增长模式")
    except RuntimeError as e:
        print(f"设置GPU内存增长失败: {e}")
else:
    print("未检测到GPU设备，将使用CPU进行计算")

# Matplotlib configuration so Chinese labels and minus signs render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False


def load_local_mnist(data_path="./mnist_data"):
    """Load MNIST from a local ``.npz`` file, falling back to the Keras loader.

    Args:
        data_path: Either a directory expected to contain ``mnist.npz`` or a
            direct path to an ``.npz`` archive.

    Returns:
        ``((x_train, y_train), (x_test, y_test))`` as numpy arrays.
    """
    print(f"尝试从本地目录 {data_path} 加载MNIST数据...")

    # Directories get the canonical archive name appended; files are used as-is.
    if os.path.isdir(data_path):
        mnist_npz_path = os.path.join(data_path, "mnist.npz")
    else:
        mnist_npz_path = data_path

    if mnist_npz_path.endswith('.npz') and os.path.exists(mnist_npz_path):
        try:
            with np.load(mnist_npz_path, allow_pickle=True) as archive:
                train_split = (archive['x_train'], archive['y_train'])
                test_split = (archive['x_test'], archive['y_test'])
            print(f"成功从本地文件 {mnist_npz_path} 加载数据")
            return train_split, test_split
        except Exception as e:
            # Best-effort: report the local failure, then fall through.
            print(f"本地加载失败: {e}")

    # Fall back to TensorFlow's bundled downloader/cache.
    print("使用TensorFlow内置方式加载MNIST数据...")
    return tf.keras.datasets.mnist.load_data()


def preprocess_image(image):
    """Convert one 28x28 grayscale digit into a 112x112 RGB tensor in [0, 1]."""
    with_channel = tf.expand_dims(image, axis=-1)        # (28, 28) -> (28, 28, 1)
    upscaled = tf.image.resize(with_channel, [112, 112])  # match the model's input size
    rgb = tf.image.grayscale_to_rgb(upscaled)             # replicate the gray channel to 3 channels
    # Scale raw pixel values from [0, 255] down to [0, 1].
    return tf.cast(rgb, tf.float32) / 255.0


def load_and_preprocess_data():
    """Fetch MNIST, cast images to float32, and one-hot encode the labels.

    Returns:
        ``((x_train, y_train), (x_test, y_test))`` where labels are one-hot
        over the 10 digit classes.
    """
    (x_train, y_train), (x_test, y_test) = load_local_mnist("../MNIST_data")

    # Images keep their raw 28x28 shape here; resizing happens per batch later.
    x_train, x_test = (split.astype(np.float32) for split in (x_train, x_test))
    # One-hot encode labels for the 10 digit classes.
    y_train, y_test = (tf.keras.utils.to_categorical(split, 10) for split in (y_train, y_test))

    print(f"训练集形状: {x_train.shape} | 测试集形状: {x_test.shape}")
    print(f"训练标签形状: {y_train.shape} | 测试标签形状: {y_test.shape}")
    return (x_train, y_train), (x_test, y_test)


# 新增：Inception模块定义（GoogLeNet核心）
def inception_module(x, filters_1x1, filters_3x3_reduce, filters_3x3,
                     filters_5x5_reduce, filters_5x5, filters_pool_proj):
    """Build one GoogLeNet Inception block on top of tensor ``x``.

    Four parallel branches — 1x1 conv, 1x1->3x3, 1x1->5x5, and
    maxpool->1x1 projection — are concatenated along the channel axis.
    """
    def _conv(tensor, filters, size):
        # Shared helper: same-padded ReLU convolution.
        return layers.Conv2D(filters, size, padding='same', activation='relu')(tensor)

    branch_1x1 = _conv(x, filters_1x1, (1, 1))
    # 3x3 and 5x5 branches reduce channels with a 1x1 conv first.
    branch_3x3 = _conv(_conv(x, filters_3x3_reduce, (1, 1)), filters_3x3, (3, 3))
    branch_5x5 = _conv(_conv(x, filters_5x5_reduce, (1, 1)), filters_5x5, (5, 5))
    # Pooling branch keeps spatial size (stride 1) and projects channels.
    pooled = layers.MaxPooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = _conv(pooled, filters_pool_proj, (1, 1))

    return layers.concatenate([branch_1x1, branch_3x3, branch_5x5, branch_pool], axis=-1)


# 替换原VGG模型为GoogLeNet模型
def create_googlenet_model():
    """Assemble a truncated GoogLeNet classifier for 112x112 RGB inputs, 10 classes."""
    inputs = layers.Input(shape=(112, 112, 3))  # matches preprocess_image output

    # Stem: the conv/pool front end of the original GoogLeNet.
    net = layers.Conv2D(64, (7, 7), strides=(2, 2), padding='same', activation='relu')(inputs)
    net = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(net)
    net = layers.Conv2D(64, (1, 1), padding='same', activation='relu')(net)
    net = layers.Conv2D(192, (3, 3), padding='same', activation='relu')(net)
    net = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(net)

    # Inception stage 3 (a, b), then spatial downsampling.
    net = inception_module(net, 64, 96, 128, 16, 32, 32)
    net = inception_module(net, 128, 128, 192, 32, 96, 64)
    net = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(net)

    # Inception stage 4 (a, b) — deeper stages of the paper are omitted here.
    net = inception_module(net, 192, 96, 208, 16, 48, 64)
    net = inception_module(net, 160, 112, 224, 24, 64, 64)

    # Global average pooling + dropout replaces large dense layers.
    net = layers.GlobalAveragePooling2D()(net)
    net = layers.Dropout(0.4)(net)  # 40% dropout, as in the original GoogLeNet

    # Softmax head for the 10 MNIST classes.
    outputs = layers.Dense(10, activation='softmax')(net)

    return models.Model(inputs, outputs)


def visualize_samples(x_data, y_data):
    """Display the first nine samples in a 3x3 grid with their class labels."""
    plt.figure(figsize=(8, 8))
    for position in range(1, 10):
        plt.subplot(3, 3, position)
        sample_idx = position - 1
        # Preprocess, then show only the first (grayscale) channel.
        plt.imshow(preprocess_image(x_data[sample_idx])[:, :, 0], cmap='gray')
        plt.title(f'标签: {np.argmax(y_data[sample_idx])}')
        plt.axis('off')
    plt.tight_layout()
    plt.show()


def plot_training_history(history):
    """Plot training/validation loss and accuracy curves side by side."""
    fig, (loss_ax, acc_ax) = plt.subplots(1, 2, figsize=(12, 4))

    # Left panel: loss curves per epoch.
    loss_ax.plot(history.history['loss'], label='训练损失')
    loss_ax.plot(history.history['val_loss'], label='验证损失')
    loss_ax.set_title('损失变化')
    loss_ax.set_xlabel('轮次')
    loss_ax.set_ylabel('损失值')
    loss_ax.legend()

    # Right panel: accuracy curves per epoch.
    acc_ax.plot(history.history['accuracy'], label='训练准确率')
    acc_ax.plot(history.history['val_accuracy'], label='验证准确率')
    acc_ax.set_title('准确率变化')
    acc_ax.set_xlabel('轮次')
    acc_ax.set_ylabel('准确率')
    acc_ax.legend()

    plt.tight_layout()
    plt.show()


class MNISTDataGenerator(tf.keras.utils.Sequence):
    """Batched MNIST generator that preprocesses images lazily to save memory.

    Each batch is resized/RGB-converted on the fly via ``preprocess_image``
    instead of materializing the whole 112x112x3 dataset up front.
    """

    def __init__(self, x_data, y_data, batch_size=32, shuffle=True):
        """Store the raw arrays and build the (optionally shuffled) index order.

        Args:
            x_data: array of raw 28x28 grayscale images.
            y_data: labels aligned with ``x_data`` (one-hot in this script).
            batch_size: number of samples per batch.
            shuffle: reshuffle the sample order at the end of every epoch.
        """
        # Keras 3 requires Sequence/PyDataset subclasses to call the base
        # initializer; on older tf.keras this is a harmless no-op.
        super().__init__()
        self.x_data = x_data
        self.y_data = y_data
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.indices = np.arange(len(x_data))
        self.on_epoch_end()

    def __len__(self):
        """Number of batches per epoch (the last batch may be partial)."""
        return int(np.ceil(len(self.x_data) / self.batch_size))

    def __getitem__(self, index):
        """Build and return batch ``index`` as ``(images, labels)``."""
        start_idx = index * self.batch_size
        # Clamp the slice end so the final partial batch stays in range.
        end_idx = min((index + 1) * self.batch_size, len(self.x_data))
        batch_indices = self.indices[start_idx:end_idx]

        batch_x = np.zeros((len(batch_indices), 112, 112, 3), dtype=np.float32)
        batch_y = self.y_data[batch_indices]

        # Preprocess each sample individually; .numpy() copies the tf tensor
        # into the preallocated batch buffer.
        for i, idx in enumerate(batch_indices):
            batch_x[i] = preprocess_image(self.x_data[idx]).numpy()
        return batch_x, batch_y

    def on_epoch_end(self):
        """Reshuffle the sample order between epochs when enabled."""
        if self.shuffle:
            np.random.shuffle(self.indices)


def main():
    """Entry point: load data, train the GoogLeNet model, then evaluate and save it."""
    print("MNIST手写数字识别 - GoogLeNet实现")
    print("=" * 50)

    # Load raw MNIST and one-hot encode the labels.
    (x_train, y_train), (x_test, y_test) = load_and_preprocess_data()

    # Show a few training samples before training starts.
    print("\n可视化训练样本...")
    visualize_samples(x_train, y_train)

    # Memory-friendly batch generators; the test generator keeps a fixed order.
    train_gen = MNISTDataGenerator(x_train, y_train, batch_size=32)
    test_gen = MNISTDataGenerator(x_test, y_test, batch_size=32, shuffle=False)

    # Build and compile the model (key change vs. the old script: GoogLeNet, not VGG).
    model = create_googlenet_model()
    print("\n模型结构：")
    model.summary()
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

    # TensorBoard setup — skipped entirely when the callback could not be imported.
    callbacks = []
    if TENSORBOARD_AVAILABLE:
        log_dir = os.path.join(
            "../tb_logs_googlenet",  # separate log directory to distinguish from VGG runs
            datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        )
        os.makedirs(log_dir, exist_ok=True)
        tb_callback = TensorBoard(
            log_dir=log_dir,
            histogram_freq=1,
            write_graph=True,
            write_images=True
        )
        callbacks.append(tb_callback)
        print(f"\nTensorBoard日志将保存到: {log_dir}")

    # Train; the test generator doubles as the validation set here.
    print("\n开始训练...")
    history = model.fit(
        train_gen,
        epochs=5,
        validation_data=test_gen,
        callbacks=callbacks,
        workers=1,
        use_multiprocessing=False
    )

    # Post-training curves.
    print("\n训练完成！")
    plot_training_history(history)

    # Final evaluation on the test set.
    test_loss, test_acc = model.evaluate(test_gen, verbose=0)
    print(f"\n测试集损失: {test_loss:.4f} | 测试集准确率: {test_acc:.4f}")

    # Persist the trained model to disk.
    model.save('googlenet_mnist_model.h5')
    print("\n模型已保存为 'googlenet_mnist_model.h5'")

    # Print the TensorBoard launch command (log_dir only exists in this branch).
    if TENSORBOARD_AVAILABLE:
        print("\n可通过以下命令查看训练可视化：")
        print(f"tensorboard --logdir {log_dir}")


# Standard script guard so importing this module does not trigger training.
if __name__ == "__main__":
    main()