
# Launch TensorBoard with: tensorboard --logdir logs/fit
#coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
# Try to import the TensorBoard callback; if TensorFlow/Keras is missing
# or incomplete, disable the TensorBoard features instead of crashing.
try:
    from tensorflow.keras.callbacks import TensorBoard
    TENSORBOARD_AVAILABLE = True
except ImportError:
    print("TensorBoard不可用，将跳过相关功能")
    TENSORBOARD_AVAILABLE = False

# Use the SimHei font so matplotlib can render the Chinese titles below,
# and default to 7x7-inch figures.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['figure.figsize'] = (7,7)

import datetime
import os

import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten,Activation,BatchNormalization
from tensorflow.keras import utils
from tensorflow.keras import regularizers

# Configure GPU memory growth so TensorFlow allocates GPU memory on demand
# instead of grabbing it all at startup.
gpus = tf.config.experimental.list_physical_devices('GPU')
if not gpus:
    print("未检测到GPU设备，将使用CPU进行计算")
else:
    try:
        for device in gpus:
            tf.config.experimental.set_memory_growth(device, True)
        print(f"检测到 {len(gpus)} 个GPU设备，已设置内存增长模式")
    except RuntimeError as e:
        # set_memory_growth raises RuntimeError if devices are already initialized.
        print(f"设置GPU内存增长失败: {e}")

def load_local_mnist(data_path="./MNIST_data"):
    """Load the MNIST dataset, trying several sources in order.

    Tries, in order:
      1. A local ``mnist.npz`` file — ``data_path`` may be a directory
         containing ``mnist.npz`` or a direct path to an ``.npz`` file.
      2. ``tf.keras.datasets.mnist.load_data()`` (may download).
      3. Random dummy data shaped like MNIST, so the rest of the script
         can still run as a demo.

    Returns:
        ``((x_train, y_train), (x_test, y_test))`` tuples of numpy arrays.
    """
    print(f"尝试从本地目录 {data_path} 加载MNIST数据...")

    # Resolve to a concrete .npz path when data_path is a directory.
    mnist_npz_path = os.path.join(data_path, "mnist.npz") if os.path.isdir(data_path) else data_path

    if os.path.exists(mnist_npz_path) and mnist_npz_path.endswith('.npz'):
        try:
            # Load arrays from the local NPZ archive.
            with np.load(mnist_npz_path, allow_pickle=True) as f:
                x_train, y_train = f['x_train'], f['y_train']
                x_test, y_test = f['x_test'], f['y_test']
            print(f"成功从本地文件 {mnist_npz_path} 加载数据")
            return (x_train, y_train), (x_test, y_test)
        except Exception as e:
            # Best-effort: fall through to the next loading strategy.
            print(f"从本地NPZ文件加载数据失败: {e}")

    # Local load failed or no local file — try the keras downloader.
    try:
        print("尝试使用tf.keras.datasets.mnist.load_data()加载数据...")
        return tf.keras.datasets.mnist.load_data()
    except Exception as e:
        print(f"使用tf.keras加载数据失败: {e}")
        print("请确保网络连接正常或手动下载MNIST数据集")
        # Last resort: synthesize dummy data so the script remains runnable.
        print("创建虚拟数据用于演示...")
        # Fix: use uint8 like the real MNIST arrays (the original returned
        # default int64), so downstream astype('float32')/255 normalization
        # behaves identically to the real dataset.
        x_train = np.random.randint(0, 255, (1000, 28, 28), dtype=np.uint8)
        y_train = np.random.randint(0, 10, (1000,), dtype=np.uint8)
        x_test = np.random.randint(0, 255, (100, 28, 28), dtype=np.uint8)
        y_test = np.random.randint(0, 10, (100,), dtype=np.uint8)
        return (x_train, y_train), (x_test, y_test)

# Load MNIST from the local directory (falls back to download / dummy data).
(X_train, y_train), (X_test, y_test) = load_local_mnist("../MNIST_data")

nb_classes = 10
print("训练样本的初始维度",X_train.shape)
print("训练样本目标值的初始维度",y_train.shape)

# Show the first 9 training samples in a 3x3 grid (rendered by plt.show() later).
for i in range(9):
    plt.subplot(3,3,i+1)
    plt.imshow(X_train[i], cmap='gray', interpolation='none')
    plt.title("数字 {}".format(y_train[i]))

# Flatten each 28x28 image into a 784-dim vector.
# Fix (idiom): the original `if len(X_train) == 60000` branches were
# redundant — reshape(X.shape[0], 784) covers every sample count.
X_train = X_train.reshape(X_train.shape[0], 784)
X_test = X_test.reshape(X_test.shape[0], 784)
# Normalize pixel values into [0, 1].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], '训练样本')
print(X_test.shape[0], '测试样本')
print("训练集",X_train.shape)
print("测试集",X_test.shape)
# One-hot encode the labels for categorical_crossentropy.
y_train = utils.to_categorical(y_train, nb_classes)
y_test = utils.to_categorical(y_test, nb_classes)

# Build the model: a 784 -> 512 -> 512 -> 10 fully-connected classifier.
model = Sequential()

## First hidden layer.
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))

# Second hidden layer with L2 weight regularization.
# Fix: use the conventional Dense -> BatchNorm -> Activation -> Dropout
# ordering. The original applied Dropout *before* the ReLU activation,
# which drops units ahead of the nonlinearity and is almost never intended.
model.add(Dense(512, kernel_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))

## Output layer: one softmax probability per digit class.
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

## Print the architecture summary.
model.summary()

# Compile the model with categorical cross-entropy and Adam.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Training callbacks: attach TensorBoard logging when the import at the
# top of the file succeeded, otherwise train without it.
callbacks = []
if not TENSORBOARD_AVAILABLE:
    print("TensorBoard功能不可用")
else:
    try:
        # ASCII-only base path avoids encoding problems with non-ASCII dirs.
        base_log_dir = "../logs_fit"
        os.makedirs(base_log_dir, exist_ok=True)
        # A timestamped subdirectory keeps separate runs distinct in TensorBoard.
        timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        log_dir = os.path.join(base_log_dir, timestamp)
        os.makedirs(log_dir, exist_ok=True)
        callbacks.append(TensorBoard(log_dir=log_dir, histogram_freq=1))
        print(f"TensorBoard功能已启用，日志将保存到: {log_dir}")
    except Exception as e:
        # Directory creation failed — disable TensorBoard for this run.
        print(f"创建TensorBoard日志目录失败: {e}")
        print("TensorBoard功能将被禁用")
        TENSORBOARD_AVAILABLE = False

# Train with a moderate batch size (64, reduced from 128 to lower memory
# pressure); fall back to CPU if the GPU hits an internal error.
try:
    history = model.fit(X_train, y_train,
                        batch_size=64, epochs=5, verbose=1,
                        validation_data=(X_test, y_test),
                        callbacks=callbacks)
except tf.errors.InternalError as e:
    print(f"GPU训练出错: {e}")
    print("切换到CPU训练...")
    # Fix: the original set os.environ['CUDA_VISIBLE_DEVICES'] = '-1' here,
    # which has no effect once TensorFlow has already initialized its
    # devices — the retry would still run on the GPU. Pin the retry to the
    # CPU explicitly with a device scope instead.
    with tf.device('/CPU:0'):
        # Re-compile so the retry starts from a fresh optimizer state.
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        # Retry with a smaller batch size (32).
        history = model.fit(X_train, y_train,
                            batch_size=32, epochs=5, verbose=1,
                            validation_data=(X_test, y_test),
                            callbacks=callbacks)

# Plot the training/validation loss curves.
plt.figure()
plt.plot(history.history['loss'],label='train_loss')
plt.plot(history.history['val_loss'],label='val_loss')
plt.title('model')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.grid()

# Plot the training/validation accuracy curves.
plt.figure()
plt.plot(history.history['accuracy'],label='train_acc')
plt.plot(history.history['val_accuracy'],label='val_acc')
plt.title('acc')
plt.xlabel('epoch')
# Consistency fix: the loss figure labels its y-axis; this one did not.
plt.ylabel('accuracy')
plt.legend()
plt.grid()

# Render all pending figures (sample grid + both training curves).
plt.show()

if TENSORBOARD_AVAILABLE and callbacks:
    print("\n训练完成！要查看TensorBoard，请在终端中运行以下命令：")
    print(f"tensorboard --logdir {log_dir}")