# Disable the GPU entirely to avoid CUDA-related errors.
# NOTE: CUDA_VISIBLE_DEVICES must be set BEFORE TensorFlow is imported —
# in the original file it was set after `import tensorflow`, by which time
# TensorFlow may already have initialized the CUDA runtime.
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

import pathlib

import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import numpy as np
# 配置GPU（现在将显示没有GPU）
def configure_gpu():
    """Report TensorFlow version and GPU availability, then opt for CPU.

    Always returns False: even when GPUs are detected they are deliberately
    not used (stability concerns noted by the original author), and any
    error during detection also falls back to CPU.
    """
    try:
        print("TensorFlow版本:", tf.__version__)
        detected = tf.config.list_physical_devices('GPU')
        print("可用的GPU设备:", detected)
        # Pick the status message based on whether any GPU was found.
        message = ("检测到GPU设备，但由于稳定性问题将不使用"
                   if detected
                   else "未检测到GPU设备，将使用CPU进行训练")
        print(message)
    except Exception as err:
        print(f"GPU配置过程中出现错误: {err}")
        print("将使用CPU进行训练")
    return False

# Configure the GPU before any other work; always False in this script.
GPU_AVAILABLE = configure_gpu()

def show_batch(image_batch, label_batch):
    """Render up to 15 images of a batch in a 5x5 grid, titled by label.

    Handles both one-hot (categorical) label rows and scalar labels;
    rescales raw 0-255 pixel values into [0, 1] before display.
    """
    plt.figure(figsize=(10, 10))
    count = min(15, len(image_batch))  # never exceed the batch size
    for idx in range(count):
        plt.subplot(5, 5, idx + 1)
        img = image_batch[idx]
        # Rescale if the pixels still look like raw 0-255 values.
        plt.imshow(img / 255.0 if img.max() > 1.0 else img)
        raw = label_batch[idx]
        # One-hot rows (ndim == 1) collapse via argmax; scalars cast to int.
        label = np.argmax(raw) if raw.ndim == 1 else int(raw)
        plt.title(f"Label: {label}")
        plt.axis('off')
    plt.tight_layout()
    plt.show()

def load_data(train_dir='D:/project/dataset/hotdog/train/',
              test_dir='D:/project/dataset/hotdog/test/',
              batch_size=16,
              img_height=224,
              img_width=224):
    """Create train/test image generators for the hotdog dataset.

    All parameters are optional; the defaults reproduce the original
    hard-coded behavior exactly.

    Args:
        train_dir: training root laid out as <root>/<class>/*.png.
        test_dir: test root with the same layout.
        batch_size: generator batch size (kept small to limit memory use).
        img_height: height images are resized to.
        img_width: width images are resized to.

    Returns:
        (train_data_gen, test_data_gen) Keras directory iterators yielding
        one-hot (categorical) labels. Also displays one training batch as
        a visual sanity check.
    """
    train_path = pathlib.Path(train_dir)
    test_path = pathlib.Path(test_dir)

    def _count_images(root, name):
        # Warn (instead of crashing) when a dataset directory is missing.
        # NOTE(review): only *.png files are counted — confirm the dataset
        # contains no .jpg/.jpeg images before trusting these counts.
        if not root.exists():
            print(f"警告: {name}数据目录不存在: {root}")
            return 0
        return len(list(root.glob('*/*.png')))

    train_count = _count_images(train_path, '训练')
    test_count = _count_images(test_path, '测试')
    print(f"训练数据: {train_count}")
    print(f"测试数据: {test_count}")

    image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
    train_data_gen = image_generator.flow_from_directory(
        batch_size=batch_size,
        directory=train_path,
        shuffle=True,  # reshuffle training data each epoch
        target_size=(img_height, img_width),
        class_mode='categorical')  # one-hot labels, matches categorical_crossentropy
    test_data_gen = image_generator.flow_from_directory(
        batch_size=batch_size,
        directory=test_path,
        shuffle=False,  # deterministic evaluation order
        target_size=(img_height, img_width),
        class_mode='categorical')

    # Visual sanity check: show the first training batch with its labels.
    image_batch, label_batch = next(train_data_gen)
    show_batch(image_batch, label_batch)
    return train_data_gen, test_data_gen

def load_model():
    """Build a compiled ResNet50-based binary classifier (frozen backbone).

    Pretrained ImageNet weights are cached next to this file as
    'resnet50_weights.h5': loaded from disk when present, otherwise
    downloaded once and saved for future runs.

    Returns:
        A compiled tf.keras.Sequential model expecting (224, 224, 3)
        inputs and producing 2-way softmax probabilities.
    """
    weights_path = os.path.join(os.path.dirname(__file__), 'resnet50_weights.h5')

    if os.path.exists(weights_path):
        print(f"从本地加载ResNet50预训练权重: {weights_path}")
        base_model = tf.keras.applications.ResNet50(
            weights=None,  # weights come from the local file below
            include_top=False,
            input_shape=(224, 224, 3)
        )
        # by_name/skip_mismatch tolerate minor layer-name differences in the h5.
        base_model.load_weights(weights_path, by_name=True, skip_mismatch=True)
    else:
        print("从网络下载ResNet50预训练权重...")
        # Download directly into the base model. The original built a second
        # ResNet50 just to copy its weights over — a redundant allocation.
        base_model = tf.keras.applications.ResNet50(
            weights='imagenet',
            include_top=False,
            input_shape=(224, 224, 3)
        )
        base_model.save_weights(weights_path)
        print(f"预训练权重已保存到本地: {weights_path}")

    # Freeze the backbone so only the new classification head trains.
    base_model.trainable = False

    model = tf.keras.Sequential([
        base_model,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(2, activation='softmax')  # two classes
    ])

    # Informational only: report which device training will use.
    device = '/GPU:0' if GPU_AVAILABLE else '/CPU:0'
    print(f"模型将在 {device} 上运行")

    # Low learning rate suits fine-tuning; categorical loss matches the
    # generators' class_mode='categorical'.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.00001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model

if __name__ == '__main__':
    # The original had this guard duplicated and nested inside itself;
    # the redundant inner check is removed.
    train_data_gen, test_data_gen = load_data()
    net = load_model()

    # Halve the learning rate when validation accuracy plateaus for 3 epochs.
    lr_scheduler = tf.keras.callbacks.ReduceLROnPlateau(
        monitor='val_accuracy',
        factor=0.5,
        patience=3,
        min_lr=1e-7,
        verbose=1
    )

    # Stop early (restoring the best weights) to limit overfitting.
    early_stopping = tf.keras.callbacks.EarlyStopping(
        monitor='val_accuracy',
        patience=5,
        restore_best_weights=True
    )

    history = net.fit(
        train_data_gen,
        steps_per_epoch=100,
        epochs=20,
        validation_data=test_data_gen,
        validation_steps=10,
        callbacks=[lr_scheduler, early_stopping]
    )
