import tensorflow as tf
import matplotlib.pyplot as plt
from time import *
import os

# Ensure the output directory for result artifacts (the training-curve plot
# saved by show_loss_acc) exists before any training runs.
os.makedirs('results', exist_ok=True)

# Data loading: splits one image directory 80/20 into training and validation.
def data_load(data_dir, img_height, img_width, batch_size):
    """Build training and validation datasets from a directory of images.

    Args:
        data_dir: Root directory with one sub-directory per class.
        img_height: Height images are resized to.
        img_width: Width images are resized to.
        batch_size: Number of images per batch.

    Returns:
        (train_ds, val_ds, class_names) — the two tf.data datasets and the
        list of class names inferred from the sub-directory names.
    """
    # Options shared by both subsets; only `subset` differs between the calls.
    shared_opts = dict(
        label_mode='categorical',
        validation_split=0.2,  # 80/20 split
        seed=123,              # same seed so the two subsets don't overlap
        image_size=(img_height, img_width),
        batch_size=batch_size,
    )
    train_ds = tf.keras.preprocessing.image_dataset_from_directory(
        data_dir, subset="training", **shared_opts)
    val_ds = tf.keras.preprocessing.image_dataset_from_directory(
        data_dir, subset="validation", **shared_opts)
    return train_ds, val_ds, train_ds.class_names


# Model construction (with a compatibility shim for the Rescaling layer).
def model_load(IMG_SHAPE=(224, 224, 3), class_num=245):
    """Build and compile a small CNN classifier.

    Args:
        IMG_SHAPE: Input image shape as (height, width, channels).
        class_num: Number of output classes.

    Returns:
        A compiled tf.keras Sequential model.
    """
    # Compatibility across TensorFlow versions:
    try:
        # Newer TF (2.6+): layers.Rescaling is available directly.
        rescaling_layer = tf.keras.layers.Rescaling(1. / 255)
    except AttributeError:
        # Older TF: fall back to the experimental preprocessing namespace.
        rescaling_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1. / 255)

    model = tf.keras.models.Sequential([
        # Fix: declare the input shape. The original accepted IMG_SHAPE but
        # never used it, so the model was unbuilt when model.summary() ran.
        tf.keras.Input(shape=IMG_SHAPE),
        rescaling_layer,  # normalize pixel values into [0, 1]
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),  # 32 filters, 3x3 kernel
        tf.keras.layers.MaxPooling2D(2, 2),  # halve the feature-map size
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),  # 64 filters, 3x3 kernel
        tf.keras.layers.MaxPooling2D(2, 2),  # halve the feature-map size
        tf.keras.layers.Flatten(),  # flatten 2-D feature maps to a vector
        tf.keras.layers.Dense(128, activation='relu'),  # fully connected, 128 units
        tf.keras.layers.Dense(class_num, activation='softmax')  # one unit per class
    ])
    model.summary()  # print the architecture (now built, thanks to the Input layer)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',  # matches label_mode='categorical'
                  metrics=['accuracy'])
    return model


# Plot the training-history curves (accuracy on top, loss below).
def show_loss_acc(history):
    """Render accuracy and loss curves from a Keras History object.

    Saves the figure to results/results_cnn.png; does not display it.
    """
    hist = history.history
    fig, (ax_acc, ax_loss) = plt.subplots(2, 1, figsize=(8, 8))

    # Top panel: training vs. validation accuracy.
    ax_acc.plot(hist['accuracy'], label='Training Accuracy')
    ax_acc.plot(hist['val_accuracy'], label='Validation Accuracy')
    ax_acc.legend(loc='lower right')
    ax_acc.set_ylabel('Accuracy')
    ax_acc.set_ylim(min(ax_acc.get_ylim()), 1)
    ax_acc.set_title('Training and Validation Accuracy')

    # Bottom panel: training vs. validation loss.
    ax_loss.plot(hist['loss'], label='Training Loss')
    ax_loss.plot(hist['val_loss'], label='Validation Loss')
    ax_loss.legend(loc='upper right')
    ax_loss.set_ylabel('Cross Entropy')
    ax_loss.set_title('Training and Validation Loss')
    ax_loss.set_xlabel('epoch')

    fig.savefig('results/results_cnn.png', dpi=100)


# Main CNN training workflow.
def train(epochs,
          data_dir="D:/garbage-master/python/garbage",
          model_save_path="D:/garbage-master/python/result/cnn_model.h5",
          batch_size=4):
    """Train the CNN end-to-end: load data, fit, save model and curves.

    Args:
        epochs: Number of training epochs.
        data_dir: Dataset root directory (one sub-directory per class).
            Previously hard-coded; now a defaulted parameter.
        model_save_path: Where the trained model is written (.h5 format).
        batch_size: Batch size passed to the data loader.
    """
    begin_time = time()  # record start time
    train_ds, val_ds, class_names = data_load(data_dir, 224, 224, batch_size)
    print("类别名称:", class_names)
    model = model_load(class_num=len(class_names))

    # Overlap data loading with training for better throughput.
    train_ds = train_ds.prefetch(buffer_size=tf.data.AUTOTUNE)
    val_ds = val_ds.prefetch(buffer_size=tf.data.AUTOTUNE)

    history = model.fit(train_ds, validation_data=val_ds, epochs=epochs)

    # Fix: make sure the save directory exists, otherwise model.save fails.
    save_dir = os.path.dirname(model_save_path)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    model.save(model_save_path)
    print(f"模型已保存至: {model_save_path}")

    run_time = time() - begin_time  # elapsed wall-clock seconds
    print('训练总耗时：', run_time, "秒")
    show_loss_acc(history)  # save the accuracy/loss plot
    print("训练结果图已保存至: results/results_cnn.png")


if __name__ == '__main__':
    # Detect GPU devices; enable on-demand memory growth when one is present.
    gpus = tf.config.list_physical_devices('GPU')
    if not gpus:
        print("未找到GPU设备，使用CPU训练")
    else:
        try:
            tf.config.experimental.set_memory_growth(gpus[0], True)
            print("已启用GPU训练")
        except Exception as e:
            print("GPU配置出错，使用CPU训练:", e)

    # Run training, pinning to the first GPU when available.
    try:
        if gpus:
            with tf.device('/GPU:0'):
                train(epochs=30)  # adjust epoch count here
        else:
            train(epochs=30)
    except Exception as e:
        print("训练过程出错:", e)