import tensorflow as tf
from tensorflow.keras import layers
from tensorflow import keras
import matplotlib.pyplot as plt
from time import *
import os
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

# Create the output directory for result artifacts (training-curve plots); no-op if it exists
os.makedirs('results', exist_ok=True)

def download_with_retry(url, local_filename, retries=3):
    """Download *url* to *local_filename* with automatic retries.

    Skips the download entirely if the file already exists. On any failure
    the partially written file is removed and the exception re-raised, so a
    truncated download is never mistaken for a cached one.

    Args:
        url: HTTP(S) URL to fetch.
        local_filename: Destination path on disk.
        retries: Total number of retry attempts (connection, read and
            retryable HTTP status errors).

    Returns:
        local_filename (for call chaining).

    Raises:
        requests.RequestException (or OSError) if the download ultimately fails.
    """
    if os.path.exists(local_filename):
        print(f"使用已存在的文件: {local_filename}")
        return local_filename

    session = requests.Session()
    # Fix: `Retry(connect=...)` only retried connection failures; use `total=`
    # so read timeouts and retryable 5xx responses are retried as well.
    retry = Retry(
        total=retries,
        backoff_factor=0.5,
        status_forcelist=(500, 502, 503, 504),
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)

    print(f"开始下载: {url}")
    try:
        # `with` ensures the response connection is released even on error
        # (the original leaked the response and session).
        with session.get(url, stream=True, timeout=30) as response:
            response.raise_for_status()
            with open(local_filename, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
        print(f"下载完成: {local_filename}")
    except Exception as e:
        print(f"下载失败: {e}")
        # Remove any partial file so a later run re-downloads from scratch.
        if os.path.exists(local_filename):
            os.remove(local_filename)
        raise
    finally:
        session.close()
    return local_filename

def data_load(data_dir, img_height, img_width, batch_size):
    """Build the training/validation datasets for *data_dir*.

    Uses an 80/20 split with a fixed seed so both subsets are consistent,
    captures ``class_names`` before wrapping the pipelines (the prefetched
    dataset no longer exposes that attribute), and applies AUTOTUNE
    prefetching for input-pipeline throughput.

    Returns:
        (train_ds, val_ds, class_names)
    """
    def _load_split(subset_name):
        # One place for the shared loader arguments; only `subset` differs.
        return tf.keras.preprocessing.image_dataset_from_directory(
            data_dir,
            label_mode='categorical',
            validation_split=0.2,
            subset=subset_name,
            seed=123,
            image_size=(img_height, img_width),
            batch_size=batch_size)

    raw_train_ds = _load_split("training")
    raw_val_ds = _load_split("validation")

    # Must be read off the raw dataset, before prefetch() wraps it.
    class_names = raw_train_ds.class_names

    autotune = tf.data.AUTOTUNE
    train_ds = raw_train_ds.prefetch(buffer_size=autotune)
    val_ds = raw_val_ds.prefetch(buffer_size=autotune)

    return train_ds, val_ds, class_names

# Build the transfer-learning model for a given input shape and class count.
def model_load(IMG_SHAPE=(224, 224, 3), class_num=40):
    """Build and compile a classifier on a frozen ResNet50 backbone.

    Attempts to download the ImageNet (no-top) weights with retries; falls
    back to random initialization if the download fails.

    Args:
        IMG_SHAPE: Input image shape (H, W, C).
        class_num: Number of output classes.

    Returns:
        A compiled tf.keras Sequential model.
    """
    # Local cache directory matching Keras' default weight-cache location.
    cache_dir = os.path.expanduser('~/.keras/models')
    os.makedirs(cache_dir, exist_ok=True)

    # Pretrained-weight URL and its cached local path.
    weights_url = 'https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
    weights_path = os.path.join(cache_dir, 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')

    try:
        # Download (with retries); on success pass the local file path to ResNet50.
        download_with_retry(weights_url, weights_path)
        weights = weights_path
    except Exception as e:
        print(f"无法下载预训练权重: {e}")
        print("将使用随机初始化权重（性能可能较差）")
        weights = None

    base_model = tf.keras.applications.ResNet50(
        input_shape=IMG_SHAPE,
        include_top=False,
        weights=weights
    )

    # Freeze the backbone; only the new classification head is trained.
    base_model.trainable = False

    # Compatibility across TF versions: Rescaling moved out of
    # `experimental.preprocessing` in newer releases.
    try:
        rescaling_layer = tf.keras.layers.Rescaling(1. / 127.5, offset=-1)
    except AttributeError:
        rescaling_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1. / 127.5, offset=-1)

    # NOTE(review): Keras' pretrained ResNet50 was trained with caffe-style
    # preprocessing (RGB->BGR + per-channel ImageNet mean subtraction, i.e.
    # tf.keras.applications.resnet50.preprocess_input), not [-1, 1] scaling.
    # Confirm whether this normalization choice is intentional — it may cost
    # accuracy when using the pretrained weights.
    model = tf.keras.models.Sequential([
        # Fix: an explicit input layer is required so the Sequential model is
        # built before model.summary() — the Rescaling layer alone carries no
        # input shape and summary() on an unbuilt model raises a ValueError.
        tf.keras.layers.InputLayer(input_shape=IMG_SHAPE),
        rescaling_layer,
        base_model,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(class_num, activation='softmax')
    ])

    model.summary()

    # Compile with categorical cross-entropy to match the one-hot labels
    # produced by data_load (label_mode='categorical').
    model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy']
    )

    return model

# Plot the accuracy/loss curves from a Keras training run and save them to disk.
def show_loss_acc(history):
    """Render training/validation accuracy and loss as a two-panel figure.

    The figure is written to results/results_resnet50.png rather than shown
    interactively.
    """
    metrics = history.history
    train_acc, valid_acc = metrics['accuracy'], metrics['val_accuracy']
    train_loss, valid_loss = metrics['loss'], metrics['val_loss']

    plt.figure(figsize=(12, 10))

    # Top panel: accuracy curves.
    plt.subplot(2, 1, 1)
    plt.plot(train_acc, label='训练准确率')
    plt.plot(valid_acc, label='验证准确率')
    plt.legend(loc='lower right')
    plt.ylabel('准确率')
    plt.ylim([min(plt.ylim()), 1])
    plt.title('训练和验证准确率')

    # Bottom panel: loss curves.
    plt.subplot(2, 1, 2)
    plt.plot(train_loss, label='训练损失')
    plt.plot(valid_loss, label='验证损失')
    plt.legend(loc='upper right')
    plt.ylabel('交叉熵损失')
    plt.title('训练和验证损失')
    plt.xlabel('轮次 (Epoch)')
    plt.grid(True)

    plt.tight_layout()
    plt.savefig('results/results_resnet50.png', dpi=300)
    plt.close()
    print("训练结果图已保存至: results/results_resnet50.png")

def train(epochs, batch_size=4):
    """Run the full training pipeline: load data, build the model, fit, save.

    Args:
        epochs: Number of training epochs.
        batch_size: Per-step batch size for both dataset splits.
    """
    begin_time = time()
    data_dir = "D:/garbage-master/python/garbage"
    result_dir = "D:/garbage-master/python/result"
    # Fix: ModelCheckpoint and model.save fail if the target directory does
    # not exist — create it up front.
    os.makedirs(result_dir, exist_ok=True)

    print(f"开始加载数据: {data_dir}")
    train_ds, val_ds, class_names = data_load(data_dir, 224, 224, batch_size)

    print(f"数据集类别: {len(class_names)} 个")
    print(class_names)

    print("开始构建模型...")
    model = model_load(class_num=len(class_names))

    # Callbacks: best-checkpoint saving (.keras format), early stopping on
    # validation loss, and LR reduction on plateau.
    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(
            os.path.join(result_dir, "resnet50_best.keras"),
            monitor='val_accuracy',
            save_best_only=True,
            verbose=1
        ),
        tf.keras.callbacks.EarlyStopping(
            monitor='val_loss',
            patience=10,
            restore_best_weights=True
        ),
        tf.keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.2,
            patience=5,
            min_lr=0.00001
        )
    ]

    print(f"开始训练模型，共 {epochs} 个轮次...")
    history = model.fit(
        train_ds,
        validation_data=val_ds,
        epochs=epochs,
        callbacks=callbacks,
        verbose=1
    )

    end_time = time()
    run_time = end_time - begin_time
    print(f'训练完成，总耗时: {run_time:.2f} 秒')

    # Save the final model explicitly in HDF5 format (distinct from the
    # best-checkpoint .keras file above).
    model_path = os.path.join(result_dir, "resnet50_final.h5")
    model.save(model_path, save_format='h5')
    print(f"最终模型已保存至: {model_path}")

    # Persist the training curves.
    show_loss_acc(history)

if __name__ == '__main__':
    # Chinese-capable font fallbacks so plot labels render correctly.
    plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]

    # GPU probe: enable memory growth on the first GPU when present so TF
    # does not grab all device memory up front.
    physical_devices = tf.config.list_physical_devices('GPU')
    if not physical_devices:
        print("未找到GPU设备，将使用CPU训练（速度较慢）")
    else:
        try:
            tf.config.experimental.set_memory_growth(physical_devices[0], True)
            print(f"已检测到GPU: {tf.config.list_physical_devices('GPU')}")
        except Exception as e:
            print(f"GPU配置失败: {e}")

    # Training hyperparameters.
    config = {
        'epochs': 10,        # small epoch count for an initial smoke run
        'batch_size': 4      # tune to fit available GPU memory
    }

    # Run training, pinned to the GPU when one is available.
    try:
        if physical_devices:
            with tf.device('/GPU:0'):
                train(**config)
        else:
            train(**config)
    except Exception as e:
        print(f"训练过程出错: {e}")