import tensorflow as tf
import matplotlib.pyplot as plt
from time import *
import os
import numpy as np
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from sklearn.metrics import classification_report, confusion_matrix, precision_score, recall_score, f1_score
# Create the output directory for training plots and metric reports.
# (A stray empty triple-quoted string literal was removed here — it was a no-op.)
os.makedirs('results', exist_ok=True)

def download_with_retry(url, local_filename, retries=3):
    """Download *url* to *local_filename* with connection-retry support.

    Args:
        url: Remote file URL (http/https).
        local_filename: Destination path; if it already exists it is reused.
        retries: Number of connection retries (exponential backoff, 0.5s base).

    Returns:
        local_filename, once the file exists on disk.

    Raises:
        Re-raises any download error after deleting the partial file.
    """
    # Skip the network entirely when a previously downloaded file is cached.
    if os.path.exists(local_filename):
        print(f"使用已存在的文件: {local_filename}")
        return local_filename

    session = requests.Session()
    try:
        retry = Retry(connect=retries, backoff_factor=0.5)
        adapter = HTTPAdapter(max_retries=retry)
        session.mount('http://', adapter)
        session.mount('https://', adapter)

        print(f"开始下载: {url}")
        try:
            response = session.get(url, stream=True, timeout=30)
            response.raise_for_status()

            with open(local_filename, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
            print(f"下载完成: {local_filename}")
        except Exception as e:
            print(f"下载失败: {e}")
            # Remove the partial file so a later retry starts from scratch.
            if os.path.exists(local_filename):
                os.remove(local_filename)
            raise
    finally:
        # Fix: the original never closed the session, leaking the connection pool.
        session.close()
    return local_filename

# Load the dataset, splitting it 80/20 into training and validation subsets.
def data_load(data_dir, img_height, img_width, batch_size):
    """Build training/validation datasets from a class-per-folder directory.

    Returns:
        (train_ds, val_ds, class_names) — both datasets are batched,
        one-hot labelled, and wrapped with prefetching.
    """
    # Arguments shared by both subsets so the split stays consistent
    # (identical seed + validation_split on both calls).
    common_kwargs = dict(
        label_mode='categorical',
        validation_split=0.2,
        seed=123,
        color_mode="rgb",
        image_size=(img_height, img_width),
        batch_size=batch_size,
    )

    training_set = tf.keras.preprocessing.image_dataset_from_directory(
        data_dir, subset="training", **common_kwargs)

    validation_set = tf.keras.preprocessing.image_dataset_from_directory(
        data_dir, subset="validation", **common_kwargs)

    # class_names must be read before prefetch() wraps (and hides) the attribute.
    names = training_set.class_names

    # Overlap preprocessing with training for throughput.
    return (training_set.prefetch(buffer_size=tf.data.AUTOTUNE),
            validation_set.prefetch(buffer_size=tf.data.AUTOTUNE),
            names)

# Build the model; takes the input image shape and the number of classes.
def model_load(IMG_SHAPE=(224, 224, 3), class_num=40):
    """Create and compile a MobileNetV2 transfer-learning classifier.

    Args:
        IMG_SHAPE: Input image shape as (height, width, channels).
        class_num: Number of output classes.

    Returns:
        A compiled tf.keras Sequential model with accuracy/precision/recall metrics.
    """
    # Cache directory for downloaded pretrained weights.
    cache_dir = os.path.expanduser('~/.keras/models')
    os.makedirs(cache_dir, exist_ok=True)

    # Pretrained-weights URL and local cache path.
    weights_url = 'https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v2/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224_no_top.h5'
    weights_path = os.path.join(cache_dir, 'mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224_no_top.h5')

    try:
        # Try to fetch the weight file (with retries); fall back to random init.
        download_with_retry(weights_url, weights_path)
        weights = weights_path
    except Exception as e:
        print(f"无法下载预训练权重: {e}")
        print("将使用随机初始化权重（性能可能较差）")
        weights = None

    # Load the MobileNetV2 backbone without its classification head.
    base_model = tf.keras.applications.MobileNetV2(
        input_shape=IMG_SHAPE,
        include_top=False,
        weights=weights
    )

    # Freeze the pretrained backbone and fine-tune ONLY the last 10 layers.
    # Fix: the original set `base_model.trainable = True` immediately after
    # freezing, which silently undid the freeze and made the later
    # "unfreeze last 10 layers" loop meaningless.
    base_model.trainable = False
    for layer in base_model.layers[-10:]:
        layer.trainable = True

    # Rescaling moved out of `experimental` between TF versions; support both.
    try:
        rescaling_layer = tf.keras.layers.Rescaling(1. / 127.5, offset=-1)
    except AttributeError:
        rescaling_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1. / 127.5, offset=-1)

    model = tf.keras.models.Sequential([
        rescaling_layer,            # map [0, 255] pixels to [-1, 1]
        base_model,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(class_num, activation='softmax')
    ])

    # Fix: build explicitly before summary() — the leading Rescaling layer has
    # no input shape, so the Sequential is otherwise unbuilt and summary()
    # would fail.
    model.build(input_shape=(None,) + tuple(IMG_SHAPE))
    model.summary()

    # Compile with precision and recall in addition to accuracy.
    model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy',
                 tf.keras.metrics.Precision(name='precision'),
                 tf.keras.metrics.Recall(name='recall')]
    )

    return model

# Plot the training curves (accuracy, loss, precision, recall).
def show_loss_acc(history):
    """Save a 2x2 grid of training/validation curves.

    Reads accuracy/loss/precision/recall (and their val_ counterparts) from
    *history* and writes the figure to results/results_mobilenet.png.
    """
    hist = history.history

    # Per-panel spec: (metric key, train label, val label, y-label, title,
    #                  legend location, clamp y-axis top to 1?)
    panels = [
        ('accuracy', '训练准确率', '验证准确率', '准确率', '训练和验证准确率', 'lower right', True),
        ('loss', '训练损失', '验证损失', '交叉熵损失', '训练和验证损失', 'upper right', False),
        ('precision', '训练精确率', '验证精确率', '精确率', '训练和验证精确率', 'lower right', True),
        ('recall', '训练召回率', '验证召回率', '召回率', '训练和验证召回率', 'lower right', True),
    ]

    plt.figure(figsize=(15, 12))
    for position, (key, train_lbl, val_lbl, ylabel, title, loc, clamp) in enumerate(panels, start=1):
        plt.subplot(2, 2, position)
        plt.plot(hist[key], label=train_lbl)
        plt.plot(hist['val_' + key], label=val_lbl)
        plt.legend(loc=loc)
        plt.ylabel(ylabel)
        if clamp:
            plt.ylim([min(plt.ylim()), 1])
        plt.title(title)

    plt.tight_layout()
    plt.savefig('results/results_mobilenet.png', dpi=300)
    plt.close()
    print("训练结果图已保存至: results/results_mobilenet.png")

# Evaluate the model's performance on the validation set.
def evaluate_model(model, val_ds, class_names):
    """Compute and persist detailed validation metrics.

    Prints overall accuracy/precision/recall/F1 and a per-class report,
    saves a confusion-matrix image and a text report under results/, and
    returns (accuracy, precision, recall, f1).
    """
    print("开始评估模型...")

    # Accumulate predicted and true class indices, batch by batch.
    preds_list = []
    labels_list = []
    for images, labels in val_ds:
        probabilities = model.predict(images)
        preds_list += list(np.argmax(probabilities, axis=1))
        labels_list += list(np.argmax(labels, axis=1))

    y_pred = np.array(preds_list)
    y_true = np.array(labels_list)

    # Weighted averages account for class imbalance.
    precision = precision_score(y_true, y_pred, average='weighted')
    recall = recall_score(y_true, y_pred, average='weighted')
    f1 = f1_score(y_true, y_pred, average='weighted')
    accuracy = np.mean(y_pred == y_true)

    print(f"\n总体性能指标:")
    print(f"准确率: {accuracy:.4f}")
    print(f"精确率: {precision:.4f}")
    print(f"召回率: {recall:.4f}")
    print(f"F1分数: {f1:.4f}")

    # Per-class breakdown.
    print("\n各类别详细指标:")
    print(classification_report(y_true, y_pred, target_names=class_names, digits=4))

    cm = confusion_matrix(y_true, y_pred)

    # Render the confusion matrix as a heatmap.
    plt.figure(figsize=(12, 10))
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title('混淆矩阵')
    plt.colorbar()
    tick_marks = np.arange(len(class_names))
    plt.xticks(tick_marks, class_names, rotation=90)
    plt.yticks(tick_marks, class_names)

    # Annotate each cell; flip text color past the halfway intensity so it
    # stays readable on dark cells.
    thresh = cm.max() / 2.
    for i, j in np.ndindex(cm.shape):
        plt.text(j, i, format(cm[i, j], 'd'),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('真实标签')
    plt.xlabel('预测标签')
    plt.savefig('results/confusion_matrix.png', dpi=300, bbox_inches='tight')
    plt.close()
    print("混淆矩阵图已保存至: results/confusion_matrix.png")

    # Persist the metrics as a plain-text report.
    with open('results/evaluation_metrics.txt', 'w', encoding='utf-8') as f:
        f.write("模型评估报告\n")
        f.write("=" * 50 + "\n")
        f.write(f"总体准确率: {accuracy:.4f}\n")
        f.write(f"总体精确率: {precision:.4f}\n")
        f.write(f"总体召回率: {recall:.4f}\n")
        f.write(f"总体F1分数: {f1:.4f}\n\n")
        f.write("各类别详细指标:\n")
        f.write(classification_report(y_true, y_pred, target_names=class_names, digits=4))

    print("评估结果已保存至: results/evaluation_metrics.txt")
    return accuracy, precision, recall, f1

# Main training pipeline.
def train(epochs, batch_size=4):
    """Train the MobileNetV2 classifier end to end.

    Loads the dataset, builds/compiles the model, trains with checkpointing,
    early stopping and LR scheduling, then saves the model, plots the curves
    and runs the detailed evaluation.

    Args:
        epochs: Number of training epochs.
        batch_size: Mini-batch size for loading and training.

    Returns:
        The trained tf.keras model.
    """
    begin_time = time()
    data_dir = "D:/garbage-master/python/garbage"

    # Fix: ensure the output directory exists up front — ModelCheckpoint does
    # not create directories, so the first checkpoint save would otherwise
    # crash the whole run.
    os.makedirs("D:/garbage-master/python/result", exist_ok=True)

    print(f"开始加载数据: {data_dir}")
    train_ds, val_ds, class_names = data_load(data_dir, 224, 224, batch_size)

    print(f"数据集类别: {len(class_names)} 个")
    print(class_names)

    print("开始构建模型...")
    model = model_load(class_num=len(class_names))

    callbacks = [
        # Keep the best model (by validation accuracy) on disk.
        tf.keras.callbacks.ModelCheckpoint(
            "D:/garbage-master/python/result/mobilenet_best.keras",
            monitor='val_accuracy',
            save_best_only=True,
            verbose=1
        ),
        # Stop when validation loss stalls; roll back to the best weights.
        tf.keras.callbacks.EarlyStopping(
            monitor='val_loss',
            patience=10,
            restore_best_weights=True
        ),
        # Shrink the learning rate on plateau.
        tf.keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.2,
            patience=5,
            min_lr=0.00001
        )
    ]

    print(f"开始训练模型，共 {epochs} 个轮次...")
    history = model.fit(
        train_ds,
        validation_data=val_ds,
        epochs=epochs,
        callbacks=callbacks,
        verbose=1
    )

    end_time = time()
    run_time = end_time - begin_time
    print(f'训练完成，总耗时: {run_time:.2f} 秒')

    # Save the final (possibly early-stopped) model in HDF5 format.
    model_path = "D:/garbage-master/python/result/mobilenet_final1.h5"
    model.save(model_path, save_format='h5')
    print(f"最终模型已保存至: {model_path}")

    # Save the training curves.
    show_loss_acc(history)

    # Run the detailed evaluation (report + confusion matrix).
    evaluate_model(model, val_ds, class_names)

    return model

if __name__ == '__main__':
    # Configure GPU memory growth so TensorFlow does not grab all VRAM upfront.
    physical_devices = tf.config.list_physical_devices('GPU')
    if physical_devices:
        try:
            # Enable growth on every visible GPU (the original only handled GPU 0).
            for device in physical_devices:
                tf.config.experimental.set_memory_growth(device, True)
            print(f"已检测到GPU: {physical_devices}")
        except Exception as e:
            print(f"GPU配置失败: {e}")
    else:
        print("未找到GPU设备，将使用CPU训练（速度较慢）")

    # Training hyperparameters.
    config = {
        'epochs': 20,
        'batch_size': 4
    }

    # Run training, pinned to GPU 0 when one is available.
    try:
        if physical_devices:
            with tf.device('/GPU:0'):
                train(**config)
        else:
            train(**config)
    except Exception as e:
        # Fix: print the full traceback instead of only the message so
        # failures inside the training pipeline remain debuggable.
        import traceback
        traceback.print_exc()
        print(f"训练过程出错: {e}")