import os
import random
import shutil
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# 1. 数据准备和分割
def shuffle_and_split(input_dir, output_dir, train_ratio=0.7, val_ratio=0.15, test_ratio=0.15, random_seed=42):
    """Shuffle every image in input_dir and split it into train/validation/test folders.

    Args:
        input_dir: directory containing all source images.
        output_dir: directory that receives the train/validation/test subfolders.
        train_ratio, val_ratio, test_ratio: split fractions (must sum to 1).
        random_seed: seed for the shuffle so splits are reproducible.

    Returns:
        (train_dir, val_dir, test_dir): paths of the created subdirectories.
    """
    # The three fractions must add up to (almost exactly) 1.
    assert abs(train_ratio + val_ratio + test_ratio - 1.0) < 0.001

    # Build the output directory layout.
    os.makedirs(output_dir, exist_ok=True)
    split_dirs = [os.path.join(output_dir, name) for name in ('train', 'validation', 'test')]
    for path in split_dirs:
        os.makedirs(path, exist_ok=True)
    train_dir, val_dir, test_dir = split_dirs

    # Collect every image file, then shuffle deterministically.
    all_files = [name for name in os.listdir(input_dir)
                 if name.lower().endswith(('.jpg', '.jpeg', '.png'))]
    random.seed(random_seed)
    random.shuffle(all_files)

    # Slice boundaries for the three subsets.
    num_files = len(all_files)
    train_end = int(train_ratio * num_files)
    val_end = train_end + int(val_ratio * num_files)

    subsets = [
        (all_files[:train_end], train_dir),
        (all_files[train_end:val_end], val_dir),
        (all_files[val_end:], test_dir),
    ]

    # Copy each subset's files into its destination directory.
    print("正在处理数据集...")
    for files, dest_dir in subsets:
        for name in files:
            shutil.copy(os.path.join(input_dir, name), os.path.join(dest_dir, name))

    # Report the resulting split sizes.
    train_files, val_files, test_files = (pair[0] for pair in subsets)
    print("\n数据集分割完成! 统计信息:")
    print(f"总文件数: {num_files}")
    print(f"训练集: {len(train_files)} ({len(train_files)/num_files:.1%})")
    print(f"验证集: {len(val_files)} ({len(val_files)/num_files:.1%})")
    print(f"测试集: {len(test_files)} ({len(test_files)/num_files:.1%})")

    return train_dir, val_dir, test_dir

# 2. 获取数据和标签
def load_data(data_dir):
    """Load images and integer labels from data_dir.

    The class of each image is inferred from which class name its filename
    contains. Images are resized to 128x128; files whose class cannot be
    recognized are skipped with a warning.

    Returns:
        (images, labels) as numpy arrays; both empty when nothing loads.
    """
    classes = ['cardboard', 'metal', 'plastic']
    images, labels = [], []

    # Bail out early when the directory is missing.
    if not os.path.exists(data_dir):
        print(f"警告：目录 {data_dir} 不存在!")
        return np.array([]), np.array([])

    # Only consider common image extensions.
    image_files = [name for name in os.listdir(data_dir)
                   if name.lower().endswith(('.jpg', '.jpeg', '.png'))]
    print(f"在 {data_dir} 中找到 {len(image_files)} 个图片文件")

    for img_file in image_files:
        img_path = os.path.join(data_dir, img_file)
        recognized = False
        # Match the first class name that appears in the filename.
        for idx, class_name in enumerate(classes):
            if class_name.lower() not in img_file.lower():
                continue
            try:
                img = tf.keras.preprocessing.image.load_img(
                    img_path, target_size=(128, 128))
                images.append(tf.keras.preprocessing.image.img_to_array(img))
                labels.append(idx)
            except Exception as e:
                # A broken file still counts as "recognized" so we don't
                # also emit the unknown-class warning for it.
                print(f"加载图片 {img_path} 出错: {e}")
            recognized = True
            break

        if not recognized:
            print(f"警告：无法识别 {img_file} 的类别，跳过此文件")

    if not images:
        print(f"错误：在 {data_dir} 中没有加载任何图片!")

    return np.array(images), np.array(labels)

# 3. 创建和训练模型
def create_model(input_shape, num_classes):
    """Build a compact CNN: three conv/max-pool stages followed by a dense head.

    Args:
        input_shape: shape of the input images, e.g. (128, 128, 3).
        num_classes: number of softmax output units.

    Returns:
        An uncompiled Keras Sequential model.
    """
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
    model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    # Show the layer-by-layer structure for debugging.
    model.summary()
    return model

def train_model(X_train, y_train, X_val, y_val, input_shape, num_classes):
    """Compile and fit the CNN built by create_model().

    Args:
        X_train, y_train: training images and integer class labels.
        X_val, y_val: validation images/labels; when empty, 20% of the
            training data is held out as validation instead.
        input_shape: shape fed to the first conv layer, e.g. (128, 128, 3).
        num_classes: number of output classes (1 selects the binary loss).

    Returns:
        (model, history), or (None, None) when the training set is empty.

    Raises:
        ValueError: if num_classes is less than 1.
    """
    # No data, nothing to train.
    if len(X_train) == 0 or len(y_train) == 0:
        print("错误：训练数据为空，无法训练模型!")
        return None, None

    model = create_model(input_shape, num_classes)

    # Pick the loss/metric pair matching the label encoding.
    if num_classes == 1:
        loss = 'binary_crossentropy'
        metrics = ['accuracy']
    elif num_classes > 1:
        loss = 'sparse_categorical_crossentropy'
        metrics = ['sparse_categorical_accuracy']
    else:
        # FIX: previously loss/metrics stayed unbound when num_classes < 1,
        # surfacing later as a confusing NameError at compile() time.
        raise ValueError(f"num_classes must be >= 1, got {num_classes}")

    model.compile(optimizer=Adam(learning_rate=0.001),
                  loss=loss,
                  metrics=metrics)

    # Keep the batch size no larger than the dataset itself.
    batch_size = min(32, len(X_train))
    epochs = 15

    print(f"开始训练，数据量：训练集={len(X_train)}，验证集={len(X_val)}")

    # Early stopping restores the best weights; TensorBoard logs the run.
    callbacks = [
        tf.keras.callbacks.EarlyStopping(patience=3, monitor='val_loss', restore_best_weights=True),
        tf.keras.callbacks.TensorBoard(log_dir='./logs')
    ]

    if len(X_val) == 0:
        # No explicit validation set: carve 20% out of the training data.
        print("警告：验证集为空，使用20%的训练数据作为验证")
        history = model.fit(
            X_train, y_train,
            batch_size=batch_size,
            epochs=epochs,
            validation_split=0.2,
            callbacks=callbacks
        )
    else:
        history = model.fit(
            X_train, y_train,
            batch_size=batch_size,
            epochs=epochs,
            validation_data=(X_val, y_val),
            callbacks=callbacks
        )

    return model, history

# 4. 保存模型 - 使用推荐的格式
def save_model(model, model_path='garbage_classifier_model.keras'):
    """Persist the model in the native Keras format; no-op when model is empty."""
    if not model:
        print("无法保存模型，模型对象为空")
        return
    # Save in the recommended .keras format.
    model.save(model_path)
    print(f"模型已保存为 {model_path}")

# 5. 加载模型
def load_model(model_path='garbage_classifier_model.keras'):
    """Load a saved Keras model from disk; return None when the file is absent."""
    if not os.path.exists(model_path):
        print(f"错误：模型文件 {model_path} 不存在")
        return None
    model = tf.keras.models.load_model(model_path)
    print(f"模型已从 {model_path} 加载")
    return model

# 6. 推理和评估函数
def predict_class(model, image_path):
    """Classify a single image file.

    Returns:
        (class_name, confidence), or (None, None) when the model is missing
        or the image path does not exist.
    """
    if model is None:
        print("错误：无法进行预测，模型未加载")
        return None, None

    if not os.path.exists(image_path):
        print(f"错误：图片路径 {image_path} 不存在")
        return None, None

    # Preprocess exactly like the training data: resize, scale to [0, 1],
    # then add the batch dimension.
    img = tf.keras.preprocessing.image.load_img(
        image_path, target_size=(128, 128))
    pixels = tf.keras.preprocessing.image.img_to_array(img) / 255.0
    batch = np.expand_dims(pixels, axis=0)

    prediction = model.predict(batch)
    best = np.argmax(prediction)
    class_names = ['cardboard', 'metal', 'plastic']
    return class_names[best], prediction[0][best]

def evaluate_model(model, X_test, y_test):
    """Evaluate the model on held-out arrays; returns (accuracy, loss) or (0, 0)."""
    if model is None:
        print("错误：无法评估模型，模型未加载")
    elif len(X_test) == 0:
        print("警告：测试集为空，无法评估")
    else:
        loss_value, acc_value = model.evaluate(X_test, y_test, verbose=1)
        print(f'\n测试准确率: {acc_value:.4f}, 测试损失: {loss_value:.4f}')
        return acc_value, loss_value
    # Either guard failed: report a neutral score.
    return 0, 0

def evaluate_directory(model, test_dir):
    """Evaluate the model on every image in test_dir and show sample predictions.

    Args:
        model: a compiled/loaded Keras model.
        test_dir: directory of labeled test images (class name in filename).

    Returns:
        (accuracy, loss), or (0, 0) when the directory is missing or empty.
    """
    if not os.path.exists(test_dir):
        print(f"错误：测试目录 {test_dir} 不存在!")
        return 0, 0

    # Load the labeled test images.
    print(f"\n加载测试数据: {test_dir}")
    X_test, y_test = load_data(test_dir)

    if len(X_test) == 0:
        print("警告：测试目录中没有有效图片")
        return 0, 0

    # Scale to [0, 1] to match the training preprocessing.
    X_test = X_test / 255.0

    # Overall evaluation.
    test_loss, test_acc = model.evaluate(X_test, y_test, verbose=1)
    print(f'\n测试集准确率: {test_acc:.4f}, 损失: {test_loss:.4f}')

    # Demo predictions on a few randomly chosen image files.
    # FIX: the old code indexed os.listdir() with positions drawn from
    # X_test, but load_data() filters and skips files, so the two orderings
    # did not correspond (wrong file shown, possible IndexError). Sample
    # from the actual image files on disk instead.
    print("\n在测试目录中进行随机预测演示...")
    image_files = [f for f in os.listdir(test_dir)
                   if f.lower().endswith(('.jpg', '.jpeg', '.png'))]
    num_display = min(5, len(image_files))
    sample_files = random.sample(image_files, num_display)

    plt.figure(figsize=(15, 3 * num_display))
    for i, file_name in enumerate(sample_files):
        img_path = os.path.join(test_dir, file_name)

        # Predict and render each sampled image with its result.
        predicted_class, confidence = predict_class(model, img_path)

        plt.subplot(num_display, 1, i + 1)
        plt.imshow(plt.imread(img_path))
        plt.title(f"文件: {file_name}\n预测: {predicted_class} ({confidence:.2f})")
        plt.axis('off')

    plt.tight_layout()
    plt.savefig('test_predictions.png')
    plt.show()

    return test_acc, test_loss

def plot_training_history(history):
    """Plot the loss (and accuracy, when recorded) curves from a Keras History.

    Saves the figure to training_history.png and displays it. Does nothing
    when history is None.
    """
    if history is None:
        print("警告：没有训练历史可展示")
        return

    recorded = history.history.keys()

    plt.figure(figsize=(12, 4))

    # Left panel: training (and, when present, validation) loss.
    plt.subplot(1, 2, 1)
    plt.plot(history.history['loss'], label='训练损失')
    if 'val_loss' in recorded:
        plt.plot(history.history['val_loss'], label='验证损失')
    plt.title('训练和验证损失')
    plt.xlabel('迭代次数')
    plt.ylabel('损失')
    plt.legend()

    # Right panel: accuracy, under whichever metric name was compiled in.
    plt.subplot(1, 2, 2)
    acc_key = val_acc_key = None
    for candidate in ('sparse_categorical_accuracy', 'accuracy'):
        if candidate in recorded:
            acc_key = candidate
            val_acc_key = 'val_' + candidate
            break

    if acc_key is None:
        # No accuracy metric recorded: save only the loss plot.
        print("警告：训练历史中没有找到准确率记录")
    else:
        plt.plot(history.history[acc_key], label='训练准确率')
        if val_acc_key in recorded:
            plt.plot(history.history[val_acc_key], label='验证准确率')
        plt.title('训练和验证准确率')
        plt.xlabel('迭代次数')
        plt.ylabel('准确率')
        plt.legend()

    plt.tight_layout()
    plt.savefig('training_history.png')
    plt.show()

# 训练函数
def train():
    """End-to-end training pipeline: split data, load it, fit, save, evaluate."""
    input_directory = "data"                   # raw image directory
    output_directory = "final_shuffled_data"   # reorganized split output

    # Start from a clean output directory.
    if os.path.exists(output_directory):
        print(f"删除已存在的输出目录: {output_directory}")
        shutil.rmtree(output_directory)

    # Step 1: reshuffle and split 70% / 15% / 15%.
    print("开始准备和分割数据集...")
    train_dir, val_dir, test_dir = shuffle_and_split(
        input_directory, output_directory,
        train_ratio=0.7, val_ratio=0.15, test_ratio=0.15)

    print("\n新的目录结构:")
    print(f"""
{output_directory}/
    ├── train/          (所有训练图片)
    ├── validation/     (所有验证图片)
    └── test/           (所有测试图片)
    """)

    # Step 2: load the three subsets.
    print("\n加载训练数据...")
    X_train, y_train = load_data(train_dir)
    print("加载验证数据...")
    X_val, y_val = load_data(val_dir)
    print("加载测试数据...")
    X_test, y_test = load_data(test_dir)

    print(f"\n训练集: {len(X_train)} 图片")
    print(f"验证集: {len(X_val)} 图片")
    print(f"测试集: {len(X_test)} 图片")

    # Nothing to train on: abort.
    if len(X_train) == 0:
        print("错误：没有训练数据可用，程序终止")
        return

    # Scale pixel values into [0, 1].
    X_train, X_val, X_test = X_train / 255.0, X_val / 255.0, X_test / 255.0

    # Step 3: fit the CNN (128x128 RGB inputs, 3 classes).
    print("\n开始训练模型...")
    model, history = train_model(X_train, y_train, X_val, y_val, (128, 128, 3), 3)

    # Step 4: persist the trained model.
    save_model(model)

    # Step 5: report test-set performance.
    print("\n评估模型性能...")
    evaluate_model(model, X_test, y_test)

    # Visualize the loss/accuracy curves.
    if history:
        plot_training_history(history)

    print("\n训练完成!")

# 测试函数
def test(model_path='garbage_classifier_model.keras', test_dir='final_shuffled_data/test'):
    """Evaluate a saved model on a test directory and run one example inference.

    Args:
        model_path: path of the saved Keras model file.
        test_dir: directory of test images.
    """
    # 1. Load the trained model; abort if it is missing.
    model = load_model(model_path)
    if model is None:
        return

    # 2. Full evaluation over the test directory.
    print("\n评估测试目录...")
    test_acc, test_loss = evaluate_directory(model, test_dir)

    # 3. Example inference on one randomly chosen test image.
    print("\n运行示例推理...")
    # FIX: the old code sampled from every directory entry, so a stray
    # non-image file (e.g. a .txt) could be picked and crash inference.
    # Only sample from actual image files.
    image_files = []
    if os.path.exists(test_dir):
        image_files = [f for f in os.listdir(test_dir)
                       if f.lower().endswith(('.jpg', '.jpeg', '.png'))]

    if image_files:
        random_file = random.choice(image_files)
        test_image_path = os.path.join(test_dir, random_file)

        predicted_class, confidence = predict_class(model, test_image_path)
        print(f"\n预测结果: {os.path.basename(test_image_path)}")
        print(f"预测类别: {predicted_class}, 置信度: {confidence:.4f}")

        # Render and save the example prediction.
        img = plt.imread(test_image_path)
        plt.imshow(img)
        plt.title(f"预测: {predicted_class} ({confidence:.2f})")
        plt.axis('off')
        plt.savefig('example_prediction.png')
        plt.show()
    else:
        print("\n没有可用的测试图片进行预测")

    print("\n测试完成!")

if __name__ == "__main__":
    # Eager execution makes step-through debugging easier.
    tf.config.run_functions_eagerly(True)

    # Interactive mode selection.
    print("请选择操作模式:")
    print("1. 训练模型")
    print("2. 测试模型")
    choice = input("请输入选择 (1/2): ")

    if choice == '1':
        train()
    elif choice == '2':
        target_dir = 'final_shuffled_data/test'

        # Optionally point at a different test directory.
        custom_test_dir = input("输入自定义测试目录（留空使用默认测试目录）: ")
        if custom_test_dir and os.path.exists(custom_test_dir):
            target_dir = custom_test_dir

        test('garbage_classifier_model.keras', target_dir)
    else:
        print("无效选择")