"""
集成学习猫狗识别训练脚本
使用多种集成学习算法（随机森林、梯度提升树、AdaBoost等）训练模型
并通过超参数优化找到最佳模型
"""

import cv2
import numpy as np
import os
import pickle
from imutils import paths
from tqdm import tqdm
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier, VotingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, classification_report
import time
import logging

# Logging configuration: INFO level, mirrored to a log file and to stderr.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('train_ensemble.log'),
        logging.StreamHandler()
    ]
)

# ==================== Core configuration ====================
TRAIN_FOLDER = r"e:\homework\imagge\data\train"  # training data folder
FEATURE_METHOD = "flat"  # feature extraction method: flattened pixels only
USE_CPU_ONLY = True  # CPU only
TEST_SIZE = 0.25  # held-out test split fraction
RANDOM_STATE = 2023  # random seed for reproducibility
BATCH_SIZE = 64  # number of images loaded per batch
MODEL_SAVE_PATH = "best_ensemble_model.pkl"  # where the best model is pickled

# ==================== Data loading ====================
def load_dataset(train_folder, batch_size=64):
    """
    Load the cat/dog dataset and extract flattened pixel features.

    Each image is read as grayscale, resized to 32x32 and flattened into a
    1024-dimensional vector. The label comes from the file-name prefix
    (e.g. "dog.123.jpg"): "dog" -> 1, anything else -> 0 (expected "cat").

    Args:
        train_folder: path to the folder containing the training images.
        batch_size: how many images are converted per batch (affects only
            memory/progress granularity, not the extracted features).

    Returns:
        X: float32 array of shape (n_samples, 1024), raw pixel values 0-255.
        y: int32 array of shape (n_samples,) with labels in {0, 1}.
    """
    logging.info("=" * 60)
    logging.info("开始加载猫狗数据集")
    logging.info(f"数据路径: {train_folder}")
    logging.info(f"特征提取方法: {FEATURE_METHOD}")
    logging.info("=" * 60)

    # Collect all image paths under the folder.
    image_paths_list = list(paths.list_images(train_folder))
    total_images = len(image_paths_list)
    logging.info(f"找到 {total_images} 张图像")

    X = []
    y = []

    # Number of batches, rounding up to include a final partial batch.
    num_batches = total_images // batch_size + (1 if total_images % batch_size else 0)

    for idx in tqdm(range(num_batches), desc="Loading cat/dog dataset"):
        batch_images = []
        batch_labels = []

        start = idx * batch_size
        end = min((idx + 1) * batch_size, total_images)

        for i in range(start, end):
            image_path = image_paths_list[i]

            # Read as grayscale; skip unreadable/corrupt files.
            img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
            if img is None:
                continue
            img = cv2.resize(img, (32, 32))
            batch_images.append(img)

            # Label from file-name prefix: dog=1, otherwise 0.
            label_str = os.path.basename(image_path).split('.')[0]
            label = 1 if label_str.lower() == 'dog' else 0
            batch_labels.append(label)

        # BUG FIX: if every image in this batch failed to load, the old code
        # called reshape on an empty array, which raises ValueError
        # ("cannot reshape array of size 0"). Skip empty batches instead.
        if not batch_images:
            continue

        # Flatten each 32x32 image into a 1024-dim vector.
        batch_images = np.array(batch_images)
        batch_pixels = batch_images.reshape((batch_images.shape[0], -1))

        X.extend(batch_pixels)
        y.extend(batch_labels)

    X = np.array(X, dtype='float32')
    y = np.array(y, dtype='int32')

    logging.info(f"数据加载完成: X.shape={X.shape}, y.shape={y.shape}")
    logging.info(f"猫的数量: {np.sum(y == 0)}, 狗的数量: {np.sum(y == 1)}")

    return X, y

# ==================== Ensemble model training ====================
def _fit_with_grid_search(name, heading, base_estimator, param_grid,
                          X_train, X_test, y_train, y_test):
    """
    Run a 3-fold grid search for one estimator and report its scores.

    Factored out of train_ensemble_models, where the same
    search/score/record/log sequence was duplicated verbatim for each
    base model.

    Args:
        name: display name used in log lines and result keys.
        heading: log line announcing this training stage.
        base_estimator: unfitted sklearn estimator to tune.
        param_grid: hyperparameter grid for GridSearchCV.
        X_train, X_test, y_train, y_test: train/test features and labels.

    Returns:
        (best_estimator, result_dict) where result_dict has keys
        'train_acc', 'val_acc', 'best_params', 'train_time'.
    """
    logging.info(heading)
    grid = GridSearchCV(base_estimator, param_grid, cv=3,
                        scoring='accuracy', n_jobs=-1, verbose=1)

    start_time = time.time()
    grid.fit(X_train, y_train)
    train_time = time.time() - start_time

    best = grid.best_estimator_
    train_acc = best.score(X_train, y_train)
    val_acc = best.score(X_test, y_test)

    logging.info(f"{name}: train_acc={train_acc:.4f}, val_acc={val_acc:.4f}")
    logging.info(f"最佳参数: {grid.best_params_}")
    logging.info(f"训练时间: {train_time:.2f}秒")

    return best, {
        'train_acc': train_acc,
        'val_acc': val_acc,
        'best_params': grid.best_params_,
        'train_time': train_time
    }


def train_ensemble_models(X_train, X_test, y_train, y_test):
    """
    Train several ensemble models with hyperparameter tuning and pick the best.

    Trains RandomForest, GradientBoosting and AdaBoost via 3-fold grid
    search, then a soft VotingClassifier combining the three tuned models.
    The model with the highest validation accuracy wins.

    Args:
        X_train, X_test: training and test feature arrays.
        y_train, y_test: training and test label arrays.

    Returns:
        best_model: the fitted model with the best validation accuracy.
        best_model_name: its name ('RandomForest', 'GradientBoosting',
            'AdaBoost' or 'VotingEnsemble').
        best_accuracy: its validation accuracy.
        results: per-model dict of accuracies, params and training times.
    """
    logging.info("\n" + "=" * 60)
    logging.info("开始训练集成学习模型")
    logging.info("=" * 60)

    models = {}
    results = {}

    # Grid-search specs for the three base models. Order matters: the
    # voting ensemble and the comparison log follow insertion order.
    search_specs = [
        ('RandomForest', "\n[1/4] 训练随机森林模型...",
         RandomForestClassifier(random_state=RANDOM_STATE, n_jobs=-1),
         {
             'n_estimators': [50, 100, 200],
             'max_depth': [10, 15, 20, None],
             'min_samples_split': [2, 5],
             'min_samples_leaf': [1, 2]
         }),
        ('GradientBoosting', "\n[2/4] 训练梯度提升树模型...",
         GradientBoostingClassifier(random_state=RANDOM_STATE),
         {
             'n_estimators': [50, 100, 150],
             'learning_rate': [0.01, 0.05, 0.1],
             'max_depth': [3, 5, 7],
             'subsample': [0.8, 1.0]
         }),
        ('AdaBoost', "\n[3/4] 训练AdaBoost模型...",
         AdaBoostClassifier(random_state=RANDOM_STATE),
         {
             'n_estimators': [50, 100, 200],
             'learning_rate': [0.5, 1.0, 1.5]
         }),
    ]

    for name, heading, estimator, param_grid in search_specs:
        best, result = _fit_with_grid_search(
            name, heading, estimator, param_grid,
            X_train, X_test, y_train, y_test
        )
        models[name] = best
        results[name] = result

    # ==================== Soft-voting ensemble of the tuned models ====================
    logging.info("\n[4/4] 训练投票集成模型...")
    voting_clf = VotingClassifier(
        estimators=[
            ('rf', models['RandomForest']),
            ('gb', models['GradientBoosting']),
            ('ada', models['AdaBoost'])
        ],
        voting='soft',
        n_jobs=-1
    )

    start_time = time.time()
    voting_clf.fit(X_train, y_train)
    train_time = time.time() - start_time

    voting_train_acc = voting_clf.score(X_train, y_train)
    voting_val_acc = voting_clf.score(X_test, y_test)

    models['VotingEnsemble'] = voting_clf
    results['VotingEnsemble'] = {
        'train_acc': voting_train_acc,
        'val_acc': voting_val_acc,
        'best_params': 'Ensemble of RF+GB+Ada',
        'train_time': train_time
    }

    logging.info(f"VotingEnsemble: train_acc={voting_train_acc:.4f}, val_acc={voting_val_acc:.4f}")
    logging.info(f"训练时间: {train_time:.2f}秒")

    # ==================== Pick the best model by validation accuracy ====================
    logging.info("\n" + "=" * 60)
    logging.info("模型性能对比")
    logging.info("=" * 60)

    best_model_name = None
    best_accuracy = 0.0

    for model_name, result in results.items():
        logging.info(f"{model_name}:")
        logging.info(f"  训练准确率: {result['train_acc']:.4f}")
        logging.info(f"  验证准确率: {result['val_acc']:.4f}")
        logging.info(f"  训练时间: {result['train_time']:.2f}秒")
        logging.info(f"  最佳参数: {result['best_params']}")
        logging.info("")

        # Strict '>' keeps the earlier model on ties (RF wins ties).
        if result['val_acc'] > best_accuracy:
            best_accuracy = result['val_acc']
            best_model_name = model_name

    best_model = models[best_model_name]

    logging.info("=" * 60)
    logging.info(f"最佳模型: {best_model_name}")
    logging.info(f"最佳验证准确率: {best_accuracy:.4f} ({best_accuracy*100:.2f}%)")
    logging.info("=" * 60)

    return best_model, best_model_name, best_accuracy, results

# ==================== Main ====================
def main():
    """
    Main training pipeline.

    Loads the dataset, normalizes the features, splits into stratified
    train/test sets, trains all ensemble models, evaluates the best one,
    and pickles it (with metadata) to MODEL_SAVE_PATH.

    Raises:
        RuntimeError: if no images could be loaded from TRAIN_FOLDER.
    """
    logging.info("\n" + "=" * 60)
    logging.info("集成学习猫狗识别 - 训练程序")
    logging.info("=" * 60)
    logging.info(f"特征提取方法: {FEATURE_METHOD}")
    logging.info(f"计算资源: CPU only")
    logging.info(f"测试集比例: {TEST_SIZE}")
    logging.info(f"随机种子: {RANDOM_STATE}")

    # 1. Load the dataset.
    X, y = load_dataset(TRAIN_FOLDER, BATCH_SIZE)

    # ROBUSTNESS FIX: fail fast with a clear message instead of a cryptic
    # train_test_split error when the data folder is wrong or empty.
    if X.size == 0:
        raise RuntimeError(
            f"No images loaded from '{TRAIN_FOLDER}'. "
            "Check that TRAIN_FOLDER points to the training data."
        )

    # 2. Normalize pixel features to [0, 1].
    logging.info("\n归一化特征数据...")
    X = X / 255.0

    # 3. Stratified train/test split (preserves the cat/dog ratio).
    logging.info(f"\n划分数据集 (训练集:{1-TEST_SIZE:.0%}, 测试集:{TEST_SIZE:.0%})...")
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=TEST_SIZE, random_state=RANDOM_STATE, stratify=y
    )
    logging.info(f"训练集大小: {X_train.shape[0]}")
    logging.info(f"测试集大小: {X_test.shape[0]}")

    # 4. Train all ensemble models and keep the best one.
    best_model, best_model_name, best_accuracy, results = train_ensemble_models(
        X_train, X_test, y_train, y_test
    )

    # 5. Detailed evaluation of the best model on the held-out test set.
    logging.info("\n" + "=" * 60)
    logging.info("最佳模型详细评估")
    logging.info("=" * 60)

    y_pred = best_model.predict(X_test)
    test_accuracy = accuracy_score(y_test, y_pred)

    logging.info(f"\n测试集准确率: {test_accuracy:.4f} ({test_accuracy*100:.2f}%)")
    logging.info("\n分类报告:")
    logging.info("\n" + classification_report(y_test, y_pred, target_names=['Cat', 'Dog']))

    # 6. Persist the best model together with its metadata.
    logging.info("\n" + "=" * 60)
    logging.info("保存模型")
    logging.info("=" * 60)

    model_data = {
        'model': best_model,
        'model_name': best_model_name,
        'accuracy': best_accuracy,
        'test_accuracy': test_accuracy,
        'results': results,
        'feature_method': FEATURE_METHOD
    }

    with open(MODEL_SAVE_PATH, 'wb') as f:
        pickle.dump(model_data, f)

    logging.info(f"最佳模型已保存到: {MODEL_SAVE_PATH}")
    logging.info(f"模型名称: {best_model_name}")
    logging.info(f"模型准确率: {test_accuracy:.4f} ({test_accuracy*100:.2f}%)")

    logging.info("\n" + "=" * 60)
    logging.info("训练完成！")
    logging.info("=" * 60)

# Script entry point.
if __name__ == '__main__':
    main()
