# 导入必要的库
import os
import warnings
# 设置环境变量以避免joblib在Windows上的CPU核心数检测问题
os.environ['LOKY_MAX_CPU_COUNT'] = '4'  # 可以根据你的CPU核心数调整
# 设置TensorFlow环境变量以减少信息输出
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # 只显示错误信息
# 抑制一些不重要的警告
warnings.filterwarnings('ignore', category=UserWarning)
warnings.filterwarnings('ignore', category=FutureWarning)

import numpy as np  # 用于处理多维数组和矩阵运算
import faiss  # 用于高效相似性搜索和稠密向量聚类
from util import createXY  # 用于创建数据集的特征和标签
from sklearn.model_selection import train_test_split  # 用于拆分数据集为训练集和测试集
from sklearn.neighbors import KNeighborsClassifier  # sklearn中的K近邻分类器
from sklearn.ensemble import VotingClassifier, BaggingClassifier, RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier  # 集成学习方法
from sklearn.linear_model import LogisticRegression  # 逻辑回归
from sklearn.svm import SVC  # 支持向量机
from sklearn.tree import DecisionTreeClassifier  # 决策树
from sklearn.naive_bayes import GaussianNB  # 朴素贝叶斯
from sklearn.metrics import accuracy_score  # 评估指标
import logging  # 用于记录日志
from tqdm import tqdm  # 用于在循环中显示进度条
from FaissKNeighbors import FaissKNeighbors  # 导入自定义的FaissKNeighbors类
import pickle  # 用于模型保存
import time  # 用于时间计算
from collections import Counter  # 用于统计
from joblib import dump  # 确保导入 joblib

# 配置logging, 确保能够打印正在运行的函数名
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# 配置参数（不再使用命令行参数）
def get_config():
    """Return the fixed run configuration as a dict.

    Command-line arguments are intentionally not used; every value is
    hard-coded here.

    Returns:
        dict with keys: mode, feature, ensemble, train_dir, dest_dir,
        save_models, plot_results.
    """
    return {
        'mode': 'cpu',             # always run on CPU
        'feature': 'flat',         # flat feature extraction only
        'ensemble': 'all',         # run every ensemble method
        'train_dir': 'data/train', # training data directory
        'dest_dir': '.',           # output/cache directory
        'save_models': True,       # persist trained models to disk
        'plot_results': False,     # no result plots
    }

# 创建基础分类器
def create_base_classifiers(mode='cpu', res=None):
    """Build the dictionary of unfitted base classifiers used for ensembling.

    Parameters:
        mode: execution mode ('cpu' or 'gpu'); not consulted in this function.
        res: FAISS GPU resources object, forwarded to FaissKNeighbors.

    Returns:
        dict mapping classifier name -> unfitted estimator instance.
    """
    seed = 2023
    classifiers = {
        # FAISS-backed KNN variants
        'knn_faiss_3': FaissKNeighbors(k=3, res=res),
        'knn_faiss_5': FaissKNeighbors(k=5, res=res),
        # sklearn KNN variants
        'knn_sklearn_3': KNeighborsClassifier(n_neighbors=3),
        'knn_sklearn_5': KNeighborsClassifier(n_neighbors=5),
        # other model families
        'logistic': LogisticRegression(random_state=seed, max_iter=1000),
        'svm': SVC(random_state=seed, probability=True, cache_size=1000, kernel='linear'),
        'decision_tree': DecisionTreeClassifier(random_state=seed),
        'naive_bayes': GaussianNB(),
        'random_forest': RandomForestClassifier(n_estimators=100, random_state=seed),
    }

    logging.info(f"创建了 {len(classifiers)} 个基础分类器")
    return classifiers

# 投票集成方法
def voting_ensemble(X_train, y_train, X_test, y_test, mode='cpu', res=None):
    """Train hard- and soft-voting ensembles and report their test accuracy.

    Returns:
        dict with keys 'hard_voting' and 'soft_voting'; each value holds the
        fitted model, its test-set accuracy, and the training time in seconds.
    """
    logging.info("开始训练投票集成模型...")

    # Base estimators; every one supports predict_proba (required for soft voting).
    estimators = [
        ('knn_sklearn_3', KNeighborsClassifier(n_neighbors=3)),
        ('knn_sklearn_5', KNeighborsClassifier(n_neighbors=5)),
        ('logistic', LogisticRegression(random_state=2023, max_iter=1000)),
        ('svm', SVC(random_state=2023, probability=True, cache_size=1000, kernel='linear')),
        ('naive_bayes', GaussianNB()),
        ('random_forest', RandomForestClassifier(n_estimators=100, random_state=2023)),
    ]

    def fit_and_score(voting_mode):
        # Fit one VotingClassifier and return (model, accuracy, train_seconds).
        model = VotingClassifier(estimators=estimators, voting=voting_mode)
        t0 = time.time()
        model.fit(X_train, y_train)
        elapsed = time.time() - t0
        acc = accuracy_score(y_test, model.predict(X_test))
        return model, acc, elapsed

    hard_model, hard_accuracy, train_time = fit_and_score('hard')
    soft_model, soft_accuracy, soft_train_time = fit_and_score('soft')

    logging.info(f"硬投票准确率: {hard_accuracy:.4f}, 训练时间: {train_time:.2f}秒")
    logging.info(f"软投票准确率: {soft_accuracy:.4f}, 训练时间: {soft_train_time:.2f}秒")

    return {
        'hard_voting': {'model': hard_model, 'accuracy': hard_accuracy, 'time': train_time},
        'soft_voting': {'model': soft_model, 'accuracy': soft_accuracy, 'time': soft_train_time},
    }

# Bagging集成方法
def bagging_ensemble(X_train, y_train, X_test, y_test, mode='cpu', res=None):
    """Train Bagging ensembles over KNN and decision-tree base learners.

    Returns:
        dict with keys 'knn_bagging' and 'tree_bagging'; each value holds the
        fitted model, its test-set accuracy, and the training time in seconds.
    """
    logging.info("开始训练Bagging集成模型...")

    # (result key, base estimator, log label) for each bagging variant.
    specs = [
        ('knn_bagging', KNeighborsClassifier(n_neighbors=3), "KNN Bagging"),
        ('tree_bagging', DecisionTreeClassifier(random_state=2023), "决策树 Bagging"),
    ]

    results = {}
    for key, base_estimator, label in specs:
        model = BaggingClassifier(
            estimator=base_estimator,
            n_estimators=10,
            random_state=2023,
        )
        t0 = time.time()
        model.fit(X_train, y_train)
        elapsed = time.time() - t0

        acc = accuracy_score(y_test, model.predict(X_test))
        results[key] = {'model': model, 'accuracy': acc, 'time': elapsed}
        logging.info(f"{label}准确率: {acc:.4f}, 训练时间: {elapsed:.2f}秒")

    return results

# Pasting集成方法
def pasting_ensemble(X_train, y_train, X_test, y_test):
    """Train a Pasting ensemble (bagging without replacement) of decision trees.

    Returns:
        dict with key 'pasting' holding the fitted model, its test-set
        accuracy, and the training time in seconds.
    """
    logging.info("开始训练Pasting集成模型...")

    # bootstrap=False -> sampling WITHOUT replacement, i.e. Pasting.
    model = BaggingClassifier(
        estimator=DecisionTreeClassifier(random_state=2023),
        n_estimators=10,
        bootstrap=False,
        random_state=2023,
    )
    t0 = time.time()
    model.fit(X_train, y_train)
    elapsed = time.time() - t0

    acc = accuracy_score(y_test, model.predict(X_test))
    logging.info(f"Pasting准确率: {acc:.4f}, 训练时间: {elapsed:.2f}秒")
    return {'pasting': {'model': model, 'accuracy': acc, 'time': elapsed}}

# AdaBoost集成方法
def adaboost_ensemble(X_train, y_train, X_test, y_test):
    """Train an AdaBoost ensemble with decision-tree weak learners.

    Returns:
        dict with key 'adaboost' holding the fitted model, its test-set
        accuracy, and the training time in seconds.
    """
    logging.info("开始训练AdaBoost集成模型...")

    booster = AdaBoostClassifier(
        estimator=DecisionTreeClassifier(random_state=2023),
        n_estimators=50,
        algorithm='SAMME',  # explicitly select the discrete SAMME algorithm
        random_state=2023,
    )
    t0 = time.time()
    booster.fit(X_train, y_train)
    elapsed = time.time() - t0

    acc = accuracy_score(y_test, booster.predict(X_test))
    logging.info(f"AdaBoost准确率: {acc:.4f}, 训练时间: {elapsed:.2f}秒")
    return {'adaboost': {'model': booster, 'accuracy': acc, 'time': elapsed}}

# Gradient Boosting集成方法
def gradient_boosting_ensemble(X_train, y_train, X_test, y_test):
    """Train a Gradient Boosting classifier and report its test accuracy.

    Returns:
        dict with key 'gradient_boosting' holding the fitted model, its
        test-set accuracy, and the training time in seconds.
    """
    logging.info("开始训练Gradient Boosting集成模型...")

    booster = GradientBoostingClassifier(n_estimators=100, random_state=2023)
    t0 = time.time()
    booster.fit(X_train, y_train)
    elapsed = time.time() - t0

    acc = accuracy_score(y_test, booster.predict(X_test))
    logging.info(f"Gradient Boosting准确率: {acc:.4f}, 训练时间: {elapsed:.2f}秒")
    return {'gradient_boosting': {'model': booster, 'accuracy': acc, 'time': elapsed}}

# Stacking集成方法
def stacking_ensemble(X_train, y_train, X_test, y_test, mode='cpu', res=None):
    """Two-level stacking: level-1 probability features feed a logistic meta-learner.

    Level-1 meta features for the training split come from 3-fold
    cross-validated predict_proba (avoids leaking training labels); the
    test-split meta features come from classifiers refit on the full
    training set.

    NOTE(review): only column 1 of predict_proba is used as the meta
    feature, which assumes a binary classification task — confirm before
    using on multi-class data.

    Returns:
        dict with key 'stacking' holding {'level1': [...], 'meta': ...},
        the test-set accuracy, and the total training time in seconds.
    """
    logging.info("开始训练Stacking集成模型...")

    from sklearn.model_selection import cross_val_predict

    # First-level (base) classifiers.
    level1 = [
        KNeighborsClassifier(n_neighbors=3),
        KNeighborsClassifier(n_neighbors=5),
        DecisionTreeClassifier(random_state=2023),
        GaussianNB(),
        SVC(random_state=2023, probability=True, cache_size=1000, kernel='linear'),
    ]
    # Second-level (meta) learner.
    meta_learner = LogisticRegression(random_state=2023, max_iter=1000)

    t0 = time.time()

    n_models = len(level1)
    train_meta = np.zeros((X_train.shape[0], n_models))
    test_meta = np.zeros((X_test.shape[0], n_models))

    for idx, clf in enumerate(level1):
        logging.info(f"训练第一层分类器 {idx+1}/{n_models}")

        # Out-of-fold probabilities for the training split.
        train_meta[:, idx] = cross_val_predict(clf, X_train, y_train, cv=3, method='predict_proba')[:, 1]

        # Refit on the full training set, then score the test split.
        clf.fit(X_train, y_train)
        test_meta[:, idx] = clf.predict_proba(X_test)[:, 1]

    meta_learner.fit(train_meta, y_train)

    predictions = meta_learner.predict(test_meta)
    accuracy = accuracy_score(y_test, predictions)
    elapsed = time.time() - t0

    logging.info(f"Stacking准确率: {accuracy:.4f}, 训练时间: {elapsed:.2f}秒")

    return {
        'stacking': {
            'model': {'level1': level1, 'meta': meta_learner},
            'accuracy': accuracy,
            'time': elapsed,
        }
    }

# 评估所有分类器
def evaluate_individual_classifiers(X_train, y_train, X_test, y_test, mode='cpu', res=None):
    """Fit and score every base classifier independently on the test split.

    Returns:
        dict mapping classifier name -> {'model', 'accuracy', 'time'}.
    """
    logging.info("评估单个分类器性能...")

    results = {}
    for name, clf in tqdm(create_base_classifiers(mode, res).items(), desc="评估分类器"):
        t0 = time.time()
        clf.fit(X_train, y_train)
        elapsed = time.time() - t0

        acc = accuracy_score(y_test, clf.predict(X_test))
        results[name] = {'model': clf, 'accuracy': acc, 'time': elapsed}
        logging.info(f"{name}准确率: {acc:.4f}, 训练时间: {elapsed:.2f}秒")

    return results

# 保存模型
def save_models(results, dest_dir='.'):
    """Pickle every fitted model found in the nested results mapping.

    Parameters:
        results: {category: {model_name: {'model': ..., ...}}}; categories
            whose value is not a dict, and entries without a 'model' key,
            are skipped.
        dest_dir: existing output directory for the .pkl files.
    """
    logging.info(f"保存模型到目录: {dest_dir}")

    for category, models in results.items():
        if not isinstance(models, dict):
            continue
        for model_name, metrics in models.items():
            if 'model' not in metrics:
                continue
            # os.path.join keeps the path portable (the original used a
            # hard-coded '/' separator).
            model_path = os.path.join(dest_dir, f"{category}_{model_name}_model.pkl")
            with open(model_path, 'wb') as f:
                pickle.dump(metrics['model'], f)
            logging.info(f"已保存模型: {model_path}")

# 保存最佳模型
def save_best_model(model, file_path="best_ensemble_model.pkl"):
    """Persist the best model with joblib.

    Parameters:
        model: the fitted model to save.
        file_path: destination file path for the serialized model.
    """
    # Bug fix: the original ignored `file_path` and always wrote to the
    # hard-coded "best_ensemble_model.pkl"; it is now honored (the default
    # value preserves the previous behavior for existing callers).
    dump(model, file_path)
    logging.info(f"最佳模型已保存为 {file_path}")

# 显示结果表格
def display_results_table(all_results):
    """Print a comparison table of every trained model, best first.

    Parameters:
        all_results: {category: {model_name: {'accuracy': ..., 'time': ...}}};
            non-dict categories and entries without an 'accuracy' key are
            skipped.
    """
    print("\n" + "="*80)
    print("集成学习模型性能对比结果")
    print("="*80)
    print(f"{'分类器':<20} {'训练时间(秒)':<15} {'预测准确率':<15} {'准确率':<10}")
    print("-"*80)

    # Flatten the nested mapping into one row per model.
    all_models = [
        {'name': model_name, 'category': category,
         'accuracy': metrics['accuracy'], 'time': metrics['time']}
        for category, models in all_results.items()
        if isinstance(models, dict)
        for model_name, metrics in models.items()
        if 'accuracy' in metrics
    ]

    # Highest accuracy first.
    all_models.sort(key=lambda x: x['accuracy'], reverse=True)

    for model in all_models:
        print(f"{model['name']:<20} {model['time']:<15.2f} {model['accuracy']:<15.6f} {model['accuracy']:<10.4f}")

    print("-"*80)

    # Bug fix: the original indexed all_models[0] unconditionally and raised
    # IndexError when no results were collected.
    if not all_models:
        print("没有可用的模型结果")
        print("="*80)
        return

    best_model = all_models[0]
    print(f"最佳模型: {best_model['name']}")
    print(f"最高准确率: {best_model['accuracy']:.6f}")
    print(f"训练时间: {best_model['time']:.2f}秒")
    print("="*80)

# 主函数
def main():
    """Run the full pipeline: load data, train/evaluate all ensembles, save models.

    Returns:
        dict of all result groups keyed by method family.
    """
    config = get_config()

    logging.info(f"使用模式: {config['mode'].upper()}")
    logging.info(f"特征提取方法: {config['feature'].upper()}")
    logging.info(f"训练集目录: {config['train_dir']}")
    logging.info(f"缓存输出目录: {config['dest_dir']}")

    # Load and preprocess the data: float32 features, L2-normalized for FAISS.
    X, y = createXY(train_folder=config['train_dir'], dest_folder=config['dest_dir'], method=config['feature'])
    X = np.array(X).astype('float32')
    faiss.normalize_L2(X)
    y = np.array(y)
    logging.info("数据加载和预处理完成。")
    logging.info(f"数据集大小: {X.shape}, 标签分布: {Counter(y)}")

    # 75/25 train/test split with a fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=2023)
    logging.info("数据集划分为训练集和测试集。")
    logging.info(f"训练集大小: {X_train.shape}, 测试集大小: {X_test.shape}")

    # Collect every result group under one mapping for reporting/saving.
    all_results = {}
    all_results['individual'] = evaluate_individual_classifiers(X_train, y_train, X_test, y_test)
    all_results['voting'] = voting_ensemble(X_train, y_train, X_test, y_test)
    all_results['bagging'] = bagging_ensemble(X_train, y_train, X_test, y_test)
    all_results['stacking'] = stacking_ensemble(X_train, y_train, X_test, y_test)
    all_results['pasting'] = pasting_ensemble(X_train, y_train, X_test, y_test)
    all_results['adaboost'] = adaboost_ensemble(X_train, y_train, X_test, y_test)
    all_results['gradient_boosting'] = gradient_boosting_ensemble(X_train, y_train, X_test, y_test)

    # Display the comparison table.
    display_results_table(all_results)

    # Persist all models if configured.
    if config['save_models']:
        save_models(all_results, config['dest_dir'])
        logging.info("所有模型已成功保存！")

    # Bug fix: max() over a dict iterates its KEYS (strings), so the original
    # `max(all_results['stacking'], key=lambda x: x['accuracy'])` raised
    # TypeError; iterate over .values() to compare the metric dicts.
    # NOTE(review): this still only considers the 'stacking' group, matching
    # the original intent.
    best_entry = max(all_results['stacking'].values(), key=lambda m: m['accuracy'])
    save_best_model(best_entry['model'])

    return all_results

# Script entry point: run the pipeline only when executed directly, not on import.
if __name__ == '__main__':
    main()
