# New file: train_ensemble.py — train and compare ensemble classifiers, keep the best.
import os
import time
import logging
import pickle
import numpy as np
import faiss
from util import createXY
from FaissKNeighbors import FaissKNeighbors
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier, GradientBoostingClassifier, VotingClassifier, StackingClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.metrics import accuracy_score
import joblib
from tqdm import tqdm

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Fixed settings: 'flat' Faiss index only, CPU only.
DATA_FOLDER = "../data/train"  # adjust to your own data directory
DEST_FOLDER = "."
METHOD = 'flat'
RANDOM_STATE = 2023
# Directory where the best model and its description file are persisted.
SAVE_DIR = os.path.join('.', 'saved_models')
os.makedirs(SAVE_DIR, exist_ok=True)


def load_data():
    """Build (or read back) the feature matrix and labels for training.

    Delegates feature construction to ``createXY``, converts the features to
    float32 and L2-normalises them in place with Faiss (best-effort: a
    normalisation failure is logged, not raised).

    Returns:
        tuple[np.ndarray, np.ndarray]: ``(X, y)`` — float32 features and labels.
    """
    logging.info(f"开始构建或读取特征 X,y，method={METHOD}")
    features, labels = createXY(train_folder=DATA_FOLDER, dest_folder=DEST_FOLDER, method=METHOD)
    X = np.array(features, dtype='float32')
    y = np.array(labels)
    # L2-normalise the vectors — common practice before Faiss similarity search.
    try:
        faiss.normalize_L2(X)
    except Exception as e:
        logging.warning(f"faiss.normalize_L2 出错: {e}")
    logging.info(f"加载完成: X.shape={X.shape}, y.shape={y.shape}")
    return X, y


def evaluate_model(name, model, X_train, X_test, y_train, y_test):
    """Fit *model* on the training split and score it on the test split.

    Args:
        name: Human-readable model identifier used in logs and the result dict.
        model: Any estimator exposing ``fit`` and ``predict``.
        X_train, X_test, y_train, y_test: Pre-split feature/label arrays.

    Returns:
        dict: name, wall-clock fit/predict times, test accuracy, fitted model.
    """
    t0 = time.time()
    model.fit(X_train, y_train)
    train_time = time.time() - t0

    t0 = time.time()
    y_pred = model.predict(X_test)
    pred_time = time.time() - t0

    acc = accuracy_score(y_test, y_pred)
    logging.info(f"{name}: 训练 {train_time:.2f}s, 预测 {pred_time:.2f}s, 准确率 {acc:.4f}")
    return {
        'name': name,
        'train_time': train_time,
        'pred_time': pred_time,
        'accuracy': acc,
        'model': model,
    }


def _build_classifiers():
    """Construct the base learners and ensemble models; returns name -> estimator."""
    # Linear model and SVM get a StandardScaler in front of them.
    lr = make_pipeline(StandardScaler(), LogisticRegression(max_iter=500, random_state=RANDOM_STATE))
    rf = RandomForestClassifier(n_estimators=100, random_state=RANDOM_STATE, n_jobs=-1)
    # probability=True so the SVC can participate in soft voting (predict_proba).
    svc = make_pipeline(StandardScaler(), SVC(kernel='rbf', probability=True, random_state=RANDOM_STATE))

    # Ensemble methods.
    # sklearn 1.2+ uses `estimator` instead of the deprecated `base_estimator`.
    bagging = BaggingClassifier(estimator=DecisionTreeClassifier(max_depth=10), n_estimators=20, random_state=RANDOM_STATE, n_jobs=-1)
    adaboost = AdaBoostClassifier(n_estimators=50, random_state=RANDOM_STATE)
    gboost = GradientBoostingClassifier(n_estimators=100, random_state=RANDOM_STATE)

    # Voting (hard and soft) and stacking share the same base learners.
    base_estimators = [('lr', lr), ('rf', rf), ('svc', svc)]
    voting_hard = VotingClassifier(estimators=base_estimators, voting='hard')
    voting_soft = VotingClassifier(estimators=base_estimators, voting='soft')
    stacking = StackingClassifier(estimators=base_estimators, final_estimator=LogisticRegression(max_iter=500), n_jobs=-1)

    return {
        'logistic_regression': lr,
        'random_forest': rf,
        'svm': svc,
        'bagging': bagging,
        'adaboost': adaboost,
        'gradient_boosting': gboost,
        'hard_voting': voting_hard,
        'soft_voting': voting_soft,
        'stacking': stacking,
    }


def _tune_classifiers(classifiers, X_train, y_train):
    """Small randomized hyper-parameter search for RF and GB; updates *classifiers* in place.

    Failures are logged and the untuned estimator is kept — tuning is best-effort.
    """
    rf_param_dist = {
        'n_estimators': [50, 100, 150],
        'max_depth': [None, 10, 20],
    }
    gboost_param_dist = {
        'n_estimators': [50, 100],
        'learning_rate': [0.05, 0.1],
    }

    # Keep n_iter small to bound the search time.
    try:
        logging.info('开始对 RandomForest 进行小范围随机搜索...')
        rs_rf = RandomizedSearchCV(RandomForestClassifier(random_state=RANDOM_STATE, n_jobs=-1), rf_param_dist, n_iter=4, scoring='accuracy', cv=3, random_state=RANDOM_STATE, n_jobs=-1)
        rs_rf.fit(X_train, y_train)
        logging.info(f"RandomForest 最佳参数: {rs_rf.best_params_}")
        classifiers['random_forest'] = rs_rf.best_estimator_
    except Exception as e:
        logging.warning(f"RandomizedSearchCV for RF 失败: {e}")

    try:
        logging.info('开始对 GradientBoosting 进行小范围随机搜索...')
        rs_gb = RandomizedSearchCV(GradientBoostingClassifier(random_state=RANDOM_STATE), gboost_param_dist, n_iter=3, scoring='accuracy', cv=3, random_state=RANDOM_STATE, n_jobs=-1)
        rs_gb.fit(X_train, y_train)
        logging.info(f"GradientBoosting 最佳参数: {rs_gb.best_params_}")
        classifiers['gradient_boosting'] = rs_gb.best_estimator_
    except Exception as e:
        logging.warning(f"RandomizedSearchCV for GB 失败: {e}")


def _evaluate_faiss_knn(X_train, X_test, y_train, y_test):
    """Evaluate FaissKNeighbors for k = 1..5; returns a list of result dicts.

    Evaluation is best-effort: any failure is logged and an empty/partial
    list is returned.
    """
    results = []
    try:
        best_k = 1
        best_acc = 0.0
        for k in range(1, 6):
            fk = FaissKNeighbors(k=k, res=None)
            start = time.time()
            fk.fit(X_train, y_train)
            train_t = time.time() - start

            start = time.time()
            y_pred = fk.predict(X_test)
            pred_t = time.time() - start

            acc = accuracy_score(y_test, y_pred)
            logging.info(f"FaissKNeighbors k={k}: 训练 {train_t:.2f}s, 预测 {pred_t:.2f}s, 准确率 {acc:.4f}")
            results.append({'name': f'faiss_knn_k={k}', 'train_time': train_t, 'pred_time': pred_t, 'accuracy': acc, 'model': fk})
            if acc > best_acc:
                best_acc = acc
                best_k = k
        logging.info(f"FaissKNeighbors 最佳 k={best_k}, 准确率={best_acc:.4f}")
    except Exception as e:
        logging.warning(f"FaissKNeighbors 评估失败: {e}")
    return results


def _report_and_save(results):
    """Print an accuracy-ranked summary table and persist the best model.

    Guards against an empty *results* list (possible when every model failed
    to train) instead of crashing on ``results_sorted[0]``.
    """
    if not results:
        logging.error("没有任何模型评估成功，跳过报告与保存")
        return

    results_sorted = sorted(results, key=lambda x: x['accuracy'], reverse=True)

    # Compact table-style output.
    logging.info('\nClassifier\tTraining Time (s)\tPrediction Time (s)\tAccuracy')
    for r in results_sorted:
        logging.info(f"{r['name']}\t{r['train_time']:.2f}\t{r['pred_time']:.2f}\t{r['accuracy']:.4f}")

    best = results_sorted[0]
    best_model = best['model']
    best_name = best['name']
    best_acc = best['accuracy']

    save_path = os.path.join(SAVE_DIR, 'best_ensemble.pkl')
    try:
        joblib.dump(best_model, save_path)
        logging.info(f"已保存最佳模型 {best_name} (准确率={best_acc:.4f}) 到 {save_path}")
        # Also write a small human-readable description file next to the model.
        with open(os.path.join(SAVE_DIR, 'best_info.txt'), 'w') as f:
            f.write(f"name: {best_name}\naccuracy: {best_acc:.6f}\n")
    except Exception as e:
        logging.warning(f"保存模型失败: {e}")


def main():
    """Full pipeline: load data, build/tune models, evaluate, report, save best."""
    X, y = load_data()

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=RANDOM_STATE, stratify=y)

    classifiers = _build_classifiers()
    _tune_classifiers(classifiers, X_train, y_train)

    # Train and evaluate each sklearn model; failures are logged and skipped.
    results = []
    for name, clf in classifiers.items():
        try:
            res = evaluate_model(name, clf, X_train, X_test, y_train, y_test)
            results.append(res)
        except Exception as e:
            logging.warning(f"训练或评估 {name} 时出错: {e}")

    # Faiss KNN is evaluated separately (not part of the sklearn pipeline).
    results.extend(_evaluate_faiss_knn(X_train, X_test, y_train, y_test))

    _report_and_save(results)

    logging.info('训练完成。请在本机运行脚本以完成 2+ 小时的训练任务并截图输出。')


# Script entry point: run the whole training/evaluation pipeline.
if __name__ == '__main__':
    main()
