"""
训练多个分类器并输出训练时间、预测时间和准确率的表格。
用法示例:
    python train_ensemble.py -f vgg

默认从 util.createXY 加载数据，若存在缓存文件则直接使用 (X_vgg.pkl / y_vgg.pkl 或 X.pkl / y.pkl)
"""
import argparse
import time
import logging
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier, GradientBoostingClassifier, VotingClassifier, StackingClassifier
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


def build_classifiers(random_state=2023):
    """Build a dict of base and ensemble classifiers keyed by short name.

    Parameters
    ----------
    random_state : int
        Seed forwarded to every estimator that accepts one, for reproducible runs.

    Returns
    -------
    dict[str, estimator]
        Unfitted scikit-learn classifiers, ready for fit/predict.
    """
    # Base (weak) classifiers.
    lr = LogisticRegression(max_iter=2000, random_state=random_state)
    rf = RandomForestClassifier(n_estimators=100, random_state=random_state)
    # probability=True is required so soft voting can average class probabilities.
    svm = SVC(kernel='rbf', probability=True, random_state=random_state)
    dt = DecisionTreeClassifier(random_state=random_state)
    # NOTE: the original code also built an unused KNeighborsClassifier(3);
    # it was never added to the returned dict, so it has been removed.

    # Ensemble methods. Sharing estimator instances across ensembles is safe:
    # sklearn clones estimators internally before fitting.
    hard_voting = VotingClassifier(estimators=[('lr', lr), ('rf', rf), ('svc', svm)], voting='hard')
    soft_voting = VotingClassifier(estimators=[('lr', lr), ('rf', rf), ('svc', svm)], voting='soft')

    # scikit-learn >= 1.2 renamed BaggingClassifier's 'base_estimator' parameter
    # to 'estimator'; detect whichever this installed version supports.
    import inspect
    sig = inspect.signature(BaggingClassifier)
    est_key = 'estimator' if 'estimator' in sig.parameters else 'base_estimator'
    bagging_kwargs = {est_key: dt}
    bagging = BaggingClassifier(n_estimators=10, random_state=random_state, **bagging_kwargs)

    # Pasting = bagging without replacement (bootstrap=False).
    pasting = BaggingClassifier(n_estimators=10, bootstrap=False, random_state=random_state, **dict(bagging_kwargs))
    adaboost = AdaBoostClassifier(n_estimators=50, random_state=random_state)
    gradient_boosting = GradientBoostingClassifier(n_estimators=100, random_state=random_state)
    stacking = StackingClassifier(estimators=[('lr', lr), ('rf', rf), ('svc', svm)], final_estimator=LogisticRegression(), passthrough=False)

    return {
        'logistic_regression': lr,
        'random_forest': rf,
        'svm': svm,
        'hard_voting': hard_voting,
        'soft_voting': soft_voting,
        'bagging': bagging,
        'pasting': pasting,
        'adaboost': adaboost,
        'gradient_boosting': gradient_boosting,
        'stacking': stacking,
    }


def timed_train_predict(clf, X_train, y_train, X_test, y_test):
    """Fit ``clf`` on the training split, predict on the test split, and time both.

    Parameters
    ----------
    clf : estimator
        An unfitted scikit-learn classifier (mutated in place by ``fit``).
    X_train, y_train, X_test, y_test : array-like
        Train/test feature matrices and label vectors.

    Returns
    -------
    tuple[float, float, float]
        (train_time_seconds, predict_time_seconds, test_accuracy).
    """
    # Standardize features for scale-sensitive estimators (linear models and
    # kernel methods). Tree ensembles are scale-invariant and skip scaling.
    # hasattr(clf, 'kernel') already covers SVC; the explicit isinstance
    # checks are kept for clarity and robustness.
    need_scale = hasattr(clf, 'kernel') or isinstance(clf, (LogisticRegression, SVC))
    if need_scale:
        scaler = StandardScaler()
        X_train_proc = scaler.fit_transform(X_train)
        X_test_proc = scaler.transform(X_test)
    else:
        X_train_proc, X_test_proc = X_train, X_test

    # perf_counter() is monotonic and high-resolution; time.time() can jump
    # (NTP adjustments), which would corrupt the measured durations.
    t0 = time.perf_counter()
    clf.fit(X_train_proc, y_train)
    train_time = time.perf_counter() - t0

    t1 = time.perf_counter()
    preds = clf.predict(X_test_proc)
    pred_time = time.perf_counter() - t1

    acc = float(accuracy_score(y_test, preds))
    return train_time, pred_time, acc


def format_table(results):
    """Render result rows as an aligned plain-text table.

    ``results`` is a list of (name, train_time, pred_time, accuracy) tuples.
    Returns the table as a single newline-joined string.
    """
    # Name column is sized to the longest classifier name plus padding.
    name_w = 2 + max(len(row[0]) for row in results)
    header = (
        f"{'Classifier'.ljust(name_w)} "
        f"{'Training Time (s)'.rjust(16)} "
        f"{'Prediction Time (s)'.rjust(18)} "
        f"{'Accuracy'.rjust(10)}"
    )
    body = [
        f"{name.ljust(name_w)} {train_t:16.5f} {pred_t:18.5f} {acc:10.6f}"
        for name, train_t, pred_t, acc in results
    ]
    return '\n'.join([header, '-' * (len(header) + 2), *body])


def get_args():
    """Parse and return the command-line options for this script."""
    parser = argparse.ArgumentParser(description='训练并比较多个分类器')
    # (flags, add_argument keyword options) — declared as data, added in a loop.
    option_specs = [
        (('-f', '--feature'),
         dict(type=str, choices=['vgg', 'flat'], default='vgg', help='特征方法 (vgg 或 flat)')),
        (('--train_folder',),
         dict(type=str, default='./data/train', help='训练图像文件夹')),
        (('--test_size',),
         dict(type=float, default=0.25, help='测试集占比')),
        (('--max_samples',),
         dict(type=int, default=0, help='可选：子采样样本数量(>0 表示使用该数量的样本)')),
    ]
    for flags, options in option_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args()


def main():
    """Entry point: load/extract features, train every classifier, print a results table.

    Workflow: parse CLI args -> log path diagnostics -> build (X, y) via
    util_autozip.createXY -> optional sub-sampling -> train/test split ->
    train and time each classifier from build_classifiers() -> print table.
    """
    args = get_args()
    logging.info(f"使用特征方法: {args.feature}")
    # Diagnostics: resolve train_folder to an absolute path and list a few
    # sample image files — helps pinpoint dataset-path problems early.
    import os
    train_folder = args.train_folder
    abs_train_folder = os.path.abspath(os.path.join(os.getcwd(), train_folder))
    logging.info(f"train_folder (raw): {train_folder}")
    logging.info(f"train_folder (resolved): {abs_train_folder}")
    logging.info(f"train_folder exists: {os.path.exists(abs_train_folder)}")
    if os.path.exists(abs_train_folder):
        sample_files = []
        # Walk the tree, collecting at most 20 image paths; the duplicated
        # length check breaks out of both loops once the cap is reached.
        for root, _, files in os.walk(abs_train_folder):
            for f in files:
                if f.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp')):
                    sample_files.append(os.path.join(root, f))
                    if len(sample_files) >= 20:
                        break
            if len(sample_files) >= 20:
                break
        logging.info(f"样例图片数 (最多 20): {len(sample_files)}")
        for p in sample_files[:20]:
            logging.info(f" sample: {p}")

    # Import createXY lazily so heavy dependencies (cv2 / tensorflow) are not
    # loaded at module import time.
    # util_autozip is an improved variant that auto-detects and unzips a
    # zipped dataset (if one is passed in or sits in the current directory).
    from util_autozip import createXY # type: ignore
    # Forward --max_samples to createXY so the image count is already capped
    # at the unzip / feature-extraction stage (faster than filtering later).
    logging.info(f"当前工作目录: {os.getcwd()}")
    logging.info("即将调用 createXY() 以生成特征（会尝试自动解压 zip 如果需要）")
    if args.max_samples and args.max_samples > 0:
        X, y = createXY(train_folder=args.train_folder, dest_folder='.', method=args.feature, max_images=args.max_samples)
    else:
        X, y = createXY(train_folder=args.train_folder, dest_folder='.', method=args.feature)
    X = np.array(X)
    y = np.array(y)

    # Optional sub-sampling for quick local debugging (createXY may return
    # cached data larger than max_samples, so cap again here; fixed seed
    # keeps the subset reproducible).
    if args.max_samples and args.max_samples > 0 and args.max_samples < X.shape[0]:
        logging.info(f"使用子集进行训练: {args.max_samples} 个样本（随机抽取）")
        rng = np.random.default_rng(2023)
        idx = rng.choice(X.shape[0], size=args.max_samples, replace=False)
        X = X[idx]
        y = y[idx]

    if X.size == 0 or y.size == 0:
        logging.error('X 或 y 为空，无法训练。请检查数据或缓存文件。')
        return

    X = X.astype('float32')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=args.test_size, random_state=2023)

    classifiers = build_classifiers()

    results = []
    for name, clf in classifiers.items():
        logging.info(f"开始训练: {name}")
        try:
            tt, pt, acc = timed_train_predict(clf, X_train, y_train, X_test, y_test)
            results.append((name, tt, pt, acc))
            logging.info(f"完成 {name}: 训练 {tt:.2f}s, 预测 {pt:.2f}s, 准确率 {acc:.4f}")
        except Exception as e:
            # Log the full traceback but keep going so one failing classifier
            # does not abort the whole comparison run.
            logging.exception(f"训练 {name} 失败: {e}")

    # Print the aligned results table.
    print('\n' + format_table(results))


if __name__ == '__main__':
    # Run the CLI entry point only when executed as a script (not on import).
    main()