"""
集成学习训练脚本（CPU + flat 特征），终端输出格式贴近题目截图。

包含：
- 仅使用 flat（32x32 灰度展平）特征，不使用 VGG、不用 GPU、无需命令行参数
- 训练基学习器：LogisticRegression、RandomForest、SVC、GradientBoosting
- 训练集成：hard_voting、soft_voting、bagging、pasting、adaboost、stacking
- 评估训练/预测耗时与准确率，并以固定顺序打印表格：
    logistic_regression、random_forest、svm、hard_voting、soft_voting、bagging、pasting、adaboost、gradient_boosting、stacking

说明
- 依赖 util.createXY 生成/复用 X.pkl、y.pkl；首次会较慢，之后复用缓存
- 仅使用 CPU；如需改路径，请修改 TRAIN_FOLDER 常量
"""

import os
import time
import json
import logging
from typing import Dict, List, Tuple

import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (
    RandomForestClassifier,
    GradientBoostingClassifier,
    VotingClassifier,
    StackingClassifier,
    BaggingClassifier,
    AdaBoostClassifier,
)
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from joblib import dump

from util import createXY


# Logging configuration: INFO level with timestamped messages.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")


# Constants.
TRAIN_FOLDER = "../data/train"  # default training-data path (image directory or zip)
DEST_FOLDER = "."              # cache directory for X.pkl / y.pkl
MODEL_DIR = "models"           # directory where the best model is saved
MODEL_PATH = os.path.join(MODEL_DIR, "best_model.pkl")
META_PATH = os.path.join(MODEL_DIR, "metadata.json")
RANDOM_STATE = 2023            # shared seed for reproducible splits and models


def _maybe_import_xgboost():
    """保留以便需要时扩展；当前结果表不包含 xgboost。"""
    try:
        from xgboost import XGBClassifier  # type: ignore
        return XGBClassifier
    except Exception:
        logging.warning("未检测到 xgboost（可选）。当前脚本结果表不包含 xgboost 项。")
        return None


def _resolve_data_path() -> str:
    """Probe common candidate locations (directory or zip) for the dataset.

    Returns the first existing candidate, normalized; falls back to the
    normalized TRAIN_FOLDER constant when nothing is found.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    relative_candidates = (
        "../data/train",
        "../data/train.zip",
        "../../data/train",
        "../../data/train.zip",
        "cat_dog_data.zip",
    )
    candidates = [TRAIN_FOLDER]
    candidates.extend(os.path.join(script_dir, rel) for rel in relative_candidates)
    for candidate in (os.path.normpath(c) for c in candidates):
        if os.path.exists(candidate):
            return candidate
    return os.path.normpath(TRAIN_FOLDER)


def load_data() -> Tuple[np.ndarray, np.ndarray]:
    """Load the flat (32x32 grayscale, flattened) features and labels.

    Returns (X, y) as float32 / int64 arrays; raises ValueError when no
    samples could be read from the resolved data path.
    """
    train_path = _resolve_data_path()
    logging.info(f"使用训练数据路径: {train_path}")
    X, y = createXY(train_folder=train_path, dest_folder=DEST_FOLDER, method="flat")
    X, y = np.asarray(X, dtype=np.float32), np.asarray(y, dtype=np.int64)
    if min(X.size, y.size) == 0:
        raise ValueError(
            "未读取到任何样本。请确认数据放在 ../data/train 目录或 ../data/train.zip，文件名以 cat./dog. 开头。"
        )
    logging.info(f"X.shape: {X.shape}  |  y.shape: {y.shape}")
    logging.info(f"X 内存占用约: {X.nbytes / (1024*1024):.2f} MB")
    return X, y


def build_estimators() -> Dict[str, Pipeline]:
    """Build the base-learner pipelines (no grid search).

    Names match the results table exactly so logs line up with the
    reference screenshot. Every learner sits behind a StandardScaler.
    """
    def _scaled(clf) -> Pipeline:
        # Uniform two-stage pipeline: scaler front-end + classifier.
        return Pipeline([("scaler", StandardScaler()), ("clf", clf)])

    # bagging/pasting share a decision-tree base learner; pasting differs
    # only in bootstrap=False (sampling without replacement).
    base_tree = DecisionTreeClassifier(random_state=RANDOM_STATE)

    return {
        "logistic_regression": _scaled(
            LogisticRegression(max_iter=1000, random_state=RANDOM_STATE)
        ),
        "random_forest": _scaled(
            RandomForestClassifier(n_estimators=400, max_depth=None, random_state=RANDOM_STATE, n_jobs=-1)
        ),
        "svm": _scaled(
            # probability=True so soft voting can use predict_proba later.
            SVC(kernel="rbf", C=1.0, gamma="scale", probability=True, random_state=RANDOM_STATE)
        ),
        "gradient_boosting": _scaled(
            GradientBoostingClassifier(n_estimators=300, learning_rate=0.1, max_depth=3, random_state=RANDOM_STATE)
        ),
        "bagging": _scaled(
            BaggingClassifier(estimator=base_tree, n_estimators=100, n_jobs=-1, random_state=RANDOM_STATE, bootstrap=True)
        ),
        "pasting": _scaled(
            BaggingClassifier(estimator=base_tree, n_estimators=100, n_jobs=-1, random_state=RANDOM_STATE, bootstrap=False)
        ),
        "adaboost": _scaled(
            AdaBoostClassifier(n_estimators=300, learning_rate=0.5, random_state=RANDOM_STATE)
        ),
    }


def fit_and_log(name: str, pipe: Pipeline, X_train, y_train):
    """Fit *pipe* on the training data and log the wall-clock duration.

    Returns (fitted_pipe, train_time_seconds).
    """
    t0 = time.time()
    pipe.fit(X_train, y_train)
    elapsed = time.time() - t0
    logging.info(f"{name}训练完成，用时{elapsed:.4f}秒")
    return pipe, elapsed


def evaluate_model(model, X_test, y_test):
    """Predict on the test split and return (accuracy, prediction_seconds)."""
    t0 = time.time()
    predictions = model.predict(X_test)
    elapsed = time.time() - t0
    return accuracy_score(y_test, predictions), elapsed


def main():
    """End-to-end pipeline: load flat features, train base learners and
    ensembles, print a fixed-order results table, and save the best model
    (by test accuracy) plus a JSON metadata file under MODEL_DIR.
    """
    os.makedirs(MODEL_DIR, exist_ok=True)

    # 1) Load the data (flat features, CPU only).
    X, y = load_data()
    logging.info("数据加载和预处理完成。")
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.25, random_state=RANDOM_STATE, stratify=y
    )
    logging.info("数据集划分为训练集和测试集。")

    # 2) Train the base learners.
    logging.info("准备划分为训练集和测试集，开始训练与评估。")
    ests = build_estimators()
    results = []
    fitted: Dict[str, Pipeline] = {}

    # Base learners are trained and logged in this order.
    train_order = [
        "logistic_regression",
        "random_forest",
        "svm",
        "bagging",
        "pasting",
        "adaboost",
        "gradient_boosting",
    ]

    for name in train_order:
        model, t = fit_and_log(name, ests[name], X_train, y_train)
        acc, ptime = evaluate_model(model, X_test, y_test)
        logging.info(f"{name}评估完成，用时{ptime:.4f}秒")
        results.append({"name": name, "type": "base", "accuracy": float(acc), "train_time": float(t), "predict_time": float(ptime)})
        fitted[name] = model

    # 3) Ensemble models built on top of the base pipelines.
    # NOTE(review): sklearn's voting/stacking classifiers clone and refit the
    # supplied estimators inside fit(), so passing already-fitted pipelines
    # means the base learners are trained a second time here.
    estimators_for_voting = [
        ("lr", fitted["logistic_regression"]),
        ("rf", fitted["random_forest"]),
        ("svm", fitted["svm"]),
        ("gb", fitted["gradient_boosting"]),
    ]

    # hard voting: majority vote over predicted class labels
    start = time.time()
    voting_hard = VotingClassifier(estimators=estimators_for_voting, voting="hard", n_jobs=-1)
    voting_hard.fit(X_train, y_train)
    train_time = time.time() - start
    acc, pred_time = evaluate_model(voting_hard, X_test, y_test)
    results.append({"name": "hard_voting", "type": "ensemble", "accuracy": float(acc), "train_time": float(train_time), "predict_time": float(pred_time)})
    logging.info(f"hard_voting训练完成，用时{train_time:.4f}秒")
    logging.info(f"hard_voting评估完成，用时{pred_time:.4f}秒")

    # soft voting: averages predicted probabilities. Requires predict_proba
    # on every base learner; SVC was built with probability=True to ensure it.
    start = time.time()
    voting_soft = VotingClassifier(estimators=estimators_for_voting, voting="soft", n_jobs=-1)
    voting_soft.fit(X_train, y_train)
    train_time = time.time() - start
    acc, pred_time = evaluate_model(voting_soft, X_test, y_test)
    results.append({"name": "soft_voting", "type": "ensemble", "accuracy": float(acc), "train_time": float(train_time), "predict_time": float(pred_time)})
    logging.info(f"soft_voting训练完成，用时{train_time:.4f}秒")
    logging.info(f"soft_voting评估完成，用时{pred_time:.4f}秒")

    # stacking: logistic-regression meta learner over the same estimators
    meta = LogisticRegression(max_iter=1000, random_state=RANDOM_STATE)
    start = time.time()
    stacking = StackingClassifier(estimators=estimators_for_voting, final_estimator=meta, n_jobs=-1, passthrough=False)
    stacking.fit(X_train, y_train)
    train_time = time.time() - start
    acc, pred_time = evaluate_model(stacking, X_test, y_test)
    results.append({"name": "stacking", "type": "ensemble", "accuracy": float(acc), "train_time": float(train_time), "predict_time": float(pred_time)})
    # NOTE(review): unlike the other ensembles, only the evaluation time is
    # logged for stacking — there is no training-completed log line.
    logging.info(f"stacking评估完成，用时{pred_time:.4f}秒")

    # 4) Report results and save the best model.
    # Fixed row order so the printed table matches the reference screenshot.
    order_for_table = [
        "logistic_regression",
        "random_forest",
        "svm",
        "hard_voting",
        "soft_voting",
        "bagging",
        "pasting",
        "adaboost",
        "gradient_boosting",
        "stacking",
    ]

    name_to_res = {r["name"]: r for r in results}

    # Pick the model with the highest test accuracy.
    results_sorted = sorted(results, key=lambda r: r["accuracy"], reverse=True)
    best_name = results_sorted[0]["name"]

    # Print the results table.
    print("\nClassifier                 Training Time (s)    Prediction Time (s)    Accuracy")
    print("-----------------------    ----------------    -------------------    --------")
    for n in order_for_table:
        if n not in name_to_res:
            continue
        r = name_to_res[n]
        print(f"{n:<24}    {r['train_time']:.4f}{'':<8}    {r['predict_time']:.6f}{'':<5}    {r['accuracy']:.4f}")

    logging.info(f"最终选择最佳模型：{best_name}")

    # Resolve the winning model instance and persist it with joblib.
    chosen = None
    # Base learners live in the fitted dict; ensembles are locals from above.
    if best_name in fitted:
        chosen = fitted[best_name]
    elif best_name == "hard_voting":
        chosen = voting_hard
    elif best_name == "soft_voting":
        chosen = voting_soft
    elif best_name == "stacking":
        chosen = stacking

    dump(chosen, MODEL_PATH)
    with open(META_PATH, "w", encoding="utf-8") as f:
        json.dump({
            "feature": "flat",
            "train_folder": TRAIN_FOLDER,
            "random_state": RANDOM_STATE,
            "best_model": best_name,
            "metrics_sorted": results_sorted,
        }, f, ensure_ascii=False, indent=2)

    logging.info(f"最佳模型已保存：{MODEL_PATH}")
    logging.info(f"元数据已保存：{META_PATH}")


if __name__ == "__main__":
    main()
