"""项目五：使用 LazyPredict 对猫狗分类进行批量模型评估并保存最佳模型。

替换原先的命令行 KNN 搜索模式，不再使用 GPU/VGG 参数，统一 flat+CPU。
"""

import os
import time
import warnings
import logging
import pkgutil
import types
from pathlib import Path

import numpy as np
import joblib
from sklearn.model_selection import train_test_split

from util import createXY

# Configure root logging for the whole run and silence noisy library warnings.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
warnings.filterwarnings("ignore")

# ---- Torch stub 处理 ----
# Windows 上 lazypredict 导入 sklearn 时会尝试触发 array_api_compat 对 torch 的探测。
# 若环境有残缺的 torch 安装导致 DLL 加载失败，可通过插入一个最小 stub 避免真实导入。
# 条件：只有在 import torch 失败且出现 WinError 127 时才使用 stub。
try:
    import torch  # noqa: F401
except Exception:
    if 'torch' not in globals():
        stub = types.ModuleType('torch')
        # 提供必要的属性占位，避免下游访问 AttributeError。
        for name in ['__version__', 'device', 'Tensor']:
            setattr(stub, name, None)
        import sys
        sys.modules['torch'] = stub
        logging.info('已注入 torch stub 以规避 DLL 加载错误。')

# Deferred import of LazyClassifier so the torch stub (if injected) is already
# in effect.
try:
    # Before importing LazyPredict, patch sklearn.utils.discovery.all_estimators
    # to return only a curated list of classic classifiers.  This keeps
    # sklearn's estimator discovery from importing newer internal modules
    # (e.g. sklearn.frozen) that cause compatibility problems here.
    from sklearn.dummy import DummyClassifier
    from sklearn.ensemble import (
        AdaBoostClassifier,
        BaggingClassifier,
        ExtraTreesClassifier,
        GradientBoostingClassifier,
        RandomForestClassifier,
    )
    from sklearn.linear_model import (
        LogisticRegression,
        PassiveAggressiveClassifier,
        Perceptron,
        RidgeClassifier,
        SGDClassifier,
    )
    from sklearn.naive_bayes import BernoulliNB, GaussianNB
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.svm import LinearSVC
    from sklearn.tree import DecisionTreeClassifier

    from sklearn.utils import discovery as _discovery

    # (name, class) pairs allowed through the patched discovery call.
    _SAFE_CLASSIFIERS = [
        ("KNeighborsClassifier", KNeighborsClassifier),
        ("LogisticRegression", LogisticRegression),
        ("RandomForestClassifier", RandomForestClassifier),
        ("LinearSVC", LinearSVC),
        # Was a `(name, None)` placeholder; RidgeClassifier is importable from
        # sklearn.linear_model, so include the real class.
        ("RidgeClassifier", RidgeClassifier),
        ("SGDClassifier", SGDClassifier),
        ("ExtraTreesClassifier", ExtraTreesClassifier),
        ("DecisionTreeClassifier", DecisionTreeClassifier),
        ("GradientBoostingClassifier", GradientBoostingClassifier),
        ("AdaBoostClassifier", AdaBoostClassifier),
        ("BaggingClassifier", BaggingClassifier),
        ("GaussianNB", GaussianNB),
        ("BernoulliNB", BernoulliNB),
        ("Perceptron", Perceptron),
        ("PassiveAggressiveClassifier", PassiveAggressiveClassifier),
        ("DummyClassifier", DummyClassifier),
    ]

    def _patched_all_estimators(type_filter=None):
        """Return only the vetted classifiers; ``type_filter`` is ignored."""
        return [(name, cls) for name, cls in _SAFE_CLASSIFIERS if cls is not None]

    _discovery.all_estimators = _patched_all_estimators  # type: ignore[attr-defined]

    from lazypredict.Supervised import LazyClassifier
except ImportError as exc:
    # Chain explicitly so the original failure stays visible in the traceback.
    raise ImportError('lazypredict 安装/版本不兼容，请确保使用 0.2.12 并与 scikit-learn 1.3.x 兼容。') from exc


def ensure_dir(p: str):
    """Create directory *p* (and any missing parents); no-op if it exists."""
    target = Path(p)
    target.mkdir(parents=True, exist_ok=True)


def _load_dataset(cache_dir: str):
    """Extract flat features for the training set; return (X, y) as ndarrays."""
    ensure_dir(cache_dir)
    X, y = createXY(train_folder=os.path.join('data', 'train'), dest_folder=cache_dir, method='flat')
    return np.asarray(X, dtype=np.float32), np.asarray(y)


def _recover_trained_model(clf, best_name):
    """Fetch the fitted estimator named *best_name* from a LazyClassifier.

    The attribute holding fitted models differs across lazypredict versions,
    so several candidate names are tried.  Returns None when nothing matches.
    """
    for attr in ['trained_models', 'models', 'models_']:
        maybe = getattr(clf, attr, None)
        if isinstance(maybe, dict) and best_name in maybe:
            return maybe[best_name]
    return None


def main():
    """Evaluate many classifiers via LazyPredict (flat features, CPU) and
    persist the leaderboard plus the best fitted model."""
    logging.info('开始 LazyPredict 训练（flat + CPU）')
    X, y = _load_dataset('.cache_flat')

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.25, random_state=2025, stratify=y)

    # Discovery is already restricted by the patched all_estimators, so a
    # plain LazyClassifier suffices; `include` is not passed because some
    # versions do not support it.
    clf = LazyClassifier(verbose=0, ignore_warnings=True, custom_metric=None)
    t0 = time.time()
    models_df, predictions_df = clf.fit(X_train, X_test, y_train, y_test)
    logging.info('LazyPredict 评估完成，总耗时 %.2f 秒', time.time() - t0)

    ensure_dir('models')
    leaderboard_path = os.path.join('models', 'lazy_leaderboard.csv')
    models_df.to_csv(leaderboard_path, encoding='utf-8-sig')
    logging.info('排行榜已保存: %s', leaderboard_path)

    # Pick the best model by accuracy (model names are the DataFrame index).
    best_name = models_df.sort_values(by='Accuracy', ascending=False).index[0]
    logging.info('最佳模型: %s', best_name)

    trained = _recover_trained_model(clf, best_name)
    if trained is None:
        # Fallback: retrain a fresh model of the same name.  Use the curated
        # _SAFE_CLASSIFIERS list rather than sklearn.utils.all_estimators —
        # the latter is a separate binding NOT covered by the discovery patch
        # and would trigger the full (unsafe) estimator scan this module
        # deliberately avoids.
        name_to_cls = {name: cls for name, cls in _SAFE_CLASSIFIERS if cls is not None}
        if best_name in name_to_cls:
            trained = name_to_cls[best_name]()
        else:
            best_name = 'KNeighborsClassifier'
            trained = KNeighborsClassifier(n_neighbors=5)
        trained.fit(X_train, y_train)

    save_path = os.path.join('models', 'lazy_best_model.joblib')
    joblib.dump({'model': trained, 'label_map': {0: 'cat', 1: 'dog'}, 'feature': 'flat', 'best_name': best_name}, save_path)
    logging.info('最佳模型已保存: %s | 模型: %s', save_path, best_name)

    # Log a trimmed leaderboard (top 10 rows, key metrics only).
    head_df = models_df.head(10)[['Accuracy', 'Balanced Accuracy', 'ROC AUC', 'F1 Score', 'Time Taken']]
    logging.info('\n' + head_df.to_string())


if __name__ == '__main__':
    main()
