import os
import time
import logging
import warnings
from pathlib import Path

import numpy as np
import joblib
try:
    import faiss  # optional: if unavailable, fall back to a NumPy implementation of L2 normalization
    def l2_normalize(X: np.ndarray):
        """L2-normalize the rows of X in place via faiss.

        NOTE(review): faiss.normalize_L2 expects a C-contiguous float32
        matrix — the caller appears to cast with astype('float32') first;
        confirm before passing other dtypes.
        """
        faiss.normalize_L2(X)
except Exception:  # noqa: BLE001
    faiss = None  # type: ignore
    def l2_normalize(X: np.ndarray):
        """L2-normalize the rows of X in place (pure-NumPy fallback).

        The small epsilon guards against division by zero for all-zero rows.
        """
        norms = np.linalg.norm(X, axis=1, keepdims=True) + 1e-12
        X /= norms
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score

# 基学习器与集成学习
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier, GradientBoostingClassifier, VotingClassifier, StackingClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.exceptions import ConvergenceWarning

from util import createXY

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Globally suppress non-critical warnings to keep training logs free of noise
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=ConvergenceWarning)


def ensure_dir(path: str):
    """Create *path*, including any missing parents; no-op if it already exists."""
    os.makedirs(path, exist_ok=True)


def build_models():
    """Build the dictionary of candidate classifiers to benchmark.

    Every model is wrapped in a Pipeline containing a StandardScaler for
    numerical stability. Tree-based ensembles use with_mean=False, which is
    harmless for trees and safer for sparse/high-dimensional features.

    Returns:
        dict[str, Pipeline]: mapping of model name -> unfitted sklearn Pipeline.
    """
    models = {}

    models['logistic_regression'] = Pipeline([
        ('scaler', StandardScaler()),
        ('clf', LogisticRegression(max_iter=500, n_jobs=None))  # CPU-only
    ])

    models['random_forest'] = Pipeline([
        ('scaler', StandardScaler(with_mean=False)),  # safer for sparse/high-dim features (harmless for trees)
        ('clf', RandomForestClassifier(n_estimators=200, n_jobs=-1, random_state=2025))
    ])

    # Linear SVM — avoids the very long training time of an RBF SVC.
    models['svm'] = Pipeline([
        ('scaler', StandardScaler()),
        # dual='auto' silences the sklearn FutureWarning and adapts to the
        # relationship between sample count and feature dimensionality.
        ('clf', LinearSVC(max_iter=5000, random_state=2025, dual='auto'))
    ])

    models['bagging'] = Pipeline([
        ('scaler', StandardScaler(with_mean=False)),
        ('clf', BaggingClassifier(
            estimator=DecisionTreeClassifier(max_depth=None, random_state=2025),
            n_estimators=100,
            n_jobs=-1,
            random_state=2025,
            bootstrap=True
        ))
    ])

    # Pasting: like bagging but samples WITHOUT replacement.
    models['pasting'] = Pipeline([
        ('scaler', StandardScaler(with_mean=False)),
        ('clf', BaggingClassifier(
            estimator=DecisionTreeClassifier(max_depth=None, random_state=2025),
            n_estimators=100,
            n_jobs=-1,
            random_state=2025,
            bootstrap=False
        ))
    ])

    models['adaboost'] = Pipeline([
        ('scaler', StandardScaler(with_mean=False)),
        ('clf', AdaBoostClassifier(n_estimators=200, learning_rate=0.5, random_state=2025))
    ])

    models['gradient_boosting'] = Pipeline([
        ('scaler', StandardScaler(with_mean=False)),
        ('clf', GradientBoostingClassifier(random_state=2025))
    ])

    # Base estimators for soft voting / stacking — all support predict_proba.
    est_soft = [
        ('lr', LogisticRegression(max_iter=500, n_jobs=None, random_state=2025)),
        ('rf', RandomForestClassifier(n_estimators=200, n_jobs=-1, random_state=2025)),
        ('gb', GradientBoostingClassifier(random_state=2025)),
    ]

    models['hard_voting'] = Pipeline([
        ('scaler', StandardScaler()),
        ('clf', VotingClassifier(estimators=[
            ('lr', LogisticRegression(max_iter=500, n_jobs=None, random_state=2025)),
            ('rf', RandomForestClassifier(n_estimators=200, n_jobs=-1, random_state=2025)),
            # FIX: dual='auto' was set on the standalone 'svm' pipeline but
            # missing here, so this inner LinearSVC still emitted the sklearn
            # FutureWarning the rest of the file works to avoid.
            ('svm', LinearSVC(max_iter=5000, random_state=2025, dual='auto'))
        ], voting='hard'))
    ])

    models['soft_voting'] = Pipeline([
        ('scaler', StandardScaler()),
        ('clf', VotingClassifier(estimators=est_soft, voting='soft'))
    ])

    # Stacking: restricted to base classifiers that can output probabilities.
    models['stacking'] = Pipeline([
        ('scaler', StandardScaler()),
        ('clf', StackingClassifier(
            estimators=est_soft,
            final_estimator=LogisticRegression(max_iter=500, n_jobs=None, random_state=2025),
            passthrough=False
        ))
    ])

    return models


def format_table(rows):
    """Render benchmark results as an aligned plain-text table.

    Args:
        rows: iterable of dicts with keys 'name', 'train_time', 'pred_time', 'acc'.

    Returns:
        str: header line, a dashed rule, and one formatted line per row,
        joined with newlines.
    """
    header = f"{'Classifier':<20} {'Training Time (s)':>16} {'Prediction Time (s)':>20} {'Accuracy':>10}"
    body = [
        f"{row['name']:<20} {row['train_time']:>16.4f} {row['pred_time']:>20.6f} {row['acc']:>10.4f}"
        for row in rows
    ]
    return '\n'.join([header, '-' * len(header), *body])


def main():
    """End-to-end benchmark: load features, train every model, report, save the best."""
    logging.info("读取图像，生成 X 和 y")
    cache_dir = '.cache_flat'
    ensure_dir(cache_dir)

    X, y = createXY(train_folder=os.path.join('data', 'train'), dest_folder=cache_dir, method='flat')
    X = np.array(X).astype('float32')
    y = np.array(y)

    # Row-normalize for consistency of any distance-based computation;
    # per-pipeline StandardScaler handles the rest (trees don't require it).
    l2_normalize(X)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=2025, stratify=y)

    results = []
    best_name, best_acc, best_model = None, -1.0, None

    for name, model in build_models().items():
        fit_start = time.time()
        model.fit(X_train, y_train)
        train_time = time.time() - fit_start

        predict_start = time.time()
        y_pred = model.predict(X_test)
        elapsed = time.time() - predict_start
        # Per-sample prediction latency; guard against an empty test split.
        pred_time = elapsed / max(len(X_test), 1)

        acc = accuracy_score(y_test, y_pred)
        logging.info(f"{name} 模型训练评估完成，用时{train_time:.3f}秒。")

        results.append({
            'name': name,
            'train_time': train_time,
            'pred_time': pred_time,
            'acc': acc
        })

        # Keep the first model that strictly beats the current best accuracy.
        if acc > best_acc:
            best_name, best_acc, best_model = name, acc, model

    # Log the summary table, sorted by accuracy (best first).
    logging.info("\n" + format_table(sorted(results, key=lambda r: r['acc'], reverse=True)))

    # Persist the best fitted pipeline along with its label map and feature type.
    ensure_dir('models')
    save_path = os.path.join('models', 'best_model.joblib')
    joblib.dump({'model': best_model, 'label_map': {0: 'cat', 1: 'dog'}, 'feature': 'flat'}, save_path)
    logging.info(f"最佳模型已保存: {save_path} | 模型: {best_name} | 准确率: {best_acc:.4f}")


# Script entry point: run the full benchmark only when executed directly.
if __name__ == '__main__':
    main()
