import argparse
import json
import os
import sys
from pathlib import Path
from typing import List, Tuple, Optional
import warnings

import numpy as np
import pandas as pd
from PIL import Image

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
import joblib

import matplotlib
# Use the non-interactive Agg backend so figures can be rendered and saved
# in headless environments (servers/CI) without a display; must be set
# before importing pyplot.
matplotlib.use("Agg")
import matplotlib.pyplot as plt


def list_images(folder: Path) -> List[Path]:
    """Recursively collect image files under *folder*, sorted by path.

    Recognized extensions: jpg/jpeg/png/bmp/gif/tiff/webp (case-insensitive).
    Returns an empty list when *folder* does not exist.
    """
    allowed = {".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff", ".webp"}
    if not folder.exists():
        return []
    found = [
        Path(root) / name
        for root, _, names in os.walk(folder)
        for name in names
        if Path(name).suffix.lower() in allowed
    ]
    return sorted(found)


def infer_classes_from_subdirs(train_dir: Path) -> Optional[List[str]]:
    """Infer class names from the immediate sub-directories of *train_dir*.

    Returns a sorted list of directory names when at least two exist (one
    folder per class), otherwise ``None``.  A missing *train_dir* also
    returns ``None`` instead of raising, so callers can fall back to
    filename-based labelling.
    """
    if not train_dir.is_dir():
        return None
    # Sort for a deterministic class order; iterdir() order depends on the
    # filesystem and would make runs non-reproducible otherwise.
    subdirs = sorted(d.name for d in train_dir.iterdir() if d.is_dir())
    return subdirs if len(subdirs) >= 2 else None


def load_image(path: Path, size: int = 64, grayscale: bool = False) -> np.ndarray:
    """Load an image, resize to (size, size), scale to [0, 1], and flatten.

    Grayscale images yield ``size*size`` features; RGB yields ``size*size*3``.
    Pixel values are float32 in [0, 1].
    """
    with Image.open(path) as img:
        mode = "L" if grayscale else "RGB"
        resized = img.convert(mode).resize((size, size), Image.BILINEAR)
        # np.asarray copies the pixel data, so closing the image afterwards
        # is safe.
        pixels = np.asarray(resized, dtype=np.float32) / 255.0
    return pixels.reshape(-1)


def build_dataset_by_subdirs(base_dir: Path, classes: List[str], size: int, grayscale: bool,
                             max_per_class: Optional[int], seed: int) -> Tuple[np.ndarray, np.ndarray]:
    """Build (X, y) from ``base_dir/<class>/*`` image folders.

    Each class is optionally subsampled to at most *max_per_class* files
    (sampled without replacement, reproducible via *seed*).  Returns empty
    arrays of the correct feature width when no images are found.
    """
    features: List[np.ndarray] = []
    labels: List[str] = []
    for cls in classes:
        files = list_images(base_dir / cls)
        if max_per_class:
            # A fresh generator per class keeps each class's sampling
            # independent of the class iteration order (matches original).
            rng = np.random.RandomState(seed)
            if len(files) > max_per_class:
                picked = rng.choice(len(files), size=max_per_class, replace=False)
                files = [files[i] for i in picked]
        for path in files:
            features.append(load_image(path, size=size, grayscale=grayscale))
            labels.append(cls)
    if not features:
        n_features = size * size * (1 if grayscale else 3)
        return np.empty((0, n_features)), np.empty((0,))
    return np.vstack(features), np.array(labels)


def build_dataset_by_filenames(base_dir: Path, size: int, grayscale: bool,
                               max_per_class: Optional[int], seed: int) -> Tuple[np.ndarray, np.ndarray]:
    """Build (X, y) from a flat folder, deriving labels from filenames.

    Filenames containing "cat"/"dog" (case-insensitive) are labelled as
    such; otherwise the prefix before the first '.' is used (files with no
    dot are skipped).  Each label group is optionally subsampled to at most
    *max_per_class* files, reproducibly via *seed*.
    """
    def label_from_name(name: str) -> Optional[str]:
        low = name.lower()
        if "cat" in low:
            return "cat"
        if "dog" in low:
            return "dog"
        # Fallback: prefix before the first '.'; None when no extension.
        return low.split(".")[0] if "." in low else None

    # Group files by derived label, preserving discovery order.
    groups: dict[str, list[Path]] = {}
    for path in list_images(base_dir):
        label = label_from_name(path.name)
        if label is not None:
            groups.setdefault(label, []).append(path)

    rng = np.random.RandomState(seed)
    features: List[np.ndarray] = []
    labels: List[str] = []
    for label, group in groups.items():
        chosen = group
        if max_per_class and len(group) > max_per_class:
            picked = rng.choice(len(group), size=max_per_class, replace=False)
            chosen = [group[i] for i in picked]
        for path in chosen:
            features.append(load_image(path, size=size, grayscale=grayscale))
            labels.append(label)
    if not features:
        n_features = size * size * (1 if grayscale else 3)
        return np.empty((0, n_features)), np.empty((0,))
    return np.vstack(features), np.array(labels)


def _stratified_take_per_class(X: np.ndarray, y: np.ndarray, n_train: int, n_test: int, seed: int):
    rng = np.random.RandomState(seed)
    classes = np.unique(y)
    tr_idx_all, te_idx_all = [], []
    for c in classes:
        idx = np.where(y == c)[0]
        if len(idx) == 0:
            continue
        rng.shuffle(idx)
        n_te = min(n_test, len(idx))
        te_idx = idx[:n_te]
        remain = idx[n_te:]
        n_tr = min(n_train, len(remain))
        tr_idx = remain[:n_tr]
        tr_idx_all.append(tr_idx)
        te_idx_all.append(te_idx)
    if tr_idx_all:
        tr_idx_all = np.concatenate(tr_idx_all)
    else:
        tr_idx_all = np.array([], dtype=int)
    if te_idx_all:
        te_idx_all = np.concatenate(te_idx_all)
    else:
        te_idx_all = np.array([], dtype=int)
    return X[tr_idx_all], X[te_idx_all], y[tr_idx_all], y[te_idx_all]


def load_dataset(train_dir: Path, test_dir: Optional[Path], size: int, grayscale: bool,
                 train_per_class: Optional[int], test_per_class: Optional[int], val_size: float, seed: int):
    """Load (X_train, X_test, y_train, y_test) from image folders.

    Labels come from class sub-directories of *train_dir* when present,
    otherwise from filenames.  When *test_dir* is missing, the test split is
    carved out of the training data — either exactly per-class (when
    train/test per-class counts are given) or by the *val_size* ratio.

    Raises:
        RuntimeError: if no valid images could be read from *train_dir*.
    """
    classes = infer_classes_from_subdirs(train_dir)
    # No separate test set: draw (train+test) samples per class from the
    # training directory first, then allocate them exactly between splits.
    if not (test_dir and test_dir.exists()):
        total_per_class = None
        if (train_per_class is not None) or (test_per_class is not None):
            # If only one count is provided, treat the other as 0.
            total_per_class = (train_per_class or 0) + (test_per_class or 0)
            if total_per_class == 0:
                total_per_class = None
        if classes is not None:
            X_all, y_all = build_dataset_by_subdirs(train_dir, classes, size, grayscale, total_per_class, seed)
        else:
            X_all, y_all = build_dataset_by_filenames(train_dir, size, grayscale, total_per_class, seed)
        if X_all.shape[0] == 0:
            raise RuntimeError(f"未在 {train_dir} 读取到有效图片，请检查目录结构与文件名。")
        # Without exact counts, fall back to the usual ratio-based split.
        if (train_per_class is None) and (test_per_class is None):
            X_train, X_test, y_train, y_test = train_test_split(
                X_all, y_all, test_size=val_size, stratify=y_all, random_state=seed
            )
        else:
            X_train, X_test, y_train, y_test = _stratified_take_per_class(
                X_all, y_all, n_train=train_per_class or 0, n_test=test_per_class or 0, seed=seed
            )
        return X_train, X_test, y_train, y_test

    # Separate test set available: sample each directory with its own
    # per-class limit.
    if classes is not None:
        X, y = build_dataset_by_subdirs(train_dir, classes, size, grayscale, train_per_class, seed)
    else:
        X, y = build_dataset_by_filenames(train_dir, size, grayscale, train_per_class, seed)

    if X.shape[0] == 0:
        raise RuntimeError(f"未在 {train_dir} 读取到有效图片，请检查目录结构与文件名。")

    # Real (held-out) test set.
    if classes is not None:
        X_test, y_test = build_dataset_by_subdirs(test_dir, classes, size, grayscale, test_per_class, seed)
    else:
        X_test, y_test = build_dataset_by_filenames(test_dir, size, grayscale, test_per_class, seed)
    # If the test set has no usable labels (common for unlabeled test dirs),
    # fall back to a ratio-based split of the training data.
    if X_test.shape[0] == 0 or y_test.shape[0] == 0:
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=val_size, stratify=y, random_state=seed
        )
    else:
        X_train, y_train = X, y
    return X_train, X_test, y_train, y_test


def save_df_as_table_png(df: pd.DataFrame, path: Path, title: str = "LazyPredict Results"):
    """Render *df* as a matplotlib table and save it as a PNG at *path*.

    Parent directories are created as needed; figure height scales with the
    number of rows so long tables stay readable.
    """
    height = max(4, 0.4 * (len(df) + 1))
    fig, ax = plt.subplots(figsize=(12, height))
    ax.axis('off')
    ax.set_title(title, fontsize=14, pad=12)
    table = ax.table(cellText=df.values, colLabels=df.columns, loc='center')
    table.auto_set_font_size(False)
    table.set_fontsize(8)
    table.scale(1, 1.3)
    fig.tight_layout()
    path.parent.mkdir(parents=True, exist_ok=True)
    fig.savefig(path.as_posix(), dpi=200, bbox_inches='tight')
    plt.close(fig)


def resolve_estimator(name: str, use_gpu: bool = False):
    """Return a fresh estimator instance for classifier *name*, or None.

    Covers the common sklearn classifiers evaluated by LazyPredict plus the
    optional XGBoost/LightGBM ones.  Returns ``None`` when the name is
    unknown or the backing library is not installed, so callers can fall
    back to another model.

    Args:
        name: Class name as reported by LazyPredict (e.g. 'SVC').
        use_gpu: Try GPU-enabled variants for XGBoost/LightGBM.
    """
    # Optional external libraries are resolved first; they do not need sklearn.
    if name == 'XGBClassifier':
        try:
            from xgboost import XGBClassifier  # type: ignore
            if use_gpu:
                return XGBClassifier(tree_method='gpu_hist', predictor='gpu_predictor')
            return XGBClassifier()
        except Exception:
            return None
    if name == 'LGBMClassifier':
        try:
            from lightgbm import LGBMClassifier  # type: ignore
            if use_gpu:
                # Needs a GPU build of LightGBM; fall back to CPU otherwise.
                try:
                    return LGBMClassifier(device='gpu')
                except Exception:
                    warnings.warn('当前 LightGBM 可能不支持GPU，已回退CPU。')
                    return LGBMClassifier()
            return LGBMClassifier()
        except Exception:
            return None

    try:
        from sklearn.linear_model import LogisticRegression, RidgeClassifier, RidgeClassifierCV, SGDClassifier, Perceptron, PassiveAggressiveClassifier
        from sklearn.neighbors import KNeighborsClassifier, NearestCentroid
        from sklearn.svm import SVC, NuSVC, LinearSVC
        from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, BaggingClassifier
        from sklearn.naive_bayes import GaussianNB, BernoulliNB
        from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
        from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
        from sklearn.calibration import CalibratedClassifierCV
        from sklearn.semi_supervised import LabelPropagation, LabelSpreading
        from sklearn.dummy import DummyClassifier
    except Exception:
        # Previously this swallowed the failure silently and the factory
        # lambdas below raised NameError at call time; without sklearn no
        # estimator can be built, so return None explicitly.
        return None

    mapping = {
        'LogisticRegression': lambda: LogisticRegression(max_iter=2000),
        'RidgeClassifier': lambda: RidgeClassifier(),
        'RidgeClassifierCV': lambda: RidgeClassifierCV(),
        'SGDClassifier': lambda: SGDClassifier(random_state=0),
        'Perceptron': lambda: Perceptron(random_state=0),
        'PassiveAggressiveClassifier': lambda: PassiveAggressiveClassifier(random_state=0),
        'KNeighborsClassifier': lambda: KNeighborsClassifier(),
        'SVC': lambda: SVC(probability=True, random_state=0),
        'NuSVC': lambda: NuSVC(probability=True, random_state=0),
        'LinearSVC': lambda: LinearSVC(),
        'RandomForestClassifier': lambda: RandomForestClassifier(random_state=0),
        'ExtraTreesClassifier': lambda: ExtraTreesClassifier(random_state=0),
        'AdaBoostClassifier': lambda: AdaBoostClassifier(random_state=0),
        'BaggingClassifier': lambda: BaggingClassifier(random_state=0),
        'GaussianNB': lambda: GaussianNB(),
        'BernoulliNB': lambda: BernoulliNB(),
        'DecisionTreeClassifier': lambda: DecisionTreeClassifier(random_state=0),
        'ExtraTreeClassifier': lambda: ExtraTreeClassifier(random_state=0),
        'LinearDiscriminantAnalysis': lambda: LinearDiscriminantAnalysis(),
        'QuadraticDiscriminantAnalysis': lambda: QuadraticDiscriminantAnalysis(),
        'CalibratedClassifierCV': lambda: CalibratedClassifierCV(),
        'NearestCentroid': lambda: NearestCentroid(),
        'LabelPropagation': lambda: LabelPropagation(),
        'LabelSpreading': lambda: LabelSpreading(),
        'DummyClassifier': lambda: DummyClassifier(strategy='most_frequent'),
    }

    factory = mapping.get(name)
    return factory() if factory else None


def main():
    """CLI entry point: benchmark many classifiers on cats vs dogs, save the best.

    Pipeline:
      1. Load and split the image data.
      2. Run LazyPredict's LazyClassifier over the model set (optionally
         substituting GPU XGBoost/LightGBM).
      3. Rank models by a chosen metric and export CSV/PNG reports.
      4. Refit the top model on all data and persist it with metadata.
      5. Print a smoke-test accuracy (not a generalization estimate).
    """
    parser = argparse.ArgumentParser(description="Train Cats vs Dogs with LazyPredict and save best model")
    parser.add_argument('--train_dir', type=str, default='data/train')
    parser.add_argument('--test_dir', type=str, default='data/test')
    parser.add_argument('--img_size', type=int, default=64)
    parser.add_argument('--grayscale', action='store_true', help='Use grayscale instead of RGB')
    parser.add_argument('--per_class', type=int, default=None, help='[兼容] 同时设置训练/测试每类数量')
    parser.add_argument('--train_per_class', type=int, default=100, help='训练集每类最大样本数（默认每类100，共200张）')
    parser.add_argument('--test_per_class', type=int, default=100, help='测试集每类最大样本数（默认每类100，共200张）')
    parser.add_argument('--max_per_class', type=int, default=None, help='[兼容参数] 等价于 --per_class')
    parser.add_argument('--val_size', type=float, default=0.2)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--results_dir', type=str, default='results')
    parser.add_argument('--models_dir', type=str, default='models')
    parser.add_argument('--top_k', type=int, default=30, help='Rows to keep in screenshot to control height')
    parser.add_argument('--gpu_only', action='store_true', help='仅评测GPU友好模型以加速（XGB/LGBM）')
    parser.add_argument('--no_gpu', action='store_true', help='关闭GPU尝试，全部用CPU（默认）')
    parser.add_argument('--use_gpu', action='store_true', help='启用GPU（覆盖默认CPU模式）')
    parser.add_argument('--list_models', action='store_true', help='仅列出将要评测的模型并退出')
    args = parser.parse_args()

    train_dir = Path(args.train_dir)
    test_dir = Path(args.test_dir) if args.test_dir else None

    print(f"[1/5] 加载数据: {train_dir} (测试集: {test_dir}) …")
    # Backward compatibility: the legacy --max_per_class/--per_class flags
    # override both per-split sample counts.
    if args.max_per_class is not None:
        args.per_class = args.max_per_class
    if args.per_class is not None:
        args.train_per_class = args.per_class
        args.test_per_class = args.per_class

    X_train, X_test, y_train, y_test = load_dataset(
        train_dir, test_dir, size=args.img_size, grayscale=args.grayscale,
        train_per_class=args.train_per_class, test_per_class=args.test_per_class,
        val_size=args.val_size, seed=args.seed
    )
    print(f"样本数: 训练 {X_train.shape[0]}, 测试 {X_test.shape[0]}, 特征维度 {X_train.shape[1]}")

    print("[2/5] LazyPredict 训练并评估多模型 … 这可能需要几分钟")
    try:
        from lazypredict.Supervised import LazyClassifier, CLASSIFIERS
    except Exception as e:
        print("未找到 lazypredict，请先安装: pip install lazypredict")
        raise e

    scaler = StandardScaler()
    X_train_s = scaler.fit_transform(X_train)
    X_test_s = scaler.transform(X_test)

    # Build the full model set, swapping in GPU XGB/LGBM variants when available.
    # CPU by default; GPU is used only when --use_gpu is set and --no_gpu is not.
    use_gpu = bool(args.use_gpu and not args.no_gpu)
    classifiers_param = None
    device_map: dict[str, str] = {}

    # Factory for a GPU-configured XGBoost classifier; __name__ is forced so
    # LazyPredict reports it under the standard model name.
    def _xgb_gpu_fn(random_state=None):
        from xgboost import XGBClassifier
        params = dict(tree_method='gpu_hist', predictor='gpu_predictor')
        if random_state is not None:
            params['random_state'] = random_state
        return XGBClassifier(**params)
    _xgb_gpu_fn.__name__ = 'XGBClassifier'

    # Factory for a GPU-configured LightGBM classifier with CPU fallback.
    def _lgbm_gpu_fn(random_state=None):
        from lightgbm import LGBMClassifier
        params = {}  # NOTE(review): unused — kept as-is
        # Some builds enable GPU via device='gpu', others via
        # gpu_platform_id/gpu_device_id.
        # Try the generic parameter first; fall back to CPU on failure.
        try:
            return LGBMClassifier(device='gpu', random_state=random_state)
        except Exception:
            return LGBMClassifier(random_state=random_state)
    _lgbm_gpu_fn.__name__ = 'LGBMClassifier'

    if args.gpu_only:
        gpu_callables = []
        if use_gpu:
            try:
                import xgboost  # noqa: F401
                gpu_callables.append(_xgb_gpu_fn)
                device_map['XGBClassifier'] = 'GPU'
            except Exception:
                device_map['XGBClassifier'] = 'CPU'
            try:
                import lightgbm  # noqa: F401
                gpu_callables.append(_lgbm_gpu_fn)
                device_map['LGBMClassifier'] = 'GPU'
            except Exception:
                device_map['LGBMClassifier'] = 'CPU'
        else:
            # CPU builds of XGB/LGBM.
            try:
                from xgboost import XGBClassifier as _X
                gpu_callables.append(_X)
                device_map['XGBClassifier'] = 'CPU'
            except Exception:
                pass
            try:
                from lightgbm import LGBMClassifier as _L
                gpu_callables.append(_L)
                device_map['LGBMClassifier'] = 'CPU'
            except Exception:
                pass
        if gpu_callables:
            classifiers_param = gpu_callables
        else:
            print('[提示] 未检测到可用的GPU友好模型或相关库，已回退到默认全部模型。')

    if classifiers_param is None:
        # Start from the default CLASSIFIERS list, substituting the GPU
        # factories for XGB/LGBM when GPU use is enabled.
        callables = []
        for name, cls in CLASSIFIERS:
            if name == 'XGBClassifier' and use_gpu:
                try:
                    import xgboost  # noqa: F401
                    callables.append(_xgb_gpu_fn)
                    device_map['XGBClassifier'] = 'GPU'
                except Exception:
                    callables.append(cls)
                    device_map['XGBClassifier'] = 'CPU'
            elif name == 'LGBMClassifier' and use_gpu:
                try:
                    import lightgbm  # noqa: F401
                    callables.append(_lgbm_gpu_fn)
                    device_map['LGBMClassifier'] = 'GPU'
                except Exception:
                    callables.append(cls)
                    device_map['LGBMClassifier'] = 'CPU'
            else:
                callables.append(cls)
                if name not in device_map:
                    device_map[name] = 'CPU'
        classifiers_param = callables

    # --list_models: print the model names and exit without training.
    if args.list_models:
        names = []
        for c in classifiers_param:
            try:
                names.append(getattr(c, '__name__', type(c).__name__))
            except Exception:
                names.append(str(c))
        print("将要评测的模型 (顺序与LazyPredict一致/可能略有差异):")
        for n in names:
            print(n)
        return

    clf = LazyClassifier(verbose=0, ignore_warnings=True, random_state=args.seed, classifiers=classifiers_param)
    models_df, predictions = clf.fit(X_train_s, X_test_s, y_train, y_test)
    # Annotate each model row with the device it ran on.
    try:
        models_df['Device'] = [device_map.get(name, 'CPU') for name in models_df.index]
    except Exception:
        pass

    # Pick the ranking metric: prefer Accuracy; if it is all zero/NaN, fall
    # back to Balanced Accuracy, F1 Score, then ROC AUC.
    candidate_metrics = ['Accuracy', 'Balanced Accuracy', 'F1 Score', 'ROC AUC']
    chosen_metric = None
    for m in candidate_metrics:
        if m in models_df.columns:
            s = pd.to_numeric(models_df[m], errors='coerce')
            if s.notna().any() and (s.fillna(0).max() > 0):
                chosen_metric = m
                break
    if chosen_metric is None:
        # Last resort: if none is usable/non-zero, still use Accuracy when
        # present, otherwise the first column.
        chosen_metric = 'Accuracy' if 'Accuracy' in models_df.columns else models_df.columns[0]

    models_df = models_df.sort_values(by=chosen_metric, ascending=False)
    top_df = models_df.head(args.top_k)

    results_dir = Path(args.results_dir)
    results_dir.mkdir(parents=True, exist_ok=True)
    csv_path = results_dir / 'lazyclassifier_results.csv'
    png_path = results_dir / 'lazyclassifier_results.png'
    models_df.to_csv(csv_path, index=True)
    save_df_as_table_png(top_df.reset_index().rename(columns={'index': 'Model'}), png_path,
                         title=f'LazyPredict - Cats vs Dogs (Top by {chosen_metric})')
    # Print the table to the console for convenient screenshots.
    try:
        pd.set_option('display.max_rows', None)
        pd.set_option('display.max_columns', None)
        pd.set_option('display.width', 160)
        pd.set_option('display.colheader_justify', 'center')
        printable = models_df.copy()
        for col in ['Accuracy', 'Balanced Accuracy', 'ROC AUC', 'F1 Score']:
            if col in printable.columns:
                printable[col] = pd.to_numeric(printable[col], errors='coerce').map(lambda x: f"{x:.4f}" if pd.notna(x) else "-")
        if 'Time Taken' in printable.columns:
            printable['Time Taken'] = pd.to_numeric(printable['Time Taken'], errors='coerce').map(lambda x: f"{x:.2f}" if pd.notna(x) else "-")
        print("\n模型对比（按", chosen_metric, "排序）：")
        print(printable.to_string())
    except Exception as _:
        pass
    print(f"结果已保存: {csv_path} 与 {png_path}")

    # Pick the best model's name (top row after sorting).
    best_name = top_df.index[0]
    best_score = float(top_df.iloc[0][chosen_metric]) if chosen_metric in top_df.columns else None
    print(f"[3/5] 最优模型: {best_name} ({chosen_metric}={best_score})")

    print("[4/5] 重新拟合最优模型并保存 …")
    est = resolve_estimator(best_name, use_gpu=use_gpu or args.gpu_only)
    if est is None:
        print(f"警告: 无法构造 {best_name} 对应的估计器，改用 LogisticRegression 作为备选保存。")
        from sklearn.linear_model import LogisticRegression
        est = LogisticRegression(max_iter=2000)

    pipeline = Pipeline([
        ("scaler", StandardScaler()),
        ("estimator", est),
    ])

    X_all = np.vstack([X_train, X_test])
    y_all = np.concatenate([y_train, y_test])
    pipeline.fit(X_all, y_all)

    models_dir = Path(args.models_dir)
    models_dir.mkdir(parents=True, exist_ok=True)
    model_path = models_dir / 'best_model.pkl'
    joblib.dump(pipeline, model_path)

    meta = {
        "best_model_name": best_name,
        "ranking_metric": chosen_metric,
        "best_score": best_score,
        "img_size": args.img_size,
        "grayscale": args.grayscale,
        "max_per_class": args.max_per_class,
        "random_state": args.seed,
    }
    with open(results_dir / 'best_model.json', 'w', encoding='utf-8') as f:
        json.dump(meta, f, ensure_ascii=False, indent=2)
    print(f"模型已保存到: {model_path}")

    # Report a quick score on the test set. The refit pipeline was trained on
    # train+test, so this is a smoke check only, not a generalization estimate.
    y_pred = pipeline.predict(X_test)
    print(f"[5/5] 运行验证(非严格评估) Accuracy: {accuracy_score(y_test, y_pred):.4f}")


# Script entry point; main() returns None, so sys.exit() yields exit code 0.
if __name__ == '__main__':
    sys.exit(main())
