import json
import os
import stat
import time
from typing import Any, Dict, List, Optional, Tuple

import joblib
import numpy as np
from PIL import Image
from prettytable import PrettyTable
from sklearn.ensemble import (
	AdaBoostClassifier,
	BaggingClassifier,
	GradientBoostingClassifier,
	RandomForestClassifier,
	StackingClassifier,
	VotingClassifier,
)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from tqdm import tqdm


def now() -> str:
	"""Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
	stamp = time.localtime()
	return time.strftime("%Y-%m-%d %H:%M:%S", stamp)


def log(msg: str) -> None:
	"""Print *msg* to stdout prefixed with a timestamp and an info tag."""
	prefix = f"[{now()}][信息] "
	print(prefix + msg)


def to_gray_flat(img_path: str, size: int = 32) -> np.ndarray:
	"""Load an image as size x size grayscale, flattened to a 1-D float32 vector in [0, 1].

	Args:
		img_path: Path to the image file.
		size: Target width/height in pixels (default 32).

	Returns:
		A float32 array of shape (size*size,) with values scaled into [0, 1].
	"""
	# Image.open is lazy and keeps the file handle open; the original code
	# never closed it, leaking one descriptor per image. The context manager
	# guarantees the handle is released (convert() forces the pixel load
	# before the file is closed).
	with Image.open(img_path) as raw:
		img = raw.convert("L").resize((size, size))
	arr = np.asarray(img, dtype=np.float32) / 255.0
	return arr.reshape(-1)


def iter_labeled_images(root: str, limit_per_class: Optional[int] = None) -> Tuple[List[str], List[int], List[str]]:
	"""Scan a Kaggle cats-vs-dogs directory (filenames start with cat. / dog.).

	Args:
		root: Directory containing the image files.
		limit_per_class: If given, keep at most this many files per class.

	Returns:
		Tuple (paths, labels, class_names) where labels are 0=cat, 1=dog.

	Raises:
		FileNotFoundError: If *root* is not a directory.
		RuntimeError: If no cat.* / dog.* files are found.
	"""
	if not os.path.isdir(root):
		raise FileNotFoundError(f"未找到数据目录: {root}")

	paths: List[str] = []
	labels: List[int] = []
	per_class_count = {"cat": 0, "dog": 0}
	class_map = {"cat": 0, "dog": 1}

	# os.listdir returns entries in arbitrary order; sort so that the subset
	# selected under limit_per_class is deterministic across runs/platforms.
	for fname in sorted(os.listdir(root)):
		fpath = os.path.join(root, fname)
		if not os.path.isfile(fpath):
			continue
		name = fname.lower()
		if name.startswith("cat."):
			cls = "cat"
		elif name.startswith("dog."):
			cls = "dog"
		else:
			continue

		if limit_per_class is not None and per_class_count[cls] >= limit_per_class:
			continue

		paths.append(fpath)
		labels.append(class_map[cls])
		per_class_count[cls] += 1

	if not paths:
		raise RuntimeError(f"在 {root} 未发现 cat.* 或 dog.* 图像")

	class_names = ["cat", "dog"]
	return paths, labels, class_names


class FaissKNNClassifier:
	"""KNN classifier backed by FAISS; falls back to sklearn's KNeighborsClassifier
	when faiss cannot be imported.

	Exposes the minimal sklearn-like API used by this script: fit / predict /
	predict_proba and a classes_ attribute.
	"""

	def __init__(self, n_neighbors: int = 5):
		self.n_neighbors = n_neighbors
		self.use_faiss = False
		self.index = None
		self.y = None
		self.classes_ = np.array([0, 1], dtype=int)
		# Per-training-sample index into classes_ (faiss path only); lets us
		# tally votes with bincount even for non-contiguous label values.
		self._y_idx = None
		self._fallback = None

		try:
			import faiss  # type: ignore

			self._faiss = faiss
			self.use_faiss = True
		except Exception:
			self._faiss = None
			self.use_faiss = False

	def fit(self, X: np.ndarray, y: np.ndarray):
		if self.use_faiss:
			d = X.shape[1]
			index = self._faiss.IndexFlatL2(d)
			index.add(X.astype(np.float32))
			self.index = index
			self.y = y.astype(int)
			# return_inverse maps each raw label to its position in classes_,
			# fixing the previous vote counting which indexed bincount by raw
			# label value and broke for non-contiguous labels (e.g. {0, 2}).
			self.classes_, self._y_idx = np.unique(self.y, return_inverse=True)
		else:
			self._fallback = KNeighborsClassifier(n_neighbors=self.n_neighbors, metric="euclidean")
			self._fallback.fit(X, y)
			self.classes_ = self._fallback.classes_
		return self

	def predict(self, X: np.ndarray) -> np.ndarray:
		if self.use_faiss:
			_, neigh = self.index.search(X.astype(np.float32), self.n_neighbors)
			preds = np.empty(X.shape[0], dtype=self.classes_.dtype)
			for row, neigh_idx in enumerate(neigh):
				# Majority vote over class indices, then map back to labels.
				counts = np.bincount(self._y_idx[neigh_idx], minlength=len(self.classes_))
				preds[row] = self.classes_[np.argmax(counts)]
			return preds
		else:
			return self._fallback.predict(X)

	def predict_proba(self, X: np.ndarray) -> np.ndarray:
		if self.use_faiss:
			_, neigh = self.index.search(X.astype(np.float32), self.n_neighbors)
			proba = np.zeros((X.shape[0], len(self.classes_)), dtype=np.float32)
			for row, neigh_idx in enumerate(neigh):
				counts = np.bincount(self._y_idx[neigh_idx], minlength=len(self.classes_))
				total = counts.sum()
				proba[row] = counts / total if total > 0 else counts
			return proba
		else:
			if hasattr(self._fallback, "predict_proba"):
				return self._fallback.predict_proba(X)
			# Degenerate fallback: one-hot encode hard predictions. Column is
			# the class *index* (searchsorted over sorted classes_), not the
			# raw label value, so non-contiguous labels stay aligned.
			preds = self._fallback.predict(X)
			proba = np.zeros((X.shape[0], len(self.classes_)), dtype=np.float32)
			for i, p in enumerate(preds):
				proba[i, np.searchsorted(self.classes_, p)] = 1.0
			return proba


def build_dataset(train_dir: str, img_size: int = 32, limit_per_class: int = 1000) -> Tuple[np.ndarray, np.ndarray, List[str]]:
	"""Load every labeled image under *train_dir* into a flat float32 feature
	matrix plus an int label vector; also returns the class-name list."""
	paths, labels, class_names = iter_labeled_images(train_dir, limit_per_class=limit_per_class)
	features = np.zeros((len(paths), img_size * img_size), dtype=np.float32)
	for row, img_path in enumerate(tqdm(paths, desc="加载图像", ncols=100)):
		features[row] = to_gray_flat(img_path, size=img_size)
	targets = np.asarray(labels, dtype=int)
	return features, targets, class_names


def evaluate_model(name: str, model, X_train, y_train, X_val, y_val) -> Dict[str, Any]:
	"""Fit *model* on the training split, score it on the validation split,
	and return a record with the fitted model, timings and accuracy."""
	fit_start = time.perf_counter()
	model.fit(X_train, y_train)
	fit_end = time.perf_counter()
	predictions = model.predict(X_val)
	pred_end = time.perf_counter()
	record: Dict[str, Any] = {
		"name": name,
		"model": model,
		"train_time": fit_end - fit_start,
		"pred_time": pred_end - fit_end,
		"acc": accuracy_score(y_val, predictions),
	}
	return record


def main():
	"""Train, tune and compare individual classifiers and ensembles on the
	cats-vs-dogs image set, print a comparison table plus an ensemble-accuracy
	summary, and persist the best model (with metadata) via joblib.
	"""
	base = os.path.dirname(os.path.abspath(__file__))
	data_dir = os.path.join(base, "data", "train")

	IMG_SIZE = 32
	# By default, no longer limit the per-class sample count (a cap far larger
	# than the dataset size is used); the LIMIT_PER_CLASS environment variable
	# can still override this for quick debugging runs.
	LIMIT_PER_CLASS = int(os.environ.get("LIMIT_PER_CLASS", "9999999"))
	TEST_SIZE = 0.2
	RANDOM_STATE = 42

	log(f"读取数据集: {data_dir}")
	X, y, class_names = build_dataset(data_dir, img_size=IMG_SIZE, limit_per_class=LIMIT_PER_CLASS)

	# Single stratified split keeps the cat/dog ratio equal in both folds.
	splitter = StratifiedShuffleSplit(n_splits=1, test_size=TEST_SIZE, random_state=RANDOM_STATE)
	(train_idx, val_idx) = next(splitter.split(X, y))
	X_train, X_val = X[train_idx], X[val_idx]
	y_train, y_val = y[train_idx], y[val_idx]
	log(f"训练集: {X_train.shape}, 验证集: {X_val.shape}")

	results: List[Dict[str, Any]] = []

	# 1) KNN (faiss optional; falls back to sklearn)
	best_knn = None
	best_knn_acc = -1
	for k in [3, 5, 7]:
		knn = FaissKNNClassifier(n_neighbors=k)
		r = evaluate_model(f"faiss_knn_k{k}", knn, X_train, y_train, X_val, y_val)
		results.append(r)
		if r["acc"] > best_knn_acc:
			best_knn_acc = r["acc"]
			best_knn = r["model"]
	log(f"KNN 最佳k={getattr(best_knn, 'n_neighbors', 'N/A')} 验证准确率={best_knn_acc:.4f}")

	# 2) Logistic regression (with standardization and a small grid search)
	lr_pipe = Pipeline([
		("scaler", StandardScaler()),
		("lr", LogisticRegression(max_iter=1000, n_jobs=None))
	])
	lr_grid = {"lr__C": [0.1, 1.0, 3.0]}
	lr_cv = GridSearchCV(lr_pipe, lr_grid, cv=3, n_jobs=-1, verbose=0)
	r_lr = evaluate_model("logistic_regression", lr_cv, X_train, y_train, X_val, y_val)
	best_lr = r_lr["model"].best_estimator_
	results.append({**r_lr, "model": best_lr})
	log(f"LR 最佳参数: {r_lr['model'].best_params_}")

	# 3) Random Forest
	rf = RandomForestClassifier(random_state=RANDOM_STATE)
	rf_grid = {"n_estimators": [100, 200], "max_depth": [None, 10, 20]}
	rf_cv = GridSearchCV(rf, rf_grid, cv=3, n_jobs=-1, verbose=0)
	r_rf = evaluate_model("random_forest", rf_cv, X_train, y_train, X_val, y_val)
	best_rf = r_rf["model"].best_estimator_
	results.append({**r_rf, "model": best_rf})
	log(f"RF 最佳参数: {r_rf['model'].best_params_}")

	# 4) SVM (RBF; probability outputs are required for soft voting/stacking)
	svc_pipe = Pipeline([
		("scaler", StandardScaler()),
		("svc", SVC(kernel="rbf", probability=True))
	])
	svc_grid = {"svc__C": [0.5, 1, 2], "svc__gamma": ["scale", 0.01, 0.001]}
	svc_cv = GridSearchCV(svc_pipe, svc_grid, cv=3, n_jobs=-1, verbose=0)
	r_svm = evaluate_model("svm_rbf", svc_cv, X_train, y_train, X_val, y_val)
	best_svm = r_svm["model"].best_estimator_
	results.append({**r_svm, "model": best_svm})
	log(f"SVM 最佳参数: {r_svm['model'].best_params_}")

	# 5) Voting (hard)
	voting_hard = VotingClassifier(estimators=[
		("lr", best_lr), ("rf", best_rf), ("svc", best_svm)
	], voting="hard")
	r_vh = evaluate_model("voting_hard", voting_hard, X_train, y_train, X_val, y_val)
	results.append(r_vh)

	# 6) Voting (soft)
	voting_soft = VotingClassifier(estimators=[
		("lr", best_lr), ("rf", best_rf), ("svc", best_svm)
	], voting="soft")
	r_vs = evaluate_model("voting_soft", voting_soft, X_train, y_train, X_val, y_val)
	results.append(r_vs)

	# 7) Bagging & pasting (decision tree as the base learner)
	base_tree = DecisionTreeClassifier(max_depth=None, random_state=RANDOM_STATE)
	bagging = BaggingClassifier(
		estimator=base_tree, n_estimators=50, max_samples=0.8, bootstrap=True, random_state=RANDOM_STATE
	)
	r_bag = evaluate_model("bagging", bagging, X_train, y_train, X_val, y_val)
	results.append(r_bag)

	pasting = BaggingClassifier(
		estimator=base_tree, n_estimators=50, max_samples=0.8, bootstrap=False, random_state=RANDOM_STATE
	)
	r_pas = evaluate_model("pasting", pasting, X_train, y_train, X_val, y_val)
	results.append(r_pas)

	# 8) AdaBoost
	ada = AdaBoostClassifier(
		estimator=DecisionTreeClassifier(max_depth=1, random_state=RANDOM_STATE),
		n_estimators=100,
		learning_rate=0.5,
		random_state=RANDOM_STATE,
	)
	r_ada = evaluate_model("adaboost", ada, X_train, y_train, X_val, y_val)
	results.append(r_ada)

	# 9) Gradient Boosting
	gbdt = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1, max_depth=3, random_state=RANDOM_STATE)
	r_gbdt = evaluate_model("gradient_boosting", gbdt, X_train, y_train, X_val, y_val)
	results.append(r_gbdt)

	# 10) Stacking (passthrough adds raw features next to the probabilities)
	stacking = StackingClassifier(
		estimators=[("lr", best_lr), ("rf", best_rf), ("svc", best_svm)],
		final_estimator=LogisticRegression(max_iter=500),
		passthrough=True,
		cv=3,
		n_jobs=-1,
	)
	r_stack = evaluate_model("stacking", stacking, X_train, y_train, X_val, y_val)
	results.append(r_stack)

	# Summarize and display the results
	table = PrettyTable(["模型", "训练时间(s)", "预测时间(s)", "验证准确率"])
	for r in results:
		table.add_row([r["name"], f"{r['train_time']:.2f}", f"{r['pred_time']:.2f}", f"{r['acc']:.4f}"])
	results_sorted = sorted(results, key=lambda d: d["acc"], reverse=True)

	print("\n================ 结果汇总 ================")
	print(table)
	best = results_sorted[0]
	log(f"最佳模型: {best['name']} | 验证准确率={best['acc']:.4f}")

	# Extra summary: explicitly print each ensemble method's accuracy, as required
	name_to_acc = {r["name"]: r["acc"] for r in results}
	def _fmt_acc(key: str) -> str:
		"""Format the accuracy for *key* to 4 decimals, or 'N/A' if missing."""
		acc_val = name_to_acc.get(key)
		return f"{acc_val:.4f}" if isinstance(acc_val, (int, float, np.floating)) else "N/A"

	print("\n—— 集成方法准确率摘要 ——")
	print(f"hard_voting准确率: {_fmt_acc('voting_hard')}")
	print(f"soft_voting准确率: {_fmt_acc('voting_soft')}")
	print(f"Stacking准确率: {_fmt_acc('stacking')}")
	print(f"bagging准确率: {_fmt_acc('bagging')}")
	print(f"pasting准确率: {_fmt_acc('pasting')}")
	print(f"adaboost准确率: {_fmt_acc('adaboost')}")
	# The spec's name "gradient_boost" corresponds to the implementation name "gradient_boosting"
	print(f"gradient_boost准确率: {_fmt_acc('gradient_boosting')}")

	payload = {
		"model_name": best["name"],
		"model": best["model"],
		"class_names": class_names,
		"img_size": IMG_SIZE,
		"created_at": now(),
		"metrics": {k: {kk: (float(vv) if isinstance(vv, (int, float, np.floating)) else vv) for kk, vv in r.items() if kk in ("train_time", "pred_time", "acc")} for k, r in [(r["name"], r) for r in results_sorted]},
	}

	def robust_save(payload_obj, base_dir: str) -> str:
		"""Persist *payload_obj* with joblib, preferring base_dir/best_ensemble.pkl
		and falling back to base_dir/models/ if the primary path cannot be written.
		Returns the path actually written."""
		primary = os.path.join(base_dir, "best_ensemble.pkl")
		tmp_primary = primary + ".tmp"

		def _try_dump_replace(tmp_path: str, final_path: str) -> bool:
			"""Dump to a temp file, then atomically rename it into place."""
			joblib.dump(payload_obj, tmp_path)
			# Atomic replace avoids transient locks held by some tools
			os.replace(tmp_path, final_path)
			return True

		# First attempt: write into the base directory
		try:
			return primary if _try_dump_replace(tmp_primary, primary) else primary
		except Exception:
			# Handle a possibly read-only or locked existing file
			try:
				if os.path.exists(primary):
					os.chmod(primary, stat.S_IWRITE)
					os.remove(primary)
				return primary if _try_dump_replace(tmp_primary, primary) else primary
			except Exception:
				pass

		# Fall back to the models/ directory
		models_dir = os.path.join(base_dir, "models")
		os.makedirs(models_dir, exist_ok=True)
		alt = os.path.join(models_dir, "best_ensemble.pkl")
		tmp_alt = alt + ".tmp"
		_try_dump_replace(tmp_alt, alt)
		return alt

	saved_to = robust_save(payload, base)
	log(f"已保存最佳模型到: {saved_to}")


# Script entry point: run the full training/evaluation pipeline.
if __name__ == "__main__":
	main()

