"""猫狗识别项目 - 集成学习模型训练脚本 (重构版)

保持原有功能与输出不变：
- 读取并预处理图片
- 标准化、PCA降维
- 构建 Faiss 索引
- 构建并训练投票集成模型（硬/软投票），评估并选择表现更好的模型
- 保存模型、标准化器、PCA、索引与训练标签
"""

from typing import Tuple, List
import os
import numpy as np
from PIL import Image
import joblib
import faiss
from sklearn.ensemble import VotingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import warnings

# Suppress all warnings (e.g. sklearn convergence warnings) so the progress
# output printed by the pipeline below stays readable.
warnings.filterwarnings("ignore")


def load_and_preprocess_images(data_dir: str, image_size: Tuple[int, int] = (224, 224)) -> Tuple[np.ndarray, np.ndarray]:
    """Load and preprocess the cat/dog images found in *data_dir*.

    Only files ending in '.jpg' are read; the label comes from the filename
    prefix ('cat' -> 0, 'dog' -> 1).  Files with neither prefix are skipped
    entirely so the image rows and labels always stay aligned.

    Args:
        data_dir: Directory containing the training images.
        image_size: Target (width, height) each image is resized to.

    Returns:
        Tuple (images, labels): images is a float32 array with one flattened,
        [0, 1]-scaled RGB image per row; labels is an int32 array.
    """
    images: List[np.ndarray] = []
    labels: List[int] = []

    image_files = [f for f in os.listdir(data_dir) if f.endswith('.jpg')]
    print(f"找到 {len(image_files)} 张图片")

    for idx, filename in enumerate(image_files):
        try:
            # Determine the label FIRST and skip unlabelable files before any
            # append.  The original appended the image unconditionally and the
            # label only for cat/dog prefixes, so an unexpected filename would
            # silently desynchronize X and y.
            if filename.startswith('cat'):
                label = 0
            elif filename.startswith('dog'):
                label = 1
            else:
                continue

            img_path = os.path.join(data_dir, filename)
            img = Image.open(img_path)

            # Normalize palette/grayscale images to 3-channel RGB.
            if img.mode != 'RGB':
                img = img.convert('RGB')

            img = img.resize(image_size)
            # Scale pixels to [0, 1] and flatten to a 1-D feature vector.
            img_array = np.array(img) / 255.0
            img_flat = img_array.flatten()

            images.append(img_flat)
            labels.append(label)

            if (idx + 1) % 1000 == 0:
                print(f"已处理 {idx + 1}/{len(image_files)} 张图片")

        except Exception as exc:
            # Report which file failed — the original message printed a
            # useless "(unknown)" placeholder instead of the filename.
            print(f"处理图片 {filename} 时出错: {exc}")
            continue

    images_arr = np.array(images, dtype=np.float32)
    labels_arr = np.array(labels, dtype=np.int32)

    print("\n数据处理完成!")
    print(f"图片数据形状: {images_arr.shape}")
    print(f"标签数据形状: {labels_arr.shape}")
    print(f"猫的数量: {np.sum(labels_arr == 0)}")
    print(f"狗的数量: {np.sum(labels_arr == 1)}")

    return images_arr, labels_arr


def build_faiss_index(X_scaled: np.ndarray) -> faiss.IndexFlatL2:
    """Create a flat L2-distance Faiss index over the standardized features.

    Faiss requires contiguous float32 input, hence the dtype conversion
    before adding the vectors.
    """
    n_features = X_scaled.shape[1]
    l2_index = faiss.IndexFlatL2(n_features)
    l2_index.add(X_scaled.astype(np.float32))
    return l2_index


def build_ensemble_models() -> Tuple[VotingClassifier, VotingClassifier]:
    """Build the hard- and soft-voting ensembles (original hyperparameters).

    Both ensembles combine a random forest and a logistic regression.  The
    base estimator list is shared between the two VotingClassifiers, which is
    safe because sklearn clones the estimators when `fit` is called.
    """
    base_estimators = [
        (
            'random_forest',
            RandomForestClassifier(
                n_estimators=100,
                max_depth=20,
                min_samples_split=10,
                min_samples_leaf=5,
                random_state=42,
                n_jobs=-1,
            ),
        ),
        (
            'logistic_regression',
            LogisticRegression(
                C=1.0,
                max_iter=1000,
                solver='lbfgs',
                random_state=42,
                n_jobs=-1,
            ),
        ),
    ]

    hard_voter, soft_voter = (
        VotingClassifier(estimators=base_estimators, voting=strategy, n_jobs=-1)
        for strategy in ('hard', 'soft')
    )
    return hard_voter, soft_voter


def main():
    """Run the end-to-end training pipeline.

    Steps: load images -> train/test split -> standardize -> PCA -> build
    Faiss index -> train hard/soft voting ensembles -> keep the better one
    by test accuracy -> persist model, scaler, PCA, index and train labels.
    """
    print("=" * 50)
    print("开始数据处理...")
    print("=" * 50)

    # ========== Configuration (original path and constants kept as-is) ==========
    DATA_DIR = r"c:\Users\xingn\Desktop\作业\cat_dog_data\data\train"
    IMAGE_SIZE = (224, 224)

    # 1. Load and preprocess the raw images.
    X, y = load_and_preprocess_images(DATA_DIR, IMAGE_SIZE)

    # 2. Split into train/test sets (stratified to preserve the class ratio).
    print("\n" + "=" * 50)
    print("划分训练集和测试集...")
    print("=" * 50)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42, stratify=y
    )

    print(f"训练集大小: {X_train.shape[0]} 样本")
    print(f"测试集大小: {X_test.shape[0]} 样本")
    print(f"特征维度: {X_train.shape[1]} 维")

    # 3. Standardize features; fit on the training split only, then apply the
    # same transform to the test split (no test-set leakage).
    print("\n对特征进行标准化...")
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_test_scaled = scaler.transform(X_test)
    print("标准化完成!")

    # 4. PCA: reduce the raw pixel features down to 500 components.
    print("\n" + "=" * 50)
    print("使用PCA进行降维...")
    print("=" * 50)
    print(f"原始特征维度: {X_train_scaled.shape[1]} 维")

    n_components = 500
    pca = PCA(n_components=n_components, random_state=42)
    X_train_pca = pca.fit_transform(X_train_scaled)
    X_test_pca = pca.transform(X_test_scaled)

    explained_variance = np.sum(pca.explained_variance_ratio_) * 100
    print(f"降维后特征维度: {X_train_pca.shape[1]} 维")
    print(f"保留的方差比例: {explained_variance:.2f}%")
    print("PCA降维完成!")

    # 5. Build the Faiss similarity index.
    # NOTE(review): the index is built on the scaled PRE-PCA features, so its
    # dimensionality differs from the PCA features the classifiers consume —
    # confirm the webapp queries it with scaled (not PCA-reduced) vectors.
    print("\n" + "=" * 50)
    print("构建Faiss索引...")
    print("=" * 50)
    faiss_index = build_faiss_index(X_train_scaled)
    print(f"Faiss索引构建完成!")
    print(f"索引中的向量数量: {faiss_index.ntotal}")

    # 6. Build the (untrained) hard- and soft-voting ensembles.
    print("\n" + "=" * 50)
    print("构建集成学习模型...")
    print("=" * 50)
    ensemble_model_hard, ensemble_model_soft = build_ensemble_models()

    print("✓ 随机森林与逻辑回归分类器配置完成")
    print("✓ 硬投票/软投票集成模型构建完成")

    # 7. Train both ensembles on the PCA features; evaluate on both splits.
    print("\n" + "=" * 50)
    print("开始模型训练...")
    print("=" * 50)

    print("\n[1/2] 训练硬投票集成模型...")
    ensemble_model_hard.fit(X_train_pca, y_train)
    print("✓ 硬投票模型训练完成")

    y_train_pred_hard = ensemble_model_hard.predict(X_train_pca)
    y_test_pred_hard = ensemble_model_hard.predict(X_test_pca)

    train_acc_hard = accuracy_score(y_train, y_train_pred_hard)
    test_acc_hard = accuracy_score(y_test, y_test_pred_hard)
    test_f1_hard = f1_score(y_test, y_test_pred_hard)

    print(f"  训练集准确率: {train_acc_hard:.4f}")
    print(f"  测试集准确率: {test_acc_hard:.4f}")
    print(f"  测试集F1分数: {test_f1_hard:.4f}")

    print("\n[2/2] 训练软投票集成模型...")
    ensemble_model_soft.fit(X_train_pca, y_train)
    print("✓ 软投票模型训练完成")

    y_train_pred_soft = ensemble_model_soft.predict(X_train_pca)
    y_test_pred_soft = ensemble_model_soft.predict(X_test_pca)

    train_acc_soft = accuracy_score(y_train, y_train_pred_soft)
    test_acc_soft = accuracy_score(y_test, y_test_pred_soft)
    test_f1_soft = f1_score(y_test, y_test_pred_soft)

    print(f"  训练集准确率: {train_acc_soft:.4f}")
    print(f"  测试集准确率: {test_acc_soft:.4f}")
    print(f"  测试集F1分数: {test_f1_soft:.4f}")

    print("\n" + "=" * 50)
    print("投票策略对比:")
    print("=" * 50)
    print(f"硬投票 - 测试集准确率: {test_acc_hard:.4f}, F1分数: {test_f1_hard:.4f}")
    print(f"软投票 - 测试集准确率: {test_acc_soft:.4f}, F1分数: {test_f1_soft:.4f}")

    # Select by test accuracy; ties go to the soft-voting model (>=).
    if test_acc_soft >= test_acc_hard:
        ensemble_model = ensemble_model_soft
        print("\n✓ 选择软投票模型作为最终模型")
    else:
        ensemble_model = ensemble_model_hard
        print("\n✓ 选择硬投票模型作为最终模型")

    # 8. Persist the chosen model and every preprocessing artifact needed to
    # reproduce the feature pipeline at prediction time.
    print("\n" + "=" * 50)
    print("保存模型和索引...")
    print("=" * 50)

    os.makedirs('./saved_models', exist_ok=True)
    os.makedirs('./saved_indexes', exist_ok=True)

    model_path = './saved_models/ensemble_model.joblib'
    joblib.dump(ensemble_model, model_path)
    print(f"✓ 集成模型已保存到: {model_path}")

    scaler_path = './saved_models/scaler.joblib'
    joblib.dump(scaler, scaler_path)
    print(f"✓ 标准化器已保存到: {scaler_path}")

    pca_path = './saved_models/pca.joblib'
    joblib.dump(pca, pca_path)
    print(f"✓ PCA模型已保存到: {pca_path}")

    index_path = './saved_indexes/faiss_index.faiss'
    faiss.write_index(faiss_index, index_path)
    print(f"✓ Faiss索引已保存到: {index_path}")

    labels_path = './saved_indexes/train_labels.npy'
    np.save(labels_path, y_train)
    print(f"✓ 训练集标签已保存到: {labels_path}")

    print("\n" + "=" * 50)
    print("所有任务完成!")
    print("=" * 50)
    print("\n模型训练总结:")
    print(f"- 训练样本数: {len(X_train)}")
    print(f"- 测试样本数: {len(X_test)}")
    # NOTE(review): these maxima are taken independently, so the reported
    # accuracy and F1 can come from DIFFERENT models — and not necessarily
    # the one saved above. Confirm this summary is intentional.
    print(f"- 最终模型测试准确率: {max(test_acc_hard, test_acc_soft):.4f}")
    print(f"- 最终模型测试F1分数: {max(test_f1_hard, test_f1_soft):.4f}")
    print("\n可以运行 ensemble_webapp.py 启动网页应用进行预测!")


# Script entry point: run the full training pipeline only when executed
# directly (not when imported as a module).
if __name__ == "__main__":
    main()
