import numpy as np
import time
import faiss  # 用于高效相似性搜索和稠密向量聚类
from util import createXY  # 用于创建数据集的特征和标签
from sklearn.model_selection import train_test_split  # 用于拆分数据集为训练集和测试集
import logging  # 用于记录日志
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.ensemble import (
    BaggingClassifier, 
    AdaBoostClassifier, 
    RandomForestClassifier, 
    StackingClassifier, 
    VotingClassifier
)
from tabulate import tabulate
import pickle

# Configure logging: timestamped INFO-level messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Load and preprocess the data.
# createXY builds the feature list X and label list y from the files under
# train_folder (exact file format is defined in util.createXY — not visible here).
X, y = createXY(train_folder="../data/train", dest_folder=".")
X = np.array(X).astype('float32')  # faiss operates on float32 arrays
faiss.normalize_L2(X)  # L2-normalize each row of X in place
y = np.array(y)
logging.info("数据加载和预处理完成。")

# Split into train/test sets (75% / 25%) with a fixed seed for reproducibility.
# NOTE(review): split is not stratified; if labels are imbalanced consider
# stratify=y — confirm the label distribution first.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=2023)
logging.info("数据集划分为训练集和测试集。")

# Candidate classifiers to benchmark.
def _ensemble_members():
    """Return a fresh (name, estimator) list for the voting/stacking ensembles.

    Building new instances per ensemble ensures no fitted estimator object is
    shared between the three ensembles that use this trio.
    """
    return [
        ('lr', LogisticRegression()),
        ('rf', RandomForestClassifier(random_state=42)),
        ('svc', SVC(probability=True)),
    ]

classifiers = {
    "logistic_regression": LogisticRegression(),
    "random_forest": RandomForestClassifier(random_state=42),
    "svm": SVC(probability=True),
    # NOTE(review): probability=True is unnecessary for hard voting (it only
    # triggers an extra internal cross-validation at fit time); kept for
    # parity with the soft-voting member list.
    "hard_voting": VotingClassifier(estimators=_ensemble_members(), voting='hard'),
    "soft_voting": VotingClassifier(estimators=_ensemble_members(), voting='soft'),
    # scikit-learn 1.2 renamed `base_estimator` to `estimator` in
    # BaggingClassifier/AdaBoostClassifier; `base_estimator` was removed in
    # 1.4, so the old keyword raises TypeError on current releases.
    "bagging": BaggingClassifier(
        estimator=DecisionTreeClassifier(),
        n_estimators=500,
        max_samples=100,
        bootstrap=True,   # sampling WITH replacement -> bagging
        n_jobs=-1,
    ),
    "pasting": BaggingClassifier(
        estimator=DecisionTreeClassifier(),
        n_estimators=500,
        max_samples=100,
        bootstrap=False,  # sampling WITHOUT replacement -> pasting
        n_jobs=-1,
    ),
    "adaboost": AdaBoostClassifier(
        estimator=DecisionTreeClassifier(max_depth=1),  # decision stumps
        n_estimators=200,
        learning_rate=0.5,
        # `algorithm="SAMME.R"` was the historical default and is
        # deprecated (1.4) / removed (1.6); omitting it preserves behavior
        # on old versions and stays forward-compatible.
    ),
    "gradient_boosting": XGBClassifier(
        n_estimators=200,
        max_depth=2,
        learning_rate=0.5,
    ),
    "stacking": StackingClassifier(
        estimators=_ensemble_members(),
        final_estimator=LogisticRegression(),
    ),
}

results = []        # rows of [name, fit_time, score_time, accuracy]
best_accuracy = 0
best_model = None   # fitted classifier with the highest test accuracy so far

# Train and evaluate every classifier, timing fit and score separately.
for name, clf in classifiers.items():
    # perf_counter() is a monotonic clock made for measuring intervals;
    # time.time() is wall-clock and can jump (NTP sync, DST), yielding
    # wrong or even negative durations.
    start_time = time.perf_counter()
    clf.fit(X_train, y_train)
    fit_time = time.perf_counter() - start_time
    # Lazy %-style args: the message is only formatted if the record is emitted.
    logging.info("%s 模型训练完成。用时 %.4f 秒。", name, fit_time)

    start_time = time.perf_counter()
    accuracy = clf.score(X_test, y_test)
    score_time = time.perf_counter() - start_time
    logging.info("%s 模型评估完成。用时 %.4f 秒。", name, score_time)

    results.append([name, fit_time, score_time, accuracy])

    # Track the best model by test-set accuracy (ties keep the earlier model).
    if accuracy > best_accuracy:
        best_accuracy = accuracy
        best_model = clf

# Persist the model with the highest test accuracy (skipped if no model trained).
if best_model is not None:
    logging.info(f"准确率最高的模型是 {best_model.__class__.__name__}，准确率为 {best_accuracy:.4f}。")
    # Serialized with pickle; the loader must have the same sklearn/xgboost
    # versions available, and pickle files must only be loaded from trusted sources.
    with open("best_model.pkl", "wb") as f:
        pickle.dump(best_model, f)
        logging.info("最佳模型已保存在 best_model.pkl 文件中。")

# Render the benchmark summary as a plain-text table on stdout.
summary_columns = ["Classifier", "Training Time (s)", "Prediction Time (s)", "Accuracy"]
print(tabulate(results, headers=summary_columns, tablefmt="simple"))