import numpy as np
import logging
import time
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomForestClassifier, VotingClassifier, 
                              BaggingClassifier, AdaBoostClassifier, 
                              GradientBoostingClassifier, StackingClassifier)
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from tqdm import tqdm
import pandas as pd
import faiss
from util import createXY
import joblib  # For saving the model

# Configure logging so that model info is printed with a timestamp and level.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Data loading and preprocessing.
def load_data():
    """Load training features and labels, then L2-normalize the features.

    Returns:
        tuple: ``(X, y)`` where ``X`` is a float32 feature matrix whose rows
        have been L2-normalized in place by faiss, and ``y`` is the label array.
    """
    features, labels = createXY(train_folder="data/train", dest_folder=".", method='flat')  # switch feature extraction method to 'flat' or 'vgg'
    features = np.array(features).astype('float32')
    faiss.normalize_L2(features)  # in-place L2 normalization of every row
    return features, np.array(labels)

# Registry of all classifiers to benchmark, keyed by a short display name.
models = {
    "logistic_regression": LogisticRegression(),
    "random_forest": RandomForestClassifier(),
    "svm": SVC(),
    "hard_voting": VotingClassifier(estimators=[
        ('lr', LogisticRegression()),
        ('rf', RandomForestClassifier()),
        ('svc', SVC())
    ], voting='hard'),
    "soft_voting": VotingClassifier(estimators=[
        ('lr', LogisticRegression()),
        ('rf', RandomForestClassifier()),
        ('svc', SVC(probability=True))  # soft voting needs probability estimates
    ], voting='soft'),
    "bagging": BaggingClassifier(),
    # Pasting = sampling WITHOUT replacement; BaggingClassifier's default
    # bootstrap=True is bagging, so bootstrap=False is required here.
    "pasting": BaggingClassifier(bootstrap=False),
    "adaboost": AdaBoostClassifier(),
    "gradient_boosting": GradientBoostingClassifier(),
    "stacking": StackingClassifier(estimators=[
        ('lr', LogisticRegression()),
        ('rf', RandomForestClassifier()),
        ('svc', SVC())
    ], final_estimator=LogisticRegression())
}

# Main entry point: train every registered model, time training/prediction,
# record accuracy, and persist the single best-performing model.
def main():
    """Benchmark all classifiers in ``models`` on the loaded dataset.

    For each model, fit on a 75/25 train/test split, measure wall-clock
    training and prediction time, compute accuracy, and log the results.
    The best model (highest test accuracy) is saved to
    ``best_model_<name>.joblib`` once, after all models have been evaluated,
    so no stale files from earlier leaders are left behind.
    """
    # Load data and split it once so every model sees the same partition.
    X, y = load_data()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=2023)

    # Per-model results, column-oriented for easy DataFrame construction.
    results = {
        "Classifier": [],
        "Training Time (s)": [],
        "Prediction Time (s)": [],
        "Accuracy": []
    }

    best_accuracy = 0
    best_model = None
    best_model_name = None

    # Train and evaluate each model in turn.
    for model_name, model in tqdm(models.items(), desc="Training Models"):
        logging.info(f"开始训练模型: {model_name}")

        # Time training. (The original wrapped this single fit() call in a
        # one-iteration tqdm loop, which added nothing — removed.)
        train_start = time.time()
        model.fit(X_train, y_train)
        train_end = time.time()

        # Time prediction on the held-out test set.
        pred_start = time.time()
        y_pred = model.predict(X_test)
        pred_end = time.time()

        accuracy = accuracy_score(y_test, y_pred)

        # Record this model's metrics.
        results["Classifier"].append(model_name)
        results["Training Time (s)"].append(train_end - train_start)
        results["Prediction Time (s)"].append(pred_end - pred_start)
        results["Accuracy"].append(accuracy)

        logging.info(f"{model_name}模型训练完成。用时{train_end - train_start:.4f}秒")
        logging.info(f"{model_name}模型评估完成。用时{pred_end - pred_start:.4f}秒")

        # Track the current leader; saving is deferred until the loop ends.
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            best_model = model
            best_model_name = model_name

    # Persist only the final winner (the original dumped every interim
    # leader, leaving stale best_model_*.joblib files on disk).
    if best_model is not None:
        joblib.dump(best_model, f"best_model_{best_model_name}.joblib")

    # Build and log the summary table.
    df = pd.DataFrame(results)
    logging.info("模型评估结果：\n" + df.to_string(index=False))

# Run the main function only when executed as a script.
if __name__ == '__main__':
    main()
