import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, BaggingClassifier, AdaBoostClassifier
from sklearn.svm import SVC, NuSVC, LinearSVC
from sklearn.linear_model import LogisticRegression, RidgeClassifier, RidgeClassifierCV, Perceptron, PassiveAggressiveClassifier, SGDClassifier
from sklearn.neighbors import KNeighborsClassifier, NearestCentroid
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis, LinearDiscriminantAnalysis
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.semi_supervised import LabelPropagation, LabelSpreading
from sklearn.dummy import DummyClassifier
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score, balanced_accuracy_score, roc_auc_score, f1_score
import joblib
import argparse
import logging
from util import createXY
import time
import os

# Configure root logging once for the whole script: timestamped INFO-level
# messages so per-model training progress and failures are visible on stdout.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def get_args():
    """Parse command-line options for the training script.

    Returns:
        argparse.Namespace with three required string attributes:
        ``mode`` ('cpu'|'gpu'), ``feature`` ('flat'|'vgg'),
        ``library`` ('sklearn'|'faiss').
    """
    parser = argparse.ArgumentParser(description='使用CPU或GPU训练模型。')
    # All three options share the same shape (required string with a closed
    # choice set), so register them from a spec table instead of repeating
    # the add_argument boilerplate.
    option_specs = (
        ('-m', '--mode', ['cpu', 'gpu'], '选择训练模式：CPU或GPU。'),
        ('-f', '--feature', ['flat', 'vgg'], '选择特征提取方法：flat或vgg。'),
        ('-l', '--library', ['sklearn', 'faiss'], '选择使用的库：sklearn或faiss。'),
    )
    for short_flag, long_flag, allowed, help_text in option_specs:
        parser.add_argument(short_flag, long_flag, type=str, required=True,
                            choices=allowed, help=help_text)
    return parser.parse_args()

def main():
    """Train a suite of classifiers on cat/dog features and save the best one.

    Pipeline: load features via ``createXY`` (feature method selected by the
    ``--feature`` CLI flag), split into train/test, standardize, fit every
    candidate model, log Accuracy / Balanced Accuracy / ROC AUC / F1 / train
    time per model, and persist the fitted scaler plus the highest-accuracy
    model to disk with joblib.

    Side effects: creates the model directory, writes ``scaler.joblib`` and
    ``best_model.joblib`` under it, and emits INFO/ERROR log records.
    """
    args = get_args()

    # Load and preprocess the data.
    # NOTE(review): train/dest folders and the model directory below are
    # hard-coded absolute Windows paths — consider making them CLI flags.
    X, y = createXY(train_folder="C:/Users/86178/Desktop/faiss_dog_cat_question-main/cat_dog_data/data/train", dest_folder="C:/Users/86178/Desktop/faiss_dog_cat_question-main/cat_dog_data/data/output", method=args.feature)
    X = np.array(X).astype('float32')
    y = np.array(y)
    logging.info("数据加载完成。")

    # BUG FIX (data leakage): split BEFORE fitting the scaler. The original
    # fit the StandardScaler on the full dataset, leaking test-set statistics
    # (mean/std) into training. Fit on the training split only.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=2023)

    # Standardize: fit on train, apply the same transform to both splits.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    logging.info("数据标准化完成。")

    # BUG FIX: ensure the output directory exists before dumping the scaler
    # (the original only created it just before saving the best model).
    model_dir = "D:/pycharm/best_model"
    os.makedirs(model_dir, exist_ok=True)

    # Persist the fitted scaler so inference can reuse the same preprocessing.
    scaler_filename = os.path.join(model_dir, "scaler.joblib")
    joblib.dump(scaler, scaler_filename)
    logging.info(f"标准化器已保存到: {scaler_filename}")

    # Candidate models to train and compare.
    models = {
        'RandomForestClassifier': RandomForestClassifier(n_jobs=-1),
        'LogisticRegression': LogisticRegression(max_iter=1000, solver='saga', n_jobs=-1),
        'KNeighborsClassifier': KNeighborsClassifier(n_jobs=-1),
        'GaussianNB': GaussianNB(),
        'NuSVC': NuSVC(probability=True, nu=0.1, cache_size=500),
        'LGBMClassifier': LGBMClassifier(n_jobs=-1),
        'XGBClassifier': XGBClassifier(use_label_encoder=False, eval_metric='logloss', n_jobs=-1),
        'SVC': SVC(kernel='linear', probability=True, cache_size=500),
        'QuadraticDiscriminantAnalysis': QuadraticDiscriminantAnalysis(),
        'ExtraTreesClassifier': ExtraTreesClassifier(n_jobs=-1),
        'BaggingClassifier': BaggingClassifier(n_jobs=-1),
        'AdaBoostClassifier': AdaBoostClassifier(),
        'NearestCentroid': NearestCentroid(),
        'CalibratedClassifierCV': CalibratedClassifierCV(cv=5, n_jobs=-1),
        'DecisionTreeClassifier': DecisionTreeClassifier(),
        'BernoulliNB': BernoulliNB(),
        'RidgeClassifierCV': RidgeClassifierCV(),
        'LinearDiscriminantAnalysis': LinearDiscriminantAnalysis(),
        'RidgeClassifier': RidgeClassifier(),
        'ExtraTreeClassifier': ExtraTreeClassifier(),
        'LinearSVC': LinearSVC(dual=False, max_iter=10000),  # LinearSVC does not accept n_jobs
        'Perceptron': Perceptron(n_jobs=-1),
        'PassiveAggressiveClassifier': PassiveAggressiveClassifier(n_jobs=-1),
        'SGDClassifier': SGDClassifier(n_jobs=-1),
        'LabelPropagation': LabelPropagation(n_jobs=-1),
        'LabelSpreading': LabelSpreading(n_jobs=-1),
        'DummyClassifier': DummyClassifier()
    }

    # Train and evaluate each model; a failure in one must not abort the sweep.
    results = {}
    for model_name, model in models.items():
        try:
            logging.info(f"开始训练模型: {model_name}")
            start_time = time.time()
            model.fit(X_train, y_train)
            train_time = time.time() - start_time

            y_pred = model.predict(X_test)
            # ROC AUC needs continuous scores, not hard labels. Prefer
            # predict_proba; fall back to decision_function for margin-based
            # models (RidgeClassifier, LinearSVC, SGD, Perceptron, ...).
            if hasattr(model, "predict_proba"):
                y_score = model.predict_proba(X_test)[:, 1]
            elif hasattr(model, "decision_function"):
                y_score = model.decision_function(X_test)
            else:
                y_score = None
            metrics = {
                'Accuracy': accuracy_score(y_test, y_pred),
                'Balanced Accuracy': balanced_accuracy_score(y_test, y_pred),
                # BUG FIX: original passed y_pred (labels) to roc_auc_score
                # even though probabilities were available — use scores.
                'ROC AUC': roc_auc_score(y_test, y_score) if y_score is not None else None,
                'F1 Score': f1_score(y_test, y_pred, average='weighted'),
                'Train Time': train_time
            }
            results[model_name] = metrics

            # Pre-format ROC AUC for display ('-' when unavailable).
            roc_auc_str = f'{metrics["ROC AUC"]:.4f}' if metrics["ROC AUC"] is not None else '-'

            logging.info(
                f'模型: {model_name}, 准确率: {metrics["Accuracy"]:.4f}, 平衡准确率: {metrics["Balanced Accuracy"]:.4f}, '
                f'ROC AUC: {roc_auc_str}, F1分数: {metrics["F1 Score"]:.4f}, 训练时间: {metrics["Train Time"]:.2f}s'
            )
        except Exception as e:
            # Best-effort sweep: log the failure and continue with the rest.
            logging.error(f"模型: {model_name} 训练失败: {e}")

    # BUG FIX: guard against every model failing — max() over an empty dict
    # would raise ValueError.
    if not results:
        logging.error("没有模型训练成功。")
        return

    # Pick the model with the highest test-set accuracy.
    best_model_name = max(results, key=lambda k: results[k]['Accuracy'])
    logging.info(f'最佳模型: {best_model_name}, 最高准确率: {results[best_model_name]["Accuracy"]:.4f}')

    # Persist the best (already fitted) model to disk.
    model_filename = os.path.join(model_dir, "best_model.joblib")
    joblib.dump(models[best_model_name], model_filename)
    logging.info(f"最佳模型已保存到: {model_filename}")

# Standard entry guard: run the training sweep only when executed as a
# script, not when this module is imported.
if __name__ == '__main__':
    main()