# Import required libraries
import numpy as np
import logging
from util import createXY
from sklearn.model_selection import train_test_split
from lazypredict.Supervised import LazyClassifier
import joblib
import warnings
import os
warnings.filterwarnings('ignore')

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def _make_classifier(name):
    """Return a fresh, unfitted estimator for *name*, or None if unknown.

    The model libraries are imported lazily here so they are only loaded
    once a best model has actually been picked (mirrors the original
    mid-function imports). Factories — not instances — are stored, so only
    the single estimator we need is ever constructed.
    """
    from sklearn.discriminant_analysis import (
        LinearDiscriminantAnalysis,
        QuadraticDiscriminantAnalysis,
    )
    from sklearn.ensemble import (
        AdaBoostClassifier,
        BaggingClassifier,
        ExtraTreesClassifier,
        RandomForestClassifier,
    )
    from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.naive_bayes import BernoulliNB, GaussianNB
    from sklearn.linear_model import (
        LogisticRegression,
        PassiveAggressiveClassifier,
        Perceptron,
        RidgeClassifier,
    )
    from sklearn.svm import LinearSVC, SVC
    from sklearn.calibration import CalibratedClassifierCV
    from sklearn.semi_supervised import LabelPropagation, LabelSpreading
    from sklearn.dummy import DummyClassifier
    from xgboost import XGBClassifier
    from lightgbm import LGBMClassifier

    factories = {
        'QuadraticDiscriminantAnalysis': lambda: QuadraticDiscriminantAnalysis(),
        'RandomForestClassifier': lambda: RandomForestClassifier(random_state=2023),
        'ExtraTreesClassifier': lambda: ExtraTreesClassifier(random_state=2023),
        'BaggingClassifier': lambda: BaggingClassifier(random_state=2023),
        'AdaBoostClassifier': lambda: AdaBoostClassifier(random_state=2023),
        'DecisionTreeClassifier': lambda: DecisionTreeClassifier(random_state=2023),
        'ExtraTreeClassifier': lambda: ExtraTreeClassifier(random_state=2023),
        'KNeighborsClassifier': lambda: KNeighborsClassifier(),
        'GaussianNB': lambda: GaussianNB(),
        'BernoulliNB': lambda: BernoulliNB(),
        'LogisticRegression': lambda: LogisticRegression(random_state=2023, max_iter=1000),
        'RidgeClassifier': lambda: RidgeClassifier(random_state=2023),
        'PassiveAggressiveClassifier': lambda: PassiveAggressiveClassifier(random_state=2023),
        'Perceptron': lambda: Perceptron(random_state=2023),
        'SVC': lambda: SVC(random_state=2023),
        'LinearSVC': lambda: LinearSVC(random_state=2023, max_iter=1000),
        'CalibratedClassifierCV': lambda: CalibratedClassifierCV(),
        'LinearDiscriminantAnalysis': lambda: LinearDiscriminantAnalysis(),
        'LabelPropagation': lambda: LabelPropagation(),
        'LabelSpreading': lambda: LabelSpreading(),
        'DummyClassifier': lambda: DummyClassifier(random_state=2023),
        'XGBClassifier': lambda: XGBClassifier(random_state=2023, eval_metric='logloss'),
        'LGBMClassifier': lambda: LGBMClassifier(random_state=2023, verbose=-1),
    }
    factory = factories.get(name)
    return factory() if factory is not None else None


def main():
    """Benchmark many classifiers on cat/dog image features with LazyPredict,
    retrain the best-scoring one, and persist it plus a metrics summary
    next to this script.
    """
    logging.info("开始使用 lazypredict 训练猫狗分类模型")

    # Resolve all paths relative to this script so behavior does not
    # depend on the caller's working directory.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    train_folder = os.path.join(current_dir, "data", "train")

    logging.info(f"训练数据路径: {train_folder}")

    # Bail out early with a clear message if the data directory is missing
    # (isdir rather than exists: a plain file of the same name is also an error).
    if not os.path.isdir(train_folder):
        logging.error(f"训练数据目录不存在: {train_folder}")
        return

    # Load and featurize the images (feature extraction happens in createXY;
    # method='vgg' — presumably VGG-based embeddings, see util.createXY).
    logging.info("正在加载数据...")
    X, y = createXY(train_folder=train_folder, dest_folder=current_dir, method='vgg')
    X = np.asarray(X, dtype='float32')
    y = np.asarray(y)
    logging.info(f"数据加载完成。X.shape: {X.shape}, y.shape: {y.shape}")

    # Guard against an empty dataset (e.g. wrong folder layout).
    if len(X) == 0 or len(y) == 0:
        logging.error("数据加载失败！没有读取到任何图像。")
        logging.error(f"请检查训练数据目录: {train_folder}")
        return

    # Hold out 25% for evaluation; fixed seed keeps the split reproducible.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.25, random_state=2023
    )
    logging.info(f"数据集划分完成。训练集: {X_train.shape[0]}, 测试集: {X_test.shape[0]}")

    # Fit every classifier LazyPredict supports and collect a leaderboard.
    logging.info("开始使用 LazyPredict 训练多个分类器...")
    logging.info("这可能需要一些时间，请耐心等待...")

    clf = LazyClassifier(verbose=0, ignore_warnings=True, custom_metric=None)
    models, predictions = clf.fit(X_train, X_test, y_train, y_test)

    # Show the full leaderboard (print keeps the DataFrame formatting intact).
    logging.info("\n" + "=" * 80)
    logging.info("所有模型的性能对比：")
    logging.info("=" * 80)
    print(models)
    logging.info("=" * 80)

    # Pick the model with the highest test-set accuracy.
    best_model_name = models['Accuracy'].idxmax()
    best_accuracy = models.loc[best_model_name, 'Accuracy']

    logging.info(f"\n最佳模型: {best_model_name}")
    logging.info(f"准确率: {best_accuracy:.4f}")
    logging.info(f"平衡准确率: {models.loc[best_model_name, 'Balanced Accuracy']:.4f}")
    logging.info(f"ROC AUC: {models.loc[best_model_name, 'ROC AUC']:.4f}")
    logging.info(f"F1 Score: {models.loc[best_model_name, 'F1 Score']:.4f}")

    # Re-train the winner from scratch so we hold a real fitted estimator
    # (LazyPredict does not hand back its fitted models by default).
    logging.info(f"\n正在重新训练最佳模型 {best_model_name}...")

    best_model = _make_classifier(best_model_name)
    if best_model is None:
        # LazyPredict may rank a model we did not map — fall back gracefully.
        logging.warning(f"未找到模型 {best_model_name}，使用 RandomForestClassifier 作为默认")
        best_model_name = 'RandomForestClassifier'
        best_model = _make_classifier(best_model_name)

    best_model.fit(X_train, y_train)
    train_accuracy = best_model.score(X_train, y_train)
    test_accuracy = best_model.score(X_test, y_test)

    logging.info(f"训练集准确率: {train_accuracy:.4f}")
    logging.info(f"测试集准确率: {test_accuracy:.4f}")

    # Persist the fitted model next to this script (not the CWD) so the
    # output location is consistent with where the data was read from.
    model_filename = os.path.join(current_dir, f'best_model_{best_model_name}.pkl')
    joblib.dump(best_model, model_filename)
    logging.info(f"\n模型已保存到: {model_filename}")

    # Save a small metrics summary alongside the model for later inspection.
    model_info = {
        'model_name': best_model_name,
        'accuracy': best_accuracy,
        'train_accuracy': train_accuracy,
        'test_accuracy': test_accuracy,
    }
    info_path = os.path.join(current_dir, 'model_info.pkl')
    joblib.dump(model_info, info_path)
    logging.info(f"模型信息已保存到: {info_path}")

    logging.info("\n训练完成！")


if __name__ == '__main__':
    main()
