from utils import createXY
from utils import format_table
from tqdm import tqdm
import numpy as np
import time
import logging
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, VotingClassifier, BaggingClassifier, AdaBoostClassifier, GradientBoostingClassifier, StackingClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from itertools import islice
from sklearn.utils import shuffle
import pickle
from tabulate import tabulate

# Configure logging: timestamped INFO-level messages for progress reporting.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def create_models():
    """Build and return a mapping of model name -> unfitted classifier.

    The dictionary contains three configured base learners, three
    ensembles layered on top of them (hard voting, soft voting,
    stacking), and four additional ensemble strategies (bagging,
    pasting, AdaBoost, gradient boosting).
    """
    logistic = LogisticRegression(C=0.5622117123602399, solver='liblinear', max_iter=1000)
    forest = RandomForestClassifier(n_estimators=191, max_depth=None, min_samples_split=8, random_state=42)
    svm = SVC(C=45.61699842170359, gamma='scale', kernel='rbf', probability=True)

    # The voting/stacking ensembles all wrap the same three base
    # estimator instances (sklearn clones them at fit time).
    named_bases = [('logistic', logistic), ('rf', forest), ('svm', svm)]

    return {
        'logistic_regression': logistic,
        'random_forest': forest,
        'svm': svm,
        'hard_voting': VotingClassifier(estimators=named_bases, voting='hard'),
        'soft_voting': VotingClassifier(estimators=named_bases, voting='soft'),
        'stacking': StackingClassifier(estimators=named_bases, final_estimator=LogisticRegression()),
        'bagging': BaggingClassifier(estimator=LogisticRegression(), n_estimators=50, random_state=42),
        'pasting': BaggingClassifier(estimator=LogisticRegression(), n_estimators=50, bootstrap=False, random_state=42),
        'adaboost': AdaBoostClassifier(n_estimators=50, learning_rate=1.0, random_state=42, algorithm='SAMME'),
        'gradient_boosting': GradientBoostingClassifier(n_estimators=100, random_state=42),
    }

def train_and_evaluate(model, X_train, y_train, X_test, y_test):
    """Fit *model* on the training split and score it on the test split.

    Parameters
    ----------
    model : sklearn-style estimator
        Any object exposing ``fit(X, y)`` and ``predict(X)``. Fitted in place.
    X_train, y_train : array-like
        Training features and labels.
    X_test, y_test : array-like
        Held-out features and labels.

    Returns
    -------
    tuple[float, float, float]
        ``(train_time, predict_time, accuracy)`` — fit and predict
        durations in seconds, and test-set accuracy in [0, 1].
    """
    # perf_counter is monotonic and high-resolution; time.time() can jump
    # with system clock adjustments and is too coarse for short intervals.
    start = time.perf_counter()
    model.fit(X_train, y_train)
    train_time = time.perf_counter() - start

    start = time.perf_counter()
    y_pred = model.predict(X_test)
    predict_time = time.perf_counter() - start

    return train_time, predict_time, accuracy_score(y_test, y_pred)

# Load and preprocess the data.
# NOTE(review): hard-coded absolute Windows paths — must be adjusted per machine.
train_file = r'D:\project5_基于集成学习的猫狗识别\data_try' 
dest_file = r'D:\project5_基于集成学习的猫狗识别\dest_file'
# createXY is a project helper; presumably builds per-image feature vectors
# and labels from the image folder — confirm against utils.py.
X, y = createXY(train_folder=train_file, dest_folder=dest_file)
X, y = shuffle(X, y, random_state=42)  # deterministic shuffle for reproducible splits
X = np.array(X).astype('float32')
# L2-normalize each sample (row) to unit length.
X = X / np.linalg.norm(X, axis=1, keepdims=True)
logging.info("数据加载和预处理完成。")

# Hold out 20% of the samples as the test set (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

logging.info("数据集划分为训练集和测试集。")

# Build the dictionary of classifiers to compare.
models = create_models()

# Train and evaluate every model, tracking the most accurate one.
results = []
best_clf = None
# Start below any achievable accuracy so the first model always becomes the
# initial best; with the old `best_accuracy = 0` and a strict `>`, an
# all-zero run would have pickled None.
best_accuracy = -1.0


# Iterate over all models — the previous `list(models.items())[:10]` cap was
# a no-op today (exactly 10 models) and would silently drop models added later.
for name, model in models.items():

    train_time, predict_time, accuracy = train_and_evaluate(model, X_train, y_train, X_test, y_test)
    if accuracy > best_accuracy:
        best_accuracy = accuracy
        best_clf = model

    logging.info(f"{name.capitalize()} Training Time: {train_time:.2f} seconds")
    logging.info(f"{name.capitalize()} Prediction Time: {predict_time:.2f} seconds")

    results.append([name, train_time, predict_time, accuracy])

# Persist the best-performing (fitted) model to disk.
with open('best_model.pkl', 'wb') as file:
    pickle.dump(best_clf, file)

# Print a formatted summary table of all results.
headers = ['Classifier', 'Training Time (s)', 'Prediction Time (s)', 'Accuracy']
print(tabulate(results, headers=headers, tablefmt="simple"))
