import numpy as np
import logging
from util import createXY
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pickle

# Industrial-grade logging: timestamped records appended to a dedicated file.
logging.basicConfig(
    level=logging.INFO,
    filename='ensemble_train.log',
    format='%(asctime)s - %(levelname)s - %(message)s',
)

# Load the raw features/labels and coerce them into a 2-D float32 matrix.
logging.info('开始加载数据集...')
X, y = createXY(train_folder="./cat_dog_data/data/train", dest_folder=".", method='flat')
# BUG FIX: the original called np.array(X, dtype=np.float32) *before* the
# object/ragged check, so X.dtype could never be `object` there — a ragged
# list of per-sample vectors would raise inside np.array and the np.vstack
# fallback was unreachable. Inspect the raw array first, stack to 2-D if
# needed, then convert to float32 once.
X = np.asarray(X)
y = np.array(y)
if X.dtype == object or X.ndim == 1:
    # createXY may return a sequence of per-sample vectors; stack them to 2-D.
    X = np.vstack(X)
X = X.astype(np.float32, copy=False)
logging.info(f"数据集加载完成，X.shape: {X.shape}, y.shape: {y.shape}")

# Row-wise L2 normalization; the epsilon guards against a zero-norm row.
norms = np.linalg.norm(X, axis=1, keepdims=True)
X = X / (norms + 1e-12)
logging.info("数据归一化完成")

# Hold out 25% of the samples for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=2025
)
logging.info("数据集划分完成")

# Build the two base learners and a soft-voting ensemble that combines them.
rf = RandomForestClassifier(random_state=2025, n_estimators=100, max_depth=10)
gb = GradientBoostingClassifier(
    random_state=2025, n_estimators=100, learning_rate=0.1, max_depth=5
)
ensemble = VotingClassifier(voting='soft', estimators=[('rf', rf), ('gb', gb)])

# Candidates keyed by display name, plus best-so-far bookkeeping.
models = {'RandomForest': rf, 'GradientBoosting': gb, 'EnsembleVoting': ensemble}
best_acc, best_model, best_name = 0.0, None, ''

# Train each candidate, score it on the held-out split, and track the winner.
for name, model in models.items():
    # Lazy %-style args so the message is only formatted if the record is emitted.
    logging.info("开始训练模型: %s", name)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    acc = accuracy_score(y_test, y_pred)
    logging.info("模型: %s 测试集准确率: %.4f", name, acc)
    print(f"模型: {name} 测试集准确率: {acc:.4f}")
    # FIX: also accept the first model when acc == 0.0 — the original strict
    # `acc > best_acc` with best_acc starting at 0.0 could leave best_model
    # as None (and the script would later pickle None) if every model scored 0.
    if best_model is None or acc > best_acc:
        best_acc = acc
        best_model = model
        best_name = name

# Report the winning model to both the log file and stdout, then persist it.
summary = f"最佳模型: {best_name}, 测试集最高准确率: {best_acc:.4f}"
logging.info(summary)
print(summary)

# NOTE: pickle is fine here because we only reload our own trained model;
# never unpickle files from untrusted sources.
with open('best_ensemble_model.pkl', 'wb') as f:
    pickle.dump(best_model, f)
saved_msg = "最佳模型已保存为 best_ensemble_model.pkl"
logging.info(saved_msg)
print(saved_msg)
