import os

import joblib
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.svm import SVC

class ModelTrainer:
    """Train several sklearn classifiers, pick the best one by cross-validated
    accuracy, and persist it to disk.

    Typical flow: initialize_models() -> train_models() -> evaluate_models()
    -> save_best_model().
    """

    def __init__(self, random_state=42):
        """Set up an empty trainer.

        Args:
            random_state: Seed forwarded to every sklearn estimator so runs
                are reproducible.
        """
        self.random_state = random_state
        # Candidate (untrained) estimators, keyed by display name.
        self.models = {}
        # Fitted estimators, keyed by display name. Initialized here so that
        # calling evaluate_models() before train_models() fails with a clear
        # error instead of an AttributeError.
        self.trained_models = {}
        # Estimator with the highest cross-validated accuracy, set by
        # evaluate_models().
        self.best_model = None

    def initialize_models(self):
        """Create the candidate estimators and return them.

        Returns:
            dict: Display name -> unfitted sklearn estimator.
        """
        self.models = {
            '逻辑回归': LogisticRegression(random_state=self.random_state),
            '随机森林': RandomForestClassifier(n_estimators=100, random_state=self.random_state),
            '支持向量机': SVC(kernel='rbf', random_state=self.random_state)
        }
        return self.models

    def train_models(self, X_train, y_train):
        """Fit every candidate model on the training data.

        Args:
            X_train: Training feature matrix.
            y_train: Training labels.

        Returns:
            dict: Display name -> fitted estimator (also stored on
            ``self.trained_models``).
        """
        trained_models = {}
        for name, model in self.models.items():
            model.fit(X_train, y_train)
            trained_models[name] = model
            print(f"{name} 训练完成")
        self.trained_models = trained_models
        return trained_models

    def evaluate_models(self, X_train, y_train, cv=5):
        """Score every fitted model with k-fold cross-validation and record
        the best one on ``self.best_model``.

        Args:
            X_train: Training feature matrix.
            y_train: Training labels.
            cv: Number of cross-validation folds.

        Returns:
            dict: Display name -> mean cross-validated accuracy.

        Raises:
            ValueError: If train_models() has not been called yet.
        """
        if not self.trained_models:
            raise ValueError("没有已训练的模型，请先调用 train_models")

        print("交叉验证结果（准确率）:")
        model_scores = {}

        for name, model in self.trained_models.items():
            scores = cross_val_score(model, X_train, y_train, cv=cv)
            mean_score = scores.mean()
            # Report a ~95% interval (2 standard deviations) alongside the mean.
            std_score = scores.std() * 2
            model_scores[name] = mean_score
            print(f"{name}: {mean_score:.4f} (+/- {std_score:.4f})")

        # Keep the model with the highest mean accuracy.
        best_model_name = max(model_scores, key=model_scores.get)
        self.best_model = self.trained_models[best_model_name]
        print(f"\n最佳模型: {best_model_name}")

        return model_scores

    def save_best_model(self, filepath='models/best_model.pkl'):
        """Serialize the best model to ``filepath`` with joblib.

        Args:
            filepath: Destination path; parent directories are created if
                missing (the default 'models/' may not exist yet).

        Raises:
            ValueError: If evaluate_models() has not selected a model yet.
        """
        if self.best_model is None:
            raise ValueError("没有可用的最佳模型，请先训练和评估模型")

        # joblib.dump does not create directories; make sure the parent exists.
        parent_dir = os.path.dirname(filepath)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)

        joblib.dump(self.best_model, filepath)
        print(f"最佳模型已保存到: {filepath}")
