import os

import joblib
import numpy as np
import pandas as pd
from sklearn.base import clone
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.svm import SVC
from xgboost import XGBClassifier

from data_processor import DataProcessor

class ModelTrainer:
    """Tune, cross-validate and compare several binary classifiers.

    Each candidate model family is tuned with ``GridSearchCV`` (5-fold CV on
    accuracy).  The tuned estimator is then re-scored with a manual stratified
    5-fold CV to collect precision, recall, F1 and ROC-AUC, and the model with
    the highest weighted score

        0.3 * F1 + 0.3 * ROC_AUC + 0.2 * Recall + 0.2 * Precision

    is kept as ``self.best_model``.  Assumes a binary classification target
    (``predict_proba(...)[:, 1]`` is used for ROC-AUC).
    """

    def __init__(self, config):
        """
        Args:
            config: configuration object, forwarded to ``DataProcessor``.
        """
        self.config = config
        self.data_processor = DataProcessor(config)
        # Candidate models and their hyper-parameter search grids.
        self.models = {
            'AdaBoost': {
                'model': AdaBoostClassifier(random_state=42),
                'params': {
                    'n_estimators': [50, 100, 200],
                    'learning_rate': [0.01, 0.1, 1.0],
                    'algorithm': ['SAMME']
                }
            },
            'GradientBoosting': {
                'model': GradientBoostingClassifier(random_state=42),
                'params': {
                    'n_estimators': [100, 200],
                    'learning_rate': [0.01, 0.1],
                    'max_depth': [3, 5, 7],
                    'min_samples_split': [2, 5]
                }
            },
            'RandomForest': {
                'model': RandomForestClassifier(random_state=42),
                'params': {
                    'n_estimators': [100, 200],
                    'max_depth': [3, 5, 7, None],
                    'min_samples_split': [2, 5]
                }
            },
            'LogisticRegression': {
                'model': LogisticRegression(random_state=42),
                'params': {
                    'C': [0.001, 0.01, 0.1, 1, 10],
                    'penalty': ['l1', 'l2'],
                    'solver': ['liblinear']
                }
            },
            'SVC': {
                'model': SVC(probability=True, random_state=42),
                'params': {
                    'C': [0.1, 1, 10],
                    'kernel': ['rbf', 'linear'],
                    'gamma': ['scale', 'auto']
                }
            },
            'XGBoost': {
                'model': XGBClassifier(random_state=42),
                'params': {
                    'n_estimators': [100, 200],
                    'max_depth': [3, 5, 7],
                    'learning_rate': [0.01, 0.1],
                    'subsample': [0.8, 0.9, 1.0],
                    'colsample_bytree': [0.8, 0.9, 1.0]
                }
            }
        }

        self.best_model = None     # best estimator found so far (fit on full training data)
        self.best_score = 0        # best weighted score seen so far
        self.model_metrics = {}    # per-model metric dicts keyed by model name
        self.trained_models = {}   # tuned best estimator per model family

    def _preprocess(self, X):
        """Apply outlier removal and missing-value imputation to X.

        Accepts either a DataFrame or a numpy array; the return type matches
        the input type (arrays are round-tripped through a DataFrame built
        with the processor's feature names, since the processor works on
        DataFrames).
        """
        was_array = isinstance(X, np.ndarray)
        frame = pd.DataFrame(X, columns=self.data_processor.feature_names) if was_array else X
        frame = self.data_processor.remove_outliers(frame)
        frame = self.data_processor.fill_missing_values(frame)
        return frame.values if was_array else frame

    @staticmethod
    def _take_rows(data, idx):
        """Positional row selection that works for DataFrame/Series AND ndarray.

        Fixes a crash in the original code: when the caller passed a numpy
        array, ``.iloc`` was used unconditionally and raised AttributeError.
        """
        if isinstance(data, (pd.DataFrame, pd.Series)):
            return data.iloc[idx]
        return data[idx]

    def _cv_metrics(self, estimator, X, y):
        """Stratified 5-fold CV; returns mean precision/recall/F1/ROC-AUC.

        A ``clone`` of ``estimator`` is fitted on each fold so the caller's
        fully-fitted estimator (refit by GridSearchCV on all training data)
        is NOT clobbered by partial-fold fits.
        """
        cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
        fold_scores = {'precision': [], 'recall': [], 'f1': [], 'roc_auc': []}

        for train_idx, val_idx in cv.split(X, y):
            fold_model = clone(estimator)  # fresh, unfitted copy per fold
            fold_model.fit(self._take_rows(X, train_idx), self._take_rows(y, train_idx))

            X_val = self._take_rows(X, val_idx)
            y_val = self._take_rows(y, val_idx)
            y_pred = fold_model.predict(X_val)
            # Binary task: probability of the positive class.
            y_proba = fold_model.predict_proba(X_val)[:, 1]

            fold_scores['precision'].append(precision_score(y_val, y_pred))
            fold_scores['recall'].append(recall_score(y_val, y_pred))
            fold_scores['f1'].append(f1_score(y_val, y_pred))
            fold_scores['roc_auc'].append(roc_auc_score(y_val, y_proba))

        return {name: float(np.mean(vals)) for name, vals in fold_scores.items()}

    def train(self, X_train, y_train):
        """Tune and cross-validate every candidate model; return the best one.

        Args:
            X_train: training features (DataFrame or ndarray).
            y_train: training labels (Series or ndarray), binary.

        Returns:
            The estimator with the highest weighted CV score, fit on the
            full (preprocessed) training data.

        Side effects:
            Populates ``trained_models``, ``model_metrics``,
            ``evaluation_results`` and writes a CSV summary to
            ``src/results/model_evaluation.csv``.
        """
        # Ensure outliers are removed and missing values imputed first.
        X_train_processed = self._preprocess(X_train)

        print("\n开始训练和评估模型...")

        results = []  # one metrics dict per model, for the summary DataFrame

        for name, model_info in self.models.items():
            print(f"\n训练 {name} 模型...")

            grid_search = GridSearchCV(
                estimator=model_info['model'],
                param_grid=model_info['params'],
                cv=5,  # 5-fold cross-validation
                scoring='accuracy',
                n_jobs=-1,
                verbose=1
            )
            grid_search.fit(X_train_processed, y_train)

            # best_estimator_ has been refit on ALL training data (refit=True);
            # keep that object intact — CV below works on clones only.
            self.trained_models[name] = grid_search.best_estimator_

            # CV accuracy comes straight from the grid search (no refit needed).
            cv_accuracy = grid_search.cv_results_['mean_test_score'][grid_search.best_index_]

            # Collect the remaining metrics with a manual stratified CV.
            fold_means = self._cv_metrics(grid_search.best_estimator_,
                                          X_train_processed, y_train)

            metrics = {
                'Model': name,
                'Accuracy': float(cv_accuracy),
                'Precision': fold_means['precision'],
                'Recall': fold_means['recall'],
                'F1_Score': fold_means['f1'],
                'ROC_AUC': fold_means['roc_auc'],
                'Best_Params': str(grid_search.best_params_)
            }

            # Blend metrics into a single model-selection score.
            weighted_score = (
                0.3 * metrics['F1_Score'] +
                0.3 * metrics['ROC_AUC'] +
                0.2 * metrics['Recall'] +
                0.2 * metrics['Precision']
            )
            metrics['Weighted_Score'] = float(weighted_score)

            self.model_metrics[name] = metrics
            results.append(metrics)

            print(f"\n{name} 模型交叉验证评估结果:")
            print(f"最佳参数: {metrics['Best_Params']}")
            print(f"准确率: {metrics['Accuracy']:.4f}")
            print(f"精确率: {metrics['Precision']:.4f}")
            print(f"召回率: {metrics['Recall']:.4f}")
            print(f"F1分数: {metrics['F1_Score']:.4f}")
            print(f"ROC AUC: {metrics['ROC_AUC']:.4f}")
            print(f"加权评分: {metrics['Weighted_Score']:.4f}")

            # Select by weighted score rather than accuracy alone.
            if weighted_score > self.best_score:
                self.best_score = weighted_score
                self.best_model = grid_search.best_estimator_
                print(f"\n发现新的最佳模型: {name}")
                print(f"最佳加权评分: {self.best_score:.4f}")

        self.evaluation_results = pd.DataFrame(results)

        # Put the weighted score right after the model name for visibility.
        if 'Weighted_Score' in self.evaluation_results.columns:
            cols = ['Model', 'Weighted_Score', 'Accuracy', 'Precision', 'Recall', 'F1_Score', 'ROC_AUC', 'Best_Params']
            self.evaluation_results = self.evaluation_results[cols]

        print("\n所有模型交叉验证评估结果汇总:")
        print(self.evaluation_results.to_string(index=False))

        os.makedirs('src/results', exist_ok=True)
        self.evaluation_results.to_csv('src/results/model_evaluation.csv', index=False)
        print("\n评估结果已保存到: src/results/model_evaluation.csv")

        return self.best_model

    def save_model(self, model, filepath):
        """Persist ``model`` to ``filepath`` with joblib, creating parent dirs."""
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        joblib.dump(model, filepath)
        print(f"模型已保存到: {filepath}")

    def load_model(self, filepath):
        """Load a joblib-serialized model, or return None if the file is missing."""
        if os.path.exists(filepath):
            return joblib.load(filepath)
        return None

    def predict(self, data):
        """Predict with the best model.

        Raises:
            ValueError: if ``train`` has not been run yet.
        """
        if self.best_model is None:
            raise ValueError("模型尚未训练")
        return self.best_model.predict(data)

    def evaluate(self, model, X_test, y_test):
        """Evaluate ``model`` on a held-out test set and print/store metrics.

        Args:
            model: a fitted classifier exposing ``predict`` / ``predict_proba``.
            X_test: test features (DataFrame or ndarray).
            y_test: test labels, binary.

        Returns:
            dict of Accuracy / Precision / Recall / F1_Score / ROC_AUC floats.
        """
        # Apply the same preprocessing as during training.
        # NOTE(review): if remove_outliers drops rows, X_test would no longer
        # align with y_test — confirm DataProcessor behavior on test data.
        X_test_processed = self._preprocess(X_test)

        y_pred = model.predict(X_test_processed)
        y_pred_proba = model.predict_proba(X_test_processed)[:, 1]

        metrics = {
            'Accuracy': float(accuracy_score(y_test, y_pred)),
            'Precision': float(precision_score(y_test, y_pred)),
            'Recall': float(recall_score(y_test, y_pred)),
            'F1_Score': float(f1_score(y_test, y_pred)),
            'ROC_AUC': float(roc_auc_score(y_test, y_pred_proba))
        }

        self.model_metrics[model.__class__.__name__] = metrics

        print(f"\n{model.__class__.__name__} 模型评估结果:")
        for metric, value in metrics.items():
            print(f"{metric}: {value:.4f}")

        return metrics