import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import warnings
import xgboost as xgb
from sklearn.metrics import accuracy_score, roc_auc_score, classification_report
import matplotlib.pyplot as plt
import joblib
import os

# Suppress UserWarning noise emitted by XGBoost and scikit-learn
warnings.filterwarnings('ignore', category=UserWarning, module='xgboost')
warnings.filterwarnings('ignore', category=UserWarning, module='sklearn')

# Silence XGBoost's native-library output via its environment variable
os.environ['XGBOOST_VERBOSE'] = '0'

# Configure matplotlib fonts so the Chinese plot labels below render correctly
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

class ModelTrainer:
    """Unified trainer for a two-level stacking ensemble.

    Level 0 is a set of heterogeneous base classifiers (random forest,
    XGBoost, logistic regression, SVC, LDA, gradient boosting); level 1 is a
    meta model (logistic regression) trained on the base models' out-of-fold
    predicted probabilities.

    Attributes:
        config: hyper-parameter dict; see ``_get_default_config`` for shape.
        base_models: mapping of model name -> estimator instance.
        meta_model: fitted level-1 estimator, or None before training.
        training_results: per-stage metrics collected during training.
    """

    def __init__(self, config=None):
        # Use the caller-supplied config as-is, otherwise fall back to defaults.
        self.config = config or self._get_default_config()
        self.base_models = {}
        self.meta_model = None
        self.training_results = {}

    def _get_default_config(self):
        """Return the default hyper-parameter configuration dict."""
        return {
            'random_state': 1234,
            'cv_folds': 5,
            'base_models': {
                'random_forest': {
                    'n_estimators': 400,
                    'max_depth': 10,
                    'min_samples_leaf': 1,
                    'min_samples_split': 10,
                    'n_jobs': -1
                },
                'xgboost': {
                    'n_estimators': 100,
                    'max_depth': 5,
                    'learning_rate': 0.1,
                    'n_jobs': 1,  # single-threaded to avoid multithreading warnings
                    'verbosity': 0,  # keep XGBoost's own logging quiet
                    'enable_categorical': False,  # categorical-feature support disabled
                    'tree_method': 'hist'  # hist builder avoids deprecation warnings
                },
                'logistic_regression': {
                    'C': 0.5,
                    'max_iter': 1000,
                    'n_jobs': -1
                },
                'svc': {
                    'C': 1.0,
                    'kernel': 'rbf',
                    'probability': True  # needed so SVC exposes predict_proba
                },
                'lda': {
                    'solver': 'svd',
                    'shrinkage': None
                },
                'gradient_boosting': {
                    'n_estimators': 100,
                    'learning_rate': 0.1,
                    'max_depth': 3
                }
            },
            'meta_model': {
                'type': 'logistic_regression',
                'C': 1.0,
                'max_iter': 1000,
                'n_jobs': -1
            }
        }

    def create_base_models(self):
        """Instantiate the level-0 estimators from ``config['base_models']``.

        Returns:
            dict: model name -> unfitted estimator (also stored on ``self``).
        """
        models_config = self.config['base_models']

        self.base_models = {
            'random_forest': RandomForestClassifier(
                random_state=self.config['random_state'],
                **models_config['random_forest']
            ),
            'xgboost': xgb.XGBClassifier(
                random_state=self.config['random_state'],
                **models_config['xgboost']
            ),
            'logistic_regression': LogisticRegression(
                random_state=self.config['random_state'],
                **models_config['logistic_regression']
            ),
            'svc': SVC(
                random_state=self.config['random_state'],
                **models_config['svc']
            ),
            # LDA's svd solver is deterministic and takes no random_state.
            'lda': LinearDiscriminantAnalysis(
                **models_config['lda']
            ),
            'gradient_boosting': GradientBoostingClassifier(
                random_state=self.config['random_state'],
                **models_config['gradient_boosting']
            )
        }

        print(f"创建了 {len(self.base_models)} 个基模型")
        return self.base_models

    def train_base_models(self, X_train, y_train, X_test, y_test):
        """Fit every base model and evaluate it on the held-out test split.

        Each fitted model is persisted via ``_save_model``. Metrics per model
        (accuracy, ROC-AUC, precision/recall/F1 for the positive class) are
        stored under ``training_results['base_models']``.

        Returns:
            pandas.DataFrame: one row of metrics per base model.
        """
        print("=== 训练基模型 ===")
        results = []

        for name, model in self.base_models.items():
            print(f"训练 {name}...")

            model.fit(X_train, y_train)

            # Prefer probabilities for AUC; fall back to decision scores for
            # estimators that lack predict_proba.
            if hasattr(model, "predict_proba"):
                y_pred_proba = model.predict_proba(X_test)[:, 1]
                y_pred = model.predict(X_test)
                auc_score = roc_auc_score(y_test, y_pred_proba)
            else:
                y_pred = model.predict(X_test)
                y_pred_proba = model.decision_function(X_test)
                auc_score = roc_auc_score(y_test, y_pred_proba)

            accuracy = accuracy_score(y_test, y_pred)
            report = classification_report(y_test, y_pred, output_dict=True)

            # NOTE(review): indexing report['1'] assumes binary labels {0, 1};
            # other label encodings would raise KeyError — confirm with callers.
            result = {
                'model': name,
                'accuracy': accuracy,
                'auc': auc_score,
                'precision': report['1']['precision'],
                'recall': report['1']['recall'],
                'f1': report['1']['f1-score']
            }
            results.append(result)

            print(f"  {name:20s} 准确率: {accuracy:.4f}, ROC-AUC: {auc_score:.4f}")

            self._save_model(model, name)

        self.training_results['base_models'] = pd.DataFrame(results)
        return self.training_results['base_models']

    def generate_meta_features(self, X, y, n_folds=None):
        """Build out-of-fold level-1 training features via K-fold CV.

        Args:
            X: training features, indexed positionally (numpy-style) —
               assumes an ndarray rather than a DataFrame; TODO confirm.
            y: training labels as a pandas Series (indexed with ``.iloc``).
            n_folds: number of CV folds; defaults to ``config['cv_folds']``.

        Returns:
            ndarray of shape (n_samples, n_base_models) with each model's
            out-of-fold positive-class probability (or decision score).
        """
        print("=== 生成元特征 ===")

        # Fix: previously hard-coded to 5, silently ignoring config['cv_folds'].
        if n_folds is None:
            n_folds = self.config.get('cv_folds', 5)

        meta_features = np.zeros((X.shape[0], len(self.base_models)))
        kf = KFold(n_splits=n_folds, shuffle=True, random_state=self.config['random_state'])

        for fold, (train_idx, val_idx) in enumerate(kf.split(X)):
            print(f"处理第 {fold + 1}/{n_folds} 折交叉验证...")

            X_train, X_val = X[train_idx], X[val_idx]
            y_train = y.iloc[train_idx]

            for i, (name, model) in enumerate(self.base_models.items()):
                model.fit(X_train, y_train)

                if hasattr(model, "predict_proba"):
                    meta_features[val_idx, i] = model.predict_proba(X_val)[:, 1]
                else:
                    meta_features[val_idx, i] = model.decision_function(X_val)

        return meta_features

    def generate_test_meta_features(self, X_train, y_train, X_test):
        """Build level-1 features for the test set.

        Each base model is refit on the full training set, then its test-set
        positive-class probability (or decision score) becomes one column.

        Returns:
            ndarray of shape (n_test_samples, n_base_models).
        """
        print("=== 生成测试集元特征 ===")

        test_meta_features = np.zeros((X_test.shape[0], len(self.base_models)))

        for i, (name, model) in enumerate(self.base_models.items()):
            model.fit(X_train, y_train)

            if hasattr(model, "predict_proba"):
                test_meta_features[:, i] = model.predict_proba(X_test)[:, 1]
            else:
                test_meta_features[:, i] = model.decision_function(X_test)

        return test_meta_features

    def train_meta_model(self, meta_features, y_train, test_meta_features, y_test):
        """Train the level-1 model on meta features and evaluate on the test set.

        Raises:
            ValueError: if ``config['meta_model']['type']`` is unsupported
                (previously this fell through to an opaque AttributeError).

        Returns:
            The fitted meta estimator (also stored on ``self.meta_model``).
        """
        print("=== 训练元模型 ===")

        meta_config = self.config['meta_model']
        if meta_config['type'] == 'logistic_regression':
            self.meta_model = LogisticRegression(
                random_state=self.config['random_state'],
                **{k: v for k, v in meta_config.items() if k != 'type'}
            )
        else:
            raise ValueError(f"Unsupported meta model type: {meta_config['type']}")

        self.meta_model.fit(meta_features, y_train)

        y_pred = self.meta_model.predict(test_meta_features)
        y_pred_proba = self.meta_model.predict_proba(test_meta_features)[:, 1]

        accuracy = accuracy_score(y_test, y_pred)
        auc_score = roc_auc_score(y_test, y_pred_proba)

        print("元模型性能:")
        print(f"  准确率: {accuracy:.4f}")
        print(f"  ROC-AUC: {auc_score:.4f}")
        print("\n分类报告:")
        print(classification_report(y_test, y_pred))

        self._save_model(self.meta_model, 'meta_model')

        self.training_results['meta_model'] = {
            'accuracy': accuracy,
            'auc': auc_score,
            'predictions': y_pred,
            'probabilities': y_pred_proba
        }

        return self.meta_model

    def _save_model(self, model, name):
        """Persist *model* to ../models/<name>.pkl, creating the directory."""
        os.makedirs('../models', exist_ok=True)
        joblib.dump(model, f'../models/{name}.pkl')

    def save_training_results(self, path='../results/training_results.csv'):
        """Write the base-model metrics table to *path* as CSV."""
        # Fix: os.makedirs('') raises FileNotFoundError when path has no
        # directory component, so only create the directory when one exists.
        directory = os.path.dirname(path)
        if directory:
            os.makedirs(directory, exist_ok=True)

        if 'base_models' in self.training_results:
            self.training_results['base_models'].to_csv(path, index=False)
            print(f"训练结果已保存到 {path}")

    def plot_roc_curves(self, X_test, y_test, save_path='../results/figures/roc_curves.png'):
        """Plot ROC curves of all base models (and the meta model) to *save_path*.

        Assumes the base models have already been fitted; the meta model's
        curve uses the test-set probabilities cached by ``train_meta_model``.
        """
        print("=== 绘制ROC曲线 ===")

        # Fix: was imported inside the base-model loop, so an empty
        # base_models dict would leave roc_curve undefined for the meta model.
        from sklearn.metrics import roc_curve

        plt.figure(figsize=(10, 8))

        # One curve per fitted base model.
        for name, model in self.base_models.items():
            if hasattr(model, "predict_proba"):
                y_pred_proba = model.predict_proba(X_test)[:, 1]
            else:
                y_pred_proba = model.decision_function(X_test)

            fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
            auc_score = roc_auc_score(y_test, y_pred_proba)

            plt.plot(fpr, tpr, label=f'{name} (AUC = {auc_score:.4f})')

        # Meta-model curve, drawn thicker, from the cached test predictions.
        if self.meta_model is not None:
            if 'meta_model' in self.training_results:
                y_pred_proba = self.training_results['meta_model']['probabilities']
                fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
                auc_score = roc_auc_score(y_test, y_pred_proba)
                plt.plot(fpr, tpr, label=f'Meta Model (AUC = {auc_score:.4f})', linewidth=3)

        plt.plot([0, 1], [0, 1], 'k--', label='Random')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('假正例率 (FPR)')
        plt.ylabel('真正例率 (TPR)')
        plt.title('ROC曲线比较')
        plt.legend()
        plt.grid(alpha=0.3)

        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"ROC曲线已保存到 {save_path}")


if __name__ == '__main__':
    # Smoke-test the trainer end to end against the project data files.
    from data_processor import DataProcessor

    def _run_pipeline():
        """Load data, train the stacked ensemble, and persist all artifacts."""
        # Load and preprocess the train/test splits.
        processor = DataProcessor()
        train_data, test_data = processor.load_data('../data/train.csv', '../data/test2.csv')
        X_train, X_test, y_train, y_test = processor.preprocess_data(train_data, test_data)

        # Build the level-0 estimators.
        trainer = ModelTrainer()
        trainer.create_base_models()

        # Fit and evaluate each base learner on the held-out test split.
        trainer.train_base_models(X_train, y_train, X_test, y_test)

        # Out-of-fold predictions become the meta model's features.
        meta_features = trainer.generate_meta_features(X_train, y_train)
        test_meta_features = trainer.generate_test_meta_features(X_train, y_train, X_test)

        # Train the level-1 model, then persist metrics and the ROC plot.
        trainer.train_meta_model(meta_features, y_train, test_meta_features, y_test)
        trainer.save_training_results()
        trainer.plot_roc_curves(X_test, y_test)

    _run_pipeline()