import pandas as pd
import numpy as np
import joblib
import os
from sklearn.metrics import accuracy_score, roc_auc_score, classification_report, confusion_matrix
from sklearn.metrics import precision_recall_curve, f1_score
import matplotlib.pyplot as plt
import seaborn as sns

# Configure matplotlib so Chinese (CJK) glyphs render, with a Latin fallback,
# and keep the minus sign displayable under a CJK font.
plt.rcParams.update({
    'font.sans-serif': ['SimHei', 'Microsoft YaHei', 'DejaVu Sans'],
    'axes.unicode_minus': False,
})

class ModelEvaluator:
    """Unified evaluator for a stacking ensemble of attrition models.

    Loads a fitted preprocessor, an optional feature selector, a set of
    pickled base models, and an optional stacking meta-model from
    ``config['model_dir']``; computes accuracy / ROC-AUC / F1 / precision /
    recall for each model; and renders confusion-matrix and ROC figures.
    """

    def __init__(self, config=None):
        """Create an evaluator.

        Args:
            config: Optional dict overriding the defaults from
                :meth:`_get_default_config` (paths, dropped columns,
                target column name).
        """
        self.config = config or self._get_default_config()
        self.models = {}              # name -> fitted estimator (may include 'meta_model')
        self.preprocessor = None      # fitted transformer loaded from disk
        self.feature_selector = None  # optional fitted selector loaded from disk
        self.evaluation_results = []  # per-model metric dicts filled by evaluate_all_models()

    def _get_default_config(self):
        """Return the default paths/columns configuration."""
        return {
            'model_dir': '../models',
            'results_dir': '../results',
            'figures_dir': '../results/figures',
            # Columns removed before preprocessing (identifier / constant columns).
            'drop_columns': ['EmployeeNumber', 'Over18', 'StandardHours'],
            'target_column': 'Attrition'
        }

    @staticmethod
    def _ensure_parent_dir(path):
        """Create the parent directory of *path* if the path has one.

        Guards against ``os.makedirs('')`` raising when *path* is a bare
        filename with no directory component.
        """
        parent = os.path.dirname(path)
        if parent:
            os.makedirs(parent, exist_ok=True)

    def load_models(self):
        """Load preprocessor, feature selector, base models and meta-model.

        Missing pickle files are skipped silently so a partial model set
        still loads.  NOTE(review): if a base-model pickle is missing, the
        meta-feature columns produced by :meth:`predict_meta_model` will no
        longer match the column order used when the meta-model was trained —
        confirm all six base models exist before trusting stacked output.

        Returns:
            dict: mapping of model name -> loaded estimator.
        """
        print("=== 加载模型 ===")

        model_dir = self.config['model_dir']

        preprocessor_path = os.path.join(model_dir, 'preprocessor.pkl')
        if os.path.exists(preprocessor_path):
            self.preprocessor = joblib.load(preprocessor_path)
            print("✓ 预处理器加载成功")

        selector_path = os.path.join(model_dir, 'preprocessor_selector.pkl')
        if os.path.exists(selector_path):
            self.feature_selector = joblib.load(selector_path)
            print("✓ 特征选择器加载成功")

        # Base models are loaded in this fixed order; dict insertion order
        # then determines the meta-feature column order.
        base_models = ['random_forest', 'xgboost', 'logistic_regression', 'svc', 'lda', 'gradient_boosting']
        for model_name in base_models:
            model_path = os.path.join(model_dir, f'{model_name}.pkl')
            if os.path.exists(model_path):
                self.models[model_name] = joblib.load(model_path)
                print(f"✓ {model_name} 模型加载成功")

        # The meta-model is loaded last and is keyed 'meta_model'.
        meta_model_path = os.path.join(model_dir, 'meta_model.pkl')
        if os.path.exists(meta_model_path):
            self.models['meta_model'] = joblib.load(meta_model_path)
            print("✓ 元模型加载成功")

        print(f"总共加载了 {len(self.models)} 个模型")
        return self.models

    def preprocess_data(self, data):
        """Apply the loaded preprocessor (and selector, if any) to *data*.

        Args:
            data: Raw feature DataFrame.

        Returns:
            Transformed feature matrix ready for model input.

        Raises:
            ValueError: If the preprocessor has not been loaded yet.
        """
        if self.preprocessor is None:
            raise ValueError("预处理器未加载，请先调用 load_models()")

        # Drop identifier/constant columns; errors='ignore' tolerates their absence.
        data_clean = data.drop(columns=self.config['drop_columns'], errors='ignore')

        data_processed = self.preprocessor.transform(data_clean)

        if self.feature_selector is not None:
            data_processed = self.feature_selector.transform(data_processed)

        return data_processed

    def predict_single_model(self, model, X_test, model_name):
        """Return (labels, scores) for one fitted estimator.

        Args:
            model: Fitted estimator.
            X_test: Preprocessed feature matrix.
            model_name: Name of the model (kept for interface compatibility;
                not used in the computation).

        Returns:
            tuple: (y_pred labels, positive-class scores).  For estimators
            without ``predict_proba`` the scores come from
            ``decision_function`` — they rank correctly for ROC-AUC but are
            NOT calibrated probabilities.
        """
        if hasattr(model, "predict_proba"):
            y_pred_proba = model.predict_proba(X_test)[:, 1]
            y_pred = model.predict(X_test)
        else:
            y_pred = model.predict(X_test)
            y_pred_proba = model.decision_function(X_test)

        return y_pred, y_pred_proba

    def predict_meta_model(self, X_test, y_test):
        """Predict with the stacking meta-model on base-model scores.

        Args:
            X_test: Preprocessed feature matrix.
            y_test: Unused; kept for interface compatibility.

        Returns:
            tuple: (y_pred labels, positive-class probabilities).

        Raises:
            ValueError: If the meta-model has not been loaded.
        """
        if 'meta_model' not in self.models:
            raise ValueError("元模型未加载")

        # Enumerate ONLY the base models so each one gets its own column.
        # (Previously the index came from enumerating all models including
        # 'meta_model', which misaligns/overflows columns whenever the
        # meta-model is not the last dict entry.)
        base_items = [(name, mdl) for name, mdl in self.models.items()
                      if name != 'meta_model']
        test_meta_features = np.zeros((X_test.shape[0], len(base_items)))

        for col, (name, model) in enumerate(base_items):
            _, y_pred_proba = self.predict_single_model(model, X_test, name)
            test_meta_features[:, col] = y_pred_proba

        meta_model = self.models['meta_model']
        y_pred = meta_model.predict(test_meta_features)
        y_pred_proba = meta_model.predict_proba(test_meta_features)[:, 1]

        return y_pred, y_pred_proba

    def _compute_metrics(self, y_test, y_pred, y_pred_proba, model_name):
        """Build the per-model result dict shared by all evaluation paths."""
        report = classification_report(y_test, y_pred, output_dict=True)
        # NOTE(review): assumes the positive class is labelled 1 in y_test;
        # string labels (e.g. 'Yes') would raise KeyError here — confirm
        # against the upstream label encoding.
        return {
            'model': model_name,
            'accuracy': accuracy_score(y_test, y_pred),
            'auc': roc_auc_score(y_test, y_pred_proba),
            'f1': f1_score(y_test, y_pred),
            'precision': report['1']['precision'],
            'recall': report['1']['recall'],
            'predictions': y_pred,
            'probabilities': y_pred_proba,
            'confusion_matrix': confusion_matrix(y_test, y_pred),
        }

    def evaluate_model(self, model, X_test, y_test, model_name):
        """Evaluate one base model and return its metric dict."""
        y_pred, y_pred_proba = self.predict_single_model(model, X_test, model_name)
        return self._compute_metrics(y_test, y_pred, y_pred_proba, model_name)

    def evaluate_all_models(self, X_test, y_test):
        """Evaluate every loaded base model plus the meta-model (if present).

        Stores the results in ``self.evaluation_results`` and returns them.
        """
        print("=== 模型评估 ===")

        results = []

        # Base models first.
        for name, model in self.models.items():
            if name != 'meta_model':
                print(f"评估 {name}...")
                result = self.evaluate_model(model, X_test, y_test, name)
                results.append(result)

                print(f"  {name:20s} 准确率: {result['accuracy']:.4f}, "
                      f"ROC-AUC: {result['auc']:.4f}, F1: {result['f1']:.4f}")

        # Then the stacked meta-model, via the same metric helper.
        if 'meta_model' in self.models:
            print("评估 meta_model...")
            y_pred, y_pred_proba = self.predict_meta_model(X_test, y_test)
            meta_result = self._compute_metrics(y_test, y_pred, y_pred_proba, 'meta_model')
            results.append(meta_result)

            print(f"  {'meta_model':15s} 准确率: {meta_result['accuracy']:.4f}, "
                  f"ROC-AUC: {meta_result['auc']:.4f}, F1: {meta_result['f1']:.4f}")

        self.evaluation_results = results
        return results

    def generate_evaluation_report(self, save_path=None):
        """Write a CSV comparison of all evaluated models and print it.

        Args:
            save_path: Target CSV path; defaults to
                ``<results_dir>/evaluation_report.csv``.

        Returns:
            pandas.DataFrame with one row per model, or None if nothing
            has been evaluated yet.
        """
        if not self.evaluation_results:
            print("没有评估结果，请先调用 evaluate_all_models()")
            return

        report_data = [{
            'Model': result['model'],
            'Accuracy': result['accuracy'],
            'ROC-AUC': result['auc'],
            'F1-Score': result['f1'],
            'Precision': result['precision'],
            'Recall': result['recall'],
        } for result in self.evaluation_results]

        report_df = pd.DataFrame(report_data)

        if save_path is None:
            save_path = os.path.join(self.config['results_dir'], 'evaluation_report.csv')

        self._ensure_parent_dir(save_path)
        report_df.to_csv(save_path, index=False)
        print(f"评估报告已保存到 {save_path}")

        print("\n=== 模型性能对比 ===")
        print(report_df.to_string(index=False))

        return report_df

    def plot_confusion_matrices(self, save_path=None):
        """Plot one confusion-matrix heatmap per evaluated model.

        The grid is sized from the number of results (previously a fixed
        2x3 grid silently dropped any model beyond the sixth, e.g. the
        meta-model when all six base models loaded).
        """
        if not self.evaluation_results:
            print("没有评估结果，请先调用 evaluate_all_models()")
            return

        n_models = len(self.evaluation_results)
        n_cols = 3
        n_rows = (n_models + n_cols - 1) // n_cols  # ceil division
        fig, axes = plt.subplots(n_rows, n_cols, figsize=(5 * n_cols, 5 * n_rows))
        # np.atleast_1d handles the single-Axes case plt.subplots can return.
        axes = np.atleast_1d(axes).flatten()

        for ax, result in zip(axes, self.evaluation_results):
            sns.heatmap(result['confusion_matrix'], annot=True, fmt='d',
                        cmap='Blues', ax=ax)
            ax.set_title(f"{result['model']} Confusion Matrix")
            ax.set_xlabel('Predicted')
            ax.set_ylabel('Actual')

        # Hide any unused axes in the final row.
        for ax in axes[n_models:]:
            ax.set_visible(False)

        plt.tight_layout()

        if save_path is None:
            save_path = os.path.join(self.config['figures_dir'], 'confusion_matrices.png')

        self._ensure_parent_dir(save_path)
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"混淆矩阵图已保存到 {save_path}")

    def plot_roc_curves(self, X_test, y_test, save_path=None):
        """Plot ROC curves for all evaluated models on one figure.

        Uses the probabilities cached in ``self.evaluation_results``;
        *X_test* is unused but kept for interface compatibility.
        """
        if not self.evaluation_results:
            print("没有评估结果，请先调用 evaluate_all_models()")
            return

        # Hoisted out of the per-model loop.
        from sklearn.metrics import roc_curve

        plt.figure(figsize=(10, 8))

        for result in self.evaluation_results:
            fpr, tpr, _ = roc_curve(y_test, result['probabilities'])
            plt.plot(fpr, tpr,
                     label=f"{result['model']} (AUC = {result['auc']:.4f})")

        plt.plot([0, 1], [0, 1], 'k--', label='Random')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('假正例率 (FPR)')
        plt.ylabel('真正例率 (TPR)')
        plt.title('ROC曲线比较')
        plt.legend()
        plt.grid(alpha=0.3)

        if save_path is None:
            save_path = os.path.join(self.config['figures_dir'], 'roc_curves.png')

        self._ensure_parent_dir(save_path)
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"ROC曲线图已保存到 {save_path}")

    def predict_new_data(self, data, model_name='meta_model'):
        """Preprocess raw *data* and predict with the named model.

        Args:
            data: Raw feature DataFrame.
            model_name: Key of the model to use; defaults to 'meta_model'.

        Returns:
            tuple: (y_pred labels, positive-class scores).

        Raises:
            ValueError: If no models are loaded or the name is unknown.
            NotImplementedError: For 'meta_model' (stacked prediction on
                new data would need the full training-time meta pipeline).
        """
        if not self.models:
            raise ValueError("模型未加载，请先调用 load_models()")

        if model_name not in self.models:
            raise ValueError(f"模型 {model_name} 不存在")

        X_processed = self.preprocess_data(data)

        if model_name == 'meta_model':
            # Simplified here; a real deployment needs the full stacking pipeline.
            raise NotImplementedError("元模型预测需要完整的训练数据")
        else:
            model = self.models[model_name]
            y_pred, y_pred_proba = self.predict_single_model(model, X_processed, model_name)

        return y_pred, y_pred_proba


if __name__ == '__main__':
    # Smoke-test the evaluator end to end against the project data files.
    from data_processor import DataProcessor

    # Load and preprocess the train/test split.
    data_proc = DataProcessor()
    train_df, test_df = data_proc.load_data('../data/train.csv', '../data/test2.csv')
    X_tr, X_te, y_tr, y_te = data_proc.preprocess_data(train_df, test_df)

    # Load every persisted model.
    model_eval = ModelEvaluator()
    model_eval.load_models()

    # Score all models on the held-out set.
    all_results = model_eval.evaluate_all_models(X_te, y_te)

    # Persist the metric comparison as CSV.
    comparison_df = model_eval.generate_evaluation_report()

    # Render confusion-matrix and ROC figures.
    model_eval.plot_confusion_matrices()
    model_eval.plot_roc_curves(X_te, y_te)