from sklearn.metrics import (
    accuracy_score, precision_score, recall_score, 
    f1_score, roc_auc_score, confusion_matrix
)
import numpy as np

class ModelEvaluator:
    """Evaluate a fitted binary classifier on a held-out test set.

    Wraps a model plus its test split and derives the standard binary
    classification metrics from a single prediction pass.

    Attributes:
        model: Fitted estimator exposing ``predict`` and, optionally,
            ``predict_proba`` (sklearn-style interface).
        X_test: Feature matrix of the held-out test set.
        y_test: Ground-truth binary labels for ``X_test``.
    """

    def __init__(self, model, X_test, y_test):
        self.model = model
        self.X_test = X_test
        self.y_test = y_test

    def full_evaluation(self, threshold=0.5):
        """Run a full model evaluation.

        Args:
            threshold: Decision threshold applied to the positive-class
                probability. Only used when the model exposes
                ``predict_proba``; otherwise ``predict`` output is taken
                as-is and ``roc_auc`` falls back to the chance value 0.5.

        Returns:
            dict with keys ``accuracy``, ``precision``, ``recall``, ``f1``,
            ``roc_auc``, ``confusion_matrix`` (2x2 ndarray),
            ``false_positive_rate`` and ``false_negative_rate``.
        """
        has_proba = hasattr(self.model, 'predict_proba')
        if has_proba:
            # Positive-class probability (column 1); thresholded manually so
            # callers can trade precision against recall via `threshold`.
            y_proba = self.model.predict_proba(self.X_test)[:, 1]
            y_pred = (y_proba >= threshold).astype(int)
        else:
            y_pred = self.model.predict(self.X_test)

        # Compute the confusion matrix once and derive the error rates from
        # it, instead of recomputing it inside each helper (the original did
        # three identical passes over y_test/y_pred).
        cm = confusion_matrix(self.y_test, y_pred)
        # NOTE(review): .ravel() assumes a 2x2 matrix, i.e. both classes
        # appear in y_test/y_pred combined — single-class test sets would
        # need an explicit `labels=` argument; confirm against callers.
        tn, fp, fn, tp = cm.ravel()

        metrics = {
            'accuracy': accuracy_score(self.y_test, y_pred),
            # zero_division=0 keeps the original returned value (0.0) for
            # degenerate inputs but suppresses UndefinedMetricWarning.
            'precision': precision_score(self.y_test, y_pred, zero_division=0),
            'recall': recall_score(self.y_test, y_pred, zero_division=0),
            'f1': f1_score(self.y_test, y_pred, zero_division=0),
            # Without probabilities there is no meaningful ROC curve; 0.5 is
            # the chance-level placeholder the original API exposed.
            'roc_auc': roc_auc_score(self.y_test, y_proba) if has_proba else 0.5,
            'confusion_matrix': cm,
            # Guard against empty denominators (no actual negatives /
            # positives in the test set) instead of raising ZeroDivisionError.
            'false_positive_rate': fp / (fp + tn) if (fp + tn) > 0 else 0.0,
            'false_negative_rate': fn / (fn + tp) if (fn + tp) > 0 else 0.0,
        }
        return metrics

    def _calculate_fpr(self, y_pred):
        """Return the false positive rate FP / (FP + TN).

        Returns 0.0 when the test set contains no actual negatives, rather
        than dividing by zero.
        """
        tn, fp, _, _ = confusion_matrix(self.y_test, y_pred).ravel()
        return fp / (fp + tn) if (fp + tn) > 0 else 0.0

    def _calculate_fnr(self, y_pred):
        """Return the false negative rate FN / (FN + TP).

        Returns 0.0 when the test set contains no actual positives, rather
        than dividing by zero.
        """
        _, _, fn, tp = confusion_matrix(self.y_test, y_pred).ravel()
        return fn / (fn + tp) if (fn + tp) > 0 else 0.0