from sklearn.metrics import accuracy_score

class DefenseEvaluator:
    """Compare a defended model against the original (undefended) model.

    Measures how much accuracy the defense preserves on clean inputs and
    how much it recovers on adversarial inputs, using the same test labels
    for both models.
    """

    def __init__(self, original_model, defended_model):
        # Both models must expose a ``predict(X)`` method (sklearn-style).
        self.original_model = original_model
        self.defended_model = defended_model

    def evaluate_defense(self, X_clean, X_adv, y_true):
        """Evaluate defense effectiveness on clean and adversarial inputs.

        Args:
            X_clean: Feature matrix of unperturbed samples.
            X_adv: Feature matrix of adversarial samples; assumed to be
                aligned row-for-row with ``y_true`` (same as ``X_clean``).
            y_true: Ground-truth labels shared by both input sets.

        Returns:
            Nested dict ``{'original' | 'defended': {'clean' | 'adversarial':
            accuracy}}`` where each accuracy is a float in [0, 1] computed
            via ``sklearn.metrics.accuracy_score``.
        """
        models = {
            'original': self.original_model,
            'defended': self.defended_model,
        }
        datasets = {
            'clean': X_clean,
            'adversarial': X_adv,
        }
        # One accuracy per (model, dataset) pair; replaces four copies of
        # the same expression with a single data-driven comprehension.
        return {
            model_name: {
                data_name: accuracy_score(y_true, model.predict(X))
                for data_name, X in datasets.items()
            }
            for model_name, model in models.items()
        }