# Adversarial-robustness demo pipeline:
# 1) craft adversarial examples, 2) harden the model, 3) compare
# original vs. defended accuracy on those examples.
# NOTE(review): `model`, `feature_names`, `X_train`/`X_test`,
# `y_train`/`y_test`, and the three classes used below are defined
# elsewhere in this file/project — not visible in this chunk.

# Generate adversarial samples via FGSM against the current model.
generator = AdversarialGenerator(model, feature_names)
X_adv = generator.fgsm_attack(X_test, y_test)

# Apply defenses: adversarial training (retrains/augments with X_adv)
# and feature squeezing (input preprocessing of the adversarial set).
enhancer = DefenseEnhancer()
defended_model = enhancer.adversarial_training(model, X_train, y_train, X_adv)
X_processed = enhancer.feature_squeezing(X_adv)
# NOTE(review): X_processed is not used below — presumably consumed
# later in the file; verify, otherwise this call is dead code.

# Evaluate defense effectiveness: the evaluator compares the original
# and the defended model on clean (X_test) and adversarial (X_adv) data.
evaluator = DefenseEvaluator(model, defended_model)
results = evaluator.evaluate_defense(X_test, X_adv, y_test)

# `results` is assumed to be a nested dict:
# results[<'original'|'defended'>]['adversarial'] -> accuracy float.
print(f"原始模型对抗样本准确率: {results['original']['adversarial']:.2f}")
print(f"加固模型对抗样本准确率: {results['defended']['adversarial']:.2f}") 