import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import (
    classification_report, confusion_matrix, roc_curve, auc, 
    precision_recall_curve, accuracy_score, precision_score, 
    recall_score, f1_score, roc_auc_score
)
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
import warnings
import argparse
import os
import joblib
from datetime import datetime
import time
from typing import Dict, List, Tuple, Any
import json
import xgboost as xgb  # XGBoost gradient-boosted tree classifier
warnings.filterwarnings('ignore')
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'Arial Unicode MS']
plt.rcParams['axes.unicode_minus'] = False

class MalwareClassifier:
    def __init__(self, random_state: int = None):
        """Initialize classifier state.

        Args:
            random_state: seed for reproducibility; when None, a
                time-derived seed is generated instead.
        """
        if random_state is None:
            random_state = int(time.time() * 1000) % 2147483647
        self.random_state = random_state
        self.experiment_id = f"exp_{int(time.time())}"
        # Containers populated later by prepare_models / train_and_evaluate /
        # feature_importance_analysis.
        self.models = {}
        self.feature_names = []
        self.scaler = StandardScaler()
        self.best_model = None
        self.feature_importance = None
        self.results = {}
        np.random.seed(self.random_state)
    
    def train_and_evaluate(self, X: np.ndarray, y: np.ndarray, test_size: float = 0.2, val_size: float = 0.2):
        """Train and evaluate all prepared models on the full feature set.

        Splits the data into stratified train/validation/test partitions,
        standardizes features for the linear models, fits every model in
        ``self.models`` and records per-model metrics in
        ``self.results['model_scores']``.  The model with the highest
        validation F1 is stored in ``self.best_model``.

        Args:
            X: feature matrix, shape (n_samples, n_features).
            y: binary labels (0 = benign, 1 = malicious).
            test_size: fraction of all data held out for the test set.
            val_size: fraction of all data held out for the validation set.
        """
        # Keep the raw data for later analyses (extreme-case tests etc.).
        self.original_X = X.copy()
        self.original_y = y.copy()

        # Three-way split: first carve off the test set ...
        X_temp, X_test, y_temp, y_test = train_test_split(
            X, y, test_size=test_size, random_state=self.random_state, stratify=y
        )

        # ... then split the remainder so validation is val_size of the total.
        val_size_adjusted = val_size / (1 - test_size)
        X_train, X_val, y_train, y_val = train_test_split(
            X_temp, y_temp, test_size=val_size_adjusted,
            random_state=self.random_state + 1, stratify=y_temp
        )

        # Standardize features (fit on train only, to avoid leakage).
        X_train_scaled = self.scaler.fit_transform(X_train)
        X_val_scaled = self.scaler.transform(X_val)
        X_test_scaled = self.scaler.transform(X_test)

        self.results = {
            'X_train': X_train, 'X_val': X_val, 'X_test': X_test,
            'y_train': y_train, 'y_val': y_val, 'y_test': y_test,
            'X_train_scaled': X_train_scaled, 'X_val_scaled': X_val_scaled, 'X_test_scaled': X_test_scaled,
            'model_scores': {}
        }

        # Negative/positive ratio, used by XGBoost to counter class imbalance.
        y_train_series = pd.Series(y_train)
        scale_pos_weight = y_train_series.value_counts()[0] / y_train_series.value_counts()[1]

        # XGBoost is built lazily here because scale_pos_weight depends on the
        # training split.  BUG FIX: guard with membership + .get() so a missing
        # key (prepare_models not called, or a trimmed model dict) no longer
        # raises KeyError.
        if 'XGBoost' in self.models and self.models.get('XGBoost') is None:
            self.models['XGBoost'] = xgb.XGBClassifier(
                n_estimators=200,
                learning_rate=0.1,
                max_depth=7,
                subsample=0.8,
                colsample_bytree=0.8,
                use_label_encoder=False,
                eval_metric='logloss',
                random_state=self.random_state,
                scale_pos_weight=scale_pos_weight,  # handle class imbalance automatically
                verbosity=0  # silence XGBoost logging
            )

        # Train and evaluate each model.
        # BUG FIX: start below any valid F1 (was 0) so best_model is always
        # assigned even when every validation F1 is exactly 0.0.
        best_score = -1.0
        for name, model in self.models.items():
            print(f"正在训练 {name} 模型...")

            # Linear models get standardized features; tree models use raw values.
            if name in ['Logistic Regression', 'SVM']:
                X_train_use, X_val_use, X_test_use = X_train_scaled, X_val_scaled, X_test_scaled
            else:
                X_train_use, X_val_use, X_test_use = X_train, X_val, X_test

            model.fit(X_train_use, y_train)

            # Predictions and (when supported) class-1 probabilities.
            y_val_pred = model.predict(X_val_use)
            y_test_pred = model.predict(X_test_use)
            y_val_pred_proba = model.predict_proba(X_val_use)[:, 1] if hasattr(model, 'predict_proba') else None
            y_test_pred_proba = model.predict_proba(X_test_use)[:, 1] if hasattr(model, 'predict_proba') else None

            # Model selection uses validation F1; reporting uses the test set.
            val_f1 = f1_score(y_val, y_val_pred)
            test_metrics = {
                'test_accuracy': accuracy_score(y_test, y_test_pred),
                'test_precision': precision_score(y_test, y_test_pred),
                'test_recall': recall_score(y_test, y_test_pred),
                'test_f1': f1_score(y_test, y_test_pred),
                'test_auc': roc_auc_score(y_test, y_test_pred_proba) if y_test_pred_proba is not None else 0.0
            }

            # 5-fold stratified cross-validation on the training split.
            cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=self.random_state)
            cv_scores = cross_val_score(model, X_train_use, y_train, cv=cv, scoring='f1')

            self.results['model_scores'][name] = {
                'model': model,
                'val_f1': val_f1,
                'test_pred': y_test_pred,
                'test_pred_proba': y_test_pred_proba,
                'cv_mean': cv_scores.mean(),
                'cv_std': cv_scores.std(),
                **test_metrics
            }

            print(f"  {name}: F1={test_metrics['test_f1']:.4f}, AUC={test_metrics['test_auc']:.4f}")

            if val_f1 > best_score:
                best_score = val_f1
                self.best_model = name

        print(f"\n最佳模型: {self.best_model} (F1: {self.results['model_scores'][self.best_model]['test_f1']:.4f})")
    def load_data(self, csv_file: str) -> Tuple[np.ndarray, np.ndarray, List[str]]:
        """Load the feature CSV and return (X, y, package_names).

        Rule features are every column prefixed with ``rule_``; graph
        features come from a fixed list (kept in sync with the feature
        extractor) filtered to the columns actually present in the file.
        Missing feature values are median-imputed.
        """
        df = pd.read_csv(csv_file)

        # Rule-based features: identified purely by column-name prefix.
        rule_features = [col for col in df.columns if col.startswith('rule_')]

        # Full graph-feature list (must match the feature extractor exactly).
        graph_features = [
            # basic statistics
            'malicious_node_count', 'malicious_ratio', 'category_diversity',
            'IG_ratio', 'DT_ratio', 'DE_ratio', 'PE_ratio', 'SP_ratio',

            # internal connectivity
            'malicious_internal_edges', 'malicious_density', 'malicious_components',

            # whole-graph centrality
            'mean_degree_centrality_full', 'max_degree_centrality_full',
            'mean_malicious_pagerank', 'max_malicious_pagerank', 'sum_malicious_pagerank',
            'mean_malicious_betweenness', 'max_malicious_betweenness',

            # subgraph centrality
            'mean_degree_centrality_sub', 'max_degree_centrality_sub',
            'mean_pagerank_sub', 'max_pagerank_sub',
            'mean_betweenness_sub', 'max_betweenness_sub',

            # community-structure features
            'num_malicious_communities', 'max_community_maliciousness_ratio', 'malicious_community_entropy'
        ]

        # Keep only the graph features this CSV actually provides,
        # preserving the canonical ordering above.
        present_columns = set(df.columns)
        available_graph_features = [name for name in graph_features if name in present_columns]

        feature_columns = rule_features + available_graph_features
        self.feature_names = list(feature_columns)

        X = df[feature_columns].values
        y = df['label'].values
        if 'package_name' in df.columns:
            package_names = df['package_name'].values
        else:
            package_names = np.arange(len(df))

        # Median imputation for any missing feature values.
        X = SimpleImputer(strategy='median').fit_transform(X)

        self.package_names = package_names
        return X, y, package_names
    def extreme_case_testing(self):
        """极端情况测试"""
        if not self.best_model:
            return
        
        print("\n极端情况测试...")
        
        X = self.original_X
        y = self.original_y
        
        best_model_info = self.results['model_scores'][self.best_model]
        best_model = best_model_info['model']
        
        malicious_indices = np.where(y == 1)[0]
        benign_indices = np.where(y == 0)[0]
        
        # 全良性测试
        X_benign = X[benign_indices]
        if self.best_model in ['Logistic Regression', 'SVM']:
            X_benign_use = self.scaler.transform(X_benign)
        else:
            X_benign_use = X_benign
        
        benign_predictions = best_model.predict(X_benign_use)
        benign_accuracy = (benign_predictions == 0).sum() / len(benign_predictions)
        false_positive_rate = (benign_predictions == 1).sum() / len(benign_predictions)
        
        # 全恶意测试
        X_malicious = X[malicious_indices]
        if self.best_model in ['Logistic Regression', 'SVM']:
            X_malicious_use = self.scaler.transform(X_malicious)
        else:
            X_malicious_use = X_malicious
        
        malicious_predictions = best_model.predict(X_malicious_use)
        malicious_detection_rate = (malicious_predictions == 1).sum() / len(malicious_predictions)
        false_negative_rate = (malicious_predictions == 0).sum() / len(malicious_predictions)
        
        self.results['extreme_tests'] = {
            'benign_accuracy': benign_accuracy,
            'false_positive_rate': false_positive_rate,
            'malicious_detection_rate': malicious_detection_rate,
            'false_negative_rate': false_negative_rate
        }
        
        print(f"全良性测试: 准确率{benign_accuracy:.4f}, 误报率{false_positive_rate:.4f}")
        print(f"全恶意测试: 检出率{malicious_detection_rate:.4f}, 漏报率{false_negative_rate:.4f}")
    def prepare_models(self):
        """Instantiate the candidate classifiers, keyed by display name.

        XGBoost is deliberately left as None here: its scale_pos_weight
        parameter depends on the class balance of the training split, so it
        is constructed inside train_and_evaluate instead.
        """
        seed = self.random_state
        models = {}
        models['Random Forest'] = RandomForestClassifier(
            n_estimators=200, random_state=seed,
            class_weight='balanced', max_features='sqrt'
        )
        models['Gradient Boosting'] = GradientBoostingClassifier(
            n_estimators=150, random_state=seed, learning_rate=0.1
        )
        models['XGBoost'] = None  # placeholder; built lazily in train_and_evaluate
        models['Logistic Regression'] = LogisticRegression(
            random_state=seed, class_weight='balanced',
            max_iter=2000, C=1.0
        )
        models['SVM'] = SVC(
            random_state=seed, class_weight='balanced',
            probability=True, kernel='rbf'
        )
        models['Naive Bayes'] = GaussianNB()
        models['Decision Tree'] = DecisionTreeClassifier(
            random_state=seed, class_weight='balanced', max_depth=20
        )
        self.models = models
    def feature_importance_analysis(self):
        """特征重要性分析"""
        if not self.best_model:
            return
        
        model_info = self.results['model_scores'][self.best_model]
        model = model_info['model']
        
        if hasattr(model, 'feature_importances_'):
            importance = model.feature_importances_
        elif hasattr(model, 'coef_'):
            importance = np.abs(model.coef_[0])
        else:
            return
        
        feature_importance_df = pd.DataFrame({
            'feature': self.feature_names,
            'importance': importance
        }).sort_values('importance', ascending=False)
        
        feature_importance_df['feature_type'] = feature_importance_df['feature'].apply(
            lambda x: 'Rule' if x.startswith('rule_') else 'Graph'
        )
        
        self.feature_importance = feature_importance_df
        
        # 输出Top特征
        print(f"\nTop 10 重要特征:")
        for _, row in feature_importance_df.head(10).iterrows():
            print(f"  {row['feature']}: {row['importance']:.4f}")
        
        # 特征类型统计
        rule_importance = feature_importance_df[feature_importance_df['feature_type'] == 'Rule']['importance'].sum()
        graph_importance = feature_importance_df[feature_importance_df['feature_type'] == 'Graph']['importance'].sum()
        total = rule_importance + graph_importance
        
        if total > 0:
            print(f"\n特征类型贡献: 规则{rule_importance/total*100:.1f}% | 图{graph_importance/total*100:.1f}%")
        
        return feature_importance_df
    def run_ablation_study(self, X: np.ndarray, y: np.ndarray, test_size: float = 0.2, val_size: float = 0.2):
        """Ablation study comparing rule-only, graph-only and combined features.

        For each feature group a small model zoo (Random Forest, XGBoost,
        Logistic Regression) is trained; the model with the best validation
        F1 is then scored on the held-out test set.  Results are stored in
        ``self.ablation_results`` and returned.

        Args:
            X: full feature matrix, shape (n_samples, n_features).
            y: binary labels (0 = benign, 1 = malicious).
            test_size: fraction of all data reserved for the test set.
            val_size: fraction of all data reserved for the validation set.

        Returns:
            Dict mapping group name to its evaluation summary (or a failure
            placeholder with an 'error' key).
        """

        def _failure(feature_count: int, error: str) -> Dict[str, Any]:
            # Uniform placeholder entry for groups that could not be evaluated.
            return {
                'best_model': 'None',
                'best_f1': 0.0,
                'best_accuracy': 0.0,
                'best_precision': 0.0,
                'best_recall': 0.0,
                'feature_count': feature_count,
                'models': {},
                'error': error
            }

        print("\n🧪 开始消融实验...")

        # Feature groups come from the single shared partitioning helper.
        feature_groups = self._define_feature_groups()

        ablation_results = {}

        for group_name, feature_indices in feature_groups.items():
            print(f"\n在评估: {group_name} ({len(feature_indices)}个特征)")

            if len(feature_indices) == 0:
                print(f"⚠️  跳过 {group_name}: 没有对应的特征")
                ablation_results[group_name] = _failure(0, 'No features available')
                continue

            try:
                # Select the columns belonging to this group.
                X_subset = X[:, feature_indices]

                if X_subset.shape[1] == 0:
                    print(f"⚠️  跳过 {group_name}: 特征矩阵为空")
                    ablation_results[group_name] = _failure(0, 'Empty feature matrix')
                    continue

                # First split: train vs (validation + test), stratified.
                X_train, X_temp, y_train, y_temp = train_test_split(
                    X_subset, y, test_size=(test_size + val_size),
                    random_state=self.random_state, stratify=y
                )

                # Second split: carve the held-out pool into validation and test.
                # BUG FIX: train_test_split's test_size is the size of the *test*
                # portion, so it must be test_size/(test_size+val_size).  The old
                # code passed the validation fraction, swapping the two sets
                # whenever test_size != val_size (it was only coincidentally
                # correct at the 0.2/0.2 defaults).
                relative_test_size = test_size / (test_size + val_size)
                X_val, X_test, y_val, y_test = train_test_split(
                    X_temp, y_temp, test_size=relative_test_size,
                    random_state=self.random_state, stratify=y_temp
                )

                # Per-group preprocessing: impute first, then standardize.
                # (StandardScaler / SimpleImputer are imported at module level.)
                scaler = StandardScaler()
                imputer = SimpleImputer(strategy='mean')

                X_train_imputed = imputer.fit_transform(X_train)
                X_val_imputed = imputer.transform(X_val)
                X_test_imputed = imputer.transform(X_test)

                X_train_scaled = scaler.fit_transform(X_train_imputed)
                X_val_scaled = scaler.transform(X_val_imputed)
                X_test_scaled = scaler.transform(X_test_imputed)

                # Negative/positive ratio for XGBoost class-imbalance handling.
                y_train_series = pd.Series(y_train)
                scale_pos_weight = y_train_series.value_counts()[0] / y_train_series.value_counts()[1]

                # Reduced model zoo keeps the ablation runs fast.
                models_to_test = {
                    'Random Forest': RandomForestClassifier(
                        n_estimators=100, random_state=self.random_state,
                        class_weight='balanced', max_features='sqrt'
                    ),
                    'XGBoost': xgb.XGBClassifier(
                        n_estimators=100, learning_rate=0.1, max_depth=6,
                        use_label_encoder=False, eval_metric='logloss',
                        random_state=self.random_state, scale_pos_weight=scale_pos_weight,
                        verbosity=0
                    ),
                    'Logistic Regression': LogisticRegression(
                        random_state=self.random_state, class_weight='balanced',
                        max_iter=1000, C=1.0
                    )
                }

                group_models = {}
                group_scores = {}

                for model_name, model in models_to_test.items():
                    try:
                        # Linear models need standardized inputs; trees take raw values.
                        if model_name == 'Logistic Regression':
                            X_train_use, X_val_use, X_test_use = X_train_scaled, X_val_scaled, X_test_scaled
                        else:
                            X_train_use, X_val_use, X_test_use = X_train_imputed, X_val_imputed, X_test_imputed

                        model.fit(X_train_use, y_train)

                        # Model selection is performed on the validation split.
                        y_val_pred = model.predict(X_val_use)
                        val_f1 = f1_score(y_val, y_val_pred, zero_division=0)
                        val_accuracy = accuracy_score(y_val, y_val_pred)
                        val_precision = precision_score(y_val, y_val_pred, zero_division=0)
                        val_recall = recall_score(y_val, y_val_pred, zero_division=0)

                        group_models[model_name] = model
                        group_scores[model_name] = {
                            'val_f1': val_f1,
                            'val_accuracy': val_accuracy,
                            'val_precision': val_precision,
                            'val_recall': val_recall
                        }

                        print(f"    {model_name}: F1={val_f1:.4f}")

                    except Exception as model_error:
                        # Best-effort: a single failed model should not kill the group.
                        print(f"⚠️  模型 {model_name} 训练失败: {model_error}")
                        continue

                if not group_scores:
                    print(f"⚠️  {group_name}: 所有模型训练失败")
                    ablation_results[group_name] = _failure(
                        len(feature_indices), 'All models failed to train'
                    )
                    continue

                # Pick the model with the best validation F1 ...
                best_model_name = max(group_scores.keys(), key=lambda x: group_scores[x]['val_f1'])
                best_model = group_models[best_model_name]

                # ... and report its performance on the untouched test split.
                if best_model_name == 'Logistic Regression':
                    X_test_use = X_test_scaled
                else:
                    X_test_use = X_test_imputed

                y_test_pred = best_model.predict(X_test_use)
                test_f1 = f1_score(y_test, y_test_pred, zero_division=0)
                test_accuracy = accuracy_score(y_test, y_test_pred)
                test_precision = precision_score(y_test, y_test_pred, zero_division=0)
                test_recall = recall_score(y_test, y_test_pred, zero_division=0)

                ablation_results[group_name] = {
                    'best_model': best_model_name,
                    'best_f1': test_f1,
                    'best_accuracy': test_accuracy,
                    'best_precision': test_precision,
                    'best_recall': test_recall,
                    'feature_count': len(feature_indices),
                    'models': group_models,
                    'scaler': scaler,
                    'imputer': imputer
                }

                print(f"✅ {group_name}: 最佳模型={best_model_name}, F1={test_f1:.4f}")

            except Exception as e:
                print(f"❌ {group_name} 评估失败: {e}")
                ablation_results[group_name] = _failure(
                    len(feature_indices) if feature_indices else 0, str(e)
                )

        self.ablation_results = ablation_results
        return ablation_results
    def _define_feature_groups(self) -> Dict[str, List[int]]:
        """Partition feature indices into rule / graph / all groups.

        Classification is by name prefix: features starting with ``rule_``
        are rule features, everything else counts as a graph feature.
        Falls back to an unnamed all-in-one grouping when feature_names is
        unavailable.
        """
        print("\n🔍 特征分组调试信息:")

        if not hasattr(self, 'feature_names') or self.feature_names is None:
            print("⚠️  警告: feature_names未设置")
            # Without names we cannot distinguish the groups; treat every
            # column as a graph feature.
            total_features = self.X.shape[1] if hasattr(self, 'X') else 0
            every_index = list(range(total_features))
            return {
                'rules_only': [],
                'graph_only': every_index,
                'all_features': list(every_index)
            }

        print(f"总特征数: {len(self.feature_names)}")
        print(f"前10个特征名: {self.feature_names[:10]}")

        # Split indices on the 'rule_' prefix.
        rule_idx = [i for i, name in enumerate(self.feature_names) if name.startswith('rule_')]
        graph_idx = [i for i, name in enumerate(self.feature_names) if not name.startswith('rule_')]
        feature_groups = {
            'rules_only': rule_idx,
            'graph_only': graph_idx,
            'all_features': list(range(len(self.feature_names)))
        }

        print(f"✅ 特征分组结果:")
        print(f"  规则特征: {len(feature_groups['rules_only'])} 个")
        print(f"  图特征: {len(feature_groups['graph_only'])} 个")
        print(f"  总特征: {len(feature_groups['all_features'])} 个")

        # Show a few example names from each group.
        if rule_idx:
            rule_examples = [self.feature_names[i] for i in rule_idx[:5]]
            print(f"  规则特征示例: {rule_examples}")

        if graph_idx:
            graph_examples = [self.feature_names[i] for i in graph_idx[:5]]
            print(f"  图特征示例: {graph_examples}")

        return feature_groups
    def generate_feature_contribution_report(self, output_dir: str):
        """Write a per-feature contribution report as text and CSV.

        Combines the model-derived importance ranking with, for every
        feature, the count of samples in which its value is non-zero.
        Requires feature_importance_analysis() and train_and_evaluate() to
        have run first (needs ``self.feature_importance`` and
        ``self.original_X``); prints a message and returns otherwise.

        Args:
            output_dir: base directory; files go to a per-experiment
                subdirectory named after ``self.experiment_id``.
        """
        if self.feature_importance is None or not hasattr(self, 'original_X'):
            print("无法生成特征贡献度报告：缺少重要性数据或原始数据。")
            return

        experiment_dir = f"{output_dir}/{self.experiment_id}"
        os.makedirs(experiment_dir, exist_ok=True)
        report_file = f"{experiment_dir}/feature_contribution_report.txt"

        # Per feature, count the samples where its value is non-zero.
        total_samples = self.original_X.shape[0]
        effective_counts = (self.original_X != 0).sum(axis=0)

        # Build the report table from the importance ranking.
        report_df = self.feature_importance.copy()
        report_df['effective_count'] = report_df['feature'].apply(
            lambda f: effective_counts[self.feature_names.index(f)]
        )
        report_df['total_samples'] = total_samples
        report_df['effective_ratio'] = report_df['effective_count'] / total_samples

        # Reorder columns for readability.
        report_df = report_df[[
            'feature', 'feature_type', 'effective_count', 
            'effective_ratio', 'importance'
        ]]

        # Write the human-readable text report.
        with open(report_file, 'w', encoding='utf-8') as f:
            f.write("特征贡献度与有效值分析报告\n")
            f.write("="*80 + "\n")
            f.write(f"基于模型: {self.best_model}\n")
            f.write(f"总样本数: {total_samples}\n")
            f.write(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write("说明:\n")
            f.write("  - effective_count: 特征值不为0的样本数量\n")
            f.write("  - effective_ratio: 有效值样本占总样本的比例\n")
            f.write("  - importance:      模型计算出的特征贡献度（已归一化）\n")
            f.write("="*80 + "\n\n")
            
            # Output grouped by feature type: rules first, then graph features.
            f.write("规则特征 (Rule Features):\n")
            f.write("-"*50 + "\n")
            rule_features = report_df[report_df['feature_type'] == 'Rule']
            for _, row in rule_features.iterrows():
                f.write(f"{row['feature']:<25} | 有效值: {row['effective_count']:>5} ({row['effective_ratio']:>6.1%}) | 重要性: {row['importance']:>8.4f}\n")
            
            f.write(f"\n规则特征统计: 共{len(rule_features)}个特征\n")
            f.write(f"平均有效值比例: {rule_features['effective_ratio'].mean():.1%}\n")
            f.write(f"总体重要性贡献: {rule_features['importance'].sum():.4f}\n")
            
            f.write("\n\n图特征 (Graph Features):\n")
            f.write("-"*50 + "\n")
            graph_features = report_df[report_df['feature_type'] == 'Graph']
            for _, row in graph_features.iterrows():
                f.write(f"{row['feature']:<35} | 有效值: {row['effective_count']:>5} ({row['effective_ratio']:>6.1%}) | 重要性: {row['importance']:>8.4f}\n")
            
            f.write(f"\n图特征统计: 共{len(graph_features)}个特征\n")
            f.write(f"平均有效值比例: {graph_features['effective_ratio'].mean():.1%}\n")
            f.write(f"总体重要性贡献: {graph_features['importance'].sum():.4f}\n")
            
            # Top-feature summary (report_df is already sorted by importance).
            f.write("\n\nTop 20 重要特征:\n")
            f.write("-"*50 + "\n")
            top_features = report_df.head(20)
            for i, (_, row) in enumerate(top_features.iterrows(), 1):
                f.write(f"{i:>2}. {row['feature']:<35} | 类型: {row['feature_type']:<5} | 重要性: {row['importance']:>8.4f} | 有效值: {row['effective_ratio']:>6.1%}\n")

        # Also save the table as CSV for further analysis.
        csv_file = f"{experiment_dir}/feature_contribution_report.csv"
        report_df.to_csv(csv_file, index=False, encoding='utf-8')

        print(f"特征贡献度报告已保存:")
        print(f"  文本报告: {report_file}")
        print(f"  CSV数据: {csv_file}")
    def generate_ablation_report(self, output_dir: str):
        """生成消融实验报告 - 修复版"""
        if not hasattr(self, 'ablation_results') or not self.ablation_results:
            return
        
        experiment_dir = f"{output_dir}/{self.experiment_id}"
        os.makedirs(experiment_dir, exist_ok=True)
        report_file = f"{experiment_dir}/ablation_study_report.txt"
        
        with open(report_file, 'w', encoding='utf-8') as f:
            f.write("消融实验分析报告\n")
            f.write("="*60 + "\n\n")
            f.write(f"实验目的: 验证图特征相对于规则特征的性能提升\n")
            f.write(f"实验设计: 对比仅规则特征、仅图特征、全部特征三组实验\n")
            f.write(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")
            
            # 详细结果
            for group_name, group_results in self.ablation_results.items():
                f.write(f"{group_name}:\n")
                f.write("-" * 40 + "\n")
                
                if 'error' in group_results or group_results['best_model'] == 'None':
                    f.write(f"  状态: 失败 ({group_results.get('error', '无可用模型')})\n")
                    f.write(f"  特征数: {group_results['feature_count']}\n\n")
                    continue
                
                f.write(f"  最佳模型: {group_results['best_model']}\n")
                f.write(f"  特征数量: {group_results['feature_count']}\n")
                f.write(f"  准确率: {group_results['best_accuracy']:.4f}\n")
                f.write(f"  精确率: {group_results['best_precision']:.4f}\n")
                f.write(f"  召回率: {group_results['best_recall']:.4f}\n")
                f.write(f"  F1分数: {group_results['best_f1']:.4f}\n\n")
            
            # 关键发现
            f.write("关键发现:\n")
            f.write("-" * 40 + "\n")
            
            # 计算有效结果
            valid_results = {k: v for k, v in self.ablation_results.items() 
                            if 'error' not in v and v['best_model'] != 'None'}
            
            if len(valid_results) >= 2:
                f1_scores = {k: v['best_f1'] for k, v in valid_results.items()}
                
                f.write(f"1. F1分数对比:\n")
                for group_name, f1_score in f1_scores.items():
                    f.write(f"   - {group_name}: {f1_score:.4f}\n")
                f.write("\n")
                
                # 性能提升分析
                if 'rules_only' in f1_scores:
                    baseline = f1_scores['rules_only']
                    f.write(f"2. 相对于仅规则特征的性能变化:\n")
                    for group_name, f1_score in f1_scores.items():
                        if group_name != 'rules_only':
                            improvement = ((f1_score - baseline) / baseline) * 100
                            direction = "提升" if improvement > 0 else "下降"
                            f.write(f"   - {group_name}: {direction} {abs(improvement):.1f}%\n")
                    f.write("\n")
                
                f.write(f"3. 结论:\n")
                best_group = max(f1_scores.keys(), key=lambda x: f1_scores[x])
                f.write(f"   - 最佳特征组合: {best_group} (F1: {f1_scores[best_group]:.4f})\n")
                
                if 'rules_only' in f1_scores and 'graph_only' in f1_scores:
                    if f1_scores['graph_only'] > f1_scores['rules_only']:
                        f.write(f"   - 图特征的检测效果优于规则特征\n")
                    else:
                        f.write(f"   - 规则特征的检测效果优于图特征\n")
                
                if 'all_features' in f1_scores:
                    all_f1 = f1_scores['all_features']
                    max_single = max([v for k, v in f1_scores.items() if k != 'all_features'])
                    if all_f1 > max_single:
                        f.write(f"   - 结合两种特征类型能获得最佳性能\n")
                    f.write(f"   - 这证明了多特征融合在恶意软件检测中的重要价值\n")
            else:
                f.write("实验结果不足，无法进行对比分析\n")
        
        print(f"消融实验报告已保存: {report_file}")
    def create_visualizations(self, output_dir: str = "ml_analysis_results"):
        """Render every available chart into a per-experiment subdirectory.

        Returns without side effects when no training results exist; the
        feature and ablation charts are drawn only once their underlying
        data has been computed.
        """
        if not self.results:
            return

        experiment_dir = f"{output_dir}/{self.experiment_id}"
        os.makedirs(experiment_dir, exist_ok=True)

        # Core charts are always produced once results exist.
        self._plot_model_comparison(experiment_dir)
        self._plot_confusion_matrices(experiment_dir)

        # Feature charts require a completed importance analysis.
        if self.feature_importance is not None:
            self._plot_feature_importance(experiment_dir)
            self._plot_feature_distribution(experiment_dir)

        # Ablation chart only exists after run_ablation_study().
        if hasattr(self, 'ablation_results'):
            self._plot_ablation_study(experiment_dir)

        print(f"可视化图表已保存到: {experiment_dir}")
    def _plot_model_comparison(self, output_dir: str):
        """Grouped bar chart comparing test-set metrics across all models."""
        scores = self.results['model_scores']
        models = list(scores.keys())
        metric_pairs = [
            ('test_accuracy', '准确率'),
            ('test_precision', '精确率'),
            ('test_recall', '召回率'),
            ('test_f1', 'F1分数'),
        ]

        fig, ax = plt.subplots(figsize=(12, 6))
        x_pos = np.arange(len(models))
        width = 0.2

        # One bar group per metric, offset so the four bars sit side by side.
        for i, (metric, label) in enumerate(metric_pairs):
            values = [scores[name][metric] for name in models]
            ax.bar(x_pos + i * width, values, width, label=label, alpha=0.8)

        ax.set_xlabel('模型')
        ax.set_ylabel('分数')
        ax.set_title('模型性能对比')
        ax.set_xticks(x_pos + width * 1.5)  # center the tick under the 4-bar group
        ax.set_xticklabels(models, rotation=45)
        ax.legend()
        ax.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.savefig(f"{output_dir}/model_comparison.png", dpi=300, bbox_inches='tight')
        plt.close()
    
    def _plot_confusion_matrices(self, output_dir: str):
        """Plot a confusion-matrix heatmap for every trained model."""
        n_models = len(self.results['model_scores'])

        # Pick a subplot grid large enough for all models.
        if n_models <= 6:
            rows, cols = 2, 3
        elif n_models <= 8:
            rows, cols = 2, 4
        elif n_models <= 9:
            rows, cols = 3, 3
        elif n_models <= 12:
            rows, cols = 3, 4
        else:
            rows, cols = 4, 4

        fig, axes = plt.subplots(rows, cols, figsize=(cols * 5, rows * 4))

        # BUG FIX: the grid is always at least 2x3, so `axes` is always a 2-D
        # ndarray — flatten it unconditionally.  The old `n_models == 1` special
        # case wrapped the whole 2-D array in a list, which would have passed an
        # array (not a single Axes) as `ax` to sns.heatmap and crashed.
        axes = axes.flatten()

        y_test = self.results['y_test']
        for i, (name, info) in enumerate(self.results['model_scores'].items()):
            cm = confusion_matrix(y_test, info['test_pred'])
            sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', 
                       xticklabels=['良性', '恶意'], yticklabels=['良性', '恶意'],
                       ax=axes[i])
            axes[i].set_title(f'{name}\nF1: {info["test_f1"]:.3f}')

        # Hide the unused subplots.
        for i in range(n_models, len(axes)):
            axes[i].set_visible(False)

        plt.tight_layout()
        plt.savefig(f"{output_dir}/confusion_matrices.png", dpi=300, bbox_inches='tight')
        plt.close()
    def _plot_feature_importance(self, output_dir: str):
        """特征重要性图"""
        if self.feature_importance is None or self.feature_importance.empty:
            return
        
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 8), gridspec_kw={'width_ratios': [3, 1]})
        
        top_features = self.feature_importance.head(20)
        colors = ['#ff7f0e' if x.startswith('rule_') else '#1f77b4' for x in top_features['feature']]
        
        ax1.barh(top_features['feature'], top_features['importance'], color=colors, alpha=0.8)
        ax1.invert_yaxis()
        ax1.set_xlabel('重要性')
        ax1.set_title('Top 20 特征重要性')
        ax1.grid(True, alpha=0.3, axis='x')
        
        rule_importance = self.feature_importance[self.feature_importance['feature_type'] == 'Rule']['importance'].sum()
        graph_importance = self.feature_importance[self.feature_importance['feature_type'] == 'Graph']['importance'].sum()
        
        if rule_importance + graph_importance > 0:
            ax2.pie([rule_importance, graph_importance], labels=['规则特征', '图特征'], 
                   colors=['#ff7f0e', '#1f77b4'], autopct='%1.1f%%', startangle=90)
            ax2.set_title('特征类型重要性占比')
        
        plt.tight_layout()
        plt.savefig(f"{output_dir}/feature_importance.png", dpi=300, bbox_inches='tight')
        plt.close()
    def _plot_ablation_study(self, output_dir: str):
        """Visualize ablation-study results (fixed version).

        Handles the "new" result format where each feature-group entry
        carries its best model's metrics ('best_model', 'best_f1', ...).
        Produces:
          - ablation_study.png: F1 comparison + four-metric comparison;
          - ablation_improvement.png: relative F1 gain over the
            'rules_only' baseline (only when that group exists).
        Old-format results are delegated to the legacy handler.
        """
        if not hasattr(self, 'ablation_results') or not self.ablation_results:
            return
        
        # Inspect the stored results to decide which format we received.
        groups = list(self.ablation_results.keys())
        
        # Probe one entry for the new-format marker key.
        sample_group = self.ablation_results[groups[0]]
        if 'best_model' in sample_group:
            # New format: extract each group's best-model metrics for plotting.
            plot_data = {}
            for group_name, group_result in self.ablation_results.items():
                if 'error' in group_result or group_result['best_model'] == 'None':
                    # Skip groups whose run failed.
                    continue
                
                plot_data[group_name] = {
                    'F1分数': group_result['best_f1'],
                    '准确率': group_result['best_accuracy'],
                    '精确率': group_result['best_precision'],
                    '召回率': group_result['best_recall'],
                    '模型': group_result['best_model']
                }
            
            if not plot_data:
                print("没有有效的消融实验数据用于可视化")
                return
            
            # Two-panel comparison figure.
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
            
            # Left panel: best F1 score per feature group.
            groups = list(plot_data.keys())
            f1_values = [plot_data[group]['F1分数'] for group in groups]
            # NOTE(review): only three colors defined — assumes at most three
            # ablation groups; confirm if more groups are ever produced.
            colors = ['#ff7f0e', '#1f77b4', '#2ca02c'][:len(groups)]
            
            bars1 = ax1.bar(groups, f1_values, color=colors, alpha=0.8)
            ax1.set_ylabel('F1分数')
            ax1.set_title('消融实验: F1分数对比')
            ax1.grid(True, alpha=0.3, axis='y')
            
            # Annotate each bar with its F1 value and winning model name.
            for i, (bar, group) in enumerate(zip(bars1, groups)):
                height = bar.get_height()
                ax1.text(bar.get_x() + bar.get_width()/2., height + 0.01,
                        f'{height:.3f}\n({plot_data[group]["模型"]})',
                        ha='center', va='bottom', fontsize=10)
            
            # Right panel: all four metrics side by side per group.
            metrics = ['F1分数', '准确率', '精确率', '召回率']
            metric_colors = ['#ff7f0e', '#1f77b4', '#2ca02c', '#d62728']
            
            x_pos = np.arange(len(groups))
            width = 0.2
            
            for i, metric in enumerate(metrics):
                values = [plot_data[group][metric] for group in groups]
                ax2.bar(x_pos + i * width, values, width, label=metric, 
                       color=metric_colors[i], alpha=0.8)
            
            ax2.set_xlabel('特征组合')
            ax2.set_ylabel('分数')
            ax2.set_title('消融实验: 综合指标对比')
            ax2.set_xticks(x_pos + width * 1.5)
            ax2.set_xticklabels(groups)
            ax2.legend()
            ax2.grid(True, alpha=0.3, axis='y')
            
            plt.tight_layout()
            plt.savefig(f"{output_dir}/ablation_study.png", dpi=300, bbox_inches='tight')
            plt.close()
            
            # Second figure: relative performance-gain analysis.
            fig, ax = plt.subplots(figsize=(12, 8))
            
            # Percentage F1 gain of each group over the rules-only baseline.
            if 'rules_only' in plot_data:
                baseline_f1 = plot_data['rules_only']['F1分数']
                
                improvements = {}
                for group in groups:
                    if group != 'rules_only':
                        improvement = ((plot_data[group]['F1分数'] - baseline_f1) / baseline_f1) * 100
                        improvements[group] = improvement
                
                if improvements:
                    improve_groups = list(improvements.keys())
                    improve_values = list(improvements.values())
                    
                    # Blue for gains, red for regressions.
                    bars = ax.bar(improve_groups, improve_values, 
                                color=['#1f77b4' if x > 0 else '#ff4444' for x in improve_values],
                                alpha=0.8)
                    
                    ax.axhline(y=0, color='black', linestyle='-', alpha=0.3)
                    ax.set_ylabel('性能提升 (%)')
                    ax.set_title('相对于仅规则特征的性能提升')
                    ax.grid(True, alpha=0.3, axis='y')
                    
                    # Label each bar; negative bars get their label below.
                    for bar, value in zip(bars, improve_values):
                        height = bar.get_height()
                        ax.text(bar.get_x() + bar.get_width()/2., 
                               height + (0.5 if height > 0 else -1.5),
                               f'{value:.1f}%', ha='center', va='bottom' if height > 0 else 'top')
                    
                    plt.tight_layout()
                    plt.savefig(f"{output_dir}/ablation_improvement.png", dpi=300, bbox_inches='tight')
                    plt.close()
            
            print(f"消融实验可视化已保存: {output_dir}/ablation_study.png")
            if 'rules_only' in plot_data:
                print(f"性能提升分析已保存: {output_dir}/ablation_improvement.png")
        
        else:
            # Old format: delegate for backward compatibility.
            print("检测到旧格式的消融实验数据，使用兼容模式")
            self._plot_ablation_study_legacy(output_dir)
    
    def _plot_ablation_study_legacy(self, output_dir: str):
        """消融实验结果可视化 - 旧版兼容"""
        # 这里放置原来的_plot_ablation_study代码作为后备
        # 为了简化，我们先跳过这个函数，因为新格式应该是主要使用的
        print("旧格式消融实验可视化暂时跳过")
        pass    
    def _plot_feature_distribution(self, output_dir: str):
        """特征分布图"""
        if not hasattr(self, 'original_X') or self.feature_importance is None:
            return
        
        # 计算有效值比例
        total_samples = self.original_X.shape[0]
        effective_ratios = (self.original_X != 0).sum(axis=0) / total_samples
        
        # 创建分布图
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
        
        # 有效值比例分布
        rule_mask = [f.startswith('rule_') for f in self.feature_names]
        rule_ratios = effective_ratios[rule_mask]
        graph_ratios = effective_ratios[~np.array(rule_mask)]
        
        ax1.hist(rule_ratios, bins=20, alpha=0.7, label='规则特征', color='#ff7f0e')
        ax1.hist(graph_ratios, bins=20, alpha=0.7, label='图特征', color='#1f77b4')
        ax1.set_xlabel('有效值比例')
        ax1.set_ylabel('特征数量')
        ax1.set_title('特征有效值比例分布')
        ax1.legend()
        ax1.grid(True, alpha=0.3)
        
        # 重要性vs有效值比例散点图
        importance_values = [self.feature_importance[self.feature_importance['feature'] == f]['importance'].iloc[0] 
                           for f in self.feature_names]
        colors = ['#ff7f0e' if f.startswith('rule_') else '#1f77b4' for f in self.feature_names]
        
        ax2.scatter(effective_ratios, importance_values, c=colors, alpha=0.6)
        ax2.set_xlabel('有效值比例')
        ax2.set_ylabel('特征重要性')
        ax2.set_title('特征重要性 vs 有效值比例')
        ax2.grid(True, alpha=0.3)
        
        # 添加图例
        from matplotlib.patches import Patch
        legend_elements = [Patch(facecolor='#ff7f0e', label='规则特征'),
                          Patch(facecolor='#1f77b4', label='图特征')]
        ax2.legend(handles=legend_elements)
        
        plt.tight_layout()
        plt.savefig(f"{output_dir}/feature_distribution.png", dpi=300, bbox_inches='tight')
        plt.close()

    def run_5_fold_cross_validation(self, X: np.ndarray, y: np.ndarray):
        """Train and evaluate all candidate models with stratified 5-fold CV.

        For each model: run 5 folds collecting accuracy/precision/recall/
        F1/AUC, print per-fold scores, then refit a final model on the full
        dataset. Summaries go to ``self.results['cv_results']``; the refit
        model, its scaler (if any), and pooled out-of-fold predictions go
        to ``self.results['model_scores']``. The model with the highest
        mean CV F1 becomes ``self.best_model``.

        Args:
            X: feature matrix, shape (n_samples, n_features).
            y: binary labels. NOTE(review): the scale_pos_weight computation
               below indexes labels 0 and 1 directly — assumes 0/1 labels;
               confirm against load_data.

        Returns:
            The populated ``self.results`` dict.
        """
        print("开始5折交叉验证...")
        
        # Keep the raw data around for later analysis and plotting.
        self.original_X = X.copy()
        self.original_y = y.copy()
        
        # Negative/positive ratio fed to XGBoost for class-imbalance handling.
        y_series = pd.Series(y)
        if len(y_series.value_counts()) > 1:
            scale_pos_weight = y_series.value_counts()[0] / y_series.value_counts()[1]
        else:
            # Only one class present: fall back to a neutral weight.
            scale_pos_weight = 1.0
        
        # Candidate models (XGBoost configured for the imbalance above).
        self.models = {
            'Random Forest': RandomForestClassifier(
                n_estimators=200, random_state=self.random_state,
                class_weight='balanced', max_features='sqrt'
            ),
            'Gradient Boosting': GradientBoostingClassifier(
                n_estimators=150, random_state=self.random_state, learning_rate=0.1
            ),
            'XGBoost': xgb.XGBClassifier(
                n_estimators=200,
                learning_rate=0.1,
                max_depth=7,
                subsample=0.8,
                colsample_bytree=0.8,
                use_label_encoder=False,
                eval_metric='logloss',
                random_state=self.random_state,
                scale_pos_weight=scale_pos_weight,
                verbosity=0
            ),
            'Logistic Regression': LogisticRegression(
                random_state=self.random_state, class_weight='balanced',
                max_iter=2000, C=1.0
            ),
            'SVM': SVC(
                random_state=self.random_state, class_weight='balanced',
                probability=True, kernel='rbf'
            ),
            'Naive Bayes': GaussianNB(),
            'Decision Tree': DecisionTreeClassifier(
                random_state=self.random_state, class_weight='balanced', max_depth=20
            )
        }
        
        # Stratified folds keep the class ratio stable in every split.
        cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=self.random_state)
        
        # Result containers.
        self.results = {
            'cv_results': {},
            'model_scores': {}
        }
        
        best_cv_f1 = 0
        best_model_name = None
        
        # Evaluate each model across all five folds.
        for name, model in self.models.items():
            print(f"\n正在评估 {name} 模型...")
            
            # Per-fold metric accumulators.
            fold_results = {
                'accuracy': [], 'precision': [], 'recall': [], 'f1': [], 'auc': []
            }
            
            fold_predictions = []
            fold_probabilities = []
            fold_true_labels = []
            
            # Run the 5 folds.
            for fold, (train_idx, val_idx) in enumerate(cv.split(X, y), 1):
                X_train_fold, X_val_fold = X[train_idx], X[val_idx]
                y_train_fold, y_val_fold = y[train_idx], y[val_idx]
                
                # Scale only for scale-sensitive models; the scaler is fit on
                # the training fold only, avoiding validation leakage.
                if name in ['Logistic Regression', 'SVM']:
                    scaler = StandardScaler()
                    X_train_use = scaler.fit_transform(X_train_fold)
                    X_val_use = scaler.transform(X_val_fold)
                else:
                    X_train_use, X_val_use = X_train_fold, X_val_fold
                
                # Fresh, unfitted clone per fold with identical hyperparameters.
                model_fold = model.__class__(**model.get_params())
                model_fold.fit(X_train_use, y_train_fold)
                
                # Predict labels and, when supported, positive-class scores.
                y_pred = model_fold.predict(X_val_use)
                y_prob = model_fold.predict_proba(X_val_use)[:, 1] if hasattr(model_fold, 'predict_proba') else None
                
                # Fold metrics (zero_division=0 guards degenerate folds).
                fold_results['accuracy'].append(accuracy_score(y_val_fold, y_pred))
                fold_results['precision'].append(precision_score(y_val_fold, y_pred, zero_division=0))
                fold_results['recall'].append(recall_score(y_val_fold, y_pred, zero_division=0))
                fold_results['f1'].append(f1_score(y_val_fold, y_pred, zero_division=0))
                
                if y_prob is not None:
                    fold_results['auc'].append(roc_auc_score(y_val_fold, y_prob))
                else:
                    # No probability support -> AUC recorded as 0.0.
                    fold_results['auc'].append(0.0)
                
                # Pool out-of-fold predictions for downstream analysis.
                fold_predictions.extend(y_pred)
                if y_prob is not None:
                    fold_probabilities.extend(y_prob)
                fold_true_labels.extend(y_val_fold)
                
                print(f"  Fold {fold}: F1={fold_results['f1'][-1]:.4f}, AUC={fold_results['auc'][-1]:.4f}")
            
            # Mean and standard deviation across folds.
            cv_mean_f1 = np.mean(fold_results['f1'])
            cv_std_f1 = np.std(fold_results['f1'])
            
            cv_results = {
                'accuracy_mean': np.mean(fold_results['accuracy']),
                'accuracy_std': np.std(fold_results['accuracy']),
                'precision_mean': np.mean(fold_results['precision']),
                'precision_std': np.std(fold_results['precision']),
                'recall_mean': np.mean(fold_results['recall']),
                'recall_std': np.std(fold_results['recall']),
                'f1_mean': cv_mean_f1,
                'f1_std': cv_std_f1,
                'auc_mean': np.mean(fold_results['auc']),
                'auc_std': np.std(fold_results['auc'])
            }
            
            # Refit the final model on the full dataset (scaled if needed).
            if name in ['Logistic Regression', 'SVM']:
                final_scaler = StandardScaler()
                X_final = final_scaler.fit_transform(X)
            else:
                X_final = X
                final_scaler = None
            
            final_model = model.__class__(**model.get_params())
            final_model.fit(X_final, y)
            
            # Store both the CV summary and the refit artifacts.
            self.results['cv_results'][name] = cv_results
            self.results['model_scores'][name] = {
                'model': final_model,
                'scaler': final_scaler,
                'cv_predictions': fold_predictions,
                'cv_probabilities': fold_probabilities if fold_probabilities else None,
                'cv_true_labels': fold_true_labels,
                **cv_results
            }
            
            print(f"  {name}: F1={cv_mean_f1:.4f}±{cv_std_f1:.4f}, AUC={cv_results['auc_mean']:.4f}±{cv_results['auc_std']:.4f}")
            
            # Track the best model by mean CV F1.
            if cv_mean_f1 > best_cv_f1:
                best_cv_f1 = cv_mean_f1
                best_model_name = name
        
        self.best_model = best_model_name
        print(f"\n最佳模型: {self.best_model} (CV F1: {best_cv_f1:.4f}±{self.results['cv_results'][self.best_model]['f1_std']:.4f})")
        
        return self.results
    def generate_report(self, output_dir: str):
        """Write the 5-fold cross-validation analysis report as plain text.

        The report lands in ``<output_dir>/<experiment_id>/analysis_report.txt``
        and covers experiment metadata, per-model CV metrics, the best model,
        optional extreme-test results, and feature statistics.
        """
        experiment_dir = f"{output_dir}/{self.experiment_id}"
        os.makedirs(experiment_dir, exist_ok=True)
        report_file = f"{experiment_dir}/analysis_report.txt"

        with open(report_file, 'w', encoding='utf-8') as f:
            # Header and experiment metadata.
            f.write("NPM恶意包检测分析报告 (5折交叉验证)\n")
            f.write("="*50 + "\n\n")
            f.write(f"实验ID: {self.experiment_id}\n")
            f.write(f"随机种子: {self.random_state}\n")
            f.write(f"特征维度: {len(self.feature_names)}\n")
            f.write(f"评估方法: 5折分层交叉验证\n")
            f.write(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")

            # Per-model CV metrics (mean ± std over the 5 folds).
            f.write("模型性能 (5折CV平均):\n")
            f.write("-"*30 + "\n")
            for model_name, stats in self.results['cv_results'].items():
                f.write(f"{model_name}:\n")
                for label, key in (('准确率', 'accuracy'), ('精确率', 'precision'),
                                   ('召回率', 'recall'), ('F1分数', 'f1')):
                    f.write(f"  {label}: {stats[key + '_mean']:.4f}±{stats[key + '_std']:.4f}\n")
                f.write(f"  AUC: {stats['auc_mean']:.4f}±{stats['auc_std']:.4f}\n\n")

            f.write(f"最佳模型: {self.best_model}\n\n")

            # Optional section: extreme-case test results.
            if 'extreme_tests' in self.results:
                extreme = self.results['extreme_tests']
                f.write("极端测试结果 (在整个数据集上):\n")
                f.write("-"*30 + "\n")
                f.write(f"良性准确率: {extreme['benign_accuracy']:.4f}\n")
                f.write(f"恶意检出率: {extreme['malicious_detection_rate']:.4f}\n")
                f.write(f"误报率: {extreme['false_positive_rate']:.4f}\n")
                f.write(f"漏报率: {extreme['false_negative_rate']:.4f}\n\n")

            # Optional section: feature statistics from the raw matrix.
            if hasattr(self, 'original_X'):
                total_samples = self.original_X.shape[0]
                effective_counts = (self.original_X != 0).sum(axis=0)
                rule_count = sum(1 for f in self.feature_names if f.startswith('rule_'))
                graph_count = len(self.feature_names) - rule_count

                f.write("特征统计:\n")
                f.write("-"*30 + "\n")
                f.write(f"规则特征数: {rule_count}\n")
                f.write(f"图特征数: {graph_count}\n")
                f.write(f"总特征数: {len(self.feature_names)}\n")
                f.write(f"平均有效值比例: {(effective_counts / total_samples).mean():.1%}\n")

        print(f"分析报告已保存: {report_file}")
    def save_model(self, output_dir: str = "ml_analysis_results"):
        """Persist the best model, scaler, feature names, and run metadata.

        Does nothing when no best model has been selected yet.
        """
        if not self.best_model:
            return

        experiment_dir = f"{output_dir}/{self.experiment_id}"
        os.makedirs(experiment_dir, exist_ok=True)

        # Serialize the fitted artifacts with joblib.
        best_estimator = self.results['model_scores'][self.best_model]['model']
        joblib.dump(best_estimator, f"{experiment_dir}/best_model.pkl")
        joblib.dump(self.scaler, f"{experiment_dir}/scaler.pkl")
        joblib.dump(self.feature_names, f"{experiment_dir}/feature_names.pkl")

        # Lightweight JSON sidecar describing the run.
        run_info = {
            'experiment_id': self.experiment_id,
            'random_state': self.random_state,
            'best_model': self.best_model,
            'feature_count': len(self.feature_names),
            'timestamp': datetime.now().isoformat()
        }
        with open(f"{experiment_dir}/metadata.json", 'w') as f:
            json.dump(run_info, f, indent=2)

        print(f"模型已保存: {experiment_dir}")

def main():
    """CLI entry point: load features, run 5-fold CV analysis, write outputs."""
    parser = argparse.ArgumentParser(description="NPM恶意包机器学习分类分析")
    parser.add_argument('csv_file', help='特征CSV文件路径')
    parser.add_argument('--output', '-o', default='ml_analysis_results', help='输出目录')
    parser.add_argument('--random-state', type=int, default=None, help='随机种子')
    parser.add_argument('--extreme-test', action='store_true', help='执行极端情况测试')
    parser.add_argument('--ablation-study', action='store_true', help='执行消融实验')
    args = parser.parse_args()

    try:
        print("NPM恶意包检测分析 (5折交叉验证)")
        print("="*50)

        clf = MalwareClassifier(random_state=args.random_state)

        X, y, _ = clf.load_data(args.csv_file)
        print(f"数据加载完成: {X.shape[0]}样本, {X.shape[1]}特征")

        # 5-fold cross-validation replaces the old single train/test split.
        clf.run_5_fold_cross_validation(X, y)
        clf.feature_importance_analysis()

        if args.extreme_test:
            clf.extreme_case_testing()
        if args.ablation_study:
            # Non-zero test_size keeps the ablation's internal split valid.
            clf.run_ablation_study(X, y, test_size=0.3, val_size=0.2)

        # Emit every report and figure.
        clf.create_visualizations(args.output)
        clf.generate_report(args.output)
        clf.generate_feature_contribution_report(args.output)
        if args.ablation_study:
            clf.generate_ablation_report(args.output)
        clf.save_model(args.output)

        best_stats = clf.results['cv_results'][clf.best_model]
        print(f"\n分析完成! 最佳模型: {clf.best_model} "
              f"(CV F1: {best_stats['f1_mean']:.4f}±{best_stats['f1_std']:.4f})")
        print(f"结果保存至: {args.output}/{clf.experiment_id}")

    except Exception as e:
        # CLI boundary: report the failure with a traceback instead of crashing.
        print(f"错误: {e}")
        import traceback
        traceback.print_exc()

# Script entry point: run the CLI analysis pipeline when executed directly.
if __name__ == "__main__":
    main()