#!/usr/bin/env python3
"""
Feature Extractor for NPM Package Analysis
基于PDCG和预定义规则的特征向量提取工具
"""

import argparse
import json
import math
import re
from collections import Counter
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple

import networkx as nx  # graph analysis library
import pandas as pd
from networkx.algorithms import community
def analyze_package_pdcg(package_path: str, rules_data: dict) -> Tuple[List[int], Set[str], Dict]:
    """
    Analyze every PDCG file of an NPM package and derive package-level features.

    Args:
        package_path: directory containing the package's *.pdcg.json files
        rules_data: rule knowledge base; must contain a 'rules' list whose
            entries each carry a 'rule_id'

    Returns:
        tuple: (binary feature vector aligned with rules_data['rules'],
                set of rule IDs hit anywhere in the package,
                dict of graph-theoretic features)

    Raises:
        FileNotFoundError: if package_path does not exist
    """
    package_dir = Path(package_path)
    if not package_dir.exists():
        raise FileNotFoundError(f"包路径不存在: {package_path}")

    # Recursively collect every PDCG file belonging to the package.
    pdcg_files = list(package_dir.rglob("*.pdcg.json"))

    # Rule ID order defines the feature-vector layout.
    all_rule_ids = [rule['rule_id'] for rule in rules_data['rules']]

    if not pdcg_files:
        print(f"  警告: 包中未找到PDCG文件")
        # Nothing to analyze: all-zero vector, no hits, empty graph features.
        return [0] * len(all_rule_ids), set(), _get_empty_graph_features()

    print(f"  找到 {len(pdcg_files)} 个PDCG文件")

    # Merge every file-level PDCG into one package-level directed graph.
    package_graph = nx.DiGraph()
    processed_files = 0
    for pdcg_file in pdcg_files:
        try:
            with open(pdcg_file, 'r', encoding='utf-8') as f:
                pdcg_data = json.load(f)

            # The file stem (without '.pdcg') namespaces node IDs so nodes
            # from different files never collide in the merged graph.
            file_prefix = pdcg_file.stem.replace('.pdcg', '')
            _merge_pdcg_to_graph(package_graph, pdcg_data, file_prefix)
            processed_files += 1

        except Exception as e:
            # A single corrupt file must not abort the whole package.
            print(f"    警告: 处理文件 {pdcg_file.name} 失败: {e}")
            continue

    print(f"  成功处理 {processed_files}/{len(pdcg_files)} 个PDCG文件")
    print(f"  包级图统计: {package_graph.number_of_nodes()} 个节点, {package_graph.number_of_edges()} 条边")

    # Flag rule-matching nodes as malicious, in place.
    _mark_malicious_nodes(package_graph, rules_data['rules'])

    # Aggregate the rule IDs hit by any malicious node.
    # (The previous version initialized package_hit_rules twice; once is enough.)
    malicious_nodes = [node for node, data in package_graph.nodes(data=True)
                       if data.get('is_malicious', False)]
    package_hit_rules: Set[str] = set()
    for node in malicious_nodes:
        package_hit_rules.update(package_graph.nodes[node].get('hit_rules', set()))

    print(f"  恶意节点数: {len(malicious_nodes)}, 命中规则数: {len(package_hit_rules)}")

    # Graph-theoretic features over the whole package graph.
    graph_features = calculate_graph_features(package_graph)

    # Binary feature vector aligned with the rule knowledge base.
    feature_vector = _vectorize(package_hit_rules, all_rule_ids)

    return feature_vector, package_hit_rules, graph_features
def _merge_pdcg_to_graph(package_graph: nx.DiGraph, pdcg_data: dict, file_prefix: str) -> None:
    """
    将单个PDCG文件的节点和边合并到包级图中
    
    Args:
        package_graph: 包级NetworkX图对象
        pdcg_data: 单个PDCG的JSON数据
        file_prefix: 文件前缀（用于避免节点ID冲突）
    """
    nodes = pdcg_data.get('nodes', [])
    edges = pdcg_data.get('edges', [])
    
    # 添加节点（使用文件前缀避免ID冲突）
    for node in nodes:
        original_id = node['id']
        unique_id = f"{file_prefix}_{original_id}"
        
        # 复制节点属性
        node_attrs = {key: value for key, value in node.items() if key != 'id'}
        node_attrs['original_id'] = original_id
        node_attrs['source_file'] = file_prefix
        
        package_graph.add_node(unique_id, **node_attrs)
    
    # 添加边（更新节点ID引用）
    for edge in edges:
        source_id = f"{file_prefix}_{edge['source']}"
        target_id = f"{file_prefix}_{edge['target']}"
        edge_type = edge['type']
        
        # 只有当源节点和目标节点都存在时才添加边
        if package_graph.has_node(source_id) and package_graph.has_node(target_id):
            package_graph.add_edge(source_id, target_id, type=edge_type)
def _mark_malicious_nodes(package_graph: nx.DiGraph, rules: list) -> None:
    """
    优化版恶意节点标记：大幅减少无效遍历和重复计算
    """
    total_nodes = package_graph.number_of_nodes()
    print(f"    开始恶意节点标记（节点数: {total_nodes}）")
    
    # 1. 预分组节点，避免全图重复扫描
    call_nodes = []
    arg_nodes = []
    
    for node_id, node_data in package_graph.nodes(data=True):
        node_type = node_data.get('type')
        if node_type == 'CALL':
            call_nodes.append(node_id)
        elif node_type == 'ARGUMENT':
            arg_nodes.append(node_id)
    
    print(f"    节点分组: CALL={len(call_nodes)}, ARGUMENT={len(arg_nodes)}")
    
    # 2. 预构建全局边映射，避免重复计算
    edge_map = {}
    for source, target, edge_data in package_graph.edges(data=True):
        if edge_data.get('type') == 'has_arg':
            if source not in edge_map:
                edge_map[source] = []
            edge_map[source].append(target)
    
    # 3. 按规则类型分组，减少无效匹配
    call_rules = []
    arg_rules = []
    combo_rules = []
    
    for rule in rules:
        pdcg_pattern = rule['pdcg_pattern']
        if 'primary_call' in pdcg_pattern:
            combo_rules.append(rule)
        elif pdcg_pattern.get('node_type') == 'CALL':
            call_rules.append(rule)
        elif pdcg_pattern.get('node_type') == 'ARGUMENT':
            arg_rules.append(rule)
    
    print(f"    规则分组: CALL={len(call_rules)}, ARG={len(arg_rules)}, COMBO={len(combo_rules)}")
    
    # 4. 高效匹配 - 只对相关节点类型应用相关规则
    matched_count = 0
    
    # 处理CALL节点规则
    for rule in call_rules:
        rule_id = rule['rule_id']
        pdcg_pattern = rule['pdcg_pattern']
        
        for node_id in call_nodes:
            if _fast_match_call_node(package_graph, node_id, edge_map, pdcg_pattern):
                _mark_node_malicious(package_graph, node_id, rule_id, rule)
                matched_count += 1
    
    # 处理ARGUMENT节点规则
    for rule in arg_rules:
        rule_id = rule['rule_id']
        pdcg_pattern = rule['pdcg_pattern']
        
        for node_id in arg_nodes:
            if _fast_match_arg_node(package_graph, node_id, pdcg_pattern):
                _mark_node_malicious(package_graph, node_id, rule_id, rule)
                matched_count += 1
    
    # 处理组合规则（只对CALL节点）
    for rule in combo_rules:
        rule_id = rule['rule_id']
        pdcg_pattern = rule['pdcg_pattern']
        
        for node_id in call_nodes:
            if _fast_match_combo_rule(package_graph, node_id, edge_map, pdcg_pattern):
                _mark_node_malicious(package_graph, node_id, rule_id, rule)
                matched_count += 1
    
    print(f"    恶意节点标记完成，命中次数: {matched_count}")
def calculate_graph_features(graph: nx.DiGraph) -> Dict:
    """
    Compute graph-theoretic features of the package graph
    (V3 — cross-file features removed).
    """
    # Collect the nodes previously flagged by the rule matcher.
    malicious_nodes = {n for n, d in graph.nodes(data=True) if d.get('is_malicious', False)}
    if not malicious_nodes:
        return _get_empty_graph_features()

    total_nodes = graph.number_of_nodes()
    malicious_subgraph = graph.subgraph(malicious_nodes)
    print(f"    开始计算图特征（总节点: {total_nodes}, 恶意节点: {len(malicious_nodes)}）")

    features: Dict = {}

    # Basic statistics: counts, ratio, and behavior-category distribution.
    features['malicious_node_count'] = len(malicious_nodes)
    features['malicious_ratio'] = len(malicious_nodes) / total_nodes if total_nodes > 0 else 0.0

    category_counts = Counter(d.get('category', 'UNKNOWN')
                              for n, d in graph.nodes(data=True) if n in malicious_nodes)
    features['category_diversity'] = len(category_counts)
    for category in ['IG', 'DT', 'DE', 'PE', 'SP']:
        features[f'{category}_ratio'] = category_counts[category] / len(malicious_nodes) if malicious_nodes else 0.0

    # Connectivity inside the malicious subgraph.
    print("    计算内部连通性特征...")
    _calculate_connectivity_features(features, malicious_subgraph, malicious_nodes)

    # Centrality in the full graph and in the malicious subgraph.
    print("    计算中心性特征...")
    _calculate_centrality_features(features, graph, malicious_subgraph, malicious_nodes)

    # Community structure of the malicious nodes.
    print("    计算社区结构特征...")
    _calculate_community_features(features, graph, malicious_nodes)

    print(f"    图特征计算完成")
    return features
def _calculate_connectivity_features(features: Dict, malicious_subgraph: nx.DiGraph, malicious_nodes: Set[str]):
    """计算恶意子图的连通性特征"""
    try:
        features['malicious_internal_edges'] = malicious_subgraph.number_of_edges()
        
        # 密度计算 - 确保鲁棒性
        if len(malicious_nodes) > 1:
            features['malicious_density'] = nx.density(malicious_subgraph)
        else:
            features['malicious_density'] = 0.0
        
        # 弱连通组件数
        features['malicious_components'] = nx.number_weakly_connected_components(malicious_subgraph)
        
    except Exception as e:
        print(f"      连通性特征计算失败: {e}")
        features['malicious_internal_edges'] = 0
        features['malicious_density'] = 0.0
        features['malicious_components'] = 1 if malicious_nodes else 0

def _calculate_centrality_features(features: Dict, full_graph: nx.DiGraph, malicious_subgraph: nx.DiGraph, malicious_nodes: Set[str]):
    """计算所有中心性特征并更新到features字典"""
    malicious_nodes_list = list(malicious_nodes)
    
    # --- 在全图中的中心性 (衡量在整个包中的影响力) ---
    print("      计算全图中心性...")
    
    # 度中心性 (Degree Centrality) - 新增
    try:
        degree_centrality = nx.degree_centrality(full_graph)
        malicious_degrees = [degree_centrality.get(n, 0) for n in malicious_nodes_list]
        features['mean_degree_centrality_full'] = sum(malicious_degrees) / len(malicious_degrees) if malicious_degrees else 0.0
        features['max_degree_centrality_full'] = max(malicious_degrees) if malicious_degrees else 0.0
    except Exception as e:
        print(f"        全图度中心性计算失败: {e}")
        features['mean_degree_centrality_full'] = 0.0
        features['max_degree_centrality_full'] = 0.0

    # PageRank
    try:
        pagerank = nx.pagerank(full_graph, alpha=0.85, tol=1.0e-4, max_iter=50)
        malicious_pageranks = [pagerank.get(n, 0) for n in malicious_nodes_list]
        features['mean_malicious_pagerank'] = sum(malicious_pageranks) / len(malicious_pageranks) if malicious_pageranks else 0.0
        features['max_malicious_pagerank'] = max(malicious_pageranks) if malicious_pageranks else 0.0
        features['sum_malicious_pagerank'] = sum(malicious_pageranks)
    except Exception as e:
        print(f"        全图PageRank计算失败: {e}")
        features['mean_malicious_pagerank'] = 0.0
        features['max_malicious_pagerank'] = 0.0
        features['sum_malicious_pagerank'] = 0.0

    # 近似介数中心性
    try:
        total_nodes = full_graph.number_of_nodes()
        k = min(total_nodes, max(100, int(total_nodes * 0.01)))
        betweenness = nx.betweenness_centrality(full_graph, k=k, normalized=True, seed=42)
        malicious_betweenness = [betweenness.get(n, 0) for n in malicious_nodes_list]
        features['mean_malicious_betweenness'] = sum(malicious_betweenness) / len(malicious_betweenness) if malicious_betweenness else 0.0
        features['max_malicious_betweenness'] = max(malicious_betweenness) if malicious_betweenness else 0.0
    except Exception as e:
        print(f"        全图介数中心性计算失败: {e}")
        features['mean_malicious_betweenness'] = 0.0
        features['max_malicious_betweenness'] = 0.0

    # --- 在恶意子图中的中心性 (衡量在恶意网络内部的重要性) ---
    print("      计算恶意子图中心性...")
    
    if len(malicious_nodes) > 1:
        # 度中心性
        try:
            sub_degree_centrality = nx.degree_centrality(malicious_subgraph)
            sub_malicious_degrees = [sub_degree_centrality.get(n, 0) for n in malicious_nodes_list]
            features['mean_degree_centrality_sub'] = sum(sub_malicious_degrees) / len(sub_malicious_degrees) if sub_malicious_degrees else 0.0
            features['max_degree_centrality_sub'] = max(sub_malicious_degrees) if sub_malicious_degrees else 0.0
        except Exception as e:
            print(f"        子图度中心性计算失败: {e}")
            features['mean_degree_centrality_sub'] = 0.0
            features['max_degree_centrality_sub'] = 0.0
        
        # PageRank
        try:
            sub_pagerank = nx.pagerank(malicious_subgraph, alpha=0.85, tol=1.0e-4, max_iter=50)
            sub_malicious_pageranks = [sub_pagerank.get(n, 0) for n in malicious_nodes_list]
            features['mean_pagerank_sub'] = sum(sub_malicious_pageranks) / len(sub_malicious_pageranks) if sub_malicious_pageranks else 0.0
            features['max_pagerank_sub'] = max(sub_malicious_pageranks) if sub_malicious_pageranks else 0.0
        except Exception as e:
            print(f"        子图PageRank计算失败: {e}")
            features['mean_pagerank_sub'] = 0.0
            features['max_pagerank_sub'] = 0.0
        
        # 介数中心性 (子图)
        try:
            sub_betweenness = nx.betweenness_centrality(malicious_subgraph, normalized=True)
            sub_malicious_betweenness = [sub_betweenness.get(n, 0) for n in malicious_nodes_list]
            features['mean_betweenness_sub'] = sum(sub_malicious_betweenness) / len(sub_malicious_betweenness) if sub_malicious_betweenness else 0.0
            features['max_betweenness_sub'] = max(sub_malicious_betweenness) if sub_malicious_betweenness else 0.0
        except Exception as e:
            print(f"        子图介数中心性计算失败: {e}")
            features['mean_betweenness_sub'] = 0.0
            features['max_betweenness_sub'] = 0.0
    else:
        # 单节点情况
        features['mean_degree_centrality_sub'] = 0.0
        features['max_degree_centrality_sub'] = 0.0
        features['mean_pagerank_sub'] = 0.0
        features['max_pagerank_sub'] = 0.0
        features['mean_betweenness_sub'] = 0.0
        features['max_betweenness_sub'] = 0.0

def _calculate_community_features(features: Dict, graph: nx.DiGraph, malicious_nodes: Set[str]):
    """计算社区结构特征"""
    communities = []
    
    try:
        if graph.number_of_nodes() > 0:
            try:
                # Louvain算法要求图是无向的
                detected_communities = community.louvain_communities(graph.to_undirected(), seed=42)
                communities.extend(detected_communities)
            except Exception:
                # 如果Louvain失败，使用弱连通组件
                communities = [c for c in nx.weakly_connected_components(graph)]
    except Exception as e:
        print(f"      社区检测失败: {e}")
        communities = []

    if not communities:
        features['max_community_maliciousness_ratio'] = 0.0
        features['malicious_community_entropy'] = 0.0
        features['num_malicious_communities'] = 0
        return

    try:
        # 计算每个社区的恶意率
        community_maliciousness_ratios = []
        for community_set in communities:
            malicious_in_community = malicious_nodes.intersection(community_set)
            if len(community_set) > 0:
                community_maliciousness_ratios.append(len(malicious_in_community) / len(community_set))
        
        features['max_community_maliciousness_ratio'] = max(community_maliciousness_ratios) if community_maliciousness_ratios else 0.0
        
        # 计算恶意节点在社区间的分布熵
        malicious_distribution = [len(malicious_nodes.intersection(c)) for c in communities]
        malicious_distribution_counts = [count for count in malicious_distribution if count > 0]
        total_malicious_in_communities = sum(malicious_distribution_counts)
        
        if total_malicious_in_communities > 0:
            probabilities = [count / total_malicious_in_communities for count in malicious_distribution_counts]
            features['malicious_community_entropy'] = -sum(p * math.log2(p) for p in probabilities if p > 0)
        else:
            features['malicious_community_entropy'] = 0.0
        
        features['num_malicious_communities'] = len(malicious_distribution_counts)
        
    except Exception as e:
        print(f"      社区特征计算失败: {e}")
        features['max_community_maliciousness_ratio'] = 0.0
        features['malicious_community_entropy'] = 0.0
        features['num_malicious_communities'] = 0


def _get_empty_graph_features() -> Dict:
    """
    返回空的图特征字典（V3 - 移除跨文件特征）
    """
    return {
        # 基础统计
        'malicious_node_count': 0,
        'malicious_ratio': 0.0,
        'category_diversity': 0,
        'IG_ratio': 0.0, 'DT_ratio': 0.0, 'DE_ratio': 0.0, 'PE_ratio': 0.0, 'SP_ratio': 0.0,
        
        # 内部连通性
        'malicious_internal_edges': 0,
        'malicious_density': 0.0,
        'malicious_components': 0,
        
        # 全图中心性
        'mean_degree_centrality_full': 0.0,
        'max_degree_centrality_full': 0.0,
        'mean_malicious_pagerank': 0.0,
        'max_malicious_pagerank': 0.0,
        'sum_malicious_pagerank': 0.0,
        'mean_malicious_betweenness': 0.0,
        'max_malicious_betweenness': 0.0,
        
        # 子图中心性
        'mean_degree_centrality_sub': 0.0,
        'max_degree_centrality_sub': 0.0,
        'mean_pagerank_sub': 0.0,
        'max_pagerank_sub': 0.0,
        'mean_betweenness_sub': 0.0,
        'max_betweenness_sub': 0.0,
        
        # 社区结构特征
        'num_malicious_communities': 0,
        'max_community_maliciousness_ratio': 0.0,
        'malicious_community_entropy': 0.0,
        

    }
def save_results_to_csv(results: List[Dict], output_csv: str, rules_data: dict) -> None:
    """
    Save analysis results as a CSV file (V2 - enhanced).

    Columns are ordered as: base info, one binary column per rule, then the
    graph feature columns (in the order defined by _get_empty_graph_features).

    Args:
        results: one result dict per analyzed package
        output_csv: destination CSV path
        rules_data: rule knowledge base (supplies the rule column order)

    Raises:
        ValueError: if *results* is empty
    """
    # NOTE: the redundant local `import pandas as pd` was removed; pandas is
    # already imported at module level.
    if not results:
        raise ValueError("没有结果可保存")

    df = pd.DataFrame(results)

    base_columns = ['package_name', 'data_type', 'label', 'hit_rules_count', 'hit_rules']
    rule_columns = [f'rule_{rule["rule_id"]}' for rule in rules_data['rules']]

    # Canonical, unified graph feature columns (V2).
    graph_columns = list(_get_empty_graph_features().keys())

    # Keep only the columns actually present, in the canonical order.
    ordered_columns = base_columns + rule_columns + graph_columns
    ordered_columns = [col for col in ordered_columns if col in df.columns]
    df = df[ordered_columns]

    df.to_csv(output_csv, index=False, encoding='utf-8')

    print(f"CSV文件已保存，包含以下列:")
    print(f"  基础信息列: {len(base_columns)}")
    print(f"  规则特征列: {len(rule_columns)}")
    print(f"  图特征列: {len([col for col in graph_columns if col in df.columns])}")
    print(f"  总列数: {len(ordered_columns)}")


def _fast_match_call_node(graph: nx.DiGraph, node_id: str, edge_map: dict, pdcg_pattern: dict) -> bool:
    """快速CALL节点匹配 - 避免重复数据结构构建"""
    node_data = graph.nodes[node_id]
    callee_name = node_data.get('callee_name', '')
    
    # 检查callee_name精确匹配
    if 'callee_name' in pdcg_pattern:
        if callee_name != pdcg_pattern['callee_name']:
            return False
    
    # 检查callee_name正则匹配
    if 'callee_name_regex' in pdcg_pattern:
        pattern = pdcg_pattern['callee_name_regex']
        if not re.search(pattern, callee_name):
            return False
    
    # 检查参数
    if 'arguments' in pdcg_pattern:
        return _fast_check_arguments(graph, node_id, edge_map, pdcg_pattern['arguments'])
    
    return True

def _fast_match_arg_node(graph: nx.DiGraph, node_id: str, pdcg_pattern: dict) -> bool:
    """快速ARGUMENT节点匹配"""
    node_data = graph.nodes[node_id]
    content = node_data.get('content', '')
    
    # 精确匹配
    if 'content' in pdcg_pattern:
        expected = pdcg_pattern['content']
        actual_content = content.strip('"\'')
        if actual_content == expected:
            return True
    
    # 正则匹配
    if 'content_regex' in pdcg_pattern:
        pattern = pdcg_pattern['content_regex']
        if re.search(pattern, content):
            return True
    
    # 包含匹配
    if 'content_contains' in pdcg_pattern:
        expected = pdcg_pattern['content_contains']
        if expected in content:
            return True
    
    return False

def _fast_match_combo_rule(graph: nx.DiGraph, node_id: str, edge_map: dict, pdcg_pattern: dict) -> bool:
    """快速组合规则匹配"""
    node_data = graph.nodes[node_id]
    primary_call = pdcg_pattern['primary_call']
    
    # 检查主调用的callee_name
    callee_name = node_data.get('callee_name', '')
    if 'callee_name_regex' in primary_call:
        pattern = primary_call['callee_name_regex']
        if not re.search(pattern, callee_name):
            return False
    
    # 检查必需参数
    required_arguments = pdcg_pattern['required_arguments']
    return _fast_check_arguments(graph, node_id, edge_map, required_arguments)

def _fast_check_arguments(graph: nx.DiGraph, call_node_id: str, edge_map: dict, arguments: list) -> bool:
    """快速参数检查 - 使用预构建的边映射"""
    if call_node_id not in edge_map:
        return False
    
    arg_node_ids = edge_map[call_node_id]
    
    for arg_spec in arguments:
        index = arg_spec['index']
        
        # 找到指定索引的参数节点
        arg_node = None
        for arg_node_id in arg_node_ids:
            arg_node_candidate = graph.nodes.get(arg_node_id)
            if arg_node_candidate and arg_node_candidate.get('arg_index') == index:
                arg_node = arg_node_candidate
                break
        
        if not arg_node:
            return False
        
        content = arg_node.get('content', '')
        
        # 快速内容检查
        if 'content' in arg_spec:
            expected = arg_spec['content']
            actual = content.strip('"\'')
            if actual != expected:
                return False
        
        if 'content_contains' in arg_spec:
            expected = arg_spec['content_contains']
            if expected not in content:
                return False
        
        if 'content_regex' in arg_spec:
            pattern = arg_spec['content_regex']
            if not re.search(pattern, content):
                return False
    
    return True

def _mark_node_malicious(graph: nx.DiGraph, node_id: str, rule_id: str, rule: dict) -> None:
    """标记单个节点为恶意 - 避免重复属性设置"""
    node_data = graph.nodes[node_id]
    
    # 初始化恶意属性（仅在首次时）
    if 'is_malicious' not in node_data:
        node_data['is_malicious'] = True
        node_data['hit_rules'] = set()
        
        # 设置类别（仅在第一次时）
        category = rule.get('category', 'UNKNOWN')
        if category == 'INFORMATION_GATHERING':
            node_data['category'] = 'IG'
        elif category == 'DATA_TRANSMISSION':
            node_data['category'] = 'DT'
        elif category == 'DATA_ENCODING':
            node_data['category'] = 'DE'
        elif category == 'PAYLOAD_EXECUTION':
            node_data['category'] = 'PE'
        else:
            node_data['category'] = 'SP'
    
    # 添加命中的规则ID
    node_data['hit_rules'].add(rule_id)

def _vectorize(hit_rules: set, all_rule_ids: list) -> list:
    """
    将命中的规则ID集合转换为二进制特征向量
    
    Args:
        hit_rules: 命中的规则ID集合
        all_rule_ids: 所有规则ID的有序列表
        
    Returns:
        list: 二进制特征向量
    """
    # 创建规则ID到索引的映射
    rule_index_map = {rule_id: idx for idx, rule_id in enumerate(all_rule_ids)}
    
    # 创建全零向量
    num_rules = len(all_rule_ids)
    feature_vector = [0] * num_rules
    
    # 设置命中规则对应位置为1
    for rule_id in hit_rules:
        if rule_id in rule_index_map:
            index = rule_index_map[rule_id]
            feature_vector[index] = 1
    
    return feature_vector
def batch_analyze_packages(pdcg_root: str, rules_file: str, output_csv: str, data_types: Optional[List[str]] = None) -> None:
    """
    Batch-extract package-level malicious-behavior features and write a CSV.

    Args:
        pdcg_root: root directory of the PDCG data (e.g. ``E:/PDCG``),
            containing one subdirectory per data type
        rules_file: path to the rule knowledge base JSON
        output_csv: destination CSV path
        data_types: data-type subdirectories to process; defaults to
            ``['malicious', 'benign']`` when None
    """
    # Resolve the default here rather than using a mutable default argument.
    if data_types is None:
        data_types = ['malicious', 'benign']

    print(f"开始批量包级别特征提取（包含图特征）")
    print(f"PDCG根目录: {pdcg_root}")
    print(f"规则文件: {rules_file}")
    print(f"输出CSV: {output_csv}")
    print(f"处理数据类型: {data_types}")

    # Load the rule knowledge base; abort the whole batch if unreadable.
    try:
        with open(rules_file, 'r', encoding='utf-8') as f:
            rules_data = json.load(f)
        print(f"成功加载 {len(rules_data['rules'])} 条规则")
    except Exception as e:
        print(f"加载规则文件失败: {e}")
        return

    # Collect (package_dir, label, data_type) work items.
    all_packages = []
    for data_type in data_types:
        data_dir = Path(pdcg_root) / data_type
        if not data_dir.exists():
            print(f"警告: 目录不存在 {data_dir}")
            continue

        # Label convention: malicious packages are 1, everything else 0.
        label = 1 if data_type == 'malicious' else 0

        package_dirs = [d for d in data_dir.iterdir() if d.is_dir()]
        print(f"在 {data_type} 中找到 {len(package_dirs)} 个包")

        for package_dir in package_dirs:
            all_packages.append((package_dir, label, data_type))

    print(f"总共需要处理 {len(all_packages)} 个包")

    # Analyze each package, collecting successes and failures separately.
    results = []
    failed_packages = []

    for i, (package_path, label, data_type) in enumerate(all_packages, 1):
        try:
            print(f"[{i}/{len(all_packages)}] 分析包: {package_path.name}")

            # Per-package analysis: rule vector, hit rule IDs, graph features.
            feature_vector, hit_rules, graph_features = analyze_package_pdcg(str(package_path), rules_data)

            result_row = {
                'package_name': package_path.name,
                'data_type': data_type,
                'label': label,
                'hit_rules_count': len(hit_rules),
                'hit_rules': ','.join(sorted(hit_rules)) if hit_rules else '',
            }

            # One binary column per rule, aligned with the rule order.
            all_rule_ids = [rule['rule_id'] for rule in rules_data['rules']]
            for j, rule_id in enumerate(all_rule_ids):
                result_row[f'rule_{rule_id}'] = feature_vector[j]

            # Graph feature columns.
            result_row.update(graph_features)

            results.append(result_row)

            print(f"  ✅ 成功: 命中 {len(hit_rules)} 条规则, {graph_features['malicious_node_count']} 个恶意节点")

        except Exception as e:
            # Record the failure and keep processing the remaining packages.
            print(f"  ❌ 失败: {e}")
            failed_packages.append((package_path.name, str(e)))

    # Persist everything that succeeded.
    if results:
        save_results_to_csv(results, output_csv, rules_data)
        print(f"\n✅ 成功保存 {len(results)} 个包的特征到: {output_csv}")
    else:
        print("\n❌ 没有成功分析的包，无法生成CSV")

    print_batch_summary(results, failed_packages, data_types)

def print_batch_summary(results: List[Dict], failed_packages: List, data_types: List[str]) -> None:
    """
    Print a human-readable summary of a batch run.

    Args:
        results: rows of successfully analyzed packages
        failed_packages: (package_name, error message) pairs
        data_types: data types that were processed
    """
    print("\n" + "="*80)
    print("批量包级别特征提取完成!")
    print("="*80)
    
    if results:
        # Per-data-type hit statistics.
        for data_type in data_types:
            type_results = [r for r in results if r['data_type'] == data_type]
            if type_results:
                hit_counts = [r['hit_rules_count'] for r in type_results]
                avg_hits = sum(hit_counts) / len(hit_counts)
                max_hits = max(hit_counts)
                min_hits = min(hit_counts)
                
                print(f"\n{data_type.upper()} 数据统计:")
                print(f"  处理包数: {len(type_results)}")
                print(f"  平均命中规则: {avg_hits:.1f}")
                print(f"  最大命中规则: {max_hits}")
                print(f"  最小命中规则: {min_hits}")
        
        # Overall statistics.
        total_packages = len(results)
        total_hits = sum(r['hit_rules_count'] for r in results)
        avg_hits_overall = total_hits / total_packages if total_packages > 0 else 0
        
        print(f"\n整体统计:")
        print(f"  成功处理包数: {total_packages}")
        print(f"  失败包数: {len(failed_packages)}")
        print(f"  总命中规则次数: {total_hits}")
        print(f"  平均每包命中规则: {avg_hits_overall:.1f}")
        
        # Most frequently hit rules across all packages.
        all_hit_rules = []
        for r in results:
            if r['hit_rules']:
                all_hit_rules.extend(r['hit_rules'].split(','))
        
        if all_hit_rules:
            # Counter is imported at module level; the previous local
            # re-import here was redundant and has been removed.
            rule_counter = Counter(all_hit_rules)
            top_rules = rule_counter.most_common(10)
            
            print(f"\n最常命中的规则 (Top 10):")
            for rule_id, count in top_rules:
                percentage = (count / total_packages) * 100
                print(f"  {rule_id}: {count}次 ({percentage:.1f}%)")
    
    if failed_packages:
        print(f"\n失败的包:")
        for pkg_name, error in failed_packages[:10]:  # show at most 10
            print(f"  {pkg_name}: {error}")
        if len(failed_packages) > 10:
            print(f"  ... 还有 {len(failed_packages) - 10} 个失败的包")
    
    print("="*80)
def main():
    """CLI entry point: dispatch between single-package and batch modes."""
    # Build the CLI: a positional path for single-package mode plus a
    # --batch/--output pair for batch mode.
    parser = argparse.ArgumentParser(
        description="NPM包恶意行为特征提取工具 - 支持单包和批量模式",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
示例用法:
  # 单包分析
  python feature_extractor.py /path/to/npm/package
  
  # 批量包级别分析
  python feature_extractor.py --batch E:/PDCG --output features.csv
  python feature_extractor.py --batch E:/PDCG --output features.csv --types malicious benign
        """
    )
    
    parser.add_argument(
        'package_path',
        nargs='?',
        help='单包模式: NPM包文件夹路径'
    )
    
    parser.add_argument(
        '--batch',
        help='批量模式: PDCG根目录路径 (如 E:/PDCG)'
    )
    
    parser.add_argument(
        '--output',
        help='批量模式: 输出CSV文件路径'
    )
    
    parser.add_argument(
        '--types',
        nargs='+',
        default=['malicious', 'benign'],
        choices=['malicious', 'benign'],
        help='批量模式: 要处理的数据类型'
    )
    
    parser.add_argument(
        '--rules',
        default=r'simple_sensitivity_analysis\malicious_rules.json',
        help='规则知识库JSON文件路径 (默认: malicious_rules.json)'
    )
    
    args = parser.parse_args()
    
    try:
        if args.batch:
            # Batch mode: requires --output; processes every package under
            # the PDCG root and writes one CSV.
            if not args.output:
                print("错误: 批量模式需要指定 --output 参数")
                return
                
            batch_analyze_packages(
                pdcg_root=args.batch,
                rules_file=args.rules,
                output_csv=args.output,
                data_types=args.types
            )
            
        elif args.package_path:
            # Single-package mode: load rules, analyze one package, print
            # a report to stdout.
            print(f"加载规则文件: {args.rules}")
            with open(args.rules, 'r', encoding='utf-8') as f:
                rules_data = json.load(f)
            
            print(f"成功加载 {len(rules_data['rules'])} 条规则")
            print("-" * 60)
            
            print(f"开始分析包: {args.package_path}")
            feature_vector, hit_rules, graph_features = analyze_package_pdcg(args.package_path, rules_data)
            
            # Print results (single-package mode) — cross-file collaboration
            # display has been removed.
            print("-" * 60)
            print("分析结果:")
            print(f"包路径: {args.package_path}")
            print(f"命中规则数量: {len(hit_rules)}")
            print(f"命中规则ID: {sorted(hit_rules) if hit_rules else '无'}")
            print(f"特征向量维度: {len(feature_vector)}")
            print(f"特征向量: {feature_vector}")
            
            # Graph feature summary (cross-file collaboration removed).
            print(f"\n图特征摘要:")
            print(f"  恶意节点数: {graph_features.get('malicious_node_count', 0)}")
            print(f"  恶意密度: {graph_features.get('malicious_density', 0):.3f}")
            print(f"  类别多样性: {graph_features.get('category_diversity', 0)}")
            print(f"  社区数量: {graph_features.get('num_malicious_communities', 0)}")
            
            if hit_rules:
                print("\n命中规则详细信息:")
                for rule in rules_data['rules']:
                    if rule['rule_id'] in hit_rules:
                        print(f"  {rule['rule_id']}: {rule['name']} ({rule['category']})")
        else:
            parser.print_help()
            
    except FileNotFoundError as e:
        print(f"错误: 文件不存在 - {e}")
    except json.JSONDecodeError as e:
        print(f"错误: JSON解析失败 - {e}")
    except Exception as e:
        print(f"错误: {e}")
        import traceback
        traceback.print_exc()

if __name__ == '__main__':
    main()

