import pandas as pd
import pickle
import os
import argparse
from collections import defaultdict
import networkx as nx

def get_file_from_node(node_name):
    """Extract the file path portion from a node name.

    Node names use the form "<file>:<symbol>"; everything before the
    first ':' is the file path. A name without a ':' is already a plain
    file path and is returned unchanged.
    """
    if ':' not in node_name:
        return node_name
    return node_name.split(':')[0]

def build_import_map(graph):
    """Build the import map: which other files each file imports.

    Only edges tagged with type 'imports' contribute; every matching
    edge is also echoed to stdout for debugging.

    Returns:
        dict {source_file: set(imported_files)}
    """
    imports_by_file = defaultdict(set)

    for importer, imported, attrs in graph.edges(data=True):
        if attrs.get('type') != 'imports':
            continue
        # importer is the importing file; imported is the file it pulls in
        imports_by_file[importer].add(imported)
        print(f"Import: {importer} -> {imported}")

    return imports_by_file

def get_all_accessible_nodes(file_node, graph, import_map):
    """Collect every node accessible from a given file.

    A node is accessible when it lives either in the file itself or in
    any file that the file imports.

    Args:
        file_node: file path whose accessible scope is being computed.
        graph: graph whose ``nodes()`` yields node names of the form
            "<file>:<symbol>" or plain file paths.
        import_map: dict mapping a file path to the set of file paths it
            imports (as produced by ``build_import_map``).

    Returns:
        set of node names accessible from ``file_node``.
    """
    # Files whose nodes are in scope: the file itself plus its imports.
    visible_files = {file_node} | import_map.get(file_node, set())

    # Single pass over the graph. The previous version scanned
    # graph.nodes() once for the file and once per imported file,
    # i.e. O((1 + imports) * N); this is O(N) with identical results.
    return {
        node
        for node in graph.nodes()
        if get_file_from_node(node) in visible_files
    }

def validate_invocation(source_node, target_node, graph, import_map):
    """Check whether an invocation edge is legal.

    Rule: the target must be within the source's accessible scope —
    either in the same file, or in a file the source's file imports.
    (``graph`` is accepted for interface symmetry but not consulted.)
    """
    caller_file = get_file_from_node(source_node)
    callee_file = get_file_from_node(target_node)

    # Same-file calls are always valid; otherwise the callee's file must
    # appear among the caller's imports.
    return (
        caller_file == callee_file
        or callee_file in import_map.get(caller_file, set())
    )

def post_process_graph(graph_path, output_dir):
    """
    Post-process the graph data: remove invalid 'invokes' edges.

    An invoke edge is kept only when the target lives in the same file
    as the source, or in a file the source's file imports. All other
    edge types are copied through unchanged. Results are written to
    ``output_dir``: a cleaned pickle plus Excel reports for nodes,
    edges, removed invocations, and summary statistics.

    Args:
        graph_path: path to a pickled graph (networkx MultiDiGraph-like,
            with 'type' attributes on edges).
        output_dir: directory for the cleaned outputs (created if missing).

    Returns:
        The cleaned nx.MultiDiGraph.
    """
    print(f"Loading graph from {graph_path}")

    # Load the pickled graph.
    # NOTE(review): pickle.load can execute arbitrary code — only run
    # this on graph files produced by the trusted indexing pipeline.
    with open(graph_path, 'rb') as f:
        graph = pickle.load(f)

    print(f"Original graph: {len(graph.nodes())} nodes, {len(graph.edges())} edges")

    # Build the import map (file -> set of files it imports).
    print("\nBuilding import map...")
    import_map = build_import_map(graph)
    print(f"Found {len(import_map)} files with imports")

    # Tally the original edges by type.
    edge_stats = defaultdict(int)
    for _, _, data in graph.edges(data=True):
        edge_stats[data.get('type', 'unknown')] += 1

    print(f"\nOriginal edge statistics:")
    for edge_type, count in edge_stats.items():
        print(f"  {edge_type}: {count}")

    # Create the cleaned graph.
    new_graph = nx.MultiDiGraph()

    # Copy every node (with its attributes) unchanged.
    for node, attrs in graph.nodes(data=True):
        new_graph.add_node(node, **attrs)

    # Process the edges: validate invoke edges, copy everything else.
    valid_invokes = 0
    invalid_invokes = 0
    invalid_invocation_details = []

    for source, target, key, attrs in graph.edges(keys=True, data=True):
        edge_type = attrs.get('type')

        if edge_type == 'invokes':
            # Keep the invoke edge only if the target is in scope.
            if validate_invocation(source, target, graph, import_map):
                new_graph.add_edge(source, target, key=key, **attrs)
                valid_invokes += 1
            else:
                invalid_invokes += 1
                source_file = get_file_from_node(source)
                target_file = get_file_from_node(target)
                invalid_invocation_details.append({
                    'source': source,
                    'target': target,
                    'source_file': source_file,
                    'target_file': target_file,
                    'reason': f"File {target_file} not imported by {source_file}"
                })
                if invalid_invokes <= 10:  # only print the first 10 invalid calls
                    print(f"  Invalid invoke: {source} -> {target}")
                    print(f"    Reason: {target_file} not imported by {source_file}")
        else:
            # Edges of any other type are copied through untouched.
            new_graph.add_edge(source, target, key=key, **attrs)

    # Tally the edges of the cleaned graph.
    new_edge_stats = defaultdict(int)
    for _, _, data in new_graph.edges(data=True):
        new_edge_stats[data.get('type', 'unknown')] += 1

    print(f"\nProcessing results:")
    print(f"  Valid invocations: {valid_invokes}")
    print(f"  Invalid invocations removed: {invalid_invokes}")

    print(f"\nNew edge statistics:")
    for edge_type, count in new_edge_stats.items():
        print(f"  {edge_type}: {count}")

    # Persist the cleaned graph.
    os.makedirs(output_dir, exist_ok=True)

    # Save in pickle format.
    output_pickle = os.path.join(output_dir, 'graph_cleaned.pkl')
    with open(output_pickle, 'wb') as f:
        pickle.dump(new_graph, f)
    print(f"\nCleaned graph saved to {output_pickle}")

    # Save the node table.
    nodes_data = []
    for node, attributes in new_graph.nodes(data=True):
        node_info = {
            'node_id': node,
            'type': attributes.get('type', ''),
            'file': get_file_from_node(node),
            'start_line': attributes.get('start_line', ''),
            'end_line': attributes.get('end_line', ''),
            'code_length': len(attributes.get('code', '')) if attributes.get('code') else 0,
        }
        nodes_data.append(node_info)

    nodes_df = pd.DataFrame(nodes_data)
    nodes_excel_path = os.path.join(output_dir, 'nodes_cleaned.xlsx')
    nodes_df.to_excel(nodes_excel_path, index=False)
    print(f"Nodes table saved to {nodes_excel_path}")

    # Save the edge table.
    edges_data = []
    for u, v, key, attributes in new_graph.edges(keys=True, data=True):
        edge_info = {
            'source': u,
            'target': v,
            'source_file': get_file_from_node(u),
            'target_file': get_file_from_node(v),
            'edge_key': key,
            'type': attributes.get('type', ''),
        }
        edges_data.append(edge_info)

    edges_df = pd.DataFrame(edges_data)
    edges_excel_path = os.path.join(output_dir, 'edges_cleaned.xlsx')
    edges_df.to_excel(edges_excel_path, index=False)
    print(f"Edges table saved to {edges_excel_path}")

    # Save details of the removed invocations (for later analysis).
    if invalid_invocation_details:
        invalid_df = pd.DataFrame(invalid_invocation_details)
        invalid_excel_path = os.path.join(output_dir, 'invalid_invocations.xlsx')
        invalid_df.to_excel(invalid_excel_path, index=False)
        print(f"Invalid invocations saved to {invalid_excel_path}")

    # Save the summary statistics.
    stats_data = {
        'Metric': ['Total Nodes', 'Total Edges', 'Valid Invocations', 'Removed Invalid Invocations']
        + [f'Edges - {etype}' for etype in new_edge_stats.keys()],
        'Count': [
            len(new_graph.nodes()),
            len(new_graph.edges()),
            valid_invokes,
            invalid_invokes
        ] + [new_edge_stats[etype] for etype in new_edge_stats.keys()]
    }

    stats_df = pd.DataFrame(stats_data)
    stats_excel_path = os.path.join(output_dir, 'graph_statistics_cleaned.xlsx')
    stats_df.to_excel(stats_excel_path, index=False)
    print(f"Statistics saved to {stats_excel_path}")

    return new_graph

def main():
    """CLI entry point: parse arguments and run the post-processing step."""
    arg_parser = argparse.ArgumentParser(description='Post-process Java dependency graph to remove invalid invocations')
    arg_parser.add_argument('--input', type=str, required=True, help='Path to input pickle file')
    arg_parser.add_argument('--output-dir', type=str, required=True, help='Output directory for cleaned data')
    parsed = arg_parser.parse_args()

    post_process_graph(parsed.input, parsed.output_dir)

if __name__ == "__main__":
    # 默认处理之前生成的图
    import sys
    if len(sys.argv) == 1:
        # 如果没有提供参数，使用默认值
        input_path = "index_data_v2_4/pp_data/graph_index_v2.3/pingp-auto-test.pkl"
        output_dir = "index_data_cleaned/pingp-auto-test"

        if os.path.exists(input_path):
            print(f"Processing default file: {input_path}")
            post_process_graph(input_path, output_dir)
        else:
            print(f"Default file not found: {input_path}")
            print("Please provide input file path using --input parameter")
    else:
        main()