import time
import traceback
import logging
import concurrent.futures  # NOTE: switched from ProcessPoolExecutor to ThreadPoolExecutor
from collections import defaultdict
import xxhash
import networkx as nx

from config.log_config import get_logger
from matching.non_isomorphic_calculate import get_top_level_node_no_change_token, jaccard_similarity
from config.config import MatchMethod
from utils.calculate_utils import calculate_subtree_similarity, build_graph_from_feature
from entity.function_candidate import FunctionCandidate
logger = logging.getLogger(__name__)
# Semantic-similarity threshold for isomorphic subtrees
TOKEN_SIMILARITY_THRESHOLD = 0.6


# Pruning
def prune(g, dead_roots):
    """
    Remove every node in ``dead_roots`` together with all of its descendants,
    and report node-count statistics about what was removed versus kept.

    Returns a tuple ``(pruned_graph, stats)`` where ``stats`` holds the total,
    removed ("isomorphic") and kept ("non-isomorphic") node counts and ratios.
    """
    node_total = g.number_of_nodes()

    # Collect each dead root plus everything reachable below it.
    doomed = set()
    for root in dead_roots:
        if root in g:
            doomed.add(root)
            doomed |= nx.descendants(g, root)

    # Removed nodes are the isomorphic portion; what remains is heterogeneous.
    removed_count = len(doomed)
    kept_count = node_total - removed_count

    # Prune on a copy so the caller's graph is left untouched.
    pruned = g.copy()
    pruned.remove_nodes_from(doomed)

    stats = {
        "total_nodes": node_total,
        "isomorphic_nodes": removed_count,
        "non_isomorphic_nodes": kept_count,
        "isomorphic_ratio": removed_count / node_total if node_total > 0 else 0,
        "non_isomorphic_ratio": kept_count / node_total if node_total > 0 else 0,
    }
    return pruned, stats


def node_signature(g, v):
    """
    Signature used to decide whether two nodes carry "the same content".

    Only the node's ``kind`` attribute (its type) participates, rendered as a
    string so it can be embedded in a hash payload.
    """
    kind = g.nodes[v]["kind"]
    return str(kind)


def find_hash_matched_candidates(g1, g2):
    """
    Find potential isomorphic-subtree root pairs between two graphs by
    matching their precomputed ``subtree_hash`` values.

    Returns ``(candidate_pairs, sorted_hashes)`` where each candidate pair is
    ``(node_in_g1, node_in_g2, subtree_size)``, ordered by size descending.
    """
    # Bucket each graph's nodes by subtree hash.
    by_hash_1 = defaultdict(list)
    by_hash_2 = defaultdict(list)
    for node in g1.nodes:
        by_hash_1[g1.nodes[node]["subtree_hash"]].append(node)
    for node in g2.nodes:
        by_hash_2[g2.nodes[node]["subtree_hash"]].append(node)

    # Only hashes present in both graphs can yield a match.
    shared = set(by_hash_1) & set(by_hash_2)

    # Estimate each hash's subtree size from one g1 representative
    # (equal hashes imply equal structure, hence equal size).
    size_of = {}
    for h in shared:
        rep = by_hash_1[h][0]
        size_of[h] = 1 + len(nx.descendants(g1, rep))

    # Hashes ordered by subtree size, largest first.
    ranked_hashes = sorted(shared, key=lambda h: size_of[h], reverse=True)

    # Cartesian pairing of matching nodes for every shared hash.
    pairs = [
        (a, b, size_of[h])
        for h in ranked_hashes
        for a in by_hash_1[h]
        for b in by_hash_2[h]
    ]
    # Stable sort keeps the per-hash ordering while ranking by size.
    pairs.sort(key=lambda p: p[2], reverse=True)

    return pairs, ranked_hashes


def greedy_select_optimal_pairs(G1, G2, valid_pairs, early_stop_ratio=0.95):
    """
    Greedily pick the best combination of non-overlapping subtree pairs.

    Pairs are considered biggest-and-most-similar first; a pair is taken only
    if its subtrees do not overlap anything already selected. Selection stops
    early once either graph's coverage reaches ``early_stop_ratio``.

    Returns ``(selected_pairs, covered_nodes_g1, covered_nodes_g2)``.
    """
    # Rank by (subtree size, similarity), descending.
    ordered = sorted(valid_pairs, key=lambda p: (p[2], p[3]), reverse=True)

    chosen = []
    covered1, covered2 = set(), set()

    for v1, v2, size, sim, mapping in ordered:
        nodes1 = nx.descendants(G1, v1) | {v1}
        nodes2 = nx.descendants(G2, v2) | {v2}

        # Skip anything overlapping an already-selected subtree.
        if nodes1 & covered1 or nodes2 & covered2:
            continue

        chosen.append((v1, v2, size, sim, mapping))
        covered1 |= nodes1
        covered2 |= nodes2

        # Early exit once enough of either graph is covered.
        ratio1 = len(covered1) / len(G1) if len(G1) > 0 else 0
        ratio2 = len(covered2) / len(G2) if len(G2) > 0 else 0
        if ratio1 >= early_stop_ratio or ratio2 >= early_stop_ratio:
            break

    return chosen, covered1, covered2


def find_max_common_subtrees_candidates(g1, g2):
    """
    Enumerate all potential isomorphic-subtree root pairs based on tree hashes.

    Assumes ``build_merkle_hash`` has already populated ``subtree_hash`` and
    ``subtree_size`` on every node (computed in topological order). Returns a
    list of ``(v1, v2, size, similarity, mapping)`` tuples sorted by
    (size, similarity) descending, ready for greedy selection.
    """
    # Bucket every node of both graphs under its (hash, size) key; the size
    # is included so hash collisions across different sizes cannot pair up.
    buckets = defaultdict(list)
    for graph in (g1, g2):
        for node in graph.nodes:
            data = graph.nodes[node]
            buckets[(data["subtree_hash"], data["subtree_size"])].append((graph, node))

    pairs = []
    for (h, size), members in buckets.items():
        left = [n for owner, n in members if owner is g1]
        right = [n for owner, n in members if owner is g2]
        # Only keys present in both graphs produce candidates.
        if not left or not right:
            continue
        for v1 in left:
            for v2 in right:
                mapping = construct_trivial_mapping(g1, g2, v1, v2)
                sim = calculate_subtree_similarity(g1, g2, mapping)
                pairs.append((v1, v2, size, sim, mapping))

    # Rank by size then similarity so the greedy stage sees the best first.
    pairs.sort(key=lambda p: (p[2], p[3]), reverse=True)
    return pairs


def construct_trivial_mapping(g1, g2, root1, root2):
    """
    Build a node-to-node mapping between two subtrees known to be structurally
    identical (guaranteed by equal Merkle hashes) via lockstep BFS.

    Fix: children are now aligned by their ``subtree_hash`` so that
    structurally corresponding children are paired even when their node ids
    differ between the two graphs; the node-id string is kept only as a
    deterministic tie-breaker among hash-equal siblings. (Previously children
    were sorted by node id alone, which does not align corresponding children
    across graphs.)

    Also replaces the O(n) ``list.pop(0)`` dequeue with an index cursor,
    making the traversal linear overall.
    """
    def _child_key(graph):
        # Sort key: (subtree hash, stringified node id). The .get() fallback
        # keeps the sort working even if a node is missing its hash.
        def key(node):
            return (graph.nodes[node].get("subtree_hash", ""), str(node))
        return key

    key1, key2 = _child_key(g1), _child_key(g2)

    mapping = {}
    queue1, queue2 = [root1], [root2]
    cursor = 0
    while cursor < len(queue1) and cursor < len(queue2):
        n1, n2 = queue1[cursor], queue2[cursor]
        cursor += 1
        mapping[n1] = n2

        queue1.extend(sorted(g1.successors(n1), key=key1))
        queue2.extend(sorted(g2.successors(n2), key=key2))
    return mapping


def hybrid_max_isomorphic_subtree(g1, g2, similarity_threshold=0.5, parallel=True, early_stop_ratio=0.95):
    """
    Find the maximal isomorphic subgraph between two graphs with the hybrid
    hash-candidate + greedy-selection approach.

    Fixes:
    - ``early_stop_ratio`` was previously accepted but never forwarded to
      ``greedy_select_optimal_pairs`` (which silently used its own default);
      it is now passed through.
    - ``verification_stats`` was computed and then discarded; it is now
      included in the returned ``stats`` dict.

    ``parallel`` is currently unused; it is kept for interface compatibility.

    Returns ``(isomorphic_roots_g1, isomorphic_roots_g2, stats)``.
    """
    start_time = time.time()
    # NOTE(review): only g2 is (re)hashed here; g1 is expected to already
    # carry subtree_hash/subtree_size (the visible caller hashes both graphs
    # beforehand) — confirm before using this function standalone.
    build_merkle_hash(g2)

    # Candidate pairs from matching (hash, size) keys, filtered by semantic
    # similarity of the mapped subtrees.
    valid_pairs = find_max_common_subtrees_candidates(g1, g2)
    valid_pairs = [p for p in valid_pairs if p[3] >= similarity_threshold]
    verification_time = time.time()
    verification_stats = {
        "verification_time": verification_time - start_time,
        "valid_pairs": len(valid_pairs)
    }

    # Greedy non-overlapping selection, honoring the caller's early-stop ratio.
    selected_pairs, same1, same2 = greedy_select_optimal_pairs(
        g1, g2, valid_pairs, early_stop_ratio=early_stop_ratio
    )
    selection_time = time.time()
    selection_stats = {
        "selection_time": selection_time - verification_time,
        "selected_pairs": len(selected_pairs),
        "isomorphic_nodes_G1": len(same1),
        "isomorphic_nodes_G2": len(same2),
        "coverage_ratio_G1": len(same1) / len(g1) if len(g1) > 0 else 0,
        "coverage_ratio_G2": len(same2) / len(g2) if len(g2) > 0 else 0,
    }
    stats = {
        "total_time": selection_time - start_time,
        "G1_nodes": len(g1),
        "G2_nodes": len(g2),
        "verification_stats": verification_stats,
        "selection_stats": selection_stats,
    }
    return same1, same2, stats


def build_merkle_hash(g):
    """
    Compute a Merkle-style subtree hash and subtree size for every node,
    bottom-up (reverse topological order), storing them as the node
    attributes ``subtree_hash`` and ``subtree_size``.
    """
    for node in reversed(list(nx.topological_sort(g))):
        child_digests = []
        size = 1  # count the node itself
        for child in g.successors(node):
            child_digests.append(g.nodes[child]["subtree_hash"])
            size += g.nodes[child].get("subtree_size", 0)
        # Sorting child hashes makes the digest order-independent, so
        # sibling order does not affect structural equality.
        payload = node_signature(g, node) + "|" + "|".join(sorted(child_digests))
        g.nodes[node]["subtree_hash"] = xxhash.xxh64(payload, seed=0).hexdigest()
        g.nodes[node]["subtree_size"] = size

### MODIFICATION ###: function name and parameters changed for the threaded version
def _process_single_candidate_for_threading(args):
    """
    Thread-pool worker: compute the isomorphic/heterogeneous similarity
    between the shared ArkTS graph ``g1`` and one NPM candidate function.

    Args (packed in a single tuple so the executor submits one object):
        candidate: FunctionCandidate holding ``candidate_id``/``candidate_doc``.
        g1: shared networkx graph of the target function (read-only here;
            a private copy is made before any mutation).
        target_node_count: node count of the target's AST.
        first_stage_sim_key_str: label of the first-stage method (unused in
            the computation; kept so the task-tuple layout stays stable).
        same_ast_similarity_threshold: reference isomorphic ratio used in the
            distance formula.

    Returns:
        A result FunctionCandidate, or None when the candidate is skipped
        (node counts differ by more than 10x) or an error occurs.

    Fixes vs. the previous version: removed the dead ``g1_thread_copy = g1``
    assignment that was immediately overwritten by ``g1.copy()``, and routed
    error logging through the module ``logger`` instead of the root logger,
    consistent with the rest of the file.
    """
    (candidate, g1, target_node_count, first_stage_sim_key_str,
     same_ast_similarity_threshold) = args

    candidate_id = candidate.candidate_id
    candidate_doc = candidate.candidate_doc

    try:
        # Skip candidates whose AST size differs by more than 10x either way.
        candidate_node_count = candidate_doc['ast_feature'].get('nodeCount', 0)
        if candidate_node_count > 10 * target_node_count or target_node_count > 10 * candidate_node_count:
            return None

        g2 = build_graph_from_feature(candidate_doc['ast_feature'])

        # build_merkle_hash mutates node attributes and g1 is shared across
        # worker threads, so each worker hashes its own private copy.
        g1_thread_copy = g1.copy()
        build_merkle_hash(g1_thread_copy)
        build_merkle_hash(g2)

        # --- Isomorphic part: maximal common subtrees, then prune them. ---
        same1, same2, stats = hybrid_max_isomorphic_subtree(
            g1_thread_copy, g2,
            similarity_threshold=TOKEN_SIMILARITY_THRESHOLD
        )
        p1, stats1 = prune(g1_thread_copy, same1)
        p2, stats2 = prune(g2, same2)
        isomorphic_ratio = (stats1["isomorphic_ratio"] + stats2["isomorphic_ratio"]) / 2

        # --- Heterogeneous part: compare tokens of the pruned remainders. ---
        arkts_top_level_node_nochangeToken = get_top_level_node_no_change_token(p1)
        npm_top_level_node_nochangeToken = get_top_level_node_no_change_token(p2)

        diff_similarity = jaccard_similarity(
            set(arkts_top_level_node_nochangeToken),
            set(npm_top_level_node_nochangeToken)
        )

        # Distance: deviation of the isomorphic ratio from the reference
        # threshold plus the shortfall of the heterogeneous similarity from 1.
        distance = abs(same_ast_similarity_threshold - isomorphic_ratio) + abs(1 - diff_similarity)
        candidate_name = candidate_doc.get('name', '')

        return FunctionCandidate.from_candidate_with_results(
            original_candidate=candidate,
            candidate_name=candidate_name,
            isomorphic_ratio=isomorphic_ratio,
            diff_similarity=diff_similarity,
            distance=distance,
            match_method=MatchMethod.HYBRID.value
        )

    except Exception as e:
        logger.error(f"[线程] 处理 candidate_id={candidate_id} 时出错: {str(e)}")
        logger.error(traceback.format_exc())
        return None


### MODIFICATION ###: function name and internal logic changed for the threaded version
def isomorphic_ratio_stage(g1, target_node_count, npm_candidates, has_vector_or_hash, first_stage_method,
                                     same_ast_similarity_threshold):
    """
    Second matching stage: compute the isomorphic ratio for every NPM
    candidate, fanned out over a thread pool.

    Args:
        g1: networkx graph of the target function's AST (shared, read-only;
            workers copy it before mutating node attributes).
        target_node_count: node count of the target AST, used to prune
            size-mismatched candidates inside the worker.
        npm_candidates: candidates surviving the first stage.
        has_vector_or_hash: whether first-stage filtering produced usable
            candidates; when false, nothing is computed.
        first_stage_method: label of the first-stage method, for logging.
        same_ast_similarity_threshold: reference ratio for the worker's
            distance calculation.

    Returns:
        List of FunctionCandidate results (order follows task completion,
        not input order).

    Fixes vs. the previous version: removed stale modification-history
    comments (one of which wrongly claimed results are dicts rather than
    objects) and routed error logging through the module ``logger`` for
    consistency with the rest of the file.
    """
    first_stage_sim_key_str = first_stage_method
    isomorphic_candidates = []

    if has_vector_or_hash and npm_candidates:
        logger.debug(f"使用{first_stage_sim_key_str}筛选后的 {len(npm_candidates)} 个候选函数计算同构比例（多线程版）")
        start_time = time.time()

        # Threads share memory with the main thread, so g1 is passed directly
        # (no serialization needed); each worker copies it before hashing.
        tasks = [
            (candidate, g1, target_node_count, first_stage_sim_key_str, same_ast_similarity_threshold)
            for candidate in npm_candidates
        ]

        # Cap at 5 workers: threads are cheap, but the work is CPU-bound and
        # the GIL limits how much parallelism extra threads would buy.
        num_workers = min(5, (len(npm_candidates) or 1) * 2)
        with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
            future_to_task = {executor.submit(_process_single_candidate_for_threading, task): task for task in tasks}

            max_ratio_so_far = 0.0
            for future in concurrent.futures.as_completed(future_to_task):
                try:
                    result = future.result()
                    if result is not None:
                        isomorphic_candidates.append(result)
                        max_ratio_so_far = max(max_ratio_so_far, result.isomorphic_ratio)
                except Exception as e:
                    # Reaching here means the worker itself crashed (its own
                    # try/except normally returns None); log the candidate id
                    # so the failed task can be traced.
                    original_candidate_id = future_to_task[future][0].candidate_id
                    logger.error(f"主线程：处理候选 {original_candidate_id} 的线程发生通信或严重错误: {e}")

        logger.debug(
            f"同构和异构比例计算完成，与{len(npm_candidates)}候选npm函数完成比较，耗时: {time.time() - start_time:.2f}秒，最优比例为：{max_ratio_so_far}")

    return isomorphic_candidates

