import time
import traceback
import logging
import concurrent.futures
from collections import defaultdict
import xxhash
import networkx as nx

from config.log_config import get_logger
from matching.non_isomorphic_calculate import get_top_level_node_no_change_token, jaccard_similarity
from config.config import MatchMethod
from utils.calculate_utils import calculate_subtree_similarity, build_graph_from_feature
from entity.function_candidate import FunctionCandidate
logger = get_logger()
# Semantic similarity threshold for isomorphic subtrees
TOKEN_SIMILARITY_THRESHOLD = 0.6

# This version of the isomorphism algorithm uses parallel processes; the v2 multi-threaded
# variant is what is currently in use. Parallelism is capped at 5 workers — any more tends
# to trigger MongoDB OOM.
GLOBAL_G1 = None


#  Pruning
def prune(g, dead_roots):
    """
    Remove every node in dead_roots together with all of its descendants.

    Args:
        g: networkx DiGraph to prune (the original is left untouched).
        dead_roots: iterable of subtree roots that were matched as
            isomorphic and should be cut away.

    Returns:
        (pruned, stats): a pruned copy of g, and a dict reporting total,
        removed (isomorphic) and kept (non-isomorphic) node counts plus
        their ratios.
    """
    node_total = g.number_of_nodes()

    # Gather each dead root plus everything reachable beneath it.
    doomed = set()
    for root in dead_roots:
        if root in g:
            doomed.add(root)
            doomed |= nx.descendants(g, root)

    removed = len(doomed)            # isomorphic portion (to be deleted)
    kept = node_total - removed      # non-isomorphic portion (retained)

    # Prune on a copy so callers keep the original graph intact.
    pruned = g.copy()
    pruned.remove_nodes_from(doomed)

    stats = {
        "total_nodes": node_total,
        "isomorphic_nodes": removed,
        "non_isomorphic_nodes": kept,
        "isomorphic_ratio": removed / node_total if node_total > 0 else 0,
        "non_isomorphic_ratio": kept / node_total if node_total > 0 else 0,
    }
    return pruned, stats


def greedy_select_optimal_pairs(G1, G2, valid_pairs, early_stop_ratio=0.95):
    """
    Greedily pick a maximal set of non-overlapping subtree pairs.

    Candidates are consumed largest-first (ties broken by similarity); a
    pair is accepted only when neither of its subtrees touches a node
    already claimed by an earlier selection.

    Args:
        G1, G2: the two graphs the candidate pairs refer to.
        valid_pairs: iterable of (v1, v2, size, similarity, mapping).
        early_stop_ratio: stop once either graph's node coverage reaches
            this fraction.

    Returns:
        (selected, covered1, covered2): the accepted pairs and the node
        sets of G1/G2 they cover.
    """
    chosen = []
    covered1, covered2 = set(), set()
    size1, size2 = len(G1), len(G2)

    # Largest subtrees first, then highest similarity.
    for pair in sorted(valid_pairs, key=lambda p: (p[2], p[3]), reverse=True):
        v1, v2 = pair[0], pair[1]
        nodes1 = nx.descendants(G1, v1) | {v1}
        nodes2 = nx.descendants(G2, v2) | {v2}

        # Reject pairs that overlap anything already claimed.
        if (nodes1 & covered1) or (nodes2 & covered2):
            continue

        chosen.append(pair)
        covered1 |= nodes1
        covered2 |= nodes2

        # Early exit: enough of either graph is already covered.
        ratio1 = len(covered1) / size1 if size1 > 0 else 0
        ratio2 = len(covered2) / size2 if size2 > 0 else 0
        if ratio1 >= early_stop_ratio or ratio2 >= early_stop_ratio:
            break

    return chosen, covered1, covered2


def construct_trivial_mapping(g1, g2, root1, root2):
    """
    Build a node mapping between two structurally identical subtrees.

    Because the two subtrees share the same Merkle hash, they have the
    same shape, so a synchronized BFS suffices: the i-th (string-sorted)
    child of n1 is paired with the i-th child of n2.

    Args:
        g1, g2: graphs exposing ``successors(node)``.
        root1, root2: roots of the hash-identical subtrees.

    Returns:
        dict mapping each node of subtree 1 to its counterpart in subtree 2.

    BUGFIX (performance): the previous version dequeued with
    ``list.pop(0)``, which is O(n) per step and made the BFS quadratic.
    An index cursor over a growing list keeps the same BFS order in O(n).
    """
    mapping = {}
    frontier = [(root1, root2)]
    cursor = 0
    while cursor < len(frontier):
        n1, n2 = frontier[cursor]
        cursor += 1
        mapping[n1] = n2

        # Sorting by str gives a deterministic pairing on both sides.
        children1 = sorted(g1.successors(n1), key=str)
        children2 = sorted(g2.successors(n2), key=str)
        frontier.extend(zip(children1, children2))
    return mapping


def node_signature(g, v):
    """
    Signature used to decide that two nodes carry the same content.

    Only the node's ``kind`` attribute (its type) participates; tokens
    and other attributes are deliberately ignored here.
    """
    kind = g.nodes[v]["kind"]
    return str(kind)


def find_max_common_subtrees_candidates(g1, g2):
    """
    Enumerate all potentially isomorphic subtree root pairs via tree hashes.

    Assumes build_merkle_hash has already annotated every node of both
    graphs with ``subtree_hash`` and ``subtree_size`` (computed in
    topological order).

    Returns:
        list of (v1, v2, size, similarity, mapping) tuples, sorted by
        subtree size then similarity, descending.
    """
    # Bucket the nodes of both graphs by their (hash, size) key; the size
    # component guards against accidental hash collisions across sizes.
    buckets = defaultdict(list)
    for graph in (g1, g2):
        for node in graph.nodes:
            attrs = graph.nodes[node]
            buckets[(attrs["subtree_hash"], attrs["subtree_size"])].append((graph, node))

    # Keep only keys occurring in BOTH graphs and cross-pair their roots.
    pairs = []
    for key, members in buckets.items():
        roots1 = [n for g, n in members if g is g1]
        roots2 = [n for g, n in members if g is g2]
        if not roots1 or not roots2:
            continue
        size = key[1]
        for r1 in roots1:
            for r2 in roots2:
                m = construct_trivial_mapping(g1, g2, r1, r2)
                sim = calculate_subtree_similarity(g1, g2, m)
                pairs.append((r1, r2, size, sim, m))

    # Largest, most similar candidates first for the greedy selector.
    pairs.sort(key=lambda p: (p[2], p[3]), reverse=True)
    return pairs


def hybrid_max_isomorphic_subtree(g1, g2, similarity_threshold=0.5, parallel=True, early_stop_ratio=0.95):
    """
    Find the maximum isomorphic subgraphs between g1 and g2 using the
    hybrid hash-candidate + greedy-selection approach.

    Args:
        g1: first graph; its Merkle hashes must already be computed by the
            caller (only g2 is hashed here).
        g2: second graph; build_merkle_hash is invoked on it.
        similarity_threshold: minimum token similarity a candidate pair
            must reach to survive filtering.
        parallel: kept for interface compatibility; currently unused.
        early_stop_ratio: coverage fraction at which greedy selection stops.

    Returns:
        (same1, same2, stats): matched node sets in g1/g2 and a dict of
        timing, verification and coverage statistics.
    """
    start_time = time.time()
    build_merkle_hash(g2)

    candidate_pairs = find_max_common_subtrees_candidates(g1, g2)
    valid_pairs = [p for p in candidate_pairs if p[3] >= similarity_threshold]
    verification_time = time.time()
    verification_stats = {
        "verification_time": verification_time - start_time,
        "valid_pairs": len(valid_pairs)
    }

    # BUGFIX: forward early_stop_ratio — previously the parameter was
    # accepted but ignored, and the callee's default was silently used.
    selected_pairs, same1, same2 = greedy_select_optimal_pairs(
        g1, g2, valid_pairs, early_stop_ratio=early_stop_ratio
    )
    selection_time = time.time()
    selection_stats = {
        "selection_time": selection_time - verification_time,
        "selected_pairs": len(selected_pairs),
        "isomorphic_nodes_G1": len(same1),
        "isomorphic_nodes_G2": len(same2),
        "coverage_ratio_G1": len(same1) / len(g1) if len(g1) > 0 else 0,
        "coverage_ratio_G2": len(same2) / len(g2) if len(g2) > 0 else 0,
    }
    stats = {
        "total_time": selection_time - start_time,
        "G1_nodes": len(g1),
        "G2_nodes": len(g2),
        # BUGFIX: verification_stats was computed but dropped from the result.
        "verification_stats": verification_stats,
        "selection_stats": selection_stats,
    }
    return same1, same2, stats


def worker_init(g1_serialized):
    """
    Process-pool initializer: runs exactly once per worker process.

    Deserializes the shared G1 graph and computes its Merkle hashes, then
    publishes it through the module-global GLOBAL_G1 so that each worker
    pays the deserialization/hashing cost only once.
    """
    global GLOBAL_G1
    graph = nx.node_link_graph(g1_serialized, edges="links")
    # Hash before publishing so GLOBAL_G1 is always fully annotated.
    build_merkle_hash(graph)
    GLOBAL_G1 = graph


def _process_single_candidate_for_isomorphic(args):
    """
    Worker-side processing of a single NPM candidate function.

    Args:
        args: tuple of (candidate, target_node_count, first_stage_sim_key_str,
            same_ast_similarity_threshold). G1 is deliberately NOT part of
            the tuple; it is read from the process-global GLOBAL_G1 set up
            by worker_init.

    Returns:
        The candidate enriched with similarity scores, or None when the
        candidate is skipped (size mismatch) or an error occurred.
    """
    (candidate, target_node_count, first_stage_sim_key_str,
     same_ast_similarity_threshold) = args
    candidate: FunctionCandidate
    candidate_id = candidate.candidate_id
    candidate_doc = candidate.candidate_doc

    try:
        # G1 was deserialized and hashed once per worker by worker_init.
        global GLOBAL_G1
        g1 = GLOBAL_G1

        # Size pre-filter: skip candidates more than 10x bigger or smaller
        # than the target function.
        candidate_node_count = candidate_doc['ast_feature'].get('nodeCount', 0)
        if candidate_node_count > 10 * target_node_count or target_node_count > 10 * candidate_node_count:
            return None

        # Build the NPM function's graph G2 and compute its Merkle hashes.
        g2 = build_graph_from_feature(candidate_doc['ast_feature'])
        build_merkle_hash(g2)

        # --- Isomorphic part ---
        same1, same2, stats = hybrid_max_isomorphic_subtree(
            g1, g2,
            similarity_threshold=TOKEN_SIMILARITY_THRESHOLD
        )
        # Cut away matched subtrees, leaving the heterogeneous remainder.
        p1, stats1 = prune(g1, same1)
        p2, stats2 = prune(g2, same2)
        isomorphic_ratio = (stats1["isomorphic_ratio"] + stats2["isomorphic_ratio"]) / 2

        # --- Heterogeneous part: Jaccard over unchanged top-level tokens ---
        arkts_top_level_node_nochangeToken = get_top_level_node_no_change_token(p1)
        npm_top_level_node_nochangeToken = get_top_level_node_no_change_token(p2)
        diff_similarity = jaccard_similarity(
            set(arkts_top_level_node_nochangeToken),
            set(npm_top_level_node_nochangeToken)
        )
        distance = abs(same_ast_similarity_threshold - isomorphic_ratio) + abs(1 - diff_similarity)

        candidate.candidate_name = candidate_doc.get('name', '')
        candidate.isomorphic_ratio = isomorphic_ratio
        candidate.diff_similarity = diff_similarity
        candidate.distance = distance
        candidate.match_method = MatchMethod.HYBRID.value

        return candidate
    except Exception as e:
        # BUGFIX: use the configured module logger instead of the root
        # logger, consistent with get_logger() at the top of the file.
        logger.error(f"[子进程] 处理 candidate_id={candidate_id} 时出错: {str(e)}")
        logger.error(traceback.format_exc())
        return None


def isomorphic_ratio_stage(g1, target_node_count, npm_candidates, has_vector_or_hash, first_stage_method,
                           same_ast_similarity_threshold):
    """
    Stage two: compute isomorphic ratios in parallel (process-pool version,
    memory-optimized, with the heterogeneous computation merged in).

    Args:
        g1: target function graph; hashed here once before fan-out.
        target_node_count: node count of the target function (size filter).
        npm_candidates: FunctionCandidate list surviving stage one.
        has_vector_or_hash: whether stage one produced usable candidates.
        first_stage_method: name of the first-stage filter (for logging).
        same_ast_similarity_threshold: threshold used in distance scoring.

    Returns:
        list of candidates enriched with similarity scores.
    """
    first_stage_sim_key_str = first_stage_method
    isomorphic_candidates = []

    if has_vector_or_hash and npm_candidates:
        # BUGFIX: use the configured module logger instead of the root logger.
        logger.debug(f"使用{first_stage_sim_key_str}筛选后的 {len(npm_candidates)} 个候选函数计算同构比例")

        # Hash G1 once in the parent process for data consistency.
        build_merkle_hash(g1)

        # Serialize G1 once; each worker deserializes it in worker_init.
        g1_serialized = nx.node_link_data(g1, edges="links")

        # Task tuples deliberately omit G1 to keep per-task pickling cheap.
        tasks = [
            (candidate, target_node_count, first_stage_sim_key_str, same_ast_similarity_threshold)
            for candidate in npm_candidates
        ]

        # Capped at 5 workers: more tends to trigger MongoDB OOM.
        num_workers = 5

        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers,
                initializer=worker_init,
                initargs=(g1_serialized,)  # hand the serialized G1 to the initializer
        ) as executor:
            futures = [executor.submit(_process_single_candidate_for_isomorphic, task) for task in tasks]
            for future in concurrent.futures.as_completed(futures):
                try:
                    # Removed dead accumulators (max_ratio_so_far,
                    # processed_count) that were tracked but never used.
                    result = future.result()
                    if result is not None:
                        isomorphic_candidates.append(result)
                except Exception as e:
                    logger.error(f"获取任务结果时发生异常: {e}")
    return isomorphic_candidates


def build_merkle_hash(g):
    """
    Annotate every node with ``subtree_hash`` and ``subtree_size``, bottom-up.

    Walks the reverse topological order so children are always processed
    before their parent. A node's hash combines its own signature with the
    SORTED hashes of its children, making the result independent of child
    order (an unordered Merkle hash).
    """
    for node in reversed(list(nx.topological_sort(g))):
        children = list(g.successors(node))
        hashes = sorted(g.nodes[c]["subtree_hash"] for c in children)
        size = 1 + sum(g.nodes[c].get("subtree_size", 0) for c in children)
        payload = node_signature(g, node) + "|" + "|".join(hashes)
        attrs = g.nodes[node]
        attrs["subtree_hash"] = xxhash.xxh64(payload, seed=0).hexdigest()
        attrs["subtree_size"] = size

