from collections import deque
import re
from typing import Iterable, List, Optional, Tuple
from chernc.parser.rust_parser import check_grammar
from chernc.treediff.gumtree.gen.generators.treesitter_tree_generator import TreeSitterTreeGenerator
from chernc.treediff.gumtree.matchers.configuration_options import ConfigurationOptions
from chernc.treediff.gumtree.matchers.gumtree_properties import GumtreeProperties
from chernc.treediff.gumtree.matchers.heuristic.gt.greedy_bottom_up_matcher import GreedyBottomUpMatcher
from chernc.treediff.gumtree.matchers.heuristic.gt.greedy_subtree_matcher import GreedySubtreeMatcher
from chernc.treediff.gumtree.matchers.heuristic.gt.hybrid_bottom_up_matcher import HybridBottomUpMatcher
from chernc.treediff.gumtree.matchers.mapping import Mapping
from chernc.treediff.gumtree.matchers.mapping_store import MappingStore
from chernc.treediff.gumtree.matchers.optimal.zs.string_metrics import StringMetrics
from chernc.treediff.gumtree.matchers.similarity_metrics import SimilarityMetrics
from chernc.treediff.gumtree.tree.tree import TreeLike


def byte_to_char(text_bytes: bytes, byte_pos: int) -> int:
    """
    Convert a byte offset in a UTF-8 byte string into a character offset.

    :param text_bytes: UTF-8 encoded bytes of the text
    :type text_bytes: bytes
    :param byte_pos: byte offset into ``text_bytes``
    :type byte_pos: int
    :return: number of complete characters encoded before ``byte_pos``;
        if ``byte_pos`` exceeds the length of the data, the total character
        count is returned
    :rtype: int
    """
    # BUG FIX: the previous implementation enumerated *decoded characters*
    # and compared the character index against the byte position, so the
    # result was wrong whenever the prefix contained any multi-byte
    # (non-ASCII) character.  Decoding the byte prefix and counting its
    # characters is correct in all cases.
    #
    # errors="ignore" drops a trailing partial code point when byte_pos
    # falls inside a multi-byte character, which matches the "characters
    # fully before this byte" semantics.
    return len(text_bytes[:byte_pos].decode("utf-8", errors="ignore"))


def get_line_num(text_bytes: bytes, byte_pos: int) -> int:
    """Return the 0-based line index containing byte offset ``byte_pos``."""
    # Equivalent to slicing the prefix and counting, but without the copy:
    # count newline bytes strictly before the offset.
    return text_bytes.count(b"\n", 0, byte_pos)


def find_common_ancestor(nodes: List[TreeLike]) -> Optional[TreeLike]:
    """
    Return the deepest node that is an ancestor of every node in ``nodes``.

    Ancestry is taken from ``get_parents()``, which (per its original
    usage here) includes the node itself.  Returns ``None`` when ``nodes``
    is empty or the nodes share no ancestor.
    """
    if not nodes:
        return None

    # Fold the ancestor sets of all nodes into one intersection.
    shared = set(nodes[0].get_parents())
    for node in nodes[1:]:
        shared &= set(node.get_parents())

    if not shared:
        return None
    # The deepest shared ancestor has the longest ancestor chain of its own.
    return max(shared, key=lambda candidate: len(candidate.get_parents()))


def is_descendant(node: TreeLike, target: TreeLike) -> bool:
    """
    Return ``True`` when ``target`` is reachable from ``node`` via
    ``get_children()`` edges, i.e. ``target`` is ``node`` itself or lies
    somewhere in the subtree rooted at ``node``.

    NOTE(review): despite the parameter names, this tests whether
    ``target`` is a descendant of ``node``, not the other way around.
    """
    # Iterative depth-first search instead of recursion; same result,
    # no recursion-depth concerns on deep trees.
    pending = [node]
    while pending:
        current = pending.pop()
        if current == target:
            return True
        pending.extend(current.get_children())
    return False


def get_layer_nodes(nodes: List[TreeLike], minsize: int) -> List[List[TreeLike]]:
    """
    Group ``nodes`` into buckets by visiting the subtree under their common
    ancestor in breadth-first order; a bucket is emitted each time it
    reaches ``minsize`` members, and a final partial bucket is kept.

    :raises ValueError: when the nodes share no common ancestor.
    """
    root = find_common_ancestor(nodes)
    if not root:
        raise ValueError("没有找到公共父节点")

    result: List[List[TreeLike]] = []
    bucket: List[TreeLike] = []
    pending = deque([root])

    # Breadth-first traversal from the common ancestor.
    while pending:
        current = pending.popleft()

        # Only nodes from the requested set are collected.
        if current in nodes:
            bucket.append(current)

        # Flush the bucket once it reaches the requested minimum size.
        if len(bucket) >= minsize:
            result.append(bucket)
            bucket = []

        pending.extend(current.get_children())

    # Keep the trailing, possibly undersized, bucket.
    if bucket:
        result.append(bucket)

    return result


def match_tree_simple(
    source: str, destination: str, st_minprio=1, st_priocalc="height", bu_minsize=1000, bu_minsim=0.2
) -> Optional[Tuple[TreeLike, TreeLike, MappingStore[TreeLike]]]:
    """
    Parse ``source`` and ``destination`` as Rust and run only the GumTree
    top-down (greedy subtree) matching phase.

    :return: ``(source root, destination root, mappings)``, or ``None``
        when either parse yields no root.  ``bu_minsize``/``bu_minsim``
        are accepted only for signature parity with ``match_tree`` and
        are unused here.
    """
    # Build a tree-sitter AST for each side.
    src_tree = TreeSitterTreeGenerator("rust").generate_tree(source)
    dst_tree = TreeSitterTreeGenerator("rust").generate_tree(destination)

    src_root = src_tree.get_root()
    dst_root = dst_tree.get_root()
    if src_root is None or dst_root is None:
        return None

    # Configure and run the greedy top-down subtree matcher.
    options = GumtreeProperties()
    options.put(ConfigurationOptions.st_minprio, st_minprio)
    options.put(ConfigurationOptions.st_priocalc, st_priocalc)
    matcher = GreedySubtreeMatcher()
    matcher.configure(options)
    mappings = matcher.match_with_new_store(src_root, dst_root)

    return src_root, dst_root, mappings


def match_tree(
    source: str, destination: str, st_minprio=1, st_priocalc="height", bu_minsize=1000, bu_minsim=0.2
) -> Optional[Tuple[TreeLike, TreeLike, MappingStore[TreeLike]]]:
    """
    Parse ``source`` and ``destination`` as Rust and run the full GumTree
    pipeline: greedy top-down subtree matching followed by the hybrid
    bottom-up matcher.

    :return: ``(source root, destination root, mappings)``, or ``None``
        when either parse yields no root.
    """
    # Build a tree-sitter AST for each side.
    src_tree = TreeSitterTreeGenerator("rust").generate_tree(source)
    dst_tree = TreeSitterTreeGenerator("rust").generate_tree(destination)

    src_root = src_tree.get_root()
    dst_root = dst_tree.get_root()
    if src_root is None or dst_root is None:
        return None

    # Phase 1: greedy top-down subtree matching.
    td_options = GumtreeProperties()
    td_options.put(ConfigurationOptions.st_minprio, st_minprio)
    td_options.put(ConfigurationOptions.st_priocalc, st_priocalc)
    topdown = GreedySubtreeMatcher()
    topdown.configure(td_options)
    mappings = topdown.match_with_new_store(src_root, dst_root)

    # Phase 2: hybrid bottom-up matching refines the top-down mappings.
    bu_options = GumtreeProperties()
    bu_options.put(ConfigurationOptions.bu_minsize, bu_minsize)
    bu_options.put(ConfigurationOptions.bu_minsim, bu_minsim)
    bottomup = HybridBottomUpMatcher()
    bottomup.configure(bu_options)
    mappings = bottomup.match(src_root, dst_root, mappings)

    return src_root, dst_root, mappings


def _get_text_range(nodes: List[TreeLike]) -> Tuple[int, int]:
    """
    Return ``(start, end)`` covering all of ``nodes``: the smallest
    ``get_pos()`` and the largest ``get_end_pos()``.  Returns ``(0, 0)``
    for an empty list.
    """
    spans = [(node.get_pos(), node.get_end_pos()) for node in nodes]
    if not spans:
        return 0, 0
    return min(start for start, _ in spans), max(end for _, end in spans)


def match_tree_range(
    source: str, destination: str, st_minprio=1, st_priocalc="height", bu_minsize=30, bu_minsim=0.2
) -> Optional[Tuple[int, int]]:
    """
    Match the tree structures of two pieces of code and return the range
    of ``source`` that best corresponds to ``destination``.

    :param source: full text of the source code
    :type source: str
    :param destination: full text of the destination code
    :type destination: str
    :return: (start char, end char) of the best-matching span in
        ``source``, or ``None`` when a tree cannot be built or no
        candidate layer survives the grammar filter
    :rtype: Optional[Tuple[int, int]]
    """
    ret = match_tree(
        source=source,
        destination=destination,
        st_minprio=st_minprio,
        st_priocalc=st_priocalc,
        bu_minsize=bu_minsize,
        bu_minsim=bu_minsim,
    )
    if ret is None:
        return None
    src_root, dst_root, mappings = ret

    text_bytes = source.encode("utf-8")

    # Collect mapped pairs.  The source root is skipped: fuzzy matching may
    # report the whole tree as matched, which would always select the
    # entire text.
    nodes: List[Mapping[TreeLike]] = []
    for map_tuple in mappings:
        if map_tuple.src.is_root():
            continue
        if map_tuple is not None and map_tuple not in nodes:
            nodes.append(map_tuple)

    # Group the matched source nodes into BFS layers; when nothing matched,
    # fall back to a single layer holding the whole source tree.
    lca_results: List[List[TreeLike]] = []
    if len(nodes) == 0:
        lca_results = [[src_root]]
    else:
        lca_results = get_layer_nodes([p.src for p in nodes], minsize=len(dst_root.get_children()) + 10)
    if len(lca_results) == 0:
        return None

    def tokenizer(s: str):
        # Split snake_case identifiers into lower-cased words for the
        # q-gram comparison.
        s = s.replace("_", " ").lower()
        return s.split()

    three_gram_dist = StringMetrics.q_grams_distance(tokenizer=tokenizer)

    # The destination text and its grammar check are loop-invariant;
    # compute both once (previously re-done on every iteration).
    code_dst = destination.encode("utf-8")[dst_root.get_pos() : dst_root.get_end_pos()].decode("utf-8")
    dst_parses = check_grammar(code_dst)

    sim_max = None
    max_lca_i = None
    for i, lcas in enumerate(lca_results):
        ms, me = _get_text_range(lcas)
        code_lcas = text_bytes[ms:me].decode("utf-8")
        # Skip candidates that parse while the destination does not: a
        # well-formed span cannot be the counterpart of broken code.
        if not dst_parses and check_grammar(code_lcas):
            continue
        sim = 1.0 - three_gram_dist(code_lcas, code_dst)
        if sim_max is None or sim > sim_max:
            sim_max = sim
            max_lca_i = i

    if max_lca_i is None:
        return None

    # Convert the winning byte range back into character offsets.
    lcas = lca_results[max_lca_i]
    max_start, max_end = _get_text_range(lcas)
    char_start = byte_to_char(text_bytes, max_start)
    char_end = byte_to_char(text_bytes, max_end)
    return (char_start, char_end)


def compute_tree_similarity(
    source: str, destination: str, st_minprio=1, st_priocalc="height", bu_minsize=30, bu_minsim=0.2
) -> Optional[float]:
    """
    Jaccard similarity of the top-down GumTree mappings between the two
    inputs, or ``None`` when either input cannot be parsed into a tree.
    """
    matched = match_tree_simple(
        source=source,
        destination=destination,
        st_minprio=st_minprio,
        st_priocalc=st_priocalc,
        bu_minsize=bu_minsize,
        bu_minsim=bu_minsim,
    )
    if matched is None:
        return None
    src_root, dst_root, mappings = matched
    return SimilarityMetrics.jaccard_similarity(src_root, dst_root, mappings)


def index_matched_blocks_by_tree(blocksA: Iterable[str], blocksB: Iterable[str], threshold: float=0.3) -> List[Tuple[int, int]]:
    """
    Greedily pair blocks from ``blocksA`` with blocks from ``blocksB`` by
    descending tree similarity.

    Each block index is used at most once; a pair is kept only when its
    similarity strictly exceeds ``threshold``.

    :param blocksA: candidate source blocks
    :param blocksB: candidate destination blocks
    :param threshold: minimum (exclusive) similarity for a pair to count
    :return: list of ``(index into blocksA, index into blocksB)`` pairs
    """
    # BUG FIX: materialize both inputs.  The parameters are typed Iterable,
    # and the nested loop below iterates blocksB once per element of
    # blocksA — a one-shot generator argument was silently exhausted after
    # the first outer iteration, producing no further pairs.
    blocks_a = list(blocksA)
    blocks_b = list(blocksB)

    ratios: List[Tuple[int, int, float]] = []
    for a_i, a in enumerate(blocks_a):
        for b_i, b in enumerate(blocks_b):
            sim = compute_tree_similarity(a, b)
            if sim is None:
                sim = 0.0
            # Clamp into [0, 1] in case the metric drifts outside its range.
            sim = min(max(sim, 0.0), 1.0)
            ratios.append((a_i, b_i, sim))

    # Greedy assignment: best similarity first, each side used at most once.
    # BUG FIX: track matched *indices* rather than block text — the old
    # text-keyed sets conflated distinct blocks with identical content, so
    # duplicates could never all be matched even though index pairs are
    # what this function returns.
    matched_a: set = set()
    matched_b: set = set()
    match: List[Tuple[int, int]] = []
    for a_i, b_i, ratio in sorted(ratios, key=lambda item: item[2], reverse=True):
        if a_i in matched_a or b_i in matched_b:
            continue
        if ratio > threshold:
            matched_a.add(a_i)
            matched_b.add(b_i)
            match.append((a_i, b_i))
    return match
