import json
import re
from typing import List

import numpy as np
from networkx import Graph
import faiss

from docx_text_processor import Paragraph
import textdistance
import heapq
from queue import PriorityQueue
from graph_node_embedding import train_model, visualize_embeddings
from agent import Agents
import tools
import pickle
from langchain.callbacks import StdOutCallbackHandler, FileCallbackHandler
import networkx as nx
from torch_geometric.utils import from_networkx

from qwen3_text_emb import Qwen3TextEmbedding

# Callback handler that logs agent/LLM events to a file.
# BUG FIX: the original wrapped the handler construction in a
# `with open(...)` block, which closed the file as soon as the block
# exited and left the handler writing to a closed stream. Passing the
# path lets FileCallbackHandler open and own the file itself
# (mode="w" preserves the original truncate-on-start behavior).
file_callback = FileCallbackHandler("callback_log.txt", mode="w")


def read_data():
    """Load annotation paragraphs and the raw classic text.

    Returns:
        tuple: (paragraphs, classic_data) where `paragraphs` maps each
        chapter name to a list of Paragraph objects (with globally unique,
        monotonically increasing ids) and `classic_data` is the raw dict
        loaded from classic.json, shaped {"chapter_name": ["text1", ...]}.
    """
    # Read the annotation-candidates file.
    with open('法華經義疏_candidates.json', 'r', encoding='utf-8') as f:
        books_data = json.load(f)

    paragraphs = {}
    para_id = 0  # running paragraph id (renamed from `id`, which shadowed the builtin)
    for chapter_name in books_data:
        paragraphs[chapter_name] = []
        for paragraph in books_data[chapter_name]:
            paragraphs[chapter_name].append(Paragraph(para_id, paragraph["text"], paragraph["candidates"]))
            para_id += 1

    # Read the classic text file.
    with open('classic.json', 'r', encoding='utf-8') as f:
        classic_data = json.load(f)
        # shape: {"chapter_name": ["text1", "text2", "text3"]}
    return paragraphs, classic_data


def read_data_for_graph():
    """Build a networkx Graph over annotation and classic texts.

    Creates one node per chapter (pid=0, text = chapter name) and one node
    per paragraph, with an edge from each paragraph to its chapter node.
    Annotation chapters are then aligned to the classic chapter whose name
    shares the longest common substring; the aligned chapter nodes are
    connected and cross-reference each other through their `chapter_id`
    attribute.

    Returns:
        tuple: (graph, paragraphs, classics) where paragraphs/classics map a
        chapter name to [chapter Paragraph, paragraph Paragraph, ...].
    """
    with open('法華經義疏_candidates.json', 'r', encoding='utf-8') as f:
        books_data = json.load(f)

    graph = Graph()
    paragraphs = {}
    node_id = 0  # renamed from `id`, which shadowed the builtin
    for chapter_name in books_data:
        graph.add_node(node_id, book="annotation", chapter=chapter_name, text=chapter_name, pid=0)
        chapter_id = node_id
        node_id += 1
        paragraphs[chapter_name] = [Paragraph(chapter_id, chapter_name, [])]
        for pid, paragraph in enumerate(books_data[chapter_name]):
            paragraphs[chapter_name].append(Paragraph(node_id, paragraph["text"], paragraph["candidates"]))
            # pid + 1: paragraph numbering starts at 1, slot 0 is the chapter node
            graph.add_node(node_id, chapter=chapter_name, book="annotation", paragraph_id=pid + 1,
                           text=paragraph["text"], quote=paragraph["candidates"], chapter_id=chapter_id)
            graph.add_edge(chapter_id, node_id)  # link paragraph node to its chapter node
            node_id += 1

    # Read the classic.json file.
    classics = {}
    with open('classic.json', 'r', encoding='utf-8') as f:
        classic_data = json.load(f)
        for chapter_name in classic_data:
            graph.add_node(node_id, book="classic", chapter=chapter_name, text=chapter_name, pid=0)
            # BUG FIX: capture chapter_id BEFORE incrementing, mirroring the
            # annotation branch above. The original incremented first, so the
            # classic chapter Paragraph and all chapter edges pointed at the
            # chapter's FIRST PARAGRAPH node instead of the chapter node.
            chapter_id = node_id
            node_id += 1
            classics[chapter_name] = [Paragraph(chapter_id, chapter_name, [])]
            for pid, paragraph in enumerate(classic_data[chapter_name]):
                classics[chapter_name].append(Paragraph(node_id, paragraph, []))
                # pid + 1: paragraph numbering starts at 1, slot 0 is the chapter node
                graph.add_node(node_id, chapter=chapter_name, book="classic", paragraph_id=pid + 1,
                               text=paragraph, chapter_id=chapter_id)
                graph.add_edge(chapter_id, node_id)
                node_id += 1

    # Align annotation chapters with classic chapters by longest common
    # substring of the chapter names.
    for chapter_name in paragraphs:
        most_sim_classic_chapter_name = ""
        max_sim_score = 0
        for classic_chapter_name in classics:
            score = len(textdistance.lcsstr(chapter_name, classic_chapter_name))
            if score > max_sim_score:
                max_sim_score = score
                most_sim_classic_chapter_name = classic_chapter_name
        graph.add_edge(paragraphs[chapter_name][0].id, classics[most_sim_classic_chapter_name][0].id)
        graph.nodes[paragraphs[chapter_name][0].id]["chapter_id"] = classics[most_sim_classic_chapter_name][0].id
        graph.nodes[classics[most_sim_classic_chapter_name][0].id]["chapter_id"] = paragraphs[chapter_name][0].id
    return graph, paragraphs, classics


def simple_match_text():
    """Attach the top-3 most similar classic passages to every annotation paragraph.

    Similarity is Jaccard over the paragraph text and each classic passage.

    Returns:
        dict: the `paragraphs` mapping from read_data(), with each Paragraph's
        top matches populated via set_top_matches().

    BUG FIXES vs. the original:
      * the original iterated the dicts directly, yielding chapter-name
        strings (``paragraph.text`` would raise AttributeError) — now iterates
        the actual Paragraph objects and classic passage strings;
      * ``PriorityQueue(maxsize=3).put`` blocks forever on the 4th item —
        scores are now collected first and only the top 3 are enqueued.
    """
    paragraphs, classic_data = read_data()
    for chapter_name in paragraphs:
        for paragraph in paragraphs[chapter_name]:
            scored = []
            for classic_chapter in classic_data:
                for classic_part in classic_data[classic_chapter]:
                    sim = textdistance.jaccard(paragraph.text, classic_part)
                    # 1 - sim: smaller means more similar (PriorityQueue is a min-heap)
                    scored.append((1 - sim, classic_part, paragraph.text))
            top_matches = PriorityQueue(maxsize=3)
            for item in heapq.nsmallest(3, scored, key=lambda t: t[0]):
                top_matches.put(item)
            paragraph.set_top_matches(top_matches)  # (classic text, annotation text) pairs keyed by score

    return paragraphs


def build_qwen_emb(graph):
    """Attach a Qwen3 text embedding (`text_emb`) to every node of `graph`.

    The embedding input combines the node's text with its chapter name (no
    extra instruction prompt is used). The augmented graph is pickled to
    graph_with_text_emb.pkl and also returned.

    Args:
        graph: networkx graph from read_data_for_graph().

    Returns:
        the same graph, with a `text_emb` attribute on every node.
    """
    # Model used to compute semantic similarity embeddings.
    qwen_model = Qwen3TextEmbedding("/home/liwei/Qwen3-Embedding-0.6B")
    for node in graph.nodes:
        # BUG FIX: nodes store the chapter under the "chapter" attribute
        # (see read_data_for_graph and search_similar_vectors); the original
        # read the nonexistent "chapter_name" key and raised KeyError.
        graph.nodes[node]["text_emb"] = qwen_model.get_embeddings(
            f'文本内容：{graph.nodes[node]["text"]} 所在章节：{graph.nodes[node]["chapter"]}')
    pickle.dump(graph, open("graph_with_text_emb.pkl", "wb"))
    return graph


def build_graph_emb():
    """Train graph-structure embeddings and merge them into the node attributes.

    Loads the graph produced by build_qwen_emb(), converts it to PyTorch
    Geometric format, trains the GNN, writes each learned embedding back to
    its node as `graph_emb`, visualizes the result, and saves the combined
    graph to graph_with_combined_emb.pkl.
    """
    # CONSISTENCY FIX: build_qwen_emb() saves to graph_with_text_emb.pkl;
    # the original loaded "graph_with_qwen_emb.pkl", which nothing in this
    # file produces.
    G = pickle.load(open("graph_with_text_emb.pkl", "rb"))
    data = from_networkx(G, node_attrs=['text_emb'])

    # Converted to PyTorch Geometric format.
    print("PyTorch Geometric数据:", data)

    # Train the model and obtain node embeddings.
    node_embeddings = train_model(data)
    print(f"学习到的节点嵌入形状: {node_embeddings.shape}")

    # Write each embedding back as the node's graph_emb attribute.
    # from_networkx preserves G.nodes() order, so row i corresponds to the
    # i-th node of the iteration below.
    for i, node_id in enumerate(G.nodes()):
        # Detach from the autograd graph and convert to numpy.
        embedding = node_embeddings[i].detach().numpy()
        G.nodes[node_id]['graph_emb'] = embedding

    # Visualize the embeddings.
    visualize_embeddings(node_embeddings, G)

    # Persist the updated graph.
    pickle.dump(G, open("graph_with_combined_emb.pkl", "wb"))
    print("已将更新后的图保存到graph_with_combined_emb.pkl")


def build_faiss_index():
    """Build FAISS inner-product indices over the combined node embeddings.

    Loads graph_with_combined_emb.pkl, concatenates each node's `text_emb`
    and `graph_emb`, splits the nodes into classic vs. annotation groups,
    L2-normalizes the vectors (so inner product == cosine similarity), and
    writes one IndexFlatIP per group plus the node-id mappings to disk.

    Returns:
        (classic_index, annotation_index, classic_nodes, annotation_nodes)
        or None when the input file or valid embeddings are missing.
    """
    try:
        G = pickle.load(open("graph_with_combined_emb.pkl", "rb"))
    except FileNotFoundError:
        print("graph_with_combined_emb.pkl 文件不存在，请先运行 build_graph_emb() 函数")
        return None

    # Partition node ids and combined vectors by book type.
    classic_nodes, annotation_nodes = [], []
    classic_vecs, annotation_vecs = [], []
    for node_id, attrs in G.nodes(data=True):
        # Skip nodes missing either embedding.
        if "text_emb" not in attrs or "graph_emb" not in attrs:
            continue
        combined = np.concatenate([attrs["text_emb"], attrs["graph_emb"]])
        if attrs["book"] == "classic":
            classic_nodes.append(node_id)
            classic_vecs.append(combined)
        else:
            annotation_nodes.append(node_id)
            annotation_vecs.append(combined)

    classic_embeddings = np.array(classic_vecs).astype('float32')
    annotation_embeddings = np.array(annotation_vecs).astype('float32')

    # Bail out when either side has no usable vectors.
    if len(classic_embeddings) == 0 or len(annotation_embeddings) == 0:
        print("没有找到有效的嵌入向量")
        return None

    dim = classic_embeddings.shape[1]

    # Inner-product indices; with L2-normalized vectors this is cosine similarity.
    classic_index = faiss.IndexFlatIP(dim)
    annotation_index = faiss.IndexFlatIP(dim)

    faiss.normalize_L2(classic_embeddings)
    faiss.normalize_L2(annotation_embeddings)

    classic_index.add(classic_embeddings)
    annotation_index.add(annotation_embeddings)

    # Persist the indices; they can be reloaded later with faiss.read_index.
    faiss.write_index(classic_index, "classic_vectors.index")
    faiss.write_index(annotation_index, "annotation_vectors.index")

    # Persist the FAISS-row -> node-id mappings.
    with open("classic_nodes.pkl", "wb") as f:
        pickle.dump(classic_nodes, f)
    with open("annotation_nodes.pkl", "wb") as f:
        pickle.dump(annotation_nodes, f)

    print(f"FAISS索引构建完成，经典文献节点数：{len(classic_nodes)}，注释文献节点数：{len(annotation_nodes)}")
    return classic_index, annotation_index, classic_nodes, annotation_nodes


def search_similar_vectors(query, text_emb_model, k=5, is_vector=False):
    """
    Search the classic-text FAISS index for the k nodes most similar to a query.

    Parameters:
        query: a node id in the combined graph (when is_vector=False) or a
            ready-made query vector as a numpy array (when is_vector=True)
        text_emb_model: text-embedding model (get_embeddings) used to
            re-encode the query node's text together with any regex-reverted
            text stored on the node
        k: number of most-similar classic nodes to return
        is_vector: True -> `query` is a vector; False -> `query` is a node id

    Returns:
        list of (similarity_score, classic_node_id) sorted by descending
        score, or None on missing files / invalid input.
    """
    # Load the graph, FAISS index and row->node-id mapping.
    try:
        # The graph is only needed when the query is a node id (to fetch its
        # vectors and text).
        G = pickle.load(open("graph_with_combined_emb.pkl", "rb")) if not is_vector else None
        classic_index = faiss.read_index("classic_vectors.index")

        with open("classic_nodes.pkl", "rb") as f:
            classic_nodes = pickle.load(f)
    except FileNotFoundError as e:
        print(f"文件不存在: {e}")
        return None

    # Prepare the query vector.
    query_text = None
    query_candidates = []
    query_reverted_text = None

    if is_vector:
        # Use the supplied vector directly.
        if isinstance(query, np.ndarray):
            query_emb = query.astype('float32').reshape(1, -1)
        else:
            print("传入的向量必须是numpy数组")
            return None
    else:
        # Resolve the vector and text information from the node id.
        if query not in G.nodes:
            print(f"查询节点 {query} 不存在")
            return None

        if "graph_emb" not in G.nodes[query]:
            print(f"查询节点 {query} 缺少图嵌入向量")
            return None

        # Text of the query node.
        query_text = G.nodes[query].get("text", "")
        # Text previously recovered by regex matching, if present.
        query_reverted_text = None
        if "reverted_text" in G.nodes[query]:
            query_reverted_text = G.nodes[query]["reverted_text"]

        # TODO: this does not handle the case of multiple reverted texts
        # (elsewhere in this file reverted_text is a dict of pattern -> matches).

        # Concatenate the original text and the reverted text, then re-encode.
        try:
            combined_text = query_text
            if query_reverted_text:
                combined_text = f"{query_text} [正则处理后] {query_reverted_text}"

            # Re-encode with the Qwen embedding model.
            new_text_emb = text_emb_model.get_embeddings(
                f'文本内容：{combined_text} 所在章节：{G.nodes[query]["chapter"]}')[0]

            # Combine the fresh text embedding with the stored graph embedding.
            query_emb = np.concatenate([new_text_emb, G.nodes[query]["graph_emb"]]).astype('float32').reshape(1, -1)
        except Exception as e:
            print(f"使用Qwen embedding重新编码时出错: {e}")
            # On failure, fall back to the stored text_emb (if any).
            if "text_emb" in G.nodes[query]:
                query_emb = np.concatenate([
                    G.nodes[query]["text_emb"],
                    G.nodes[query]["graph_emb"]
                ]).astype('float32').reshape(1, -1)
            else:
                # Last resort: graph embedding only. NOTE(review): this
                # vector's dimension will not match an index built on
                # concatenated embeddings — the search below would likely
                # fail; confirm this fallback is intended.
                query_emb = G.nodes[query]["graph_emb"].astype('float32').reshape(1, -1)

    # L2-normalize in place so inner product equals cosine similarity.
    faiss.normalize_L2(query_emb)

    # Query the classic-text index.
    distances, indices = classic_index.search(query_emb, k)
    # Map FAISS row indices back to graph node ids.
    similar_node_ids = [classic_nodes[i] for i in indices[0]]

    results = [(distances[0][i], similar_node_ids[i]) for i in range(len(similar_node_ids))]
    results.sort(reverse=True, key=lambda x: x[0])
    return results


def map_annotation_to_classic_similarities(top_k=5, save_path="annotation_to_classic_mappings.pkl"):
    """
    For every annotation node carrying citation candidates, find its most
    similar classic nodes and persist the mapping.

    Parameters:
        top_k: number of most-similar classic nodes to record per annotation node
        save_path: pickle path for the resulting mapping dict

    Returns:
        dict mapping annotation node id -> {"similar_nodes", "text",
        "chapter", "candidates"}, or None on failure.
    """
    try:
        # Load the graph with combined embeddings.
        print("正在加载图数据...")
        G = pickle.load(open("graph_with_combined_emb.pkl", "rb"))
        print(f"成功加载图数据，包含 {len(G.nodes)} 个节点")
    except FileNotFoundError:
        print("错误：graph_with_combined_emb.pkl 文件不存在，请先运行 build_graph_emb() 函数")
        return None
    except Exception as e:
        print(f"加载图数据时出错：{e}")
        return None

    # BUG FIX: search_similar_vectors requires a text-embedding model as its
    # second positional argument; the original call omitted it, so every
    # lookup raised TypeError (silently swallowed by the except below).
    text_emb_model = Qwen3TextEmbedding("/home/liwei/Qwen3-Embedding-0.6B")

    # annotation node id -> similar classic nodes mapping.
    mappings = {}
    processed_count = 0
    annotated_count = 0

    print("开始迭代节点并查找相似节点...")
    for node_id in G.nodes():
        node = G.nodes[node_id]

        # Candidates may be stored under "candidates" or, as produced by
        # read_data_for_graph, under "quote".
        has_candidates = "candidates" in node and node["candidates"]
        has_quote = "quote" in node and node["quote"]

        if node["book"] == "annotation" and (has_candidates or has_quote):
            annotated_count += 1
            try:
                # Vector search against the classic index.
                results = search_similar_vectors(node_id, text_emb_model, k=top_k)

                if results:
                    # Record scores, node ids and the source metadata.
                    mappings[node_id] = {
                        "similar_nodes": [(score, classic_node_id) for score, classic_node_id in results],
                        "text": node["text"],
                        "chapter": node["chapter"],
                        "candidates": node.get("candidates", []) or node.get("quote", [])
                    }
                    processed_count += 1

                    # Progress reporting.
                    if processed_count % 10 == 0:
                        print(f"已处理 {processed_count} 个带有candidates的annotation节点")
            except Exception as e:
                print(f"处理节点 {node_id} 时出错：{e}")
                continue

    # Persist the mapping.
    try:
        with open(save_path, "wb") as f:
            pickle.dump(mappings, f)
        print(f"成功保存节点映射关系到 {save_path}")
        print(f"总共有 {annotated_count} 个带有candidates的annotation节点")
        print(f"成功处理 {processed_count} 个节点，生成相似度映射")
    except Exception as e:
        print(f"保存映射关系时出错：{e}")
        return None

    return mappings


def regex_revert(text, regex_pattern):
    """Extract every substring of ``text`` matching ``regex_pattern``.

    Args:
        text (str): source text to search
        regex_pattern (str): regular expression pattern

    Returns:
        list: all non-overlapping matches, in order; empty list when none.
        Note: if the pattern contains capture groups, re.findall returns
        the group contents rather than whole matches.

    Fixed vs. the original: the docstring claimed a single longest-match
    string was returned (that code path was commented out); the dead code
    is removed and the docstring now matches the actual list return.
    """
    p = re.compile(regex_pattern)
    return re.findall(p, text)


def chapter_name_match(text, chapter_names):
    """Return the chapter names (ordinal suffix stripped) that appear in ``text``.

    A full title such as "方便品第二" is trimmed at the first "第" to its pure
    name ("方便品"). A trimmed name matches when it occurs verbatim in
    ``text`` or when its longest common substring with ``text`` covers more
    than 80% of the name.

    Args:
        text (str): paragraph text to search
        chapter_names: iterable of full chapter titles

    Returns:
        list: trimmed chapter names found in ``text``.

    Fixes vs. the original: removed a leftover debug print of the LCS, and
    skip names that trim to the empty string (a title starting with "第"
    yielded "", which matched every text via ``"" in text`` and would divide
    by zero in the ratio test).
    """
    names = []
    for chapter_name in chapter_names:
        pure_name_index = chapter_name.find("第")
        if pure_name_index != -1:
            chapter_name = chapter_name[:pure_name_index]
        if not chapter_name:
            continue
        if chapter_name in text or len(textdistance.lcsstr(chapter_name, text)) / float(len(chapter_name)) > 0.8:
            names.append(chapter_name)
    return names


def judge_quote():
    """Run the 'judge' agent over every annotation paragraph to detect citations.

    For each paragraph: link it to classic chapters whose names it mentions,
    extract quoted text (if any), ask the judge agent whether a citation is
    present, and record the verdict on the paragraph and the graph node.
    The resulting (graph, paragraphs, classics) triple is pickled to
    graph_with_citation_judge.pkl.
    """
    graph, paragraphs, classics = read_data_for_graph()
    agents = Agents()
    quote_num = 0
    for paragraph_list in paragraphs.values():
        for paragraph in paragraph_list:
            matched_names = chapter_name_match(paragraph.text, classics.keys())
            # Connect the paragraph to every classic chapter node it names
            # (loop is a no-op when nothing matched).
            for matched_name in matched_names:
                graph.add_edge(paragraph.id, classics[matched_name][0].id)

            # Two-step quote extraction: detect first, extract only if present.
            quote = tools.quote_extract(paragraph.text) if tools.quote_detection(paragraph.text) else ""

            citation = agents.agents["judge"].invoke(
                {"input": f"原文：{paragraph.text} 包含的章节名称：{str(matched_names)} 引号内容：{str(quote)}"},
                config={"callbacks": [file_callback]})
            print(citation)

            if citation.has_citation == "是":
                quote_num += 1
                paragraph.add_llm_info("has_citation", citation.has_citation)
                if citation.citation:
                    graph.nodes[paragraph.id]["citation"] = citation.citation
                    paragraph.add_llm_info("citation", citation.citation)
                print(citation)
    pickle.dump((graph, paragraphs, classics), open("graph_with_citation_judge.pkl", "wb"))


def regex_match():
    """Link citation-bearing annotation paragraphs to classic passages via regex.

    For every paragraph the judge agent flagged (`has_citation` in llm_info),
    ask the 'regex' agent for extraction patterns, then try to match those
    patterns against (1) the paragraphs of the aligned classic chapter and,
    failing that, (2) each whole classic chapter joined into one string (for
    citations spanning several paragraphs). Matches are accumulated on the
    annotation node under `reverted_text` as {pattern: [matches]}.
    The (graph, paragraphs, classics) triple is checkpointed to
    graph_with_quote_regex.pkl after every paragraph.
    """
    graph, paragraphs, classics = pickle.load(open("graph_with_citation_judge.pkl", "rb"))
    agents = Agents()
    for chapter_name in paragraphs:
        for paragraph in paragraphs[chapter_name]:
            if "has_citation" not in paragraph.llm_info:
                continue
            quote = []
            if tools.quote_detection(paragraph.text):  # two-step extraction: detect quotes first
                quote = tools.quote_extract(paragraph.text)
            chapter_names = chapter_name_match(paragraph.text, classics.keys())
            regex = agents.agents["regex"].invoke(
                {"text": paragraph.text, "chapter_names": chapter_names, "quote": quote})
            print("text:", paragraph.text)
            print("AI:", regex)
            if regex.has_regex == "是":
                paragraph.add_llm_info("quote_regex", regex)
                graph.nodes[paragraph.id]["quote_regex"] = regex
            if regex.regex:
                # The paragraph's chapter node stores (after alignment) the id
                # of the corresponding classic chapter node in `chapter_id`.
                most_possible_classic_chapter = graph.nodes[graph.nodes[paragraph.id]["chapter_id"]]["chapter_id"]
                # Scan that chapter's classic paragraphs for pattern matches.
                # BUG FIX: the original filtered on book == "annotation",
                # which can only select the aligned annotation chapter node —
                # never the classic paragraphs this loop is meant to scan.
                for classic_paragraph_id in graph.neighbors(most_possible_classic_chapter):
                    if graph.nodes[classic_paragraph_id]["book"] == "classic" and graph.nodes[classic_paragraph_id][
                        "chapter_id"] == most_possible_classic_chapter:
                        for reg_pattern in regex.regex:
                            reverted_text = regex_revert(graph.nodes[classic_paragraph_id]["text"], reg_pattern)
                            if reverted_text:
                                graph.add_edge(paragraph.id, classic_paragraph_id)
                                # One annotation may cite several passages, so
                                # matches are accumulated per pattern.
                                if "reverted_text" not in graph.nodes[paragraph.id]:
                                    assert type(reverted_text) == list
                                    graph.nodes[paragraph.id]["reverted_text"] = {}
                                if reg_pattern not in graph.nodes[paragraph.id]["reverted_text"]:
                                    graph.nodes[paragraph.id]["reverted_text"][reg_pattern] = reverted_text
                                else:
                                    graph.nodes[paragraph.id]["reverted_text"][reg_pattern].extend(reverted_text)
                # If any single-paragraph match was found, record it on the paragraph.
                if "reverted_text" in graph.nodes[paragraph.id]:
                    paragraph.add_llm_info("quote_regex_match", graph.nodes[paragraph.id]["reverted_text"])
                # Otherwise fall back to matching across whole chapters
                # (multi-paragraph citations can't be pinned to one paragraph).
                else:
                    for classic_chapter_name in classics:
                        classic_text = "\n".join([p.text for p in classics[classic_chapter_name]])
                        for reg_pattern in regex.regex:
                            reverted_text = regex_revert(classic_text, reg_pattern)
                            if reverted_text:
                                # A chapter-level match links the paragraph to
                                # the chapter node instead.
                                graph.add_edge(paragraph.id, classics[classic_chapter_name][0].id)
                                if "reverted_text" not in graph.nodes[paragraph.id]:
                                    graph.nodes[paragraph.id]["reverted_text"] = {}
                                if reg_pattern not in graph.nodes[paragraph.id]["reverted_text"]:
                                    assert type(reverted_text) == list
                                    graph.nodes[paragraph.id]["reverted_text"][reg_pattern] = reverted_text
                                else:
                                    graph.nodes[paragraph.id]["reverted_text"][reg_pattern].extend(reverted_text)

            # Checkpoint after every paragraph so progress survives crashes.
            pickle.dump((graph, paragraphs, classics), open("graph_with_quote_regex.pkl", "wb"))


def evaluate_mappings_with_agent(mappings_path="annotation_to_classic_mappings.pkl",
                                 graph_path="graph_with_combined_emb.pkl",
                                 save_path="graph_with_evaluated_mappings.pkl"):
    """
    Re-score the candidate annotation->classic mappings with an LLM agent.

    Iterates the annotation nodes recorded by
    map_annotation_to_classic_similarities(), asks the 'annotation_match'
    agent whether each candidate classic passage is the cited source, blends
    the agent verdict into the similarity score, stores the full evaluation
    and the best match on the graph node (adding a best_match edge), and
    saves the updated graph.

    Cleanups vs. the original: removed redundant function-local imports
    (pickle and Agents are module-level; os was unused), removed the unused
    judge agent handle, and deduplicated the fallback best-match dict built
    when the agent call fails.

    Parameters:
        mappings_path: pickle produced by map_annotation_to_classic_similarities
        graph_path: pickle of the graph with combined embeddings
        save_path: output pickle for the evaluated graph

    Returns:
        the updated graph object, or None on failure.
    """
    try:
        # Load the candidate mappings.
        print(f"正在加载映射关系文件 {mappings_path}...")
        with open(mappings_path, "rb") as f:
            mappings = pickle.load(f)
        print(f"成功加载映射关系，包含 {len(mappings)} 个annotation节点的映射")
    except FileNotFoundError:
        print(f"错误：{mappings_path} 文件不存在，请先运行 map_annotation_to_classic_similarities 函数")
        return None
    except Exception as e:
        print(f"加载映射关系时出错：{e}")
        return None

    try:
        # Load the graph.
        print(f"正在加载图数据 {graph_path}...")
        with open(graph_path, "rb") as f:
            G = pickle.load(f)
        print(f"成功加载图数据，包含 {len(G.nodes)} 个节点")
    except FileNotFoundError:
        print(f"错误：{graph_path} 文件不存在")
        return None
    except Exception as e:
        print(f"加载图数据时出错：{e}")
        return None

    try:
        # Initialize the matching agent.
        print("正在初始化智能体...")
        agents = Agents()
        annotation_match_agent = agents.agents["annotation_match"]
        print("智能体初始化成功")
    except Exception as e:
        print(f"初始化智能体时出错：{e}")
        return None

    processed_count = 0
    total_mappings = len(mappings)

    # Evaluate every annotation node's candidate list.
    print(f"开始评估 {total_mappings} 个annotation节点的映射关系...")
    for node_id, mapping_data in mappings.items():
        try:
            # Skip stale mappings whose node is gone.
            if node_id not in G.nodes():
                print(f"警告：节点 {node_id} 不在图中，跳过")
                continue

            best_match = None
            best_score = -1
            evaluation_results = []

            # Annotation text under evaluation.
            annotation_text = mapping_data["text"]

            # Score each candidate classic node.
            for similarity_score, classic_node_id in mapping_data["similar_nodes"]:
                try:
                    if classic_node_id not in G.nodes():
                        print(f"警告：节点 {classic_node_id} 不在图中，跳过")
                        continue

                    classic_node = G.nodes[classic_node_id]
                    classic_text = classic_node.get("text", "")

                    if not classic_text:
                        continue

                    # Ask the agent whether this classic passage is the cited source.
                    try:
                        input_data = {
                            "text1": annotation_text,
                            "text2": classic_text
                        }

                        # Structured output from the agent.
                        result = annotation_match_agent.invoke(input_data)

                        is_match = result.is_annotation == "是"
                        key_words = getattr(result, "key_words", None)
                        reason = getattr(result, "reason", None)

                        # Blend the similarity score with the agent verdict.
                        combined_score = similarity_score
                        if is_match:
                            # TODO: this blending is one of several options:
                            # 1. trust the agent verdict outright
                            # 2. add a bonus for a positive verdict (current)
                            # 3. AND/OR the score threshold with the verdict
                            # 4. on disagreement, escalate to another agent
                            combined_score += 0.5  # bonus for a positive agent verdict
                            # Extra bonus when keywords were identified.
                            if key_words:
                                combined_score += 0.2

                        evaluation_result = {
                            "classic_node_id": classic_node_id,
                            "similarity_score": similarity_score,
                            "combined_score": combined_score,
                            "is_match": is_match,
                            "classic_text": classic_text,
                            "key_words": key_words,
                            "reason": reason
                        }
                        evaluation_results.append(evaluation_result)

                        # Track the highest-scoring candidate.
                        if combined_score > best_score:
                            best_score = combined_score
                            best_match = evaluation_result
                    except Exception as e:
                        print(f"调用智能体评估节点 {node_id} 和 {classic_node_id} 时出错：{e}")
                        # Agent failed: fall back to the raw similarity score.
                        fallback_result = {
                            "classic_node_id": classic_node_id,
                            "similarity_score": similarity_score,
                            "combined_score": similarity_score,
                            "is_match": False,
                            "classic_text": classic_text,
                            "key_words": None,
                            "reason": f"智能体调用失败: {str(e)}"
                        }
                        evaluation_results.append(fallback_result)

                        if fallback_result["combined_score"] > best_score:
                            best_score = fallback_result["combined_score"]
                            best_match = fallback_result
                except Exception as e:
                    print(f"处理节点 {classic_node_id} 时出错：{e}")
                    continue

            # Store the full evaluation on the annotation node.
            G.nodes[node_id]["evaluation_results"] = evaluation_results

            # Store and link the best match, if any.
            if best_match:
                G.nodes[node_id]["best_match"] = best_match

                # Edge marking the annotation -> best classic match relation.
                edge_key = f"best_match_{best_match['combined_score']:.4f}"
                G.add_edge(
                    node_id,
                    best_match["classic_node_id"],
                    key=edge_key,
                    type="best_match",
                    similarity_score=best_match["similarity_score"],
                    combined_score=best_match["combined_score"],
                    key_words=best_match.get("key_words"),
                    reason=best_match.get("reason")
                )

            processed_count += 1

            # Progress reporting.
            if processed_count % 10 == 0:
                print(f"已处理 {processed_count}/{total_mappings} 个annotation节点")
        except Exception as e:
            print(f"处理节点 {node_id} 时出错：{e}")
            continue

    # Persist the evaluated graph.
    try:
        with open(save_path, "wb") as f:
            pickle.dump(G, f)
        print(f"成功保存包含评估结果的图数据到 {save_path}")
        print(f"总共处理了 {processed_count} 个annotation节点")
    except Exception as e:
        print(f"保存图数据时出错：{e}")
        return None

    return G


def regex_revert(text, regex_pattern):
    """Extract every substring of ``text`` matching ``regex_pattern``.

    NOTE: this is a verbatim duplicate of an earlier regex_revert in this
    file; at import time this later definition wins.

    Args:
        text (str): source text to search
        regex_pattern (str): regular expression pattern

    Returns:
        list: all non-overlapping matches, in order; empty list when none.
        If the pattern contains capture groups, re.findall returns the
        group contents rather than whole matches.

    Fixed vs. the original: the docstring claimed a single longest-match
    string was returned (that code path was commented out); the dead code
    is removed and the docstring now matches the actual list return.
    """
    p = re.compile(regex_pattern)
    return re.findall(p, text)


def chapter_name_match(text, chapter_names):
    """Return the chapter names (ordinal suffix stripped) that appear in ``text``.

    NOTE: this is a verbatim duplicate of an earlier chapter_name_match in
    this file; at import time this later definition wins.

    A full title such as "方便品第二" is trimmed at the first "第" to its pure
    name. A trimmed name matches when it occurs verbatim in ``text`` or when
    its longest common substring with ``text`` covers more than 80% of it.

    Fixes vs. the original: removed a leftover debug print of the LCS, and
    skip names that trim to the empty string (which matched every text via
    ``"" in text`` and would divide by zero in the ratio test).
    """
    names = []
    for chapter_name in chapter_names:
        pure_name_index = chapter_name.find("第")
        if pure_name_index != -1:
            chapter_name = chapter_name[:pure_name_index]
        if not chapter_name:
            continue
        if chapter_name in text or len(textdistance.lcsstr(chapter_name, text)) / float(len(chapter_name)) > 0.8:
            names.append(chapter_name)
    return names


def judge_quote():
    """Run the 'judge' agent over every annotation paragraph to detect citations.

    NOTE: verbatim duplicate of an earlier judge_quote in this file; at
    import time this later definition wins.

    Links each paragraph to classic chapters it names, extracts any quoted
    text, asks the judge agent for a citation verdict, records the verdict
    on both the Paragraph and the graph node, and pickles the result to
    graph_with_citation_judge.pkl.
    """
    graph, paragraphs, classics = read_data_for_graph()
    agents = Agents()
    quote_num = 0
    for paragraph_list in paragraphs.values():
        for paragraph in paragraph_list:
            matched_names = chapter_name_match(paragraph.text, classics.keys())
            # Edge from the paragraph to each named classic chapter node
            # (no-op when nothing matched).
            for matched_name in matched_names:
                graph.add_edge(paragraph.id, classics[matched_name][0].id)

            # Detect first, extract only if quotes are present.
            quote = tools.quote_extract(paragraph.text) if tools.quote_detection(paragraph.text) else ""

            citation = agents.agents["judge"].invoke(
                {"input": f"原文：{paragraph.text} 包含的章节名称：{str(matched_names)} 引号内容：{str(quote)}"},
                config={"callbacks": [file_callback]})
            print(citation)

            if citation.has_citation == "是":
                quote_num += 1
                paragraph.add_llm_info("has_citation", citation.has_citation)
                if citation.citation:
                    graph.nodes[paragraph.id]["citation"] = citation.citation
                    paragraph.add_llm_info("citation", citation.citation)
                print(citation)
    pickle.dump((graph, paragraphs, classics), open("graph_with_citation_judge.pkl", "wb"))


def regex_match():
    """Link citation-bearing annotation paragraphs to classic passages via regex.

    NOTE: verbatim duplicate of an earlier regex_match in this file; at
    import time this later definition wins.

    For every paragraph flagged with `has_citation`, asks the 'regex' agent
    for extraction patterns, then matches them against (1) the paragraphs of
    the aligned classic chapter and, failing that, (2) each whole classic
    chapter joined into one string. Matches accumulate on the annotation
    node under `reverted_text` as {pattern: [matches]}. Checkpoints to
    graph_with_quote_regex.pkl after every paragraph.
    """
    graph, paragraphs, classics = pickle.load(open("graph_with_citation_judge.pkl", "rb"))
    agents = Agents()
    for chapter_name in paragraphs:
        for paragraph in paragraphs[chapter_name]:
            if "has_citation" not in paragraph.llm_info:
                continue
            quote = []
            if tools.quote_detection(paragraph.text):  # two-step extraction: detect quotes first
                quote = tools.quote_extract(paragraph.text)
            chapter_names = chapter_name_match(paragraph.text, classics.keys())
            regex = agents.agents["regex"].invoke(
                {"text": paragraph.text, "chapter_names": chapter_names, "quote": quote})
            print("text:", paragraph.text)
            print("AI:", regex)
            if regex.has_regex == "是":
                paragraph.add_llm_info("quote_regex", regex)
                graph.nodes[paragraph.id]["quote_regex"] = regex
            if regex.regex:
                # The paragraph's chapter node stores (after alignment) the id
                # of the corresponding classic chapter node in `chapter_id`.
                most_possible_classic_chapter = graph.nodes[graph.nodes[paragraph.id]["chapter_id"]]["chapter_id"]
                # Scan that chapter's classic paragraphs for pattern matches.
                # BUG FIX: the original filtered on book == "annotation",
                # which can only select the aligned annotation chapter node —
                # never the classic paragraphs this loop is meant to scan.
                for classic_paragraph_id in graph.neighbors(most_possible_classic_chapter):
                    if graph.nodes[classic_paragraph_id]["book"] == "classic" and graph.nodes[classic_paragraph_id][
                        "chapter_id"] == most_possible_classic_chapter:
                        for reg_pattern in regex.regex:
                            reverted_text = regex_revert(graph.nodes[classic_paragraph_id]["text"], reg_pattern)
                            if reverted_text:
                                graph.add_edge(paragraph.id, classic_paragraph_id)
                                # One annotation may cite several passages, so
                                # matches are accumulated per pattern.
                                if "reverted_text" not in graph.nodes[paragraph.id]:
                                    assert type(reverted_text) == list
                                    graph.nodes[paragraph.id]["reverted_text"] = {}
                                if reg_pattern not in graph.nodes[paragraph.id]["reverted_text"]:
                                    graph.nodes[paragraph.id]["reverted_text"][reg_pattern] = reverted_text
                                else:
                                    graph.nodes[paragraph.id]["reverted_text"][reg_pattern].extend(reverted_text)
                # If any single-paragraph match was found, record it on the paragraph.
                if "reverted_text" in graph.nodes[paragraph.id]:
                    paragraph.add_llm_info("quote_regex_match", graph.nodes[paragraph.id]["reverted_text"])
                # Otherwise fall back to matching across whole chapters
                # (multi-paragraph citations can't be pinned to one paragraph).
                else:
                    for classic_chapter_name in classics:
                        classic_text = "\n".join([p.text for p in classics[classic_chapter_name]])
                        for reg_pattern in regex.regex:
                            reverted_text = regex_revert(classic_text, reg_pattern)
                            if reverted_text:
                                # A chapter-level match links the paragraph to
                                # the chapter node instead.
                                graph.add_edge(paragraph.id, classics[classic_chapter_name][0].id)
                                if "reverted_text" not in graph.nodes[paragraph.id]:
                                    graph.nodes[paragraph.id]["reverted_text"] = {}
                                if reg_pattern not in graph.nodes[paragraph.id]["reverted_text"]:
                                    assert type(reverted_text) == list
                                    graph.nodes[paragraph.id]["reverted_text"][reg_pattern] = reverted_text
                                else:
                                    graph.nodes[paragraph.id]["reverted_text"][reg_pattern].extend(reverted_text)

            # Checkpoint after every paragraph so progress survives crashes.
            pickle.dump((graph, paragraphs, classics), open("graph_with_quote_regex.pkl", "wb"))


def finalize_annotation_mappings(
        evaluated_graph_path="graph_with_evaluated_mappings.pkl",
        save_path="graph_with_final_mappings.pkl",
        confidence_threshold=0.7
):
    """Derive the final annotation-to-original-text mappings.

    Re-iterates every annotation node scored by evaluate_mappings_with_agent,
    restricts matching to the best-scoring classic paragraph, and uses
    LLM-generated regular expressions (confirmed by a judge agent) to produce
    the final correspondence between annotations and the original text.

    Args:
        evaluated_graph_path: path of the pickled graph produced by
            evaluate_mappings_with_agent.
        save_path: path where the graph with final mappings is written.
        confidence_threshold: minimum ``combined_score`` a best match must
            reach before this node is considered at all.

    Returns:
        The updated graph object, or ``None`` if loading, agent
        initialization, or saving failed.
    """
    try:
        # Load the graph that already carries the agent evaluation results.
        print(f"正在加载包含评估结果的图数据 {evaluated_graph_path}...")
        with open(evaluated_graph_path, "rb") as f:
            G = pickle.load(f)
        print(f"成功加载图数据，包含 {len(G.nodes)} 个节点")
    except FileNotFoundError:
        print(f"错误：{evaluated_graph_path} 文件不存在，请先运行 evaluate_mappings_with_agent 函数")
        return None
    except Exception as e:
        print(f"加载图数据时出错：{e}")
        return None

    try:
        # Initialize the LLM agents used below.
        print("正在初始化智能体...")
        agents = Agents()
        regex_agent = agents.agents["regex"]
        judge_agent = agents.agents["judge"]
        print("智能体初始化成功")
    except Exception as e:
        print(f"初始化智能体时出错：{e}")
        return None

    processed_count = 0
    final_matched_count = 0

    # Iterate over every annotation node that has evaluation results.
    print("开始迭代有匹配结果的annotation节点...")
    for node_id in list(G.nodes()):
        node = G.nodes[node_id]

        # Only annotation nodes scored by the evaluation pass are relevant.
        if node.get("book") != "annotation" or "evaluation_results" not in node:
            continue

        try:
            # Proceed only when the best match clears the confidence bar.
            if "best_match" in node and node["best_match"]["combined_score"] >= confidence_threshold:
                best_match = node["best_match"]
                classic_node_id = best_match["classic_node_id"]
                classic_node = G.nodes[classic_node_id]

                annotation_text = node["text"]
                classic_text = classic_node.get("text", "")
                if not classic_text:
                    continue

                try:
                    # Collect quoted fragments from the annotation, if any.
                    quote = []
                    from tools import quote_detection, quote_extract
                    if quote_detection(annotation_text):
                        quote = quote_extract(annotation_text)

                    # Resolve the chapter name from this node or its chapter node.
                    chapter_names = []
                    if "chapter" in node:
                        chapter_names = [node["chapter"]]
                    elif "chapter_id" in node:
                        chapter_node = G.nodes.get(node["chapter_id"])
                        if chapter_node and "chapter" in chapter_node:
                            chapter_names = [chapter_node["chapter"]]

                    # Ask the regex agent for candidate citation patterns.
                    regex_result = regex_agent.invoke({
                        "text": annotation_text,
                        "chapter_names": chapter_names,
                        "quote": quote
                    })
                    G.nodes[node_id]["regex_analysis"] = regex_result

                    # Whether the regex path below already marked this mapping
                    # final; prevents the judge path from double-counting.
                    marked_final = False

                    # If patterns were produced, try them against the classic text.
                    if regex_result.has_regex == "是" and regex_result.regex:
                        exact_matches = []

                        for reg_pattern in regex_result.regex:
                            try:
                                # The agent sometimes renders patterns as Python
                                # raw-string literals (r"..."/r'...').  Strip only
                                # a genuine raw prefix, not a leading 'r' that
                                # belongs to the pattern itself (original code
                                # mangled any pattern starting with the letter r).
                                if reg_pattern.startswith(('r"', "r'")):
                                    clean_pattern = reg_pattern[1:].strip('"\'')
                                else:
                                    clean_pattern = reg_pattern.strip('"\'')

                                p = re.compile(clean_pattern)
                                matches = p.findall(classic_text)

                                # findall returns tuples when the pattern has
                                # multiple groups; keep the first group then.
                                if matches and isinstance(matches[0], tuple):
                                    matches = [m[0] for m in matches if m]

                                if matches:
                                    exact_matches.extend(matches)
                            except Exception as e:
                                print(f"正则表达式匹配出错：{e}，正则表达式：{reg_pattern}")
                                continue

                        if exact_matches:
                            # De-duplicate and prefer longer matches.
                            unique_matches = sorted(set(exact_matches), key=len, reverse=True)
                            G.nodes[node_id]["exact_matches"] = unique_matches

                            # Attach the exact-match info to the best_match edge.
                            for edge_key in list(G.edges[(node_id, classic_node_id)].keys()):
                                if edge_key.startswith("best_match_"):
                                    edge_attr = G.edges[(node_id, classic_node_id)][edge_key]
                                    edge_attr["exact_matches"] = unique_matches
                                    edge_attr["regex_patterns"] = regex_result.regex
                                    edge_attr["final_match"] = True
                                    marked_final = True
                                    final_matched_count += 1
                                    break

                    # Independent confirmation by the judge agent.
                    try:
                        judge_input = {
                            "input": f"原文：{annotation_text} 包含的章节名称：{str(chapter_names)} 引号内容：{str(quote)}"
                        }

                        judge_result = judge_agent.invoke(judge_input)
                        G.nodes[node_id]["judge_verification"] = judge_result

                        # BUGFIX: the original tested `"final_match" not in
                        # G.nodes[node_id]`, but final_match is only ever set on
                        # EDGE attributes, so the check was always true and the
                        # counter was double-incremented.  Use the local flag.
                        if judge_result.has_citation == "是" and not marked_final:
                            for edge_key in list(G.edges[(node_id, classic_node_id)].keys()):
                                if edge_key.startswith("best_match_"):
                                    edge_attr = G.edges[(node_id, classic_node_id)][edge_key]
                                    edge_attr["judge_confirmation"] = judge_result.has_citation
                                    edge_attr["final_match"] = True
                                    final_matched_count += 1
                                    break
                    except Exception as e:
                        print(f"调用judge_agent进行验证时出错：{e}")
                except Exception as e:
                    print(f"分析annotation节点 {node_id} 时出错：{e}")
                    continue

            processed_count += 1

            # Periodic progress report.
            if processed_count % 10 == 0:
                print(f"已处理 {processed_count} 个annotation节点，找到 {final_matched_count} 个最终匹配")
        except Exception as e:
            print(f"处理节点 {node_id} 时出错：{e}")
            continue

    # Persist the updated graph.
    try:
        with open(save_path, "wb") as f:
            pickle.dump(G, f)
        print(f"成功保存包含最终映射结果的图数据到 {save_path}")
        print(f"总共处理了 {processed_count} 个annotation节点")
        print(f"找到 {final_matched_count} 个最终确认的注释和原文对应关系")
    except Exception as e:
        print(f"保存图数据时出错：{e}")
        return None

    return G


# Entry point.  The FAISS-related pipeline steps are kept below as
# disabled examples of the intended call order.
if __name__ == "__main__":
    pass
# judge_quote()
# regex_match()
# Build Qwen3 text embeddings
# graph, _, _ = read_data_for_graph()
# build_qwen_emb(graph)
# Build graph embeddings
# build_graph_emb()
# Build the FAISS index
# build_faiss_index()

# Example: search for similar vectors
# results = search_similar_vectors(query_node_id=10, k=5)
# if results:
#     print("最相似的节点:")
#     for score, node_id in results:
#         print(f"节点 {node_id}, 相似度分数: {score}")

# Next step: change sim_measure() and simple_match_text() to use multiple
# similarity metrics, in preparation for the DP alignment.