# -*- coding: utf-8 -*-
# @DATE :  2025/7/8 
# @Author: HQ

import os
import re
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Dict, List, Tuple

import networkx as nx
import numpy as np
import pandas as pd
from graspologic.partition import hierarchical_leiden
from langchain.prompts import PromptTemplate
from langchain_core.messages import HumanMessage, SystemMessage
from llama_index.core import Document
from llama_index.core.graph_stores.types import (
    EntityNode,
    BaseNode,
    # KG_NODES_KEY,
    # KG_RELATIONS_KEY,
    # Relation,
)
from llama_index.core.node_parser import SentenceSplitter
from sentence_transformers import SentenceTransformer, util

# Imports kept for reference but not currently used anywhere in this module:
# from llama_index.core.node_parser import SemanticSplitterNodeParser
# from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader, CSVLoader
# from langchain_experimental.text_splitter import SemanticChunker
# from graph_config import *
# from pandas import DataFrame
# from py2neo import Graph, Node, Relationship
# from langchain_community.vectorstores import FAISS
# from langchain_community.embeddings import HuggingFaceEmbeddings

# Prompt template for LLM-based knowledge-graph triplet extraction.
# Placeholders: {max_knowledge_triplets} (integer cap on triplets) and {text}
# (the source passage). Fill via str.format / PromptTemplate before invoking
# the LLM; the response is parsed by GraphExtraction's regex patterns.
KG_TRIPLET_EXTRACT_TMPL = """
-Goal-
Given a text document, identify **all entities** and their entity types from the text and **all relationships** among the identified entities.
Given the text, extract up to {max_knowledge_triplets} entity-relation triplets.

-Steps-
1. Identify all entities. For each identified entity, extract the following information in the exact format below:
("entity")
- entity_name: Name of the entity, capitalized
- entity_type: Type of the entity
- entity_description: Comprehensive description of the entity's attributes and activities

2. From the entities identified in step 1, identify all pairs of (source_entity, target_entity) that are *clearly related* to each other.
For each pair of related entities, extract the following information in the exact block format below:

("relationship")
- source_entity: <name of source entity (as it appears in step 1)>
- target_entity: <name of target entity (as it appears in step 1)>
- relation: <relationship between source_entity and target_entity>
- relationship_description: <explanation as to why you think the source entity and the target entity are related to each other>

3. Entity Extraction Guidelines:
- Include health-related metrics.
- Include diseases and conditions.
- Include symptoms and complications.
- Include treatments and interventions.
- Include lifestyle.
- Include factors and consequences.

4. Relationship Extraction Guidelines:
- Include cause-effect, influence, improvement, mitigation, inclusion, classification, or recommendation links.


5. Output Formatting:
- Conclude your answer after listing all entities and all relationships. 
- Entity_description should contain the entity's  health metric values, risk levels and accurate definitions.

- Do not add any additional commentary, text, or explanatory notes outside these blocks.
- The output must strictly follow the format above, so that it can be correctly parsed by the regex patterns below:
  - entity_pattern = r'-\\s*entity_name:\\s*(.+?)\\s*-\\s*entity_type:\\s*(.+?)\\s*-\\s*entity_description:\\s*(.+?)(?=\\n\\s*\\d+\\.|\\Z)'
  - relationship_pattern = r'-\\s*source_entity:\\s*(.+?)\\s*-\\s*target_entity:\\s*(.+?)\\s*-\\s*relation:\\s*(.+?)\\s*-\\s*relationship_description:\\s*(.+?)(?=\\n\\s*\\d+\\.|\\Z)'

-An example output-

("entity")
- entity_name: Albert Einstein
- entity_type: Person
- entity_description: Albert Einstein was a theoretical physicist who developed the theory of relativity and made significant contributions to physics.

("entity")
- entity_name: Theory of Relativity
- entity_type: Scientific Theory
- entity_description: A scientific theory developed by Albert Einstein, describing the laws of physics in relation to observers in different frames of reference.

("entity")
- entity_name: Nobel Prize in Physics
- entity_type: Award
- entity_description: A prestigious international award in the field of physics, awarded annually by the Royal Swedish Academy of Sciences.

("relationship")
- source_entity: Albert Einstein
- target_entity: Theory of Relativity
- relation: developed
- relationship_description: Albert Einstein is the developer of the theory of relativity.

("relationship")
- source_entity: Albert Einstein
- target_entity: Nobel Prize in Physics
- relation: won
- relationship_description: Albert Einstein won the Nobel Prize in Physics in 1921.

-Real Data-
######################
text: {text}
######################
output:
"""


class DocumentLoader:
    """Loads PDF / CSV source files and converts them into llama-index ``Document`` objects.

    The PDF path first asks the LLM to clean up extraction artefacts (broken
    lines, hyphenated word splits) before chunking the text.
    """

    # System prompt used to have the LLM restructure raw PDF page text.
    TEXT_RESTRUCTURING_SYSTEM_PROMPT = """
        You are a text restructuring assistant. Your goal is to convert the provided text—potentially containing irregular line breaks, unnecessary newlines, and hyphenated word splits—into a coherent, readable format. Follow these steps:

        1. Combine Broken Lines:
           Merge lines disrupted in the middle of sentences or words. Remove any extraneous newlines that interrupt the natural flow.

        2. Fix Hyphenated Word Splits:
           If a word is split across lines with a hyphen (e.g., "intro- duce"), rejoin the parts to form the complete word.

        3. Organize into Paragraphs:
           Identify logical breaks in the content and adjust the text into paragraphs. Keep meaningful line breaks, but remove those creating awkward or incomplete sentences.

        4. Retain Original Meaning and Punctuation:
           Do not alter the text’s content or punctuation; focus solely on improving its readability.

        5. Output Only the Restructured Data:
           Provide the final text as continuous plain text with appropriate paragraph breaks, without adding any extra explanation or commentary.

        6. Focusing on the Knowledge Related to Women:
           Do not extract any mathematical formulas, equations, tables, figures, or website references.Ignore citations, references, footnotes, and appendix sections.Keep only the core content of the paper that conveys knowledge about women.
        """

    def __init__(self, llm):
        """
        Args:
            llm: chat model exposing ``invoke(messages).content`` (LangChain-style).
        """
        self.llm = llm

    def create_pdf_document(self, file_path: str) -> List["Document"]:
        """Load every ``*.pdf`` under ``file_path``, clean each page with the LLM,
        split the cleaned text into overlapping chunks and wrap them as Documents.

        Args:
            file_path: directory containing the PDF files.

        Returns:
            One ``Document`` per text chunk across all PDFs.
        """
        import time
        from langchain.text_splitter import RecursiveCharacterTextSplitter
        from langchain_community.document_loaders import PyPDFLoader

        # Load all pages, keyed by PDF filename.
        pages = {}
        for pdf in os.listdir(file_path):
            if pdf.endswith(".pdf"):
                # os.path.join instead of '\\' concatenation for portability.
                loader = PyPDFLoader(file_path=os.path.join(file_path, pdf))
                pages[pdf] = list(loader.load())

        text_splitter = RecursiveCharacterTextSplitter(
            separators=["\n\n", "\n"],
            chunk_size=500,  # maximum chunk size
            chunk_overlap=50,  # overlap between consecutive chunks
        )
        texts = []
        for pdf, page_list in pages.items():
            for item in page_list:
                start_time = time.perf_counter()
                # Have the LLM restructure the raw page text before chunking.
                respond = self.llm.invoke(
                    [SystemMessage(content=self.TEXT_RESTRUCTURING_SYSTEM_PROMPT)] + [
                        HumanMessage(content=item.page_content)]).content
                print('seconds', round(time.perf_counter() - start_time, 4))
                texts.extend(text_splitter.split_text(respond))

        return [Document(text=text) for text in texts]

    def create_csv_document(self, file_path: str) -> List["Document"]:
        """Build one ``Document`` per CSV row, rendered as ``column: value`` lines.

        Bug fixes vs. the previous version: documents are now accumulated across
        all files (previously each file *replaced* the prior file's documents),
        and the per-row text is joined into a single string (previously a list
        was passed as ``text``, which ``Document`` does not accept).
        """
        documents = []
        for file in os.listdir(file_path):
            df = pd.read_csv(os.path.join(file_path, file))
            documents.extend(
                Document(text="\n".join(f"{col}: {row[col]}" for col in df.columns))
                for _, row in df.iterrows()
            )
        return documents

    def create_mayo_document(self, file_path: str) -> List["Document"]:
        """Build one ``Document`` per row of each Mayo-Clinic-style CSV under ``file_path``.

        Each row is rendered as a fixed disease profile (symptoms, causes, risk
        factors, complications, prevention, when to see a doctor).
        """
        documents = []
        for file in os.listdir(file_path):
            if file.endswith(".csv"):
                df = pd.read_csv(os.path.join(file_path, file))
                # Convert data into LlamaIndex Document objects
                documents.extend([
                    Document(text=f"""{row['mondo_name']}:
                    Symptoms: {row['mayo_symptoms']}
                    Causes: {row['mayo_causes']}
                    Risk Factors: {row['mayo_risk_factors']}
                    Complications: {row['mayo_complications']}
                    Prevention: {row['mayo_prevention']}
                    When to See a Doctor: {row['mayo_see_doc']}
                    """)
                    for _, row in df.iterrows()
                ])
        return documents


class GraphExtraction:
    """Extracts entities/relationships from documents with an LLM, deduplicates
    them, and converts the result into a NetworkX graph.

    Pipeline: ``document_to_nodes`` -> ``entity_respond`` (LLM) -> ``parse_fn``
    (regex) -> ``resolve_entity`` / ``resolve_relationship`` (dedup) ->
    ``graph_store`` (NetworkX).
    """

    def __init__(self, llm: Any, embedding_model_path: str):
        """
        Args:
            llm: chat model compatible with LangChain's runnable interface.
            embedding_model_path: local path of a SentenceTransformer model used
                to compare entity descriptions during deduplication.
        """
        self.llm = llm
        # Parses the ("entity") blocks emitted per KG_TRIPLET_EXTRACT_TMPL.
        self.entity_pattern = re.compile(
            r'\("entity"\)\s*'
            r'-\s*entity_name:\s*(.+?)\s*'
            r'-\s*entity_type:\s*(.+?)\s*'
            r'-\s*entity_description:\s*(.+?)(?=\("entity"\)|\("relationship"\)|$)',
            re.DOTALL
        )
        # Parses the ("relationship") blocks emitted per KG_TRIPLET_EXTRACT_TMPL.
        self.relationship_pattern = re.compile(
            r'\("relationship"\)\s*'
            r'-\s*source_entity:\s*(.+?)\s*'
            r'-\s*target_entity:\s*(.+?)\s*'
            r'-\s*relation:\s*(.+?)\s*'
            r'-\s*relationship_description:\s*(.+?)(?=\("entity"\)|\("relationship"\)|$)',
            re.DOTALL
        )
        self.embedding_model = SentenceTransformer(embedding_model_path)

    def document_to_nodes(
            self,
            documents: List["Document"],
            chunk_size: int = 2048,
            chunk_overlap: int = 0
    ) -> List["BaseNode"]:
        """Split ``documents`` into sentence-boundary chunks ("nodes")."""
        splitter = SentenceSplitter(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
        )
        return splitter.get_nodes_from_documents(documents)

    def parse_fn(self, response_str: str) -> tuple[list[Any], list[Any]]:
        """Match every entity and relationship block in an LLM response.

        Returns:
            (entities, relationships): ``entities`` is a list of
            (name, type, description) tuples; ``relationships`` is a list of
            (source, target, relation, description) tuples.
        """
        entities = self.entity_pattern.findall(response_str)
        relationships = self.relationship_pattern.findall(response_str)
        return entities, relationships

    def entity_respond(self, text: str, max_knowledge_triplets: int = 10):
        """Run the triplet-extraction prompt against ``text``; returns the raw LLM message."""
        extract_prompt = PromptTemplate(
            input_variables=["text", "max_knowledge_triplets"],
            # Bug fix: KG_TRIPLET_EXTRACT_TMPL_WOMAN is not defined anywhere in
            # this module; use the template declared at the top of the file.
            template=KG_TRIPLET_EXTRACT_TMPL
        )
        chains = extract_prompt | self.llm
        return chains.invoke({
            "text": text,
            "max_knowledge_triplets": max_knowledge_triplets
        })

    def resolve_entity(self, entities: List[dict]) -> List[dict]:
        """Deduplicate entity dicts by name equality plus description similarity.

        Two entities merge when their normalised names match AND the cosine
        similarity of their description embeddings is >= 0.95; each resulting
        cluster is represented by its member with the shortest name.

        Args:
            entities: dicts with 'name', 'label' and
                'properties'['entity_description'] keys.

        Returns:
            Deduplicated entity dicts (names/labels lower-cased and stripped).
        """
        # --- 1. Preprocessing ---
        entities = [node for node in entities if isinstance(node, dict)]

        df = pd.DataFrame([{
            'temp_id': f"ent_{i}",
            'name': item['name'],
            'label': item['label'],
            'description': item['properties'].get('entity_description')
        } for i, item in enumerate(entities)])
        df['name_processed'] = df['name'].str.lower().str.strip()
        df['label_processed'] = df['label'].str.lower().str.strip()
        df['description_processed'] = df['description'].fillna('').astype(str)

        # --- 2. Primary Matching (Description + Name) ---
        desc_similarity_threshold = 0.95  # semantic similarity cut-off (0.0 - 1.0)
        matched_pairs = []

        try:
            embeddings = self.embedding_model.encode(df['description_processed'].tolist(), convert_to_tensor=True,
                                                     show_progress_bar=True)
            cosine_scores = util.cos_sim(embeddings, embeddings)

            for i in range(len(cosine_scores)):
                for j in range(i + 1, len(cosine_scores)):
                    score = cosine_scores[i][j].item()
                    # Core logic: high description similarity AND matching names.
                    if score >= desc_similarity_threshold and df.iloc[i]['name_processed'] == \
                            df.iloc[j]['name_processed']:
                        temp_id1 = df.iloc[i]['temp_id']
                        temp_id2 = df.iloc[j]['temp_id']
                        matched_pairs.append(tuple(sorted((temp_id1, temp_id2))) + (score * 100,))

        except ImportError:
            # NOTE(review): encode() is unlikely to raise ImportError at this
            # point (the model is constructed in __init__) — confirm intent.
            print("警告：sentence-transformers 未安装。")
        except Exception as e:
            print(f"错误：语义比较失败 - {e}")

        # --- 3. Clustering via connected components of matched pairs ---
        unique_pairs_for_graph = set([(p[0], p[1]) for p in matched_pairs])

        G = nx.Graph()
        G.add_edges_from(unique_pairs_for_graph)
        clusters = list(nx.connected_components(G))

        # --- 4. Canonical representative per cluster ---
        canonical_ids = []
        all_clustered_ids = set()
        # Maps every member name to its cluster representative's name.
        # NOTE(review): built but not consumed anywhere in this class — confirm
        # whether callers still need it.
        entity_dict = {}
        if clusters:
            for i, cluster in enumerate(clusters):
                all_clustered_ids.update(cluster)
                cluster_df = df[df['temp_id'].isin(cluster)].copy()
                # Strategy: the member with the shortest name represents the cluster.
                cluster_df['name_len'] = cluster_df['name'].str.len()
                representative = cluster_df.loc[cluster_df['name_len'].idxmin()]
                for _, item in cluster_df.iterrows():
                    entity_dict[item['name']] = representative['name']
                canonical_ids.append(representative['temp_id'])
        else:
            print("  No clusters found.")

        # Singletons: entities not merged into any cluster.
        all_ids = set(df['temp_id'])
        singleton_ids = list(all_ids - all_clustered_ids)

        # Combine canonical IDs from clusters and singleton IDs.
        final_ids = canonical_ids + singleton_ids

        # --- 5. Final output, preserving the final_ids order ---
        final_df = df[df['temp_id'].isin(final_ids)].set_index('temp_id').loc[final_ids].reset_index()

        final_data_list = []
        for _, row in final_df.iterrows():
            final_data_list.append({
                'name': row['name_processed'],
                'label': row['label_processed'],
                # Reconstruct properties - assuming only description was there.
                # Note: if original properties had more keys, this needs adjustment.
                'properties': {'entity_description': row['description_processed']}
            })
        return final_data_list

    def resolve_relationship(self, relationships: List[dict]) -> List[dict]:
        """Deduplicate relationships by their (source, label, target) triple.

        The first occurrence of each unique triple is kept; fields are
        lower-cased and stripped in the output.
        """
        # --- 1. Load and standardize ---
        df = pd.DataFrame(relationships)

        # Extract description, tolerating missing properties/description.
        df['relationship_description'] = df['properties'].apply(
            lambda x: x.get('relationship_description', '') if isinstance(x, dict) else '')

        df['source_id_std'] = df['source_id'].astype(str).str.lower().str.strip()
        df['label_std'] = df['label'].astype(str).str.lower().str.strip()
        df['target_id_std'] = df['target_id'].astype(str).str.lower().str.strip()

        # --- 2. Structural key = (source, label, target) ---
        df['structural_key'] = df.apply(lambda row: (row['source_id_std'], row['label_std'], row['target_id_std']),
                                        axis=1)

        # --- 3. Keep the first occurrence of each unique structural key ---
        representative_indices = df.drop_duplicates(subset=['structural_key'], keep='first').index
        df_deduplicated = df.loc[representative_indices].copy()
        df_deduplicated.reset_index(drop=True, inplace=True)

        # --- 4. Convert back to the original dictionary format ---
        final_data_list = []
        for _, row in df_deduplicated.iterrows():
            final_data_list.append({
                'label': row['label_std'],
                'source_id': row['source_id_std'],
                'target_id': row['target_id_std'],
                'properties': {'relationship_description': row['relationship_description']}
            })
        return final_data_list

    def extract_entity_relationship(self, documents: List["Document"], max_knowledge_triplets: int = 10) -> Tuple[
        Any, List[Dict]]:
        """Run the full extraction pipeline over ``documents``.

        Returns:
            (entity_node, existing_relations): deduplicated entity dicts and
            relationship dicts (see resolve_entity / resolve_relationship).
        """
        existing_nodes = []
        existing_relations = []
        nodes = self.document_to_nodes(documents)
        for node in nodes:
            text = node.get_content(metadata_mode="llm")
            try:
                result = self.entity_respond(text, max_knowledge_triplets)
                llm_response = result.content
                entities, entities_relationship = self.parse_fn(llm_response)
            except ValueError:
                # Treat an unparsable / failed LLM response as "nothing found".
                entities = []
                entities_relationship = []
            metadata = node.metadata.copy()
            for entity, entity_type, description in entities:
                # Not used in the current implementation, but kept for future work.
                metadata["entity_description"] = description
                entity_node = {
                    'name': entity, 'label': entity_type, 'properties': {'entity_description': description}
                }
                existing_nodes.append(entity_node)

            metadata = node.metadata.copy()
            for subj, obj, rel, description in entities_relationship:
                # EntityNode is only used here to derive stable node ids.
                subj_node = EntityNode(name=subj, properties=metadata)
                obj_node = EntityNode(name=obj, properties=metadata)
                metadata["relationship_description"] = description
                rel_node = {
                    'label': rel,
                    'source_id': subj_node.id,
                    'target_id': obj_node.id,
                    'properties': {'relationship_description': description}}
                existing_relations.append(rel_node)

        self.entity_node = self.resolve_entity(existing_nodes)
        self.existing_relations = self.resolve_relationship(existing_relations)
        return self.entity_node, self.existing_relations

    def graph_store(self, entity: List[dict], relations: List[dict]) -> "nx.Graph":
        """Converts internal graph representation to NetworkX graph."""
        nx_graph = nx.Graph()
        for node in entity:
            nx_graph.add_node(node.get('name'), label=node.get('label'), properties=node.get('properties'))
        for relation in relations:
            nx_graph.add_edge(
                relation.get('source_id'),
                relation.get('target_id'),
                relationship=relation.get('label'),
                description=relation['properties'],
            )
        return nx_graph


def cal_final_weight(nx_graph):
    """Assign a composite weight to every edge of ``nx_graph`` before community building.

    The weight mixes three signals:
        final = 0.5 * edge frequency + 0.3 * embedding similarity + 0.2 * topic score
    and each edge is additionally tagged with a 'topic' attribute
    ('menstrual' or 'osa') based on which topic embedding it is closer to.

    Args:
        nx_graph: NetworkX graph produced by ``GraphExtraction.graph_store``.

    Returns:
        The same graph instance, mutated in place with 'topic' and 'weight'
        edge attributes.
    """
    from collections import Counter
    from sentence_transformers import SentenceTransformer, util
    edges = list(nx_graph.edges())
    # Sort endpoints so (a, b) and (b, a) collapse to one undirected key.
    normalized_edges = [tuple(sorted(edge)) for edge in edges]
    # Occurrence count per undirected edge.
    edge_freq_counter = Counter(normalized_edges)

    embedding_model_path = r'./data/models/BAAI/bge-small-en-v1___5'
    embedding_model = SentenceTransformer(embedding_model_path)
    # NOTE(review): encode() is handed (head, tail) tuples rather than plain
    # strings here — confirm the model tokenises these as intended.
    embeddings = embedding_model.encode(normalized_edges, convert_to_tensor=True, show_progress_bar=True)
    cosine_scores = util.cos_sim(embeddings, embeddings)

    # --- Topic tagging: label each edge with its closest topic embedding ---
    edge_texts = [f"{head} {tail}" for head, tail in edges]
    edge_embeddings = embedding_model.encode(edge_texts, convert_to_tensor=True, show_progress_bar=True)

    topic_embeddings = embedding_model.encode(['menstrual', 'obstructive sleep apnea'], convert_to_tensor=True)
    menstrual_embedding = topic_embeddings[0]
    osa_embedding = topic_embeddings[1]

    labels = []
    for emb in edge_embeddings:
        sim_menstrual = util.cos_sim(emb, menstrual_embedding).item()
        sim_osa = util.cos_sim(emb, osa_embedding).item()
        labels.append('menstrual' if sim_menstrual > sim_osa else 'osa')
    edge_topics = {edge: label for edge, label in zip(normalized_edges, labels)}
    for (u, v), topic in edge_topics.items():
        if nx_graph.has_edge(u, v):
            nx_graph[u][v]['topic'] = topic

    # --- Final composite weights ---
    weight_dict = {}
    for i in range(len(cosine_scores)):
        for j in range(i + 1, len(cosine_scores)):
            # Bug fix: look the frequency up by edge key. The previous positional
            # indexing into Counter.values() mis-aligned (and could raise
            # IndexError) as soon as duplicate edges collapsed into a single
            # counter entry.
            structure_weight = edge_freq_counter[normalized_edges[i]]
            embedding_similarity_score = cosine_scores[i][j].item()
            topic_similarity_score = 0 if labels[i] == 'menstrual' else 1
            final_weight = 0.5 * structure_weight + 0.3 * embedding_similarity_score + 0.2 * topic_similarity_score
            # NOTE(review): each pass of j overwrites the previous value, so only
            # the similarity against the last j survives — confirm this is intended.
            weight_dict[normalized_edges[i]] = final_weight

    for edge, weight in weight_dict.items():
        nx_graph.add_edge(edge[0], edge[1], weight=weight)

    return nx_graph


def build_communities(nx_graph, max_cluster_size=1000, resolution=1, extra_forced_iterations=0, randomness=0.001,
                      random_seed=42):
    """Partition ``nx_graph`` with hierarchical Leiden and collect per-community edge details.

    Args:
        nx_graph: weighted NetworkX graph (see ``cal_final_weight``).
        max_cluster_size, resolution, extra_forced_iterations, randomness,
        random_seed: passed straight through to ``hierarchical_leiden``.

    Returns:
        (community_info, community_mapping):
        ``community_info`` maps cluster id -> list of
        "node -> neighbor -> relationship -> description" strings for
        intra-community edges; ``community_mapping`` maps node ->
        {"cluster": id, "level": hierarchy level}.
    """
    community_hierarchical_clusters = hierarchical_leiden(
        nx_graph, max_cluster_size=max_cluster_size, resolution=resolution,
        extra_forced_iterations=extra_forced_iterations, randomness=randomness,
        random_seed=random_seed

    )

    community_mapping = {
        item.node: {
            "cluster": item.cluster,
            "level": item.level,
        }
        for item in community_hierarchical_clusters
    }

    # (A ~130-line block of disabled matplotlib/plotly community-visualisation
    # code used to sit here as a dead, unassigned string literal that was
    # rebuilt and discarded on every call; it has been removed.)

    community_info = {}
    for item in community_hierarchical_clusters:
        cluster_id = item.cluster
        node = item.node
        details = community_info.setdefault(cluster_id, [])

        # Record only edges whose other endpoint belongs to the same cluster.
        # NOTE(review): assumes every edge carries 'relationship' and
        # 'description' attributes (set by GraphExtraction.graph_store) —
        # edges added by cal_final_weight with only a 'weight' would KeyError.
        for neighbor in nx_graph.neighbors(node):
            if community_mapping[neighbor]['cluster'] == cluster_id:
                edge_data = nx_graph.get_edge_data(node, neighbor)
                if edge_data:
                    details.append(f"{node} -> {neighbor} -> {edge_data['relationship']}"
                                   f" -> {edge_data['description']}")

    return community_info, community_mapping


def summarize_communities(llm, community_info):
    """Generate and store summaries for each community.

    NOTE(review): a byte-identical function with the same name is defined
    again later in this file; at import time the later definition shadows
    this one — confirm which copy is intended to survive.

    Args:
        llm: Chat model exposing ``invoke(messages)`` returning an object
            with a ``.content`` attribute (LangChain-style interface).
        community_info: Mapping of community id -> list of relationship
            detail strings ("node -> neighbor -> relation -> description").

    Returns:
        dict: community id -> {"三元组": joined details, "社区报告": summary}.
            Communities whose LLM call raised keep only the "三元组" key
            (the error is only printed, not re-raised).
    """

    # for community_id, details in community_info.items():
    #     details_text = ("\n".join(details) + ".")  # Ensure it ends with a period
    #     self.community_summary[community_id] = self.generate_community_summary(llm, details_text)
    def generate_community_summary(llm, text):
        """Generate summary for a given text using an LLM."""
        messages = [SystemMessage(content=(
            "You are provided with a set of relationships from a knowledge graph, each represented as "
            "entity1->entity2->relation->relationship_description. Your task is to create a summary of these "
            "relationships. The summary should include the names of the entities involved and a concise synthesis "
            "of the relationship descriptions. The goal is to capture the most critical and relevant details that "
            "highlight the nature and significance of each relationship. Ensure that the summary is coherent and "
            "integrates the information in a way that emphasizes the key aspects of the relationships."
        ))] + [HumanMessage(content=text)]
        response = llm.invoke(messages).content

        return response

    community_summary = {}
    # Fan the (I/O-bound) LLM calls out over a thread pool.
    futures = {}
    with ThreadPoolExecutor() as executor:
        for community_id, details in community_info.items():
            details_text = "\n".join(details) + "."  # Ensure it ends with a period
            futures[executor.submit(generate_community_summary, llm, details_text)] = community_id
            community_summary[community_id] = {"三元组": details_text}
        for future in as_completed(futures):
            community_id = futures[future]
            try:
                summary = future.result()
                community_summary[community_id]["社区报告"] = summary
            except Exception as e:
                print(f"Error generating summary for {community_id}: {e}")

    return community_summary


class QueryEngine:
    """Graph-RAG query engine over entity/relation/community vector stores.

    Builds three in-memory FAISS indexes from the supplied dataframes —
    entity nodes, relation descriptions and community summaries — plus a
    NetworkX graph of the knowledge graph, and serves:
      * global_search: community-summary level retrieval,
      * local_search: node/relation level retrieval plus subgraph triplets.
    """
    def __init__(self,
                 embedding_model,
                 df_entity_node,
                 df_existing_relations,
                 df_community_summary,
                 node_read_dir="./data/knowledge_database/node_faiss_index",
                 relation_read_dir="./data/knowledge_database/relation_faiss_index",
                 community_read_dir="./data/knowledge_database/community_faiss_index",
                 ):
        # The *_read_dir parameters are only used by the commented-out
        # FAISS.load_local path below; the indexes are currently rebuilt
        # in memory on every construction.
        self.embedding_model = embedding_model
        self.df_entity_node = df_entity_node
        self.df_existing_relations = df_existing_relations
        self.df_community_summary = df_community_summary
        # Materialize the dataframes into plain dict records for graph build.
        entities = [df_entity_node.iloc[i].to_dict() for i in range(len(df_entity_node))]
        relations = [df_existing_relations.iloc[i].to_dict() for i in range(len(df_existing_relations))]
        self.nx_graph = self.create_nx_graph(entities, relations)
        from langchain.schema import Document
        # ==== Build entity-node documents ("name//label//properties") ====
        doc_nodes = []
        for _, row in df_entity_node.iterrows():
            page_content = f"{row['name']}//{row['label']}//{row['properties']}"
            doc_nodes.append(Document(page_content=page_content,
                                      # metadata={"cluster": row["cluster"]}
                                      ))
        # NOTE(review): cluster metadata is commented out above, yet
        # local_search(filered_by_community=True) filters on a "cluster"
        # metadata key — confirm the filter can still match anything.

        # ==== Build entity-relation documents ====

        doc_rels = []
        for _, row in df_existing_relations.iterrows():
            relation_str = str(row['properties'])
            # Keep only the free-text description part of the properties blob.
            if "'relationship_description':" in relation_str:
                rel_content = relation_str.split("'relationship_description':")[1]
                doc_rels.append(Document(page_content=rel_content))

        # ==== Build community-summary documents (cluster id kept in metadata) ====
        doc_community = []
        for idx, row in df_community_summary.iterrows():
            doc_community.append(Document(
                page_content=str(row['社区报告']),
                metadata={"cluster": idx}
            ))
        self.vectorstore_rels = FAISS.from_documents(doc_rels, embedding_model)
        self.vectorstore_nodes = FAISS.from_documents(doc_nodes, embedding_model)
        self.vectorstore_community = FAISS.from_documents(doc_community, embedding_model)
        # self.vectorstore_nodes = FAISS.load_local(node_read_dir, self.embedding_model,
        #                                           allow_dangerous_deserialization=True)
        # self.vectorstore_rels = FAISS.load_local(relation_read_dir, self.embedding_model,
        #                                           allow_dangerous_deserialization=True)
        # self.vectorstore_community = FAISS.load_local(community_read_dir, self.embedding_model,
        #                                           allow_dangerous_deserialization=True)

    def create_nx_graph(self, entity: List[dict], relations: List[dict]) -> nx.Graph:
        """Converts internal graph representation to NetworkX graph.

        Each entity dict contributes a node keyed by 'name'; each relation
        dict contributes an undirected edge between 'source_id' and
        'target_id' carrying the relation label and its properties blob.
        """
        nx_graph = nx.Graph()
        for node in entity:
            nx_graph.add_node(node.get('name'), label=node.get('label'), properties=node.get('properties'))
        for relation in relations:
            nx_graph.add_edge(
                relation.get('source_id'),
                relation.get('target_id'),
                relationship=relation.get('label'),
                description=relation['properties'],
            )
        return nx_graph

    def read_faiss_index(self, k=3, read_dir="./data/knowledge_database/rag_faiss_index"):
        """Load a serialized FAISS index from disk and build retrievers.

        :param k: number of documents each retriever returns.
        :param read_dir: path of the serialized FAISS index directory.
        :return: dict with a similarity retriever ("sim") and an MMR
            retriever ("mmr") over the loaded store.
        """
        vector_store = FAISS.load_local(read_dir, self.embedding_model, allow_dangerous_deserialization=True)

        retriever_mmr = vector_store.as_retriever(
            search_type="mmr",
            search_kwargs={
                "k": k,
                'lambda_mult': 0.25,
            })

        retriever_sim = vector_store.as_retriever(
            search_type="similarity",
            search_kwargs={
                "k": k,
            }
        )

        return {
            "sim": retriever_sim,
            "mmr": retriever_mmr
        }

    def global_search(self, query_list):
        """
        Run a similarity search for every query against the community vector
        store and format the hits into a single text context.

        Args:
            query_list: List of query strings, each searched independently.
        Returns:
            docs_community: All documents retrieved across the queries.
            community_context: Their page contents concatenated into one
                string, suitable as LLM context.
        """
        search_kwargs = {"k": 3, }
        retriever_sim_cummunities = self.vectorstore_community.as_retriever(
            search_type="similarity",
            search_kwargs=search_kwargs
        )
        docs_community = []
        for query in query_list:
            docs_community.extend(retriever_sim_cummunities.invoke(query))
        # Flatten the hits into a single context string.
        community_context = self.format_context(docs_community)

        return docs_community, community_context

    def format_context(self, docs):
        """Concatenate document page contents, one per line."""
        context = ""
        for doc in docs:
            context += doc.page_content
            context += "\n"
        return context

    def local_search(self, query_list, depth=1, filered_by_community=False):
        """
        Run a local similarity search over the node/relation stores and build
        subgraph triplets around the retrieved nodes.

        Args:
            query_list (List[str]): Query strings.
            depth (int): BFS depth used when extracting the subgraph
                (default 1).
            filered_by_community (bool): If True, first run a community
                search and restrict retrieval to the matching clusters.
                (Parameter name keeps the original spelling so existing
                callers keep working.)

        Returns:
            tuple: (node_entities, triplets, rels_context) — the matched
            node documents' contents, triplet strings of the form
            "head_relation_tail: description", and the relation-store
            context text.
        """
        # Default retrieval parameters (top-5 most similar).
        search_kwargs = {
            "k": 5,
        }
        # With community filtering enabled, constrain results to the clusters
        # surfaced by the community-level search.
        if filered_by_community:
            docs_communities, community_context = self.global_search(query_list)
            cluster_list = [doc.metadata["cluster"] for doc in docs_communities]
            cluster_filter = {"cluster": {"$in": cluster_list}}
            # Raise k while adding the cluster filter.
            search_kwargs = {
                "k": 10,
                'filter': cluster_filter,
            }
        # Retriever over the relation vector store.
        retriever_sim_rels = self.vectorstore_rels.as_retriever(
            search_type="similarity",
            search_kwargs=search_kwargs
        )
        docs_rels = []
        for query in query_list:
            docs_rels.extend(retriever_sim_rels.invoke(query))
        rels_context = self.format_context(docs_rels)
        # Retriever over the node vector store.
        retriever_sim_nodes = self.vectorstore_nodes.as_retriever(
            search_type="similarity",
            search_kwargs=search_kwargs
        )
        # Run the similarity query for every input and collect all matches.
        docs_nodes = []
        for query in query_list:
            docs_nodes.extend(retriever_sim_nodes.invoke(query))

        # Deduplicate node names (the part before "//" in page_content).
        node_set, node_entities = set(), set()
        for doc_node in docs_nodes:
            node_set.add(doc_node.page_content.split("//")[0])
            node_entities.add(doc_node.page_content)
        node_list = list(node_set)

        # Build triplet strings for every edge reachable from those nodes.
        triplets = []
        for root_node in node_list:
            subgraph = self.extract_subgraph_bfs(root_node, depth)
            for edge in subgraph.edges:
                edge_data = subgraph.get_edge_data(edge[0], edge[1])
                triplet_str = f"{edge[0]}_{edge_data['relationship']}_{edge[1]}:{edge_data['description']}"
                triplets.append(triplet_str)

        return node_entities, triplets, rels_context

    def extract_subgraph(self, root_node: str, depth: int) -> nx.Graph:
        """
        Extract the subgraph of self.nx_graph centered on root_node with
        radius depth.

        Args:
            root_node (str): Center node of the subgraph.
            depth (int): Maximum distance from root_node.

        Returns:
            nx.Graph: Subgraph containing every node within distance
            <= depth of root_node and the edges among them.
        """
        # NetworkX's ego_graph does the radius-bounded extraction directly.
        subgraph = nx.ego_graph(self.nx_graph, root_node, radius=depth)
        return subgraph

    def extract_subgraph_bfs(self, root_node: str, depth: int) -> nx.Graph:
        """
        Manual BFS variant of extract_subgraph: collect all nodes within
        `depth` hops of root_node and return the induced subgraph (a copy).
        """
        visited = {root_node}
        current_level = {root_node}
        for _ in range(depth):
            next_level = set()
            for node in current_level:
                neighbors = set(self.nx_graph.neighbors(node))
                next_level.update(neighbors - visited)
            visited.update(next_level)
            current_level = next_level
        # print(visited)
        # Induce the subgraph on the visited node set.
        return self.nx_graph.subgraph(visited).copy()


def clean_text(page_text):
    """Strip stray single-byte escape artifacts (e.g. \\xa0) from PDF text.

    The text is round-tripped through 'unicode_escape' so that raw
    non-ASCII control bytes surface as literal ``\\xNN`` two-digit escape
    sequences; each distinct sequence is replaced by a space, and the
    result is decoded back to ordinary text.
    """
    # NOTE(review): the replace(' ', ' ') below looks like a no-op
    # (space -> space); possibly a non-breaking space was intended — confirm.
    escaped = page_text.encode('unicode_escape').decode('utf-8').replace(' ', ' ')
    # Collect the distinct literal \xNN sequences present in the text.
    for token in set(re.findall(r'\\x[a-f0-9]{2}', escaped)):
        # Blank out each escape artifact.
        escaped = escaped.replace(token, ' ')
    # Decode remaining escape sequences back into ordinary characters.
    return escaped.encode('utf-8').decode('unicode_escape')


def build_vectorstores(mode="kg",
                       df_entity_node=None,
                       df_existing_relations=None,
                       df_community_summary=None,
                       source_dir=None,
                       embedding_model=None,
                       ):
    """
    Build and persist the knowledge-base vector indexes.

    Args:
        mode (str): "kg" builds the knowledge-graph indexes (nodes,
            relations, community summaries); "rag" builds a plain RAG
            document index from *source_dir*.
        df_entity_node (DataFrame): Entity-node data; needs 'name',
            'label', 'properties' and 'cluster' columns.
        df_existing_relations (DataFrame): Relation data; needs a
            'properties' column.
        df_community_summary (DataFrame): Community summaries; needs a
            '社区报告' column.
        source_dir (str): Source document directory (RAG mode only).
        embedding_model: Embedding model used to vectorize documents.

    Raises:
        ValueError: If *mode* is neither "kg" nor "rag".
    """
    from langchain.schema import Document
    if mode == "kg":
        # ==== Build the entity-node vector store ====
        doc_nodes = []
        for _, row in df_entity_node.iterrows():
            page_content = f"{row['name']}//{row['label']}//{row['properties']}"
            doc_nodes.append(Document(page_content=page_content, metadata={"cluster": row["cluster"]}))

        # ==== Build the entity-relation vector store ====
        doc_rels = []
        for _, row in df_existing_relations.iterrows():
            # FIX: access the column by label instead of position (was
            # row[3]); integer [] indexing on a labelled Series is
            # deprecated, and this now matches QueryEngine.__init__, which
            # reads the same dataframe via row['properties'].
            relation_str = str(row['properties'])
            if "'relationship_description':" in relation_str:
                rel_content = relation_str.split("'relationship_description':")[1]
                doc_rels.append(Document(page_content=rel_content))

        # ==== Build the community-summary vector store ====
        doc_community = []
        for idx, row in df_community_summary.iterrows():
            # FIX: label-based access (was row[1]) to match QueryEngine,
            # which reads the same dataframe via row['社区报告'].
            doc_community.append(Document(
                page_content=str(row['社区报告']),
                metadata={"cluster": idx}
            ))

        # ==== Build and persist the indexes ====
        vectorstore_nodes = FAISS.from_documents(doc_nodes, embedding_model)
        vectorstore_nodes.save_local("./data/knowledge_database/node_faiss_index")

        vectorstore_rels = FAISS.from_documents(doc_rels, embedding_model)
        vectorstore_rels.save_local("./data/knowledge_database/relation_faiss_index")

        vectorstore_community = FAISS.from_documents(doc_community, embedding_model)
        vectorstore_community.save_local("./data/knowledge_database/community_faiss_index")

    elif mode == "rag":

        # ==== Load csv/pdf files ====
        loaders = []
        for file_name in os.listdir(source_dir):
            file_path = os.path.join(source_dir, file_name)
            if file_name.endswith(".csv"):
                loaders.append(CSVLoader(file_path, encoding="utf-8"))
            elif file_name.endswith(".pdf"):
                loaders.append(PyPDFLoader(file_path))

        # ==== Clean the raw text ====
        documents = []
        for loader in loaders:
            for doc in loader.load():
                doc.page_content = clean_text(doc.page_content)
                documents.append(doc)

        # ==== Split documents and build the index ====
        text_splitter = SemanticChunker(embeddings=embedding_model, min_chunk_size=200)
        doc_chunks = text_splitter.split_documents(documents)
        vectorstore = FAISS.from_documents(doc_chunks, embedding_model)
        vectorstore.save_local("./data/knowledge_database/rag_faiss_index")

    else:
        raise ValueError("参数 'mode' 必须是 'kg' 或 'rag'")


"""
def build_communities(nx_graph, max_cluster_size=1000, resolution=1):
    # Builds communities from the graph and summarizes them.

    community_hierarchical_clusters = hierarchical_leiden(
        nx_graph, max_cluster_size=max_cluster_size, resolution=resolution
    )

    community_mapping = {
        item.node: {
            "cluster": item.cluster,
            "level": item.level,
        }
     for item in community_hierarchical_clusters
    }

    community_info = {}
    for item in community_hierarchical_clusters:
        cluster_id = item.cluster
        node = item.node
        if cluster_id not in community_info:
            community_info[cluster_id] = []

        for neighbor in nx_graph.neighbors(node):
            if community_mapping[neighbor]['cluster'] == cluster_id:
                edge_data = nx_graph.get_edge_data(node, neighbor)
                if edge_data:
                    detail = (f"{node} -> {neighbor} -> {edge_data['relationship']}"
                              f" -> {edge_data['description']}")
                    community_info[cluster_id].append(detail)

    return community_info, community_mapping
"""


def summarize_communities(llm, community_info):
    """Generate and store summaries for each community.

    For every community the relationship details are joined into one text
    block, summarized concurrently via the LLM, and both the raw triplet
    text and the summary are returned keyed by community id.
    """

    def _summarize_one(text):
        """Ask the LLM for a summary of one community's relationships."""
        system_prompt = (
            "You are provided with a set of relationships from a knowledge graph, each represented as "
            "entity1->entity2->relation->relationship_description. Your task is to create a summary of these "
            "relationships. The summary should include the names of the entities involved and a concise synthesis "
            "of the relationship descriptions. The goal is to capture the most critical and relevant details that "
            "highlight the nature and significance of each relationship. Ensure that the summary is coherent and "
            "integrates the information in a way that emphasizes the key aspects of the relationships."
        )
        reply = llm.invoke([SystemMessage(content=system_prompt),
                            HumanMessage(content=text)])
        return reply.content

    community_summary = {}
    pending = {}
    # Fan the I/O-bound LLM requests out over a thread pool.
    with ThreadPoolExecutor() as executor:
        for community_id, details in community_info.items():
            details_text = "\n".join(details) + "."  # Ensure it ends with a period
            community_summary[community_id] = {"三元组": details_text}
            pending[executor.submit(_summarize_one, details_text)] = community_id
        for finished in as_completed(pending):
            community_id = pending[finished]
            try:
                community_summary[community_id]["社区报告"] = finished.result()
            except Exception as e:
                print(f"Error generating summary for {community_id}: {e}")

    return community_summary


def generate_answers(llm, question, context):
    """Answer *question* strictly from *context* using a RAG-style prompt."""
    system_text = """
    Using the information contained in the context, give a comprehensive answer to the question.
    Respond only to the question asked, response should be concise and relevant to the question.
    Provide the number of the source document when relevant.
    If the answer cannot be deduced from the context, do not give an answer.
    """
    user_template = """
    Context:{context}
    ---
    Now here is the question you need to answer.
    Question: {question}
    """
    messages = [
        SystemMessage(content=system_text),
        HumanMessage(content=user_template.format(context=context, question=question)),
    ]
    return llm.invoke(messages).content


def evaluate(question, ref_answer, ref_context, generated_answer, retieved_context):
    """Score a generated RAG answer with an Azure OpenAI judge model.

    Rates context relevance, faithfulness and answer relevance on a
    0.0-1.0 scale and returns the parsed JSON scores.

    Args:
        question: Original user question.
        ref_answer: Ground-truth (reference) answer.
        ref_context: Context the reference answer is derived from.
        generated_answer: Answer produced by the system under test.
        retieved_context: Context retrieved by the system (parameter name
            keeps the original spelling so existing callers keep working).

    Returns:
        dict: {"context_relevance", "faithfulness", "answer_relevance"}.
    """
    from langchain_openai import AzureChatOpenAI
    import os
    # SECURITY NOTE(review): hardcoded API key and endpoint committed to
    # source — move to environment/config management and rotate the key.
    # (Set here before constructing the client, which reads them.)
    os.environ[
        "AZURE_OPENAI_API_KEY"] = "G8E0KvS6TpVd7VWdviOT66ZkaQfUkstQIdZGpTFpayX8UceaB0B4JQQJ99BDACHYHv6XJ3w3AAAAACOG0YVo"
    os.environ["AZURE_OPENAI_ENDPOINT"] = "https://ninen-m9urer5t-eastus2.cognitiveservices.azure.com/"
    llm_eval = AzureChatOpenAI(
        azure_deployment="gpt-4.1-mini",  # or your deployment
        api_version="2024-12-01-preview",  # or your api version
        temperature=0.8,
        max_tokens=4096,
        timeout=60,
        max_retries=2,
        # other params...
    )

    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.output_parsers import JsonOutputParser
    # Doubled braces in the JSON skeleton stop ChatPromptTemplate from
    # treating the keys as template variables.
    eval_prompt = """
    You are an expert evaluator. Your task is to evaluate a generated answer in a Retrieval-Augmented Generation (RAG) system using the following inputs:

    1. Question: the original user question.
    2. Generated Answer: the answer generated by the system.
    3. Retrieved Context: the content retrieved by the RAG system to support the answer.
    4. Reference Answer: the ideal or ground-truth answer.
    5. Reference Context: the context from which the reference answer is derived.

    Please score the generated answer on a scale from 0.0 to 1.0 for the following metrics:

    - Context Relevance: How well does the retrieved context match the user question?
    - Faithfulness: To what extent is the generated answer factually consistent with the retrieved context only?
    - Answer Relevance: How well does the generated answer match the user question and the reference answer?

    Use this format in your response (only return the scores):

    {{
      "context_relevance": 0.0-1.0,
      "faithfulness": 0.0-1.0,
      "answer_relevance": 0.0-1.0
    }}
    """
    # The evaluation inputs are pre-formatted here (not via the prompt
    # template) and passed as the single '{input}' user message.
    eval_input = """
    Inputs:

    [Question]
    {question}

    [Generated Answer]
    {generated_answer}

    [Retrieved Context]
    {retrieved_context}

    [Reference Answer]
    {ref_answer}

    [Reference Context]
    {ref_context}
    """.format(
        question=question,
        generated_answer=generated_answer,
        retrieved_context=retieved_context,
        ref_answer=ref_answer,
        ref_context=ref_context
    )
    prompt = ChatPromptTemplate.from_messages(
        [('system', eval_prompt),
         ('user', '{input}')])
    chain = (prompt | llm_eval | JsonOutputParser())
    response = chain.invoke(eval_input)

    return response


def Generate_Answers(llm, question):
    """Classify the question (global vs local), retrieve matching context,
    answer it, self-check the answer, and merge a second-pass answer when
    the first one is judged insufficient.

    Relies on the module-level ``query_engine`` created in ``__main__``.

    Args:
        llm: Chat model exposing ``invoke(...)`` returning an object with a
            ``.content`` attribute.
        question (str): The user question.

    Returns:
        str: The final answer text.
    """
    Type_Prompt = """You are a smart classification assistant. Your task is to determine whether a given question is a "global-level question" or a "local-level question".
       - If the question is broad, high-level, or about overall strategies or system-wide concerns (e.g., "What is menstruation?" or "What is menstruation?"), classify it as: "Global-level question".
       - If the question is narrow, specific, or focused on a single component, parameter, or detail (e.g., "What does heavy menstrual bleeding represent?" or "What causes irritable temper during menopause?"), classify it as: "Local-level question".
       Only return: "Global-level question" or "Local-level question".
       Question: {question}
       Type:"""
    # BUG FIX: the template was previously sent with its {question}
    # placeholder unfilled.
    ques_type = llm.invoke(Type_Prompt.format(question=question)).content
    # Substring match tolerates surrounding quotes/whitespace in the reply.
    if 'Global-level question' in ques_type:
        # BUG FIX: global_search expects a list of queries; passing the bare
        # string made it iterate over individual characters.
        docs_community, community_context = query_engine.global_search([question])
        retieved_context = community_context
    else:
        retriever_dict = query_engine.read_faiss_index()
        context_arr = retriever_dict['sim'].invoke(question)
        retieved_context = query_engine.format_context(context_arr)
    answer = generate_answers(llm, question, retieved_context)
    print('第一次的回答：', answer)
    Answer_Reasonable = """You are an assistant for answering quality checks. Your current task is to determine whether the answer to a certain question is reasonable, comprehensive, and in-depth. If you think this answer is already good and comprehensive enough, please reply directly: 'Reasonable, can be ended'.
       If you think your answer is unreasonable or clearly insufficient, please reply 'unreasonable'
       ---
       Please handle the following:
       Problem: {question}
       Original answer: {answer}
       Type:
       """
    # BUG FIX: fill the placeholders and read .content; the original
    # compared the raw message object against the string 'reasonable',
    # which was always False.
    first_answer = llm.invoke(Answer_Reasonable.format(question=question, answer=answer)).content
    # The prompt asks for 'Reasonable, can be ended' vs 'unreasonable';
    # startswith distinguishes the two after case-folding.
    if first_answer.strip().lower().startswith('reasonable'):
        final_answer = answer
        print("answer is reasonable")
        print('1最终的回答是：', final_answer)
    else:
        print("answer is unreasonable")
        # Second pass: retrieve from the plain RAG index and re-answer.
        retriever_dict = query_engine.read_faiss_index()
        context_arr = retriever_dict['sim'].invoke(question)
        retieved_context = query_engine.format_context(context_arr)
        answer_2nd = generate_answers(llm, question, retieved_context)
        Integrate_Answers = """You are a content integration assistant, whose task is to merge two existing answers and output a final answer that is closer to the problem, clearer, and more valuable.
               Please follow the following requirements:
               1. Clearly understand the problem and ensure that the final answer stays at the core of the problem.
               2. Analyze the two responses and extract valuable information from them.
               3. Remove redundant, duplicated, or irrelevant content and merge logically consistent parts.
               4. Use concise and professional language to provide clearer and more comprehensive answers.
               ---
               Please handle the following:
               Problem: {question}
               Answer 1: {answer}
               Answer 2: {answer_2nd}
               Please integrate these two answers into a final answer that is closer to the question and of higher quality."""
        # BUG FIX: fill the placeholders before invoking (previously sent raw).
        final_answer = llm.invoke(
            Integrate_Answers.format(question=question, answer=answer, answer_2nd=answer_2nd)).content
        print('2最终的回答是：', final_answer)

    return final_answer


# Library of natural-language report templates for menstrual-health metrics,
# grouped by "category"; fields in {braces} are intended to be filled via
# str.format by the caller.
# NOTE(review): several templates contain non-identifier placeholders such as
# {正常/不正常} and {1/4temp ~3/4temp} that str.format cannot fill directly —
# confirm how these templates are rendered.
# NOTE(review): the second entry's category is "cycle_le" while the first is
# "cycle_len" — looks like a typo; confirm before relying on category lookups.
template_library = [
                    {
                        "id": "cycle_length_regular",
                        "category": "cycle_len",
                        "template": (
                            "你上周期的周期长度为 {last_cycle_len} 天, 正常周期长度范围是24~38, 周期{cycle_type}"
                            "近6次周期在 {min}-{max} 天之间波动，均值为 {mean} 天，"
                            "波动范围为 {diff} 天，属于{diff_level}。"
                            "当前周期被判定为 {cycle_len_category}，整体周期{trend_comment}。"
                        )
                    },
                    {
                        "id": "cycle_length_irregular",
                        "category": "cycle_le",
                        "template": (
                            "近6个周期你出现{m}次偏离。"
                            "你上周期的周期长度为 {last_cycle_len} 天, 正常周期长度范围是24~38, 周期{cycle_type}"
                            "近6次周期在 {min}-{max} 天之间波动，均值为 {mean} 天，"
                            "波动范围为 {diff} 天，属于 {diff_level}。"
                            "当前周期被判定为{cycle_len_category}，整体周期{trend_comment}。"
                        )
                    },
                    {
                        "id": "cycle_diff_regular",
                        "category": "cycle_diff",
                        "template": (
                            "你的月经周期变化小，非常规律！"
                            "作为{age}岁的你你近6次周期中最长周期长度为{max}天，最短周期长度为{min}天，周期长度差异为{diff}天，正常周期长度差异范围是≤9天."
                            "完全符合正常规律标准（差异≤7天）。"
                        )
                    },
                    {
                        "id": "cycle_diff_irregular",
                        "category": "cycle_diff",
                        "template": (
                            "你的月经周期出现变化，需持续关注后续变化！"
                            "作为{age}岁的你你近6次周期中最长周期长度为{max}天，最短周期长度为{min}天，周期长度差异为{diff}天，正常周期长度差异范围是≤9天."
                            "超出正常规律标准（差异≤7天）。你的周期波动{type}"
                        )
                    },
                    {
                        "id": "period_regular",
                        "category": "period_len",
                        "template": (
                            "你上周期的经期长度为{n}天，经期长度{正常/不正常}，正常长度范围是≤8天。"
                            "近6次你的经期长度在{min}-{max}天之间波动，均值为{mean}天。都在正常长度范围内波动共出现{m}次偏离周期"
                        )
                    },
                    {
                        "id": "flow_regular",
                        "category": "flow_type",
                        "template": (
                            "你上周期的月经量为 {n}，属于{正常/不正常}范围。正常月经量通常为一次周期出血总量约为 30~80ml。"
                            "近6次你的月经量评估结果在 {m} 之间波动。建议结合日常出血感受（如是否频繁更换卫生巾、是否有血块）进一步观察趋势。"
                        )
                    },
                    {
                        "id": "skin_regular",
                        "category": "skin_temp",
                        "template": (
                            "你上周期的皮肤温度在经期内平均为 {mean}℃，该值在{正常/偏高/偏低}范围，基础体温多维持在 {1/4temp ~3/4temp}℃。"
                            "近6个周期中，你在经期内的皮肤温度平均值在 {min}~{max}℃ 之间波动，均值为 {mean}℃，共出现 {m} 次异常偏高/偏低趋势"
                        )
                    },
                    {
                        "id": "skin_irregular",
                        "category": "skin_temp",
                        "template": (
                            "你上周期的皮肤温度在经期内平均为 {mean}℃，该值在{正常/偏高/偏低}范围，基础体温多维持在 {1/4temp ~3/4temp}℃。"
                            "近6个周期中，你在经期内的皮肤温度平均值在 {min}~{max}℃ 之间波动，均值为 {mean}℃，共出现 {m} 次异常偏高/偏低趋势"
                        )
                    },
                ]

if __name__ == '__main__':
    from langchain_deepseek import ChatDeepSeek

    # SECURITY NOTE(review): hardcoded API key committed to source — move it
    # to environment/config management and rotate the key.
    llm = ChatDeepSeek(
        model="ep-20250210212907-wbcvx",
        temperature=0.8,
        max_tokens=None,
        # timeout=10,
        # max_retries=3,
        api_key="8bfa4593-7558-4c38-9aac-705cdbafd8a1",
        api_base="https://ark.cn-beijing.volces.com/api/v3",
    )

    pdf_path = './data/knowledge_database/source_files'
    data_dir = "./data/knowledge_database"
    embedding_model_path = r'./data/models/BAAI/bge-small-en-v1___5'

    # NOTE(review): absolute Windows paths — presumably developer-local;
    # confirm before running elsewhere.
    osa_entity = pd.read_csv(r'D:\LLM\ring-health-partner\entity_node_woman_new.csv')
    osa_relation = pd.read_csv(r'D:\LLM\ring-health-partner\relation_woman_new.csv')
    entity_node = osa_entity.to_dict(orient='records')
    existing_relations = osa_relation.to_dict(orient='records')

    # NOTE(review): GraphExtraction, cal_final_weight, build_communities and
    # HuggingFaceEmbeddings are not imported in the visible part of this
    # file (the graph_config / langchain_community imports at the top are
    # commented out) — confirm where these names come from.
    graph_extractor = GraphExtraction(llm, embedding_model_path)
    # documents = graph_extractor.create_mayo_document(disease_features_merged)
    # entity_node, existing_relations = graph_extractor.extract_entity_relationship(document_list) # Extract entities and relations
    osa_graph = graph_extractor.graph_store(entity_node, existing_relations)
    entity_node_df = pd.DataFrame(entity_node)
    existing_relations_df = pd.DataFrame(existing_relations)

    # Build community info and attach cluster/level info to the nodes dataframe
    osa_graph = cal_final_weight(osa_graph)
    community_info, community_mapping = build_communities(osa_graph, max_cluster_size=500, resolution=0.2,
                                                          extra_forced_iterations=0, randomness=0.001, random_seed=42)
    community_summary = summarize_communities(llm, community_info)
    df_community_summary = pd.DataFrame(community_summary).T
    df_community_summary.to_csv("community_summary.csv", index=False)

    # """Real-time retrieval"""
    embedding_model = HuggingFaceEmbeddings(
        model_name=embedding_model_path,
        encode_kwargs={"normalize_embeddings": True},
    )
    # df_tag_l2 = pd.read_csv(data_dir + "/tag_db/L2标签总览.csv")
    # df_entity_node = pd.read_csv(data_dir + "/knowledge_graph_csv/internal_entity_node.csv")
    # df_existing_relations = pd.read_csv(data_dir + "/knowledge_graph_csv/internal_existing_relations.csv")
    # df_community_summary = pd.read_csv(data_dir + "/knowledge_graph_csv/community_summary.csv")
    # Pre-built faiss index can be used directly online
    # build_vectorstores(mode="rag",
    #                    df_entity_node=df_entity_node,
    #                    df_existing_relations=df_existing_relations,
    #                    df_community_summary=df_community_summary,
    #                    source_dir=pdf_path,
    #                    embedding_model=embedding_model,
    #                    )
    # Traditional RAG retrieval
    # retriever_dict = query_engine.read_faiss_index()
    # context_arr = retriever_dict['sim'].invoke(question)
    # retieved_context = query_engine.format_context(context_arr)

    df_entity_node = osa_entity
    df_existing_relations = osa_relation
    # # Offline-stored dataframes used to build the retrievers
    query_engine = QueryEngine(
        embedding_model,
        df_entity_node,
        df_existing_relations,
        df_community_summary
    )
    from dataset import rag_internal_questions

    for i in range(6, 8):
        item = rag_internal_questions[i]
        question = item['question']
        node_entities, triplets, rels_context = query_engine.local_search([question], 1, False)
        print("检索出的实体结点")
        for node in node_entities:
            print(node)
        print("检索出的关系三元组")
        for rel in triplets:
            print(rel)
        print("直接计算三元组")
        print(rels_context)
        # Nodes and relations form the context provided to the LLM
        # NOTE(review): this graph-based context is immediately overwritten
        # by the plain-RAG context below — confirm which is intended.
        retieved_context = f"Here is the definition of entities:{node_entities} Here is the relations:{triplets}"
        retriever_dict = query_engine.read_faiss_index()
        context_arr = retriever_dict['sim'].invoke(question)
        retieved_context = query_engine.format_context(context_arr)
        answer = generate_answers(llm, question, retieved_context)
        print(question)
        print("最终回答", answer)
        print("参考回答：", item['answer'])

        # Start evaluation
        eval_res = evaluate(question, item['answer'], item['related_context'], answer, retieved_context)
        import json

        print("评估结果\n", json.dumps(eval_res, indent=4, ensure_ascii=False))


