# import tiktoken
#
# from graphrag.query.context_builder.entity_extraction import EntityVectorStoreKey
# from graphrag.query.indexer_adapters import (
#     read_indexer_covariates,
#     read_indexer_entities,
#     read_indexer_relationships,
#     read_indexer_reports,
#     read_indexer_text_units,
# )
# from graphrag.query.input.loaders.dfs import (
#     store_entity_semantic_embeddings,
# )
# from graphrag.query.llm.oai.chat_openai import ChatOpenAI
# from graphrag.query.llm.oai.embedding import OpenAIEmbedding
# from graphrag.query.llm.oai.typing import OpenaiApiType
# from graphrag.query.question_gen.local_gen import LocalQuestionGen
# from graphrag.query.structured_search.local_search.mixed_context import (
#     LocalSearchMixedContext,
# )
# from graphrag.query.structured_search.local_search.search import LocalSearch
# from graphrag.vector_stores.lancedb import LanceDBVectorStore
# from neo4j import GraphDatabase
# import pandas as pd
# import time
#
#
# class Rag:
#
#     @classmethod
#     async def local(cls, query):
#         INPUT_DIR = "D:\\python\\GraphRag\\open\\output"  # TODO: store each user's incorrect-answer records separately and save them under the input folder
#         LANCEDB_URI = f"{INPUT_DIR}/lancedb"  # TODO: then persist, per user, the set of knowledge points already acquired (represented as a neo4j graph)
#         # TODO: and also show the user the set of knowledge points not yet acquired
#
#         COMMUNITY_REPORT_TABLE = "create_final_community_reports"
#         ENTITY_TABLE = "create_final_nodes"
#         ENTITY_EMBEDDING_TABLE = "create_final_entities"
#         RELATIONSHIP_TABLE = "create_final_relationships"
#         TEXT_UNIT_TABLE = "create_final_text_units"
#         COMMUNITY_LEVEL = 2
#
#         entity_df = pd.read_parquet(f"{INPUT_DIR}/{ENTITY_TABLE}.parquet")
#         entity_embedding_df = pd.read_parquet(f"{INPUT_DIR}/{ENTITY_EMBEDDING_TABLE}.parquet")
#
#         entities = read_indexer_entities(entity_df, entity_embedding_df, COMMUNITY_LEVEL)
#
#         description_embedding_store = LanceDBVectorStore(
#             collection_name="default-entity-description",
#         )
#         description_embedding_store.connect(db_uri=LANCEDB_URI)
#         entity_description_embeddings = store_entity_semantic_embeddings(
#             entities=entities, vectorstore=description_embedding_store
#         )
#
#         relationship_df = pd.read_parquet(f"{INPUT_DIR}/{RELATIONSHIP_TABLE}.parquet")
#         relationships = read_indexer_relationships(relationship_df)
#
#         report_df = pd.read_parquet(f"{INPUT_DIR}/{COMMUNITY_REPORT_TABLE}.parquet")
#         reports = read_indexer_reports(report_df, entity_df, COMMUNITY_LEVEL)
#
#         text_unit_df = pd.read_parquet(f"{INPUT_DIR}/{TEXT_UNIT_TABLE}.parquet")
#         text_units = read_indexer_text_units(text_unit_df)
#
#         llm = ChatOpenAI(
#             api_key="ollama",
#             model="deepseek-R1",
#             api_base="http://localhost:11434/v1",  # "https://ai.devtool.tech/proxy/v1",
#             api_type=OpenaiApiType.OpenAI,
#             max_retries=20,
#         )
#
#         token_encoder = tiktoken.get_encoding("cl100k_base")
#
#         text_embedder = OpenAIEmbedding(
#             api_key="ollama",
#             api_base="http://localhost:11434/api",
#             api_type=OpenaiApiType.OpenAI,
#             model="nomic-embed-text",
#             max_retries=20,
#         )
#
#         context_builder = LocalSearchMixedContext(
#             community_reports=reports,
#             text_units=text_units,
#             entities=entities,
#             relationships=relationships,
#             covariates=None,
#             entity_text_embeddings=description_embedding_store,
#             embedding_vectorstore_key=EntityVectorStoreKey.ID,
#             text_embedder=text_embedder,
#             token_encoder=token_encoder,
#         )
#
#         local_context_params = {
#             "text_unit_prop": 0.5,
#             "community_prop": 0.1,
#             "conversation_history_max_turns": 5,
#             "conversation_history_user_turns_only": True,
#             "top_k_mapped_entities": 10,
#             "top_k_relationships": 10,
#             "include_entity_rank": True,
#             "include_relationship_weight": True,
#             "include_community_rank": True,
#             "return_candidate_context": True,
#             "embedding_vectorstore_key": EntityVectorStoreKey.ID,
#             "max_tokens": 12_000,
#         }
#
#         llm_params = {
#             "max_tokens": 2_000,
#             "temperature": 0.0,
#         }
#
#         search_engine = LocalSearch(
#             llm=llm,
#             context_builder=context_builder,
#             token_encoder=token_encoder,
#             llm_params=llm_params,
#             context_builder_params=local_context_params,
#             response_type="multiple paragraphs",
#         )
#
#         result = await search_engine.asearch(query)
#         return result.response
#
#     @classmethod
#     def generate_graph(cls):
#         GRAPHRAG_FOLDER = "D:\\python\\GraphRag\\open\\output"  # path to your GraphRAG output folder
#
#         NEO4J_USER = "neo4j"  # your Neo4j username
#         NEO4J_PASSWORD = "13579Fsz"  # your Neo4j password
#         NEO4J_URL = "bolt://localhost:7687"  # default bolt URL; normally no change needed
#         NEO4J_DATABASE = "neo4j"  # your database name; "neo4j" unless you changed it
#         driver = GraphDatabase.driver(NEO4J_URL, auth=(NEO4J_USER, NEO4J_PASSWORD))
#
#         def batched_import(statement, df, batch_size=100):
#             total = len(df)
#             start_s = time.time()
#             for start in range(0, total, batch_size):
#                 batch = df.iloc[start:min(start + batch_size, total)]
#                 result = driver.execute_query("UNWIND $rows AS value" + statement,
#                                               rows=batch.to_dict('records'),
#                                               database=NEO4J_DATABASE)
#                 print(result.summary.counters)
#             print(f'{total} rows in {time.time() - start_s}s.')
#             return total
#
#         statements = """
#         create constraint chunk_id if not exists for (c:__Chunk__) require c.id is unique;
#         create constraint document_id if not exists for (d:__Document__) require d.id is unique;
#         create constraint community_id if not exists for (c:__Community__) require c.community is unique;
#         create constraint entity_id if not exists for (e:__Entity__) require e.id is unique;
#         create constraint entity_title if not exists for (e:__Entity__) require e.name is unique;
#         create constraint covariate_title if not exists for (e:__Covariate__) require e.title is unique;
#         create constraint related_id if not exists for ()-[rel:RELATED]->() require rel.id is unique;
#         """.split(";")
#         for statement in statements:
#             if len((statement or "").strip()) > 0:
#                 print(statement)
#                 driver.execute_query(statement)
#         # import documents
#         doc_df = pd.read_parquet(f"{GRAPHRAG_FOLDER}/create_final_documents.parquet")
#         # import documents
#         statement = """
#         MERGE (d:__Document__ {id:value.id})
#         SET d += value {.title}
#         """
#         batched_import(statement, doc_df)
#         # import text units (chunks)
#         text_df = pd.read_parquet(f'{GRAPHRAG_FOLDER}/create_final_text_units.parquet',
#                                   columns=["id", "text", "n_tokens", "document_ids"])
#         statement = """
#         MERGE (c:__Chunk__ {id:value.id})
#         SET c += value {.text, .n_tokens}
#         WITH c, value
#         UNWIND value.document_ids AS document
#         MATCH (d:__Document__ {id:document})
#         MERGE (c)-[:PART_OF]->(d)
#         """
#         batched_import(statement, text_df)
#
#         # import entities
#         entity_df = pd.read_parquet(f'{GRAPHRAG_FOLDER}/create_final_entities.parquet',
#                                     columns=["title", "type", "description", "human_readable_id", "id", "text_unit_ids"]
#                                     )
#         entity_statement = """
#         MERGE (e:__Entity__ {id:value.id})
#         SET e += value {.human_readable_id, .description, name:replace(value.title,'"','')}
#         WITH e, value
#         CALL apoc.create.addLabels(e, case when coalesce(value.type,"") = "" then [] else [apoc.text.upperCamelCase(replace(value.type,'"',''))] end) yield node
#         UNWIND value.text_unit_ids AS text_unit
#         MATCH (c:__Chunk__ {id:text_unit})
#         MERGE (c)-[:HAS_ENTITY]->(e)
#         """
#         # CALL db.create.setNodeVectorProperty(e, "description", value.description)
#         batched_import(entity_statement, entity_df)
#
#         # import relationships
#         rel_df = pd.read_parquet(f'{GRAPHRAG_FOLDER}/create_final_relationships.parquet',
#                                  columns=["source", "target", "id", "weight", "human_readable_id", "description",
#                                           "text_unit_ids"]
#                                  )
#         rel_statement = """
#             MATCH (source:__Entity__ {name:replace(value.source,'"','')})
#             MATCH (target:__Entity__ {name:replace(value.target,'"','')})
#             // not necessary to merge on id as there is only one relationship per pair
#             MERGE (source)-[rel:RELATED {id: value.id}]->(target)
#             SET rel += value { .weight, .human_readable_id, .description, .text_unit_ids}
#             RETURN count(*) as createdRels
#         """
#
#         batched_import(rel_statement, rel_df)
#
#         # import communities
#         community_df = pd.read_parquet(f'{GRAPHRAG_FOLDER}/create_final_communities.parquet',
#                                        columns=["id", "level", "title", "text_unit_ids", "relationship_ids"]
#                                        )
#         statement = """
#         MERGE (c:__Community__ {community:value.id})
#         SET c += value {.level, .title}
#         /*
#         UNWIND value.text_unit_ids as text_unit_id
#         MATCH (t:__Chunk__ {id:text_unit_id})
#         MERGE (c)-[:HAS_CHUNK]->(t)
#         WITH distinct c, value
#         */
#         WITH *
#         UNWIND value.relationship_ids as rel_id
#         MATCH (start:__Entity__)-[:RELATED {id:rel_id}]->(end:__Entity__)
#         MERGE (start)-[:IN_COMMUNITY]->(c)
#         MERGE (end)-[:IN_COMMUNITY]->(c)
#         RETURN count(distinct c) as createdCommunities
#         """
#         batched_import(statement, community_df)
#         community_report_df = pd.read_parquet(f'{GRAPHRAG_FOLDER}/create_final_community_reports.parquet',
#                                               columns=["id", "community", "level", "title", "summary", "findings",
#                                                        "rank", "rank_explanation", "full_content"]
#                                               )
#         # import community reports
#         community_statement = """
#         MERGE (c:__Community__ {community:value.community})
#         SET c += value {.level, .title, .rank, .rank_explanation, .full_content, .summary}
#         WITH c, value
#         UNWIND range(0, size(value.findings)-1) AS finding_idx
#         WITH c, value, finding_idx, value.findings[finding_idx] as finding
#         MERGE (c)-[:HAS_FINDING]->(f:Finding {id:finding_idx})
#         SET f += finding
#         """
#         batched_import(community_statement, community_report_df)
