import os,json
import asyncio
import pandas as pd
import tiktoken
from openai import OpenAI

from graphrag.query.context_builder.entity_extraction import EntityVectorStoreKey
from graphrag.query.indexer_adapters import (
    read_indexer_covariates,
    read_indexer_entities,
    read_indexer_relationships,
    read_indexer_reports,
    read_indexer_text_units,
)
from graphrag.query.llm.oai.chat_openai import ChatOpenAI
from graphrag.query.llm.oai.embedding import OpenAIEmbedding
from graphrag.query.llm.oai.typing import OpenaiApiType
from graphrag.query.question_gen.local_gen import LocalQuestionGen
from graphrag.query.structured_search.local_search.mixed_context import (
    LocalSearchMixedContext,
)
from graphrag.query.structured_search.local_search.search import LocalSearch
from graphrag.vector_stores.lancedb import LanceDBVectorStore

# Directory containing the GraphRAG indexing pipeline output (parquet tables).
INPUT_DIR = "./myrag/output"
# URI of the LanceDB vector database written by the indexer.
LANCEDB_URI = f"{INPUT_DIR}/lancedb"

# Community report table name.
COMMUNITY_REPORT_TABLE = "create_final_community_reports"
# Entity (graph node) table name.
ENTITY_TABLE = "create_final_nodes"
# Entity embedding table name.
ENTITY_EMBEDDING_TABLE = "create_final_entities"
# Relationship table name.
RELATIONSHIP_TABLE = "create_final_relationships"
# Text unit table name.
TEXT_UNIT_TABLE = "create_final_text_units"
# Community hierarchy level to load reports/entities from.
COMMUNITY_LEVEL = 2

# Read the entity (node) table — community and degree data — and the
# entity-embedding table produced by the indexing pipeline.
entity_df = pd.read_parquet(f"{INPUT_DIR}/{ENTITY_TABLE}.parquet")
entity_embedding_df = pd.read_parquet(f"{INPUT_DIR}/{ENTITY_EMBEDDING_TABLE}.parquet")

# Build entity objects, filtered to the configured community level.
entities = read_indexer_entities(entity_df, entity_embedding_df, COMMUNITY_LEVEL)

# Open the LanceDB vector store holding the entity description embeddings.
# To connect to a remote database, specify url and port values instead.
description_embedding_store = LanceDBVectorStore(
    collection_name="default-entity-description",
)
description_embedding_store.connect(db_uri=LANCEDB_URI)

# print(f"Entity count: {len(entity_df)}")
# print(entity_df.head())

############
# Read the relationship (graph edge) table.
relationship_df = pd.read_parquet(f"{INPUT_DIR}/{RELATIONSHIP_TABLE}.parquet")
# Convert rows into graphrag relationship objects.
relationships = read_indexer_relationships(relationship_df)

# print(f"Relationship count: {len(relationship_df)}")
# print(relationship_df.head())

################

# Read the community report table.
report_df = pd.read_parquet(f"{INPUT_DIR}/{COMMUNITY_REPORT_TABLE}.parquet")
# Convert rows into report objects, filtered to COMMUNITY_LEVEL.
reports = read_indexer_reports(report_df, entity_df, COMMUNITY_LEVEL)

# print(f"Report records: {len(report_df)}")
# print(report_df.head())

###################

# Read the text unit (source chunk) table.
text_unit_df = pd.read_parquet(f"{INPUT_DIR}/{TEXT_UNIT_TABLE}.parquet")
# Convert rows into text unit objects.
text_units = read_indexer_text_units(text_unit_df)

# DeepSeek chat endpoint (OpenAI-compatible API) used as the generation LLM.
sd_api_key = os.getenv("DEEPSEEK_API_KEY")
llm_model = "deepseek-chat"
llm_api_base = "https://api.deepseek.com"

# Zhipu GLM endpoint (OpenAI-compatible API) used for text embeddings.
glm_api_key = os.getenv("GLM_API_KEY")
embedding_model = "embedding-3"
embedding_model_api_base = "https://open.bigmodel.cn/api/paas/v4"

# Chat LLM wrapper used by the search engine to generate answers.
llm = ChatOpenAI(
    api_key=sd_api_key,
    model=llm_model,
    api_base=llm_api_base,
    api_type=OpenaiApiType.OpenAI,  # plain OpenAI-compatible API, not Azure
    max_retries=20,
)

# Tokenizer used for context-window budgeting; cl100k_base is an
# approximation for the DeepSeek model — TODO confirm token counts match.
token_encoder = tiktoken.get_encoding("cl100k_base")

# Embedding client used to embed the query for entity vector search.
text_embedder = OpenAIEmbedding(
    api_key=glm_api_key,
    api_base=embedding_model_api_base,
    api_type=OpenaiApiType.OpenAI,
    model=embedding_model,
    deployment_name=embedding_model,
    max_retries=20,
)

# Context builder that mixes entities, relationships, reports and text
# units into a single prompt context for local search.
context_builder = LocalSearchMixedContext(
    community_reports=reports,
    text_units=text_units,
    entities=entities,
    relationships=relationships,
    covariates=None,  # no claim/covariate table loaded in this script
    entity_text_embeddings=description_embedding_store,
    # Must match the key the LanceDB collection was indexed with.
    embedding_vectorstore_key=EntityVectorStoreKey.ID,
    text_embedder=text_embedder,
    token_encoder=token_encoder,
)

# Knobs for how the context window budget is split and what gets included.
local_context_params = {
    "text_unit_prop": 0.5,       # fraction of budget for raw text units
    "community_prop": 0.1,       # fraction of budget for community reports
    "conversation_history_max_turns": 5,
    "conversation_history_user_turns_only": True,
    "top_k_mapped_entities": 10,
    "top_k_relationships": 10,
    "include_entity_rank": True,
    "include_relationship_weight": True,
    "include_community_rank": True,
    "return_candidate_context": True,
    # Must agree with embedding_vectorstore_key above.
    "embedding_vectorstore_key": EntityVectorStoreKey.ID,
    "max_tokens": 12_000,        # total context budget
}

# Generation parameters passed through to the chat LLM.
llm_params = {
    "max_tokens": 2_000,
    "temperature": 0.0,  # deterministic output
}

# Local search engine: retrieves graph context and asks the LLM to answer.
search_engine = LocalSearch(
    llm=llm,
    context_builder=context_builder,
    token_encoder=token_encoder,
    llm_params=llm_params,
    context_builder_params=local_context_params,
    response_type="multiple paragraphs",  # free-form description of the desired answer shape
)




# Raw OpenAI-compatible client used for the follow-up summarization call.
client = OpenAI(api_key=sd_api_key, base_url=llm_api_base)


prompt = "请帮我介绍下大连高新区有几个分园？"
result = search_engine.search(prompt)
# print(result)

prompt1 = f"请根据检索结果回答用户问题。\n用户问题：{prompt} \n检索结果：{json.dumps(result)}"
            
messages = [
    {"role": "user", "content": prompt1},
]


response = client.chat.completions.create(
    model="deepseek-chat",
    messages=messages,
    stream=False,
)

print(response.choices[0].message.content)