import asyncio
import os.path
import traceback
from pathlib import Path

import pandas as pd
import tiktoken
from graphrag.query.context_builder.entity_extraction import EntityVectorStoreKey
from graphrag.query.indexer_adapters import (
    read_indexer_entities,
    read_indexer_relationships,
    read_indexer_reports,
    read_indexer_text_units, read_indexer_communities,
)
from graphrag.query.llm.oai.chat_openai import ChatOpenAI
from graphrag.query.llm.oai.embedding import OpenAIEmbedding
from graphrag.query.llm.oai.typing import OpenaiApiType
from graphrag.query.structured_search.global_search.community_context import GlobalCommunityContext
from graphrag.query.structured_search.global_search.search import GlobalSearch
from graphrag.query.structured_search.local_search.mixed_context import (
    LocalSearchMixedContext,
)
from graphrag.query.structured_search.local_search.search import LocalSearch
from graphrag.vector_stores.lancedb import LanceDBVectorStore
from typing import Any
from mcp.server.fastmcp import FastMCP
import graphrag.api as api
from graphrag.config.load_config import load_config
from graphrag.index.typing import PipelineRunResult

mcp = FastMCP("rag_ML")
USER_AGENT = 'rag_ML-app/1.0'

# Directory containing this source file, normalized to forward slashes.
# BUG FIX: the original used `__path__`, which is only defined for packages
# (so this line raised NameError at import time), and also omitted
# `dirname()`, so the resulting path pointed at the file itself rather than
# its parent directory.
file_path = os.path.dirname(os.path.abspath(__file__)).replace("\\", "/")

# Location of the GraphRAG index output and the LanceDB vector store.
INPUT_DIR = f"{file_path}/openl1/output"
#INPUT_DIR = "openl2/output"
LANCEDB_URI = f"{INPUT_DIR}/lancedb"

# Parquet table names produced by the GraphRAG indexing pipeline.
COMMUNITY_REPORT_TABLE = "create_final_community_reports"
ENTITY_TABLE = "create_final_nodes"
ENTITY_EMBEDDING_TABLE = "create_final_entities"
RELATIONSHIP_TABLE = "create_final_relationships"
TEXT_UNIT_TABLE = "create_final_text_units"
# Community hierarchy level used when reading entities/reports.
COMMUNITY_LEVEL = 2

COMMUNITY_TABLE = "create_final_communities"

@mcp.tool()
async def localSearchStatus(query:str):
    """Report whether the local-search data files are in place.

    Returns a timestamped status line containing the current working
    directory, the resolved input directory, and whether the entity parquet
    file exists. The ``query`` argument is accepted for tool-interface
    compatibility but is not used.
    """
    cwd = os.path.abspath(".")
    resolved_input = os.path.abspath(INPUT_DIR)
    has_entity_table = os.path.exists(f"{resolved_input}/{ENTITY_TABLE}.parquet")

    # Timestamp formatted as yyyy-mm-dd hh:mm:ss.
    stamp = pd.Timestamp.now().strftime("%Y-%m-%d %H:%M:%S")

    return f"{stamp}：当前路径：{cwd}\n，Input路径：{resolved_input}\n，实体文件是否存在：{has_entity_table}\n\n"

@mcp.tool()
async def localSearch(query:str):
    """Answer a question about ID3/C4.5 decision-tree knowledge via GraphRAG local search.

    Loads the index artifacts (entities, relationships, community reports and
    text units) from INPUT_DIR, wires them into a LocalSearch engine backed by
    a LanceDB description-embedding store, and runs the query asynchronously.

    :param query: 用户提出的具体问题
    :return: the LocalSearch result object on success, or the string
             "查询失败" if any step fails (details are appended to the log file).
    """
    current_time = pd.Timestamp.now().strftime("%Y-%m-%d %H:%M:%S")
    with open("d:/graphrag_server_log.log", "a") as f:
        f.write(f"{current_time}：call local_search\n")

    try:
        ####################################################################################
        # Read the nodes table to get community and degree data.
        entity_df = pd.read_parquet(f"{INPUT_DIR}/{ENTITY_TABLE}.parquet")
        entity_embedding_df = pd.read_parquet(f"{INPUT_DIR}/{ENTITY_EMBEDDING_TABLE}.parquet")

        entities = read_indexer_entities(entity_df, entity_embedding_df, COMMUNITY_LEVEL)

        # Load description embeddings from the local LanceDB vector store.
        # To connect to a remote db, specify url and port values instead.
        description_embedding_store = LanceDBVectorStore(
            collection_name="default-entity-description",
        )
        description_embedding_store.connect(db_uri=LANCEDB_URI)

        print(f"Entity count: {len(entity_df)}")
        print(entity_df.head())

        ####################################################################################
        relationship_df = pd.read_parquet(f"{INPUT_DIR}/{RELATIONSHIP_TABLE}.parquet")
        relationships = read_indexer_relationships(relationship_df)

        print(f"Relationship count: {len(relationship_df)}")
        print(relationship_df.head())

        ####################################################################################
        report_df = pd.read_parquet(f"{INPUT_DIR}/{COMMUNITY_REPORT_TABLE}.parquet")
        reports = read_indexer_reports(report_df, entity_df, COMMUNITY_LEVEL)

        print(f"Report records: {len(report_df)}")
        print(report_df.head())

        ####################################################################################

        text_unit_df = pd.read_parquet(f"{INPUT_DIR}/{TEXT_UNIT_TABLE}.parquet")
        text_units = read_indexer_text_units(text_unit_df)

        print(f"Text unit records: {len(text_unit_df)}")
        print(text_unit_df.head())

        ####################################################################################
        # SECURITY: API keys are hard-coded below. Move them to environment
        # variables or a secrets store before sharing or deploying this file.
        llm_api_key = "sk-36f1e4fd00844220a6cebde72bb87afd"
        embeding_api_key = "0d28f030249b4fe38dc501510748b595.9SGm9tuJBlcgqKBm"

        llm_model = "deepseek-chat"
        embedding_model = "embedding-3"

        llm_api_base = "https://api.deepseek.com"
        embedding_model_api_base = "https://open.bigmodel.cn/api/paas/v4/"

        # Chat model used to generate the final answer.
        llm = ChatOpenAI(
            api_key=llm_api_key,
            model=llm_model,
            api_base=llm_api_base,
            api_type=OpenaiApiType.OpenAI,
            max_retries=20,
        )

        token_encoder = tiktoken.get_encoding("cl100k_base")

        # Embedding model used to embed the query for entity matching.
        text_embedder = OpenAIEmbedding(
            api_key=embeding_api_key,
            api_base=embedding_model_api_base,
            api_type=OpenaiApiType.OpenAI,
            model=embedding_model,
            deployment_name=embedding_model,
            max_retries=20,
        )

        # Mixed context combines reports, text units, entities and relations.
        context_builder = LocalSearchMixedContext(
            community_reports=reports,
            text_units=text_units,
            entities=entities,
            relationships=relationships,
            covariates=None,
            entity_text_embeddings=description_embedding_store,
            embedding_vectorstore_key=EntityVectorStoreKey.ID,
            text_embedder=text_embedder,
            token_encoder=token_encoder,
        )

        local_context_params = {
            "text_unit_prop": 0.5,
            "community_prop": 0.1,
            "conversation_history_max_turns": 5,
            "conversation_history_user_turns_only": True,
            "top_k_mapped_entities": 10,
            "top_k_relationships": 10,
            "include_entity_rank": True,
            "include_relationship_weight": True,
            "include_community_rank": True,
            "return_candidate_context": True,
            "embedding_vectorstore_key": EntityVectorStoreKey.ID,
            "max_tokens": 12_000,
        }

        llm_params = {
            "max_tokens": 2_000,
            "temperature": 0.0,
        }

        search_engine = LocalSearch(
            llm=llm,
            context_builder=context_builder,
            token_encoder=token_encoder,
            llm_params=llm_params,
            context_builder_params=local_context_params,
            response_type="multiple paragraphs",
        )

        # Run the search on the current event loop.
        result = await search_engine.asearch(query)

        current_time = pd.Timestamp.now().strftime("%Y-%m-%d %H:%M:%S")

        # Log the raw UTF-8 byte repr of the context text (as before).
        result_utf8 = result.context_text.encode('utf-8')
        with open("d:/graphrag_server_log.log", "a") as f:
            f.write(f"{current_time}：call local_search_end {result_utf8}\n")

        return result
    except Exception as e:
        current_time = pd.Timestamp.now().strftime("%Y-%m-%d %H:%M:%S")

        # Capture the full traceback alongside the message for the log.
        error_info = traceback.format_exc() + "\n" + str(e)
        with open("d:/graphrag_server_log.log", "a") as f:
            f.write(f"{current_time}：call local_search_error {error_info}\n")
        # BUG FIX: the original returned from a `finally` block, which silently
        # swallowed any exception raised during except-branch logging and would
        # have raised NameError on `result` if that logging failed before the
        # fallback value was assigned. Return the fallback explicitly instead.
        return "查询失败"

@mcp.tool()
async def localSearchAPI(query:str):
    """Query D3/C4.5 decision-tree knowledge through the GraphRAG local-search API.

    :param query: 用户提出的具体问题
    :return: the response produced by ``api.local_search``
    """
    PROJECT_DIRECTORY = "D:/sources/AI Program/MCPProjects/JiuTian/GraphRAG/graphrag_server/src/openl2"
    graphrag_config = load_config(Path(PROJECT_DIRECTORY))

    ####################################################################################
    # Load the index artifacts (nodes/community/report tables) the API needs.
    entity_frame = pd.read_parquet(f"{INPUT_DIR}/{ENTITY_TABLE}.parquet")
    community_frame = pd.read_parquet(f"{INPUT_DIR}/{COMMUNITY_TABLE}.parquet")
    report_frame = pd.read_parquet(f"{INPUT_DIR}/{COMMUNITY_REPORT_TABLE}.parquet")

    response, _context = await api.local_search(
        config=graphrag_config,
        entities=entity_frame,
        communities=community_frame,
        community_reports=report_frame,
        community_level=2,
        dynamic_community_selection=False,
        query=query,
        response_type="multiple paragraphs",
    )
    return response
@mcp.tool()
async def globalSearch(query:str):
    """Answer a question about ID3/C4.5 decision-tree knowledge via GraphRAG global search.

    Loads communities, entities and community reports from INPUT_DIR, builds a
    GlobalSearch engine (map/reduce over community reports) and runs the query.

    :param query: 用户提出的具体问题
    :return: the generated response text
    """

    token_encoder = tiktoken.get_encoding("cl100k_base")

    # Index artifacts required by the global-search context builder.
    community_df = pd.read_parquet(f"{INPUT_DIR}/{COMMUNITY_TABLE}.parquet")
    entity_df = pd.read_parquet(f"{INPUT_DIR}/{ENTITY_TABLE}.parquet")
    report_df = pd.read_parquet(f"{INPUT_DIR}/{COMMUNITY_REPORT_TABLE}.parquet")
    entity_embedding_df = pd.read_parquet(f"{INPUT_DIR}/{ENTITY_EMBEDDING_TABLE}.parquet")

    communities = read_indexer_communities(community_df, entity_df, report_df)
    reports = read_indexer_reports(report_df, entity_df, COMMUNITY_LEVEL)
    entities = read_indexer_entities(entity_df, entity_embedding_df, COMMUNITY_LEVEL)

    context_builder = GlobalCommunityContext(
        community_reports=reports,
        communities=communities,
        entities=entities,
        token_encoder=token_encoder,
    )

    context_builder_params = {
        "use_community_summary": False,
        "shuffle_data": True,
        "include_community_rank": True,
        "min_community_rank": 0,
        "community_rank_name": "rank",
        "include_community_weight": True,
        "community_weight_name": "occurrence weight",
        "normalize_community_weight": True,
        "max_tokens": 12_000,
        "context_name": "Reports",
    }

    # Map stage returns structured JSON; reduce stage produces the final text.
    map_llm_params = {
        "max_tokens": 1000,
        "temperature": 0.0,
        "response_format": {"type": "json_object"},
    }

    reduce_llm_params = {
        "max_tokens": 2000,
        "temperature": 0.0,
    }

    ####################################################################################
    # SECURITY: API key is hard-coded. Move it to an environment variable or a
    # secrets store before sharing or deploying this file.
    llm_api_key = "0d28f030249b4fe38dc501510748b595.9SGm9tuJBlcgqKBm"

    llm_model = "glm-4-flash"

    llm_api_base = "https://open.bigmodel.cn/api/paas/v4/"

    llm = ChatOpenAI(
        api_key=llm_api_key,
        model=llm_model,
        api_base=llm_api_base,
        api_type=OpenaiApiType.OpenAI,
        max_retries=20,
    )

    search_engine = GlobalSearch(
        llm=llm,
        context_builder=context_builder,
        token_encoder=token_encoder,
        max_data_tokens=12_000,
        map_llm_params=map_llm_params,
        reduce_llm_params=reduce_llm_params,
        allow_general_knowledge=False,
        json_mode=True,
        context_builder_params=context_builder_params,
        concurrent_coroutines=32,
        response_type="multiple paragraphs",
    )

    result = await search_engine.asearch(query)
    print(result.response)
    # BUG FIX: the original printed but never returned, so MCP callers always
    # received None despite the documented return value.
    return result.response

#######################################################
# Demo: local search
# asyncio.run(localSearch())

#######################################################
# Demo: global search
# asyncio.run(globalSearch())

@mcp.tool()
async def index_by_api(dir:str):
    """Build the knowledge-base index for a dataset directory via the GraphRAG API.

    :param dir: 数据集的目录
    """
    PROJECT_DIRECTORY = f"D:/sources/AI Program/MCPProjects/JiuTian/GraphRAG/graphrag_server/src/{dir}"
    graphrag_config = load_config(Path(PROJECT_DIRECTORY))

    outcomes: list[PipelineRunResult] = await api.build_index(config = graphrag_config)

    # Print one line per pipeline workflow: its name and success/error status.
    for outcome in outcomes:
        if outcome.errors:
            status = f"error\n{outcome.errors}"
        else:
            status = "success"
        print(f"Workflow Name: {outcome.workflow}\tStatus: {status}")

if __name__=='__main__':
    # Run the MCP server over stdio so an MCP client can drive the tools.
    mcp.run(transport='stdio')
    #asyncio.run(index_by_api("openl2"))
    # asyncio.run(localSearch("使用要于搜索方法搜索ID3算法的相关知识。"))