# -*- encoding: utf-8 -*-
"""
@author: acedar  
@time: 2025/4/4 10:52
@file: graphrag_server_v1.py 
"""
#!/usr/bin/env python3
# coding=utf-8

import asyncio
import os.path
from collections.abc import AsyncGenerator
from pathlib import Path
from typing import Optional, Any

import pandas as pd

from graphrag.api import query as api
from graphrag.config.load_config import load_config
from graphrag.config.models.graph_rag_config import GraphRagConfig
from graphrag.config.resolve_path import resolve_paths
from graphrag.index.create_pipeline_config import create_pipeline_config
from graphrag.storage.factory import StorageFactory
from graphrag.utils.storage import load_table_from_storage
from pathlib import Path


# Basenames of the parquet tables emitted by the GraphRAG indexing pipeline.
ENTITY_NODES_TABLE = 'create_final_nodes'
ENTITY_EMBEDDING_TABLE = 'create_final_entities'
COMMUNITIES_TABLE = 'create_final_communities'
COMMUNITY_REPORT_TABLE = 'create_final_community_reports'
TEXT_UNIT_TABLE = 'create_final_text_units'
RELATIONSHIP_TABLE = 'create_final_relationships'
COVARIATE_TABLE = 'create_final_covariates'

# Tables required for a local search; a missing one raises during loading.
# NOTE(review): these names carry no ".parquet" suffix while the optional
# list below includes one — confirm load_table_from_storage accepts both forms.
local_search_parquet_list = [
    ENTITY_NODES_TABLE,
    ENTITY_EMBEDDING_TABLE,
    COMMUNITY_REPORT_TABLE,
    TEXT_UNIT_TABLE,
    RELATIONSHIP_TABLE
]
# Tables that may legitimately be absent; loaded as None when missing.
optional_parquet_list = [f'{COVARIATE_TABLE}.parquet']


async def resolve_parquet_files(
        config: GraphRagConfig,
        parquet_list: list[str],
        optional_list: list[str] | None = None,) -> dict[str, pd.DataFrame]:
    """Load GraphRAG indexing-output parquet tables into a dataframe dict.

    Tables are read from the storage backend declared in ``config`` (not
    from the local working directory).

    :param config: resolved GraphRAG configuration describing the storage.
    :param parquet_list: required table names; a missing table raises
        inside ``load_table_from_storage``.
    :param optional_list: table file names that may be absent; an absent
        one maps to ``None`` instead of raising.
    :return: mapping of table basename (extension stripped) -> dataframe,
        with ``None`` values for absent optional tables.
    """
    dataframe_dict: dict[str, pd.DataFrame | None] = {}
    pipeline_config = create_pipeline_config(config)
    storage_config = pipeline_config.storage.model_dump()  # type: ignore
    storage_obj = StorageFactory().create_storage(
        storage_type=storage_config["type"], kwargs=storage_config
    )
    # Required tables: let load_table_from_storage raise if one is missing.
    # (BUG FIX: removed a leftover debug print of os.path.abspath(name) —
    # it showed a bogus local path for tables that live in remote storage.)
    for parquet_file in parquet_list:
        df_key = parquet_file.split('.')[0]  # dict key = name without extension
        dataframe_dict[df_key] = await load_table_from_storage(
            name=parquet_file, storage=storage_obj
        )

    # Optional tables: map to None instead of erroring out when absent.
    if optional_list:
        for optional_file in optional_list:
            df_key = optional_file.split('.')[0]
            if await storage_obj.has(optional_file):
                dataframe_dict[df_key] = await load_table_from_storage(
                    name=optional_file, storage=storage_obj
                )
            else:
                dataframe_dict[df_key] = None

    return dataframe_dict

# *********************************** End of GraphRAG helper definitions ***********************************

# MCP server setup. NOTE(review): PEP 8 puts imports at the top of the file;
# this one is kept mid-file to preserve the original module side-effect order.
from mcp.server.fastmcp import FastMCP
mcp = FastMCP("graphrag_mcp")  # server name advertised to MCP clients
USER_AGENT = "graphrag_mcp-app/1.0"  # not referenced in the visible code — possibly intended for HTTP clients

@mcp.tool()
async def graphrag_query(query: str) -> str:
    """
    Provide supplementary knowledge for the novel "Battle Through the Heavens"
    (斗破苍穹) via a GraphRAG local search. [graphrag]
    :param query: natural-language question to answer from the indexed novel
    :return: the local-search response text
    """
    # Run a GraphRAG *local* search. Config and all parquet tables are
    # reloaded from disk on every tool invocation — no caching.
    root_dir = './doupocangqiong/'  # index root, relative to the process CWD
    config_path = None  # None -> let load_config discover settings under root
    community_level = 2
    response_type = 'Multiple Paragraphs'

    root = Path(root_dir).resolve()
    config = load_config(root, config_path)
    resolve_paths(config)  # rewrites relative paths in config against root

    # Load the required tables (plus the optional covariates table, which
    # resolves to None when the index was built without covariates).
    dataframe_dict = await resolve_parquet_files(
        config=config,
        parquet_list=local_search_parquet_list,
        optional_list=optional_parquet_list
    )
    final_nodes: pd.DataFrame = dataframe_dict[ENTITY_NODES_TABLE]
    final_entities: pd.DataFrame = dataframe_dict[ENTITY_EMBEDDING_TABLE]
    final_community_reports: pd.DataFrame = dataframe_dict[COMMUNITY_REPORT_TABLE]
    final_text_units: pd.DataFrame = dataframe_dict[TEXT_UNIT_TABLE]
    final_relationships: pd.DataFrame = dataframe_dict[RELATIONSHIP_TABLE]
    final_covariates: pd.DataFrame | None = dataframe_dict[COVARIATE_TABLE]

    # NOTE(review): api.local_search may also return non-str payloads in some
    # graphrag versions — confirm the str annotation against the pinned version.
    response, context_data = await api.local_search(
        config=config,
        nodes=final_nodes,
        entities=final_entities,
        community_reports=final_community_reports,
        text_units=final_text_units,
        relationships=final_relationships,
        covariates=final_covariates,
        community_level=community_level,
        response_type=response_type,
        query=query
    )
    # context_data (the retrieved supporting context) is discarded here.
    return response


async def local_search_demo():
    """Run a sample local-search query and print the raw response.

    Intended for manual testing via ``asyncio.run(local_search_demo())``
    (see the commented-out line in the ``__main__`` guard).
    """
    query = '萧炎的女性朋友有哪些？'
    # BUG FIX: the original awaited the undefined name `rag_query`, which
    # raised NameError at call time; the tool coroutine is `graphrag_query`.
    response = await graphrag_query(query)
    print("response", type(response), response)

if __name__ == "__main__":
    # 以标准 I/O 方式运行
    # asyncio.run(local_search_demo())

    mcp.run(transport='stdio')
