import asyncio
from pathlib import Path

from pydantic import validate_call
from typing import Optional, Any
import pandas as pd
from loguru import logger

from graphrag.index.cli import index_cli
from graphrag.index.emit import TableEmitterType
from graphrag.index.progress import ReporterType
from graphrag.index.progress import PrintProgressReporter
from graphrag.config import load_config, resolve_paths, GraphRagConfig
from graphrag.query.indexer_adapters import (
    read_indexer_covariates,
    read_indexer_entities,
    read_indexer_relationships,
    read_indexer_reports,
    read_indexer_text_units,
)
from graphrag.query.factories import get_global_search_engine, get_local_search_engine
from graphrag.query.cli import _resolve_parquet_files
from graphrag.query.api import _reformat_context_data, _get_embedding_description_store
from graphrag.query.structured_search.base import SearchResult
from graphrag.vector_stores.typing import VectorStoreFactory, VectorStoreType


def graphrag_init(root_path: str) -> None:
    """Bootstrap a new GraphRAG project under *root_path*.

    Thin wrapper over ``index_cli`` that runs it in ``init`` mode with the
    standard defaults (rich progress reporting, parquet output, no dry run).
    """
    index_cli(
        root_dir=root_path,
        init=True,
        resume="",
        update_index_id=None,
        verbose=False,
        memprofile=False,
        nocache=False,
        reporter=ReporterType.RICH,
        config_filepath=None,
        emit=[TableEmitterType.Parquet],
        dryrun=False,
        skip_validations=False,
        output_dir=None,
    )


def graphrag_index(root_path: str, resume_path: str = "") -> None:
    """Run the GraphRAG indexing pipeline on the project at *root_path*.

    Thin wrapper over ``index_cli`` with the standard defaults; pass
    *resume_path* to continue a previously interrupted indexing run.
    """
    index_cli(
        root_dir=root_path,
        init=False,
        resume=resume_path,
        update_index_id=None,
        verbose=False,
        memprofile=False,
        nocache=False,
        reporter=ReporterType.RICH,
        config_filepath=None,
        emit=[TableEmitterType.Parquet],
        dryrun=False,
        skip_validations=False,
        output_dir=None,
    )


class GlobalSearchWrapper:
    """Reusable wrapper around GraphRAG global (community-report) search.

    Loads the project configuration and the indexing parquet artifacts once
    at construction time and builds a search engine up front, so repeated
    queries do not pay the setup cost again.
    """

    def __init__(
        self,
        root_path: str,
        config_path: Optional[str] = None,
        data_path: Optional[str] = None,
        reporting_path: Optional[str] = None,
        community_level: int = 2,
        response_type: str = "Multiple Paragraphs",
    ) -> None:
        """Load config and artifacts and build the default search engine.

        Args:
            root_path: GraphRAG project root directory.
            config_path: Optional explicit settings file path.
            data_path: Optional override for ``storage.base_dir`` (where the
                indexing parquet artifacts live).
            reporting_path: Optional override for ``reporting.base_dir``.
            community_level: Community hierarchy level the default engine is
                built at.
            response_type: Free-form description of the desired answer
                format, e.g. "Multiple Paragraphs".
        """
        root = Path(root_path).resolve()
        self.config = load_config(root, config_path)
        # Allow callers to point at a non-default artifacts/reporting layout.
        self.config.storage.base_dir = data_path or self.config.storage.base_dir
        self.config.reporting.base_dir = (
            reporting_path or self.config.reporting.base_dir
        )
        resolve_paths(self.config)
        self.dataframe_dict = _resolve_parquet_files(
            root_dir=root_path,
            config=self.config,
            parquet_list=[
                "create_final_nodes.parquet",
                "create_final_entities.parquet",
                "create_final_community_reports.parquet",
            ],
            optional_list=[],
        )
        self.final_nodes: pd.DataFrame = self.dataframe_dict["create_final_nodes"]
        self.final_entities: pd.DataFrame = self.dataframe_dict["create_final_entities"]
        self.final_community_reports: pd.DataFrame = self.dataframe_dict[
            "create_final_community_reports"
        ]
        self.community_level = community_level
        self.response_type = response_type
        # Build the engine eagerly with the configured defaults so the first
        # query is served without extra setup work.
        self.search_engine = self.get_engine(
            config=self.config,
            nodes=self.final_nodes,
            entities=self.final_entities,
            community_reports=self.final_community_reports,
            community_level=self.community_level,
            response_type=self.response_type,
        )

    def search(
        self,
        query: str,
        community_level: Optional[int] = None,
        response_type: Optional[str] = None,
    ):
        """Run a synchronous global search for *query*.

        ``community_level`` / ``response_type`` default to the values this
        wrapper was constructed with. (Previously they were hard-coded to
        ``2`` / ``"Multiple Paragraphs"``, which silently diverged from the
        constructor arguments and forced an engine rebuild on every call
        whenever the wrapper was created with different settings.)

        Returns:
            Tuple of ``(response, context_data)`` as produced by
            :meth:`global_search`.
        """
        if community_level is None:
            community_level = self.community_level
        if response_type is None:
            response_type = self.response_type
        response, context_data = asyncio.run(
            self.global_search(
                config=self.config,
                nodes=self.final_nodes,
                entities=self.final_entities,
                community_reports=self.final_community_reports,
                community_level=community_level,
                response_type=response_type,
                query=query,
            )
        )
        return response, context_data

    @validate_call(config={"arbitrary_types_allowed": True})
    async def global_search(
        self,
        config: GraphRagConfig,
        nodes: pd.DataFrame,
        entities: pd.DataFrame,
        community_reports: pd.DataFrame,
        community_level: int,
        response_type: str,
        query: str,
    ) -> tuple[
        str | dict[str, Any] | list[dict[str, Any]],
        str | list[pd.DataFrame] | dict[str, pd.DataFrame],
    ]:
        """Execute one global search query asynchronously.

        The cached engine is reused when the requested community level and
        response type match the ones it was built with; otherwise a fresh
        engine is built for this call only.

        Returns:
            Tuple of ``(response, reformatted context data)``.
        """
        # Only rebuild when the request differs from the cached engine's
        # settings. NOTE(review): the rebuilt engine is deliberately not
        # cached on self — the dataframes passed in may differ from the ones
        # stored on the instance.
        if (
            self.community_level != community_level
            or self.response_type != response_type
        ):
            search_engine = self.get_engine(
                config=config,
                nodes=nodes,
                entities=entities,
                community_reports=community_reports,
                community_level=community_level,
                response_type=response_type,
            )
        else:
            search_engine = self.search_engine
        result: SearchResult = await search_engine.asearch(query=query)
        logger.info(
            f"********** GraphRAG 全局搜索问题【{query}】, 耗时 {result.completion_time:.4f}s"
        )
        response = result.response
        context_data = _reformat_context_data(result.context_data)
        return response, context_data

    def get_engine(
        self,
        config: GraphRagConfig,
        nodes: pd.DataFrame,
        entities: pd.DataFrame,
        community_reports: pd.DataFrame,
        community_level: int,
        response_type: str,
    ):
        """Build a global search engine over the community reports at the
        requested hierarchy level."""
        reports = read_indexer_reports(community_reports, nodes, community_level)
        _entities = read_indexer_entities(nodes, entities, community_level)
        return get_global_search_engine(
            config,
            reports=reports,
            entities=_entities,
            response_type=response_type,
        )


class LocalSearchWrapper:
    """Reusable wrapper around GraphRAG local (entity-centric) search.

    Loads the project configuration and all indexing parquet artifacts once
    at construction time and builds a search engine up front, so repeated
    queries do not pay the setup cost again.
    """

    def __init__(
        self,
        root_path: str,
        config_path: Optional[str] = None,
        data_path: Optional[str] = None,
        reporting_path: Optional[str] = None,
        community_level: int = 2,
        response_type: str = "Multiple Paragraphs",
    ) -> None:
        """Load config and artifacts and build the default search engine.

        Args:
            root_path: GraphRAG project root directory.
            config_path: Optional explicit settings file path.
            data_path: Optional override for ``storage.base_dir`` (where the
                indexing parquet artifacts live).
            reporting_path: Optional override for ``reporting.base_dir``.
            community_level: Community hierarchy level the default engine is
                built at.
            response_type: Free-form description of the desired answer
                format, e.g. "Multiple Paragraphs".
        """
        self.root = Path(root_path).resolve()
        self.config = load_config(self.root, config_path)

        # Allow callers to point at a non-default artifacts/reporting layout.
        self.config.storage.base_dir = data_path or self.config.storage.base_dir
        self.config.reporting.base_dir = (
            reporting_path or self.config.reporting.base_dir
        )

        resolve_paths(self.config)

        # Covariates are optional: the parquet may not exist, in which case
        # the dict entry is None.
        self.dataframe_dict = _resolve_parquet_files(
            root_dir=root_path,
            config=self.config,
            parquet_list=[
                "create_final_nodes.parquet",
                "create_final_community_reports.parquet",
                "create_final_text_units.parquet",
                "create_final_relationships.parquet",
                "create_final_entities.parquet",
            ],
            optional_list=["create_final_covariates.parquet"],
        )
        self.final_nodes: pd.DataFrame = self.dataframe_dict["create_final_nodes"]
        self.final_community_reports: pd.DataFrame = self.dataframe_dict[
            "create_final_community_reports"
        ]
        self.final_text_units: pd.DataFrame = self.dataframe_dict[
            "create_final_text_units"
        ]
        self.final_relationships: pd.DataFrame = self.dataframe_dict[
            "create_final_relationships"
        ]
        self.final_entities: pd.DataFrame = self.dataframe_dict["create_final_entities"]
        self.final_covariates: pd.DataFrame | None = self.dataframe_dict[
            "create_final_covariates"
        ]
        self.reporter = PrintProgressReporter("")
        self.community_level = community_level
        self.response_type = response_type
        # Build the engine eagerly with the configured defaults so the first
        # query is served without extra setup work.
        self.search_engine = self.get_engine(
            config=self.config,
            nodes=self.final_nodes,
            entities=self.final_entities,
            community_reports=self.final_community_reports,
            text_units=self.final_text_units,
            relationships=self.final_relationships,
            covariates=self.final_covariates,
            community_level=self.community_level,
            response_type=self.response_type,
        )

    def search(
        self,
        query: str,
        community_level: Optional[int] = None,
        response_type: Optional[str] = None,
    ):
        """Run a synchronous local search for *query*.

        ``community_level`` / ``response_type`` default to the values this
        wrapper was constructed with. (Previously they were hard-coded to
        ``2`` / ``"Multiple Paragraphs"``, which silently diverged from the
        constructor arguments and forced an engine rebuild on every call
        whenever the wrapper was created with different settings.)

        Returns:
            Tuple of ``(response, context_data)`` as produced by
            :meth:`local_search`.
        """
        if community_level is None:
            community_level = self.community_level
        if response_type is None:
            response_type = self.response_type
        response, context_data = asyncio.run(
            self.local_search(
                config=self.config,
                nodes=self.final_nodes,
                entities=self.final_entities,
                community_reports=self.final_community_reports,
                text_units=self.final_text_units,
                relationships=self.final_relationships,
                covariates=self.final_covariates,
                community_level=community_level,
                response_type=response_type,
                query=query,
            )
        )
        return response, context_data

    @validate_call(config={"arbitrary_types_allowed": True})
    async def local_search(
        self,
        config: GraphRagConfig,
        nodes: pd.DataFrame,
        entities: pd.DataFrame,
        community_reports: pd.DataFrame,
        text_units: pd.DataFrame,
        relationships: pd.DataFrame,
        covariates: pd.DataFrame | None,
        community_level: int,
        response_type: str,
        query: str,
    ) -> tuple[
        str | dict[str, Any] | list[dict[str, Any]],
        str | list[pd.DataFrame] | dict[str, pd.DataFrame],
    ]:
        """Execute one local search query asynchronously.

        The cached engine is reused when the requested community level and
        response type match the ones it was built with; otherwise a fresh
        engine is built for this call only.

        Returns:
            Tuple of ``(response, reformatted context data)``.
        """
        # Only rebuild when the request differs from the cached engine's
        # settings. NOTE(review): the rebuilt engine is deliberately not
        # cached on self — the dataframes passed in may differ from the ones
        # stored on the instance.
        if (
            self.community_level != community_level
            or self.response_type != response_type
        ):
            search_engine = self.get_engine(
                config=config,
                nodes=nodes,
                entities=entities,
                community_reports=community_reports,
                text_units=text_units,
                relationships=relationships,
                covariates=covariates,
                community_level=community_level,
                response_type=response_type,
            )
        else:
            search_engine = self.search_engine
        result: SearchResult = await search_engine.asearch(query=query)
        logger.info(
            f"********** GraphRAG 本地搜索问题【{query}】, 耗时 {result.completion_time:.4f}s"
        )
        response = result.response
        context_data = _reformat_context_data(result.context_data)
        return response, context_data

    def get_engine(
        self,
        config: GraphRagConfig,
        nodes: pd.DataFrame,
        entities: pd.DataFrame,
        community_reports: pd.DataFrame,
        text_units: pd.DataFrame,
        relationships: pd.DataFrame,
        covariates: pd.DataFrame | None,
        community_level: int,
        response_type: str,
    ):
        """Build a local search engine with an entity-description vector
        store rooted under the configured storage directory."""
        # Shallow-copy so adding "db_uri" below does not mutate the shared
        # config object in place (the original updated
        # config.embeddings.vector_store directly).
        vector_store_args = (
            dict(config.embeddings.vector_store)
            if config.embeddings.vector_store
            else {}
        )
        self.reporter.info(f"Vector Store Args: {vector_store_args}")
        vector_store_type = vector_store_args.get("type", VectorStoreType.LanceDB)

        _entities = read_indexer_entities(nodes, entities, community_level)

        # The LanceDB store lives alongside the parquet artifacts.
        lancedb_dir = Path(config.storage.base_dir) / "lancedb"

        vector_store_args.update({"db_uri": str(lancedb_dir)})
        description_embedding_store = _get_embedding_description_store(
            entities=_entities,
            vector_store_type=vector_store_type,
            config_args=vector_store_args,
        )

        # Covariates (claims) are optional; fall back to an empty list.
        _covariates = (
            read_indexer_covariates(covariates) if covariates is not None else []
        )

        search_engine = get_local_search_engine(
            config=config,
            reports=read_indexer_reports(community_reports, nodes, community_level),
            text_units=read_indexer_text_units(text_units),
            entities=_entities,
            relationships=read_indexer_relationships(relationships),
            covariates={"claims": _covariates},
            description_embedding_store=description_embedding_store,
            response_type=response_type,
        )

        return search_engine


def test_global_search():
    """Manual smoke test: run one global-search query against a local
    pre-built index and log the answer plus its context data."""
    wrapper = GlobalSearchWrapper(
        root_path="./datas/doupotest",
        data_path="output/doupo/artifacts",
        reporting_path="output/doupo/reporting",
    )
    logger.info("ready, go......")
    response, context = wrapper.search("萧炎与萧战的关系")
    logger.debug("=" * 100)
    logger.info(response)
    logger.debug("-" * 100)
    logger.info(context)

def test_local_search():
    """Manual smoke test: run one local-search query against a local
    pre-built index and log the answer plus its context data."""
    wrapper = LocalSearchWrapper(
        root_path="./datas/doupotest",
        data_path="output/doupo/artifacts",
        reporting_path="output/doupo/reporting",
    )
    logger.info("ready, go......")
    response, context = wrapper.search("萧炎与萧战的关系")
    logger.debug("=" * 100)
    logger.info(response)
    logger.debug("-" * 100)
    logger.info(context)

if __name__ == "__main__":
    # Manual entry point: uncomment exactly one step to run it.
    # graphrag_init("./datas/papers-graphrag")
    graphrag_index("./datas/papers-graphrag", resume_path="papers")
    # test_global_search()
    # test_local_search()
