import asyncio
import os
import sys
from datetime import datetime
import logging
from pathlib import Path
import pandas as pd
import dotenv
from typing import Optional, cast
from enum import Enum
import tiktoken
from pydantic import Field
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi.requests import Request
from fastapi.middleware.cors import CORSMiddleware

from graphrag.query.llm.base import BaseLLMCallback
from graphrag.query.structured_search.global_search.callbacks import GlobalSearchLLMCallback
from graphrag.query.context_builder.entity_extraction import EntityVectorStoreKey
from graphrag.config.models.global_search_config import GlobalSearchConfig
from graphrag.config.models.local_search_config import LocalSearchConfig
from graphrag.query.structured_search.global_search.search import GlobalSearch
from graphrag.query.structured_search.local_search.search import LocalSearch
from graphrag.query.api import __get_embedding_description_store
from graphrag.query.cli import _configure_paths_and_settings
from graphrag.query.factories import get_global_search_engine, get_local_search_engine, get_text_embedder
from graphrag.query.indexer_adapters import (
    read_indexer_covariates,
    read_indexer_entities,
    read_indexer_relationships,
    read_indexer_reports,
    read_indexer_text_units,
)
from graphrag.vector_stores.typing import VectorStoreType
from graphrag.query.structured_search.global_search.community_context import (
    GlobalCommunityContext,
)
from graphrag.query.structured_search.local_search.mixed_context import (
    LocalSearchMixedContext,
)


dotenv.load_dotenv()

log = logging.getLogger(__name__)


# Community hierarchy level used when selecting reports/entities from the index.
COMMUNITY_LEVEL: int = 2

# Resolve data/root/config paths from the environment, defaulting to the
# "ragtest" project layout used by the graphrag CLI.
data_dir, root_dir, config = _configure_paths_and_settings(
    data_dir=os.environ.get("DATA_DIR"),
    root_dir=os.environ.get("ROOT_DIR", "ragtest"),
    config_dir=os.environ.get("CONFIG_DIR", "ragtest/settings.yaml"),
)

# Load the indexer output tables once at import time; they are shared by the
# startup hook and every request.
data_path = Path(data_dir)
final_nodes: pd.DataFrame = pd.read_parquet(
    data_path / "create_final_nodes.parquet"
)
final_entities: pd.DataFrame = pd.read_parquet(
    data_path / "create_final_entities.parquet"
)
final_community_reports: pd.DataFrame = pd.read_parquet(
    data_path / "create_final_community_reports.parquet"
)
final_text_units: pd.DataFrame = pd.read_parquet(
    data_path / "create_final_text_units.parquet"
)
final_relationships: pd.DataFrame = pd.read_parquet(
    data_path / "create_final_relationships.parquet"
)
# BUG FIX: the filename was misspelled "create_final_convariates.parquet";
# combined with the exists() guard below, covariates were silently never
# loaded even when the index produced them.
final_covariates_path = data_path / "create_final_covariates.parquet"
final_covariates = (
    pd.read_parquet(final_covariates_path)
    if final_covariates_path.exists()
    else None
)

# Search engines are constructed in the FastAPI startup hook below.
global_search: GlobalSearch
local_search: LocalSearch


class Methods(str, Enum):
    """Search strategies accepted by the /search endpoint."""

    GLOBAL = "global"
    LOCAL = "local"


class SearchRequest(GlobalSearchConfig, LocalSearchConfig):
    """Request body for /search, extending graphrag's global/local search configs."""

    data_dir: str = Field(
        description="Path to data directory.",
    )
    method: Methods = Field(
        # FIX: description previously said "Defaults to 'global" (unterminated
        # quote) even though the actual default below is Methods.LOCAL.
        description="Method to use for search. Can be either 'global' or 'local'. Defaults to 'local'.",
        default=Methods.LOCAL,
    )
    query: str = Field(
        description="Query to search for",
        default="你好",
    )
    stream: Optional[bool] = Field(
        description="Whether to stream the response or not. Defaults to False",
        default=False,
    )


# FastAPI application with fully permissive CORS so browser clients on any
# origin can call the API. NOTE(review): "*" origins with credentials enabled
# is very open — confirm this is intended for production.
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.on_event("startup")
async def startup():
    global global_search, local_search
    community_level = COMMUNITY_LEVEL
    response_type = "multiple paragraphs"
    reports = read_indexer_reports(
        final_community_reports, final_nodes, community_level
    )
    _entities = read_indexer_entities(
        final_nodes, final_entities, community_level
    )
    global_search = get_global_search_engine(
        config=config,
        reports=reports,
        entities=_entities,
        response_type=response_type,
    )

    vector_store_args = (
        config.embeddings.vector_store if config.embeddings.vector_store else {}
    )
    vector_store_type = vector_store_args.get("type", VectorStoreType.LanceDB)
    description_embedding_store = __get_embedding_description_store(
        entities=_entities,
        vector_store_type=vector_store_type,
        config_args=vector_store_args,
    )
    _covariates = read_indexer_covariates(
        final_covariates) if final_covariates is not None else []

    local_search = get_local_search_engine(
        config=config,
        reports=read_indexer_reports(
            final_community_reports, final_nodes, community_level
        ),
        text_units=read_indexer_text_units(final_text_units),
        entities=_entities,
        relationships=read_indexer_relationships(final_relationships),
        covariates={"claims": _covariates},
        description_embedding_store=description_embedding_store,
        response_type=response_type,
    )


@app.get("/context")
async def context():
    context_folders = dict()
    root = cast(str, root_dir)
    output = Path(root) / "output"
    if output.exists():
        if output.exists():
            folders = sorted(
                output.iterdir(),
                key=os.path.getmtime,
                reverse=True
            )
            for folder in folders:
                context_folders[folder.name] = str(
                    (folder / "artifacts").absolute())

    return JSONResponse(content=context_folders)


def global_community_context(data_dir: str):
    """Build a GlobalCommunityContext from the index artifacts in *data_dir*."""
    base = Path(data_dir)
    nodes_df: pd.DataFrame = pd.read_parquet(base / "create_final_nodes.parquet")
    entities_df: pd.DataFrame = pd.read_parquet(
        base / "create_final_entities.parquet"
    )
    reports_df: pd.DataFrame = pd.read_parquet(
        base / "create_final_community_reports.parquet"
    )

    community_reports = read_indexer_reports(reports_df, nodes_df, COMMUNITY_LEVEL)
    entities = read_indexer_entities(nodes_df, entities_df, COMMUNITY_LEVEL)

    return GlobalCommunityContext(
        community_reports=community_reports,
        entities=entities,
        token_encoder=tiktoken.get_encoding(config.encoding_model),
    )


def local_search_mixed_context(data_dir: str):
    """Build a LocalSearchMixedContext from the index artifacts in *data_dir*.

    Loads the indexer output tables, creates the entity description embedding
    store, and wires everything into a local-search context builder.
    """
    data_path = Path(data_dir)
    final_nodes: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_nodes.parquet"
    )
    final_entities: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_entities.parquet"
    )
    final_community_reports: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_community_reports.parquet"
    )
    final_text_units: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_text_units.parquet"
    )
    final_relationships: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_relationships.parquet"
    )
    # BUG FIX: filename was misspelled "create_final_convariates.parquet",
    # so claims were silently never loaded even when present.
    final_covariates_path = data_path / "create_final_covariates.parquet"
    final_covariates = (
        pd.read_parquet(final_covariates_path)
        if final_covariates_path.exists()
        else None
    )
    reports = read_indexer_reports(
        final_community_reports, final_nodes, COMMUNITY_LEVEL
    )
    _entities = read_indexer_entities(
        final_nodes, final_entities, COMMUNITY_LEVEL
    )

    # Vector store settings come from the embeddings config; default to LanceDB.
    vector_store_args = (
        config.embeddings.vector_store if config.embeddings.vector_store else {}
    )
    vector_store_type = vector_store_args.get("type", VectorStoreType.LanceDB)
    description_embedding_store = __get_embedding_description_store(
        entities=_entities,
        vector_store_type=vector_store_type,
        config_args=vector_store_args,
    )
    # Covariates ("claims") are an optional index output.
    _covariates = (
        read_indexer_covariates(final_covariates)
        if final_covariates is not None
        else []
    )
    text_embedder = get_text_embedder(config)
    token_encoder = tiktoken.get_encoding(config.encoding_model)

    context_builder = LocalSearchMixedContext(
        community_reports=reports,
        text_units=read_indexer_text_units(final_text_units),
        entities=_entities,
        relationships=read_indexer_relationships(final_relationships),
        covariates={"claims": _covariates},
        entity_text_embeddings=description_embedding_store,
        # If the vector store keys entities by title, use EntityVectorStoreKey.TITLE.
        embedding_vectorstore_key=EntityVectorStoreKey.ID,
        text_embedder=text_embedder,
        token_encoder=token_encoder,
    )
    return context_builder


@app.post("/search")
async def search(request: SearchRequest):
    method = request.method
    query = request.query
    stream = request.stream
    data_dir = request.data_dir

    if method == Methods.GLOBAL:
        if data_dir:
            global_search.context_builder = global_community_context(data_dir)

        global_search.map_llm_params.update({
            "max_tokens": request.map_max_tokens,
            "temperature": request.temperature,
            "top_p": request.top_p,
            "n": request.n,
        })
        global_search.reduce_llm_params.update({
            "max_tokens": request.reduce_max_tokens,
            "temperature": request.temperature,
            "top_p": request.top_p,
            "n": request.n,
        })
        global_search.context_builder_params.update({
            "max_tokens": request.max_tokens,
        })
        global_search.semaphore = asyncio.Semaphore(request.concurrency)
        if stream:
            response = await global_search.astream_search(query=query)
            if response is not None:
                return StreamingResponse(response, media_type="text/event-stream")
            else:
                return JSONResponse(content="No response")
        else:
            result = await global_search.asearch(query=query)
            return JSONResponse(content=result.response)
    else:
        if data_dir:
            local_search.context_builder = local_search_mixed_context(data_dir)

        local_search.llm_params.update({
            "max_tokens": request.llm_max_tokens,
            "temperature": request.temperature,
            "top_p": request.top_p,
            "n": request.n,
        })

        local_search.context_builder_params.update({
            "text_unit_prop": request.text_unit_prop,
            "community_prop": request.community_prop,
            "conversation_history_max_turns": request.conversation_history_max_turns,
            "top_k_mapped_entities": request.top_k_entities,
            "top_k_relationships": request.top_k_relationships,
            "max_tokens": request.max_tokens,
        })
        if stream:
            response = await local_search.astream_search(query=query)
            if response is not None:
                return StreamingResponse(response, media_type="text/event-stream")
            else:
                return JSONResponse(content="No response")
        else:
            result = await local_search.asearch(query=query)
            return JSONResponse(content=result.response)


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8001)
