'''
Description: Streamlit chat interface for GraphRAG global/local structured search.
Author: Wen
Date: 2024-08-27 01:09:52
'''


import streamlit as st
import asyncio
import os
from pathlib import Path
import pandas as pd
import dotenv
from typing import Optional, cast
from enum import Enum
from pydantic import Field
import tiktoken

from graphrag.query.llm.base import BaseLLMCallback
from graphrag.config import (
    GraphRagConfig,
)
from graphrag.query.structured_search.global_search.callbacks import GlobalSearchLLMCallback
from graphrag.query.context_builder.conversation_history import ConversationHistory
from graphrag.query.context_builder.entity_extraction import EntityVectorStoreKey
from graphrag.config.models.global_search_config import GlobalSearchConfig
from graphrag.config.models.local_search_config import LocalSearchConfig
from graphrag.query.api import __get_embedding_description_store
from graphrag.query.cli import _configure_paths_and_settings
from graphrag.query.factories import get_global_search_engine, get_local_search_engine, get_text_embedder
from graphrag.query.indexer_adapters import (
    read_indexer_covariates,
    read_indexer_entities,
    read_indexer_relationships,
    read_indexer_reports,
    read_indexer_text_units,
)
from graphrag.vector_stores.typing import VectorStoreType
from graphrag.query.structured_search.global_search.community_context import (
    GlobalCommunityContext,
)
from graphrag.query.structured_search.local_search.mixed_context import (
    LocalSearchMixedContext,
)


dotenv.load_dotenv()

COMMUNITY_LEVEL: int = 2

USER = "🧑‍💻"
ROBOT = "🤖"


class Methods(str, Enum):
    GLOBAL = "global"
    LOCAL = "local"


class Config(GlobalSearchConfig, LocalSearchConfig):
    context_dir: str = Field(
        description="Path to directory containing context files",
        default=os.environ.get("CONTEXT_DIR", ""),
    )
    method: Methods = Field(
        description="Method to use for search. Can be either 'global' or 'local'. Defaults to 'global",
        default=Methods.LOCAL,
    )
    query: str = Field(
        description="Query to search for",
        default="你好",
    )
    stream: Optional[bool] = Field(
        description="Whether to stream the response or not. Defaults to False",
        default=False,
    )


class LocalSearchStreamingLLMCallback(BaseLLMCallback):
    def __init__(self):
        super().__init__()
        self.response_str = ""
        self.temp_placeholder = st.empty()

    def on_llm_new_token(self, token: str):
        super().on_llm_new_token(token)
        self.response_str += token
        self.temp_placeholder.text(self.response_str)
        if token == "[DONE]":
            self.temp_placeholder.empty()


class GlobalSearchStreamingLLMCallback(GlobalSearchLLMCallback):
    def __init__(self):
        super().__init__()
        self.response_str = ""
        self.temp_placeholder = st.empty()

    def on_llm_new_token(self, token: str):
        super().on_llm_new_token(token)
        self.response_str += token
        self.temp_placeholder.text(self.response_str)
        if token == "[DONE]":
            self.temp_placeholder.empty()


def get_context_folders(root_dir: str | None):
    models = dict()
    root = cast(str, root_dir)
    output = Path(root) / "output"
    if output.exists():
        if output.exists():
            folders = sorted(
                output.iterdir(),
                key=os.path.getmtime,
                reverse=True
            )
            for folder in folders:
                models[folder.name] = str((folder / "artifacts").absolute())
    return models


def initlialize():
    st.session_state["initialized"] = True

    st.session_state["config"] = Config()
    st.session_state["conversation_history"] = ConversationHistory()

    data_dir, root_dir, settings = _configure_paths_and_settings(
        data_dir=os.environ.get("DATA_DIR"),
        root_dir=os.environ.get("ROOT_DIR", "ragtest"),
        config_dir=os.environ.get("CONFIG_DIR", "ragtest/settings.yaml"),
    )
    st.session_state["settings"] = settings
    st.session_state["context_folders"] = get_context_folders(root_dir)

    data_path = Path(data_dir)
    final_nodes: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_nodes.parquet"
    )
    final_entities: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_entities.parquet"
    )
    final_community_reports: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_community_reports.parquet"
    )
    final_text_units: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_text_units.parquet"
    )
    final_relationships: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_relationships.parquet"
    )
    final_covariates_path = data_path / "create_final_convariates.parquet"
    final_covariates = (
        pd.read_parquet(final_covariates_path)
        if final_covariates_path.exists()
        else None
    )

    response_type = "multiple paragraphs"
    reports = read_indexer_reports(
        final_community_reports, final_nodes, COMMUNITY_LEVEL
    )
    _entities = read_indexer_entities(
        final_nodes, final_entities, COMMUNITY_LEVEL
    )

    if "global_search" not in st.session_state:
        st.session_state["global_search"] = get_global_search_engine(
            settings, reports, _entities, response_type
        )

    vector_store_args = (
        settings.embeddings.vector_store if settings.embeddings.vector_store else {}
    )
    vector_store_type = vector_store_args.get("type", VectorStoreType.LanceDB)
    description_embedding_store = __get_embedding_description_store(
        entities=_entities,
        vector_store_type=vector_store_type,
        config_args=vector_store_args,
    )
    _covariates = read_indexer_covariates(
        final_covariates
    ) if final_covariates is not None else []

    if "local_search" not in st.session_state:
        config = settings
        st.session_state["local_search"] = get_local_search_engine(
            config=config,
            reports=read_indexer_reports(
                final_community_reports, final_nodes, COMMUNITY_LEVEL
            ),
            text_units=read_indexer_text_units(final_text_units),
            entities=_entities,
            relationships=read_indexer_relationships(final_relationships),
            covariates={"claims": _covariates},
            description_embedding_store=description_embedding_store,
            response_type=response_type,
        )


def global_community_context(data_dir: str):
    data_path = Path(data_dir)
    final_nodes: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_nodes.parquet"
    )
    final_entities: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_entities.parquet"
    )
    final_community_reports: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_community_reports.parquet"
    )
    reports = read_indexer_reports(
        final_community_reports, final_nodes, COMMUNITY_LEVEL
    )
    _entities = read_indexer_entities(
        final_nodes, final_entities, COMMUNITY_LEVEL
    )
    token_encoder = tiktoken.get_encoding(
        st.session_state["settings"].encoding_model
    )
    context_builder = GlobalCommunityContext(
        community_reports=reports, entities=_entities, token_encoder=token_encoder
    )
    return context_builder


def local_search_mixed_context(data_dir: str):
    data_path = Path(data_dir)
    final_nodes: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_nodes.parquet"
    )
    final_entities: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_entities.parquet"
    )
    final_community_reports: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_community_reports.parquet"
    )
    final_text_units: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_text_units.parquet"
    )
    final_relationships: pd.DataFrame = pd.read_parquet(
        data_path / "create_final_relationships.parquet"
    )
    final_covariates_path = data_path / "create_final_convariates.parquet"
    final_covariates = (
        pd.read_parquet(final_covariates_path)
        if final_covariates_path.exists()
        else None
    )
    reports = read_indexer_reports(
        final_community_reports, final_nodes, COMMUNITY_LEVEL
    )
    _entities = read_indexer_entities(
        final_nodes, final_entities, COMMUNITY_LEVEL
    )

    vector_store_args = (
        st.session_state["settings"].embeddings.vector_store if st.session_state["settings"].embeddings.vector_store else {}
    )
    vector_store_type = vector_store_args.get("type", VectorStoreType.LanceDB)
    description_embedding_store = __get_embedding_description_store(
        entities=_entities,
        vector_store_type=vector_store_type,
        config_args=vector_store_args,
    )
    _covariates = read_indexer_covariates(
        final_covariates) if final_covariates is not None else []
    text_embedder = get_text_embedder(st.session_state["settings"])
    token_encoder = tiktoken.get_encoding(
        st.session_state["settings"].encoding_model
    )

    context_builder = LocalSearchMixedContext(
        community_reports=reports,
        text_units=read_indexer_text_units(final_text_units),
        entities=_entities,
        relationships=read_indexer_relationships(final_relationships),
        covariates={"claims": _covariates},
        entity_text_embeddings=description_embedding_store,
        # if the vectorstore uses entity title as ids, set this to EntityVectorStoreKey.TITLE
        embedding_vectorstore_key=EntityVectorStoreKey.ID,
        text_embedder=text_embedder,
        token_encoder=token_encoder,
    )
    return context_builder


async def search(config: Config):
    context_dir = config.context_dir
    method = config.method
    query = config.query
    stream = config.stream

    if method == Methods.GLOBAL:
        if context_dir:
            st.session_state["global_search"].context_builder = global_community_context(
                context_dir
            )
        st.session_state["global_search"].map_llm_params.update({
            "max_tokens": config.map_max_tokens,
            "temperature": config.temperature,
            "top_p": config.top_p,
            "n": config.n,
        })
        st.session_state["global_search"].reduce_llm_params.update({
            "max_tokens": config.reduce_max_tokens,
            "temperature": config.temperature,
            "top_p": config.top_p,
            "n": config.n,
        })
        st.session_state["global_search"].context_builder_params.update({
            "max_tokens": config.max_tokens,
        })
        st.session_state["global_search"].semaphore = asyncio.Semaphore(
            config.concurrency
        )
        if stream:
            response = ""
            stream_response = st.session_state["global_search"].stream_search(
                query=query, conversation_history=st.session_state["conversation_history"]
            )
            with st.chat_message("robot", avatar=ROBOT):
                response = st.write_stream(stream_response)
            return response
    else:
        if context_dir:
            st.session_state["local_search"].context_builder = local_search_mixed_context(
                context_dir
            )

        st.session_state["local_search"].llm_params.update({
            "max_tokens": config.llm_max_tokens,
            "temperature": config.temperature,
            "top_p": config.top_p,
            "n": config.n,
        })

        st.session_state["local_search"].context_builder_params.update({
            "text_unit_prop": config.text_unit_prop,
            "community_prop": config.community_prop,
            "conversation_history_max_turns": config.conversation_history_max_turns,
            "top_k_mapped_entities": config.top_k_entities,
            "top_k_relationships": config.top_k_relationships,
            "max_tokens": config.max_tokens,
        })
        if stream:
            response = ""
            stream_response = st.session_state["local_search"].stream_search(
                query=query, conversation_history=st.session_state["conversation_history"]
            )
            with st.chat_message("robot", avatar=ROBOT):
                response = st.write_stream(stream_response)
            return response
        else:
            result = await st.session_state["local_search"].asearch(query=query, conversation_history=st.session_state["conversation_history"])
            with st.chat_message("robot", avatar=ROBOT):
                st.write(result.response)
            return result.response


def sidebar():
    with st.sidebar:
        selected_context_dir = st.selectbox(
            label="Context Dir",
            options=list(st.session_state["context_folders"].keys()),
        )
        st.session_state["config"].context_dir = st.session_state["context_folders"][selected_context_dir]

        search_type = st.radio(
            label="Search Type",
            options=list(Methods),
            format_func=lambda value: Methods(value).value,
            index=0
        )
        if search_type is not None:
            st.session_state["config"].method = search_type

        stream = st.checkbox(label="Stream", value=True)
        st.session_state["config"].stream = stream

        # max_tokens (最大标记数): 控制生成的响应中允许的最大标记数量，以避免生成过长的文本，从而影响计算效率和资源利用。
        max_tokens = st.slider(
            label="Max Tokens",
            min_value=1000,
            max_value=24_000,
            value=12_000,
            step=1000
        )
        st.session_state["config"].max_tokens = max_tokens

        # temperature (温度): 用于控制生成式模型的输出温度。较高的温度会使生成的文本更随机，较低的温度会使输出更确定和一致。
        temperature = st.slider(
            label="Temperature",
            min_value=0.0,
            max_value=1.0,
            value=0.0,
            step=0.1
        )
        st.session_state["config"].temperature = temperature

        # top_p (核采样的 top-p 值): 控制核采样策略的概率阈值。通过设置 top_p，模型会从累计概率达到 p 的所有词中采样，确保输出的多样性。
        top_p = st.slider(
            label="Top P",
            min_value=0.0,
            max_value=1.0,
            value=1.0,
            step=0.1
        )
        st.session_state["config"].top_p = top_p

        # n (生成的候选数量): 定义了要生成的候选文本数量，适用于需要多个候选项来进行比较或选择的场景。
        n = st.slider(
            label="N",
            min_value=1,
            max_value=10,
            value=1,
            step=1
        )
        st.session_state["config"].n = n

        if search_type == Methods.LOCAL:
            # text_unit_prop (文本单元比例): 该参数定义了文本单元的比例，用于控制在本地搜索中文本单元的重要性。通常与如何分割或权衡不同文本单元有关。
            text_unit_prop = st.slider(
                label="Text Unit Prop",
                min_value=0.1,
                max_value=1.0,
                value=0.5,
                step=0.1
            )
            st.session_state["config"].text_unit_prop = text_unit_prop

            # community_prop (社区比例): 这个参数设定了社区比例，用于在搜索中如何考虑社区特性。这可能与如何将文本或用户聚类成社区有关，从而影响搜索结果的相关性。
            community_prop = st.slider(
                label="Community Prop",
                min_value=0.1,
                max_value=1.0,
                value=0.1,
                step=0.1
            )
            st.session_state["config"].community_prop = community_prop

            # conversation_history_max_turns (对话历史的最大轮次): 这个参数指定了对话历史中要保留的最大轮次数量。用于控制在生成响应时参考多少轮历史对话，以防止上下文过长而影响性能或准确性。
            conversation_history_max_turns = st.slider(
                label="Conversation History Max Turns",
                min_value=1,
                max_value=20,
                value=5,
                step=1
            )
            st.session_state["config"].conversation_history_max_turns = conversation_history_max_turns

            # top_k_entities (前 k 个实体): 该参数设定了在本地搜索中要考虑的前 k 个映射实体的数量。用于限制在处理或返回时考虑的实体数量，从而优化性能。
            top_k_entities = st.slider(
                label="Top K Mapped Entities",
                min_value=1,
                max_value=100,
                value=10,
                step=1
            )
            st.session_state["config"].top_k_entities = top_k_entities

            # top_k_relationships (前 k 个关系): 类似于 top_k_entities，这个参数定义了本地搜索中要考虑的前 k 个关系的数量，用于优化性能。
            top_k_relationships = st.slider(
                label="Top K Relationships",
                min_value=1,
                max_value=100,
                value=10,
                step=1
            )
            st.session_state["config"].top_k_relationships = top_k_relationships

            # llm_max_tokens (LLM 最大标记数): 专门为大语言模型（LLM）设定的最大标记数限制，确保生成的文本不会超过设定的标记数。
            llm_max_tokens = st.slider(
                label="LLM Max Tokens",
                min_value=100,
                max_value=4000,
                value=2000,
                step=100
            )
            st.session_state["config"].llm_max_tokens = llm_max_tokens
        else:
            # data_max_tokens (数据 LLM 最大标记数): 专门为处理数据时的语言模型设置的最大标记数限制。这确保了数据相关的处理不会超出预期的标记数限制。
            data_max_tokens = st.slider(
                label="Data Max Tokens",
                min_value=1000,
                max_value=24_000,
                value=12_000,
                step=1000
            )
            st.session_state["config"].data_max_tokens = data_max_tokens

            # map_max_tokens (映射 LLM 最大标记数): 为映射任务设定的最大标记数限制，用于控制在映射操作中生成或处理的文本的长度，确保映射操作的效率。
            map_max_tokens = st.slider(
                label="Map Max Tokens",
                min_value=100,
                max_value=2000,
                value=1000,
                step=100
            )
            st.session_state["config"].map_max_tokens = map_max_tokens

            # reduce_max_tokens (归约 LLM 最大标记数): 为归约任务设定的最大标记数限制，用于控制在归约操作中生成或处理的文本的长度，确保归约操作的效率。
            reduce_max_tokens = st.slider(
                label="Reduce Max Tokens",
                min_value=100,
                max_value=4_000,
                value=2_000,
                step=100
            )
            st.session_state["config"].reduce_max_tokens = reduce_max_tokens

            # concurrency (并发数): 该参数设定了可以同时处理的并发请求数量。通过调整这个参数，可以优化系统的并发处理能力，提高处理多个请求时的响应速度和性能。
            concurrency = st.slider(
                label="Concurrency",
                min_value=1,
                max_value=100,
                value=32,
                step=1
            )
            st.session_state["config"].concurrency = concurrency

        return st.session_state["config"]


async def display_response(query: str):
    st.session_state["config"].query = query

    if "messages" not in st.session_state:
        st.session_state["messages"] = []

    with st.chat_message("user", avatar=USER):
        st.markdown(query)

    st.session_state["messages"].append(
        {"role": "user", "content": query, "avatar": USER}
    )
    st.session_state["conversation_history"].add_turn(
        role="user", content=query
    )

    response = await search(st.session_state["config"])
    st.session_state["messages"].append(
        {"role": "robot", "content": response, "avatar": ROBOT}
    )


if "initialized" not in st.session_state:
    initlialize()

if __name__ == "__main__":
    sidebar()
    st.header(f'GraphRAG')
    if "messages" not in st.session_state:
        st.session_state["messages"] = []
    for message in st.session_state["messages"]:
        with st.chat_message(message["role"], avatar=message.get("avatar")):
            st.markdown(message["content"])

    if user_query := st.chat_input("Ask me something (or press enter to generate a response)"):
        asyncio.run(display_response(user_query))
