import os
from enum import Enum
from idlelib.history import History
from typing import Optional, Union, List
from urllib.parse import quote_plus
from uuid import uuid4

from llama_index.core import Settings, StorageContext, PromptTemplate
from llama_index.core.chat_engine import CondenseQuestionChatEngine
from llama_index.core.indices.knowledge_graph import KnowledgeGraphRAGRetriever
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.storage.chat_store.redis import RedisChatStore

from server.module_chat.chat.core.agent.tool.api.chat_result import ChatResult
from server.module_chat.chat.core.agent.tool.function.functions_registry import register_function
from server.module_chat.chat.core.engine.my_kg_query_engine import MyKGQueryEngine
from server.module_chat.chat.core.stores.my_nebula_graph_store import MyNebulaGraphStore
from server.settings import Settings as MySettings
from server.utils.log_util import logger
from server.config.models_config import llm
from server.config.models_config import no_think_llm

# Module-wide LlamaIndex defaults: the configured LLM and the chunk size
# used when indexing/splitting documents.
Settings.llm = llm
Settings.chunk_size = 512

# The llama-index NebulaGraph client reads its connection parameters from
# environment variables, so the project settings are exported here at import time.
os.environ["GRAPHD_HOST"] = MySettings.database_settings.NEBULA_HOST
os.environ["NEBULA_ADDRESS"] = MySettings.database_settings.NEBULA_ADDRESS
os.environ["NEBULA_USER"] = MySettings.database_settings.NEBULA_USER
os.environ["NEBULA_PASSWORD"] = MySettings.database_settings.NEBULA_PASSWORD

space_name = MySettings.database_settings.NEBULA_SPACE_NAME
# Vertex tags present in the Nebula graph space. tag_prop_names is
# order-aligned with this list: tag_prop_names[i] is the comma-separated
# property list fetched for tags[i].
tags = [
    "business_system",
    "database",
    "table",
    "field",
    "file_system",
    "file",
    "data_directory"
]
tag_prop_names = [
    "name,data_owner,data_manager",
    "database_id,database_name,database_schema",
    "table_id,table_name,data_category_1,data_category_2,data_category_3,data_category_4,data_level",
    # NOTE(review): this entry pairs with tag "field" but lists file_* properties
    # (identical to the "file" entry two lines below) — looks copy-pasted.
    # Verify against the Nebula space schema; likely should be field_* properties.
    "file_id,file_name,data_category_1,data_category_2,data_category_3,data_category_4,data_level",
    "file_system_id,file_system_name",
    "file_id,file_name,data_category_1,data_category_2,data_category_3,data_category_4,data_level",
    "data_name,task_type,data_owner,data_manager,data_category_1,data_category_2,data_category_3,data_category_4,data_level"
]
# Single edge type "contains" with no edge properties.
edge_types, rel_prop_names = ["contains"], [""]

# Graph store over the configured Nebula space; shared by both query paths below.
graph_store = MyNebulaGraphStore(
    space_name=space_name,
    edge_types=edge_types,
    rel_prop_names=rel_prop_names,
    tags=tags,
    tag_prop_names=tag_prop_names
)

storage_context = StorageContext.from_defaults(graph_store=graph_store)

# Redis DB 3 backs conversation memory; password is URL-escaped for the DSN.
redis_url = f'redis://:{quote_plus(MySettings.redis_settings.redis_password)}@{MySettings.redis_settings.redis_host}:{MySettings.redis_settings.redis_port}/3'

# Chat history entries expire after 24h (ttl is in seconds).
chat_store = RedisChatStore(redis_url=redis_url, ttl=86400)


@register_function(name="chat_db", description="数据智能分析")
def chat_db(query: str,
            conversation_id: str = "",
            history_len: int = -1,
            history: Optional[Union[int, List[History]]] = None,
            stream: bool = True,
            temperature: float = 0.8,
            max_tokens: Optional[int] = 4096,
            *args,
            **kwargs
            ):
    """Generator entry point for data Q&A over the knowledge graph.

    Classifies *query* as a statistics question (QueryType.STATISTICS) or an
    analysis question (QueryType.ANALYSIS), dispatches to the matching engine,
    and yields serialized ChatResult JSON strings (one per streamed chunk).

    Args:
        query: The user's natural-language question.
        conversation_id: Redis memory key; a fresh UUID is generated if empty.
        history_len / history / temperature / max_tokens: accepted for
            registry-signature compatibility; not used by this implementation.
            NOTE(review): ``History`` is imported from ``idlelib.history`` at
            the top of the file, which is almost certainly the wrong module —
            confirm the intended project History type.
        stream: If True, yield one ChatResult per response chunk; otherwise
            yield a single ChatResult with the full response text.

    Yields:
        str: ChatResult serialized via model_dump_json(by_alias=True).
    """
    # Build the result envelope BEFORE any fallible call, so the except
    # handler can always reference it. (The original created it after
    # get_query_type(), leaving chat_result unbound if classification raised.)
    if not conversation_id:
        conversation_id = str(uuid4())
    message_id = str(uuid4())
    chat_result = ChatResult(**{
        "conversation_id": conversation_id,
        "message_id": message_id,
        "text": ""
    })

    try:
        # Classify the question ("0" = statistics, "1" = analysis).
        query_type = get_query_type(query)

        if QueryType.STATISTICS in query_type:
            response = data_statistics(query, conversation_id)
        elif QueryType.ANALYSIS in query_type:
            response = data_analysis(query, conversation_id)
        else:
            # Unclassifiable question: emit the fallback message and stop.
            # (The original fell through here with `response` unbound,
            # raising UnboundLocalError on the check below.)
            chat_result.text = "暂时无法回答您的问题，请增加更多问题描述"
            yield chat_result.model_dump_json(by_alias=True)
            return

        if response is not None:
            if stream:
                for chunk in response.response_gen:
                    if chunk:
                        print(chunk, end="", flush=True)
                        # Each yielded ChatResult carries the current chunk
                        # only, not the accumulated text.
                        chat_result.text = chunk
                        yield chat_result.model_dump_json(by_alias=True)
            else:
                # `text` is a string field; coerce the response object
                # instead of assigning it directly.
                chat_result.text = str(response)
                yield chat_result.model_dump_json(by_alias=True)
    except Exception as e:
        logger.error(f"问题分析失败: {query}，详细错误信息: {e}")
        chat_result.text = "暂时无法回答您的问题，请增加更多问题描述"
        yield chat_result.model_dump_json(by_alias=True)


def get_query_type(query: str) -> str:
    """Classify a user question as statistics ("0") or analysis ("1").

    Sends *query* to the non-thinking LLM with a zero/one classification
    prompt and returns the model's output. Callers test membership
    (``QueryType.STATISTICS in result``), so the returned string only needs
    to contain the digit.

    Args:
        query: The user's natural-language question.

    Returns:
        The LLM's classification output, whitespace-stripped; expected to
        contain "0" or "1".
    """
    get_query_type_prompt = PromptTemplate(
        """
        请根据用户问题的内容和意图，按照以下规则进行分类：
        【分类标准】
        0-数据统计类：需直接查询、统计或计算数据结果，不涉及复杂分析。通常包含明确指标或聚合计算（如总和、平均值、计数等）。示例：
        
        - "去年各季度销售额是多少？"
        
        - "本月新增用户数"
        
        - "A产品近3个月的退货率"
        
        1-数据分析类：需通过数据对比、趋势分析、模式识别或归因推断得出结论。常含因果关系、业务洞察等深层分析。示例：
        
        - "销售额下降的原因是什么？"
        
        - "用户流失率与促销活动有何关联？"
        
        - "预测下季度各地区销售趋势"
        
        【判断优先级】
        
        若问题同时要求统计结果和简单结论（如"各渠道销量对比"），归为0类
        
        若需建立数据模型/算法（如"用回归分析预测"），归为1类
        
        包含"为什么"、"如何影响"、"预测"等关键词时优先归为1类
        
        请严格输出数字0或1，禁止解释。
        用户问题：{query_str}
        """
    )

    query_type: str = no_think_llm.predict(
        get_query_type_prompt,
        query_str=query
    )
    # Models often pad the single digit with whitespace/newlines; strip so
    # the result is clean for logging and membership checks.
    return query_type.strip()


def data_statistics(query: str, conversation_id: str):
    """Answer a statistics-type question through the KG query engine.

    Conversation history lives in Redis under *conversation_id*, letting the
    condense-question engine rewrite follow-ups into standalone queries.
    Returns the engine's streaming chat response.
    """
    kg_engine = MyKGQueryEngine(
        llm=llm,
        stream=True,
        storage_context=storage_context,
        verbose=True
    )

    # Redis-backed memory, capped at 3000 tokens, keyed by conversation.
    memory = ChatMemoryBuffer.from_defaults(
        token_limit=3000,
        chat_store=chat_store,
        chat_store_key=conversation_id,
    )

    return CondenseQuestionChatEngine.from_defaults(
        query_engine=kg_engine,
        memory=memory
    ).stream_chat(query)


def data_analysis(query: str, conversation_id: str):
    """Answer an analysis-type question via knowledge-graph RAG retrieval.

    A KnowledgeGraphRAGRetriever (traversal depth 4) feeds a retriever query
    engine; Redis-backed memory under *conversation_id* lets the
    condense-question engine rewrite follow-ups. Returns the streaming
    chat response.
    """
    retriever = KnowledgeGraphRAGRetriever(
        storage_context=storage_context,
        llm=no_think_llm,
        graph_traversal_depth=4,
        verbose=True
    )
    rag_engine = RetrieverQueryEngine.from_args(
        retriever=retriever,
        llm=llm
    )

    # Redis-backed memory, capped at 3000 tokens, keyed by conversation.
    memory = ChatMemoryBuffer.from_defaults(
        token_limit=3000,
        chat_store=chat_store,
        chat_store_key=conversation_id,
    )

    chat_engine = CondenseQuestionChatEngine.from_defaults(
        query_engine=rag_engine,
        memory=memory,
        verbose=True
    )
    return chat_engine.stream_chat(query)


class StrEnum(str, Enum):
    """Enum whose members behave as plain strings.

    Overrides __str__ so str(member) yields the member's value rather than
    the default "ClassName.MEMBER" form — a stand-in for enum.StrEnum,
    which only exists from Python 3.11.
    """

    def __str__(self) -> str:
        # The str mixin stores the value as the string data itself, so
        # delegating to str.__str__ returns exactly self.value.
        return str.__str__(self)


class QueryType(StrEnum):
    # Values match the single digits the classifier prompt in
    # get_query_type() instructs the LLM to output; callers use substring
    # membership ("0" in result) rather than equality.
    STATISTICS = "0"  # direct query / aggregation questions
    ANALYSIS = "1"    # trend, causal, or predictive analysis questions
