import os
from pathlib import Path
from typing import Union, List, Optional, AsyncIterable
from urllib.parse import quote_plus

from fastapi import APIRouter, Body
from llama_index.core import Settings
from llama_index.core.base.llms.types import ChatMessage, MessageRole, TextBlock, ImageBlock
from llama_index.core import StorageContext
from llama_index.core.chat_engine import CondenseQuestionChatEngine
from llama_index.core.indices.knowledge_graph import KnowledgeGraphRAGRetriever
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.storage.chat_store.redis import RedisChatStore
from sse_starlette import EventSourceResponse

from server.config import llm, no_think_llm
from server.config.models_config import embedding_model
from server.module_chat.chat.core.agent.agent import Agent
from server.module_chat.chat.core.agent.tool.api.chat_api_factory import ChatApiFactory
from server.module_chat.chat.core.agent.tool.api.chat_result import ChatResult
from server.module_chat.chat.core.agent.tool.function.function_factory import FunctionFactory
from server.module_chat.chat.core.agent.tool.tool_factory import ToolFactory
from server.module_chat.chat.core.stores.my_nebula_graph_store import MyNebulaGraphStore
from server.module_chat.chat.entity.vo.history import History
from server.module_chat.chat.core.engine.my_kg_query_engine import MyKGQueryEngine
from server.utils.response_util import ResponseUtil
from server.settings import Settings as MySettings

# Router for the Act-Chat conversation endpoints; all routes below mount under /chat.
chatController = APIRouter(prefix="/chat", tags=["Act-Chat 对话"])


@chatController.post('/action', summary="与Act-Chat对话", )
async def chat_action(query: str = Body(..., description="用户输入", examples=["恼羞成怒"]),
                      conversation_id: str = Body("", alias="conversationId", description="对话框ID"),
                      history_len: int = Body(-1, alias="historyLen", description="从数据库中取历史消息的数量"),
                      history: Union[int, List[History]] = Body([],
                                                                description="历史对话，设为一个整数可以从数据库中读取历史消息",
                                                                examples=[[
                                                                    {"role": "user",
                                                                     "content": "我们来玩成语接龙，我先来，生龙活虎"},
                                                                    {"role": "assistant", "content": "虎头虎脑"}]]
                                                                ),
                      stream: bool = Body(False, description="流式输出"),
                      temperature: float = Body(0.8, description="LLM 采样温度", ge=0.0, le=2.0),
                      max_tokens: Optional[int] = Body(4096, alias="maxTokens",
                                                       description="限制LLM生成Token数量，默认None代表模型最大值"),
                      top_p: float = Body(1.0, alias="topP", description="LLM 核采样。勿与temperature同时设置", gt=0.0,
                                          lt=1.0),
                      agent: Agent = Body(..., description="agent 配置")
                      ):
    """Chat with Act-Chat through the agent's first configured tool.

    The first tool of ``agent`` is resolved via ``ToolFactory`` and may be
    backed either by a remote chat API (``type == "api"``) or by a local
    function (``type == "function"``); both produce an async iterable of
    serialized ``ChatResult`` chunks.

    Returns:
        With ``stream=True``, an ``EventSourceResponse`` forwarding chunks as
        Server-Sent Events; otherwise a JSON success payload containing the
        chunks merged into a single ``ChatResult``.

    Raises:
        ValueError: if the resolved tool's type is neither "api" nor "function".
    """

    def chat_iterator() -> AsyncIterable[str]:
        """Resolve the agent's first tool and return its async chunk stream."""
        tool = agent.tools[0]
        real_tool = ToolFactory().create(tool.id)
        if real_tool.type == "api":
            # API-backed tool: delegate to the matching remote chat client.
            chat_api = ChatApiFactory().create(real_tool.api)
            return chat_api.send_message(query=query, conversation_id=conversation_id,
                                         history_len=history_len,
                                         history=history, stream=stream, temperature=temperature,
                                         max_tokens=max_tokens, top_p=top_p)
        if real_tool.type == "function":
            # Function-backed tool: look up and invoke the local implementation.
            function = FunctionFactory().create(real_tool.function.name)
            return function(query=query, conversation_id=conversation_id,
                            history_len=history_len,
                            history=history, stream=stream, temperature=temperature,
                            max_tokens=max_tokens, top_p=top_p)
        # Previously an unknown tool type fell through and surfaced as an
        # opaque UnboundLocalError on the result variable; fail fast with a
        # clear message instead.
        raise ValueError(f"Unsupported tool type: {real_tool.type!r}")

    if stream:
        # SSE mode: pass the chunk stream straight through to the client.
        return EventSourceResponse(chat_iterator())

    # Blocking mode: fold every chunk into one aggregated ChatResult, keeping
    # the last seen conversation/message ids and concatenating the text.
    block_result = ChatResult(conversation_id="", message_id="", text="")
    async for chunk in chat_iterator():
        result = ChatResult.parse_raw(chunk)
        block_result.conversation_id = result.conversation_id
        block_result.message_id = result.message_id
        block_result.text += result.text
    return ResponseUtil.success(data=block_result.dict(by_alias=True))


@chatController.post('/action1', summary="与Act-Chat对话1", )
async def chat_action1(query: str = Body(..., embed=True, description="用户输入", examples=["恼羞成怒"]), ):
    """Scratch/diagnostic endpoint exercising several chat-stack features end to end.

    It sequentially runs: a multimodal chat call (image + text), a streaming
    chat call, a text-embedding call, a direct NebulaGraph KG query engine,
    and a KG-RAG retriever pipeline with Redis-backed chat memory.  All
    intermediate results are printed to stdout; the HTTP response is always a
    bare success payload.

    NOTE(review): every LLM/graph/Redis call here is synchronous inside an
    ``async def`` handler, so the event loop blocks for the whole request —
    tolerable for a manual test route, not for production traffic.
    """
    # Multimodal test: send a local image plus the user's text in one message.
    # NOTE(review): the image path is hard-coded to a developer machine
    # ("F://123.png") and will fail elsewhere — confirm before relying on it.
    messages = [
        ChatMessage(
            role=MessageRole.USER,
            content=[
                ImageBlock(path=Path("F://123.png"), image_mimetype="image/png"),
                TextBlock(text=query)
            ]
        )
    ]

    chat_response = llm.chat(messages)
    print(chat_response.message.content)

    # Streaming test: print each incremental delta as it arrives.
    messages = [ChatMessage(role="user", content="你想喝点什么?")]

    chat_response = llm.stream_chat(messages)

    for chunk in chat_response:
        print(chunk.delta)

    # Embedding test: embed a fixed sentence and dump the raw vector.
    embeddings = embedding_model.get_text_embedding(
        "Open AI new Embeddings models is great."
    )
    print(embeddings)

    # Global llama-index defaults consumed by the engines constructed below.
    Settings.llm = llm
    Settings.embed_model = embedding_model
    Settings.chunk_size = 512

    # NebulaGraph connection settings are handed over via environment
    # variables, which must be set before the graph store is constructed.
    os.environ["GRAPHD_HOST"] = MySettings.database_settings.NEBULA_HOST
    os.environ["NEBULA_ADDRESS"] = MySettings.database_settings.NEBULA_ADDRESS
    os.environ["NEBULA_USER"] = MySettings.database_settings.NEBULA_USER
    os.environ["NEBULA_PASSWORD"] = MySettings.database_settings.NEBULA_PASSWORD

    # space_name = "basketballplayer"
    # tags, tag_prop_names = ["player", "team"], ["name,age", "name"]
    # edge_types, rel_prop_names = ["follow", "serve"], ["degree", "start_year,end_year"]

    space_name = MySettings.database_settings.NEBULA_SPACE_NAME
    # Vertex tags of the graph space; order must line up positionally with
    # `tag_prop_names` below.
    tags = [
        "business_system",
        "database",
        "table",
        "field",
        "file_system",
        "file",
        "data_directory"
    ]
    # Comma-separated property lists, index-aligned with `tags`.
    # NOTE(review): entry 3 (for tag "field") repeats the "file" property list
    # (file_id, file_name, ...) instead of field-specific properties — looks
    # like a copy-paste slip; verify against the Nebula schema before trusting
    # retrieval results for "field" vertices.
    tag_prop_names = [
        "name,data_owner,data_manager",
        "database_id,database_name,database_schema",
        "table_id,table_name,data_category_1,data_category_2,data_category_3,data_category_4,data_level",
        "file_id,file_name,data_category_1,data_category_2,data_category_3,data_category_4,data_level",
        "file_system_id,file_system_name",
        "file_id,file_name,data_category_1,data_category_2,data_category_3,data_category_4,data_level",
        "data_name,task_type,data_owner,data_manager,data_category_1,data_category_2,data_category_3,data_category_4,data_level"
    ]
    # Single edge type with no relationship properties.
    edge_types, rel_prop_names = ["contains"], [""]

    graph_store = MyNebulaGraphStore(
        space_name=space_name,
        edge_types=edge_types,
        rel_prop_names=rel_prop_names,
        tags=tags,
        tag_prop_names=tag_prop_names
    )

    storage_context = StorageContext.from_defaults(graph_store=graph_store)

    # Redis DB index 3; the password is URL-encoded so special characters
    # survive inclusion in the connection URL.
    redis_url = f'redis://:{quote_plus(MySettings.redis_settings.redis_password)}@{MySettings.redis_settings.redis_host}:{MySettings.redis_settings.redis_port}/3'

    # Chat history expires after 300 seconds.
    chat_store = RedisChatStore(redis_url=redis_url, ttl=300)

    # Redis-backed rolling memory capped at 3000 tokens, keyed per user.
    # NOTE(review): the key is hard-coded to "user1" — all callers share one
    # history; confirm whether this is intentional for the test route.
    chat_memory = ChatMemoryBuffer.from_defaults(
        token_limit=3000,
        chat_store=chat_store,
        chat_store_key="user1",
    )

    # Pipeline 1: direct KG query engine wrapped in a condense-question chat
    # engine (no persistent memory attached here).
    query_engine = MyKGQueryEngine(
        llm=llm,
        storage_context=storage_context,
        verbose=True
    )

    chat_engine = CondenseQuestionChatEngine.from_defaults(
        query_engine=query_engine,
    )

    response = chat_engine.stream_chat(query)
    for chunk in response.response_gen:
        if chunk:
            print(chunk, end="", flush=True)
    print(response)

    # Pipeline 2: KG-RAG retriever (question condensing uses no_think_llm,
    # synthesis uses llm) combined with the Redis chat memory above.
    graph_rag_retriever = KnowledgeGraphRAGRetriever(
        storage_context=storage_context,
        llm=no_think_llm,
        verbose=True
    )

    kg_rag_query_engine = RetrieverQueryEngine.from_args(
        retriever=graph_rag_retriever,
        llm=llm
    )

    # response = kg_rag_query_engine.query("查询业务系统system_18a0b606515677d8的信息？")
    # response = kg_rag_query_engine.query("查询工业互联网业务系统的信息？")
    # print(response)
    chat_rag_engine = CondenseQuestionChatEngine.from_defaults(
        query_engine=kg_rag_query_engine,
        memory=chat_memory,
        verbose=True
    )
    # NOTE(review): the RAG pipeline ignores `query` and uses a fixed test
    # question — presumably intentional for this scratch endpoint.
    response = chat_rag_engine.stream_chat("查询工业互联网业务系统的信息？")
    for chunk in response.response_gen:
        if chunk:
            print(chunk, end="", flush=True)
    print(response)

    return ResponseUtil.success()
