from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from server.utils import wrap_done, get_ChatOpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable
import asyncio
import json
from langchain.prompts.chat import ChatPromptTemplate
from typing import List, Optional, Union
from server.chat.utils import History
from langchain.prompts import PromptTemplate
from server.utils import get_prompt_template, BaseResponse, create_chat_response
from server.memory.conversation_db_buffer_memory import ConversationBufferDBMemory
from server.db.repository import add_message_to_db, add_message_to_custom_db
from server.callback_handler.conversation_callback_handler import ConversationCallbackHandler


async def recreate_query(uid: str,
                         query: str = "",
                         conversation_id: str = "",
                         place_name: str = "",
                         history: Optional[Union[int, List[History]]] = None,
                         stream: bool = False,
                         model_name: str = LLM_MODELS[0],
                         temperature: float = TEMPERATURE,
                         # top_p intentionally disabled: do not set together with temperature
                         # top_p: float = Body(TOP_P, description="LLM 核采样。勿与temperature同时设置", gt=0.0, lt=1.0),
                         prompt_name: str = "default",
                         knowledge_base_name: Optional[str] = None,
                         ):
    """Run an LLM chat for *query* and return the answer.

    Args:
        uid: User id, used when persisting the exchange to the custom DB.
        query: The user's question, injected into the prompt as ``input``.
        conversation_id: Conversation identifier echoed back in responses.
        place_name: Extra prompt variable, injected as ``place_name``.
        history: Chat history passed to the prompt; ``None`` becomes a fresh
            empty list (avoids the shared mutable-default pitfall of ``[]``).
        stream: If True, return an async generator of SSE-style chunks;
            otherwise collect the full answer and return a BaseResponse.
        model_name: LLM model to use (defaults to the first configured model).
        temperature: Sampling temperature for the LLM.
        prompt_name: Name of the "llm_chat" prompt template to load.
        knowledge_base_name: Recorded alongside the message in the custom DB.

    Returns:
        An async iterator of JSON chunk strings when ``stream`` is True,
        otherwise a ``BaseResponse`` whose data contains ``final_answer``.
    """
    # Normalize the default here so every call gets its own list.
    if history is None:
        history = []

    async def chat_iterator() -> AsyncIterable[str]:
        nonlocal history
        callback = AsyncIteratorCallbackHandler()
        callbacks = [callback]
        memory = None  # no conversation memory attached to this chain

        # Persist the query so the LLM response can be saved against a
        # message record (conversation_id is intentionally empty here).
        message_id = add_message_to_db(chat_type="llm_chat", query=query, conversation_id="")
        conversation_callback = ConversationCallbackHandler(conversation_id="", message_id=message_id,
                                                            chat_type="llm_chat",
                                                            query=query)
        callbacks.append(conversation_callback)

        model = get_ChatOpenAI(
            model_name=model_name,
            temperature=temperature,
            callbacks=callbacks,
        )

        prompt_template = get_prompt_template("llm_chat", prompt_name)
        input_msg = History(role="user", content=prompt_template).to_msg_template(False)
        chat_prompt = ChatPromptTemplate.from_messages([input_msg])

        chain = LLMChain(prompt=chat_prompt, llm=model, memory=memory)

        # Begin a task that runs in the background; wrap_done signals
        # callback.done when the chain finishes so aiter() terminates.
        task = asyncio.create_task(wrap_done(
            chain.acall({"input": query, "history": history, "place_name": place_name}),
            callback.done),
        )
        final_answer = ""
        if stream:
            # Stream each token as it arrives, then emit a final chunk with
            # the complete answer and finish=True.
            async for token in callback.aiter():
                final_answer += token
                yield create_chat_response(token, instruction="-1", unique_id=conversation_id, finish=False,
                                           code=200, data_content="")
            # NOTE(review): chat_type here is "knowledge_base_chat" while the
            # message-db record above uses "llm_chat" — confirm intentional.
            add_message_to_custom_db(user_id=uid, conversation_id=conversation_id, chat_type="knowledge_base_chat",
                                     query=query, response=final_answer, instruction="-1",
                                     knowledge_base_name=knowledge_base_name)
            yield create_chat_response(final_answer, instruction="-1", unique_id=conversation_id, finish=True,
                                       code=200, data_content="")
        else:
            # Collect the whole answer before emitting a single chunk.
            # NOTE(review): unlike the stream branch, this path never calls
            # add_message_to_custom_db — confirm whether that is deliberate.
            async for token in callback.aiter():
                final_answer += token
            yield create_chat_response(final_answer, instruction="-1", unique_id=conversation_id, finish=True,
                                       code=200, data_content="")

        await task

    if stream:
        return chat_iterator()
    else:
        # Non-stream: drain the iterator and assemble the complete answer.
        answer = ""
        async for chunk in chat_iterator():
            # Assumes create_chat_response emits JSON shaped like
            # {"messages": {"response": ...}} — TODO confirm against helper.
            data = json.loads(chunk)
            answer += data["messages"]["response"]

        return BaseResponse(
            code=200,
            msg="Success",
            data={
                "final_answer": answer
            }
        )
