from fastapi import Body, Request, HTTPException
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from configs.other_config import SECRET_KEY, ALGORITHM
from server.utils import wrap_done, get_ChatOpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable
import asyncio
import json
from langchain.prompts.chat import ChatPromptTemplate
from typing import List, Optional, Union
from server.chat.utils import History
from langchain.prompts import PromptTemplate
from server.utils import get_prompt_template
from server.memory.conversation_db_buffer_memory import ConversationBufferDBMemory
from server.db.repository import add_message_to_db
from server.callback_handler.conversation_callback_handler import ConversationCallbackHandler
from langchain.callbacks import AsyncIteratorCallbackHandler
from server.db.repository import get_message_by_user_id
from loguru import logger
from configs.logging_config import configure_logging
from server.utils import decode_verify_token, BaseResponse

# Apply the project's loguru logging configuration once at module import time.
configure_logging()


async def suggest_query(conversation_id: str = Body(..., description="对话框ID"),
               stream: bool = Body(False, description="流式输出"),
               model_name: str = Body(LLM_MODELS[0], description="LLM 模型名称。"),
               temperature: float = Body(0.7, description="LLM 采样温度", ge=0.0, le=2.0),
               prompt_name: str = Body("suggest_query", description="使用的prompt模板名称(在configs/prompt_config.py中配置)"),
               request: Request = None
               ):
    """Suggest follow-up questions for a conversation.

    Builds an LLMChain from the ``suggest_query`` prompt template, feeds it the
    last 3 messages of the conversation as ``{context}``, and either streams the
    raw LLM tokens as server-sent events (``stream=True``) or collects the full
    answer, splits it into individual suggestions and returns them.

    Returns:
        ``EventSourceResponse`` yielding ``{"text": token}`` JSON chunks when
        ``stream`` is True; otherwise a ``BaseResponse`` with
        ``data={"suggestions": [...], "conversation_id": ...}``.
        On token-verification failure, a ``BaseResponse`` with code 401.
    """
    # Verify the caller's token; on failure answer 401 instead of propagating.
    try:
        token_info = decode_verify_token(request, SECRET_KEY, ALGORITHM)
    except HTTPException as e:
        # NOTE(review): assumes e.detail is a dict carrying "msg"/"error" keys —
        # confirm decode_verify_token always raises HTTPException in that shape.
        return BaseResponse(code=401, msg=e.detail.get("msg"), data={"error": e.detail.get("error")})

    async def chat_iterator() -> AsyncIterable[str]:
        # Callback that exposes streamed LLM tokens as an async iterator.
        callback = AsyncIteratorCallbackHandler()
        callbacks = [callback]
        memory = None  # no chain memory; history is injected via the prompt instead

        model = get_ChatOpenAI(
            model_name=model_name,
            temperature=temperature,
            max_tokens=None,
            callbacks=callbacks,
        )

        prompt_template = get_prompt_template("llm_chat", prompt_name)
        input_msg = History(role="user", content=prompt_template).to_msg_template(False)
        chat_prompt = ChatPromptTemplate.from_messages([input_msg])

        chain = LLMChain(prompt=chat_prompt, llm=model, memory=memory)

        # Last 3 messages of this conversation, substituted into the prompt's
        # {context} placeholder. NOTE(review): conversation_id is passed as
        # user_id here — confirm that mapping is intentional.
        history = get_message_by_user_id(user_id=conversation_id, history_len=3)

        # Run the chain in the background; tokens arrive through `callback`.
        task = asyncio.create_task(wrap_done(
            chain.acall({"context": history}),
            callback.done),
        )

        if stream:
            # Use server-sent-events to stream the response token by token.
            async for token in callback.aiter():
                yield json.dumps(
                    {"text": token},
                    ensure_ascii=False)
        else:
            # Accumulate the full answer and emit it as a single JSON chunk.
            answer = ""
            async for token in callback.aiter():
                answer += token
            logger.info(f"会话ID：{conversation_id}, \n历史对话：{history}")
            yield json.dumps(
                {"text": answer},
                ensure_ascii=False)

        await task

    if stream:
        return EventSourceResponse(chat_iterator())
    else:
        # Non-streaming: drain the iterator to assemble the complete answer.
        final_answer = ""
        async for chunk in chat_iterator():
            data = json.loads(chunk)
            final_answer += data["text"]

        # Split the answer on "+" — the prompt template is expected to join the
        # suggestions with that separator. (str.split never raises, so the
        # original bare try/except around this line was dead code; its comment
        # also wrongly claimed a "\n" split.)
        suggestions = final_answer.split("+")
        logger.info(f"推荐的后续问题:\n{suggestions}")

        # Drop empty strings produced by leading/trailing/repeated separators.
        suggestions = [suggestion for suggestion in suggestions if suggestion]

        return BaseResponse(
            code=200,
            msg="Success",
            data={
                "suggestions": suggestions,
                "conversation_id": conversation_id
            }
        )

        
