from fastapi import Body, Request
from fastapi.responses import StreamingResponse
from configs.model_config import LLM_MODELS, TEMPERATURE, HISTORY_LEN, MAX_TOKENS
from configs.kb_config import SCORE_THRESHOLD, VECTOR_SEARCH_TOP_K
from configs.other_config import INSTRUCTIONS, DEFAULT_CHARACTER, DEFAULT_NICKNAME
from server.utils import BaseResponse
from configs.logging_config import configure_logging
from loguru import logger
from server.utils import create_chat_response_event
from server.chat.utils import generate_request_id, log_content
from sse_starlette.sse import EventSourceResponse
from server.chat.utils import llm_chat
from server.db.repository.message_repository import get_message_by_user_id
from typing import Optional
import datetime

# Initialize the project-wide loguru logging configuration at import time.
configure_logging()


async def mars_agent_chat(uid: str = Body(..., description="用户ID"),
                          query: str = Body(..., description="用户输入", examples=["恼羞成怒"]),
                          stream: bool = Body(False, description="流式输出"),
                          nickname: Optional[str] = Body(DEFAULT_NICKNAME, description="用户昵称"),
                          character: Optional[str] = Body(DEFAULT_CHARACTER, description="性格")
                          ):
    """Chat endpoint: run intent recognition first, then either answer the
    recognized instruction directly or delegate to the LLM agent.

    Args:
        uid: User ID, used to fetch chat history and for request logging.
        query: Raw user input; must be non-empty after stripping.
        stream: When True, events are delivered as SSE (EventSourceResponse);
            otherwise the same event generator is wrapped in a plain
            StreamingResponse.
        nickname: Optional user nickname forwarded to the agent.
        character: Optional persona forwarded to the agent.

    Returns:
        BaseResponse(code=400) for an empty query, BaseResponse(code=50003)
        when history retrieval fails, otherwise a streaming response of
        chat events.
    """
    # Require a non-empty query.
    query = query.strip()
    if not query:
        return BaseResponse(code=400, msg="Query cannot be empty.")

    # Fetch this user's recent chat history (used only by the agent path).
    try:
        histories = get_message_by_user_id(user_id=uid, history_len=HISTORY_LEN)
    except Exception as e:
        logger.error(f"Failed to get chat history: {e}")
        return BaseResponse(code=50003, msg="Failed to retrieve chat history.")

    # Unique ID for this request; prefixed below to mark the handling path
    # ("D_" = direct answer, "I_" = instruction).
    request_id = generate_request_id()

    # Ask the intent-recognition model; fall back to "-1" (no intent) on error
    # so the request still reaches the agent path instead of failing outright.
    data_content = None
    try:
        instruction = await llm_chat(query, "Intention_Recognition_Expert", 0.1)
    except Exception as e:
        logger.error(f"Failed to recognize the instruction: {e}")
        instruction = "-1"

    # Both paths wrap the same event generator; only the response class
    # differs between streaming (SSE) and non-streaming clients.
    response_class = EventSourceResponse if stream else StreamingResponse

    if instruction in INSTRUCTIONS:
        # A known instruction was recognized — handle it without the agent.
        if instruction == "200":
            # "200" is the current-time instruction: answer it directly.
            request_id = "D_" + request_id
            now = datetime.datetime.now()
            answer = f"现在是{now.year}年{now.month}月{now.day}日{now.hour}时{now.minute}分{now.second}秒"
        else:
            # Other instructions carry no text answer at this layer.
            request_id = "I_" + request_id
            answer = None
        # When a text answer exists, emit "-100" (plain answer marker)
        # instead of the raw instruction code.
        responses = response_class(
            create_chat_response_event(response_text=answer,
                                       instruction="-100" if answer is not None else instruction,
                                       request_id=request_id,
                                       finish=True, data_content=data_content))
        log_content(uid, query, answer, instruction=instruction, request_id=request_id)
        return responses

    # No recognized intent: delegate to the LLM agent.
    request_id = "D_" + request_id
    from server.chat.agent_chat_ import agent_chat
    response = await agent_chat(uid=uid,
                                query=query,
                                stream=stream,
                                history=histories,
                                request_id=request_id,
                                model_name=LLM_MODELS[0],
                                temperature=TEMPERATURE,
                                max_tokens=MAX_TOKENS,
                                instruction="-1",
                                prompt_name="default",
                                nickname=nickname,
                                character=character)
    return response_class(response)
