from fastapi import Body, HTTPException, Request
from fastapi.responses import StreamingResponse
from sse_starlette.sse import EventSourceResponse
from server.utils import decode_verify_token
from configs.other_config import SECRET_KEY, ALGORITHM
from server.utils import BaseResponse
from server.db.repository.message_repository import get_kepler_message_by_user_id
from server.chat.utils import generate_unique_id
from server.chat.knowledge_base_chat_kepler import knowledge_base_chat
from configs import VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD, TEMPERATURE, MAX_TOKENS, LLM_MODELS
from configs.logging_config import configure_logging
from loguru import logger

# Initialize loguru sinks/formatting for this module (side effect at import time).
configure_logging()

# Number of most recent stored exchanges fed back to the model as chat context.
HISTORY_LEN = 5


# This endpoint requires token verification and adds new fields; the endpoint
# above (elsewhere in the project) does not verify tokens.
async def keplerpc_knowledgebase_chat(
        uid: str = Body(..., description="用户ID"),
        query: str = Body(..., description="用户输入", examples=["你好"]),
        knowledge_base_name: str = Body(..., description="知识库名称", examples=["samples"]),
        stream: bool = Body(False, description="流式输出"),
        prompt_name: str = Body(
            ...,
            description="使用的prompt模板名称(在configs/prompt_config.py中配置)"
        ),
        temperature: float = Body(TEMPERATURE, description="LLM 采样温度", ge=0.0, le=1.0),
        request: Request = None
):
    """Token-verified knowledge-base chat endpoint.

    Verifies the caller's token, loads the user's recent chat history,
    delegates to ``knowledge_base_chat`` with project-configured retrieval
    and model settings, and returns the result as an SSE stream (when
    ``stream`` is true) or a plain streaming response otherwise.

    Returns:
        BaseResponse with code 401 on token failure, code 50006 on chat
        failure; otherwise an EventSourceResponse or StreamingResponse
        wrapping the chat output.
    """
    # Verify the token; the decoded payload itself is not needed here,
    # only that verification succeeds (raises HTTPException otherwise).
    try:
        decode_verify_token(request, SECRET_KEY, ALGORITHM)
    except HTTPException as e:
        # NOTE(review): assumes e.detail is a dict carrying "msg"/"error"
        # keys — confirm against decode_verify_token's raise sites.
        return BaseResponse(code=401, msg=e.detail.get("msg"), data={"error": e.detail.get("error")})

    # "D_"-prefixed unique id tags this request for downstream tracing.
    request_id = "D_" + generate_unique_id()
    # Last HISTORY_LEN stored exchanges for this user form the chat context.
    histories = get_kepler_message_by_user_id(user_id=uid, history_len=HISTORY_LEN)
    try:
        response = await knowledge_base_chat(uid=uid,
                                            query=query,
                                            request_id=request_id,
                                            history=histories,
                                            knowledge_base_name=knowledge_base_name,
                                            stream=stream,
                                            prompt_name=prompt_name,
                                            top_k=VECTOR_SEARCH_TOP_K,
                                            score_threshold=SCORE_THRESHOLD,
                                            model_name=LLM_MODELS[0],
                                            temperature=temperature,
                                            max_tokens=MAX_TOKENS,
                                            request=request)
    except Exception as e:
        # Boundary handler: log and convert to a structured error response.
        logger.error(f"Failed to perform knowledge base chat: {e}")
        return BaseResponse(code=50006, msg="Failed to perform knowledge base chat.")

    # SSE for streaming clients; otherwise a plain streaming body.
    if stream:
        return EventSourceResponse(response)
    return StreamingResponse(response)