from fastapi import Body, Request
from fastapi.responses import StreamingResponse
from configs.model_config import LLM_MODELS, TEMPERATURE, HISTORY_LEN, MAX_TOKENS
from configs.kb_config import SCORE_THRESHOLD, VECTOR_SEARCH_TOP_K
from server.redis.redis_utils import ChatHistory, RedisClient
from server.utils import BaseResponse
from startup import REDIS
from configs.logging_config import configure_logging
from loguru import logger
from instruction.XiaoKaAPP.XiaoKaAPP import intent_recognition
from instruction.utils import CHINESE_WEEKDAY
from server.utils import create_chat_response, create_chat_response_event
import datetime
from server.chat.utils import generate_unique_id, log_content, llm_chat
from server.FAQ.utils import load_data
from server.FAQ.faq_module import faq
from configs.other_config import QA_FILE_PATH, KV_FILE_PATH, TRUCK2ID_FILE_PATH, INSTRUCTIONS
from configs.model_config import FAQ_THRESHOLD
from sse_starlette.sse import EventSourceResponse
from server.db.repository.message_repository import get_message_by_user_id
from server.chat.knowledge_base_chat_ import knowledge_base_chat

configure_logging()


def _instruction_response(uid: str, query: str, instruction: str, stream: bool):
    """Log a matched instruction and build its terminal response.

    Used when intent recognition (regex or LLM) maps the query to a known
    instruction: there is no LLM answer text (answer is None), only a single
    "finish" event carrying the instruction code.

    Args:
        uid: user id, recorded in the chat log.
        query: the raw user query that matched.
        instruction: matched instruction code (member of INSTRUCTIONS).
        stream: caller's streaming flag; selects SSE vs plain streaming wrapper.

    Returns:
        EventSourceResponse when stream is True, otherwise StreamingResponse.
        Both wrap the same single-event generator.
    """
    # Instruction hits get an "I_"-prefixed request id so logs can be
    # distinguished from knowledge-base ("D_") requests.
    request_id = "I_" + generate_unique_id()
    answer = None
    log_content(uid=uid, query=query, answer=answer, instruction=instruction, unique_id=request_id)
    # data_content is always None for instruction responses.
    event = create_chat_response_event(answer, instruction, request_id, None, finish=True)
    if stream:
        return EventSourceResponse(event)
    # NOTE(review): the non-stream path also returns a streaming body
    # (StreamingResponse over the same generator) — preserved from the
    # original behavior; confirm callers expect this.
    return StreamingResponse(event)


async def xiaokaapp_knowledgebase_chat(uid: str = Body(..., description="用户ID"),
                                       query: str = Body(..., description="用户输入", examples=["恼羞成怒"]),
                                       stream: bool = Body(False, description="流式输出"),
                                       knowledge_base_name: str = Body("XiaoKa", description="知识库名称"),
                                       prompt_name: str = Body("knowledge_XiaoKa", description="使用的prompt模版名称"),
                                       request: Request = None
                                       ):
    """XiaoKa APP chat endpoint: instruction routing first, then KB chat.

    Pipeline:
      1. Reject empty queries (HTTP-level 400 via BaseResponse).
      2. Try rule-based intent recognition (`intent_recognition`); if the
         result is a known instruction, answer immediately with an
         instruction event.
      3. Otherwise try LLM-based intent recognition (`llm_chat` with the
         Intention_Recognition_Expert prompt, temperature 0.1); same
         short-circuit on a match.
      4. Fall back to knowledge-base chat with the user's recent history.

    Recognition failures are logged and treated as "no intent" ("-1"),
    never raised to the client.

    Returns:
        BaseResponse(400) for an empty query, or an EventSourceResponse /
        StreamingResponse produced by the instruction or KB-chat path.
    """
    # Unique id for this request; re-prefixed per routing outcome below.
    request_id = generate_unique_id()

    query = query.strip()
    if not query:
        return BaseResponse(code=400, msg="Query cannot be empty.")

    # Stage 1: rule-based (regex) intent recognition.
    try:
        instruction = await intent_recognition(query)
    except Exception as e:
        logger.error(f"Failed to RE recognize intent: {e}")
        instruction = "-1"
    if instruction in INSTRUCTIONS:
        return _instruction_response(uid, query, instruction, stream)

    # Stage 2: LLM-based intent recognition (low temperature for determinism).
    try:
        instruction = await llm_chat(query, "Intention_Recognition_Expert", 0.1)
    except Exception as e:
        logger.error(f"Failed to LLM recognize intent: {e}")
        instruction = "-1"
    if instruction in INSTRUCTIONS:
        return _instruction_response(uid, query, instruction, stream)

    # Stage 3: no instruction matched — knowledge-base chat with history.
    # "D_" prefix marks dialogue/KB requests in the logs.
    request_id = "D_" + request_id
    histories = get_message_by_user_id(user_id=uid, history_len=HISTORY_LEN)
    response = await knowledge_base_chat(uid=uid,
                                         query=query,
                                         stream=stream,
                                         history=histories,
                                         knowledge_base_name=knowledge_base_name,
                                         request_id=request_id,
                                         score_threshold=SCORE_THRESHOLD,
                                         top_k=VECTOR_SEARCH_TOP_K,
                                         model_name=LLM_MODELS[0],
                                         temperature=TEMPERATURE,
                                         max_tokens=MAX_TOKENS,
                                         request=request,
                                         prompt_name=prompt_name)
    if stream:
        return EventSourceResponse(response)
    return StreamingResponse(response)
