from fastapi import Body, Request
from fastapi.responses import StreamingResponse
from configs.model_config import LLM_MODELS, TEMPERATURE, HISTORY_LEN, MAX_TOKENS
from configs.kb_config import SCORE_THRESHOLD, VECTOR_SEARCH_TOP_K
from server.utils import BaseResponse
from loguru import logger
from instruction.XiaoKaPC.XiaoKaPC import intent_recognition
from instruction.utils import CHINESE_WEEKDAY
from server.utils import create_chat_response, create_chat_response_event
import datetime
from server.chat.utils import generate_unique_id, log_content
from server.FAQ.utils import load_data
from server.FAQ.faq_module import faq
from configs.other_config import QA_FILE_PATH, KV_FILE_PATH, TRUCK2ID_FILE_PATH, INSTRUCTIONS
from configs.model_config import FAQ_THRESHOLD
from sse_starlette.sse import EventSourceResponse
from server.db.repository.message_repository import get_message_by_user_id
from server.chat.utils import llm_chat, log_content
from server.chat.knowledge_base_chat_xiaoka import knowledge_base_chat
from configs.basic_config import logger
from configs.logging_config import configure_logging

configure_logging()

# FAQ model name.
FAQ_MODEL = "faq-question-answering_chinese-base"
# FAQ QA data, loaded once at import time from QA_FILE_PATH.
# NOTE(review): QA_datas/KV_datas/FAQ_MODEL are not referenced in this module's
# visible code — presumably consumed by the imported `faq` module or importers;
# verify before removing.
QA_datas = load_data(QA_FILE_PATH)
# FAQ key/value data, loaded once at import time from KV_FILE_PATH.
KV_datas = load_data(KV_FILE_PATH)


async def _kb_chat(uid: str, query: str, stream: bool, knowledge_base_name: str,
                   prompt_name: str, request: Request, request_id: str,
                   instruction: str, truck_name: str):
    # Run knowledge-base retrieval chat with this module's standard settings
    # (score threshold, top-k, model, temperature, max tokens) and the user's
    # recent message history.
    histories = get_message_by_user_id(uid, HISTORY_LEN)
    return await knowledge_base_chat(uid=uid,
                                     query=query,
                                     stream=stream,
                                     history=histories,
                                     knowledge_base_name=knowledge_base_name,
                                     request_id=request_id,
                                     score_threshold=SCORE_THRESHOLD,
                                     top_k=VECTOR_SEARCH_TOP_K,
                                     model_name=LLM_MODELS[0],
                                     temperature=TEMPERATURE,
                                     max_tokens=MAX_TOKENS,
                                     request=request,
                                     prompt_name=prompt_name,
                                     instruction=instruction,
                                     truck_name=truck_name)


async def xiaokapc_knowledgebase_chat(uid: str = Body(..., description="用户ID"),
                                      query: str = Body(..., description="用户输入", examples=["恼羞成怒"]),
                                      stream: bool = Body(False, description="流式输出"),
                                      knowledge_base_name: str = Body("XiaoKa", description="知识库名称"),
                                      prompt_name: str = Body("knowledge_XiaoKa", description="使用的prompt模板名称"),
                                      request: Request = None
                                      ):
    """Chat entry point for the XiaoKa PC knowledge-base assistant.

    Pipeline:
      1. Reject an empty query with a 400 ``BaseResponse``.
      2. Rule-based (RE) intent recognition; a recognized instruction
         short-circuits with an instruction-only SSE event.
      3. Otherwise, LLM-based intent recognition.
      4. Instruction "600" (truck lookup) and unrecognized intents ("-1")
         are answered via knowledge-base retrieval chat; any other
         recognized instruction returns an instruction-only SSE event.

    Args:
        uid: user id, used for history lookup and request logging.
        query: raw user input; surrounding whitespace is stripped.
        stream: whether the client wants a streamed (SSE) response.
        knowledge_base_name: knowledge base to search.
        prompt_name: prompt template name for knowledge-base chat.
        request: original FastAPI request, forwarded downstream.

    Returns:
        ``BaseResponse(code=400)`` for an empty query; otherwise an
        ``EventSourceResponse`` (or ``StreamingResponse`` when
        ``stream`` is false on the fallback path).
    """
    query = query.strip()
    if not query:
        return BaseResponse(code=400, msg="Query cannot be empty.")

    data_content = None

    # Stage 1: rule-based (RE) intent recognition; any failure degrades to
    # "-1" (no intent) so the request can still proceed.
    try:
        instruction = await intent_recognition(query)
    except Exception as e:
        logger.error(f"Failed to RE recognize intent: {e}")
        instruction = "-1"
    if instruction in INSTRUCTIONS:
        request_id = "I_" + generate_unique_id()
        answer = None
        log_content(uid=uid, query=query, answer=answer, instruction=instruction, unique_id=request_id)
        return EventSourceResponse(create_chat_response_event(answer, instruction, request_id, data_content, finish=True))

    # Stage 2: LLM-based intent recognition; same degrade-to-"-1" policy.
    try:
        instruction = await llm_chat(query, "XiaoKa_Intention_Recognition", 0.1)
    except Exception as e:
        logger.error(f"Failed to recognize intent: {e}")
        instruction = "-1"

    # Unique id for logging/tracing; prefixed below per branch
    # ("I_" = instruction hit, "D_" = default retrieval chat).
    request_id = generate_unique_id()

    if instruction in INSTRUCTIONS:
        request_id = "I_" + request_id
        if instruction == "600":
            # Fix: this extraction call used to be unguarded while every
            # other recognition call is try/except-wrapped, so an LLM
            # failure crashed the request. Degrade to an empty truck name.
            try:
                truck_name = await llm_chat(query, "extraction_truck2", 0.1)
            except Exception as e:
                logger.error(f"Failed to extract truck name: {e}")
                truck_name = ""
            response = await _kb_chat(uid, query, stream, knowledge_base_name,
                                      prompt_name, request, request_id,
                                      instruction="600", truck_name=truck_name)
            # NOTE(review): unlike the "-1" branch below, this branch ignores
            # `stream` and always answers via SSE — confirm that is intended.
            return EventSourceResponse(response)
        # Any other recognized instruction: log it and emit a single
        # instruction-only SSE event with no generated answer.
        answer = None
        log_content(uid, query, answer, instruction, request_id)
        return EventSourceResponse(create_chat_response_event(answer, instruction, request_id, finish=True, data_content=data_content))

    # No intent recognized: fall back to plain knowledge-base retrieval chat.
    request_id = "D_" + request_id
    response = await _kb_chat(uid, query, stream, knowledge_base_name,
                              prompt_name, request, request_id,
                              instruction="-1", truck_name="")
    if stream:
        return EventSourceResponse(response)
    return StreamingResponse(response)
