from configs.logging_config import configure_logging
from loguru import logger
from server.chat.knowledge_base_chat_kanshousuo import knowledge_base_chat
from server.chat.utils import generate_unique_id, log_content
from server.chat.chat_kanshousuo import recreate_query
from fastapi import Body, Request, HTTPException
from fastapi.responses import StreamingResponse
from server.utils import BaseResponse
from configs.kb_config import VECTOR_SEARCH_TOP_K
from configs.model_config import HISTORY_LEN, LLM_MODELS, TEMPERATURE, MAX_TOKENS
from sse_starlette.sse import EventSourceResponse
from server.utils import create_chat_response_event
from server.db.repository.message_repository import get_message_by_user_id
from server.chat.utils import llm_chat
from configs.other_config import INSTRUCTIONS
from server.utils import decode_verify_token
from configs.other_config import SECRET_KEY, ALGORITHM

# Minimum relevance score a retrieved document must reach to be used in answers.
# NOTE(review): assumes higher score = better match — confirm against the
# vector store's scoring convention (some stores return distance, where lower is better).
SCORE_THRESHOLD = 0.9

# Install loguru handlers once at import time so module-level `logger` calls are routed.
configure_logging()


# Knowledge-base chat endpoint; parameters include the user id (uid). No token verification.
async def kanshousuopc_knowledgebase_chat(
        uid: str = Body(..., description="用户ID"),
        query: str = Body(..., description="用户输入", examples=["你好"]),
        knowledge_base_name: str = Body(..., description="知识库名称", examples=["samples"]),
        stream: bool = Body(False, description="流式输出"),
        prompt_name: str = Body(
            ...,
            description="使用的prompt模板名称(在configs/prompt_config.py中配置)"
        ),
        request: Request = None
):
    """Knowledge-base chat endpoint without token verification.

    Builds a "D_"-prefixed unique request id, loads the user's recent chat
    history, and delegates to ``knowledge_base_chat`` with the project-wide
    retrieval/LLM defaults. Returns an SSE response when ``stream`` is true,
    a plain streaming response otherwise, or a ``BaseResponse`` with code
    50006 if the underlying chat call raises.
    """
    request_id = "D_" + generate_unique_id()
    histories = get_message_by_user_id(user_id=uid, history_len=HISTORY_LEN)
    try:
        # Delegate to the shared knowledge-base chat implementation.
        answer_stream = await knowledge_base_chat(
            uid=uid,
            query=query,
            request_id=request_id,
            history=histories,
            knowledge_base_name=knowledge_base_name,
            stream=stream,
            prompt_name=prompt_name,
            top_k=VECTOR_SEARCH_TOP_K,
            score_threshold=SCORE_THRESHOLD,
            model_name=LLM_MODELS[0],
            temperature=TEMPERATURE,
            max_tokens=MAX_TOKENS,
            request=request,
        )
    except Exception as e:
        logger.error(f"Failed to perform knowledge base chat: {e}")
        return BaseResponse(code=50006, msg="Failed to perform knowledge base chat.")
    # SSE for streaming clients, raw streaming body otherwise.
    return EventSourceResponse(answer_stream) if stream else StreamingResponse(answer_stream)


# This endpoint requires token verification; the endpoint above does not.
async def kanshousuopc_knowledgebase_chat_token(
        uid: str = Body(..., description="用户ID"),
        query: str = Body(..., description="用户输入", examples=["你好"]),
        knowledge_base_name: str = Body(..., description="知识库名称", examples=["samples"]),
        stream: bool = Body(False, description="流式输出"),
        prompt_name: str = Body(
            ...,
            description="使用的prompt模板名称(在configs/prompt_config.py中配置)"
        ),
        request: Request = None
):
    """Token-verified knowledge-base chat endpoint.

    Identical to ``kanshousuopc_knowledgebase_chat`` except that the request
    token is validated first via ``decode_verify_token``; an invalid token
    yields a ``BaseResponse`` with code 401. On success the call is delegated
    to ``knowledge_base_chat`` and returned as SSE (``stream=True``) or a
    plain streaming response; a failure there yields code 50006.
    """
    # Reject the request up front if the token does not verify.
    try:
        token_info = decode_verify_token(request, SECRET_KEY, ALGORITHM)
    except HTTPException as e:
        return BaseResponse(code=401, msg=e.detail.get("msg"), data={"error": e.detail.get("error")})
    request_id = "D_" + generate_unique_id()
    histories = get_message_by_user_id(user_id=uid, history_len=HISTORY_LEN)
    try:
        # Delegate to the shared knowledge-base chat implementation.
        answer_stream = await knowledge_base_chat(
            uid=uid,
            query=query,
            request_id=request_id,
            history=histories,
            knowledge_base_name=knowledge_base_name,
            stream=stream,
            prompt_name=prompt_name,
            top_k=VECTOR_SEARCH_TOP_K,
            score_threshold=SCORE_THRESHOLD,
            model_name=LLM_MODELS[0],
            temperature=TEMPERATURE,
            max_tokens=MAX_TOKENS,
            request=request,
        )
    except Exception as e:
        logger.error(f"Failed to perform knowledge base chat: {e}")
        return BaseResponse(code=50006, msg="Failed to perform knowledge base chat.")
    # SSE for streaming clients, raw streaming body otherwise.
    return EventSourceResponse(answer_stream) if stream else StreamingResponse(answer_stream)

# This endpoint requires token verification and adds extra fields (temperature, place_name); the endpoints above do not have them.
async def kanshousuopc_knowledgebase_chat_token_v2(
        uid: str = Body(..., description="用户ID"),
        query: str = Body(..., description="用户输入", examples=["你好"]),
        knowledge_base_name: str = Body(..., description="知识库名称", examples=["samples"]),
        stream: bool = Body(False, description="流式输出"),
        prompt_name: str = Body(
            ...,
            description="使用的prompt模板名称(在configs/prompt_config.py中配置)"
        ),
        temperature: float = Body(TEMPERATURE, description="LLM 采样温度", ge=0.0, le=1.0),
        place_name: str = Body("", description="场所名称", examples=["看守所"]),
        request: Request = None
):
    """Token-verified knowledge-base chat endpoint, v2.

    Adds ``temperature`` and ``place_name`` over the v1 token endpoint. Before
    the knowledge-base call, the user query may be rewritten by
    ``recreate_query`` depending on which of four scenarios applies:

    - place_name given, history present  -> prompt "recreate_query_h_p"
    - place_name given, no history       -> prompt "recreate_query_p"
    - no place_name, history present     -> prompt "recreate_query_law"
      (legal-consultation scenario)
    - no place_name, no history          -> query used as-is

    Returns an SSE response (``stream=True``) or a plain streaming response;
    code 401 on token failure, code 50006 on chat failure.
    """
    # Reject the request up front if the token does not verify.
    try:
        decode_verify_token(request, SECRET_KEY, ALGORITHM)
    except HTTPException as e:
        return BaseResponse(code=401, msg=e.detail.get("msg"), data={"error": e.detail.get("error")})
    request_id = "D_" + generate_unique_id()
    histories = get_message_by_user_id(user_id=uid, history_len=HISTORY_LEN)

    # Pick the query-rewrite prompt for the current scenario (None = no rewrite).
    rewrite_prompt = None
    log_prefix = ""
    if place_name != "":
        # Facility-specific scenario, with or without prior conversation.
        rewrite_prompt = "recreate_query_h_p" if histories else "recreate_query_p"
    elif histories:
        # Legal-consultation scenario (no facility given, but history exists).
        rewrite_prompt = "recreate_query_law"
        log_prefix = "看守所Law模块，"

    if rewrite_prompt is not None:
        logger.info(f"{log_prefix}原始query：{query}")
        rewritten = await recreate_query(uid=uid, query=query, prompt_name=rewrite_prompt, temperature=0.1, history=histories, place_name=place_name, stream=False, conversation_id=request_id)
        # recreate_query returns a response object; the rewritten text lives at
        # data.final_answer — TODO confirm this shape against recreate_query's contract.
        query = rewritten.dict()["data"]["final_answer"]
        logger.info(f"{log_prefix}优化后的query：{query}")

    try:
        # Delegate to the shared knowledge-base chat implementation; unlike v1,
        # temperature comes from the request body rather than the config default.
        response = await knowledge_base_chat(uid=uid,
                                             query=query,
                                             request_id=request_id,
                                             history=histories,
                                             knowledge_base_name=knowledge_base_name,
                                             stream=stream,
                                             prompt_name=prompt_name,
                                             top_k=VECTOR_SEARCH_TOP_K,
                                             score_threshold=SCORE_THRESHOLD,
                                             model_name=LLM_MODELS[0],
                                             temperature=temperature,
                                             max_tokens=MAX_TOKENS,
                                             request=request)
    except Exception as e:
        logger.error(f"Failed to perform knowledge base chat: {e}")
        return BaseResponse(code=50006, msg="Failed to perform knowledge base chat.")
    # SSE for streaming clients, raw streaming body otherwise.
    return EventSourceResponse(response) if stream else StreamingResponse(response)

