from fastapi import Body, Request
from fastapi.responses import StreamingResponse
from configs.model_config import LLM_MODELS, TEMPERATURE, HISTORY_LEN, MAX_TOKENS
from configs.kb_config import SCORE_THRESHOLD, VECTOR_SEARCH_TOP_K
from configs.other_config import INSTRUCTIONS
from server.redis.redis_utils import ChatHistory, RedisClient
from server.utils import BaseResponse
from startup import REDIS
from configs import logging_config
from loguru import logger
from server.utils import create_chat_response_event
import datetime
from server.chat.utils import generate_request_id, log_content
from sse_starlette.sse import EventSourceResponse
from server.chat.utils import llm_chat
from typing import Optional
from instruction.utils import CHINESE_WEEKDAY


async def mars_knowledgebase_chat(uid: str = Body(..., description="用户ID"),
                                  query: str = Body(..., description="用户输入", examples=["恼羞成怒"]),
                                  stream: bool = Body(False, description="流式输出"),
                                  knowledge_base_name: Optional[str] = Body("Mars", description="知识库名称", examples=["Mars"]),
                                  prompt_name: Optional[str] = Body("knowledge_Mars", description="提示词名称", examples=["knowledge_Mars"]),
                                  request: Request = None
                                  ):
    """Mars knowledge-base chat endpoint.

    Flow: open a Redis connection -> load the user's chat history -> run
    intent recognition on the query. A recognized instruction is answered
    directly (request id prefixed "I_"); anything else is delegated to the
    knowledge-base chat chain (request id prefixed "D_"). Setup failures
    short-circuit with a ``BaseResponse`` error payload.
    """
    # Build a Redis client for this request.
    # NOTE(review): a new client per request — presumably RedisClient pools
    # connections internally (max_connections); verify against its impl.
    try:
        redis_client = RedisClient(
            host=REDIS["host"],
            port=REDIS["port"],
            db=REDIS["db"]["Mars"],
            max_connections=REDIS["max_connections"]
        )
    except ConnectionError as e:
        # NOTE(review): this catches the builtin ConnectionError; confirm
        # RedisClient raises it (redis-py's ConnectionError is a RedisError
        # subclass) — otherwise only the generic handler below fires.
        logger.error(f"ConnectionError: {e}")
        return BaseResponse(code=50002, msg="Failed to connect to Redis server.")
    except Exception as e:
        # Any other failure while constructing the client.
        logger.error(f"Unexpected error while initializing Redis client: {e}")
        return BaseResponse(code=50000, msg="An unexpected error occurred while connecting to Redis.")

    # Wrap per-user conversation storage.
    try:
        chat_history = ChatHistory(uid, redis_client, HISTORY_LEN)
    except Exception as e:
        logger.error(f"Failed to initialize ChatHistory: {e}")
        return BaseResponse(code=50001, msg="Failed to initialize chat history.")

    # Fetch the stored dialogue turns for this uid.
    try:
        histories = chat_history.get_history()
    except Exception as e:
        logger.error(f"Failed to get chat history: {e}")
        return BaseResponse(code=50003, msg="Failed to retrieve chat history.")

    # Unique id for this request; prefixed below by branch ("I_" / "D_").
    request_id = generate_request_id()

    # Intent recognition; "-1" doubles as the "no instruction" sentinel.
    data_content = None
    try:
        instruction = await llm_chat(query, "Intention_Recognition_Expert", 0.1)
    except Exception as e:
        logger.error(f"Failed to recognize the instruction: {e}")
        instruction = "-1"

    if instruction not in INSTRUCTIONS:
        # Dialogue path: delegate to the knowledge-base chat chain.
        request_id = "D_" + request_id
        from server.chat.knowledge_base_chat_long_memory import knowledge_base_chat
        response = await knowledge_base_chat(uid=uid,
                                             query=query,
                                             stream=stream,
                                             history=histories,
                                             knowledge_base_name=knowledge_base_name,
                                             request_id=request_id,
                                             score_threshold=SCORE_THRESHOLD,
                                             top_k=VECTOR_SEARCH_TOP_K,
                                             model_name=LLM_MODELS[0],
                                             temperature=TEMPERATURE,
                                             max_tokens=MAX_TOKENS,
                                             request=request,
                                             prompt_name=prompt_name)
        return EventSourceResponse(response) if stream else StreamingResponse(response)

    # Instruction path: answer locally without the knowledge base.
    request_id = "I_" + request_id
    answer = None
    if instruction == "1":
        # Instruction "1": report today's date, then reset the instruction
        # to the "-1" sentinel so the event carries a plain text answer.
        today = datetime.datetime.now()
        weekday_cn = CHINESE_WEEKDAY[today.weekday()]
        answer = f"今天是: {today.year}年 {today.month}月{today.day}日 {weekday_cn}"
        instruction = '-1'
    events = create_chat_response_event(response_text=answer, instruction=instruction,
                                        request_id=request_id, finish=True,
                                        data_content=data_content)
    responses = EventSourceResponse(events) if stream else StreamingResponse(events)
    log_content(uid, query, answer, instruction=instruction, request_id=request_id)
    return responses
