import json
from fastapi import APIRouter, HTTPException

from db.Do import BaseReq, we_library, ChatHistory, ChatRole
from data import use_llm
from api import chat_tool

router = APIRouter()


@router.post("/chat")
def chat(req: BaseReq):
    """
    Enhanced chat endpoint.

    Supports:
    - long-term memory (retrieval + conversation summarization)
    - tool calling, with a follow-up LLM pass that explains tool results
    - code execution (via the tool system)
    - image input (currently disabled -- see the commented-out block below)

    Returns a dict with the assistant reply (``resp``), the persisted chat
    id (``id``) and the raw tool calls issued by the model (``tool_calls``).
    """

    # Initialize components.
    memory = chat_tool.LongTermMemory()
    tool_system = chat_tool.ToolCallingSystem()

    # Parse the request.
    chat_id = req.id or 0
    current_role = req.currentRole
    prompt = req.prompt

    # 1. Multimodal input is currently disabled; the prompt is used as-is.
    # image_processor = chat_tool.ImageProcessor()
    # image_data = req.image_data
    full_prompt = prompt
    # if image_data:
    #     image_description = image_processor.describe_image(image_data)
    #     full_prompt = f"{prompt}\n[图片内容: {image_description}]"

    # 2. Build the message history.
    messages = []
    history_messages = []  # defensively initialized; only filled for existing chats

    # Role system message ("请选择角色" is the front-end's "no role selected" sentinel).
    if current_role and current_role != "请选择角色":
        role_info = chat_tool.get_role_system_message(current_role)
        messages.append(role_info)

    # Load prior messages for an existing conversation.
    if chat_id != 0:
        history_messages = chat_tool.load_chat_history(chat_id)
        messages.extend(history_messages)

        # Summarize the conversation into long-term memory.
        summary = memory.summarize_conversation(history_messages)
        memory.add_memory(summary, {"chat_id": chat_id})

    # 3. Retrieve related memories: a new (or empty-history) conversation is
    # keyed on the prompt, an existing one on its most recent message.
    if chat_id == 0 or not history_messages:
        related_memories = memory.retrieve(prompt)
    else:
        related_memories = memory.retrieve(history_messages[-1]['content'])

    # Inject retrieved memories as extra system context.
    if related_memories:
        memory_context = "\n".join([m['text'] for m in related_memories])
        messages.append({"role": "system", "content": f"相关记忆:\n{memory_context}"})

    # 4. Append the user message.
    messages.append({"role": "user", "content": full_prompt})

    # 5. Call the LLM with tool calling enabled.
    # (The original guarded with `if tool_system else None`, but tool_system
    # is always a fresh instance, so the guard was dead code.)
    llm_response = chat_tool.enhanced_generate_response(
        messages,
        tools=tool_system.get_tool_definitions()
    )

    # 6. Handle the two response types.
    tool_calls = []
    if llm_response["type"] == "tool_calls":
        tool_calls = llm_response["content"]
        tool_results = []
        for call in tool_calls:
            # Arguments usually arrive as a JSON string; fall back to the
            # raw value if it is not valid JSON.
            try:
                arguments = json.loads(call["function"]["arguments"])
            except json.JSONDecodeError:
                arguments = call["function"]["arguments"]

            # Execute the tool.
            result = tool_system.execute_tool(call["function"]["name"], arguments)

            tool_results.append({
                "tool_call_id": call["id"],
                "role": "tool",
                "name": call["function"]["name"],
                "content": result
            })

        # NOTE(review): OpenAI-style chat APIs require an assistant message
        # carrying the tool_calls to precede the "tool" result messages --
        # confirm whether enhanced_generate_response enforces that.
        messages.extend(tool_results)

        # Ask the model to explain the tool results.
        final_response = chat_tool.enhanced_generate_response(messages)
        # Ensure the final answer is plain text.
        if final_response["type"] == "text":
            final_content = final_response["content"]
        else:
            final_content = "工具执行完成"
    else:
        final_content = llm_response["content"]

    # 7. Append the assistant reply to the message history.
    messages.append({"role": "assistant", "content": final_content})

    # 8. Update long-term memory.
    if chat_id == 0:  # new conversation
        memory.add_memory(full_prompt, {"type": "user_query"})
    memory.add_memory(final_content, {"type": "assistant_response"})

    # 9. Persist the chat history (system/tool messages stripped).
    messages_to_save = [msg for msg in messages if msg['role'] not in ['system', 'tool']]
    new_id = chat_tool.save_chat_history(chat_id, messages_to_save, prompt)

    return {
        "resp": final_content,
        "id": new_id,
        # BUG FIX: the original read llm_response.get('tool_calls'), a key
        # that never exists (the payload lives under "content"), so this
        # field was always [].
        "tool_calls": tool_calls,
    }




#
# @router.post("/chat")
# def chat(req: BaseReq):
#     try:
#         req_data = req.dict()
#         chat_id = int(req_data.get("id", 0))
#         current_role = req_data.get("currentRole")
#         prompt = req.prompt
#
#         # 1. 初始化消息列表
#         messages = []
#
#         # 2. 添加系统角色消息（如果适用）
#         system_message = chat_tool.get_role_system_message(current_role)
#         if system_message:
#             messages.append(system_message)
#
#         # 3. 加载历史消息（如果适用）
#         if chat_id != 0:
#             history_messages = chat_tool.load_chat_history(chat_id)
#             messages.extend(history_messages)
#
#         # 4. 添加用户新消息
#         user_message = {"role": "user", "content": prompt}
#         messages.append(user_message)
#
#         # 5. 调用大模型生成回复
#         resp = use_llm._generate_response(messages)
#
#         # 6. 添加助手回复到消息列表
#         assistant_message = {"role": "assistant", "content": resp}
#         messages.append(assistant_message)
#
#         # 7. 准备保存的消息（移除系统消息）
#         messages_to_save = messages.copy()
#         if system_message:
#             messages_to_save = messages_to_save[1:]  # 移除系统消息
#
#         # 8. 保存聊天记录
#         new_id = chat_tool.save_chat_history(chat_id, messages_to_save, prompt)
#
#         return {
#             "resp": resp,
#             "id": new_id
#         }
#
#     except HTTPException as he:
#         raise he  # 重新抛出HTTP异常
#     except Exception as e:
#         # logging.error(f"聊天处理异常: {str(e)}")
#         raise HTTPException(status_code=500, detail="聊天处理失败")
#
#
























































@router.post("/chat_list")
async def chat_list():
    """Return all chat history records (id and introduce columns only)."""
    # `()` is the idiomatic empty parameter tuple (was `tuple([])`).
    return we_library.fetch_all("SELECT id,introduce FROM chat_history", ())


@router.get("/chat_del")
async def chat_del(id: int):
    """Delete the chat history record with the given id; always returns True."""
    params = (id,)
    we_library.execute_query("DELETE FROM chat_history WHERE id=?;", params)
    return True


@router.get("/chat_info")
def chat_info(id: int):
    """Fetch a single chat record (id and content) by its primary key."""
    # Removed the needless f-string prefix (the query has no placeholders);
    # the value is still bound through the parameterized `?`.
    return we_library.fetch_one("SELECT id,content FROM chat_history WHERE id=?;", (id,))


@router.post("/chat_role")
async def chat_role():
    """Return all chat roles (name, setting and voice columns)."""
    # `()` is the idiomatic empty parameter tuple (was `tuple([])`).
    return we_library.fetch_all(
        "SELECT id,role_name,role_setting,role_sound_ray FROM chat_role", ()
    )


@router.post("/chat_role_save")
async def chat_role_save(do: ChatRole):
    """Insert a new chat role or update an existing one."""
    table = do.table_name
    return we_library.add_or_update(do, table)


@router.get("/chat_role_info")
def chat_role_info(id: int):
    """Fetch a single chat role by its primary key."""
    # Removed the needless f-string prefix (the query has no placeholders);
    # the value is still bound through the parameterized `?`.
    return we_library.fetch_one(
        "SELECT id,role_name,role_setting,role_sound_ray FROM chat_role WHERE id=?;",
        (id,),
    )


@router.get("/chat_role_del")
async def chat_role_del(id: int):
    """Delete the chat role with the given id; always returns True."""
    params = (id,)
    we_library.execute_query("DELETE FROM chat_role WHERE id=?;", params)
    return True