import base64
import logging
import uuid
from contextlib import asynccontextmanager
from typing import Any, Dict

import toml
import uvicorn
from fastapi import FastAPI, HTTPException
from langchain_aws import ChatBedrock
from langchain_core.messages import AIMessage, HumanMessage
from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent
from pydantic import BaseModel, Field


def create_llm_model(llm_config):
    """Build a chat model from the ``llm`` section of the configuration.

    Dispatches on ``llm_config["type"]`` (defaulting to ``"openai"``) and
    reads the matching provider sub-table for credentials and model choice.

    Args:
        llm_config: the ``[llm]`` table parsed from ``config.toml``.

    Returns:
        A ``ChatOpenAI`` or ``ChatBedrock`` instance.

    Raises:
        ValueError: if the configured type is neither ``openai`` nor ``bedrock``.
    """
    provider = llm_config.get("type", "openai")

    if provider == "openai":
        cfg = llm_config["openai"]
        return ChatOpenAI(
            base_url=cfg["openai_base_url"],
            api_key=cfg["openai_api_key"],
            model=cfg["openai_model"],
        )

    if provider == "bedrock":
        cfg = llm_config["bedrock"]
        return ChatBedrock(
            model=cfg["model_name"],
            region=cfg["region_name"],
            aws_access_key_id=cfg["access_key_id"],
            aws_secret_access_key=cfg["secret_access_key"],
        )

    raise ValueError(f"Unsupported LLM type: {provider}")


# Load the application configuration from the TOML file next to this script.
config = toml.load("config.toml")

# Configure the root logger from the [app] section (level name and format).
_app_cfg = config["app"]
logging.basicConfig(
    level=getattr(logging, _app_cfg["log_level"]),
    format=_app_cfg["log_format"],
)


# --- 1. Data models ---
# Request payload for both chat endpoints. NOTE: `content` is expected to be
# base64-encoded UTF-8 text (decoded in _handle_chat); `conversation_id` may be
# omitted to start a new conversation.
class ChatRequest(BaseModel):
    conversation_id: str | None = Field(
        None, description="The ID of the conversation session."
    )
    content: str = Field(..., description="The user's message content.")


# Response model carrying the agent's final answer for one chat turn.
# Response payload: echoes the (possibly newly generated) conversation id so
# the client can continue the session, plus the agent's complete final answer.
class ChatResponse(BaseModel):
    conversation_id: str = Field(..., description="The ID of the conversation session.")
    final_answer: str = Field(..., description="The agent's final complete answer.")


# --- 2. Agent configuration and application lifecycle (lifespan approach) ---
# Module-level registry shared between lifespan() and the request handlers:
# holds the two agent executors and the per-conversation message histories.
app_globals: Dict[str, Any] = {}

# Sliding-window size for stored conversation history (applied in _handle_chat).
MAX_CONVERSATION_LENGTH = config["conversation"]["max_length"]


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: build the MCP toolset and both ReAct agents once
    at startup, publish them via ``app_globals``, and clear everything on
    shutdown.
    """
    logging.info("Application startup: Initializing agent...")

    # Build the MCP client configuration from config.toml.
    # (Idiom fix: dict comprehension instead of a manual accumulation loop.)
    mcp_servers = {
        name: {"url": server["url"], "transport": server["transport"]}
        for name, server in config["mcp"]["servers"].items()
    }

    client = MultiServerMCPClient(mcp_servers)
    tools = await client.get_tools()

    # LLM backend selected by the [llm] section of the configuration.
    model = create_llm_model(config["llm"])

    # Two agents share the same model and tools but use different system prompts.
    import_system_prompt = config["prompt"]["import_system_prompt"]
    export_system_prompt = config["prompt"]["export_system_prompt"]

    import_agent_executor = create_react_agent(
        model, tools, prompt=import_system_prompt
    )
    export_agent_executor = create_react_agent(
        model, tools, prompt=export_system_prompt
    )

    app_globals["import_agent_executor"] = import_agent_executor
    app_globals["export_agent_executor"] = export_agent_executor
    # conversation_id -> list of messages (trimmed by the sliding window).
    app_globals["conversations"] = {}

    logging.info("Agent initialized successfully.")

    yield

    logging.info("Application shutdown.")
    app_globals.clear()


app = FastAPI(lifespan=lifespan)


# --- 4. API endpoints ---
async def _handle_chat(request: ChatRequest, agent_executor, agent_type: str):
    """Run a single chat turn through *agent_executor* and return the answer.

    Steps: decode the base64-encoded user content, prepend the stored
    conversation history, stream the agent to completion (logging each
    intermediate message), extract the final AI reply, trim the history with
    a sliding window, and persist it back to ``app_globals["conversations"]``.

    Args:
        request: incoming payload; ``content`` must be base64-encoded UTF-8.
        agent_executor: a LangGraph ReAct agent built in ``lifespan``.
        agent_type: label ("Import"/"Export") used only in log messages.

    Raises:
        HTTPException: 503 when the agent is not initialized, 400 when the
            content cannot be base64/UTF-8 decoded.
    """
    if not agent_executor:
        # Fix: previously returned {"error": ...}, which cannot validate
        # against response_model=ChatResponse and surfaced as a 500.
        raise HTTPException(
            status_code=503, detail=f"{agent_type} Agent not initialized."
        )

    # Decode the base64-encoded message content.
    try:
        decoded_content = base64.b64decode(request.content).decode("utf-8")
    except Exception as e:
        logging.error(f"Failed to decode base64 content: {e}")
        # Fix: same response_model mismatch as above — report a proper 400.
        raise HTTPException(status_code=400, detail="Invalid base64 content") from e

    # Resume an existing conversation or start a new one with a fresh UUID.
    conv_id = request.conversation_id or str(uuid.uuid4())
    history = app_globals["conversations"].get(conv_id, [])
    current_messages = history + [HumanMessage(content=decoded_content)]
    inputs = {"messages": current_messages}

    final_state = None
    logging.info(f"--- [CONV_ID: {conv_id}] Starting {agent_type} Agent Run ---")

    # Stream the agent step by step; each chunk is a full state snapshot.
    async for chunk in agent_executor.astream(inputs, stream_mode="values"):
        # Log the newest message produced at this step.
        last_message = chunk["messages"][-1]
        last_message.pretty_print()

        # Keep the latest snapshot; after the loop it holds the final state.
        final_state = chunk

    final_messages = final_state["messages"] if final_state else []

    # The final answer is the trailing AI message, if the run produced one.
    final_answer = ""
    if final_messages and isinstance(final_messages[-1], AIMessage):
        final_answer = final_messages[-1].content

    # Sliding window: keep only the most recent messages to bound memory use.
    if len(final_messages) > MAX_CONVERSATION_LENGTH:
        final_messages = final_messages[-MAX_CONVERSATION_LENGTH:]
        logging.info(
            f"--- [CONV_ID: {conv_id}] Applied sliding window, keeping last {MAX_CONVERSATION_LENGTH} messages ---"
        )

    app_globals["conversations"][conv_id] = final_messages

    logging.info(
        f"--- [CONV_ID: {conv_id}] {agent_type} Agent Run Finished. Final Answer: {final_answer} ---"
    )

    return ChatResponse(conversation_id=conv_id, final_answer=final_answer)


@app.post("/report/import/chat", response_model=ChatResponse)
async def import_chat(request: ChatRequest):
    """Chat endpoint backed by the import-report agent."""
    executor = app_globals["import_agent_executor"]
    return await _handle_chat(request, executor, "Import")


@app.post("/report/export/chat", response_model=ChatResponse)
async def export_chat(request: ChatRequest):
    """Chat endpoint backed by the export-report agent."""
    executor = app_globals["export_agent_executor"]
    return await _handle_chat(request, executor, "Export")


# --- 5. Start the service ---
# Run the API server with host/port taken from the [app] config section.
if __name__ == "__main__":
    server_cfg = config["app"]
    uvicorn.run(app, host=server_cfg["host"], port=server_cfg["port"])
