from contextlib import asynccontextmanager
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from langchain_mcp_adapters.tools import load_mcp_tools
from langgraph.checkpoint.redis.aio import AsyncRedisSaver
from langgraph.prebuilt import create_react_agent
from langchain_core.runnables import RunnableConfig
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from pydantic import SecretStr
from fastapi import status

from config.database_config import DatabaseConfig
from config.llm_config import LLMConfig
from config.mcp_config import MCPConfig
from domain.user_question import UserQuestion

@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    Set up and tear down the agent's runtime dependencies.

    On startup this spawns the MCP server over stdio, opens an async Redis
    checkpointer (TTL refreshed on every read so active conversations stay
    alive), builds the ChatOpenAI model, loads the tools exposed by the MCP
    server, and assembles a ReAct agent executor.  The checkpointer, the
    agent executor, and the system prompt are published on ``app.state`` for
    the request handlers.  Control is yielded to the running application;
    when it shuts down, the nested context managers unwind and release the
    MCP connection and Redis resources automatically.
    """

    db_cfg = DatabaseConfig(db_type="redis-stack")
    llm_cfg = LLMConfig()
    mcp_cfg = MCPConfig()

    # Launch the MCP server as a child process and communicate via stdio.
    stdio_params = StdioServerParameters(command=mcp_cfg.command, args=[mcp_cfg.path])

    async with stdio_client(stdio_params) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as mcp_session:
            await mcp_session.initialize()

            # Refresh-on-read keeps conversation state alive while it is in use.
            ttl_settings = {"default_ttl": db_cfg.default_ttl, "refresh_on_read": True}
            async with AsyncRedisSaver.from_conn_string(db_cfg.url, ttl=ttl_settings) as checkpointer:
                await checkpointer.asetup()

                # LLM used by the agent for reasoning and response generation.
                llm = ChatOpenAI(
                    base_url=llm_cfg.base_url,
                    api_key=SecretStr(llm_cfg.api_key),
                    model=llm_cfg.model,
                    temperature=llm_cfg.temperature,
                )

                # Expose the MCP server's tools to the agent.
                mcp_tools = await load_mcp_tools(mcp_session)

                executor = create_react_agent(model=llm, tools=mcp_tools, checkpointer=checkpointer)

                app.state.checkpointer = checkpointer
                app.state.agent_executor = executor
                app.state.system_prompt = llm_cfg.system_prompt
                yield

# ASGI application; `lifespan` populates app.state with the checkpointer,
# agent executor, and system prompt before requests are served.
app = FastAPI(lifespan=lifespan)

@app.post("/v1/agent/execute",
          summary="Execute agent with input data",
          description="""
          This endpoint allows you to execute the agent with the provided input data.
          The agent will process the input and return a response based on its reasoning and available tools.
          """)
async def run_agent(user_question: UserQuestion):
    """
    Run the agent for one user question, scoped to the question's thread id.

    A SystemMessage is prepended only on the first turn of a thread (no
    checkpoint exists yet); subsequent turns rely on the checkpointed
    conversation history. Returns the agent's final message plus the id.
    """
    config = RunnableConfig(configurable={"thread_id": user_question.id})

    # No checkpoint means this thread has never run: seed it with the system prompt.
    existing = await app.state.checkpointer.aget_tuple(config)
    if existing is None:
        messages = [SystemMessage(content=app.state.system_prompt)]
    else:
        messages = []
    messages.append(HumanMessage(content=user_question.question))

    result = await app.state.agent_executor.ainvoke({"messages": messages}, config)

    return JSONResponse(
        content={
            "response": result["messages"][-1].content,
            "id": user_question.id,
        },
        status_code=status.HTTP_200_OK,
    )

@app.get(
    "/v1/agent/health",
    summary="Health check for the agent",
    description="""
           This endpoint checks the health of the agent.
           It returns a simple message indicating that the agent is running.
           """,
)
async def health_check():
    """
    Report liveness of the agent service.

    Always responds 200 with a fixed message; suitable for readiness and
    liveness probes that only need to know the process is serving requests.
    """
    payload = {"message": "Agent is running"}
    return JSONResponse(content=payload, status_code=status.HTTP_200_OK)
