# Create server parameters for stdio connection
import asyncio
import os

from langchain_core.messages import HumanMessage
from langchain_mcp_adapters.tools import load_mcp_tools
from langchain_openai import AzureChatOpenAI
from langgraph.prebuilt import create_react_agent
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

# Azure OpenAI chat model configuration.
#
# SECURITY: an API key was previously hard-coded on this line and committed
# to source control — that key must be considered leaked and rotated.
# The key is now read from the AZURE_OPENAI_API_KEY environment variable.
#
# NOTE(review): "model_name" says gpt-3.5-turbo while "deployment_name" is
# gpt-4o — for Azure the deployment is what actually routes the request;
# confirm which model is intended and make the two fields agree.
params = {
    "azure_endpoint": "https://azs-dev-us-01.openai.azure.com/",
    "openai_api_key": os.environ.get("AZURE_OPENAI_API_KEY", ""),
    "model_name": "gpt-3.5-turbo",
    "deployment_name": "gpt-4o",
    "openai_api_version": "2023-07-01-preview",
    "temperature": 0,       # deterministic output for tool-calling agent
    "max_tokens": 4096,
    "top_p": 0,
    "frequency_penalty": 0,
    "presence_penalty": 0,
}
model = AzureChatOpenAI(**params)

# How to launch the MCP server process that we talk to over stdio.
# Make sure to update to the full absolute path to your server script,
# otherwise the launch depends on the current working directory.
_server_command = "python"
_server_script = "map.py"
server_params = StdioServerParameters(
    command=_server_command,
    args=[_server_script],
)


async def run_agent():
    """Launch the stdio MCP server, build a ReAct agent over its tools,
    print the agent, and return it (the agent is not invoked here)."""
    async with stdio_client(server_params) as (reader, writer), ClientSession(
        reader, writer
    ) as session:
        # Perform the MCP handshake before using the session.
        await session.initialize()
        # Discover the server's tools and wrap them for LangChain.
        mcp_tools = await load_mcp_tools(session)
        agent = create_react_agent(model, mcp_tools)
        # Example invocation, left disabled:
        # agent_response = await agent.ainvoke(
        #     {"messages": "杭州绿城西溪国际到杭州东站路线是什么"}
        # )
        print(agent)
        return agent


# Script entry point: build the agent when executed directly.
if __name__ == "__main__":
    asyncio.run(run_agent())
# To inspect a real response once invocation is enabled:
# result = asyncio.run(run_agent()); print(result["messages"][-1].content)
