import os
import asyncio

from langchain_mcp_adapters.client import MultiServerMCPClient
from langgraph.prebuilt import create_react_agent
from langchain_ollama import ChatOllama

# Model configuration is taken from the environment.
# NOTE(review): if MODEL or BASE_URL is unset, os.getenv returns None and
# ChatOllama is constructed with None values at import time — presumably the
# deployment always sets both; confirm, or fail fast with a clear error.
MODEL=os.getenv("MODEL")
BASE_URL=os.getenv("BASE_URL")
# Shared chat model instance used by main() below.
model = ChatOllama(model=MODEL, base_url=BASE_URL)

async def get_tools():
    """Return the LangChain tools exposed by the configured MCP servers.

    Two servers are registered:
      * ``math``    — launched locally as a subprocess over stdio.
      * ``weather`` — reached over SSE; it must already be running on port 8000.
    """
    server_config = {
        "math": {
            "command": "python",
            # Replace with the absolute path to your math server script.
            "args": ["./mcp_server_stdio.py"],
            "transport": "stdio",
        },
        "weather": {
            # Ensure your weather server is already running on port 8000.
            "url": "http://127.0.0.1:8000/sse",
            "transport": "sse",
        },
    }
    client = MultiServerMCPClient(server_config)
    return await client.get_tools()

def main():
    """Build an MCP-tool ReAct agent and run the math and weather demo prompts.

    The whole flow runs inside a single event loop: the tools returned by
    langchain-mcp-adapters are async-only, so the agent must be driven with
    ``ainvoke`` (the original synchronous ``agent.invoke`` cannot await the
    tool coroutines, and ``asyncio.run(get_tools())`` tore down its loop
    before the agent ever ran).
    """
    asyncio.run(_run_demo())


async def _run_demo():
    """Fetch the MCP tools, create the agent, and run both demo queries."""
    tools = await get_tools()
    agent = create_react_agent(
        model,
        tools
    )
    math_response = await agent.ainvoke(
        {"messages": [{"role": "user", "content": "what's (35555121+123123181) x 112122?,please use tools to do math"}]}
    )
    # Print only the final message (the model's answer), not the full trace.
    print(math_response["messages"][-1])
    weather_response = await agent.ainvoke(
        {"messages": [{"role": "user", "content": "what is the weather in nyc?"}]}
    )
    print(weather_response["messages"][-1])

# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()