import asyncio

from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_ollama import ChatOllama
from langgraph.prebuilt import create_react_agent
import logging

from common_config import CHAT_OLLAMA_MODEL

# Shared chat model instance; the model name comes from project-level config.
chat = ChatOllama(model=CHAT_OLLAMA_MODEL)

# DEBUG level so the MCP handshake and tool-call traffic are visible during development.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)


async def main(query: str):
    """Answer *query* with a ReAct agent whose tools come from a local MCP math server.

    Connects to the MCP server at 127.0.0.1:8000 over streamable HTTP, loads
    its tools, builds a LangGraph ReAct agent around the shared ``chat`` model,
    and runs the query through it.

    Args:
        query: Natural-language question to send to the agent.

    Returns:
        The final agent state dict (includes the full ``"messages"`` history).
    """
    logger.debug("Initializing client")
    client = MultiServerMCPClient(
        {
            "math": {
                "url": "http://127.0.0.1:8000/mcp/",
                "transport": "streamable_http",
            }
        }
    )
    logger.debug("Getting tools")
    tools = await client.get_tools()
    logger.debug("Creating agent")
    agent = create_react_agent(chat, tools)
    logger.debug("Invoking agent")
    # Bug fix: the agent's state key is "messages" (plural) and must hold a
    # list of messages — the old {"message": query} payload did not match the
    # state schema, so the query never reached the model.
    _response = await agent.ainvoke(
        {"messages": [{"role": "user", "content": query}]}
    )
    logger.debug("Response received")
    return _response


if __name__ == "__main__":
    # Demo run: ask the agent a simple arithmetic question and dump the result.
    question = "what is 4 * 4"
    result = asyncio.run(main(question))
    print("------------------------")
    print(result)
