import asyncio

from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_ollama import ChatOllama
from langchain.agents import create_react_agent
import logging

from common_config import CHAT_OLLAMA_MODEL

# Shared chat model instance used by the agent below; model name comes from
# project-level config so all scripts agree on the same Ollama model.
ollama = ChatOllama(model=CHAT_OLLAMA_MODEL)

# Root logging config for the whole process; module-level logger per convention.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

async def tool_call(query: str):
    """Run *query* through a ReAct agent backed by the MCP "math" server.

    Connects to the local streamable-HTTP MCP endpoint, loads its tools,
    builds a ReAct agent around the module-level Ollama chat model, and
    returns the agent's response.

    Args:
        query: Natural-language question to send to the agent.

    Returns:
        The raw response mapping produced by the agent's ``ainvoke`` call.
    """
    logger.info("Initializing client with query: %s", query)
    client = MultiServerMCPClient(
        {
            "math": {
                "url": "http://127.0.0.1:8000/mcp/",
                "transport": "streamable_http"
            }
        }
    )
    tools = await client.get_tools()
    logger.info("Getting tools: %s", tools)
    agent = create_react_agent(ollama, tools)
    logger.info("Creating agent: %s", agent)

    logger.info("Invoking agent.")
    # Two fixes vs. the original:
    #  - the agent expects its input under the "messages" key (not "message");
    #  - MCP tools are async-only, so the agent must be awaited via ainvoke()
    #    — the synchronous invoke() would fail as soon as a tool is called.
    _response = await agent.ainvoke(
        {"messages": [{"role": "user", "content": query}]}
    )
    logger.info("Response received")
    return _response


if __name__ == "__main__":
    # Drive the async agent pipeline once with a sample arithmetic query,
    # then dump the raw result to stdout.
    result = asyncio.run(tool_call("what is 4 * 4"))
    print("------------------------")
    print(result)
