import asyncio

from langchain_core.prompts import PromptTemplate
from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_ollama import ChatOllama
from langchain.agents import create_react_agent
import logging

from common_config import CHAT_OLLAMA_MODEL
from tool_call.ollama_prompt import TOOL_CALL_TEMPLATE

# Shared chat-model instance; model name comes from project-level config.
ollama = ChatOllama(model=CHAT_OLLAMA_MODEL)

# Configure root logging once at import so INFO-level messages are visible.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

async def tool_call(query: str):
    """Run a ReAct agent backed by Ollama against the local MCP math server.

    Args:
        query: Natural-language question forwarded to the agent.

    Returns:
        The raw response object produced by the agent's invocation.
    """
    logger.info("Initializing client with query: %s", query)
    # Assumes a streamable-HTTP MCP server is already running locally —
    # TODO confirm the URL/port against the server launch script.
    client = MultiServerMCPClient(
        {
            "math": {
                "url": "http://127.0.0.1:8000/mcp/",
                "transport": "streamable_http"
            }
        }
    )
    prompt = PromptTemplate.from_template(TOOL_CALL_TEMPLATE)

    tools = await client.get_tools()
    logger.info("Got tools: %s", tools)

    agent = create_react_agent(ollama, tools, prompt=prompt)
    logger.info("Created Ollama LLM agent: %s", agent)

    logger.info("Invoking agent.")
    # Bug fix: the original called the synchronous agent.invoke() inside an
    # async function, blocking the event loop for the whole LLM/tool
    # round-trip. Use the async variant and await it instead.
    _response = await agent.ainvoke({"input": query})
    logger.info("Response received: %s", _response)
    return _response


if __name__ == "__main__":
    # Drive the async entry point with a sample arithmetic query.
    result = asyncio.run(tool_call("what is 4 * 4"))
    print("------------------------")
    print(result)
