import asyncio
import json

import logging
from mcp.client.session import ClientSession
from mcp.client.sse import sse_client
import ollama
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def convert_tool_format(tools):
    """Convert MCP tool objects into Ollama's tool-calling format.

    See https://ollama.com/blog/tool-support

    Args:
        tools (list): Tool objects exposing ``name``, ``description``
            and ``inputSchema`` attributes.

    Returns:
        list: Tool definitions shaped as Ollama's ``tools=`` parameter
        expects (one ``{'type': 'function', 'function': {...}}`` entry
        per input tool).
    """
    return [
        {
            'type': 'function',
            'function': {
                'name': entry.name,
                'description': entry.description,
                'parameters': entry.inputSchema,
            },
        }
        for entry in tools
    ]

import uuid

async def call_tool(session, response, messages):
    """Execute every tool call requested by the model and append the results.

    Args:
        session: Active MCP ClientSession used to invoke tools.
        response: Ollama chat response whose ``message.tool_calls`` holds
            the requested tool invocations.
        messages (list): Conversation history; one user-role tool-result
            entry is appended in place per requested tool call.

    Returns:
        list: The same ``messages`` list, for convenience.

    Raises:
        ValueError: If the response contains no tool calls.
    """
    tool_requests = response.message.tool_calls
    if not tool_requests:
        raise ValueError("No tool requests found in response")

    for tool_request in tool_requests:
        tool = tool_request.function
        print("Requesting tool: ", tool.name)

        # One id per invocation so success and error results correlate.
        # BUG FIX: the original subscripted the function object
        # (tool['name'] / tool['toolUseId']); it supports only attribute
        # access, so both paths raised TypeError at runtime.
        tool_use_id = tool.name + str(uuid.uuid4())

        try:
            # Invoke the tool through the MCP session.
            tool_response = await session.call_tool(tool.name, tool.arguments)
            tool_result = {
                "toolUseId": tool_use_id,
                "content": [{"text": str(tool_response)}]
            }
        except Exception as err:
            logger.error("Tool call failed: %s", str(err))
            tool_result = {
                "toolUseId": tool_use_id,
                "content": [{"text": f"Error: {str(err)}"}],
                "status": "error"
            }

        # Feed the tool result back to the model as a user message.
        messages.append({
            "role": "user",
            "content": str({"toolResult": tool_result})
        })
    return messages

from ollama import ChatResponse

async def converse_using_ollama(session, messages, tools):
    """Drive a chat loop with Ollama until it stops requesting tools.

    Each turn sends the conversation to the model; if the model requests
    tool calls, they are executed via ``call_tool`` and the loop repeats
    with the results appended, otherwise the loop ends.

    Args:
        session: Active MCP ClientSession used to execute tool calls.
        messages (list): Conversation history; extended in place each turn.
        tools: MCP tool objects advertised to the model.
    """
    # The tool set never changes inside the loop, so convert it once
    # up front instead of on every iteration.
    converted_tools = convert_tool_format(tools)
    while True:
        print("messages: ", str(messages))
        # ollama.chat is a blocking HTTP call; run it in a worker thread
        # so the event loop stays responsive while waiting.
        response: ChatResponse = await asyncio.to_thread(
            ollama.chat,
            model="qwen2:7b",
            messages=messages,
            tools=converted_tools,
        )
        print("Response from Ollama:")
        print(response.message.content)
        # Record the assistant turn (attribute access kept consistent
        # with the rest of this function).
        messages.append(response.message)

        if response.message.tool_calls:
            await call_tool(session, response, messages)
        else:
            print("No more tool use requests, we're done")
            break


async def complete(message):
    """Run one chat completion against the local MCP server over SSE.

    Connects to the SSE endpoint, lists the server's tools, seeds the
    conversation with a system prompt describing them, then hands off to
    the Ollama conversation loop.

    Args:
        message (dict): The initial user message ({"role", "content"}).
    """
    logger.info("Starting session")
    messages = []

    async with sse_client("http://localhost:8000/sse") as streams:
        async with ClientSession(streams[0], streams[1]) as session:
            try:
                await session.initialize()
                # Give the server a moment to finish initializing.
                await asyncio.sleep(1)
                logger.info("Session initialized")

                # List available tools and convert them to a serializable
                # form for the system prompt.
                tools_result = await session.list_tools()
                tools_list = [{
                    "name": tool.name,
                    "description": tool.description,
                    "inputSchema": tool.inputSchema} for tool in tools_result.tools]
                logger.info("Available tools: %s", tools_list)
                # NOTE(review): a leftover hard-coded debug call to the
                # "add" tool (result unused) was removed here.
                system_message = {
                    "role" : "system",
                    "content" : "You are a helpful AI assistant. You have access to the following tools: " + json.dumps(tools_list) + " Use these tools if called to answer any questions posed by the prompt (user)."}
                messages.append(system_message)
                messages.append(message)
            except TypeError as err:
                # BUG FIX: original format string had one %s but two
                # arguments, which made the logging call itself fail; it
                # then fell through to converse with tools_result unbound.
                logger.error("Tool call failed for tools: %s - with error %s", '', str(err))
                return
            await converse_using_ollama(session, messages, tools_result.tools)

async def main():
    """Entry point: ask the assistant about Pluto using the MCP tools."""
    user_message = {
        "role": "user",
        "content": "Describe the planet pluto using any information available from the supplied tools?",
    }
    await complete(user_message)


from threading import Event


async def call_tool_with_event(session, tool_name: str, params: dict):
    """Call an MCP tool and signal completion through an Event.

    Args:
        session: Active MCP ClientSession.
        tool_name (str): Name of the tool to invoke.
        params (dict): Arguments passed to the tool.

    Returns:
        The tool call's result.

    Raises:
        Exception: Re-raises whatever the tool call raised.
    """
    completion_event = Event()
    result = None
    error = None

    def callback(res, err=None):
        # BUG FIX: the nonlocal capture was commented out, so result/error
        # were never set; worse, the callback was never invoked at all,
        # which left completion_event.wait() blocking the thread (and the
        # event loop) forever.
        nonlocal result, error
        result = res
        error = err
        completion_event.set()

    try:
        res = await session.call_tool(tool_name, params)
        callback(res)
    except Exception as call_err:
        callback(None, call_err)

    # The event is already set by now; wait() returns immediately and is
    # kept only to preserve the Event-based completion pattern.
    completion_event.wait()

    if error:
        raise error
    return result

async def sync_add():
    """Call the ``plantuml_generator`` tool on a local MCP server and print
    the first text block of its result.
    """
    async with sse_client("http://localhost:8081/sse") as streams:
        async with ClientSession(*streams) as session:
            await session.initialize()
            topic = "物流管理系统"
            content = "软件简介   物流信息管理系统主要用于物流企业对运输任务、车辆、司机及货物等信息的全面管理，实现物流过程的信息化和智能化。"
            args = {"topic": topic, "content": content}
            rlt = await session.call_tool("plantuml_generator", args)
            # BUG FIX: the original line `asyncio.sleep(3)` was never
            # awaited, producing a never-awaited coroutine and no actual
            # pause. The awaited call above already yields the final
            # result, so the sleep is dropped rather than awaited.
            print(rlt.content[0].text)

if __name__ == '__main__':
    # asyncio.run creates and closes its own event loop; the previous
    # asyncio.get_event_loop()/run_until_complete pattern is deprecated
    # since Python 3.10 and can leave the loop unclosed.
    asyncio.run(sync_add())