"""
Author: Alex Punnen
Status: Demo
Note: I have hosted the server on Hugging Face Spaces:
https://huggingface.co/spaces/alexcpn/mcpserver-demo/tree/main
With the Dockerfile it is running at "https://alexcpn-mcpserver-demo.hf.space:7860/mcp/"
This is a simple client to call the MCP server.
"""
import asyncio
from fastmcp import Client
from openai import OpenAI
from dotenv import load_dotenv
import os
import json


async def example():
    # async with Client("http://127.0.0.1:7860/mcp/") as client:
    async with Client("https://alexcpn-mcpserver-demo.hf.space/mcp/") as client:
        await client.ping()
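        # ping() verifies the server is reachable before we make any tool calls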
        # List available tools
        tools = await client.list_tools()
        print("Available tools:", tools)
        # Pass integers rather than strings so the arguments match the tool's schema
        tool_result = await client.call_tool("add", {"a": 1, "b": 2})
        print("Tool result:", tool_result)
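        # tool_result carries the tool's output as MCP content blocks; the exact
        # wrapper type depends on the fastmcp version in use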
        # Let's give this context to an LLM so it can generate the tool call
        # Load the .env file and get the API key
        load_dotenv()
        api_key = os.getenv("OPENAI_API_KEY")
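        # Fail fast if the key is missing (assumes the key is set in .env or the environment)
        if not api_key:
            raise RuntimeError("OPENAI_API_KEY not found in environment or .env file")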
        # Initialize OpenAI client with OpenAI's official base URL
        openai_client = OpenAI(
            api_key=api_key,
            base_url="https://api.openai.com/v1"
        )
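        # Note: base_url is optional here; https://api.openai.com/v1 is the client's default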
        a = 123124522
        b = 865734234
        question = (
            f"Using the tools available {tools}, frame the JSON RPC call to the "
            f"tool add with a={a} and b={b}. Do not add anything else to the output. "
            'Here is the JSON RPC call format: {"method": "<method name>", '
            '"params": {"<param 1 name>": <param 1 value>, "<param 2 name>": <param 2 value>}}'
        )
        # Use a simple model like gpt-3.5-turbo
        completion = openai_client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "user", "content": question}
            ]
        )
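        # The model may wrap its answer in markdown code fences despite the prompt;
        # strip them before parsing (a defensive sketch, usually a no-op here)
        content = completion.choices[0].message.content.strip()
        if content.startswith("```"):
            content = content.strip("`").removeprefix("json").strip()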
        tool_call = json.loads(content)
        # Print the response
        print("LLM response:", tool_call)
        print(tool_call["method"], tool_call["params"])
        # Call the tool with the LLM response
        tool_result = await client.call_tool(tool_call["method"], tool_call["params"])
        print("Tool result:", tool_result)


if __name__ == "__main__":
    asyncio.run(example())