#!/usr/bin/env python3
"""Gradio MCP server exposing Handit's AI-agent tracking/optimization tools.

Each Gradio function doubles as an MCP tool (via ``mcp_server=True`` in
``demo.launch``). MCP tools return strings, so every tool serializes its
result/error as JSON text rather than raising.
"""
import gradio as gr
import json
import asyncio
import logging
import os
import traceback
from typing import Dict, Any, Optional

from handit_tracker import HanditTracker

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Global tracker instance shared by all tool invocations.
tracker = HanditTracker()


def _mask_key(api_key: str) -> str:
    """Return a log-safe rendering of an API key: '***' + last 8 chars, or 'None'."""
    if api_key and len(api_key) > 8:
        return '***' + api_key[-8:]
    return 'None'


def _run_async(coro):
    """Run a coroutine to completion on a fresh event loop.

    MCP tool functions are synchronous while the tracker API is async; a new
    loop is created per call (and always closed) so calls are independent.
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(coro)
    finally:
        loop.close()


def configure_handit_tracker(api_key: str, tracking_url: str = "") -> str:
    """Configure the Handit tracker with API credentials.

    Args:
        api_key: Your Handit API key for authentication
        tracking_url: Optional custom tracking URL (leave empty for default)

    Returns:
        Configuration status message
    """
    print(f"🔧 MCP SERVER: configure_handit_tracker called with api_key: {_mask_key(api_key)}", flush=True)
    try:
        if not api_key:
            print("❌ MCP SERVER: API key is required")
            return "❌ API key is required"

        tracker.config(api_key, tracking_url if tracking_url else None)
        print("✅ MCP SERVER: Handit tracker configured successfully")
        return "✅ Handit tracker configured successfully"
    except Exception as e:
        print(f"❌ MCP SERVER: Configuration error: {str(e)}")
        return f"❌ Configuration error: {str(e)}"


def start_agent_tracing(agent_name: str, api_key: str) -> str:
    """Start tracing an AI agent execution.

    Call this at the beginning of any agent workflow.

    Args:
        agent_name: Name of the agent to start tracing (e.g., 'customer-service-bot')
        api_key: Handit API key for authentication

    Returns:
        JSON response with execution_id for tracking
    """
    print(f"🎬 MCP SERVER: start_agent_tracing called with agent_name: {agent_name}, api_key: {_mask_key(api_key)}", flush=True)
    try:
        if not agent_name or not api_key:
            print("❌ MCP SERVER: Both agent_name and api_key are required")
            return json.dumps({"error": "Both agent_name and api_key are required"})

        # Configure tracker
        print("🔧 MCP SERVER: Configuring tracker...")
        tracker.config(api_key)

        # Run async function in sync context
        print("🔄 MCP SERVER: Starting async tracing...")
        print(f"🔧 MCP SERVER: Starting async tracing for agent: {agent_name}")
        result = _run_async(tracker.start_tracing_async(agent_name))
        print(f"📊 MCP SERVER: Async tracing result: {result}")

        if result and 'executionId' in result:
            response = {
                "status": "success",
                "execution_id": result['executionId'],
                "agent_name": agent_name,
                "message": f"Started tracing for agent: {agent_name}"
            }
            print(f"✅ MCP SERVER: Successfully started tracing, returning: {response}")
            return json.dumps(response, indent=2)
        else:
            print(f"❌ MCP SERVER: Failed to start agent tracing - result: {result}")
            return json.dumps({
                "error": "Failed to start agent tracing",
                "result": str(result),
                "has_executionId": bool(result and 'executionId' in result) if result else False,
                "result_type": str(type(result))
            })
    except Exception as e:
        print(f"❌ MCP SERVER: Error starting tracing: {str(e)}")
        traceback.print_exc()
        return json.dumps({"error": f"Error starting tracing: {str(e)}"})


def track_agent_node(input_data: str, output_data: str, node_name: str,
                     agent_name: str, node_type: str, execution_id: str,
                     api_key: str) -> str:
    """Track a specific node execution within an agent workflow.

    Call this for each step in your agent.

    Args:
        input_data: Input data for the node (JSON string or text)
        output_data: Output data from the node (JSON string or text)
        node_name: Name of the node being tracked (e.g., 'sentiment-analysis')
        agent_name: Name of the agent this node belongs to
        node_type: Type of node - must be one of: llm, model, tool, function, api, other
        execution_id: Execution ID from start_agent_tracing
        api_key: Handit API key for authentication

    Returns:
        JSON response with tracking status
    """
    print(f"📊 MCP SERVER: track_agent_node called with:", flush=True)
    print(f" - node_name: {node_name}")
    print(f" - agent_name: {agent_name}")
    print(f" - node_type: {node_type}")
    print(f" - execution_id: {execution_id}")
    print(f" - input_data length: {len(input_data) if input_data else 0}")
    print(f" - output_data length: {len(output_data) if output_data else 0}")
    print(f" - api_key: {_mask_key(api_key)}")
    try:
        if not api_key:
            print("❌ MCP SERVER: API key is required for tracking node")
            return json.dumps({"error": "api_key is required"})

        tracker.config(api_key)

        # Validate required arguments. NOTE: input_data is documented as
        # "JSON string or text", so it must NOT be json.loads()'d here —
        # the raw string is what gets sent to the tracker below.
        required_args = {
            "input_data": input_data,
            "output_data": output_data,
            "node_name": node_name,
            "agent_name": agent_name,
            "node_type": node_type,
            "execution_id": execution_id
        }
        for arg_name, arg_value in required_args.items():
            if not arg_value:
                print(f"❌ MCP SERVER: {arg_name} is required")
                return json.dumps({"error": f"{arg_name} is required"})

        # Validate node_type
        valid_types = ["llm", "model", "tool", "function", "api", "other"]
        if node_type not in valid_types:
            print(f"❌ MCP SERVER: Invalid node_type: {node_type}")
            return json.dumps({"error": f"node_type must be one of: {', '.join(valid_types)}"})

        # Run async function in sync context
        print("🔄 MCP SERVER: Starting async node tracking...")
        result = _run_async(
            tracker.track_node_async(input_data, output_data, node_name,
                                     agent_name, node_type, execution_id)
        )
        print(f"📊 MCP SERVER: Async tracking result: {result}")

        if result:
            response = {
                "status": "success",
                "node_name": node_name,
                "node_type": node_type,
                "execution_id": execution_id,
                "message": f"Successfully tracked node: {node_name}"
            }
            print(f"✅ MCP SERVER: Successfully tracked node, returning: {response}")
            return json.dumps(response, indent=2)
        else:
            print("❌ MCP SERVER: Failed to track agent node - no result")
            return json.dumps({"error": "Failed to track agent node"})
    except Exception as e:
        print(f"❌ MCP SERVER: Error tracking node: {str(e)}")
        traceback.print_exc()
        return json.dumps({"error": f"Error tracking node: {str(e)}"})


def get_optimized_prompts(agent_slug: str, api_key: str) -> str:
    """Get optimized prompts for an agent.

    Use this before LLM calls to get the best performing prompts.

    Args:
        agent_slug: Slug identifier for the agent (e.g., 'customer-service-v1')
        api_key: Handit API key for authentication

    Returns:
        JSON response with optimized prompts
    """
    print(f"🧠 MCP SERVER: get_optimized_prompts called with agent_slug: {agent_slug}, api_key: {_mask_key(api_key)}", flush=True)
    try:
        if not agent_slug or not api_key:
            print("❌ MCP SERVER: Both agent_slug and api_key are required")
            return json.dumps({"error": "Both agent_slug and api_key are required"})

        # Configure tracker
        print("🔧 MCP SERVER: Configuring tracker for prompts...")
        tracker.config(api_key)

        # Run async function in sync context
        print("🔄 MCP SERVER: Getting async prompts...")
        prompts = _run_async(tracker.get_optimized_prompts_async(agent_slug))
        print(f"📊 MCP SERVER: Async prompts result: {prompts}")

        response = {
            "status": "success",
            "agent_slug": agent_slug,
            "prompts": prompts,
            "message": f"Retrieved optimized prompts for agent: {agent_slug}"
        }
        print(f"✅ MCP SERVER: Successfully got prompts, returning: {response}")
        return json.dumps(response, indent=2)
    except Exception as e:
        print(f"❌ MCP SERVER: Error getting optimized prompts: {str(e)}")
        traceback.print_exc()
        return json.dumps({"error": f"Error getting optimized prompts: {str(e)}"})


def end_agent_tracing(execution_id: str, agent_name: str = "", api_key: str = "") -> str:
    """End tracing an AI agent execution.

    Call this at the end of any agent workflow.

    Args:
        execution_id: Execution ID returned from start_agent_tracing
        agent_name: Name of the agent (optional)
        api_key: Handit API key for authentication

    Returns:
        JSON response with completion status
    """
    print(f"🏁 MCP SERVER: end_agent_tracing called with execution_id: {execution_id}, agent_name: {agent_name}, api_key: {_mask_key(api_key)}", flush=True)
    try:
        if not execution_id:
            return json.dumps({"error": "execution_id is required"})
        if not api_key:
            print("❌ MCP SERVER: API key is required for ending tracing")
            return json.dumps({"error": "api_key is required"})

        tracker.config(api_key)

        # Run async function in sync context
        result = _run_async(tracker.end_tracing_async(execution_id, agent_name))

        if result:
            response = {
                "status": "success",
                "execution_id": execution_id,
                "message": f"Successfully ended tracing for execution: {execution_id}"
            }
            return json.dumps(response, indent=2)
        else:
            return json.dumps({"error": "Failed to end agent tracing"})
    except Exception as e:
        return json.dumps({"error": f"Error ending tracing: {str(e)}"})


# Create the Gradio interface
def create_handit_mcp_interface():
    """Build the Gradio Blocks UI; each tab drives one of the MCP tool functions."""
    with gr.Blocks(
        title="🚀 Handit MCP Server - Self-Improving AI Agent Tools",
        theme=gr.themes.Soft(),
    ) as demo:
        gr.Markdown("""
# 🚀 Handit MCP Server
### Self-Improving AI Agent Tools via Model Context Protocol

This server exposes Handit's AI tracking and optimization tools via MCP (Model Context Protocol).

**Perfect for hackathons!** 🏆

## 🔧 MCP Tools Available:
- `configure_handit_tracker`: Set up API credentials
- `start_agent_tracing`: Begin tracking an agent execution
- `track_agent_node`: Track individual steps in your agent
- `get_optimized_prompts`: Get the best performing prompts
- `end_agent_tracing`: Complete agent tracking

## 📡 MCP Server URL
Access the MCP server at: **`http://localhost:{}/gradio_api/mcp/sse`**
""".format(os.environ.get("GRADIO_SERVER_PORT", "7860")))

        with gr.Tabs():
            # Configuration Tab
            with gr.TabItem("⚙️ Configure Tracker"):
                with gr.Row():
                    config_api_key = gr.Textbox(label="Handit API Key", type="password")
                    config_url = gr.Textbox(label="Custom Tracking URL (Optional)")
                config_result = gr.Textbox(label="Result", interactive=False)
                config_btn = gr.Button("Configure", variant="primary")
                config_btn.click(
                    configure_handit_tracker,
                    inputs=[config_api_key, config_url],
                    outputs=config_result
                )

            # Start Tracing Tab
            with gr.TabItem("🎬 Start Tracing"):
                with gr.Row():
                    start_agent = gr.Textbox(label="Agent Name", placeholder="e.g., customer-service-bot")
                    start_key = gr.Textbox(label="API Key", type="password")
                start_result = gr.Textbox(label="Result", interactive=False, lines=10)
                start_btn = gr.Button("Start Tracing", variant="primary")
                start_btn.click(
                    start_agent_tracing,
                    inputs=[start_agent, start_key],
                    outputs=start_result
                )

            # Track Node Tab
            with gr.TabItem("📊 Track Node"):
                with gr.Row():
                    with gr.Column():
                        track_input = gr.Textbox(label="Input Data", lines=3)
                        track_output = gr.Textbox(label="Output Data", lines=3)
                        track_node = gr.Textbox(label="Node Name")
                    with gr.Column():
                        track_agent = gr.Textbox(label="Agent Name")
                        track_type = gr.Dropdown(
                            label="Node Type",
                            choices=["llm", "model", "tool", "function", "api", "other"],
                            value="llm"
                        )
                        track_exec_id = gr.Textbox(label="Execution ID")
                track_result = gr.Textbox(label="Result", interactive=False, lines=10)
                track_btn = gr.Button("Track Node", variant="primary")
                # NOTE: api_key is reused from the Configure tab's textbox.
                track_btn.click(
                    track_agent_node,
                    inputs=[track_input, track_output, track_node, track_agent,
                            track_type, track_exec_id, config_api_key],
                    outputs=track_result
                )

            # Get Prompts Tab
            with gr.TabItem("🧠 Get Optimized Prompts"):
                with gr.Row():
                    prompts_slug = gr.Textbox(label="Agent Slug", placeholder="e.g., customer-service-v1")
                    prompts_key = gr.Textbox(label="API Key", type="password")
                prompts_result = gr.Textbox(label="Result", interactive=False, lines=15)
                prompts_btn = gr.Button("Get Prompts", variant="primary")
                prompts_btn.click(
                    get_optimized_prompts,
                    inputs=[prompts_slug, prompts_key],
                    outputs=prompts_result
                )

            # End Tracing Tab
            with gr.TabItem("🏁 End Tracing"):
                with gr.Row():
                    end_exec_id = gr.Textbox(label="Execution ID")
                    end_agent = gr.Textbox(label="Agent Name (Optional)")
                end_result = gr.Textbox(label="Result", interactive=False, lines=8)
                end_btn = gr.Button("End Tracing", variant="primary")
                # NOTE: api_key is reused from the Configure tab's textbox.
                end_btn.click(
                    end_agent_tracing,
                    inputs=[end_exec_id, end_agent, config_api_key],
                    outputs=end_result
                )

        gr.Markdown("""
---
### 🎯 Hackathon Usage:
1. **For LLM Integration**: Use the MCP server URL with your AI assistant
2. **For Testing**: Use the tabs above to test functionality
3. **For Development**: Inspect the API at `/gradio_api/mcp/schema`

**Made with ❤️ for hackathons using Gradio's native MCP integration!**
""")

    return demo


if __name__ == "__main__":
    # Create the Gradio interface
    demo = create_handit_mcp_interface()
    # Launch with MCP server enabled - THIS IS THE KEY!
    demo.launch(
        share=True,
        show_error=True,
        mcp_server=True  # 🔥 This enables MCP server functionality
    )