/**
 * Example client demonstrating sampling support
 *
 * This example shows how to:
 * 1. Create an MCP client with a sampling callback
 * 2. Connect to a server that uses sampling
 * 3. Call tools that request LLM completions
 *
 * To run this example:
 * 1. Start the sampling server: cd examples/server/sampling && npm run dev
 * 2. Run this client: tsx examples/client/sampling-client.ts
 */

import { MCPClient } from "mcp-use";
import type { CreateMessageRequest } from "@mcp-use/modelcontextprotocol-sdk/types.js";

// Mock LLM function - replace this with your actual LLM integration
/**
 * Simulates an LLM completion for demonstration purposes.
 *
 * Returns canned responses keyed off substrings in the prompt:
 * - prompts containing "sentiment"  -> "positive" / "negative" / "neutral"
 * - prompts containing "Summarize"  -> a fixed mock summary
 * - prompts containing "Translate"  -> a fixed mock translation
 * - anything else                   -> "Mock LLM response"
 *
 * @param prompt - The prompt text forwarded from the sampling request.
 * @returns The mock completion text.
 */
async function mockLLM(prompt: string): Promise<string> {
  // In a real implementation, you would call your LLM API here
  // For example: OpenAI, Anthropic, etc.

  // Simulate some processing time
  await new Promise((resolve) => setTimeout(resolve, 100));

  // Lowercase once instead of recomputing it for every keyword check.
  const lowered = prompt.toLowerCase();

  // Simple mock responses based on prompt content
  if (prompt.includes("sentiment")) {
    if (lowered.includes("love") || lowered.includes("great")) {
      return "positive";
    }
    if (lowered.includes("hate") || lowered.includes("terrible")) {
      return "negative";
    }
    return "neutral";
  }

  if (prompt.includes("Summarize")) {
    return "This is a mock summary of the provided text. In a real implementation, this would be generated by your LLM.";
  }

  if (prompt.includes("Translate")) {
    return prompt.includes("Spanish")
      ? "Hola, mundo!"
      : "Translation would appear here";
  }

  return "Mock LLM response";
}

// Create sampling callback
/**
 * Handles sampling (createMessage) requests from the server by delegating
 * to the mock LLM.
 *
 * @param params - The createMessage request parameters from the server.
 * @returns The completion result to send back to the server.
 * @throws Error if the request has no messages or the content is not text.
 */
async function samplingCallback(
  params: CreateMessageRequest["params"]
): Promise<
  import("@mcp-use/modelcontextprotocol-sdk/types.js").CreateMessageResult
> {
  console.log("📥 Received sampling request:");
  console.log("   Messages:", params.messages.length);
  console.log("   Model preferences:", params.modelPreferences);

  // Guard against an empty message list so we fail with a clear error
  // instead of a TypeError when indexing past the end below.
  if (params.messages.length === 0) {
    throw new Error("Sampling request contained no messages");
  }

  // Extract the last message content
  const lastMessage = params.messages[params.messages.length - 1];
  const content = Array.isArray(lastMessage.content)
    ? lastMessage.content[0]
    : lastMessage.content;

  if (content.type !== "text") {
    throw new Error("Only text content is supported in this example");
  }

  // Call your LLM (replace with actual LLM call)
  const response = await mockLLM(content.text);

  console.log("📤 Sending sampling response:", response);

  return {
    role: "assistant",
    content: { type: "text", text: response },
    model: "mock-llm-v1",
    stopReason: "endTurn",
  };
}

/**
 * Connects to the sampling server, lists its tools, and exercises each
 * sampling-backed tool with a sample input. Sets a non-zero exit code on
 * failure but always closes the client sessions before returning.
 */
async function main() {
  console.log("🚀 Starting Sampling Client Example\n");

  // Create client with sampling callback
  const client = new MCPClient(
    {
      mcpServers: {
        sampling: {
          url: "http://localhost:3000/mcp",
        },
      },
    },
    {
      samplingCallback,
    }
  );

  try {
    // Create and initialize session
    console.log("📡 Connecting to sampling server...");
    await client.createAllSessions();
    console.log("✅ Connected!\n");

    // List available tools
    const session = client.getSession("sampling");
    if (!session) {
      throw new Error("Failed to get session");
    }

    const connector = session.connector;
    const tools = connector.tools;
    console.log("🔧 Available tools:");
    tools.forEach((tool) => {
      console.log(`   - ${tool.name}: ${tool.description}`);
    });
    console.log();

    // Test analyze-sentiment tool
    console.log("🧪 Testing analyze-sentiment tool...");
    const sentimentResult = await connector.callTool("analyze-sentiment", {
      text: "I love this product! It's amazing!",
    });
    console.log("   Result:", sentimentResult.content[0].text);
    console.log();

    // Test summarize-text tool
    console.log("🧪 Testing summarize-text tool...");
    const summaryResult = await connector.callTool("summarize-text", {
      text: "This is a long piece of text that needs to be summarized. It contains multiple sentences and ideas that should be condensed into a shorter form while preserving the key information.",
      maxLength: 20,
    });
    console.log("   Result:", summaryResult.content[0].text);
    console.log();

    // Test translate-text tool
    console.log("🧪 Testing translate-text tool...");
    const translateResult = await connector.callTool("translate-text", {
      text: "Hello, world!",
      targetLanguage: "Spanish",
    });
    console.log("   Result:", translateResult.content[0].text);
    console.log();

    console.log("✅ All tests completed successfully!");
  } catch (error: unknown) {
    // Narrow the unknown catch value instead of assuming it is an Error.
    if (error instanceof Error) {
      console.error("❌ Error:", error.message);
      if (error.stack) {
        console.error(error.stack);
      }
    } else {
      console.error("❌ Error:", error);
    }
    // Use exitCode rather than process.exit(1): process.exit() terminates
    // immediately and would skip the `finally` cleanup below, leaving the
    // client sessions open.
    process.exitCode = 1;
  } finally {
    // Clean up
    await client.closeAllSessions();
    console.log("\n👋 Client disconnected");
  }
}

// Kick off the example; surface any unhandled rejection on stderr.
void main().catch((error) => console.error(error));
