#!/usr/bin/env ruby
# frozen_string_literal: true

# Basic usage example for the LLM Tracer library
# This example demonstrates how to create different types of spans for LLM applications

require "bundler/setup"
require "llm_tracer"

# Initialize the OpenTelemetry SDK with its defaults so spans are recorded.
# In a real application you would pass a block here to configure exporters,
# span processors, and resource attributes; with no block the SDK picks up
# its environment-variable defaults.
# NOTE(review): this assumes `require "llm_tracer"` loads the OpenTelemetry
# SDK transitively — confirm, or add an explicit require "opentelemetry-sdk".
OpenTelemetry::SDK.configure

# Example 1: Simple workflow tracing.
# Opens a single workflow span and decorates it with custom attributes.
puts "=== Example 1: Simple Workflow ==="
LlmTracer.workflow(name: "customer_support_workflow", version: "1.0.0") do |span|
  puts "Created workflow span: #{span.name}"

  # Attach business metadata as custom attributes on the workflow span.
  {
    "business.workflow.type" => "customer_service",
    "business.workflow.priority" => "high"
  }.each { |key, value| span.set_attribute(key, value) }
end

# Example 2: Agent tracing within a workflow
puts "\n=== Example 2: Agent within Workflow ==="
LlmTracer.workflow(name: "content_generation_workflow", version: "2.1.0") do |workflow_span|
  puts "Created workflow span: #{workflow_span.name}"

  LlmTracer.agent(name: "content_creator", version: "1.5.0") do |agent_span|
    puts "Created agent span: #{agent_span.name}"

    # Simulate some work
    sleep(0.1)

    # Add agent-specific attributes
    agent_span.set_attribute("agent.capabilities", ["text_generation", "content_editing"])
  end
end

# Example 3: LLM call tracing.
# Traces a single model invocation and records the (simulated) response
# metadata plus token usage on the span.
puts "\n=== Example 3: LLM Call ==="
request = {
  model: "gpt-4",
  provider: "openai",
  prompt: "Generate a creative story about a robot learning to paint",
  temperature: 0.8,
  max_tokens: 500
}
LlmTracer.llm_call(**request) do |span|
  puts "Created LLM call span: #{span.name}"

  sleep(0.2) # stand-in for the actual model round-trip

  # Attach the response details and token accounting to the span.
  LlmTracer.tracer.add_llm_response(
    span,
    response_model: "gpt-4",
    response_provider: "openai",
    finish_reason: "stop",
    usage: { total_tokens: 150, prompt_tokens: 25, completion_tokens: 125 }
  )
end

# Example 4: Tool call tracing.
# Traces an external tool invocation and records its output on the span.
puts "\n=== Example 4: Tool Call ==="
tool_input = { city: "San Francisco", country: "US" }
LlmTracer.tool_call(name: "weather_api", input: tool_input) do |span|
  puts "Created tool call span: #{span.name}"

  sleep(0.1) # stand-in for the real API request

  # Record the (simulated) tool result on the span.
  LlmTracer.tracer.add_tool_call_result(
    span,
    output: { temperature: 72, condition: "sunny", humidity: 45 }
  )
end

# Example 5: Complex nested workflow.
# Demonstrates a realistic trace shape: workflow -> agent -> (LLM call,
# tool call, LLM call), all nested so the backend renders the hierarchy.
puts "\n=== Example 5: Complex Nested Workflow ==="
LlmTracer.workflow(name: "document_analysis_workflow", version: "3.0.0") do |workflow|
  puts "Created workflow span: #{workflow.name}"

  LlmTracer.agent(name: "document_analyzer", version: "2.0.0") do |agent|
    puts "Created agent span: #{agent.name}"

    # Step 1: LLM call for document understanding.
    LlmTracer.llm_call(
      model: "claude-3-sonnet",
      provider: "anthropic",
      prompt: "Analyze the following document and extract key insights",
      temperature: 0.3
    ) do |span|
      puts "Created LLM call span: #{span.name}"
      sleep(0.15)

      token_usage = { total_tokens: 200, prompt_tokens: 50, completion_tokens: 150 }
      LlmTracer.tracer.add_llm_response(
        span,
        response_model: "claude-3-sonnet",
        response_provider: "anthropic",
        finish_reason: "stop",
        usage: token_usage
      )
    end

    # Step 2: tool call to fetch stored insights from the database.
    db_input = { query: "SELECT * FROM insights" }
    LlmTracer.tool_call(name: "database_query", input: db_input) do |span|
      puts "Created tool call span: #{span.name}"
      sleep(0.1)

      LlmTracer.tracer.add_tool_call_result(
        span,
        output: { records: 15, insights: ["insight1", "insight2"] }
      )
    end

    # Step 3: second LLM call to synthesize the gathered insights.
    LlmTracer.llm_call(
      model: "gpt-4",
      provider: "openai",
      prompt: "Synthesize the insights into a summary report",
      temperature: 0.5
    ) do |span|
      puts "Created LLM call span: #{span.name}"
      sleep(0.2)

      token_usage = { total_tokens: 300, prompt_tokens: 75, completion_tokens: 225 }
      LlmTracer.tracer.add_llm_response(
        span,
        response_model: "gpt-4",
        response_provider: "openai",
        finish_reason: "stop",
        usage: token_usage
      )
    end
  end
end

# Final confirmation for the user running the example.
# (Kernel#puts prints each argument on its own line, so output is unchanged.)
puts(
  "\n=== All examples completed! ===",
  "Check your OpenTelemetry backend to see the generated traces."
)
