#!/usr/bin/env ruby
# frozen_string_literal: true

# Simple demonstration of the LLM Tracer library
# This script shows how to use the different span types

require_relative "lib/llm_tracer"

# Initialize OpenTelemetry with console exporter for demonstration
# Wire OpenTelemetry to print every finished span to STDOUT so the demo
# output is visible in the terminal (SimpleSpanProcessor exports eagerly).
console_exporter = OpenTelemetry::SDK::Trace::Export::ConsoleSpanExporter.new
OpenTelemetry::SDK.configure do |config|
  config.add_span_processor(
    OpenTelemetry::SDK::Trace::Export::SimpleSpanProcessor.new(console_exporter)
  )
end

puts "🚀 LLM Tracer Demo"
puts "=================="
puts

# Demo 1: a single workflow span with one custom attribute attached.
puts "📋 Demo 1: Simple Workflow"
LlmTracer.workflow(name: "demo_workflow", version: "1.0.0") do |span|
  puts "  ✓ Created workflow span: #{span.name}"
  # Attach an arbitrary attribute to show the span object is a real OTel span.
  span.set_attribute("demo.type", "simple_workflow")
  puts "  ✓ Added custom attribute: demo.type = simple_workflow"
end
puts

# Demo 2: an agent span nested in a workflow, wrapping a simulated LLM call.
puts "🤖 Demo 2: Agent with LLM Call"
LlmTracer.workflow(name: "ai_assistant_workflow", version: "2.0.0") do |wf|
  puts "  ✓ Created workflow span: #{wf.name}"

  LlmTracer.agent(name: "ai_assistant", version: "1.5.0") do |agent|
    puts "  ✓ Created agent span: #{agent.name}"

    # Span for a simulated chat-completion request.
    LlmTracer.llm_call(
      model: "gpt-4",
      provider: "openai",
      prompt: "Explain quantum computing in simple terms",
      temperature: 0.7,
      max_tokens: 200
    ) do |llm|
      puts "  ✓ Created LLM call span: #{llm.name}"

      # Attach a fake response and its token accounting to the LLM span.
      usage = { total_tokens: 150, prompt_tokens: 30, completion_tokens: 120 }
      LlmTracer.tracer.add_llm_response(
        llm,
        response_model: "gpt-4",
        response_provider: "openai",
        finish_reason: "stop",
        usage: usage
      )
      puts "  ✓ Added LLM response information"
    end
  end
end
puts

# Demo 3: two sibling tool-call spans (database + external API) in a workflow.
puts "🔧 Demo 3: Tool Calls"
LlmTracer.workflow(name: "data_processing_workflow", version: "1.0.0") do |wf|
  puts "  ✓ Created workflow span: #{wf.name}"

  # Simulated database query tool.
  LlmTracer.tool_call(name: "database_query", input: { table: "users", limit: 100 }) do |db|
    puts "  ✓ Created database tool call span: #{db.name}"

    db_result = { rows: 95, execution_time: 0.045 }
    LlmTracer.tracer.add_tool_call_result(db, output: db_result)
    puts "  ✓ Added database result: #{db_result[:rows]} rows in #{db_result[:execution_time]}s"
  end

  # Simulated external weather API tool.
  LlmTracer.tool_call(name: "external_api", input: { endpoint: "/weather", city: "San Francisco" }) do |api|
    puts "  ✓ Created API tool call span: #{api.name}"

    forecast = { temperature: 72, condition: "sunny" }
    LlmTracer.tracer.add_tool_call_result(api, output: forecast)
    puts "  ✓ Added API result: #{forecast[:temperature]}°F, #{forecast[:condition]}"
  end
end
puts

# Demo 4: a three-phase pipeline (research → write → review) showing deep
# nesting of workflow, agent, tool-call, and LLM-call spans.
puts "🔄 Demo 4: Complex Nested Workflow"
LlmTracer.workflow(name: "content_creation_workflow", version: "3.0.0") do |wf|
  puts "  ✓ Created workflow span: #{wf.name}"

  # Phase 1: research — an agent running two tool calls.
  LlmTracer.agent(name: "researcher", version: "1.0.0") do |researcher|
    puts "  ✓ Created researcher agent span: #{researcher.name}"

    LlmTracer.tool_call(name: "web_search", input: { query: "AI trends 2024" }) do |search|
      puts "    ✓ Created web search tool call span: #{search.name}"
      LlmTracer.tracer.add_tool_call_result(search, output: { results: 15, sources: 8 })
    end

    LlmTracer.tool_call(name: "database_lookup", input: { table: "research_papers", topic: "AI" }) do |lookup|
      puts "    ✓ Created database lookup tool call span: #{lookup.name}"
      LlmTracer.tracer.add_tool_call_result(lookup, output: { papers: 23, relevant: 18 })
    end
  end

  # Phase 2: writing — an agent generating content via a simulated LLM call.
  LlmTracer.agent(name: "writer", version: "2.0.0") do |writer|
    puts "  ✓ Created writer agent span: #{writer.name}"

    LlmTracer.llm_call(
      model: "claude-3-sonnet",
      provider: "anthropic",
      prompt: "Write a comprehensive article about AI trends in 2024",
      temperature: 0.8,
      max_tokens: 1000
    ) do |llm|
      puts "    ✓ Created LLM call span: #{llm.name}"

      LlmTracer.tracer.add_llm_response(
        llm,
        response_model: "claude-3-sonnet",
        response_provider: "anthropic",
        finish_reason: "stop",
        usage: { total_tokens: 800, prompt_tokens: 150, completion_tokens: 650 }
      )
    end
  end

  # Phase 3: review — an agent running a quality check via a second LLM call.
  LlmTracer.agent(name: "reviewer", version: "1.0.0") do |reviewer|
    puts "  ✓ Created reviewer agent span: #{reviewer.name}"

    LlmTracer.llm_call(
      model: "gpt-4",
      provider: "openai",
      prompt: "Review this article for accuracy and clarity",
      temperature: 0.3,
      max_tokens: 500
    ) do |llm|
      puts "    ✓ Created review LLM call span: #{llm.name}"

      LlmTracer.tracer.add_llm_response(
        llm,
        response_model: "gpt-4",
        response_provider: "openai",
        finish_reason: "stop",
        usage: { total_tokens: 300, prompt_tokens: 100, completion_tokens: 200 }
      )
    end
  end
end

puts
# Closing banner: same three lines as before, emitted via a squiggly heredoc.
puts <<~OUTRO
  🎉 Demo completed!
  Check the console output above to see the generated OpenTelemetry spans.
  Each span includes the appropriate GenAI semantic conventions and attributes.
OUTRO
