#!/usr/bin/env ruby
# frozen_string_literal: true

# LLM Provider Integration Example
# This example shows how to integrate the LLM Tracer with different LLM providers

require "bundler/setup"
require "llm_tracer"

# Initialize OpenTelemetry
# NOTE(review): called without a configuration block — presumably the SDK picks up
# exporter/processor settings from environment-variable defaults; confirm against
# the OpenTelemetry Ruby SDK documentation for your deployment.
OpenTelemetry::SDK.configure

# Example: OpenAI-like integration
class OpenAIClient
  def initialize(api_key)
    @api_key = api_key
  end

  # Issues a (simulated) chat-completion request, traced as an LLM call.
  # Returns the simulated response hash (choices, usage, finish_reason).
  def chat_completion(messages, model: "gpt-4", temperature: 0.7, max_tokens: 1000)
    # Flatten the chat transcript into a single prompt string for the span.
    prompt_text = messages
                  .map { |msg| "#{msg[:role]}: #{msg[:content]}" }
                  .join("\n")

    LlmTracer.llm_call(
      model: model,
      provider: "openai",
      prompt: prompt_text,
      temperature: temperature,
      max_tokens: max_tokens
    ) do |span|
      # Pretend network latency
      sleep(0.3)

      simulated = {
        model: model,
        choices: [{ message: { content: "This is a simulated response from #{model}" } }],
        usage: {
          total_tokens: 150,
          prompt_tokens: 50,
          completion_tokens: 100
        },
        finish_reason: "stop"
      }

      # Record the response metadata on the active span.
      LlmTracer.tracer.add_llm_response(
        span,
        response_model: simulated[:model],
        response_provider: "openai",
        finish_reason: simulated[:finish_reason],
        usage: simulated[:usage]
      )

      simulated
    end
  end
end

# Example: Anthropic-like integration
class AnthropicClient
  def initialize(api_key)
    @api_key = api_key
  end

  # Issues a (simulated) Anthropic messages request, traced as an LLM call.
  # Returns the simulated response hash (content, usage, stop_reason).
  def messages(prompt, model: "claude-3-sonnet", max_tokens: 1000)
    LlmTracer.llm_call(
      model: model,
      provider: "anthropic",
      prompt: prompt,
      max_tokens: max_tokens
    ) do |span|
      # Pretend network latency
      sleep(0.25)

      reply = {
        model: model,
        content: [{ text: "This is a simulated response from #{model}" }],
        usage: {
          total_tokens: 200,
          prompt_tokens: 75,
          completion_tokens: 125
        },
        stop_reason: "end_turn"
      }

      # Anthropic reports "stop_reason"; map it to the tracer's finish_reason field.
      LlmTracer.tracer.add_llm_response(
        span,
        response_model: reply[:model],
        response_provider: "anthropic",
        finish_reason: reply[:stop_reason],
        usage: reply[:usage]
      )

      reply
    end
  end
end

# Example: Tool integration
class WeatherTool
  # Possible simulated conditions. Hoisted to a frozen constant so the array
  # is allocated once at load time instead of on every call.
  CONDITIONS = %w[sunny cloudy rainy partly_cloudy].freeze

  # Simulates a weather-API lookup, traced as a tool call.
  #
  # @param city [String] city to look up
  # @param country [String] country code for the city
  # @return [Hash] simulated weather data (:city, :country, :temperature,
  #   :condition, :humidity)
  def get_weather(city, country)
    LlmTracer.tool_call(
      name: "weather_api",
      input: { city: city, country: country }
    ) do |span|
      # Simulate API call latency
      sleep(0.1)

      # Simulate weather data
      weather_data = {
        city: city,
        country: country,
        temperature: rand(50..90),
        condition: CONDITIONS.sample,
        humidity: rand(30..80)
      }

      # Attach the tool output to the span
      LlmTracer.tracer.add_tool_call_result(span, output: weather_data)

      weather_data
    end
  end
end

# Example: Database tool
class DatabaseTool
  # Runs a (simulated) SQL query, traced as a tool call.
  # Returns a hash with simulated :rows, :execution_time, and :data.
  def query(sql, params = {})
    LlmTracer.tool_call(
      name: "database_query",
      input: { sql: sql, params: params }
    ) do |span|
      # Pretend query latency
      sleep(0.05)

      fake_results = {
        rows: rand(5..20),
        execution_time: rand(0.01..0.1).round(3),
        data: %w[record1 record2 record3]
      }

      # Attach the tool output to the span
      LlmTracer.tracer.add_tool_call_result(span, output: fake_results)

      fake_results
    end
  end
end

# Example: Multi-agent workflow
class ContentGenerationWorkflow
  def initialize
    @openai_client = OpenAIClient.new("fake-api-key")
    @anthropic_client = AnthropicClient.new("fake-api-key")
    @weather_tool = WeatherTool.new
    @db_tool = DatabaseTool.new
  end

  # Runs the three traced agent phases (research, creation, review) inside a
  # single workflow span.
  def generate_content(topic, city)
    LlmTracer.workflow(name: "content_generation_workflow", version: "2.0.0") do |span|
      span.set_attribute("business.topic", topic)
      span.set_attribute("business.location", city)

      research_phase(city)
      creation_phase(topic, city)
      review_phase
    end
  end

  private

  # Gathers weather and historical context for the target city.
  def research_phase(city)
    LlmTracer.agent(name: "researcher", version: "1.0.0") do |span|
      weather = @weather_tool.get_weather(city, "US")
      span.set_attribute("research.weather", weather[:condition])

      history = @db_tool.query("SELECT * FROM historical_data WHERE city = ?", [city])
      span.set_attribute("research.historical_records", history[:rows])
    end
  end

  # Drafts an introduction with Claude, then enhances it with GPT-4.
  def creation_phase(topic, city)
    LlmTracer.agent(name: "content_creator", version: "1.5.0") do |span|
      claude_response = @anthropic_client.messages(
        "Create an engaging introduction about #{topic} in #{city}",
        model: "claude-3-sonnet"
      )
      span.set_attribute("content.claude_response_length", claude_response[:usage][:completion_tokens])

      gpt_response = @openai_client.chat_completion([
        { role: "system", content: "You are a content enhancer" },
        { role: "user", content: "Enhance this content: #{claude_response[:content][0][:text]}" }
      ], model: "gpt-4")
      span.set_attribute("content.gpt_enhancement_tokens", gpt_response[:usage][:completion_tokens])
    end
  end

  # Performs the final quality check with GPT-4 at low temperature.
  def review_phase
    LlmTracer.agent(name: "reviewer", version: "1.0.0") do |span|
      final_review = @openai_client.chat_completion([
        { role: "system", content: "You are a content reviewer" },
        { role: "user", content: "Review this content for quality and accuracy" }
      ], model: "gpt-4", temperature: 0.3)

      span.set_attribute("review.final_tokens", final_review[:usage][:total_tokens])
      span.set_attribute("review.quality_score", "high")
    end
  end
end

# Run the example
puts "=== LLM Provider Integration Example ==="

ContentGenerationWorkflow.new.generate_content("sustainable tourism", "San Francisco")

puts "\n=== Workflow completed! ==="
puts "Check your OpenTelemetry backend to see the complete trace."
