# frozen_string_literal: true

require "json"
require "opentelemetry"
require "opentelemetry/sdk"
require "opentelemetry/semantic_conventions"

require_relative "llm_tracer/version"

module LlmTracer
  # Base error class for the gem. Rescue LlmTracer::Error to catch any
  # error this library raises deliberately.
  class Error < StandardError; end

  # OpenTelemetry GenAI semantic conventions
  #
  # Span names, span kinds, attribute keys, and standardized attribute
  # values used by Tracer below when building spans.
  #
  # NOTE(review): several keys here (e.g. "gen_ai.workflow.name",
  # "gen_ai.llm.request.prompt") appear to be project extensions rather
  # than entries in the official OTel GenAI semconv registry — confirm
  # before assuming backend tooling understands them.
  module GenAI
    # Span names
    WORKFLOW_SPAN_NAME = "gen_ai.workflow"
    AGENT_SPAN_NAME = "gen_ai.agent"
    LLM_CALL_SPAN_NAME = "gen_ai.llm_call"
    TOOL_CALL_SPAN_NAME = "gen_ai.tool_call"

    # Span kinds: an LLM call is an outbound request to a model provider
    # (CLIENT); the other spans represent in-process work (INTERNAL).
    WORKFLOW_SPAN_KIND = OpenTelemetry::Trace::SpanKind::INTERNAL
    AGENT_SPAN_KIND = OpenTelemetry::Trace::SpanKind::INTERNAL
    LLM_CALL_SPAN_KIND = OpenTelemetry::Trace::SpanKind::CLIENT
    TOOL_CALL_SPAN_KIND = OpenTelemetry::Trace::SpanKind::INTERNAL

    # Attribute keys set on spans (string keys, per OTel attribute rules)
    module Attributes
      # Common request/response attributes
      REQUEST_MODEL = "gen_ai.request.model"
      REQUEST_PROVIDER = "gen_ai.request.provider"
      RESPONSE_MODEL = "gen_ai.response.model"
      RESPONSE_PROVIDER = "gen_ai.response.provider"

      # Operation attributes
      OPERATION_NAME = "gen_ai.operation.name"
      SYSTEM = "gen_ai.system"
      CONVERSATION_ID = "gen_ai.conversation.id"
      DATA_SOURCE_ID = "gen_ai.data_source.id"
      OUTPUT_TYPE = "gen_ai.output.type"

      # Workflow attributes
      WORKFLOW_NAME = "gen_ai.workflow.name"
      WORKFLOW_VERSION = "gen_ai.workflow.version"

      # Agent attributes
      AGENT_NAME = "gen_ai.agent.name"
      AGENT_VERSION = "gen_ai.agent.version"

      # LLM call attributes (request parameters and token-usage counters)
      LLM_REQUEST_PROMPT = "gen_ai.llm.request.prompt"
      LLM_REQUEST_TEMPERATURE = "gen_ai.llm.request.temperature"
      LLM_REQUEST_MAX_TOKENS = "gen_ai.llm.request.max_tokens"
      LLM_RESPONSE_FINISH_REASON = "gen_ai.llm.response.finish_reason"
      LLM_RESPONSE_USAGE_TOTAL_TOKENS = "gen_ai.llm.response.usage.total_tokens"
      LLM_RESPONSE_USAGE_PROMPT_TOKENS = "gen_ai.llm.response.usage.prompt_tokens"
      LLM_RESPONSE_USAGE_COMPLETION_TOKENS = "gen_ai.llm.response.usage.completion_tokens"

      # Tool call attributes (input/output are JSON-serialized by Tracer)
      TOOL_CALL_NAME = "gen_ai.tool_call.name"
      TOOL_CALL_INPUT = "gen_ai.tool_call.input"
      TOOL_CALL_OUTPUT = "gen_ai.tool_call.output"
      TOOL_CALL_ERROR = "gen_ai.tool_call.error"

      # Error attributes (shared OTel "error.*" namespace)
      ERROR_TYPE = "error.type"
      ERROR_MESSAGE = "error.message"
    end

    # Standardized operation names for gen_ai.operation.name
    module Operations
      CHAT = "chat"
      CREATE_AGENT = "create_agent"
      EMBEDDINGS = "embeddings"
      EXECUTE_TOOL = "execute_tool"
      GENERATE_CONTENT = "generate_content"
      INVOKE_AGENT = "invoke_agent"
      TEXT_COMPLETION = "text_completion"
    end

    # Standardized system values for gen_ai.system
    module Systems
      ANTHROPIC = "anthropic"
      AWS_BEDROCK = "aws.bedrock"
      AZURE_AI_INFERENCE = "azure.ai.inference"
      AZURE_AI_OPENAI = "azure.ai.openai"
      COHERE = "cohere"
      DEEPSEEK = "deepseek"
      GCP_GEMINI = "gcp.gemini"
      GCP_GEN_AI = "gcp.gen_ai"
      GCP_VERTEX_AI = "gcp.vertex_ai"
      GROQ = "groq"
      IBM_WATSONX_AI = "ibm.watsonx.ai"
      MISTRAL_AI = "mistral_ai"
      OPENAI = "openai"
      PERPLEXITY = "perplexity"
      XAI = "xai"
      # Catch-all for systems not covered above
      OTHER = "_OTHER"
    end

    # Standardized output types for gen_ai.output.type
    module OutputTypes
      IMAGE = "image"
      JSON = "json"
      SPEECH = "speech"
      TEXT = "text"
    end
  end

  # Main tracer class for creating LLM application spans
  #
  # Thin wrapper around an OpenTelemetry tracer that opens spans named and
  # attributed per the GenAI module above. Each span-opening method yields
  # the live span to the caller's block (when given) and returns the
  # block's result; nil-valued attributes are dropped before span creation.
  class Tracer
    # Fallback error.type value when no classification is available,
    # matching the "_OTHER" convention used by GenAI::Systems::OTHER.
    UNCLASSIFIED_ERROR_TYPE = "_OTHER"

    def initialize
      @tracer = OpenTelemetry.tracer_provider.tracer("llm_tracer", LlmTracer::VERSION)
    end

    # Create a workflow span
    # @param name [String] The name of the workflow
    # @param version [String, nil] The version of the workflow
    # @param attributes [Hash] Additional attributes for the span
    # @yieldparam span [OpenTelemetry::Trace::Span] the active span
    # @return the block's result (nil when no block is given)
    def workflow(name:, version: nil, attributes: {})
      span_attributes = {
        GenAI::Attributes::WORKFLOW_NAME => name,
        GenAI::Attributes::WORKFLOW_VERSION => version
      }.merge(attributes).compact

      in_genai_span(GenAI::WORKFLOW_SPAN_NAME, GenAI::WORKFLOW_SPAN_KIND, span_attributes) do |span|
        yield span if block_given?
      end
    end

    # Create an agent span
    # @param name [String] The name of the agent
    # @param version [String, nil] The version of the agent
    # @param operation_name [String, nil] operation performed, e.g. Operations::INVOKE_AGENT
    # @param system [String, nil] the GenAI system, e.g. Systems::OPENAI
    # @param conversation_id [String, nil] The conversation ID if available
    # @param attributes [Hash] Additional attributes for the span
    # @yieldparam span [OpenTelemetry::Trace::Span] the active span
    # @return the block's result (nil when no block is given)
    def agent(
      name:,
      version: nil,
      operation_name: nil,
      system: nil,
      conversation_id: nil,
      attributes: {}
    )
      span_attributes = {
        GenAI::Attributes::AGENT_NAME => name,
        GenAI::Attributes::AGENT_VERSION => version,
        GenAI::Attributes::OPERATION_NAME => operation_name,
        GenAI::Attributes::SYSTEM => system,
        GenAI::Attributes::CONVERSATION_ID => conversation_id
      }.merge(attributes).compact

      in_genai_span(GenAI::AGENT_SPAN_NAME, GenAI::AGENT_SPAN_KIND, span_attributes) do |span|
        yield span if block_given?
      end
    end

    # Create an LLM call span
    # @param model [String] The model being requested
    # @param provider [String] The provider of the model
    # @param operation_name [String, nil] operation performed, e.g. Operations::CHAT
    # @param system [String, nil] the GenAI system, e.g. Systems::ANTHROPIC
    # @param prompt [String, nil] The prompt sent to the LLM
    # @param temperature [Float, nil] The temperature setting
    # @param max_tokens [Integer, nil] The maximum tokens setting
    # @param conversation_id [String, nil] The conversation ID if available
    # @param output_type [String, nil] expected output, e.g. OutputTypes::TEXT
    # @param attributes [Hash] Additional attributes for the span
    # @yieldparam span [OpenTelemetry::Trace::Span] the active span
    # @return the block's result (nil when no block is given)
    def llm_call(
      model:,
      provider:,
      operation_name: nil,
      system: nil,
      prompt: nil,
      temperature: nil,
      max_tokens: nil,
      conversation_id: nil,
      output_type: nil,
      attributes: {}
    )
      span_attributes = {
        GenAI::Attributes::REQUEST_MODEL => model,
        GenAI::Attributes::REQUEST_PROVIDER => provider,
        GenAI::Attributes::OPERATION_NAME => operation_name,
        GenAI::Attributes::SYSTEM => system,
        GenAI::Attributes::LLM_REQUEST_PROMPT => prompt,
        GenAI::Attributes::LLM_REQUEST_TEMPERATURE => temperature,
        GenAI::Attributes::LLM_REQUEST_MAX_TOKENS => max_tokens,
        GenAI::Attributes::CONVERSATION_ID => conversation_id,
        GenAI::Attributes::OUTPUT_TYPE => output_type
      }.merge(attributes).compact

      in_genai_span(GenAI::LLM_CALL_SPAN_NAME, GenAI::LLM_CALL_SPAN_KIND, span_attributes) do |span|
        yield span if block_given?
      end
    end

    # Create a tool call span
    # @param name [String] The name of the tool being called
    # @param operation_name [String, nil] operation performed, e.g. Operations::EXECUTE_TOOL
    # @param system [String, nil] the GenAI system being used
    # @param input [Hash, nil] tool input; JSON-serialized onto the span
    # @param conversation_id [String, nil] The conversation ID if available
    # @param attributes [Hash] Additional attributes for the span
    # @yieldparam span [OpenTelemetry::Trace::Span] the active span
    # @return the block's result (nil when no block is given)
    def tool_call(
      name:,
      operation_name: nil,
      system: nil,
      input: nil,
      conversation_id: nil,
      attributes: {}
    )
      span_attributes = {
        GenAI::Attributes::TOOL_CALL_NAME => name,
        GenAI::Attributes::OPERATION_NAME => operation_name,
        GenAI::Attributes::SYSTEM => system,
        GenAI::Attributes::TOOL_CALL_INPUT => input&.to_json,
        GenAI::Attributes::CONVERSATION_ID => conversation_id
      }.merge(attributes).compact

      in_genai_span(GenAI::TOOL_CALL_SPAN_NAME, GenAI::TOOL_CALL_SPAN_KIND, span_attributes) do |span|
        yield span if block_given?
      end
    end

    # Helper method to add response information to an LLM call span
    # @param span [OpenTelemetry::Trace::Span] The span to update
    # @param response_model [String, nil] The response model
    # @param response_provider [String, nil] The response provider
    # @param finish_reason [String, nil] The finish reason
    # @param usage [Hash, nil] token usage; recognized keys are
    #   :total_tokens, :prompt_tokens, :completion_tokens
    def add_llm_response(
      span,
      response_model: nil,
      response_provider: nil,
      finish_reason: nil,
      usage: {}
    )
      # The keyword default does not guard an explicit `usage: nil`, which
      # previously raised NoMethodError on `usage[:total_tokens]`.
      usage ||= {}

      span.set_attribute(GenAI::Attributes::RESPONSE_MODEL, response_model) if response_model
      span.set_attribute(GenAI::Attributes::RESPONSE_PROVIDER, response_provider) if response_provider
      span.set_attribute(GenAI::Attributes::LLM_RESPONSE_FINISH_REASON, finish_reason) if finish_reason

      {
        GenAI::Attributes::LLM_RESPONSE_USAGE_TOTAL_TOKENS => usage[:total_tokens],
        GenAI::Attributes::LLM_RESPONSE_USAGE_PROMPT_TOKENS => usage[:prompt_tokens],
        GenAI::Attributes::LLM_RESPONSE_USAGE_COMPLETION_TOKENS => usage[:completion_tokens]
      }.each { |key, count| span.set_attribute(key, count) if count }
    end

    # Helper method to add tool call output to a tool call span
    # @param span [OpenTelemetry::Trace::Span] The span to update
    # @param output [Object, nil] tool result; JSON-serialized onto the span
    # @param error [String, nil] error description, if the call failed
    def add_tool_call_result(span, output: nil, error: nil)
      # `nil?` check (not truthiness) so a legitimate `false` tool result
      # is still recorded; `if output` would silently drop it.
      span.set_attribute(GenAI::Attributes::TOOL_CALL_OUTPUT, output.to_json) unless output.nil?
      return unless error

      message = error.to_s # accept a String or any error-like object
      span.set_attribute(GenAI::Attributes::TOOL_CALL_ERROR, message)
      span.set_attribute(GenAI::Attributes::ERROR_TYPE, UNCLASSIFIED_ERROR_TYPE)
      span.set_attribute(GenAI::Attributes::ERROR_MESSAGE, message)
    end

    # Helper method to add error information to any span
    # @param span [OpenTelemetry::Trace::Span] The span to update
    # @param error_type [String, nil] The type of error
    # @param error_message [String, nil] The error message
    def add_error(span, error_type: nil, error_message: nil)
      span.set_attribute(GenAI::Attributes::ERROR_TYPE, error_type) if error_type
      span.set_attribute(GenAI::Attributes::ERROR_MESSAGE, error_message) if error_message
    end

    private

    # Shared span-opening plumbing for the public span helpers above.
    def in_genai_span(span_name, span_kind, span_attributes, &block)
      @tracer.in_span(span_name, attributes: span_attributes, kind: span_kind, &block)
    end
  end

  # Convenience method to get a tracer instance
  #
  # Memoizes a single shared Tracer for the process.
  # NOTE(review): `||=` memoization is not atomic — two threads racing
  # here could each build a Tracer; likely harmless (the extra instance
  # is discarded) but confirm if strict single-instance behavior matters.
  def self.tracer
    @tracer ||= Tracer.new
  end

  # Module-level convenience API: each method forwards its arguments and
  # block verbatim to the shared LlmTracer.tracer instance, so callers can
  # write e.g. `LlmTracer.workflow(name: "etl") { |span| ... }`.
  class << self
    # Open a workflow span; see Tracer#workflow.
    def workflow(name:, version: nil, attributes: {}, &block)
      tracer.workflow(name: name, version: version, attributes: attributes, &block)
    end

    # Open an agent span; see Tracer#agent.
    def agent(name:, version: nil, operation_name: nil, system: nil,
              conversation_id: nil, attributes: {}, &block)
      tracer.agent(
        name: name, version: version, operation_name: operation_name,
        system: system, conversation_id: conversation_id,
        attributes: attributes, &block
      )
    end

    # Open an LLM call span; see Tracer#llm_call.
    def llm_call(model:, provider:, operation_name: nil, system: nil,
                 prompt: nil, temperature: nil, max_tokens: nil,
                 conversation_id: nil, output_type: nil, attributes: {}, &block)
      tracer.llm_call(
        model: model, provider: provider, operation_name: operation_name,
        system: system, prompt: prompt, temperature: temperature,
        max_tokens: max_tokens, conversation_id: conversation_id,
        output_type: output_type, attributes: attributes, &block
      )
    end

    # Open a tool call span; see Tracer#tool_call.
    def tool_call(name:, operation_name: nil, system: nil, input: nil,
                  conversation_id: nil, attributes: {}, &block)
      tracer.tool_call(
        name: name, operation_name: operation_name, system: system,
        input: input, conversation_id: conversation_id,
        attributes: attributes, &block
      )
    end
  end
end
