# encoding: utf-8

require 'c_tokenizer_ext'

# Ruby interface to the C extension (c_tokenizer_ext provides #ctokenize!).
#
# Subclasses describe a line format with the +add_field+ / +look_for+
# class macros, then call #tokenize! on each input line. The token
# template lives in a class *instance* variable (see self.tokens), so
# each tokenizer subclass keeps its own independent list.
class StringEater::CTokenizer
  attr_reader :tokens

  # Class-level token template, memoized per subclass.
  def self.tokens
    @tokens ||= []
  end

  # Declare a named field to capture. Also defines an instance reader
  # returning the value extracted for that field by the last tokenize!.
  def self.add_field(name, opts = {})
    tokens << StringEater::Token.new_field(name, opts)
    define_method(name) { @extracted_tokens[name] }
  end

  # Declare literal separator text expected between fields.
  def self.look_for(look_for_tokens)
    tokens << StringEater::Token.new_separator(look_for_tokens)
  end

  # Deep-copy the class token template via a Marshal round-trip so
  # instances can mutate their copy without touching the template.
  # This is very slow, only do it when necessary
  def self.dup_tokens
    Marshal.load(Marshal.dump(tokens))
  end

  # Make every instance of this class non-strict by default.
  # NOTE: stored in a class instance variable, so it is per-class and
  # is NOT inherited by subclasses.
  def self.set_non_strict
    @class_non_strict = true
  end

  def self.non_strict?
    @class_non_strict == true
  end

  def initialize
    refresh_tokens
  end

  # Per-instance override of the class-level strictness flag.
  def set_non_strict
    @non_strict = true
  end

  # Mark every named field for extraction, then rebuild token state.
  def extract_all_fields
    @token_filter = lambda do |t|
      t.opts[:extract] = true if t.name
    end
    refresh_tokens
  end

  # Mark every named field as NOT extracted, then rebuild token state.
  def extract_no_fields
    @token_filter = lambda do |t|
      t.opts[:extract] = false if t.name
    end
    refresh_tokens
  end

  # Extract exactly the given field names (all others are turned off),
  # then rebuild token state.
  def extract_fields(*fields)
    @token_filter = lambda do |t|
      t.opts[:extract] = fields.include?(t.name)
    end
    refresh_tokens
  end

  # Rebuild the per-instance arrays handed to the C extension from the
  # class template, applying any @token_filter set by extract_*.
  # This is very slow, only do it once before processing
  def refresh_tokens
    @tokens = self.class.dup_tokens

    @tokens.each { |t| @token_filter.call(t) } if @token_filter

    tokens_to_find = gen_tokens_to_find
    @tokens_to_find_indexes = tokens_to_find.map { |t| t[0] }
    @tokens_to_find_strings = tokens_to_find.map { |t| t[1] }

    tokens_to_extract = gen_tokens_to_extract
    @tokens_to_extract_indexes = tokens_to_extract.map { |t| t[0] }
    # NOTE(review): names are taken from ALL tokens (indexed by token
    # position), not from the extracted subset built above — presumably
    # ctokenize! looks names up by token index. Confirm against the C
    # extension before "fixing" this asymmetry.
    @tokens_to_extract_names = tokens.map { |t| t.name }

    @have_tokens_to_extract = (@tokens_to_extract_indexes.size > 0)
  end

  # Render a template of the line this tokenizer expects: separator
  # strings verbatim, field names as placeholders, 'xxxxxx' when a
  # token has neither.
  def describe_line
    tokens.reduce('') do |desc, t|
      # Guard the to_s: previously `t.name.to_s` turned a nil name into
      # "" and made the 'xxxxxx' fallback unreachable dead code.
      desc << (t.string || (t.name && t.name.to_s) || 'xxxxxx')
    end
  end

  # Hook for subclasses: called after ctokenize!, before yielding.
  def do_extra_parsing
  end

  # Tokenize +string+, populating the extracted-token hash via the C
  # extension. Yields the hash when a block is given. Returns self for
  # chaining, or nil when there is nothing to extract.
  # (No explicit &block param: yield/block_given? avoid allocating an
  # unused Proc on every call.)
  # Not sure this could be much more concise
  # rubocop:disable MethodLength
  def tokenize!(string)
    @string = string
    @extracted_tokens ||= {}
    # Clear stale results so an early return doesn't leave old values.
    @extracted_tokens.clear
    @non_strict ||= self.class.non_strict?

    return unless @have_tokens_to_extract

    @extracted_tokens = ctokenize!(@string,
                                   @tokens_to_find_indexes,
                                   @tokens_to_find_strings,
                                   @tokens_to_extract_indexes,
                                   @tokens_to_extract_names,
                                   @non_strict)

    # extra parsing hook
    do_extra_parsing

    yield @extracted_tokens if block_given?

    # return self for chaining
    self
  end
  # rubocop:enable MethodLength

  private

  # The helpers below are not called from the Ruby code in this file;
  # presumably they are callbacks invoked by the C extension — keep
  # their names and arity stable.

  def set_token_startpoint(ix, startpoint)
    @tokens[ix].breakpoints[0] = startpoint
  end

  def get_token_startpoint(ix)
    @tokens[ix].breakpoints[0]
  end

  def set_token_endpoint(ix, endpoint)
    @tokens[ix].breakpoints[1] = endpoint
  end

  def extract_token?(ix)
    @tokens[ix].extract?
  end

  # [index, separator-string] pairs for every token with literal text.
  def gen_tokens_to_find
    tokens.each_with_index.map { |t, i| [i, t.string] if t.string }.compact
  end

  # [index, name] pairs for every token flagged for extraction.
  def gen_tokens_to_extract
    tokens.each_with_index.map { |t, i| [i, t.name] if t.extract? }.compact
  end
end
