
# = Class Lexer
# Instantiating this class provides an object which gives the tools to parse
# an input given some tokens
#
# == Property input
# Input which the Lexer uses to return the tokens
#
# == Property line
# Last line where the last token was found
#
# == Property column
# Last column where the last token was found
class Lexer
  attr_writer :tokens
  attr_reader :line, :column, :input, :tokens, :lastTk

  # Builds a lexer from a String or an open File (file contents are slurped
  # into memory). All carriage returns are replaced by spaces so that only
  # "\n" has to be considered when tracking line numbers.
  def initialize(input)
    @input = input.is_a?(File) ? IO.read(input.path) : input

    # Replace every '\r' with a space.
    # BUGFIX: the original re-ran `sub` one character per pass inside a
    # convergence loop; gsub performs the same substitution in a single pass.
    @input = @input.gsub("\r", " ")

    @line = 1
    @column = 1
    # BUGFIX: the original `@tokens = tokens` read the (still nil) attribute
    # back into itself, leaving @tokens nil. Start with an empty list; the
    # real token-class-name list is assigned through the `tokens` writer.
    @tokens = []
    @lastTk = " "
    # Escape mnemonics and the characters they stand for, kept index-aligned.
    @escapeP = ["a", "t", "n", "v", "f", "r", "\\", "0", "\"", "\'", "b"]
    @escapeC = ["\a", "\t", "\n", "\v", "\f", "\r", "\\", "\0", "\"", "\'", "\b"]
  end

  # = Method skip
  # Moves the input pointer n characters forward, updating @line and @column.
  # Raises a RuntimeError when fewer than n characters remain.
  def skip(n)
    if @input.length < n
      raise "skip: Trying to move pointer #{n} characters, but not " +
        "enough characters left: #{@input.length}"
    end

    skipped = @input[0, n]

    # Locate the last linefeed inside the skipped text: if there is one,
    # the column restarts after it; otherwise the column simply advances.
    llf = skipped.rindex("\n")
    if llf.nil?
      @column += skipped.length
    else
      @line += skipped.count("\n")
      @column = skipped.length - llf
    end

    @input = @input[n, @input.length - n]
  end

  # = Method newline
  # Moves the input pointer to the start of the following line, or empties
  # the input when no further linefeed exists.
  def newline()
    lf = @input.index("\n")
    if lf.nil?
      @input = ""
    else
      skip(lf + 1)
    end
  end

  # = Method yylex
  # Retrieves the next matching token from the input, or nil at end of input.
  # Token classes are resolved by name from @tokens; each must respond to
  # getRegexp, getKind and new(line, column, value).
  # Raises InvalidToken when no token class matches the input head.
  def yylex()
    # If there's no input left, return nil.
    return nil if @input == ""

    # Consume any whitespace preceding the next token. /\A\s+/ is greedy, so
    # a single match covers the whole run (the original looped needlessly).
    ws = @input.match(/\A\s+/)
    skip(ws.to_s.length) if ws

    # Whitespace may have exhausted the input.
    return nil if @input == ""

    # Try each token class in order; first match wins.
    for token_class_name in @tokens
      token_class = Kernel.const_get(token_class_name)
      regexp = "\\A(?:" + token_class.getRegexp.source + ")"
      # Keywords must not be immediately followed by an identifier character.
      regexp += "(?![a-zA-Z_])" if token_class.getKind == "keyword"

      match = @input.match(Regexp.new(regexp))
      next if match.nil?

      matchS = match.to_s
      matchS = matchS.downcase if token_class.getKind == "keyword"

      # Trim a single trailing whitespace character captured by the regexp.
      if matchS.rindex(/\s/) == matchS.length - 1
        matchS = matchS[0, matchS.length - 1]
      end

      # String literals: drop the surrounding quotes and translate escape
      # sequences into the characters they denote.
      if token_class_name == "TkCString"
        matchS = matchS[1, matchS.length - 2]
        matchS = unescape(matchS)
      end

      # Numeric literals are converted to Float.
      # NOTE(review): the original comment said "TkCNum"/"integer" while the
      # code compares against "TkCnum" and calls to_f — confirm the class
      # name casing against the token definitions.
      matchS = matchS.to_f if token_class_name == "TkCnum"

      token = token_class.new(@line, @column, matchS)
      skip(match.to_s.length)

      # Comments are ignored: recurse to fetch the next real token
      # (@lastTk is intentionally not updated for comments).
      return yylex() if token_class_name == "TkComment"

      @lastTk = token_class_name
      return token
    end

    # No token class matched: consume one character and report it.
    char = @input[0]
    skip(1)
    raise InvalidToken.new(char, @line, @column - 1)
  end

  private

  # Replaces backslash escape sequences (e.g. "\\n") in str with the
  # character they represent; unrecognized escapes are left untouched.
  def unescape(str)
    ind = -1
    while (ind = str.index(/\\/, ind + 1))
      esc = str[ind + 1].chr
      # BUGFIX: the original scanned @escapeP with a 1-based for-loop, so the
      # first mnemonic ("a") was never recognized and the probe ran one past
      # the end of the array. Array#index checks every entry.
      pos = @escapeP.index(esc)
      next if pos.nil?
      str = str[0, ind] + @escapeC[pos] + str[ind + 2, str.length - 1]
    end
    str
  end
end