#!/usr/bin/python
"""
tpe_lex.py

Lexer for Top-down Parser Expressions.

This one will be extremely simple, since there are no literal strings or
character classes.  1 state.
"""

import sys

import lexer

# NOTE(review): this alias is unused within this file (the rule tables below
# call lexer.SimpleToken directly) — presumably re-exported for importers of
# this module; confirm before removing.
SimpleToken = lexer.SimpleToken


# When we get '&' in the input stream, turn that into token AND.  These tokens
# are used as s-expression heads and class names, so they need to be
# alphanumeric, and not just punctuation like '/' and '('.

# Maps each single-character operator to an alphanumeric token name.  These
# names serve as s-expression heads and class names, so punctuation like '&'
# or '!' cannot be used directly.
OP_MAP = dict([
    # Prefix predicates.
    ('&', 'AND'),
    ('!', 'NOT'),

    # Repetition suffixes.
    ('+', 'PLUS'),
    ('*', 'STAR'),
    ('?', 'QUESTION'),

    # Wildcard.
    ('.', 'DOT'),
])

# Single-state lexer table.  Each entry is tried in order, so earlier rules
# shadow later ones (e.g. the TOKEN rule must precede IDENTIFIER).
# Rule shapes (NOTE(review): inferred from usage — confirm against the lexer
# module): a 1-tuple means "consume the match and emit nothing"; a 2-tuple
# pairs the pattern with a token-type factory.
MACHINE = {
    'start': [
        # space is consumed then ignored.
        (r'\s+',),
        (r'<-', lexer.SimpleTokenType),
        # [ & ! / ? * + ( ) ]
        # Each operator char becomes the token name given in OP_MAP; chars
        # without an OP_MAP entry ('/', '(', ')') are presumably handled by
        # MappedTokenType itself — verify against the lexer module.
        (r'[&!/?*+().]', lexer.MappedTokenType(OP_MAP)),

        # Ignore comments until the end of the line.
        # Have to escape # because of re.VERBOSE.
        # Using \n as line ending will work for \r\n -- don't care about plain
        # \r.
        (r'[#] .* \n',),

        # CAPS refer to tokens.  Must be at least 2 chars long.
        (r'[A-Z][A-Z0-9\-_]+', lexer.SimpleToken('TOKEN')),

        # Anything between single quotes is a token, like '+'.
        # group=1 extracts the quoted text without the surrounding quotes.
        # (Spaces in the pattern are insignificant under re.VERBOSE.)
        (r" ' ([^']+) ' ", lexer.SimpleToken('TOKEN', group=1)),

        # lower-case refers to rules.  Note that the above is matched first,
        # because this would match FOO as well.
        (r'[a-zA-Z0-9\-_]+', lexer.SimpleToken('IDENTIFIER')),
        ],
}

# Module-level cache for the singleton lexer; None until first use.
_lexer = None

def GetLexer():
  """Return the shared TPE lexer, constructing it lazily on first call.

  Returns:
    A lexer.Lexer built from MACHINE (regex mode), cached for reuse.
  """
  global _lexer
  # Explicit sentinel check: the old truthiness test (`if _lexer:`) would
  # silently rebuild the lexer on every call if the cached object ever
  # evaluated as falsy.
  if _lexer is None:
    # For bootstrapping
    _lexer = lexer.Lexer(MACHINE, use_re=True)
  return _lexer
