#!/usr/bin/python -S
"""
lexer_test.py: Tests for lexer.py
"""

import re
import unittest

import lexer  # module under test
import highlight


# In the lexing case, this generates no token.
# In the coloring case, it generates <<EOF, which should be colored.
def EnterHere(match, context):
  print 'ADDING delimiter'
  context['delimiter'] = match.group(1)
  print context


# TODO: Should this be an object?
# def lex():
# def color():
#
# l.typed_tokens()  -- used for coloring
# l.string_tokens()
#
# l.lex(s, start=, channel=)...
#
# def typed_tokens():  # suitable for parsing
# def string_tokens():  # suitable for coloring

#
#
# I'm returning type, value, new pos
#
# tokendef
#


class ExitHere(object):

  def _FindDelimiter(self, s, pos, context):
    print 'CONTEXT', context
    d = context['delimiter']
    # needs newlines on both sides.
    delimiter = '\n' + d + '\n'
    i = s.find(delimiter, pos)
    if i == -1:
      # TODO: What exception should we raise?  Or return error token?
      # User lexer error?
      raise RuntimeError()
    return i, len(delimiter)

  def typed(self, s, pos, context, pos_table):
    end_str, delimiter_len = self._FindDelimiter(s, pos, context)

    # are we responsible for this?  If we return two tokens?
    if pos_table is not None:
      pass
    return ('HERE', s[pos:end_str], end_str + delimiter_len)

  def string(self, s, pos, context, pos_table):
    # In coloring case, it has to generate two tokens:
    # one for the contents, and other for EOF end marker.
    #
    # But you might include the delimiters in this case.

    end_str, delimiter_len = self._FindDelimiter(s, pos, context)

    # It's possible here to update the pos table for TWO tokens.
    if pos_table is not None:
      pass

    end_delimiter = end_str + delimiter_len
    token1 = ('STRING', s[pos : end_str])
    token2 = ('CLOSE', s[end_str : end_delimiter])

    # TODO: All edges should return a LIST of tokens...
    #return [token1, token2], end_delimiter
    return token1 + (end_delimiter,)


SH = {
    'start': [
        lexer.CreEdge('whitespace+'),
        lexer.CreEdge('!wordchar+', lexer.SimpleToken('WORD')),

        # tuple indicates state transition
        (lexer.CreEdge(" '<<' {wordchar+}", EnterHere), 'here'),
        ],
    'here': [
        (ExitHere(), 'start'),
        ],
}

class FunctionTest(unittest.TestCase):

  def testSh(self):
    l = lexer.Lexer(SH)
    print l
    tokens = l.lex("""
hi there
""")
    print list(tokens)

    sh = """
hi there <<EOF
...
...
EOF
ls foo
ls bar
"""
    tokens = l.lex(sh)
    print list(tokens)

    tokens = l.lex(sh, channel='string')
    print list(tokens)

    tokens = l.lex(sh, channel='string')
    scheme = highlight.HtmlColorer()
    css = """
<style type="text/css">
.x_DEFAULT {
}
.x_STRING {
  color: red;
}
</style>
"""
    text = highlight.highlight(tokens, scheme)
    out = css + text
    open('sh.html', 'w').write(out)


class LexerTest(unittest.TestCase):

  def testMakeError(self):
    print lexer.MakeErrorString('hello from \n lexer', 10)
    print lexer.MakeErrorString('hello from \n lexer', 11)

  def testComment(self):
    c = re.compile(r'[#] .* $', re.VERBOSE|re.MULTILINE)
    print c.match('#').group(0)
    print c.match('# helo\nyo').group(0)

    c = re.compile(r'[#]', re.VERBOSE)
    print c.match('#')

    print 'yo'
    c = re.compile(r'#')
    print c.match('#')


if __name__ == '__main__':
  unittest.main()
