""" Tokenizer module. Provides generic tokenizer implementation """
from unittest import TestCase
import re

class Token:
    """A single lexeme extracted from the input text.

    Attributes:
        type:  name of the matched token type
        value: the matched substring
        line:  zero-based line where the match starts
        pos:   zero-based column of the match within its line
    """
    __slots__ = ('type', 'value', 'line', 'pos')

    def __init__(self, type, value = "", line = 0, pos = 0):
        self.pos = pos
        self.line = line
        self.value = value
        self.type = type

    def __str__(self):
        # The plain string form is just the matched text itself.
        return self.value

    def __repr__(self):
        # The debug form pairs the type name with the matched text.
        return '(%s: %s)' % (self.type, self.value)

class TokenizerError(Exception):
    """Signals that the input text could not be tokenized.

    The formatted message and the zero-based line/column of the problem
    are exposed as attributes and are also forwarded to Exception.args.
    """
    __slots__ = ('message', 'line', 'pos')

    def __init__(self, message, line, pos):
        super().__init__(message, line, pos)
        self.message = message
        self.line = line
        self.pos = pos

class Tokenizer:
    """Generic regexp-driven tokenizer.

    Token, skip and error types are registered with regexp patterns;
    run() then matches them against a text in registration order
    (earlier registrations take priority) and returns a Token list.
    """
    __slots__ = ('_types', '_re', '_defaultError', '_errors')

    def __init__(self):
        self._types = []    # (group name, pattern) pairs, in priority order
        self._re    = None  # combined regexp; built lazily, reset on changes
        self._defaultError = "'{char}': unexpected symbol"
        self._errors = []   # messages for registered error patterns

    def addType(self, type, pattern):
        """Registers a token type producing Token objects of that type."""
        self._types.append((type, pattern))
        self._re = None

    def addSkip(self, pattern):
        """Registers a pattern that is consumed but produces no token."""
        self._types.append(('__skip__%d' % len(self._types), pattern))
        self._re = None

    def addError(self, message, pattern = None):
        """Registers an error pattern, or (with pattern=None) replaces the
        default error message.

        The message may use {value}, {char}, {line} and {pos} placeholders.
        """
        if pattern is None:
            self._defaultError = message
            return

        self._types.append(('__error__%d' % len(self._errors), pattern))
        self._errors.append(message)
        self._re = None

    def run(self, text):
        """Tokenizes text, returning a list of Token objects.

        Raises TokenizerError when an error pattern matches or when some
        stretch of the input is not covered by any registered pattern.
        Lines and positions in tokens and errors are zero-based.
        """
        if self._re is None:
            self._buildRegexp()

        res = []

        curLine = 0     # zero-based line of the current position
        curLinePos = 0  # absolute offset where the current line starts
        nextPos = 0     # absolute offset just past the previous match
        for m in self._re.finditer(text):
            kind = m.lastgroup
            value = m.group(kind)
            pos = m.start()

            if kind.startswith('__error__'):
                # The group name carries the index into self._errors.
                index = int(kind[len('__error__'):])
                raise TokenizerError(self._errors[index].format(
                    value = value,
                    char  = value[0],
                    line  = curLine,
                    pos   = pos - curLinePos
                ), curLine, pos - curLinePos)

            # A gap before this match is input no pattern could consume.
            if nextPos != pos:
                raise TokenizerError(self._defaultError.format(
                    value = text[nextPos : pos],
                    char  = text[nextPos],
                    line  = curLine,
                    pos   = nextPos - curLinePos
                ), curLine, nextPos - curLinePos)
            nextPos = pos + len(value)

            # Track line breaks inside the match (whitespace, comments),
            # so later tokens report correct line/column numbers.
            newlines = value.count('\n')
            if newlines:
                curLine += newlines
                curLinePos = pos + value.rfind('\n') + 1

            if kind.startswith('__skip__'):
                continue

            res.append(Token(
                kind,
                value,
                curLine,
                pos - curLinePos))

        # BUGFIX: unmatched input at the very end of the text used to be
        # silently ignored; report it like any other unexpected symbol.
        if nextPos != len(text):
            raise TokenizerError(self._defaultError.format(
                value = text[nextPos:],
                char  = text[nextPos],
                line  = curLine,
                pos   = nextPos - curLinePos
            ), curLine, nextPos - curLinePos)

        return res

    def _buildRegexp(self):
        """Combines all registered patterns into one named-group regexp."""
        pattern = '|'.join(['(?P<%s>%s)' % (name, p) for name, p in self._types])
        self._re = re.compile(pattern)

WHITESPACE      = r"\s+"
# A line comment runs to (and consumes) the newline or the end of input.
LINE_COMMENT    = r"//[^\n]*(?:\n|$)"
# BUGFIX: '*' inside a block comment (e.g. "/* a*b */") used to break the
# match; allow any '*' that is not immediately followed by '/'.
BLOCK_COMMENT   = r"/\*(?:[^*]|\*(?!/))*\*/"
WORD            = r"[a-zA-Z_]\w*"
# BUGFIX: the plain-character class must exclude the backslash, otherwise
# an escaped quote ("a\"b") terminated the literal early.
STRING          = r"\"(?:[^\"\\\n]|\\.)*\""
CHAR            = r"'(?:[^'\\\n]|\\.)*'"
INT             = r"\d+"
FLOAT           = r"\d+\.\d*|\.\d+"
SYMBOL          = r"[%s]" % re.escape(".,;:?=<>()[]{}&|^~+-*/%#")

def tokenize(text, callback = None):
    """Cpp-like tokenizer.

    Splits *text* into word/string/float/int/symbol tokens, skipping
    whitespace and //-style or block comments.

    If *callback* is None, errors propagate as TokenizerError; otherwise
    callback(message, line, pos) is invoked and None is returned.
    """
    t = Tokenizer()
    t.addSkip(WHITESPACE)
    t.addSkip(LINE_COMMENT)
    t.addSkip(BLOCK_COMMENT)

    # BUGFIX: an unterminated comment containing '*' (e.g. "/*a*b") used to
    # slip past this pattern and be tokenized as garbage symbols; allow any
    # '*' not immediately followed by '/', up to the end of the input.
    t.addError("unexpected end of file in block comment", r"/\*(?:[^*]|\*(?!/))*$")
    t.addError("unexpected end of line in constant", r"\"[^\"\n]*\n|\'[^\'\n]*\n")

    # Registration order matters: float before int, so "0.15" is not
    # split into int '.' int.
    t.addType('word',               WORD)
    t.addType('string',             STRING)
    t.addType('float',              FLOAT)
    t.addType('int',                INT)
    t.addType('symbol',             SYMBOL)

    if callback is None:
        return t.run(text)

    try:
        return t.run(text)
    except TokenizerError as e:
        callback(e.message, e.line, e.pos)
        return None

class _Tests(TestCase):
    # Unit tests for the generic Tokenizer and the cpp-like tokenize() helper.

    def testBuildRegexp(self):
        t = Tokenizer()
        t.addSkip(r'\s+')
        t.addType('id',    r'[\w^\d]\w*')

        # The combined regexp is built lazily, on the first run()/_buildRegexp();
        # each registered type becomes one named alternative, in order.
        self.assertIsNone(t._re)
        t._buildRegexp()
        self.assertIsNotNone(t._re)
        self.assertEqual(t._re.pattern, r'(?P<__skip__0>\s+)|(?P<id>[\w^\d]\w*)')

        res = t.run("""    a word and    other
                    words   """)
        self.assertIsNotNone(res)
        self.assertEqual([t.value for t in res], ['a', 'word', 'and', 'other', 'words'])
        # Lines and positions are zero-based; 'words' sits on the second line,
        # and positions are columns within that line, not absolute offsets.
        self.assertEqual([t.line  for t in res], [0, 0, 0, 0, 1])
        self.assertEqual([t.pos   for t in res], [4, 6, 11, 18, 20])

        # '&' matches no registered pattern, so run() must raise; the same
        # message/line/pos triple is exposed as attributes and via args.
        try:
            t.run(" a text & an unknown symbol")
            self.fail()
        except TokenizerError as e:
            self.assertTrue(e.message.find('&') != -1)
            self.assertTrue(e.args[0].find('&') != -1)
            self.assertEqual(e.line,    0)
            self.assertEqual(e.args[1], 0)
            self.assertEqual(e.pos,     8)
            self.assertEqual(e.args[2], 8)

    def testTokenizeCpp(self):
        res = tokenize("""
            procedure name(a, b)
            {
                print("Hello world!");
                10 * 0.15 / .5 + 130.;
            }
        """)

        # Whitespace is skipped; every remaining lexeme must come out as a
        # (type, value) pair in source order.
        self.assertIsNotNone(res)
        self.assertEqual([(t.type, t.value) for t in res], [
            ('word',    'procedure'),
            ('word',    'name'),
            ('symbol',  '('),
            ('word',    'a'),
            ('symbol',  ','),
            ('word',    'b'),
            ('symbol',  ')'),
            ('symbol',  '{'),
                ('word',    'print'),
                ('symbol',  '('),
                ('string',  '"Hello world!"'),
                ('symbol',  ')'),
            ('symbol',  ';'),
                ('int',     '10'),
                ('symbol',  '*'),
                ('float',   '0.15'),
                ('symbol',  '/'),
                ('float',   '.5'),
                ('symbol',  '+'),
                ('float',   '130.'),
            ('symbol',  ';'),
            ('symbol',  '}'),
        ])

        # Block comments are skipped entirely, even with no whitespace around.
        res = tokenize("a/*comment*/b")
        self.assertEqual([t.value for t in res], ['a', 'b'])

        # An unterminated block comment triggers the registered error pattern;
        # pos is the column of the comment opener.
        try:
            tokenize("a/*comment b")
            self.fail()
        except TokenizerError as e:
            self.assertEqual(e.message, "unexpected end of file in block comment")
            self.assertEqual(e.pos, 1)
