import string, re
from token import *

import token
# Re-export everything public from the token module, plus the extra names
# this module defines.  The helper variable is underscore-prefixed so it is
# never exported and needs no `del`: the original `del x` relied on Python 2
# list comprehensions leaking their loop variable, which raises NameError
# under Python 3.
__all__ = [_name for _name in dir(token) if _name[0] != '_'] + [
    "COMMENT", "tokenize", "generate_tokens", "NL"]
del token

def group(*choices):
    """Join the regex alternatives with '|' inside a capturing group."""
    return '(%s)' % '|'.join(choices)

def any(*choices):  # note: intentionally shadows the builtin, kept for compatibility
    """Zero-or-more repetitions of the grouped alternatives."""
    return group(*choices) + '*'

def maybe(*choices):
    """At most one occurrence of the grouped alternatives."""
    return group(*choices) + '?'

# Building blocks for the scanner regex; they are assembled into the single
# master pattern `Token` at the end of this section.
Whitespace = r'[ \f\t]*'   # horizontal whitespace only (no newlines)
Comment = r'#[^\r\n]*'     # '#' through end of line
# Whitespace, backslash line-continuations, and an optional trailing comment.
# NOTE(review): Ignore is defined but not referenced below -- Token is built
# from Whitespace directly; confirm whether it is used elsewhere.
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'     # identifier: letter/underscore, then word chars

Intnumber = r'[0-9]\d*'    # decimal integer literal
Exponent = r'[eE][-+]?\d+' # exponent suffix of a float
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)  # 1.  1.5  .5
Expfloat = r'\d+' + Exponent                                 # e.g. 1e10
Floatnumber = group(Pointfloat, Expfloat)
Number = group(Floatnumber, Intnumber)  # floats first so '1.5' is one token

# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"<>", r">=?", r"<=?", r"!=",
                 r"[+\-*/%&|^=<>]=?",
                 r"[()]")

Special = group(r'\r?\n', r'[:;.,`]')  # line terminators and punctuation
Funny = group(Operator, Special)

PseudoExtras = group(r'\\\r?\n', Comment)  # continuation line or comment
PlainToken = group(PseudoExtras, Number, Funny, String, Name)
# Master pattern: leading whitespace, then exactly one token in group 1.
Token = Whitespace + PlainToken

# Reserved words of the language; generate_tokens() emits a name matching
# one of these as a COMMAND token instead of NAME.
# str.split() with no argument already splits on (and discards) all
# whitespace, so the former per-word .strip() pass was a no-op and is gone.
Reserved = """
ate caso enquanto entao escreva faca falso fim fim_caso
fim_enquanto fim_se inicio leia literal logico inteiro real
outro_caso repita se senao variaveis verdadeiro
and or not
""".split()
# Map each operator/punctuation lexeme to its token-type constant
# (constants come from the token module via `from token import *`).
Ops = {
    ';': SEMI, ':': COLON, ',': COMMA,
    '+': PLUS, '-': MINUS, '*': STAR, '/': SLASH, '^': CIRCUMFLEX,
    '=': EQUAL, '<': LESS, '>': GREATER,
    '<>': NOTEQUAL, '<=': LESSEQUAL, '>=': GREATEREQUAL,
    '(': LPAR, ')': RPAR,
}

tokenprog = re.compile(Token)
class TokenError(Exception):
    """Raised when the input stream cannot be tokenized."""

class StopTokenizing(Exception):
    """May be raised by a tokeneater callback to abort tokenize() early."""

def printtoken(type, token, start, end, line): # for testing
    """Print one token as 'srow,scol-erow,ecol:<TAB>TYPE<TAB>repr(token)'.

    start and end are (row, col) pairs; line is unused but accepted so the
    signature matches the 5-tuples produced by generate_tokens().  The
    original declaration unpacked the pairs in the parameter list, a
    Python-2-only feature removed by PEP 3113; unpacking now happens in
    the body so the function also runs under Python 3.  All call sites
    pass arguments positionally, so they are unaffected.
    """
    (srow, scol), (erow, ecol) = start, end
    print("%d,%d-%d,%d:\t%s\t%s" %
          (srow, scol, erow, ecol, tok_name[type], repr(token)))

def tokenize(readline, tokeneater=printtoken):
    """Tokenize an input stream, reporting each token to a callback.

    readline must be a callable with the same interface as the readline()
    method of built-in file objects: each call returns one line of input
    as a string.

    tokeneater is called once per token with the five values of the
    tuples generated by generate_tokens().  It may raise StopTokenizing
    to end tokenization early; that exception is swallowed here.
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        pass  # the callback asked us to stop early

# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    """Feed every 5-tuple from generate_tokens(readline) to tokeneater."""
    for tok in generate_tokens(readline):
        tokeneater(*tok)

def generate_tokens(readline):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string.

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    """
    lnum = parenlev = 0
    # namechars: characters legal in an identifier; numchars: digits
    namechars, numchars = string.ascii_letters + '_', '0123456789'
    tabsize = 8  # a tab advances the column to the next multiple of 8

    while 1:                                   # loop over lines in stream
        line = readline()
        lnum = lnum + 1
        pos, max = 0, len(line)

        if parenlev == 0:  # new statement
            # readline() returning '' signals end of input
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ': column = column + 1
                # NOTE(review): '/' is integer division only under Python 2;
                # under Python 3 this would make column a float -- confirm
                # the intended interpreter.
                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
                elif line[pos] == '\f': column = 0
                else: break
                pos = pos + 1
            # NOTE(review): a line of pure whitespace terminates the whole
            # tokenizer (falls through to ENDMARKER) instead of yielding NL;
            # confirm that this is intentional.
            if pos == max: break # EOL

            if line[pos] in '#':           # skip comments or blank lines
                yield (COMMENT, line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue
            elif line[pos] in '\r\n':
                yield (NL, '', (lnum, pos), (lnum, len(line)), line)
                continue

        while pos < max:
            pseudomatch = tokenprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                # group 1 spans the token proper, excluding leading whitespace
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]
                if initial in numchars or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    yield (NL, '', spos, epos, line)
                elif initial == '#':
                    yield (COMMENT, token, spos, epos, line)
                elif initial in namechars:                 # ordinary name
                    # reserved words become COMMAND tokens; COMMAND comes
                    # from the project's token module, not the stdlib one
                    yield (token in Reserved and COMMAND or NAME,
                           token, spos, epos, line)
                elif initial in '"\'':                 # string
                    yield (STRING, token, spos, epos, line)
                else:
                    # operator/punctuation; track bracket nesting so that
                    # lines inside parens are not treated as new statements
                    if initial in '([{': parenlev = parenlev + 1
                    elif initial in ')]}': parenlev = parenlev - 1
                    yield (Ops.get(token, OP), token, spos, epos, line)
            else:
                # no token pattern matched at pos: emit the single
                # offending character and move on
                yield (ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos = pos + 1

    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')

if __name__ == '__main__':                     # testing
    import sys
    # Tokenize the file named on the command line, or stdin if none given.
    if len(sys.argv) > 1:
        tokenize(open(sys.argv[1]).readline)
    else:
        tokenize(sys.stdin.readline)
