class Token(object):
    """A single lexical token: a type tag plus the matched text.

    The integer class attributes enumerate every token type the lexer
    can emit.
    """

    # Structural characters.
    SLASH = 0
    LEFT_BRACE = 1
    RIGHT_BRACE = 2

    # Sectioning keywords and plain text.
    CHAPTER = 3
    TEXT = 4
    SECTION = 5
    SUB_SECTION = 6
    SUB_SUB_SECTION = 7

    # List / environment keywords.
    ITEM = 8
    BEGIN = 9
    END = 10
    ENUM = 11
    CODE_LIST = 12

    def __init__(self, tt, val):
        """Store the token type `tt` and its source text `val`."""
        self.token_type = tt
        self.value = val

    def __repr__(self):
        # Same rendering as "%d : %s" for integer token types.
        return f"{self.token_type} : {self.value}"

class Lexer(object):
    """Streaming tokenizer for a small LaTeX-like markup language.

    The whole input is read eagerly from the file object; ``pos`` is the
    scan cursor into it.  ``tokenize`` is a state machine with states:

      INIT         -- default state, dispatch on the next character
      ENGLISH_WORD -- accumulating an alphanumeric word
      RETURN       -- saw one newline (maybe followed by spaces/tabs)
      D_RET        -- saw a blank line (two newlines) -> paragraph break

    In ``raw_mode`` (used for verbatim environments such as lstlisting)
    characters are passed through untouched until a ``\\end`` appears.
    """
    INIT = 0
    ENGLISH_WORD = 1
    RETURN = 2
    D_RET = 3

    def __init__(self, f):
        """Read all of file object ``f`` and pre-fetch the first token."""
        self.line = f.read()
        self.pos = 0
        self.status = Lexer.INIT
        self.result = []          # characters of the word being accumulated
        self.raw_mode = False     # True inside verbatim bodies

        # Pre-fetch so next() always has a lookahead token ready.
        self.current = self.tokenize()

    def set_raw_mode(self):
        """Enter raw mode: pass text through verbatim until ``\\end``."""
        self.raw_mode = True

    def clear_raw_mode(self):
        """Leave raw mode and resume normal tokenizing."""
        self.raw_mode = False

    def has_next(self):
        """True while unconsumed input remains."""
        return self.pos < len(self.line)

    def next(self):
        """Return the current lookahead token, fetching one if needed."""
        if self.current is None:
            self.current = self.tokenize()
        return self.current

    def advance(self):
        """Discard the current token and fetch the next one."""
        self.current = self.tokenize()

    def get_raw_text(self):
        """Consume verbatim text up to (but not including) ``\\end``.

        Returns a TEXT token holding everything collected, clearing raw
        mode when the terminating ``\\end`` is seen.  The cursor is left
        on the backslash of ``\\end`` so normal tokenizing picks it up.
        Returns None only when there is nothing at all to emit.
        """
        temp = []

        while self.pos < len(self.line):
            ch = self.line[self.pos]

            if ch != '\\':
                temp.append(ch)
                self.pos += 1
                continue

            # A backslash terminates raw text only when it starts "\end".
            # The slice is safely short near EOF.
            if self.line[self.pos + 1:self.pos + 4] == 'end':
                self.clear_raw_mode()
                return Token(Token.TEXT, "".join(temp))

            # BUG FIX: the original did `pos += 1` (NameError, `self.`
            # missing) and looped forever when the backslash fell within
            # 3 characters of EOF.  A non-terminating backslash is
            # literal raw text.
            temp.append(ch)
            self.pos += 1

        # EOF without a closing \end: flush what was collected instead of
        # silently dropping it (the original fell through returning None).
        self.clear_raw_mode()
        if temp:
            return Token(Token.TEXT, "".join(temp))
        return None

    def tokenize(self):
        """Produce the next Token, or None when the input is exhausted."""
        if self.raw_mode:
            return self.get_raw_text()

        while self.pos < len(self.line):
            ch = self.line[self.pos]

            if self.status == Lexer.INIT:
                if ch == '\\':
                    self.pos += 1
                    return Token(Token.SLASH, "")
                elif ch == '{':
                    self.pos += 1
                    return Token(Token.LEFT_BRACE, "{")
                elif ch == '}':
                    self.pos += 1
                    return Token(Token.RIGHT_BRACE, "}")
                elif self.is_alpha(ch):
                    self.pos += 1
                    self.status = Lexer.ENGLISH_WORD
                    self.result.append(ch)
                elif ch == '~':
                    # LaTeX non-breaking space renders as a plain space.
                    self.pos += 1
                    return Token(Token.TEXT, " ")
                elif ch == '\n':
                    self.pos += 1
                    self.status = Lexer.RETURN
                else:
                    self.pos += 1
                    return Token(Token.TEXT, ch)

            elif self.status == Lexer.ENGLISH_WORD:
                if self.is_alpha(ch):
                    self.pos += 1
                    self.result.append(ch)
                else:
                    # Word ended: emit it (keyword or plain text).  The
                    # delimiter itself is re-examined on the next call.
                    self.status = Lexer.INIT
                    temp = self.result
                    self.result = []
                    return self.filter("".join(temp))

            elif self.status == Lexer.RETURN:
                if ch == ' ' or ch == '\t':
                    self.pos += 1
                elif ch == '\n':
                    # Second newline: this is a paragraph break.
                    self.status = Lexer.D_RET
                    self.pos += 1
                else:
                    # Single newline is insignificant whitespace.
                    self.status = Lexer.INIT

            elif self.status == Lexer.D_RET:
                if ch == ' ' or ch == '\t' or ch == '\n':
                    self.pos += 1
                else:
                    self.status = Lexer.INIT
                    return Token(Token.TEXT, "\n\n")

        # BUG FIX: a word running up to EOF was silently dropped by the
        # original (the loop exited with self.result still populated).
        if self.status == Lexer.ENGLISH_WORD and self.result:
            self.status = Lexer.INIT
            temp = self.result
            self.result = []
            return self.filter("".join(temp))

        return None

    def filter(self, word):
        """Map a completed word to its keyword token, or to plain TEXT."""
        keywords = {
            "chapter": Token.CHAPTER,
            "section": Token.SECTION,
            "subsection": Token.SUB_SECTION,
            "subsubsection": Token.SUB_SUB_SECTION,
            "begin": Token.BEGIN,
            "end": Token.END,
            "enumerate": Token.ENUM,
            "lstlisting": Token.CODE_LIST,
            "item": Token.ITEM,
        }
        return Token(keywords.get(word, Token.TEXT), word)

    def is_alpha(self, ch):
        """True for ASCII letters and digits (word characters)."""
        return ('a' <= ch <= 'z') or ('A' <= ch <= 'Z') or ('0' <= ch <= '9')
