import re

from khronos.utils import Deque

class Token(object):
    """A single lexeme produced by the Lexer.

    Subclasses override ``regex`` (a pattern string) and optionally
    ``flags`` (``re`` flags) to describe the text they match.
    """

    regex = None  # pattern string; set by subclasses
    flags = 0     # re flags applied when the pattern is compiled

    def __init__(self, match_text, type=None):
        self.value = match_text
        # Fall back to the subclass name when no explicit type is given.
        if type is None:
            self.type = self.__class__.__name__
        else:
            self.type = type

    def __str__(self):
        return "%s(%r)" % (self.type, self.value)
        
class DiscardToken(Token):
    """Token type whose construction always yields ``None``.

    Because the lexer drops ``None`` results, declaring a pattern as a
    ``DiscardToken`` subclass makes its matched text vanish from the
    token stream (e.g. comments).
    """

    def __new__(cls, text):
        # Returning a non-instance from __new__ means __init__ never runs,
        # so the caller simply receives None for this match.
        return None
        
class Lexer(object):
    """Regex-driven tokenizer with one-token push-back.

    Token classes (``Token`` subclasses) are tried in the order given; the
    longest match wins, ties going to the earliest-listed type.  Single
    characters in ``literals`` are emitted as one-character tokens when no
    pattern matches, and characters in ``ignore`` are skipped between tokens.
    """

    def __init__(self, token_types=(), literals="", ignore=" \t"):
        self.token_types = list(token_types)  # Token subclasses, in priority order
        self.literals = set(literals)         # chars emitted as literal tokens
        self.ignore = set(ignore)             # chars silently skipped
        self.regex = None      # lazily built list of (token_type, compiled_pattern)
        self.text = None       # input currently being tokenized
        self.position = None   # index of the next unread character
        self.buffer = None     # push-back queue, filled by put_back()

    def init(self, text):
        """Prepare the lexer to tokenize ``text`` from the beginning."""
        if self.regex is None:
            # Compile each token type's pattern once; reused across init() calls.
            self.regex = [(t, re.compile(t.regex, t.flags)) for t in self.token_types]
        self.text = text
        self.position = 0
        self.buffer = Deque()

    def finished(self):
        """Return True when no tokens remain.

        NOTE: advances ``position`` past ignorable characters as a side effect.
        """
        self.skip_ignore_chars()
        return self.position == len(self.text) and len(self.buffer) == 0

    def get_next(self):
        """Return the next token, preferring pushed-back tokens; None at end."""
        if len(self.buffer) > 0:
            return self.buffer.popleft()
        return self.read_token()

    def put_back(self, token):
        """Push ``token`` back so the next get_next() returns it again."""
        if token is not None:
            self.buffer.appendleft(token)

    def read_token(self):
        """Scan and return the next token from the input text.

        Returns None at end of input.  Raises Exception when the text at the
        current position matches neither a token pattern nor a literal.
        """
        self.skip_ignore_chars()
        if self.position == len(self.text):
            return None
        # search for matching regular expressions
        matches = []
        for token_type, regex in self.regex:
            match = regex.match(self.text, self.position)
            if match is not None:
                matches.append((token_type, match))
        # no regular expression matches, try literals
        if len(matches) == 0:
            if self.text[self.position] in self.literals:
                char = self.text[self.position]
                self.position += 1
                return Token(char, type=char)
            raise Exception("lexer error - unable to recognize token")
        # At least one match: keep the longest; max() returns the first of
        # equal keys, so ties favor the earliest-listed token type.
        # (Plain lambda instead of the Py2-only tuple-parameter form
        # ``lambda (t, m): ...``, which is a SyntaxError on Python 3.)
        token_type, longest_match = max(matches, key=lambda pair: pair[1].end())
        self.position = longest_match.end()
        token = token_type(self.text[longest_match.start():longest_match.end()])
        # Token types may construct to None (see DiscardToken); skip and retry.
        return token if token is not None else self.read_token()

    def skip_ignore_chars(self):
        """Advance ``position`` past any run of ignorable characters."""
        while self.position < len(self.text) and self.text[self.position] in self.ignore:
            self.position += 1
            