# proevolytx@gmail.com
# 2012/9/29

from regexpr import *
from time import *

class EmptyLexeme:
    """Base lexeme: matches input via its finite automaton but emits
    no token (used for comments and joined lines, which are dropped)."""

    def __init__(s, name, finiteAutomaton):
        s.name = name
        s.fa = finiteAutomaton

    # For every peek, find the longest stream prefix accepted by this
    # lexeme's finite automaton, feeding it one character at a time.
    # -- not used for better performance --
    def FindLongest(s, stream):
        best = 0
        length = 0
        s.fa.Reset()
        while not s.fa.IsDead():
            length += 1
            prefix = stream.Peek(length)
            if len(prefix) < length:
                break
            # feed only the newly revealed character; the automaton
            # keeps its state between Simulate calls
            s.fa.Simulate(StringStream(prefix[-1]))
            if s.fa.IsAccepted():
                best = length
        return best

    def Output(s, string):
        # base lexeme produces no token at all
        return None

class SameLexeme(EmptyLexeme):
    """Lexeme whose token content is the matched text itself
    (keywords, identifiers)."""

    def Output(s, string):
        token = {"type": s.name}
        token["content"] = string
        return token

class StringLexeme(EmptyLexeme):
    """Lexeme for quoted string literals: strips the surrounding quotes
    and decodes the escape sequences \\\\, \\", \\t, \\r and \\n."""

    # map from the character following a backslash to its decoded form
    _escapeMap = {"\\": "\\", "\"": "\"", "t": "\t", "r": "\r", "n": "\n"}

    def Output(s, string):
        # drop the surrounding quote characters
        raw = string[1:len(string) - 1]
        # Decode escapes in a single left-to-right pass.  The previous
        # chained str.replace calls were order-sensitive and wrong:
        # replacing "\\\\" first could merge a decoded backslash with
        # the following character into a brand-new escape (e.g. the
        # source text \\n -- escaped backslash then a literal n --
        # wrongly became a newline).
        parts = []
        i = 0
        while i < len(raw):
            ch = raw[i]
            if ch == "\\" and i + 1 < len(raw):
                nxt = raw[i + 1]
                # unknown escapes are kept verbatim, as before
                parts.append(StringLexeme._escapeMap.get(nxt, "\\" + nxt))
                i += 2
            else:
                # a trailing lone backslash also falls through verbatim
                parts.append(ch)
                i += 1
        return {"type": s.name, "content": "".join(parts)}

class IntLexeme(EmptyLexeme):
    """Lexeme for integer literals; the token carries the parsed value."""

    def Output(s, string):
        value = int(string)
        return {"type": s.name, "content": value}

class FloatLexeme(EmptyLexeme):
    """Lexeme for floating-point literals; the token carries the parsed value."""

    def Output(s, string):
        value = float(string)
        return {"type": s.name, "content": value}

class SpaceLexeme(EmptyLexeme):
    """Lexeme for runs of blanks and tabs; reports the total column
    width so the parser can derive indentation from it."""

    def __init__(s, tabsize, dfa):
        EmptyLexeme.__init__(s, "indent", dfa)
        # how many columns a single tab counts for
        s.tabsize = tabsize

    def Output(s, string):
        # a blank is one column wide, anything else (a tab) is tabsize
        width = sum(1 if ch == " " else s.tabsize for ch in string)
        return {"type": "space", "space": width}

class NewlineLexeme(EmptyLexeme):
    """Lexeme for line breaks; the token carries no content."""

    def Output(s, string):
        # the matched line-break text itself is irrelevant
        return {"type": "newline"}

class Lexer():
    """Tokenizer built from one DFA per lexeme.

    Peek() returns the token produced by the longest match among all
    live automatons (ties go to the lexeme listed first); Next()
    consumes it.  Compiled DFAs are cached on disk in DataPath so later
    runs skip the regex -> NFA -> DFA construction.
    """

    # cache file holding the serialized StaticDfas
    DataPath = "lexer_dfa.dat"

    def __init__(s, tabsize = 4):
        try:
            # fast path: load the cached automatons; the load order
            # must match the Save order in the except branch below
            fh = open(Lexer.DataPath, "rb")

            stringLex = StringLexeme("string", Lexer.LoadStaticDfa(fh))
            commentLex = EmptyLexeme("comment", Lexer.LoadStaticDfa(fh))
            keywordLex = SameLexeme("keyword", Lexer.LoadStaticDfa(fh))
            idLex = SameLexeme("id", Lexer.LoadStaticDfa(fh))
            intLex = IntLexeme("int", Lexer.LoadStaticDfa(fh))
            floatLex = FloatLexeme("float", Lexer.LoadStaticDfa(fh))
            spaceLex = SpaceLexeme(tabsize, Lexer.LoadStaticDfa(fh))
            jointlineLex = EmptyLexeme("jointline", Lexer.LoadStaticDfa(fh))
            newlineLex = NewlineLexeme("newline", Lexer.LoadStaticDfa(fh))

            fh.close()

        except Exception:
            # slow path: (re)build every automaton from its regex
            print("Failed to load dfa data, building new ones...")
            stringLex = StringLexeme("string", Lexer.RegExprToAutomaton(
                "\"([a-zA-Z0-9`~!@#$%^&\\*\\(\\)_\\+" +
                "\\-={}\\[\\]\\|:;'<>\\?,./ \\t]|" +
                "\\\\\\\\|\\\\\"|\\\\n|\\\\t|\\\\r)*\""))

            commentLex = EmptyLexeme("comment", Lexer.RegExprToAutomaton(
                "#[a-zA-Z0-9`~!@#$%^&\\*\\(\\)_\\+" +
                "\\-={}\\[\\]\\|:;'<>\\?,./ \\t\"\\\\]*"))

            # NOTE(review): the lone "." alternative near the end is
            # unescaped; if regexpr treats "." as a wildcard it matches
            # any character here -- confirm against regexpr's semantics
            keywordLex = SameLexeme("keyword", Lexer.RegExprToAutomaton(
                "and|assert|break|class|continue|def|elif|else|exec|" +
                "for|from|if|import|in|return|is|not|or|pass|print|while|" +
                "try|catch|except|raise|Exception|del|" +
                "\\+|\\-|\\*|/|%|\\+=|\\-=|\\*=|%=|=|" +
                "<|<=|>|>=|==|!=|\\[|\\]|\\(|\\)|{|}|:|,|.|" +
                "@staticmethod"))

            idLex = SameLexeme(
                "id", Lexer.RegExprToAutomaton("[_a-zA-Z][_a-zA-Z0-9]*"))

            intLex = IntLexeme("int", Lexer.RegExprToAutomaton("[0-9]+"))

            # NOTE(review): the "." between the digit groups is also
            # unescaped -- same wildcard caveat as above
            floatLex = FloatLexeme("float", Lexer.RegExprToAutomaton(
                "[0-9]+.[0-9]+([eE][\\+\\-]?[0-9]+)?"))

            spaceLex = SpaceLexeme(
                tabsize, Lexer.RegExprToAutomaton("[ \\t]+"))

            jointlineLex = EmptyLexeme(
                "jointline", Lexer.RegExprToAutomaton(
                    "\\\\[ \\t]*(\\r|\\n|\\r\\n|\\n\\r)"))

            newlineLex = NewlineLexeme(
                "newline", Lexer.RegExprToAutomaton("\\r|\\n|\\r\\n|\\n\\r"))

            # cache the freshly built DFAs; failure here is non-fatal
            try:
                fh = open(Lexer.DataPath, "wb")
                stringLex.fa.Save(fh)
                commentLex.fa.Save(fh)
                keywordLex.fa.Save(fh)
                idLex.fa.Save(fh)
                intLex.fa.Save(fh)
                floatLex.fa.Save(fh)
                spaceLex.fa.Save(fh)
                jointlineLex.fa.Save(fh)
                newlineLex.fa.Save(fh)
                fh.close()
            except Exception:
                print("Failed to cache dfa data")

        # match priority on equal length: earlier entries win
        s.lexs = [commentLex, keywordLex, idLex, stringLex,
                  intLex, floatLex, spaceLex, jointlineLex, newlineLex]
        # cache for the last Peek(): matched width and token dict
        s.peekLength = None
        s.peek = None

    @staticmethod
    def RegExprToAutomaton(expr):
        """Compile a regex string into a minimized StaticDfa."""
        stream = StringStream(expr)
        tree = RegExpr(stream).Expr()
        nfa = Nfa.GenerateNfa(tree)
        dfa = Dfa.GenerateDfa(nfa)
        dfa.Minimize()
        sdfa = StaticDfa.FromDfa(dfa)
        return sdfa

    @staticmethod
    def LoadStaticDfa(fileHandle):
        """Read one cached StaticDfa from an open binary file."""
        dfa = StaticDfa()
        dfa.Load(fileHandle)
        return dfa

    def SetStream(s, stream):
        """Attach the character stream to tokenize."""
        s.stream = stream

    def Peek(s):
        """Return the next token without consuming it.

        Advances every still-live automaton one character at a time and
        remembers the longest accepted prefix (maximal munch).  Lexemes
        whose Output returns None (comments, joined lines) are consumed
        and skipped transparently.  Returns a {"type": "none"} token at
        end of input and raises Exception when nothing matches.
        """
        if s.stream.Peek() is None:
            # end of input: synthesize a terminating token
            return {"type":"none", "line":s.stream.GetLineNum()}

        if s.peekLength is not None:
            # an unconsumed token from an earlier Peek() is cached
            return s.peek

        maxlen = 0
        maxlex = None
        curlen = 0
        for lex in s.lexs:
            lex.fa.Reset()
        lexs = s.lexs

        while True:
            curlen += 1
            peek = s.stream.PeekAt(curlen - 1)
            if peek is None:
                break

            # keep only the automatons still alive after this character
            newlexs = []
            for lex in lexs:
                # each automaton keeps state across calls, so Simulate
                # advances it by exactly this one character
                result = lex.fa.Simulate(StringStream(peek))
                if result:
                    newlexs.append(lex)
                    if result[0] in lex.fa.end and \
                       curlen > maxlen:
                        maxlen = curlen
                        maxlex = lex
            lexs = newlexs
            if not lexs:
                break

        if maxlen == 0:
            raise Exception("error at line " + str(s.stream.GetLineNum()))

        lexeme = maxlex.Output(s.stream.Peek(maxlen))
        if lexeme:
            lexeme["line"] = s.stream.GetLineNum()
            s.peekLength = maxlen
            s.peek = lexeme
            return lexeme
        else:
            # the lexeme produced no token (e.g. a comment): consume
            # the matched text and retry on what follows
            s.stream.Next(maxlen)
            s.peekLength = s.peek = None
            return s.Peek()

    def Next(s):
        """Consume the current token (peeking first if necessary)."""
        if s.stream.Peek() is None:
            return
        if s.peekLength is None:
            s.Peek()
        if s.peekLength is not None:
            s.stream.Next(s.peekLength)
            s.peekLength = s.peek = None

if __name__ == "__main__":
    start = clock()
    lex = Lexer()
    print "Lexer build time :", clock() - start
    
    fs = FileStream("test/test5.py")
    start = clock()
    result = []
    lex.SetStream(fs)
    
    while True:
        p = lex.Peek()
        if p["type"] == "none":
            break
        result.append(p)
        lex.Next()
        
    print "Lexer run time :", clock() - start
    print str(len(result)), "tokens"

    #for token in result:
    #    print token
