'''
Created on May 7, 2009

@license: LGPL
@copyright: (c) 2009, dogsing.cn
@author: ant-man(Bin Zhao)
'''
from cn.pigersing.moc.utils.logger import get_logger
import ply.lex as lex
import re

_m_logger = get_logger('mocLexer')

def _error_func(msg, line, column):
    '''Default lexical-error handler: log the message together with the
    (line, column) position at which the error occurred.'''
    report = 'error occurred at (%d, %d) when lexical analysis, %s' % (line, column, msg)
    _m_logger.debug(report)

class mocLexer(object):
    '''
    Lexer for the moc language, built on top of PLY (ply.lex).

    Typical usage:
        lexer = mocLexer()
        lexer.build()
        lexer.input(source_text)
        tok = lexer.token()   # repeat until None
    '''

    def __init__(self, error_func=None):
        '''
        Create a new Lexer.
        @param error_func:
                An error function. Will be called with an error
                message, line and column as arguments, in case of
                an error during analysis.  Defaults to the
                module-level logging handler.
        '''
        self.error_func = error_func if error_func else _error_func
        self.filename = ''

    def build(self, **kwargs):
        '''
        Builds the lexer from the specification. Must be
        called after the lexer object is created and before
        input()/token().
        '''
        self.lexer = lex.lex(object=self, **kwargs)

    def input(self, text):
        '''Feed source text to the underlying PLY lexer.'''
        self.lexer.input(text)

    def token(self):
        '''
        Return the next token, or None at end of input.  The token is
        annotated with its 1-based column number.
        '''
        g = self.lexer.token()
        if not g :
            return None
        # NOTE: 'colunmno' is a historical misspelling of 'columnno';
        # existing callers read this attribute, so the name is kept.
        g.colunmno = self._find_tok_column(g)
        return g

    def reset_lineno(self):
        """ Resets the internal line number counter of the lexer.
        """
        self.lexer.lineno = 1

    def _error(self, msg, token):
        '''Report a lexical error through the configured error handler
        and skip the offending character so lexing can continue.'''
        location = (token.lineno, self._find_tok_column(token))
        self.error_func(msg, location[0], location[1])
        self.lexer.skip(1)

    def _find_tok_column(self, token):
        '''
        Return the 1-based column of *token* within its line.

        Bug fix: the previous backwards scan returned column 2 for the
        first character of every line after the first, while first-line
        columns started at 1.  rfind() returns the index of the last
        newline before the token (-1 on the first line), which makes
        the result consistently 1-based: lexpos 0 -> 0 - (-1) == 1.
        '''
        last_cr = self.lexer.lexdata.rfind('\n', 0, token.lexpos)
        return token.lexpos - last_cr

    #===============================================================================
    #  keyword_map = {}  Reserved keywords
    #  'DEF', for defining the function or method
    #  'IN', for iterating in a collection
    #  'TRUE', 'FALSE', NULL
    #===============================================================================
    #  Class support will appear in next version
    #  'CLASS', 'THIS',

    keywords = (
            'BREAK', 'CONTINUE',
            'DEF', 'ELSE', 'FOR',
            'FALSE', 'IF', 'IN', 'RETURN',
            'TRUE', 'WHILE', 'NULL'
    )

    # Map lower-case source spelling -> token type, e.g. 'if' -> 'IF'.
    keyword_map = {}
    for r in keywords:
        keyword_map[r.lower()] = r

    tokens_nc = keywords + (
            # Identifiers
            'ID',

            # constants
            'FLOAT', 'INTEGER',

            # String literals
            'STRING_LITERAL',

            # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
            'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
            'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
            'LOR', 'LAND', 'LNOT',
            'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',

            # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
            'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
            'LSHIFTEQUAL', 'RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',

            # Increment/decrement (++,--)
            'PLUSPLUS', 'MINUSMINUS',

            ## Don't support this operation
            # Ternary operator (?)
            ##'TERNARY',

            # Delimeters ( ) [ ] { } , . ; :
            'LPAREN', 'RPAREN',
            'LBRACKET', 'RBRACKET',
            'LBRACE', 'RBRACE',
            'COMMA', 'PERIOD', 'SEMI', 'COLON',

            ## !! Don't support the variable parameters
            # Ellipsis (...)
            ##'ELLIPSIS',
    )

    tokens = tokens_nc + ('COMMENT',)

    # Newlines: track line numbers, produce no token.
    def t_NEWLINE(self, t):
        r'\n+'
        t.lexer.lineno += t.value.count("\n")

    # Floating literal (must be defined before t_INTEGER so that PLY
    # tries the longer match first).
    def t_FLOAT(self, t) :
        r'(\d+)(\.\d+)(e(\+|-)?(\d+))?'
        return t

    # Integer literal
    def t_INTEGER(self, t) :
        r'\d+'
        return t

    # String literal; may span lines via escaped newlines, so keep the
    # line counter in sync.
    def t_STRING_LITERAL(self, t):
        r'\"([^\\\n]|(\\(.|\n)))*?\"'
        t.lexer.lineno += t.value.count("\n")
        return t

    # Comment (C-style /* */ and C++-style //); skipped by returning
    # the token that follows the comment.
    def t_COMMENT(self, t):
        r'(/\*(.|\n)*?\*/)|(//.*?\n)'
        t.lexer.lineno += t.value.count("\n")
        return t.lexer.token()

    # Identifier or reserved keyword.
    def t_ID(self, t):
        r'[a-zA-Z_][0-9a-zA-Z_]*'
        t.type = self.keyword_map.get(t.value, "ID")
        return t

    #===============================================================================
    #  Regexes for use in tokens
    #===============================================================================

    # Rules for the normal state
    t_ignore = ' \t'

    # Operators
    t_PLUS = r'\+'
    t_MINUS = r'-'
    t_TIMES = r'\*'
    t_DIVIDE = r'/'
    t_MOD = r'%'
    t_OR = r'\|'
    t_AND = r'&'
    t_NOT = r'~'
    t_XOR = r'\^'
    t_LSHIFT = r'<<'
    t_RSHIFT = r'>>'
    t_LOR = r'\|\|'
    t_LAND = r'&&'
    t_LNOT = r'!'
    t_LT = r'<'
    t_GT = r'>'
    t_LE = r'<='
    t_GE = r'>='
    t_EQ = r'=='
    t_NE = r'!='

    # Assignment operators

    t_EQUALS = r'='
    t_TIMESEQUAL = r'\*='
    t_DIVEQUAL = r'/='
    t_MODEQUAL = r'%='
    t_PLUSEQUAL = r'\+='
    t_MINUSEQUAL = r'-='
    t_LSHIFTEQUAL = r'<<='
    t_RSHIFTEQUAL = r'>>='
    t_ANDEQUAL = r'&='
    t_OREQUAL = r'\|='
    # Bug fix: an unescaped '^' is a regex anchor, so r'^=' could only
    # ever match at the very start of the input and 'a ^= b' never
    # lexed as XOREQUAL.  Escape it like t_XOR above.
    t_XOREQUAL = r'\^='

    # Increment/decrement
    t_PLUSPLUS = r'\+\+'
    t_MINUSMINUS = r'--'

    # Delimeters
    t_LPAREN = r'\('
    t_RPAREN = r'\)'
    t_LBRACKET = r'\['
    t_RBRACKET = r'\]'
    t_LBRACE = r'\{'
    t_RBRACE = r'\}'
    t_COMMA = r','
    t_PERIOD = r'\.'
    t_SEMI = r';'
    t_COLON = r':'

    # Any character no other rule matched: report and skip it.
    def t_error(self, t):
        msg = 'Illegal character %s' % repr(t.value[0])
        self._error(msg, t)

#===============================================================================
#  using for test
#===============================================================================

if __name__ == '__main__' :

    import sys

    # Read the source file named on the command line; fall back to a
    # small built-in sample when no argument is given or the file
    # cannot be opened.  (Bug fix: the file handle was never closed.)
    try :
        with open(sys.argv[1]) as src_file :
            src = src_file.read()
    except (IndexError, IOError) :
        src = r'''
        if ( a > 10) {
            print ("hello world");
        }
        '''

    lexer = mocLexer()
    lexer.build(optimize=False)
    lexer.input(src)

    # Dump every token together with its position.
    while True:
        tok = lexer.token()
        if not tok: break
        print('<%s> %s <%s> at (%d, %d)' % (tok.type, tok.value, tok.type, tok.lineno, tok.colunmno))

    # Show the reserved words.
    for kw in lexer.keyword_map.keys():
        print(kw)

    # Dump the lexing rules: simple rules are plain regex strings,
    # function rules keep their regex in the docstring.
    # (Bug fix: the original printed a.__doc__ unconditionally, so for
    # string rules it also printed the str class docstring.)
    for att in dir(lexer) :
        if att.startswith('t_') :
            a = getattr(lexer, att)
            if isinstance(a, str) :
                print("%s ::= %s" % (att[2:], a))
            else :
                print("%s ::= %s" % (att[2:], a.__doc__))
