"""A lexer to handle propositional phrases.

Danny Yoo (dyoo@hkn.eecs.berkeley.edu)

A proposition is an unevaluated logical expression that can be true
or false, depending on the environment we evaluate it in.

At the moment, I'm reading "The Science Of Programming", by David
Gries.  He has a few chapters on propositional logic, and I got
inspired to write a Python program to work with propositional phrases.


Here are examples of some propositions:

    not X
    x and y and z
    x and y or (x and y)

From this example, we can see that we'll use the following keywords when
parsing these expressions:

    ['not', 'and', 'or', '(', ')']

To make the lexical analyzer nice, we'll also allow C syntax with the
equivalent symbols: ['!', '&&', '||'].

"""

import spark

def tokenize(s):
    """Tokenize the string s into a list of Token objects.

    Uses ScannerWithKeywordsAndConstants to do the scanning; if the
    input cannot be lexed at all, an empty list is returned instead of
    propagating spark.LexerError.
    """
    scanner = ScannerWithKeywordsAndConstants()
    try:
        tokens = scanner.tokenize(s)
    except spark.LexerError:
        return []
    return tokens

    
class SimpleScanner(spark.GenericScanner):
    """Base scanner handling whitespace, brackets, parens, and identifiers.

    NOTE(review): spark.GenericScanner (SPARK framework) uses the raw-string
    docstring of each t_* method as that token's regular expression, so
    those docstrings are part of the program's behavior -- do not edit
    them as if they were documentation.
    """
    def __init__(self):
        spark.GenericScanner.__init__(self)
        
    def tokenize(self, input):
        """Scan 'input' and return the accumulated list of Token objects."""
        # self.rv collects tokens as a side effect of the t_* callbacks.
        self.rv = []
        spark.GenericScanner.tokenize(self, input)
        return self.rv

    def t_whitespace(self, s):
        r' \s+ '
        pass  ## to ignore whitespace

    def t_brackets(self, s):
        r' \[ | \] '
        # The bracket character itself ('[' or ']') becomes the token type.
        self.rv.append(Token(type=s))

    def t_parens(self, s):
        r' \( | \) '
        # Likewise, '(' or ')' is used directly as the token type.
        self.rv.append(Token(type=s))

    def t_identifier(self, s):
        r' [\w\d\_\-\?\.\']+ '
        # \w already covers digits and underscore, so \d and \_ are
        # redundant (but harmless).  The matched text is kept in 'attr'.
        self.rv.append(Token(type='identifier', attr=s))


class ScannerWithKeywordsAndConstants(SimpleScanner):
    """Extends SimpleScanner with logical keywords, operators, and constants.

    Each t_* docstring below is that token's regex (SPARK convention) --
    treat them as code.  Keywords are matched case-insensitively, and most
    also accept a C-style symbol form ('&', '|', '!', '->', '=').
    """
      
    def t_and(self, s):
        r' [aA][nN][dD] | \& '
        self.rv.append(Token(type='and'))

    def t_or(self, s):
        r' [oO][rR] | \| '
        self.rv.append(Token(type='or'))

    def t_not(self, s):
        r' [nN][oO][tT] | \! '
        self.rv.append(Token(type='not'))
        
    def t_eU(self, s):
        r' [e][U] '
        # NOTE(review): 'eU'/'aU' look like CTL temporal operators
        # (E Until / A Until) -- confirm against the parser that consumes
        # these token types.
        self.rv.append(Token(type='eU'))
        
    def t_aU(self, s):
        r' [a][U] '
        self.rv.append(Token(type='aU'))

    def t_eAT(self, s):
        r' [e][@] | [e][A][T] '
        # Accepts both 'e@' and 'eAT' spellings for the same token.
        self.rv.append(Token(type='eAT'))
        
    def t_aAT(self, s):
        r' [a][@] | [a][A][T] '
        self.rv.append(Token(type='aAT'))

    def t_implies(self, s):
        r' \-\> | [Ii][mM][pP][lL][iI][eE][sS] '
        self.rv.append(Token(type='implies'))

    def t_equals(self, s):
        r' \= | [eE][qQ][uU][aA][lL][sS]'
        self.rv.append(Token(type='equals'))
        
    def t_constant(self, s):
        r' \b[Tt][Tt]\b | \b[Ff][Ff]\b | [tT][rR][uU][eE] | [fF][aA][lL][sS][eE] '
        # Normalize every truth constant to the canonical 'tt' / 'ff' form.
        if s.lower() in ('tt', 'true'): value = 'tt'
        else: value = 'ff'
        self.rv.append(Token(type='constant', attr=value))
        

        


class Token:
    """A single lexical token, carrying a 'type' and an optional 'attr'."""

    def __init__(self, type, attr=None):
        self.type = type
        self.attr = attr

    def __cmp__(self, other):
        # Python 2 comparison hook: tokens compare by type only, so a
        # Token can be matched directly against a bare type string.
        return cmp(self.type, other)

    def __repr__(self):
        return 'Token(%r, %r)' % (self.type, self.attr)



def test():
    """Interactive driver: tokenize each typed line until the user quits.

    Requires the local 'prompt' module (not shown in this file), which
    supplies a read-eval-print loop that feeds each line to tokenize().
    """
    import prompt
    print """A small driver program to see if any of this works.

You can enter pieces of propositional phrases here, and see how we
tokenize each line.  To quit, type 'quit'."""
    p = prompt.Prompt('LexerTest', callback=tokenize)
    p.promptLoop()

# Run the interactive lexer demo when executed as a script.
if __name__ == '__main__':
    test()
