'''
Created on 29/09/2012

@author: Barrios - Correa - Felippone
'''

from AST_impl import *

# Map of %-directives to their token names.  Note: the lowercase-identifier
# rule's regex rejects '%', so these can only be produced by the dedicated
# t_* directive rules below.
reserved = {
            '%start'   : 'START',
            '%left'    : 'LEFT',
            '%right'   : 'RIGHT',
            '%nonassoc': 'NONASSOC',
            '%ignore'  : 'IGNORE'
            }

# Full token list: the directive tokens plus punctuation and lexeme tokens.
tokens=list(reserved.values()) + ['SEMICOLON','MAYOR','POINTS','BAR','EQUAL','LPAREN',
                                  'RPAREN','LCOR','RCOR','IDEN','CLASS','REF','TOKEN','ER']

# Single-character punctuation tokens (PLY string rules: the value is the regex).
t_SEMICOLON   = r';'
t_MAYOR       = r'>'
t_POINTS      = r':'
t_BAR         = r'\|'
t_EQUAL       = r'='
t_LPAREN      = r'\('
t_RPAREN      = r'\)'
t_LCOR        = r'\['
t_RCOR        = r'\]'
 
def t_IDEN(t):
    r'[a-z][a-zA-Z0-9]*'
    # Lowercase identifier (non-terminal name).
    #
    # FIX: PLY derives a rule's token name from the function name and requires
    # it to appear in `tokens`; the original name t_IDENT referenced the
    # undeclared token IDENT ('IDEN' is the declared one), which makes
    # lex.lex() reject the rule.  The old reserved-word lookup was dead code:
    # every key in `reserved` starts with '%', which this regex cannot match.
    t.type = 'IDEN'
    return t

def t_START(tok):
    r'%start'
    # '%start' directive: marks the grammar's start rule.
    # The explicit type assignment is kept for clarity (PLY presets it).
    tok.type = 'START'
    return tok

def t_LEFT(tok):
    r'%left'
    # '%left' directive: declares left-associative terminals.
    tok.type = 'LEFT'
    return tok

def t_RIGHT(tok):
    r'%right'
    # '%right' directive: declares right-associative terminals.
    tok.type = 'RIGHT'
    return tok

def t_NONASSOC(tok):
    r'%nonassoc'
    # '%nonassoc' directive: declares non-associative terminals.
    tok.type = 'NONASSOC'
    return tok

def t_IGNORE(tok):
    r'%ignore'
    # '%ignore' directive: lists lexemes the generated lexer should skip.
    tok.type = 'IGNORE'
    return tok

def t_CLASS(t):
    r'[A-Z][a-zA-Z]*'
    # Capitalized identifier: an AST class name.
    # FIX: removed the dead reserved-word lookup — every key in `reserved`
    # starts with '%', which this regex can never match.
    t.type = 'CLASS'
    return t

def t_REF(t):
    r'\$[0-9]+'
    # $n positional reference (yacc-style semantic value reference).
    # FIX: removed the dead reserved-word lookup — a '$...' value can
    # never equal a '%...' reserved key.
    t.type = 'REF'
    return t

def t_TOKEN(t):
    r'\'([^\'\n\\]|\\[^\n])+\''
    # Single-quoted literal token; allows backslash escapes, forbids raw
    # newlines inside the quotes.
    # FIX: removed an obsolete commented-out regex and the dead
    # reserved-word lookup (a quoted string can never equal a '%...' key).
    t.type = 'TOKEN'
    return t

def t_ER(t):
    r'/([^/\n\\]|\\[^\n])+/'
    # Slash-delimited regular expression lexeme.
    # FIX: removed the dead reserved-word lookup — a '/.../'-shaped value
    # can never equal a '%...' reserved key.
    t.type = 'ER'
    return t

# Ignored characters (inter-token whitespace).
t_ignore = " \t"

# Line comments.
# FIX: the old pattern r'\/\/[^\n]*\n' consumed the trailing newline, so
# t_newline never saw it and the line counter drifted by one per comment;
# it also failed to ignore a comment at end-of-file.  Matching up to (but
# not including) the newline fixes both.
t_ignore_COMMENT = r'//[^\n]*'

def t_newline(tok):
    r'\n+'
    # The whole match is newlines, so its length is the number of lines
    # to advance.
    tok.lexer.lineno += len(tok.value)
    
def t_error(tok):
    # Report the offending character, then resume scanning one character
    # further on.
    bad_char = tok.value[0]
    print("Illegal character '%s'" % bad_char)
    tok.lexer.skip(1)
    
# TODO: precedence declarations for the generated parser are still missing

# Parsing rules
def p_expressions(p):
    'expressions : expression'
    # Base case: wrap a single expression in the Expressions container.
    p[0] = Expressions(p[1])

def p_expressions_semicolon(p):
    'expressions : expressions SEMICOLON expression'
    # Left-recursive case: fold the new expression into the
    # already-built collection and pass it up.
    collection = p[1]
    collection.add(p[3])
    p[0] = collection

def p_expression(p):
    'expression : IDEN MAYOR category POINTS productions'
    # Grammar rule without %start; the False flag marks it as a
    # non-start rule (cf. p_expression_start).
    p[0] = Expression(False, p[1], p[3], p[5])
    
def p_expression_start(p):
    'expression : START IDEN MAYOR category POINTS productions'
    # Same as p_expression but prefixed with %start: the True flag marks
    # this rule as the grammar's start symbol.
    p[0] = Expression(True, p[2], p[4], p[6])
    
def p_productions(p):
    'productions : production'
    # Base case: a production list with one alternative.
    p[0] = Productions(p[1])
    
def p_productions_bar(p):
    'productions : productions BAR production'
    # '|'-separated alternative: append it to the accumulated list.
    alternatives = p[1]
    alternatives.add(p[3])
    p[0] = alternatives
    
def p_production(p):
    'production : vocabulary MAYOR action'
    # A production is a right-hand-side vocabulary plus its semantic action.
    p[0] = Production(p[1], p[3])
    
def p_vocabulary(p):
    'vocabulary : lexem'
    # Base case: vocabulary with a single lexeme.
    p[0] = Vocabulary(p[1])
    
def p_vocabulary_lexema(p):
    'vocabulary : vocabulary lexem'
    # Extend the running vocabulary with the next lexeme.
    vocab = p[1]
    vocab.add(p[2])
    p[0] = vocab

def p_lexem_er(p):
    'lexem : ER'
    # A lexeme given as a /.../ regular expression.
    p[0] = ER(p[1])
    
def p_lexem_token(p):
    'lexem : TOKEN'
    # A lexeme given as a '...' literal token.
    p[0] = Token(p[1])
    
def p_lexem_iden(p):
    'lexem : IDEN'
    # A lexeme given as a lowercase identifier (non-terminal reference).
    p[0] = Ident(p[1])
    
def p_action_ref(p):
    'action_ref : REF'
    # $n positional reference inside a semantic action.
    p[0] = Ref(p[1])
    
def p_action_list(p):
    '''
    action_list : LCOR action_class RCOR
                | LCOR action_ref RCOR
    '''
    # A bracketed action: [ Class(...) ] or [ $n ].
    p[0] = ActionList(p[2])
    
def p_action_list2(p):
    'action_list : LCOR RCOR'
    # Empty brackets: an empty action list.
    p[0] = ActionList(ActionEmptyList())
    
def p_action_class(p):
    'action_class : LPAREN CLASS RPAREN'
    # Class construction with no attributes: (ClassName).
    p[0] = Action(p[2], None)
    
def p_action_class_att(p):
    'action_class : LPAREN CLASS listatt RPAREN'
    # Class construction with an attribute list: (ClassName att=... ...).
    p[0] = Action(p[2], p[3])

def p_action_comp(p):
    '''
    action : action_ref
           | action_class
           | action_list
    '''
    # An action is any one of the three action forms; pass it through.
    p[0] = p[1]
    
def p_action_comp3(p):
    '''
    action : action POINTS action_ref
           | action POINTS action_class
           | action POINTS action_list
    '''
    # ':'-chained actions build a left-nested Concat chain.
    p[0] = Concat(p[1], p[3])
    
def p_listatt_att(p):
    'listatt : att'
    # Base case: attribute list with a single attribute.
    p[0] = Attributes(p[1])
    
def p_listatt(p):
    'listatt : listatt att'
    # Accumulate one more attribute onto the running list.
    attrs = p[1]
    attrs.add(p[2])
    p[0] = attrs
    
def p_att(p):
    'att : IDEN EQUAL action'
    # A named attribute assignment: name = action.
    p[0] = Attribute(p[1], p[3])
    
def p_class(p):
    'category : CLASS'
    # Plain category; the False flag marks the unbracketed form
    # (cf. p_class_cor).
    p[0] = Category(p[1], False)
    
def p_class_cor(p):
    'category : LCOR CLASS RCOR'
    # Bracketed category [Class]; the True flag marks the bracketed form.
    p[0] = Category(p[2], True)

# Associativity (%left / %right / %nonassoc) and %ignore directives.
def p_assoc_left(p):
    'expression : LEFT assoc'
    # %left directive with its list of terminals.
    p[0] = Assoc('left', p[2])
    
def p_assoc_er(p):
    'assoc : ER'
    # Base case: an associativity list starting with a regex lexeme.
    p[0] = [p[1]]
    
def p_assoc_er_er(p):
    'assoc : assoc ER'
    # Append the next regex lexeme to the associativity list.
    grown = p[1] + [p[2]]
    p[1] = grown
    p[0] = grown
    
def p_assoc(p):
    'assoc : TOKEN'
    # Base case: an associativity list starting with a literal token.
    p[0] = [p[1]]
    
def p_assoc_token(p):
    'assoc : assoc TOKEN'
    # Append the next literal token to the associativity list.
    grown = p[1] + [p[2]]
    p[1] = grown
    p[0] = grown
    
def p_assoc_right(p):
    'expression : RIGHT assoc'
    # %right directive with its list of terminals.
    p[0] = Assoc('right',p[2])
    
def p_assoc_nonassoc(p):
    'expression : NONASSOC assoc'
    # %nonassoc directive with its list of terminals.
    p[0] = Assoc('nonassoc',p[2])
    
def p_ignore(p):
    'expression : IGNORE lister'
    # %ignore directive: lexemes the generated lexer should discard.
    p[0] = Ignore(p[2])

def p_lister_er(p):
    'lister : ER'
    # Base case: ignore-list starting with a regex lexeme node.
    p[0] = [ER(p[1])]
    
def p_lister_token(p):
    'lister : TOKEN'
    # Base case: ignore-list starting with a literal token node.
    p[0] = [Token(p[1])]
    
def p_lister_er_er(p):
    'lister : lister ER'
    # Wrap the new regex lexeme in its AST node and append it.
    grown = p[1] + [ER(p[2])]
    p[1] = grown
    p[0] = grown
    
def p_lister_token_token(p):
    'lister : lister TOKEN'
    # Wrap the new literal token in its AST node and append it.
    grown = p[1] + [Token(p[2])]
    p[1] = grown
    p[0] = grown
    
def p_error(t):
    # PLY calls this with the offending token, or None at end of input;
    # None has no 'value' attribute, so the hasattr check covers EOF.
    # FIX: corrected the misspelled user-facing message ("Syntex error").
    if hasattr(t, 'value'):
        print("Syntax error at '%s'" % t.value)
    else:
        print("Syntax error")

#Build the parser
# NOTE: PLY discovers the t_* and p_* rules by introspecting this module,
# so lex() and yacc() must run only after every rule is defined — hence
# the late imports and immediate build calls.
import ply.lex as lex
lex.lex()
import ply.yacc as yacc
yacc.yacc()

