#!/usr/bin/python
import re
import sys
import v_lexer
import types

def myreplace(text):
    """Strip all spaces from *text* and turn the BNF meta-characters
    '|', '{', '}', '[', ']' into underscores, yielding a string usable
    as a generated rule / function name.
    """
    # BUGFIX: compare with != instead of 'is not' -- identity comparison
    # on strings only worked by accident of CPython string interning.
    new_text = ''.join(ch for ch in text if ch != ' ')
    # str.replace is a no-op when the character is absent, so no 'in'
    # pre-check is needed.
    for meta in ('|', '{', '}', '[', ']'):
        new_text = new_text.replace(meta, '_')
    return new_text

def get_pairs(text, begin_str, end_str):
    """Scan *text* and return [[begin_index, end_index], ...] for every
    OUTERMOST begin_str/end_str pair; nested pairs are ignored.
    Prints an error and exits the program on unbalanced delimiters.
    """
    pair_list = []
    stack = []          # indices of currently-open begin_str markers
    index = 0
    while index < len(text):
        if text[index:index + len(begin_str)] == begin_str:
            stack.append(index)
        elif text[index:index + len(end_str)] == end_str:
            # BUGFIX: a closer with no matching opener used to raise
            # IndexError from stack.pop(); report it like other
            # unbalanced input instead.
            if not stack:
                print("unmatched pairs")
                sys.exit(1)
            start = stack.pop()
            if not stack:   # depth was 1 -> this pair is outermost
                pair_list.append([start, index])
        index += 1
    if stack:
        # Openers left without closers.
        print("unmatched pairs")
        sys.exit(1)
    return pair_list

def resolve_opt(text, out_rules):
    """Replace every outermost [ ... ] optional group in *text* with a
    generated "<name>_opt" nonterminal, recording a "<body> | empty "
    rule for each generated name in *out_rules*.  Optional groups nested
    inside a generated body are expanded recursively.  Returns the
    rewritten text.
    """
    fresh = {}
    pieces = []
    cursor = 0
    for begin, end in get_pairs(text, '[', ']'):
        body = text[begin + 1:end]
        opt_name = myreplace(body) + '_opt'
        pieces.append(text[cursor:begin] + ' ' + opt_name + ' ')
        fresh[opt_name] = body + ' | empty '
        cursor = end + 1
    pieces.append(text[cursor:])
    new_text = ''.join(pieces)

    # Generated bodies may themselves contain [ ... ] groups.
    nested = {}
    for name in fresh:
        if get_pairs(fresh[name], '[', ']'):
            fresh[name] = resolve_opt(fresh[name], nested)

    out_rules.update(fresh)
    out_rules.update(nested)
    return new_text

def resolve_list(text, out_rules):
    """Replace every outermost { ... } repetition group in *text* with a
    generated "<name>_list" nonterminal, recording a left-recursive rule
    "<body> | <name>_list <body> | empty " for each generated name in
    *out_rules*.  Groups nested inside a generated body are expanded
    recursively.  Returns the rewritten text.
    """
    fresh = {}
    pieces = []
    cursor = 0
    for begin, end in get_pairs(text, '{', '}'):
        body = text[begin + 1:end]
        list_name = myreplace(body) + '_list'
        pieces.append(text[cursor:begin] + ' ' + list_name + ' ')
        fresh[list_name] = body + ' | ' + list_name + ' ' + body + ' | empty '
        cursor = end + 1
    pieces.append(text[cursor:])
    new_text = ''.join(pieces)

    # Generated bodies may themselves contain { ... } groups.
    nested = {}
    for name in fresh:
        if get_pairs(fresh[name], '{', '}'):
            fresh[name] = resolve_list(fresh[name], nested)

    out_rules.update(fresh)
    out_rules.update(nested)
    return new_text

def filter_tokens(text):
    """Translate a whitespace-separated BNF literal fragment into a
    string of lexer token names, using the module-level all_token_names /
    all_tokens tables.  Prints an error and exits on a word that no
    token pattern matches.
    """
    if text.isspace():
        return ''
    mystr = ''
    for word in text.strip().split(' '):
        for token in all_token_names:
            obj = re.match(token, word)
            if obj:
                mystr += ' ' + all_tokens[token] + ' '
                end = obj.end()
                # A pattern may match only a prefix of the word (e.g. an
                # operator glued to an identifier): translate the rest too.
                if end < len(word):
                    mystr += filter_tokens(word[end:])
                break
        else:
            # for/else: no pattern matched.  'PATHPULSE$' is special-cased
            # because its '$' has no standalone token pattern.
            # (BUGFIX: removed a dead 'pass' that preceded this append.)
            if word == 'PATHPULSE$':
                mystr += ' PATHPULSE_ DOLAR '
            else:
                print('None token found ' + word)
                sys.exit(1)
    return mystr

def resolve_rules(raw_rules, out_rules):
    """Normalize every raw BNF rule body into plain token text.

    For each rule whose name is not in the module-level filter_list:
      1. translate <b>...</b> literal spans into lexer token names
         via filter_tokens, copying the text between spans unchanged;
      2. replace '$' and ';' (illegal in ply rule names) with '_' in
         both the rule name and body;
      3. expand [optional] groups (resolve_opt) and {repeated} groups
         (resolve_list); both emit helper rules into new_rules.
    Helper rules are then normalized by a recursive call; results land
    in out_rules, first writer of a name winning.
    """
    new_rules = {}
    sub_rules = {}
    for rule_name in raw_rules.keys():
        rule_body = raw_rules[rule_name]
        if rule_name not in filter_list:
            # '<b>' is 3 chars and '</b>' is 4, hence begin+3 / end+4.
            pair_list = get_pairs(rule_body, '<b>', '</b>')
            last_address = 0
            stage0_rule_body = ''
            for begin,end in pair_list:
                stage0_rule_body += rule_body[last_address:begin] + filter_tokens( rule_body[begin+3: end])
                last_address = end+4
            stage0_rule_body += rule_body[last_address:]
            stage1_rule_body = stage0_rule_body.replace('$', '_').replace(';','_')
            rule_name = rule_name.replace('$', '_').replace(';','_')
            stage2_rule_body = resolve_opt(stage1_rule_body, new_rules)
            stage3_rule_body = resolve_list(stage2_rule_body, new_rules)
            # Keep the first definition seen for a given name.
            if rule_name not in out_rules.keys():
                out_rules[rule_name] = stage3_rule_body
    # Helper rules generated by resolve_opt/resolve_list may themselves
    # contain groups to expand: normalize them recursively.
    if new_rules :
        resolve_rules(new_rules, sub_rules)
    for i in sub_rules.keys():
        if i not in out_rules.keys():
            out_rules[i] = sub_rules[i]


#identifier and number are tokens
# library description is not supported
# the dollar sign '$' is not supported in system_task_identifier
# unsigned number is relaxed to number for easy programming
# scalar_constant is relaxed to number
# output_level is binary_digit
# init_val is relaxed to number 
# level_symbol is relaxed to ID | MUL
# output_symbol is relaxed to ID
# edge_descriptor2 is relaxed to binary_digit binary_digit
# escaped_identifier is relaxed to ESCAPE ID
################################################################################

# program start execution here

################################################################################


# Hand-written seed rules: tokens and deliberately relaxed constructs
# that the BNF extraction below cannot derive automatically.
all_rules = [
    ['unsigned_number',            'unsigned_number : NUMBER'],
    ['scalar_constant',            'scalar_constant : NUMBER'],
    ['init_val',                   'init_val : NUMBER'],
    ['level_symbol0',              'level_symbol : ID '],
    ['level_symbol1',              'level_symbol : MUL'],
    ['edge_descriptor2',           'edge_descriptor : ID ID '],
    ['escaped_identifier',         'escaped_identifier : ESCAPE ID'],
    ['string',                     'string : STRING'],
    ['system_task_identifier',     'system_task_identifier : DOLAR ID'],
    ['output_symbol',              'output_symbol : ID'],
    ['function_statement',         'function_statement : statement'],
    ['number',                     'number : NUMBER'],
    ['PATHPULSE_',                 'PATHPULSE_ : PATHPULSE DOLAR'],
    ['real_number',                'real_number : NUMBER'],
    ['simple_identifier',          'simple_identifier : ID'],
    ['system_function_identifier', 'system_function_identifier : DOLAR ID'],
    ['edge_symbol',                'edge_symbol : ID'],
]

# Build the token tables used by filter_tokens:
#   all_tokens      : pattern (regex or literal) -> ply token name
#   all_token_names : patterns in lexer declaration order (match priority)
mylexer = v_lexer.VLexer(None)
all_tokens = {}
all_token_names = []
for token in mylexer.tokens:
    if token in mylexer.keywords:
        if token == 'PATHPULSE$':
            # '$' is a regex metacharacter; escape it, and no trailing
            # \b (word boundary) after the '$'.
            tmp_name = r'\bPATHPULSE\$'
        else:
            tmp_name = r'\b'+token.lower()+r'\b'
        all_tokens[tmp_name] = token
        all_token_names.append(tmp_name)
    else:
        # Non-keyword tokens: recover the pattern from the lexer's t_*
        # attributes.  String rules hold the regex directly; function
        # rules keep it in their docstring (ply convention).
        for i in dir(mylexer):
            if i.startswith('t_') and i[2:]==token:
                if type(eval('mylexer.'+i)) is types.StringType:
                    tmp_name = eval('mylexer.'+i)
                    all_tokens[tmp_name] = i[2:]
                    all_token_names.append(tmp_name)
                elif type(eval('mylexer.'+i)) is types.MethodType:
                    # Skip lexer rules whose regexes would mis-match
                    # ordinary BNF words (comments, strings, numbers,
                    # identifiers, preprocessor, error handler).
                    if i not in ['t_MUL_COMMENT','t_SINGLE_COMMENT','t_PP','t_STRING','t_PP','','t_NUMBER','t_error','t_ID']:
                        tmp_name = eval('mylexer.'+i+'.__doc__')
                        all_tokens[tmp_name] = eval('mylexer.'+i+'.__name__')[2:]
                        all_token_names.append(tmp_name)
                break
        else:
            # for/else: no t_<token> attribute found on the lexer.
            print 'Error, token not defined', token

#for i in all_token_names:
    #print 'token ',  i, ' is ',  all_tokens[i]
#sys.exit(1)
# Rule names excluded from BNF expansion: lexer-level constructs and
# rules replaced by the hand-written seeds in all_rules.  A frozenset
# gives O(1) membership tests for the per-rule check in resolve_rules
# (the original list was scanned linearly for every rule).
filter_list = frozenset([
    'function_statement1',
    'text_macro_identifier',
    'library_text',
    'library_description',
    'library_declaration',
    'include_statement',
    'simple_identifier3',
    'system_function_identifier4',
    'system_task_identifier4',
    'number',
    'real_number2',
    'exp',
    'decimal_number',
    'binary_number',
    'octal_number',
    'hex_number',
    'sign',
    'size',
    'non_zero_unsigned_number2',
    'unsigned_number2',
    'binary_value2',
    'octal_value2',
    'hex_value2',
    'decimal_base2',
    'binary_base2',
    'octal_base2',
    'hex_base2',
    'non_zero_decimal_digit',
    'decimal_digit',
    'binary_digit',
    'octal_digit',
    'hex_digit',
    'x_digit',
    'z_digit',
    'string',
    'comment',
    'white_space',
    'init_val',
    'output_symbol',
    'level_symbol',
    'edge_symbol',
    'edge_descriptor2',
    'zero_or_one',
    'z_or_x',
    'scalar_constant',
    'escaped_identifier',
    'comment_text',
    'one_line_comment',
    'block_comment'
])


# The 2005 BNF is scraped from an HTML dump: rules are separated by <br>
# and continuation lines are heavily indented.  The replace() normalizes
# '<br></b>' so a bold span never ends past its rule's line break.
# (Also fixed: the file handle was never closed.)
with open('verilog2005bnf.html') as bnf_file:
    lines = bnf_file.read().replace(r'<br></b>', '</b><br>').split('<br>')

text_rules = []
raw_rules = {}

for line in lines:
    if ' ' * 7 in line:
        # Deep indentation marks a continuation of the previous rule.
        text_rules[-1] = ''.join([text_rules[-1], line])
    elif '::=' in line:
        text_rules.append(line)
    elif 'tf_input_declaration' in line:
        # Special case: this rule's body is split without indentation.
        text_rules[-1] = ''.join([text_rules[-1], line])

# BUGFIX: parse the accumulated rules ONCE, after the line scan.  The
# original nested this loop inside the line loop, re-parsing every rule
# for every input line (accidental O(n^2); the final dict was the same).
for rule in text_rules:
    rule_name, rule_body = [i.replace('\n', '').strip() for i in rule.split('::=')]
    # Undo the HTML entity escaping.
    rule_body = rule_body.replace('&amp;', '&')
    rule_body = rule_body.replace('&quot;', '"')
    rule_body = rule_body.replace('&lt;', '<')
    rule_body = rule_body.replace('&gt;', '>')
    raw_rules[rule_name] = rule_body

resolved_rules = {}
resolve_rules(raw_rules, resolved_rules)

# Expand each resolved rule into ply-style [function name, docstring]
# pairs.  A body with '|' alternatives becomes one dispatch rule plus
# one numbered rule per branch, all sharing a generated "<...>_or"
# nonterminal; duplicate branch rules are emitted only once.
for name in resolved_rules.keys():
    body = resolved_rules[name]
    alternatives = body.split('|')
    if len(alternatives) == 1:
        all_rules.append([name, name + ' : ' + body])
    else:
        or_name = myreplace(body) + '_or'
        all_rules.append([name, name + ' : ' + or_name])
        for index, branch in enumerate(alternatives):
            entry = [or_name + str(index), or_name + ' : ' + branch]
            if entry not in all_rules:
                all_rules.append(entry)

# Source template for the generated parser module: a VParser class that
# builds the v_lexer lexer and the ply.yacc tables, declares operator
# precedence, and supplies p_error/p_empty.  The p_<rule> methods built
# from all_rules are printed after this header.
header = """#!/usr/bin/python
import re
import sys
import ply.yacc
from v_lexer import VLexer

class ParseError(Exception):
    pass

class VParser(object):
    def __init__(
        self,
        lex_optimize=True,
        lextab='pyvparser.lextab',
        yacc_optimize=True,
        yacctab='pyvparser.yacctab',
        yacc_debug=False
    ):
        "Create a new VParser"
        self.vlex = VLexer(error_func=self._lex_error_func) 

        self.vlex.build(
            optimize=lex_optimize,
            lextab=lextab
        )

        self.tokens = self.vlex.tokens

        self.vparser = ply.yacc.yacc(
            module=self, 
            debug=yacc_debug,
            optimize=yacc_optimize,
            tabmodule=yacctab,
            start='source_text')

    def _parse_error(self, msg, lineno):
        raise ParseError("%s: %s" % (lineno, msg))

    def parse(self, text, debuglevel=0):
        self.vlex.reset_lineno()
        return self.vparser.parse(text, lexer=self.vlex, debug=debuglevel)

    def _lex_error_func(self, msg, line):
        self._parse_error(msg, line)

    precedence = (
               ('left', 'BNEG', 'LNEG'),
               ('left', 'POWER'),
               ('left', 'MUL', 'DIV', 'PER'),
               ('left', 'ADD', 'MINUS'),
               ('left', 'ULSHIFT', 'URSHIFT', 'SLSHIFT', 'SRSHIFT'),
               ('left', 'GT','GE', 'LT', 'LE'),
               ('left', 'EQ', 'NE', 'CEQ', 'CNE'),
               ('left', 'BAND', 'RNAND'),
               ('left', 'BXOR', 'XORN', 'NXOR'),
               ('left', 'BOR', 'RNOR'),
               ('left', 'LAND'),
               ('left', 'LOR')
    )
    
    def p_error(self, p):
        if p :
            self._parse_error( 'before : %s' % p.value, p.lineno)
        else :
            self._parse_error('At end of input', '') 

    def p_empty(self, p):
        " empty : " 
"""
# Emit the generated parser source to stdout: the class template first,
# then one p_<name> method per collected rule, with the grammar rule in
# its docstring as ply.yacc expects.  Single-argument print(...) form
# behaves identically under Python 2 and is Python 3 compatible.
print(header)
for rule in all_rules:
    print('\n    def p_' + rule[0] + '(self, p):\n        "' + rule[1] + '"\n        pass\n')

