import ply.lex as lex

# List of token names.  Always required by PLY.
tokens = (
    "VARIABLE",     # variable name (identifier)
    "VARIABLE_TYPE",  # reg wire logic
    "BUSWIDTH",     # bit range such as [31:0], or a `MACRO reference
    "MODULE",       # module
    "INOUT",        # input output
    "LPAREN",        # (
    "RPAREN",        # )
    "ENDMODULE",     # endmodule
    "SEMICOLON",     # ;
    "COMMA",         # ,
    "EQUAL",         # =
)

# Maps a line number to the lexpos of that line's first character.
# Used to turn a token's absolute lexpos into a 1-based column:
# column = tok.lexpos - beginpos_line[tok.lineno] + 1.
beginpos_line = {1: 0}

# Regular expression rules for simple single-character tokens.
t_LPAREN = r'\('      # (
t_RPAREN = r'\)'      # )
t_SEMICOLON = r';'    # ;
t_COMMA = r','        # ,
t_EQUAL = r'='        # =


# Keywords that would otherwise lex as plain identifiers, mapped to their
# token types (the PLY-recommended "reserved words" pattern).
_RESERVED = {
    'reg': 'VARIABLE_TYPE',
    'wire': 'VARIABLE_TYPE',
    'logic': 'VARIABLE_TYPE',
    'module': 'MODULE',
    'endmodule': 'ENDMODULE',
    'input': 'INOUT',
    'output': 'INOUT',
}


def t_VARIABLE(t):
    # Verilog identifier: a letter or underscore, then letters/digits/_.
    # The original pattern had a duplicated '_' in its character class and
    # rejected a leading underscore, which Verilog permits.
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    # Reserved words override the generic VARIABLE type.
    t.type = _RESERVED.get(t.value, 'VARIABLE')
    return t

def t_BUSWIDTH(t):
    # Either a bit range like [31:0] (either bound may be empty, matching the
    # original lexer), or a preprocessor macro reference like `PARAM.
    # The macro part now requires at least one name character and allows
    # digits after the first (e.g. `DATA_W8); the original pattern matched a
    # bare backtick and rejected digits entirely.
    r'(\[[0-9]*:[0-9]*\])|(\`[a-zA-Z_][a-zA-Z0-9_]*)'
    return t


def t_newline(t):
    r'\n+'
    # One match may cover several consecutive newlines.  Record the start
    # position of EVERY new line, not just the last one, so beginpos_line
    # is a complete lineno -> line-start map (the original only stored the
    # final line of a blank-line run).  The character after the newline at
    # t.lexpos + i starts line lineno + 1.
    for i in range(len(t.value)):
        t.lexer.lineno += 1
        beginpos_line[t.lexer.lineno] = t.lexpos + i + 1


# NOTE: t_ignore is a plain string of characters to skip, not a regex.
# Include tabs as well as spaces -- the original space-only value pushed
# tab characters through t_error instead.
t_ignore = ' \t'
# Line comment: '//' to end of line.
t_ignore_comment = r'//.*'
# Block comment on a single line.  Non-greedy so '/* a */ x /* b */' does
# not swallow the code between two comments (the original greedy '.*' did).
# Still does not span newlines: '.' excludes '\n'.
t_ignore_comment2 = r'/\*.*?\*/'

# Error handling rule

def t_error(t):
    """Handle an illegal character by silently skipping it.

    The lexer advances one position and continues; no diagnostic is
    emitted for unrecognized input.
    """
    t.lexer.skip(1)


# Build the lexer from the t_* rules defined above in this module
# (must run after all rule definitions).
lexer = lex.lex()

if __name__ == "__main__":
    # Embedded sample netlist.  In the original code this string was dead:
    # it was immediately overwritten by the contents of test.v.  It is now
    # used as a fallback so the demo runs even when test.v is absent.
    sample = '''
module a_module();
    reg [`PARAM] aa;
    reg a;
    reg b;
    reg [7:0] vect;
    reg [7:0] mem [31:0];

    wire w_c;
    wire [7:0] w_vect;
    wire [7:0] w_mem [31:0];

    logic l_c;
    logic [7:0] l_vect;
    logic [7:0] l_mem [31:0];

endmodule
    '''
    try:
        with open("test.v", 'r') as file:
            s = file.read()
    except FileNotFoundError:
        # No test.v on disk: tokenize the embedded sample instead.
        s = sample

    lexer.input(s)
    while True:
        tok: lex.LexToken = lexer.token()
        if not tok:
            break
        # Print token type, line number, and 1-based column
        # (column derived from the per-line start positions).
        print(tok.type, tok.lineno, tok.lexpos -
              beginpos_line[tok.lineno] + 1)
