'''
Author: Donald duck tang5722917@163.com
Date: 2022-09-08 16:09:02
LastEditors: Donald duck tang5722917@163.com
LastEditTime: 2024-04-10 19:04:20
Description: PLY-based lexer rules for tokenizing SPICE-style netlist text.
Copyright (c) 2024 by Donald duck email: tang5722917@163.com, All Rights Reserved.
'''
import ply.lex as lex

# Names of every token kind this lexer may emit.
# NOTE(review): NUMBER and FLOAT_NUM are declared but no matching t_ rule
# exists in this file, so they are never actually produced — confirm intent.
tokens = [
    'NUMBER', 'FLOAT_NUM',                  # numeric literals (no rules yet)
    'SPACE',                                # whitespace (discarded by t_SPACE)
    'PLUS', 'MINUS', 'TIMES', 'DIVIDE',     # arithmetic operators
    'LPAREN', 'RPAREN',                     # grouping
    'ASSIGNMENT',                           # '='
    'Comment',                              # '#'/'*' to end of line
    'END', 'ENDC',                          # .end / .endc directives
    'OPTION',                               # bare lowercase identifiers
]

# Regular expression rules for simple single-character tokens.
# PLY tries all function rules (in definition order) before these strings.
t_PLUS    = r'\+'
t_MINUS   = r'-'
t_TIMES   = r'\*'  # NOTE(review): t_Comment also matches '*...' and function rules win, so this looks unreachable — confirm
t_DIVIDE  = r'/'
t_LPAREN  = r'\('
t_RPAREN  = r'\)'
t_ASSIGNMENT = r'\='

# Track line numbers so error reports can reference the right line.
def t_newline(t):
    r'\n+'
    # Match a whole run of newlines at once (standard PLY idiom); with the
    # old one-newline pattern r'\n', len(t.value) was always 1 and the
    # length-based increment was pointless.  No token is returned, so
    # newlines are discarded from the token stream.
    t.lexer.lineno += len(t.value)

# Spaces and tabs: matched so the lexer can step over them, but deliberately
# not returned, which drops the whitespace token from the output stream.
def t_SPACE(t):
    r'[ \t]+'
    return None
def t_Comment(t):
    r'[\#\*].*'
    # A comment runs from '#' or '*' to the end of the line and IS returned
    # as a token (the old trailing "Token discarded" comment was wrong, and
    # the str() round-trip was redundant — t.value is already a str slice
    # of the input).
    # NOTE(review): function rules match before string rules, so ANY '*'
    # starts a comment here, making t_TIMES unreachable — confirm whether
    # '*' comments should be restricted to line starts.
    return t

# Error handling rule: report the unrecognised character, then skip one
# character so lexing can continue with the rest of the input.
def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
def t_OPTION(t):
    r'[a-z_0-9]+'
    # Bare lowercase identifiers (input is lowercased by net_syntax before
    # lexing).  NOTE(review): pure digit runs also land here, since no
    # NUMBER rule exists in this file — confirm intent.
    return t

def t_END(t):
    r'\.end\b'
    # '.end' directive.  The previous pattern '^\.end' only ever matched at
    # the very start of the whole input: PLY matches its master regex with
    # re.match(data, pos) and no re.MULTILINE, so '^' never anchors at later
    # line starts and mid-file '.end' fell through to t_error.  The \b also
    # stops this rule from swallowing the '.end' prefix of '.endc'.
    return t

def t_ENDC(t):
    r'\.endc'
    # '.endc' directive.  The previous pattern '^\.endc' shared the dead '^'
    # anchor (see t_END): PLY matches without re.MULTILINE, so it could only
    # ever fire at position 0 of the input.
    return t

def net_syntax(data, Debug_enable, logging):
    """Tokenize *data* with the lexer rules defined in this module.

    data: netlist text; lowercased before lexing.
    Debug_enable, logging: accepted for interface compatibility; unused here.
    Returns the full list of LexToken objects produced for the input.
    """
    # Build a fresh lexer from this module's t_* rules on every call.
    lexer = lex.lex()
    lexer.input(data.lower())
    # Iterating the lexer yields tokens until EOF; collect them all.
    return list(lexer)
