'''
Author: Donald duck tang5722917@163.com
Date: 2022-09-08 16:09:02
LastEditors: Donald duck tang5722917@163.com
LastEditTime: 2024-04-10 19:01:42
Description: 
Copyright (c) 2024 by Donald duck email: tang5722917@163.com, All Rights Reserved.
'''
import ply.lex as lex
from Net_syntax import Netlist_tokens
from Net_syntax import Netlist_control_tokens
from Net_syntax import Netlist_token_status

# PLY lexer states.  'inclusive' means each state's rules are added ON TOP
# of the default (INITIAL) rules rather than replacing them.
#   node  - expecting circuit node name(s) after an element token
#   vmagn - expecting a magnitude suffix (k, meg, u, ...)
#   vunit - expecting a unit (ohm, a, v, ...)
#   opt   - inside a control/option line
# Presumably switched by the Token_status helper (Netlist_token_status);
# the transitions themselves are not visible in this file -- confirm there.
states = (
    ('node','inclusive'),
    ('vmagn','inclusive'),
    ('vunit','inclusive'),
    ('opt','inclusive'),
)

# List of token names declared to PLY.  Every t_<NAME> string rule and
# t_<state>_<NAME> function rule below must use a name from this list.
tokens = [
    'NUMBER',
    'FLOAT_NUM',
    'UNITS',
    'MAGN',
    'SPACE',
    'PLUS',
    'MINUS',
    'TIMES',
    'DIVIDE',
    'LPAREN',
    'RPAREN',
    'Comment',
    # Netlist element cards (resistor, inductor, capacitor, V/I sources)
    'R_net_element',
    'L_net_element',
    'C_net_element',
    'VSource_net_element',
    'ISource_net_element',
    "CONTROL",
    "NODENAME",
    "OPTION"
    ]

# Regular expression rules for simple single-character tokens.
# NOTE(review): PLY tries function rules (defined below) before these
# string rules, and t_Comment also matches '*' -- so t_TIMES appears to
# be unreachable; confirm whether '*' is ever meant to be TIMES.
t_PLUS    = r'\+'
t_MINUS   = r'-'
t_TIMES   = r'\*'
t_DIVIDE  = r'/'
t_LPAREN  = r'\('
t_RPAREN  = r'\)'

def t_newline(t):
    r'\n+'
    # Track line numbers for error reporting.  Matching a RUN of newlines
    # (the PLY-documented idiom) emits one token per run; the original
    # r'\n' matched a single newline, making len(t.value) always 1.
    # No return value: the token is discarded.
    t.lexer.lineno += len(t.value)

# Inter-token whitespace (spaces and tabs).  The rule matches but returns
# nothing, so the token is silently discarded; newlines are handled
# separately by t_newline.
def t_SPACE(t):
    r'[ \t]+'
    
def t_Comment(t):
    r'[\#\*].*'
    # Netlist comment: '#' or '*' through end of line.  The token IS
    # returned (kept for downstream consumers), contrary to the stale
    # "token discarded" note the original carried after the return.
    # The redundant t.value = str(t.value) was dropped: PLY token values
    # are already the matched string.
    # NOTE(review): because function rules are matched before string
    # rules, a mid-line '*' also becomes a Comment, shadowing t_TIMES --
    # confirm that is intended.
    return t

# Error handling rule: report the offending character and skip one
# character so lexing continues instead of aborting.
def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)


def t_R_net_element(t):
    r'^r[0-9a-z_-]+'
    # Resistor card name, e.g. "r1", "r_load" (input is lower-cased by
    # net_syntax).  FIX: the original class [0-9a-z-_] made Python's re
    # treat "-_" as the range \x2d-\x5f, accidentally matching ':', '=',
    # '.', uppercase, etc.; '-' at the end of the class is a literal dash.
    # A resistor has two terminals: arm the state machine to expect two
    # NODENAME tokens next.
    t.lexer.token_state.set_node_number(2)
    t.lexer.token_state.next()
    return t

def t_L_net_element(t):
    r'^l[0-9a-z_-]+'
    # Inductor card name, e.g. "l1" (input is lower-cased).  FIX: same
    # character-class bug as the other element rules -- [0-9a-z-_] parsed
    # "-_" as a range; '-' at the end is a literal dash.
    # Two terminals: expect two NODENAME tokens next.
    t.lexer.token_state.set_node_number(2)
    t.lexer.token_state.next()
    return t

def t_C_net_element(t):
    r'^c[0-9a-z_-]+'
    # Capacitor card name, e.g. "c1" (input is lower-cased).  FIX: same
    # character-class bug as the other element rules -- [0-9a-z-_] parsed
    # "-_" as a range; '-' at the end is a literal dash.
    # Two terminals: expect two NODENAME tokens next.
    t.lexer.token_state.set_node_number(2)
    t.lexer.token_state.next()
    return t

def t_VSource_net_element(t):
    r'^v[0-9a-z_-]+'
    # Voltage-source card name, e.g. "v1" (input is lower-cased).  FIX:
    # same character-class bug as the other element rules -- [0-9a-z-_]
    # parsed "-_" as a range; '-' at the end is a literal dash.
    # Two terminals: expect two NODENAME tokens next.
    t.lexer.token_state.set_node_number(2)
    t.lexer.token_state.next()
    return t

def t_ISource_net_element(t):
    r'^i[0-9a-z_-]+'
    # Current-source card name, e.g. "i1" (input is lower-cased).  FIX:
    # same character-class bug as the other element rules -- [0-9a-z-_]
    # parsed "-_" as a range; '-' at the end is a literal dash.
    # Two terminals: expect two NODENAME tokens next.
    t.lexer.token_state.set_node_number(2)
    t.lexer.token_state.next()
    return t

def t_CONTROL(t):
    r'^\.[a-z_][a-z_0-9]*'
    # Control card (e.g. ".tran", ".options") at the start of the input.
    # Replace the token value with a (text, info) pair, where info comes
    # from the control-keyword lookup table.
    t.value = (t.value, Netlist_control_tokens.control_lookup(t.value))
    # Presumably switches the lexer into the 'opt' state -- confirm in
    # Netlist_token_status.Token_status.next_control().
    t.lexer.token_state.next_control()
    return t

# --- State-conditional rules (active only in the named lexer states) ---
def t_node_NODENAME(t):
    r'[a-z_0-9]+'
    # Circuit node name following an element token ('node' state).
    # node_counter() appears to count down the node names still expected
    # (set via set_node_number); next() advances the token-state machine.
    t.lexer.token_state.node_counter()
    t.lexer.token_state.next()
    return t

def t_opt_FLOAT_NUM(t):
    r'[-+]?([0-9]*\.[0-9]+|[0-9]+\.)'
    # Float literal in an option/value context ('opt' state).  Defined
    # before t_opt_NUMBER on purpose: PLY tries function rules in
    # definition order, so "1.5" is not split into "1" and ".5".
    # vmagn_match() presumably arms the 'vmagn' state so a magnitude
    # suffix (k, meg, ...) may follow -- confirm in Netlist_token_status.
    t.lexer.token_state.vmagn_match()
    return t

def t_opt_NUMBER(t):
    r'[-+]?[0-9]+'
    # Integer literal in an option/value context ('opt' state); floats
    # are captured first by t_opt_FLOAT_NUM (definition order matters).
    # vmagn_match() presumably arms the 'vmagn' state for a magnitude
    # suffix -- confirm in Netlist_token_status.
    t.lexer.token_state.vmagn_match()
    return t

def t_opt_OPTION(t):
    r'[a-z_0-9]+'
    # NOTE(review): there is no "return t", so OPTION tokens are matched
    # and then silently discarded even though 'OPTION' is declared in the
    # tokens list -- confirm whether a "return t" is missing here.

def t_vmagn_MAGN(t):
    r'meg|[kgtmunp]'
    # Engineering magnitude suffix ('vmagn' state).  "meg" must appear
    # before the single-letter alternatives so it is not lexed as 'm'.
    # magn_match() presumably moves on to expect a unit -- confirm in
    # Netlist_token_status.
    t.lexer.token_state.magn_match()
    return t
    
def t_vunit_vmagn_UNITS(t):
    r'ohm|[avhc]'
    # Unit suffix, active in both 'vunit' and 'vmagn' states: "ohm" or a
    # single letter (presumably amps/volts/henries/... -- confirm against
    # the netlist grammar).  "ohm" precedes the single letters so it is
    # not lexed as a lone 'h'.
    return t

def net_syntax(data, Debug_enable, logging):
    # Tokenize one chunk of netlist text and return the PLY tokens as a
    # list.  Input is lower-cased before lexing, so all rule regexes only
    # need lower-case letters.
    lexer = lex.lex()
    # Attach the per-lex state machine the rule functions drive via
    # t.lexer.token_state.
    lexer.token_state = Netlist_token_status.Token_status(lexer, Debug_enable, logging)
    lexer.input(data.lower())
    # Iterating a PLY lexer yields tokens until exhaustion.
    return list(lexer)

def get_net_line_ob(data, lineno, Debug_enable, logging):
    # Lex one netlist line and wrap the resulting token stream in a
    # Netlist_Tokens object tagged with its source line number.
    token_stream = net_syntax(data, Debug_enable, logging)
    return Netlist_tokens.Netlist_Tokens(token_stream, lineno, Debug_enable, logging)
