  'STRUCT',
  'TYPEDEF',
  # Extra WebIDL keywords
  'CALLBACK',
  'DICTIONARY',
  'OPTIONAL',
  'STATIC',
  # Invented for apps use
  'NAMESPACE',
  # Data types
  'FLOAT',
  'OCT',
  'INT',
  'HEX',
  'STRING',
  # Operators
  'LSHIFT',
  'RSHIFT'
]
# 'keywords' is a map of string to token type. All SYMBOL tokens are
# matched against keywords, to determine if the token is actually a keyword.
keywords = {
  'describe' : 'DESCRIBE',
  'enum' : 'ENUM',
  'label' : 'LABEL',
  'interface' : 'INTERFACE',
  'readonly' : 'READONLY',
  'struct' : 'STRUCT',
  'typedef' : 'TYPEDEF',
  'callback' : 'CALLBACK',
  'dictionary' : 'DICTIONARY',
  'optional' : 'OPTIONAL',
  'static' : 'STATIC',
  'namespace' : 'NAMESPACE',
}
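# Illustrative example: the text 'interface' is re-typed as the INTERFACE
# keyword token, while an unlisted name such as 'Foo' keeps the generic
# SYMBOL type; see t_KEYWORD_SYMBOL below.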
# 'literals' is a value expected by lex which specifies a list of valid
# literal tokens, meaning the token type and token value are identical.
literals = '"*.(){}[],;:=+-/~|&^?'
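# For example, a ';' in the input is emitted as a token whose type and value
# are both ';'.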
# Token definitions
#
# Lex assumes any value or function in the form of 't_<TYPE>' represents a
# regular expression where a match will emit a token of type <TYPE>. In the
# case of a function, the function is called when a match is made. These
# definitions come from WebIDL.

# 't_ignore' is a special match of items to ignore
t_ignore = ' \t'
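# Note: for the function-based rules below, PLY takes the function's
# docstring (the raw string on the first line of the body) as the regular
# expression for that token type.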
# Constant values
t_FLOAT = r'-?(\d+\.\d*|\d*\.\d+)([Ee][+-]?\d+)?|-?\d+[Ee][+-]?\d+'
t_INT = r'-?[0-9]+[uU]?'
t_OCT = r'-?0[0-7]+'
t_HEX = r'-?0[Xx][0-9A-Fa-f]+'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
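# Illustrative matches: '1.5', '.5' and '2e10' fit the FLOAT pattern, '42u'
# the INT pattern, '0755' the OCT pattern, and '0x1F' the HEX pattern.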
# A line ending '\n'; we use this to increment the line number.
def t_LINE_END(self, t):
  r'\n+'
  self.AddLines(len(t.value))
# We do not process escapes in the IDL strings. Strings are exclusively
# used for attributes, and not used as typical 'C' constants.
def t_STRING(self, t):
  r'"[^"]*"'
  t.value = t.value[1:-1]
  self.AddLines(t.value.count('\n'))
  return t
# A C or C++ style comment: /* xxx */ or //
def t_COMMENT(self, t):
  r'(/\*(.|\n)*?\*/)|(//.*(\n[ \t]*//.*)*)'
  self.AddLines(t.value.count('\n'))
  return t
# Return a "preprocessor" inline block
def t_INLINE(self, t):
  r'\#inline (.|\n)*?\#endinl.*'
  self.AddLines(t.value.count('\n'))
  return t
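# Everything between an '#inline' marker and the matching '#endinl' line is
# captured verbatim as a single INLINE token.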
# A symbol or keyword.
def t_KEYWORD_SYMBOL(self, t):
  r'_?[A-Za-z][A-Za-z_0-9]*'
  # All non-keywords are assumed to be symbols.
  t.type = self.keywords.get(t.value, 'SYMBOL')
  # We strip the leading underscore so that you can specify symbols with the
  # same value as a keyword (e.g. a dictionary named 'interface').
  if t.value[0] == '_':
    t.value = t.value[1:]