    return t

  def t_ANY_error(self, t):
    msg = "Unrecognized input"
    line = self.lexobj.lineno

    # If that line has not been accounted for, then we must have hit
    # EoF, so compute the beginning of the line that caused the problem.
    if line >= len(self.index):
      # Find the offset in the line of the first word causing the issue
      word = t.value.split()[0]
      offs = self.lines[line - 1].find(word)
      # Add the computed line's starting position
      self.index.append(self.lexobj.lexpos - offs)
      msg = "Unexpected EoF reached after"

    pos = self.lexobj.lexpos - self.index[line]
    file = self.lexobj.filename
    out = self.ErrorMessage(file, line, pos, msg)
    sys.stderr.write(out + '\n')
    self.lex_errors += 1

  def AddLines(self, count):
    # Set the lexer position for the beginning of the next line.  In the case
    # of multiple lines, tokens can not exist on any of the lines except the
    # last one, so the recorded values for the previous lines are unused.  We
    # still fill the array, however, to make sure the line count is correct.
    self.lexobj.lineno += count
    for i in range(count):
      self.index.append(self.lexobj.lexpos)

  def FileLineMsg(self, file, line, msg):
    if file: return "%s(%d) : %s" % (file, line + 1, msg)
    return "<BuiltIn> : %s" % msg

  def SourceLine(self, file, line, pos):
    caret = '\t^'.expandtabs(pos)
    # We decrement the line number since the array is 0 based while the
    # line numbers are 1 based.
    return "%s\n%s" % (self.lines[line - 1], caret)

  def ErrorMessage(self, file, line, pos, msg):
    return "\n%s\n%s" % (
        self.FileLineMsg(file, line, msg),
        self.SourceLine(file, line, pos))

  def SetData(self, filename, data):
    # Start with line 1, not zero
    self.lexobj.lineno = 1
    self.lexobj.filename = filename
    self.lines = data.split('\n')
    self.index = [0]
    self.lexobj.input(data)
    self.lex_errors = 0

  def __init__(self):
    self.lexobj = lex.lex(object=self, lextab=None, optimize=0)

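#
# CountLexErrors (illustrative sketch, not part of the original module)
#
# A minimal usage example of the IDLLexer class above, assuming the token
# rules defined earlier in the class: SetData() loads a source string, the
# ply token loop pumps every token (unrecognized input routes through
# t_ANY_error, which prints a caret-annotated message and bumps lex_errors),
# and the error count is returned. The function name and the 'example.idl'
# filename are assumptions made for illustration only.
#
def CountLexErrors(source):
  lexer = IDLLexer()
  lexer.SetData('example.idl', source)
  while lexer.lexobj.token() is not None:
    pass
  return lexer.lex_errors
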
#
# FilesToTokens
#
# From a set of source file names, generate a list of tokens.
#
def FilesToTokens(filenames, verbose=False):
  lexer = IDLLexer()
  outlist = []
  for filename in filenames:
    data = open(filename).read()
    lexer.SetData(filename, data)
    if verbose: sys.stdout.write('  Loaded %s...\n' % filename)
    while 1:
      t = lexer.lexobj.token()
      if t is None: break
      outlist.append(t)
  return outlist

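#
# DumpTokens (illustrative sketch, not part of the original module)
#
# Example of consuming the output of FilesToTokens(): each ply LexToken
# carries .type, .value and .lineno, which is enough for a simple per-line
# listing. The function name is an assumption made for illustration only.
#
def DumpTokens(filenames):
  for tok in FilesToTokens(filenames):
    sys.stdout.write('%d: %s %s\n' % (tok.lineno, tok.type, tok.value))
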
def TokensFromText(text):
  lexer = IDLLexer()
  lexer.SetData('unknown', text)
  outlist = []
  while 1:
    t = lexer.lexobj.token()
    if t is None: break
    outlist.append(t.value)
  return outlist

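#
# SameTokens (illustrative sketch, not part of the original module)
#
# One natural use of TokensFromText(): check whether two blocks of source
# tokenize identically, ignoring whitespace and layout differences. The
# function name is an assumption made for illustration only.
#
def SameTokens(text_a, text_b):
  return TokensFromText(text_a) == TokensFromText(text_b)
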
#
# TextToTokens
#
# From a block of text, generate a list of tokens.
#
def TextToTokens(source):
  lexer = IDLLexer()
  outlist = []
  lexer.SetData('AUTO', source)
  while 1: