query
stringlengths 9
60
| language
stringclasses 1
value | code
stringlengths 105
25.7k
| url
stringlengths 91
217
|
---|---|---|---|
concatenate several file remove header lines
|
python
|
def removeDuplicates(inFileName, outFileName):
    """Remove duplicated lines from the 'inFileName' CSV file.

    The first line (header/legend) is always kept and written first;
    the de-duplicated remaining lines are written to 'outFileName'.

    :param inFileName: path of the CSV file to read.
    :param outFileName: path of the CSV file to write.
    """
    # 'with' guarantees the handle is closed even on error; flush() on a
    # read-only handle (as in the original) was a no-op.
    with open(inFileName) as in_file:
        legend = in_file.readline()
        seen = {legend}  # set membership is O(1); dict.has_key() was Python 2 only
        unique_lines = []
        for line in in_file:  # stream instead of readlines() to bound memory
            if line not in seen:
                seen.add(line)
                unique_lines.append(line)
    with open(outFileName, 'w') as out_file:
        # single join instead of quadratic string concatenation
        out_file.write(legend + ''.join(unique_lines))
|
https://github.com/tariqdaouda/pyGeno/blob/474b1250bf78ce5c7e7c3bbbfdbad9635d5a7d14/pyGeno/tools/parsers/CSVTools.py#L14-L34
|
concatenate several file remove header lines
|
python
|
def header_without_lines(header, remove):
    """Return :py:class:`Header` without lines given in ``remove``.

    ``remove`` is an iterable of pairs ``key``/``ID`` with the VCF header key
    and ``ID`` of entry to remove. In the case that a line does not have
    a ``mapping`` entry, you can give the full value to remove.

    .. code-block:: python

        # header is a vcfpy.Header, e.g., as read earlier from file
        new_header = vcfpy.without_header_lines(
            header, [('assembly', None), ('FILTER', 'PASS')])
        # now, the header lines starting with "##assembly=" and the "PASS"
        # filter line will be missing from new_header
    """
    excluded = set(remove)

    def _keep(hline):
        # Mapping-style lines are identified by (key, ID); plain lines
        # by (key, value).
        if hasattr(hline, "mapping"):
            return (hline.key, hline.mapping.get("ID", None)) not in excluded
        return (hline.key, hline.value) not in excluded

    retained = [hline for hline in header.lines if _keep(hline)]
    return Header(retained, header.samples)
|
https://github.com/bihealth/vcfpy/blob/99e2165df30f11e0c95f3170f31bc5191d9e9e15/vcfpy/header.py#L227-L253
|
concatenate several file remove header lines
|
python
|
def remove(self):
    """Remove duplicate lines from text files.

    Reads ``self.filename``, keeps only the first occurrence of each
    line, and (unless a print-only option is active) rewrites the file
    in place. Duplicates may be echoed to stdout, optionally numbered.
    """
    # num: running line counter; sp: separator printed when numbering is on.
    num, sp, newfile = 0, "", []
    if os.path.isfile(self.filename):
        with open(self.filename, "r") as r:
            oldfile = r.read().splitlines()
        for line in oldfile:
            if self.number:
                num += 1
                sp = ": "
            if self.case_ins:
                # NOTE(review): the KEPT line is lowercased too, so
                # case-insensitive mode rewrites the file in lower case
                # -- confirm this is intended.
                line = line.lower()
            if self.ignore_blank and not line:
                # Blank lines are always preserved when ignore_blank is set.
                newfile.append(line)
            elif line not in newfile:
                # First occurrence: keep it. (Linear scan of newfile;
                # fine for small files.)
                newfile.append(line)
            else:
                # Duplicate: print it when the CLI flag (self.args[0])
                # matches one of the "show duplicates" options.
                # presumably options[4:5]/options[6:7] are print/preview
                # flags; verify against the class's options list.
                if (self.args[0] in self.options[4:5] or
                        self.args[0] in self.options[6:7]):
                    if num == 0:
                        # Numbering disabled: print without a counter.
                        num = str()
                    print("{0}{1}{2}".format(num, sp, line))
        if self.args[0] not in self.options[6:7]:
            # Rewrite the file unless the preview-only option is active.
            with open(self.filename, "w") as w:
                for line in newfile:
                    w.write(line + "\n")
    else:
        # File missing or not a regular file: report via error helper.
        self.not_access()
|
https://github.com/dslackw/dpline/blob/aef94abbbeb9d9286af4dfd7b8c10ac3b4cf3d7f/dpline/main.py#L53-L80
|
concatenate several file remove header lines
|
python
|
def remove_line_from_file(self,
                          line,
                          filename,
                          shutit_pexpect_child=None,
                          match_regexp=None,
                          literal=False,
                          note=None,
                          loglevel=logging.DEBUG):
    """Removes line from file, if it exists.
    Must be exactly the line passed in to match.
    Returns True if there were no problems, False if there were.

    @param line: Line to remove.
    @param filename Filename to remove it from.
    @param shutit_pexpect_child: See send()
    @param match_regexp: If supplied, a regexp to look for in the file
            instead of the line itself,
            handy if the line has awkward characters in it.
    @param literal: If true, then simply grep for the exact string without
            bash interpretation. (Default: False)
    @param note: See send()

    @type line: string
    @type filename: string
    @type match_regexp: string
    @type literal: boolean

    @return: True if the line was matched and deleted, False otherwise.
    @rtype: boolean
    """
    # Cooperative hook for ShutIt's drawing/scheduling loop.
    shutit_global.shutit_global_object.yield_to_draw()
    # Default to the current session's pexpect child when none is given.
    shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
    shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
    # Thin wrapper: the real work happens on the pexpect session object.
    return shutit_pexpect_session.remove_line_from_file(line,filename,match_regexp=match_regexp,literal=literal,note=note,loglevel=loglevel)
|
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L1300-L1333
|
concatenate several file remove header lines
|
python
|
def remove_line_with(cls, lines, what):
    """
    Return all lines that do not contain ``what``.

    :param lines: iterable of strings to filter
    :param what: substring; any line containing it is dropped
    :return: list of the lines that do not contain ``what``
    """
    # A comprehension is O(n); the previous ``result = result + [line]``
    # rebuilt the whole list on every kept line, which is quadratic.
    return [line for line in lines if what not in line]
|
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L584-L595
|
concatenate several file remove header lines
|
python
|
def remove_line_from_file(self,
                          line,
                          filename,
                          match_regexp=None,
                          literal=False,
                          note=None,
                          loglevel=logging.DEBUG):
    """Removes line from file, if it exists.
    Must be exactly the line passed in to match.
    Returns True if there were no problems, False if there were.

    @param line: Line to remove.
    @param filename Filename to remove it from.
    @param match_regexp: If supplied, a regexp to look for in the file
            instead of the line itself,
            handy if the line has awkward characters in it.
    @param literal: If true, then simply grep for the exact string without
            bash interpretation. (Default: False)
    @param note: See send()

    @type line: string
    @type filename: string
    @type match_regexp: string
    @type literal: boolean

    @return: True if the line was matched and deleted, False otherwise.
    @rtype: boolean

    NOTE(review): despite the docstring, every code path below returns
    True -- confirm whether callers rely on a False return.
    """
    shutit = self.shutit
    shutit.handle_note(note)
    # assume we're going to add it
    tmp_filename = '/tmp/' + shutit_util.random_id()
    if self.file_exists(filename):
        # Strategy: grep -v the line/regexp into a temp file, then copy
        # the temp file back over the original. grep exits 1 when nothing
        # matches, hence exit_values ['0','1'].
        # NOTE(review): `line`/`filename` are interpolated into a shell
        # command unquoted beyond the surrounding quotes -- awkward
        # characters (quotes, $) could break or alter the command.
        if literal:
            if match_regexp is None:
                # v the space is intentional, to avoid polluting bash history.
                # Single quotes suppress bash interpretation of the line.
                self.send(ShutItSendSpec(self,
                                         send=""" grep -v '^""" + line + """$' """ + filename + ' > ' + tmp_filename,
                                         exit_values=['0','1'],
                                         echo=False,
                                         loglevel=loglevel,
                                         ignore_background=True))
            else:
                if not shutit_util.check_regexp(match_regexp):
                    shutit.fail('Illegal regexp found in remove_line_from_file call: ' + match_regexp) # pragma: no cover
                # v the space is intentional, to avoid polluting bash history.
                self.send(ShutItSendSpec(self,
                                         send=""" grep -v '^""" + match_regexp + """$' """ + filename + ' > ' + tmp_filename,
                                         exit_values=['0','1'],
                                         echo=False,
                                         loglevel=loglevel,
                                         ignore_background=True))
        else:
            if match_regexp is None:
                # v the space is intentional, to avoid polluting bash history.
                # Double quotes allow bash interpretation of the line.
                self.send(ShutItSendSpec(self,
                                         send=' command grep -v "^' + line + '$" ' + filename + ' > ' + tmp_filename,
                                         exit_values=['0','1'],
                                         echo=False,
                                         loglevel=loglevel,
                                         ignore_background=True))
            else:
                if not shutit_util.check_regexp(match_regexp):
                    shutit.fail('Illegal regexp found in remove_line_from_file call: ' + match_regexp) # pragma: no cover
                # v the space is intentional, to avoid polluting bash history.
                self.send(ShutItSendSpec(self,
                                         send=' command grep -v "^' + match_regexp + '$" ' + filename + ' > ' + tmp_filename,
                                         exit_values=['0','1'],
                                         echo=False,
                                         loglevel=loglevel,
                                         ignore_background=True))
        # Copy the filtered content back over the original file
        # (cat > preserves the original file's permissions/ownership).
        self.send(ShutItSendSpec(self,
                                 send=' command cat ' + tmp_filename + ' > ' + filename,
                                 check_exit=False,
                                 echo=False,
                                 loglevel=loglevel,
                                 ignore_background=True))
        # Clean up the temp file.
        self.send(ShutItSendSpec(self,
                                 send=' command rm -f ' + tmp_filename,
                                 exit_values=['0','1'],
                                 echo=False,
                                 loglevel=loglevel,
                                 ignore_background=True))
    shutit.handle_note_after(note=note)
    return True
|
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_pexpect.py#L2568-L2652
|
concatenate several file remove header lines
|
python
|
def remove_line(self, section, line):
    """Remove every instance of ``line`` from ``section``.

    Returns:
        int: the number of lines removed (0 if the section is unknown)
    """
    try:
        target = self._get_section(section, create=False)
    except KeyError:
        # Unknown section: nothing to remove.
        return 0
    return target.remove(line)
|
https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L448-L460
|
concatenate several file remove header lines
|
python
|
def removed(self):
    """
    Return the total number of deleted lines in the file.

    :return: int lines_deleted
    """
    # Normalize CRLF first, then count '-' lines that are not the
    # '---' file-header marker of a unified diff.
    normalized = self.diff.replace('\r', '')
    return sum(1 for entry in normalized.split("\n")
               if entry.startswith('-') and not entry.startswith('---'))
|
https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/domain/commit.py#L118-L128
|
concatenate several file remove header lines
|
python
|
def remove_lines(lines, remove=('[[back to top]', '<a class="mk-toclify"')):
    """Removes existing [back to top] links and <a id> tags."""
    # An empty/falsy ``remove`` means: keep everything (as a copy).
    if not remove:
        return lines[:]
    # str.startswith accepts a tuple of prefixes, so one call filters all.
    return [entry for entry in lines if not entry.startswith(remove)]
|
https://github.com/rasbt/markdown-toclify/blob/517cde672bebda5371130b87c1fcb7184d141a02/markdown_toclify/markdown_toclify.py#L41-L52
|
concatenate several file remove header lines
|
python
|
def main(args):
    """Remove lines after marker.

    args[0] is the filename, args[1] the marker prefix. The file is
    rewritten in place: lines up to and including the first line that
    starts with the marker are kept; everything after is dropped.
    """
    filename = args[0]
    marker = args[1]
    # inplace=1 redirects stdout into the file while iterating it.
    for line in fileinput.input(filename, inplace=1):
        # NOTE: rstrip() removes all trailing whitespace, not just the newline.
        print(line.rstrip())
        if line.startswith(marker):
            # Stop copying: the remaining lines are never written back.
            break
|
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/scripts/removeafter.py#L9-L16
|
concatenate several file remove header lines
|
python
|
def lines(self: object, fileids: str, plaintext: bool = True):
    """
    Tokenizes documents in the corpus by line.

    Blank (whitespace-only) runs of newlines are collapsed before the
    text is split, so no empty lines are yielded for them.
    """
    for text in self.texts(fileids, plaintext):
        # BUG FIX: re.sub's 4th positional argument is ``count``, not
        # ``flags`` -- passing re.MULTILINE there silently limited the
        # substitution to the first 8 matches (re.MULTILINE == 8).
        text = re.sub(r'\n\s*\n', '\n', text, flags=re.MULTILINE)  # Remove blank lines
        for line in text.split('\n'):
            yield line
|
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/readers.py#L444-L452
|
concatenate several file remove header lines
|
python
|
def remove(self, line):
    """Delete all lines matching the given line.

    Delegates to each block and returns the total number of deletions.
    """
    return sum(block.remove(line) for block in self.blocks)
|
https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L227-L233
|
concatenate several file remove header lines
|
python
|
def strip_headers(text):
    """Remove lines that are part of the Project Gutenberg header or footer.

    Note: this function is a port of the C++ utility by Johannes Krugel. The
    original version of the code can be found at:
    http://www14.in.tum.de/spp1307/src/strip_headers.cpp

    Args:
        text (unicode): The body of the text to clean up.

    Returns:
        unicode: The text with any non-text content removed.
    """
    lines = text.splitlines()
    sep = str(os.linesep)
    out = []
    i = 0  # count of lines actually emitted so far (not input line number)
    footer_found = False
    ignore_section = False
    for line in lines:
        reset = False
        # Headers only appear within the first ~600 emitted lines.
        if i <= 600:
            # Check if the header ends here
            if any(line.startswith(token) for token in TEXT_START_MARKERS):
                reset = True
            # If it's the end of the header, delete the output produced so far.
            # May be done several times, if multiple lines occur indicating the
            # end of the header
            if reset:
                out = []
                continue
        # Footers only appear after at least ~100 emitted lines.
        if i >= 100:
            # Check if the footer begins here
            if any(line.startswith(token) for token in TEXT_END_MARKERS):
                footer_found = True
            # If it's the beginning of the footer, stop output
            if footer_found:
                break
        # Legalese sections are skipped wholesale between their markers.
        if any(line.startswith(token) for token in LEGALESE_START_MARKERS):
            ignore_section = True
            continue
        elif any(line.startswith(token) for token in LEGALESE_END_MARKERS):
            ignore_section = False
            continue
        if not ignore_section:
            # NOTE(review): rstrip(sep) strips any trailing characters from
            # the set in os.linesep (e.g. both '\r' and '\n'), not the exact
            # suffix -- presumably intended here, but worth confirming.
            out.append(line.rstrip(sep))
            i += 1
    return sep.join(out)
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/cleanup/strip_headers.py#L14-L70
|
concatenate several file remove header lines
|
python
|
def uncomment_lines(lines):
    """Uncomment the given list of lines and return them. The first hash mark
    following any amount of whitespace will be removed on each line."""
    # The module-level regex splits each line into (whitespace prefix,
    # text after the first '#', discarded remainder group).
    pieces = []
    for raw in lines:
        indent, body, _unused = RE_LINE_SPLITTER_UNCOMMENT.match(raw).groups()
        pieces.append(indent + body)
    return ''.join(pieces)
|
https://github.com/neovim/pynvim/blob/5e577188e6d7133f597ad0ce60dc6a4b1314064a/scripts/logging_statement_modifier.py#L136-L143
|
concatenate several file remove header lines
|
python
|
def fix_header(filepath):
    """Removes leading whitespace from a MacOS header file.

    This whitespace is causing issues with directives on some platforms.
    """
    with open(filepath, "r+") as f:
        original = f.read()
        cleaned = "\n".join(map(str.strip, original.split("\n")))
        if original == cleaned:
            # Already clean: leave the file untouched.
            return
        # Rewrite in place through the same handle.
        f.seek(0)
        f.truncate()
        f.write(cleaned)
|
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/build_libtcod.py#L145-L157
|
concatenate several file remove header lines
|
python
|
def uncommented_lines(self, filename, use_sudo=False):
    """
    Get the lines of a remote file, ignoring empty or commented ones.

    :param filename: remote path, read via ``cat``.
    :param use_sudo: when True, run the command as root.
    :return: list of non-empty lines not starting with '#'; empty list
        if the remote command failed.
    """
    # Pick the runner: root shell or the instance's normal runner.
    func = run_as_root if use_sudo else self.run
    # quote() protects against spaces/special chars in the path.
    res = func('cat %s' % quote(filename), quiet=True)
    if res.succeeded:
        # NOTE: only lines whose FIRST character is '#' are dropped;
        # lines with leading whitespace before '#' are kept.
        return [line for line in res.splitlines() if line and not line.startswith('#')]
    return []
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/files.py#L286-L294
|
concatenate several file remove header lines
|
python
|
def _remove_boring_lines(text):
"""Remove lines that do not start with a letter or a quote.
From inspecting the data, this seems to leave in most prose and remove
most weird stuff.
Args:
text: a string
Returns:
a string
"""
lines = text.split("\n")
filtered = [line for line in lines if re.match("[a-zA-z\"\']", line)]
return "\n".join(filtered)
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wiki_revision_utils.py#L365-L378
|
concatenate several file remove header lines
|
python
|
def remove_header(self, name):
    """Remove a field from the header.

    Checks every header section and pops ``name`` from each one where
    it appears, logging each removal.
    """
    sections = (
        (self.info_dict, 'INFO'),
        (self.filter_dict, 'FILTER'),
        (self.format_dict, 'FORMAT'),
        (self.contig_dict, 'CONTIG'),
        (self.alt_dict, 'ALT'),
        (self.other_dict, 'OTHER'),
    )
    for mapping, label in sections:
        if name in mapping:
            mapping.pop(name)
            logger.info("Removed '{0}' from {1}".format(name, label))
    return
|
https://github.com/moonso/vcftoolbox/blob/438fb1d85a83812c389774b94802eb5921c89e3a/vcftoolbox/header_parser.py#L249-L269
|
concatenate several file remove header lines
|
python
|
def remove_line_interval(input_file: str, delete_line_from: int,
                         delete_line_to: int, output_file: str):
    r"""Remove a line interval.

    :parameter input_file: the file that needs to be read.
    :parameter delete_line_from: the line number from which start deleting.
    :parameter delete_line_to: the line number to which stop deleting.
    :parameter output_file: the file that needs to be written without the
        selected lines.
    :type input_file: str
    :type delete_line_from: int
    :type delete_line_to: int
    :type output_file: str
    :returns: None
    :raises: LineOutOfFileBoundsError or a built-in exception.

    .. note::
        Line numbers start from ``1``.

    .. note::
        It is possible to remove a single line only. This happens when
        the parameters delete_line_from and delete_line_to are equal.
    """
    # NOTE(review): asserts are stripped under ``python -O``; explicit
    # ValueError raises would be more robust for argument validation.
    assert delete_line_from >= 1
    assert delete_line_to >= 1
    with open(input_file, 'r') as f:
        lines = f.readlines()
    # Invalid line ranges.
    # Base case delete_line_to - delete_line_from == 0: single line.
    if delete_line_to - delete_line_from < 0:
        raise NegativeLineRangeError
    if delete_line_from > len(lines) or delete_line_to > len(lines):
        raise LineOutOfFileBoundsError
    line_counter = 1
    # Rewrite the file without the string.
    # atomic_write avoids leaving a partially-written output on failure.
    with atomic_write(output_file, overwrite=True) as f:
        for line in lines:
            # Ignore the line interval where the content to be deleted lies.
            if line_counter >= delete_line_from and line_counter <= delete_line_to:
                pass
            # Write the rest of the file.
            else:
                f.write(line)
            line_counter += 1
|
https://github.com/frnmst/fpyutils/blob/74a9e15af4020248dda5ec6d25e05571c7717f20/fpyutils/filelines.py#L167-L213
|
concatenate several file remove header lines
|
python
|
def rm(self, line):
    """
    Remove all occurrences of 'line' from contents
    where 'line' is an entire line or a list of lines.
    Return true if the file was changed by rm(), False otherwise.
    Multi-line strings are converted to a list delimited by new lines.

    :param line: String, or List of Strings; each string represents an entire line to be removed from file.
    :return: Boolean, whether contents were changed.
    """
    self.log('rm({0})'.format(line))
    if line is False:
        # Explicit False means "nothing to remove" (supports chained calls).
        return False
    if isinstance(line, str):
        # Multi-line strings become a list of individual lines.
        line = line.split('\n')
    if not isinstance(line, list):
        raise TypeError("Parameter 'line' not a 'string' or 'list', is {0}".format(type(line)))
    local_changes = False
    for this in line:
        if this in self.contents:
            # Remove EVERY occurrence, logging the position of each.
            while this in self.contents:
                self.log('Removed "{0}" from position {1}'.format(this, self.contents.index(this)))
                self.contents.remove(this)
                self.changed = local_changes = True
        else:
            self.log('"{0}" not in {1}'.format(this, self.filename))
    if self.sorted and local_changes:
        # Keep contents sorted if this file is flagged as sorted.
        self.sort()
    return local_changes
|
https://github.com/jhazelwo/python-fileasobj/blob/4bdbb575e75da830b88d10d0c1020d787ceba44d/fileasobj/__init__.py#L158-L188
|
concatenate several file remove header lines
|
python
|
def remove_indent_lines(s):
    """
    :param str s:
    :return: remove as much indentation as possible
    :rtype: str
    """
    if not s:
        return ""
    lines = s.splitlines(True)
    prefix = get_same_indent_prefix(lines)
    if prefix is None:
        # Not uniformly indented: fall back to stripping every line.
        return "".join(ln.lstrip() for ln in lines)
    # Uniform prefix found: cut exactly that many characters off each line.
    cut = len(prefix)
    return "".join(ln[cut:] for ln in lines)
|
https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L478-L490
|
concatenate several file remove header lines
|
python
|
def uncomment_line(line, prefix):
    """Remove prefix (and space) from line"""
    if not prefix:
        return line
    # Prefer stripping "prefix + space"; fall back to the bare prefix.
    with_space = prefix + ' '
    if line.startswith(with_space):
        return line[len(with_space):]
    if line.startswith(prefix):
        return line[len(prefix):]
    return line
|
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/header.py#L40-L48
|
concatenate several file remove header lines
|
python
|
def RemoveMultiLineCommentsFromRange(lines, begin, end):
    """Clears a range of lines for multi-line comments."""
    # Having // dummy comments makes the lines non-empty, so we will not get
    # unnecessary blank line warnings later in the code.
    lines[begin:end] = ['/**/'] * (end - begin)
|
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L1554-L1559
|
concatenate several file remove header lines
|
python
|
def _remove_trailing_new_line(l):
    """Remove a single instance of new line at the end of l if it exists.

    Returns:
        bytestring
    """
    # Try the longest candidate first so b"\r\n" wins over b"\n".
    for candidate in sorted(new_lines_bytes, key=len, reverse=True):
        if l.endswith(candidate):
            return l[:-len(candidate)]
    return l
|
https://github.com/RobinNil/file_read_backwards/blob/e56443095b58aae309fbc43a0943eba867dc8500/file_read_backwards/buffer_work_space.py#L146-L158
|
concatenate several file remove header lines
|
python
|
def truncate(args):
    """
    %prog truncate linecount filename

    Remove linecount lines from the end of the file in-place. Borrowed from:
    <http://superuser.com/questions/127786/how-to-remove-the-last-2-lines-of-a-very-large-file>
    """
    p = OptionParser(truncate.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    number, filename = args
    number = int(number)
    count = 0
    # Scan backwards one byte at a time counting newlines; truncate at the
    # (number+1)-th newline from the end. 'with' closes the handle (the
    # original leaked it).
    with open(filename, "r+b") as f:
        f.seek(0, os.SEEK_END)
        while f.tell() > 0:
            f.seek(-1, os.SEEK_CUR)
            char = f.read(1)
            # BUG FIX: the file is opened in binary mode, so read(1) yields
            # bytes; comparing against the str '\n' never matched on
            # Python 3, making the function a no-op that returned -1.
            if char == b'\n':
                count += 1
            if count == number + 1:
                f.truncate()
                print("Removed {0} lines from end of file".format(number), file=sys.stderr)
                return number
            f.seek(-1, os.SEEK_CUR)
    if count < number + 1:
        print("No change: requested removal would leave empty file", file=sys.stderr)
        return -1
|
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/base.py#L608-L641
|
concatenate several file remove header lines
|
python
|
def RemoveMultiLineComments(filename, lines, error):
    """Removes multiline (c-style) comments from lines.

    Mutates ``lines`` in place, blanking each /* ... */ span with '/**/'
    placeholders. Reports an error and stops if a comment is unterminated.
    """
    lineix = 0
    while lineix < len(lines):
        # Find the next '/*' at or after lineix; len(lines) means "none found".
        lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
        if lineix_begin >= len(lines):
            return
        lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
        if lineix_end >= len(lines):
            # Unterminated comment: report and stop processing the file.
            error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
                  'Could not find end of multi-line comment')
            return
        # Blank the whole span, inclusive of the closing line.
        RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
        lineix = lineix_end + 1
|
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L1562-L1575
|
concatenate several file remove header lines
|
python
|
def rmlinenumber(linenumber, infile, dryrun=False):
    """
    Sed-like line deletion function based on given line number..

    Usage: pysed.rmlinenumber(<Unwanted Line Number>, <Text File>)
    Example: pysed.rmlinenumber(10, '/path/to/file.txt')
    Example 'DRYRUN': pysed.rmlinenumber(10, '/path/to/file.txt', dryrun=True)
    #This will dump the output to STDOUT instead of changing the input file.
    """
    linelist = []
    linecounter = 0
    # BUG FIX: the guard was inverted -- it exited when linenumber WAS an
    # int, so every valid call aborted and invalid ones proceeded.
    if not isinstance(linenumber, int):
        exit("""'linenumber' argument must be an integer.""")
    with open(infile) as reader:
        for item in reader:
            linecounter = linecounter + 1
            # Keep every line except the unwanted one (1-based).
            if linecounter != linenumber:
                linelist.append(item)
    if dryrun is False:
        # Rewrite the input file in place without the removed line.
        with open(infile, "w") as writer:
            writer.truncate()
            for line in linelist:
                writer.writelines(line)
    elif dryrun is True:
        # Dry run: dump the would-be contents to stdout instead.
        for line in linelist:
            print(line, end='')
    else:
        exit("""Unknown option specified to 'dryrun' argument,
Usage: dryrun=<True|False>.""")
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/pysed.py#L73-L102
|
concatenate several file remove header lines
|
python
|
def remove_blank_lines(self):
    """Remove all blank lines (blank lines are those with zero characters)."""
    # NOTE(review): because of the strip() below, lines containing only
    # whitespace are removed as well -- the docstring undersells this.
    to_remove = []
    for i, line in enumerate(self):
        ln = line.strip()
        if ln == '':
            to_remove.append(i)
    # Delegate the deletion so all indices are handled consistently.
    self.delete_lines(to_remove)
|
https://github.com/exa-analytics/exa/blob/40fb3c22b531d460dbc51e603de75b856cc28f0d/exa/core/editor.py#L151-L158
|
concatenate several file remove header lines
|
python
|
def split_header(fp):
    """
    Read file pointer and return pair of lines lists:
    first - header, second - the rest.
    """
    collected = []
    header_len = 0
    in_header = True
    for raw in fp:
        if in_header and raw.startswith('#'):
            # Still inside the leading run of '#' comment lines.
            header_len += 1
        else:
            # First non-comment line permanently ends the header.
            in_header = False
        collected.append(raw)
    return collected[:header_len], collected[header_len:]
|
https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L181-L195
|
concatenate several file remove header lines
|
python
|
def remove_bad():
    """Remove non-increasing BED lines which will cause variant callers to choke.
    Also fixes space separated BED inputs.

    Reads BED records from stdin; writes sanitized, tab-separated records
    to stdout.
    """
    for line in sys.stdin:
        parts = line.strip().split("\t")
        # Fall back to any-whitespace splitting for space-separated input.
        if len(parts) == 1 and len(line.strip().split()) > 1:
            parts = line.strip().split()
        # Keep only records whose end coordinate (col 3) is strictly greater
        # than the start (col 2); drops empty or malformed lines too.
        if line.strip() and len(parts) > 2 and int(parts[2]) > int(parts[1]):
            sys.stdout.write("\t".join(parts) + "\n")
|
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bedutils.py#L134-L144
|
concatenate several file remove header lines
|
python
|
def remove_page_boundary_lines(docbody):
    """Try to locate page breaks, headers and footers within a document body,
    and remove the array cells at which they are found.

    @param docbody: (list) of strings, each string being a line in the
        document's body.
    @return: (list) of strings. The document body, hopefully with page-
        breaks, headers and footers removed. Each string in the list once more
        represents a line in the document.
    """
    # NOTE(review): these initial zeros are immediately overwritten below.
    number_head_lines = number_foot_lines = 0
    # Make sure document not just full of whitespace:
    if not document_contains_text(docbody):
        # document contains only whitespace - cannot safely
        # strip headers/footers
        return docbody
    # Get list of index posns of pagebreaks in document:
    page_break_posns = get_page_break_positions(docbody)
    # Get num lines making up each header if poss:
    number_head_lines = get_number_header_lines(docbody, page_break_posns)
    # Get num lines making up each footer if poss:
    number_foot_lines = get_number_footer_lines(docbody, page_break_posns)
    # Remove pagebreaks,headers,footers:
    docbody = strip_headers_footers_pagebreaks(docbody,
                                               page_break_posns,
                                               number_head_lines,
                                               number_foot_lines)
    return docbody
|
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/text.py#L186-L217
|
concatenate several file remove header lines
|
python
|
def delete_line(self):
    """Delete current line (or every line touched by the selection)."""
    cursor = self.textCursor()
    if self.has_selected_text():
        # Grow the selection so whole lines are covered before deleting.
        self.extend_selection_to_complete_lines()
        start_pos, end_pos = cursor.selectionStart(), cursor.selectionEnd()
        cursor.setPosition(start_pos)
    else:
        start_pos = end_pos = cursor.position()
    # beginEditBlock/endEditBlock group the edits into a single undo step.
    cursor.beginEditBlock()
    cursor.setPosition(start_pos)
    cursor.movePosition(QTextCursor.StartOfBlock)
    while cursor.position() <= end_pos:
        # Extend selection to the end of the current block, then step into
        # the next block so the newline is included -- unless we are at the
        # very end of the document.
        cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)
        if cursor.atEnd():
            break
        cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)
    cursor.removeSelectedText()
    cursor.endEditBlock()
    self.ensureCursorVisible()
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/base.py#L1017-L1036
|
concatenate several file remove header lines
|
python
|
def __remove_trailing_empty_lines(lines):
    """
    Removes trailing empty lines from a list of lines.

    (The original docstring said "leading", but the double reversal below
    makes this the trailing-line counterpart of
    ``__remove_leading_empty_lines``.)

    :param list[str] lines: The lines.
    """
    # Reverse in place, strip what are now "leading" empties, then restore
    # the original order of both the input list and the result.
    lines.reverse()
    tmp = DocBlockReflection.__remove_leading_empty_lines(lines)
    lines.reverse()
    tmp.reverse()
    return tmp
|
https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/DocBlockReflection.py#L111-L122
|
concatenate several file remove header lines
|
python
|
def filter_lines(input_file, output_file, translate=lambda line, **kwargs: line):
    """Translate all the lines of a single file.

    :param input_file: path of the file to read (via ``get_lines``).
    :param output_file: currently unused; kept for interface compatibility.
    :param translate: callable invoked as ``translate(line=..., tag=...)``.
    :return: (filepath, [(tag, translated_line), ...])
    """
    # BUG FIX: the previous default ``lambda line: line`` raised TypeError
    # when invoked below with the extra ``tag=`` keyword; the default now
    # accepts (and ignores) additional keyword arguments.
    filepath, lines = get_lines([input_file])[0]
    return filepath, [(tag, translate(line=line, tag=tag)) for (tag, line) in lines]
|
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book_parser.py#L407-L410
|
concatenate several file remove header lines
|
python
|
def _remove_header(self, data, options):
'''Remove header from data'''
version_info = self._get_version_info(options['version'])
header_size = version_info['header_size']
if options['flags']['timestamp']:
header_size += version_info['timestamp_size']
data = data[header_size:]
return data
|
https://github.com/vingd/encrypted-pickle-python/blob/7656233598e02e65971f69e11849a0f288b2b2a5/encryptedpickle/encryptedpickle.py#L623-L634
|
concatenate several file remove header lines
|
python
|
def _cut_line(line):
"""Split the line on whitespaces and remove empty chunks
:param line: the line to split
:type line: str
:return: list of strings
:rtype: list
"""
# punct = '"#$%&\'()*+/<=>?@[\\]^`{|}~'
if re.search("([\t\n\r]+|[\x0b\x0c ]{3,})+", line):
tmp = re.split("([\t\n\r]+|[\x0b\x0c ]{3,})+", line, 1)
else:
tmp = re.split("[" + string.whitespace + "]+", line, 1)
res = [elt.strip() for elt in tmp if elt.strip() != '']
return res
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/config.py#L1053-L1067
|
concatenate several file remove header lines
|
python
|
def convert_line_endings(filename: str, to_unix: bool = False,
                         to_windows: bool = False) -> None:
    """
    Converts a file (in place) from UNIX to Windows line endings, or the
    reverse.

    Args:
        filename: filename to modify (in place)
        to_unix: convert Windows (CR LF) to UNIX (LF)
        to_windows: convert UNIX (LF) to Windows (CR LF)
    """
    assert to_unix != to_windows
    with open(filename, "rb") as f:
        contents = f.read()
    windows_eol = b"\r\n"  # CR LF
    unix_eol = b"\n"  # LF
    if to_unix:
        log.info("Converting from Windows to UNIX line endings: {!r}",
                 filename)
        src = windows_eol
        dst = unix_eol
    else:  # to_windows
        log.info("Converting from UNIX to Windows line endings: {!r}",
                 filename)
        src = unix_eol
        dst = windows_eol
        # BUG FIX: this "already converted" guard previously ran for BOTH
        # directions, so converting TO Unix was skipped for any file that
        # contained a Windows line ending (i.e. every file that actually
        # needed converting). The guard only makes sense when converting
        # to Windows, where a second pass would turn CR LF into CR CR LF.
        if windows_eol in contents:
            log.info("... already contains at least one Windows line ending; "
                     "probably converted before; skipping")
            return
    contents = contents.replace(src, dst)
    with open(filename, "wb") as f:
        f.write(contents)
|
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L427-L459
|
concatenate several file remove header lines
|
python
|
def remove_header(self, name):
    """
    Remove header (case-insensitive)
    return True if header removed, False otherwise
    """
    target = name.lower()
    # Scan backwards so deleting by index is safe; only the last matching
    # header is removed before returning.
    for index in reversed(range(len(self.headers))):
        if self.headers[index][0].lower() == target:
            del self.headers[index]
            return True
    return False
|
https://github.com/webrecorder/warcio/blob/c64c4394805e13256695f51af072c95389397ee9/warcio/statusandheaders.py#L64-L75
|
concatenate several file remove header lines
|
python
|
def replace_lines(html_file, transformed):
    """Replace lines in the old file with the transformed lines.

    :param html_file: path of the HTML file to read (UTF-8).
    :param transformed: iterable of (attr, value, new_link) triples; a line
        containing both ``attr`` and ``value`` has ``value`` replaced with
        ``new_link``.
    :return: the whole rewritten file contents as one string.
    """
    result = []
    with codecs.open(html_file, 'r', 'utf-8') as input_file:
        for line in input_file:
            # replace all single quotes with double quotes
            line = re.sub(r'\'', '"', line)
            for attr, value, new_link in transformed:
                if attr in line and value in line:
                    # replace old link with new staticfied link
                    # Only the FIRST matching triple is applied per line.
                    new_line = line.replace(value, new_link)
                    result.append(new_line)
                    break
            else:
                # for/else: no triple matched, keep the line unchanged.
                result.append(line)
    return ''.join(result)
|
https://github.com/danidee10/Staticfy/blob/ebc555b00377394b0f714e4a173d37833fec90cb/staticfy/staticfy.py#L92-L111
|
concatenate several file remove header lines
|
python
|
def remove_blank_lines(string):
    """ Removes all blank lines in @string

        -> #str without blank lines
    """
    # A line is blank when it strips to nothing (whitespace-only counts).
    kept = [ln for ln in string.split("\n") if ln.strip()]
    return "\n".join(kept)
|
https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/tools/strings.py#L214-L221
|
concatenate several file remove header lines
|
python
|
def header_remove(self, value):
    """Automatically remove specified HTTP header from the response.

    :param str|unicode value: name of the header to strip.
    """
    # Maps to uWSGI's 'del-header' option; multi=True allows repeating it.
    self._set('del-header', value, multi=True)
    return self._section
|
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/routing.py#L386-L394
|
concatenate several file remove header lines
|
python
|
def lines(input):
    """Remove comments and empty lines"""
    for raw_line in input:
        stripped = raw_line.strip()
        # Skip blank lines and lines that are entirely a comment.
        if not stripped or stripped.startswith('#'):
            continue
        # Trailing inline comments are removed by the module helper.
        yield strip_comments(stripped)
|
https://github.com/abalkin/tz/blob/f25fca6afbf1abd46fd7aeb978282823c7dab5ab/tzdata-pkg/zic/zic.py#L20-L25
|
concatenate several file remove header lines
|
python
|
def text_remove_empty_lines(text):
    """
    Whitespace normalization:

      - Strip empty lines
      - Strip trailing whitespace
    """
    kept = (line.rstrip() for line in text.splitlines() if line.strip())
    return "\n".join(kept)
|
https://github.com/e7dal/bubble3/blob/59c735281a95b44f6263a25f4d6ce24fca520082/behave4cmd0/textutil.py#L164-L172
|
concatenate several file remove header lines
|
python
|
def lines(self):
    """
    Array of all the lines.
    """
    # Cache, because this one is reused very often.
    if self._cache.lines is None:
        # Split on '\n' only; wrapped in an immutable list type so callers
        # cannot mutate the cached value.
        self._cache.lines = _ImmutableLineList(self.text.split('\n'))
    return self._cache.lines
|
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/document.py#L167-L175
|
concatenate several file remove header lines
|
python
|
def _extract_header_number(lines):
"""
Extracts the number of header lines from the second line of the ODF file
"""
pair = _extract_header_value(lines[1])
value_list = list(pair.values())
return int(value_list[0])
|
https://github.com/genepattern/genepattern-python/blob/9478ea65362b91c72a94f7300c3de8d710bebb71/gp/data.py#L315-L321
|
concatenate several file remove header lines
|
python
|
def uncomment(lines, prefix='#'):
"""Remove prefix and space, or only prefix, when possible"""
if not prefix:
return lines
prefix_and_space = prefix + ' '
length_prefix = len(prefix)
length_prefix_and_space = len(prefix_and_space)
return [line[length_prefix_and_space:] if line.startswith(prefix_and_space)
else (line[length_prefix:] if line.startswith(prefix) else line)
for line in lines]
|
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/cell_reader.py#L24-L33
|
concatenate several file remove header lines
|
python
|
def trim(lines):
"""
Remove lines at the start and at the end of the given `lines` that are :class:`~taxi.timesheet.lines.TextLine`
instances and don't have any text.
"""
trim_top = None
trim_bottom = None
_lines = lines[:]
for (lineno, line) in enumerate(_lines):
if hasattr(line, 'is_text_line') and line.is_text_line and not line.text.strip():
trim_top = lineno
else:
break
for (lineno, line) in enumerate(reversed(_lines)):
if hasattr(line, 'is_text_line') and line.is_text_line and not line.text.strip():
trim_bottom = lineno
else:
break
if trim_top is not None:
_lines = _lines[trim_top + 1:]
if trim_bottom is not None:
trim_bottom = len(_lines) - trim_bottom - 1
_lines = _lines[:trim_bottom]
return _lines
|
https://github.com/liip/taxi/blob/269423c1f1ab571bd01a522819afe3e325bfbff6/taxi/timesheet/utils.py#L21-L49
|
concatenate several file remove header lines
|
python
|
def lines(self):
"""List of file lines."""
if self._lines is None:
with io.open(self.path, 'r', encoding='utf-8') as fh:
self._lines = fh.read().split('\n')
return self._lines
|
https://github.com/dmgass/baseline/blob/1f7988e8c9fafa83eb3a1ce73b1601d2afdbb2cd/baseline/_script.py#L89-L95
|
concatenate several file remove header lines
|
python
|
def set_header(self, msg):
""" Set second head line text """
self.s.move(1, 0)
self.overwrite_line(msg, attr=curses.A_NORMAL)
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L393-L396
|
concatenate several file remove header lines
|
python
|
def remove_headers(headers, name):
"""Remove all headers with name *name*.
The list is modified in-place and the updated list is returned.
"""
i = 0
name = name.lower()
for j in range(len(headers)):
if headers[j][0].lower() != name:
if i != j:
headers[i] = headers[j]
i += 1
del headers[i:]
return headers
|
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/http.py#L363-L376
|
concatenate several file remove header lines
|
python
|
def remove_fpaths(fpaths, verbose=VERBOSE, quiet=QUIET, strict=False,
print_caller=PRINT_CALLER, lbl='files'):
"""
Removes multiple file paths
"""
import utool as ut
if print_caller:
print(util_dbg.get_caller_name(range(1, 4)) + ' called remove_fpaths')
n_total = len(fpaths)
_verbose = (not quiet and n_total > 0) or VERYVERBOSE
if _verbose:
print('[util_path.remove_fpaths] try removing %d %s' % (n_total, lbl))
n_removed = 0
prog = ut.ProgIter(fpaths, label='removing files', enabled=verbose)
_iter = iter(prog)
# Try to be fast at first
try:
for fpath in _iter:
os.remove(fpath)
n_removed += 1
except OSError as ex:
# Buf if we fail put a try in the inner loop
if VERYVERBOSE:
print('WARNING: Could not remove fpath = %r' % (fpath,))
if strict:
util_dbg.printex(ex, 'Could not remove fpath = %r' % (fpath,),
iswarning=False)
raise
for fpath in _iter:
try:
os.remove(fpath)
n_removed += 1
except OSError as ex:
if VERYVERBOSE:
print('WARNING: Could not remove fpath = %r' % (fpath,))
if _verbose:
print('[util_path.remove_fpaths] ... removed %d / %d %s' % (
n_removed, n_total, lbl))
return n_removed
|
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L464-L502
|
concatenate several file remove header lines
|
python
|
def delete_lines(self):
"""
Deletes the document lines under cursor.
:return: Method success.
:rtype: bool
"""
cursor = self.textCursor()
self.__select_text_under_cursor_blocks(cursor)
cursor.removeSelectedText()
cursor.deleteChar()
return True
|
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/basic_QPlainTextEdit.py#L597-L609
|
concatenate several file remove header lines
|
python
|
def _remove_empty_lines(self, lines):
"""
Iterate through the lines and remove any that are
either empty or contain only one whitespace value
Parameters
----------
lines : array-like
The array of lines that we are to filter.
Returns
-------
filtered_lines : array-like
The same array of lines with the "empty" ones removed.
"""
ret = []
for l in lines:
# Remove empty lines and lines with only one whitespace value
if (len(l) > 1 or len(l) == 1 and
(not isinstance(l[0], str) or l[0].strip())):
ret.append(l)
return ret
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L2912-L2934
|
concatenate several file remove header lines
|
python
|
def _split_vlines_hlines(lines):
"""Separates lines into horizontal and vertical ones"""
vlines, hlines = [], []
for line in lines:
(vlines if line.x1 - line.x0 < 0.1 else hlines).append(line)
return vlines, hlines
|
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/grid.py#L190-L195
|
concatenate several file remove header lines
|
python
|
def copy_all_lines_from_to(inputFile, outputFile):
"""Copy all lines from an input file object to an output file object."""
currentLine = inputFile.readline()
while currentLine:
outputFile.write(currentLine)
currentLine = inputFile.readline()
|
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/util.py#L371-L376
|
concatenate several file remove header lines
|
python
|
def fix_e303(self, result):
"""Remove extra blank lines."""
delete_linenum = int(result['info'].split('(')[1].split(')')[0]) - 2
delete_linenum = max(1, delete_linenum)
# We need to count because pycodestyle reports an offset line number if
# there are comments.
cnt = 0
line = result['line'] - 2
modified_lines = []
while cnt < delete_linenum and line >= 0:
if not self.source[line].strip():
self.source[line] = ''
modified_lines.append(1 + line) # Line indexed at 1
cnt += 1
line -= 1
return modified_lines
|
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L804-L821
|
concatenate several file remove header lines
|
python
|
def clean(self):
""" Remove intermediate files created.
"""
#TODO: add cleaning of mask files, *if* created ...
for f in self.catalog_names:
if 'match' in f:
if os.path.exists(self.catalog_names[f]):
log.info('Deleting intermediate match file: %s'%
self.catalog_names[f])
os.remove(self.catalog_names[f])
else:
for extn in f:
if os.path.exists(extn):
log.info('Deleting intermediate catalog: %d'%extn)
os.remove(extn)
|
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/imgclasses.py#L1066-L1080
|
concatenate several file remove header lines
|
python
|
def FlagCxx14Features(filename, clean_lines, linenum, error):
"""Flag those C++14 features that we restrict.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
# Flag unapproved C++14 headers.
if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):
error(filename, linenum, 'build/c++14', 5,
('<%s> is an unapproved C++14 header.') % include.group(1))
|
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L6022-L6038
|
concatenate several file remove header lines
|
python
|
def remove_headers(self, header_name):
""" Remove header by its name
:param header_name: name of header to remove
:return: None
"""
if self.__ro_flag:
raise RuntimeError('ro')
header_name = self.normalize_name(header_name)
if header_name in self.__headers.keys():
self.__headers.pop(header_name)
|
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/web/headers.py#L83-L93
|
concatenate several file remove header lines
|
python
|
def remove_initial_spaces_and_mark_message_lines(lines):
"""
Removes the initial spaces in each line before marking message lines.
This ensures headers can be identified if they are indented with spaces.
"""
i = 0
while i < len(lines):
lines[i] = lines[i].lstrip(' ')
i += 1
return mark_message_lines(lines)
|
https://github.com/mailgun/talon/blob/cdd84563dd329c4f887591807870d10015e0c7a7/talon/quotations.py#L219-L229
|
concatenate several file remove header lines
|
python
|
def getlines(filename, module_globals=None):
"""Get the lines for a file from the cache.
Update the cache if it doesn't contain an entry for this file already."""
if filename in cache:
return cache[filename][2]
try:
return updatecache(filename, module_globals)
except MemoryError:
clearcache()
return []
|
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/linecache.py#L33-L44
|
concatenate several file remove header lines
|
python
|
def remove_blank_lines(source):
"""
Removes blank lines from *source* and returns the result.
Example:
.. code-block:: python
test = "foo"
test2 = "bar"
Will become:
.. code-block:: python
test = "foo"
test2 = "bar"
"""
io_obj = io.StringIO(source)
source = [a for a in io_obj.readlines() if a.strip()]
return "".join(source)
|
https://github.com/liftoff/pyminifier/blob/087ea7b0c8c964f1f907c3f350f5ce281798db86/pyminifier/minification.py#L378-L399
|
concatenate several file remove header lines
|
python
|
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
|
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L1578-L1591
|
concatenate several file remove header lines
|
python
|
def cleanLines(source, lineSep=os.linesep):
"""
:param source: some iterable source (list, file, etc)
:param lineSep: string of separators (chars) that must be removed
:return: list of non empty lines with removed separators
"""
stripped = (line.strip(lineSep) for line in source)
return (line for line in stripped if len(line) != 0)
|
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/storage/store_utils.py#L4-L11
|
concatenate several file remove header lines
|
python
|
def _remove_coding_header(cls, blob):
"""
There is a bug in ast.parse that cause it to throw a syntax error if
you have a header similar to...
# coding=utf-8,
we replace this line with something else to bypass the bug.
:param blob: file text contents
:return: adjusted blob
"""
# Remove the # coding=utf-8 to avoid AST erroneous parse errors
# https://bugs.python.org/issue22221
lines = blob.decode('utf-8').split('\n')
if lines and 'coding=utf-8' in lines[0]:
lines[0] = '#remove coding'
return '\n'.join(lines).encode('ascii', errors='replace')
|
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/contrib/python/src/python/pants/contrib/python/checks/checker/common.py#L88-L103
|
concatenate several file remove header lines
|
python
|
def remove_line_breaks(text):
"""Remove line breaks from input.
Including unicode 'line separator', 'paragraph separator',
and 'next line' characters.
"""
return unicode(text, 'utf-8').replace('\f', '').replace('\n', '') \
.replace('\r', '').replace(u'\xe2\x80\xa8', '') \
.replace(u'\xe2\x80\xa9', '').replace(u'\xc2\x85', '') \
.encode('utf-8')
|
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/text.py#L486-L495
|
concatenate several file remove header lines
|
python
|
def filelines(fname,strip=False):
'''read lines from a file into lines...optional strip'''
with open(fname,'r') as f:
lines = f.readlines();
if strip:
lines[:] = [line.strip() for line in lines]
return lines;
|
https://github.com/noobermin/pys/blob/e01b74210c65eb96d019bb42e0a3c9e6676da943/pys/__init__.py#L69-L75
|
concatenate several file remove header lines
|
python
|
def remove_comments(text):
"""
Remove comments from a zonefile
"""
ret = []
lines = text.split("\n")
for line in lines:
if len(line) == 0:
continue
line = serialize(tokenize_line(line))
ret.append(line)
return "\n".join(ret)
|
https://github.com/blockstack/zone-file-py/blob/c1078c8c3c28f0881bc9a3af53d4972c4a6862d0/blockstack_zones/parse_zone_file.py#L182-L195
|
concatenate several file remove header lines
|
python
|
def do_cat(self, line):
"""cat FILENAME...
Concatenates files and sends to stdout.
"""
# note: when we get around to supporting cat from stdin, we'll need
# to write stdin to a temp file, and then copy the file
# since we need to know the filesize when copying to the pyboard.
args = self.line_to_args(line)
for filename in args:
filename = resolve_path(filename)
mode = auto(get_mode, filename)
if not mode_exists(mode):
print_err("Cannot access '%s': No such file" % filename)
continue
if not mode_isfile(mode):
print_err("'%s': is not a file" % filename)
continue
cat(filename, self.stdout)
|
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L2015-L2033
|
concatenate several file remove header lines
|
python
|
def preprocess(content, options):
# type: (Text, Optional[optparse.Values]) -> ReqFileLines
"""Split, filter, and join lines, and return a line iterator
:param content: the content of the requirements file
:param options: cli options
"""
lines_enum = enumerate(content.splitlines(), start=1) # type: ReqFileLines
lines_enum = join_lines(lines_enum)
lines_enum = ignore_comments(lines_enum)
lines_enum = skip_regex(lines_enum, options)
lines_enum = expand_env_variables(lines_enum)
return lines_enum
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/req/req_file.py#L116-L128
|
concatenate several file remove header lines
|
python
|
def __remove_leading_empty_lines(lines):
"""
Removes leading empty lines from a list of lines.
:param list[str] lines: The lines.
"""
tmp = list()
empty = True
for i in range(0, len(lines)):
empty = empty and lines[i] == ''
if not empty:
tmp.append(lines[i])
return tmp
|
https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/DocBlockReflection.py#L94-L107
|
concatenate several file remove header lines
|
python
|
def CheckForHeaderGuard(filename, clean_lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
clean_lines: A CleansedLines instance containing the file.
error: The function to call with any errors found.
"""
# Don't check for header guards if there are error suppression
# comments somewhere in this file.
#
# Because this is silencing a warning for a nonexistent line, we
# only support the very specific NOLINT(build/header_guard) syntax,
# and not the general NOLINT or NOLINT(*) syntax.
raw_lines = clean_lines.lines_without_raw_strings
for i in raw_lines:
if Search(r'//\s*NOLINT\(build/header_guard\)', i):
return
# Allow pragma once instead of header guards
for i in raw_lines:
if Search(r'^\s*#pragma\s+once', i):
return
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = ''
ifndef_linenum = 0
define = ''
endif = ''
endif_linenum = 0
for linenum, line in enumerate(raw_lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef or not define or ifndef != define:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
# Check for "//" comments on endif line.
ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
error)
match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
if match:
if match.group(1) == '_':
# Issue low severity warning for deprecated double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif // %s"' % cppvar)
return
# Didn't find the corresponding "//" comment. If this file does not
# contain any "//" comments at all, it could be that the compiler
# only wants "/**/" comments, look for those instead.
no_single_line_comments = True
for i in xrange(1, len(raw_lines) - 1):
line = raw_lines[i]
if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
no_single_line_comments = False
break
if no_single_line_comments:
match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
if match:
if match.group(1) == '_':
# Low severity warning for double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif /* %s */"' % cppvar)
return
# Didn't find anything
error(filename, endif_linenum, 'build/header_guard', 5,
'#endif line should be "#endif // %s"' % cppvar)
|
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L1986-L2086
|
concatenate several file remove header lines
|
python
|
def get_lines_without_comments(filename: str) -> List[str]:
"""
Reads a file, and returns all lines as a list, left- and right-stripping
the lines and removing everything on a line after the first ``#``.
NOTE: does not cope well with quoted ``#`` symbols!
"""
lines = []
with open(filename) as f:
for line in f:
line = line.partition('#')[0] # the part before the first #
line = line.rstrip()
line = line.lstrip()
if line:
lines.append(line)
return lines
|
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L136-L150
|
concatenate several file remove header lines
|
python
|
def head(self, lines=10):
"""
Return the top lines of the file.
"""
self.file.seek(0)
for i in range(lines):
if self.seek_next_line() == -1:
break
end_pos = self.file.tell()
self.file.seek(0)
data = self.file.read(end_pos)
for t in self.LINE_TERMINATORS:
if data.endswith(t):
# Only terminators _between_ lines should be preserved.
# Otherwise terminator of the last line will be treated as separtaing line and empty line.
data = data[:-len(t)]
break
if data:
return self.splitlines(data)
else:
return []
|
https://github.com/GreatFruitOmsk/tailhead/blob/a3b1324a39935f8ffcfda59328a9a458672889d9/tailhead/__init__.py#L222-L247
|
concatenate several file remove header lines
|
python
|
def clear_line(self, lines=None, bitmask=None,
leave_remaining_lines=False):
"""
The inverse of activate_line. If a line is active, it deactivates it.
This has the same parameters as activate_line()
"""
if lines is None and bitmask is None:
raise ValueError('Must set one of lines or bitmask')
if lines is not None and bitmask is not None:
raise ValueError('Can only set one of lines or bitmask')
if bitmask is not None:
if bitmask not in range(0, 256):
raise ValueError('bitmask must be an integer between '
'0 and 255')
if lines is not None:
if not isinstance(lines, list):
lines = [lines]
bitmask = 0
for l in lines:
if l < 1 or l > 8:
raise ValueError('Line numbers must be between 1 and 8 '
'(inclusive)')
bitmask |= self._lines[l]
self.con.clear_digital_output_lines(bitmask, leave_remaining_lines)
|
https://github.com/cedrus-opensource/pyxid/blob/02dba3a825f0d4f4c0bfa044c6a361492e4c25b6/pyxid/pyxid_impl.py#L289-L317
|
concatenate several file remove header lines
|
python
|
def files(self):
"""Grab files
"""
for line in self.SLACKBUILDS_TXT.splitlines():
if line.startswith(self.line_name):
sbo_name = line[17:].strip()
if line.startswith(self.line_files):
if sbo_name == self.name:
return line[18:].strip()
|
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/sbo/greps.py#L150-L158
|
concatenate several file remove header lines
|
python
|
def cleanup_lines( lines, **kwargs ):
''' Cleans up annotation after syntactic pre-processing and processing:
-- Removes embedded clause boundaries "<{>" and "<}>";
-- Removes CLBC markings from analysis;
-- Removes additional information between < and > from analysis;
-- Removes additional information between " and " from analysis;
-- If remove_caps==True , removes 'cap' annotations from analysis;
-- If remove_clo==True , removes CLO CLC CLB markings from analysis;
-- If double_quotes=='esc' then " will be overwritten with \\";
and
if double_quotes=='unesc' then \\" will be overwritten with ";
-- If fix_sent_tags=True, then sentence tags (<s> and </s>) will be
checked for mistakenly added analysis, and found analysis will be
removed;
Returns the input list, which has been cleaned from additional information;
'''
if not isinstance( lines, list ):
raise Exception('(!) Unexpected type of input argument! Expected a list of strings.')
remove_caps = False
remove_clo = False
double_quotes = None
fix_sent_tags = False
for argName, argVal in kwargs.items() :
if argName in ['remove_caps', 'remove_cap']:
remove_caps = bool(argVal)
if argName == 'remove_clo':
remove_clo = bool(argVal)
if argName == 'fix_sent_tags':
fix_sent_tags = bool(argVal)
if argName in ['double_quotes', 'quotes'] and argVal and \
argVal.lower() in ['esc', 'escape', 'unesc', 'unescape']:
double_quotes = argVal.lower()
pat_token_line = re.compile('^"<(.+)>"\s*$')
pat_analysis_start = re.compile('^(\s+)"(.+)"(\s[LZT].*)$')
i = 0
to_delete = []
while ( i < len(lines) ):
line = lines[i]
isAnalysisLine = line.startswith(' ') or line.startswith('\t')
if not isAnalysisLine:
removeCurrentTokenAndAnalysis = False
# 1) Remove embedded clause boundaries "<{>" and "<}>"
if line.startswith('"<{>"'):
if i+1 == len(lines) or (i+1 < len(lines) and not '"{"' in lines[i+1]):
removeCurrentTokenAndAnalysis = True
if line.startswith('"<}>"'):
if i+1 == len(lines) or (i+1 < len(lines) and not '"}"' in lines[i+1]):
removeCurrentTokenAndAnalysis = True
if removeCurrentTokenAndAnalysis:
# Remove the current token and all the subsequent analyses
del lines[i]
j=i
while ( j < len(lines) ):
line2 = lines[j]
if line2.startswith(' ') or line2.startswith('\t'):
del lines[j]
else:
break
continue
# 2) Convert double quotes (if required)
if double_quotes:
# '^"<(.+)>"\s*$'
if pat_token_line.match( lines[i] ):
token_cleaned = (pat_token_line.match(lines[i])).group(1)
# Escape or unescape double quotes
if double_quotes in ['esc', 'escape']:
token_cleaned = token_cleaned.replace('"', '\\"')
lines[i] = '"<'+token_cleaned+'>"'
elif double_quotes in ['unesc', 'unescape']:
token_cleaned = token_cleaned.replace('\\"', '"')
lines[i] = '"<'+token_cleaned+'>"'
else:
# Normalize analysis line
lines[i] = re.sub('^\s{4,}', '\t', lines[i])
# Remove clause boundary markings
lines[i] = re.sub('(.*)" ([LZT].*) CLBC (.*)', '\\1" \\2 \\3', lines[i])
# Remove additional information that was added during the analysis
lines[i] = re.sub('(.*)" L([^"<]*) ["<]([^@]*) (@.*)', '\\1" L\\2 \\4', lines[i])
# Remove 'cap' tags
if remove_caps:
lines[i] = lines[i].replace(' cap ', ' ')
# Convert double quotes (if required)
if double_quotes and double_quotes in ['unesc', 'unescape']:
lines[i] = lines[i].replace('\\"', '"')
elif double_quotes and double_quotes in ['esc', 'escape']:
m = pat_analysis_start.match( lines[i] )
if m:
# '^(\s+)"(.+)"(\s[LZT].*)$'
start = m.group(1)
content = m.group(2)
end = m.group(3)
content = content.replace('"', '\\"')
lines[i] = ''.join([start, '"', content, '"', end])
# Remove CLO CLC CLB markings
if remove_clo and 'CL' in lines[i]:
lines[i] = re.sub('\sCL[OCB]', ' ', lines[i])
lines[i] = re.sub('\s{2,}', ' ', lines[i])
# Fix sentence tags that mistakenly could have analysis (in EDT corpus)
if fix_sent_tags:
if i-1 > -1 and ('"</s>"' in lines[i-1] or '"<s>"' in lines[i-1]):
lines[i] = ''
i += 1
return lines
|
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/vislcg3_syntax.py#L312-L415
|
concatenate several file remove header lines
|
python
|
def remap_file_lines(from_path, to_path, line_map_list):
"""Adds line_map list to the list of association of from_file to
to to_file"""
from_path = pyc2py(from_path)
cache_file(to_path)
remap_entry = file2file_remap_lines.get(to_path)
if remap_entry:
new_list = list(remap_entry.from_to_pairs) + list(line_map_list)
else:
new_list = line_map_list
# FIXME: look for duplicates ?
file2file_remap_lines[to_path] = RemapLineEntry(
from_path,
tuple(sorted(new_list, key=lambda t: t[0]))
)
return
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L419-L434
|
concatenate several file remove header lines
|
python
|
def remove(self, header):
"""Remove a header from the set. This raises an :exc:`KeyError` if the
header is not in the set.
.. versionchanged:: 0.5
In older versions a :exc:`IndexError` was raised instead of a
:exc:`KeyError` if the object was missing.
:param header: the header to be removed.
"""
key = header.lower()
if key not in self._set:
raise KeyError(header)
self._set.remove(key)
for idx, key in enumerate(self._headers):
if key.lower() == header:
del self._headers[idx]
break
if self.on_update is not None:
self.on_update(self)
|
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/datastructures.py#L2045-L2064
|
concatenate several file remove header lines
|
python
|
def generate_lines(self, infile):
""" Split file into lines
return dict with line=input, depth=n
"""
pound = '#'
for line in infile:
heading = 0
if line.startswith(pound):
heading = self.hash_count(line)
yield dict(line=line, heading=heading)
|
https://github.com/openbermuda/ripl/blob/4886b1a697e4b81c2202db9cb977609e034f8e70/ripl/md2py.py#L40-L54
|
concatenate several file remove header lines
|
python
|
def cleanup(dest, tardir, counterfile):
"""Remove existing rules"""
thefiles = Queue()
# dest directory files
queue_files(dest, thefiles)
# tar directory files
queue_files(tardir, thefiles)
while not thefiles.empty():
d_file = thefiles.get()
info("Deleting file: %s" % d_file)
os.unlink(d_file)
if os.path.exists(counterfile):
info("Deleting the counter file %s" % counterfile)
os.unlink(counterfile)
|
https://github.com/akissa/sachannelupdate/blob/a1c3c3d86b874f9c92c2407e2608963165d3ae98/sachannelupdate/base.py#L208-L221
|
concatenate several file remove header lines
|
python
|
def remove_newlines(xml):
r"""Remove newlines in the xml.
If the newline separates words in text, then replace with a space instead.
>>> remove_newlines('<p>para one</p>\n<p>para two</p>')
'<p>para one</p><p>para two</p>'
>>> remove_newlines('<p>line one\nline two</p>')
'<p>line one line two</p>'
>>> remove_newlines('one\n1')
'one 1'
>>> remove_newlines('hey!\nmore text!')
'hey! more text!'
"""
# Normalize newlines.
xml = xml.replace('\r\n', '\n')
xml = xml.replace('\r', '\n')
# Remove newlines that don't separate text. The remaining ones do separate text.
xml = re.sub(r'(?<=[>\s])\n(?=[<\s])', '', xml)
xml = xml.replace('\n', ' ')
return xml.strip()
|
https://github.com/christian-oudard/htmltreediff/blob/0e28f56492ae7e69bb0f74f9a79a8909a5ad588d/htmltreediff/util.py#L90-L110
|
concatenate several file remove header lines
|
python
|
def erase_line (self): # <ESC>[2K
'''Erases the entire current line.'''
self.fill_region (self.cur_r, 1, self.cur_r, self.cols)
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/screen.py#L388-L391
|
concatenate several file remove header lines
|
python
|
def rmlinematch(oldstr, infile, dryrun=False):
"""
Sed-like line deletion function based on given string..
Usage: pysed.rmlinematch(<Unwanted string>, <Text File>)
Example: pysed.rmlinematch('xyz', '/path/to/file.txt')
Example:
'DRYRUN': pysed.rmlinematch('xyz', '/path/to/file.txt', dryrun=True)
This will dump the output to STDOUT instead of changing the input file.
"""
linelist = []
with open(infile) as reader:
for item in reader:
rmitem = re.match(r'.*{}'.format(oldstr), item)
# if isinstance(rmitem) == isinstance(None): Not quite sure the intent here
if rmitem is None:
linelist.append(item)
if dryrun is False:
with open(infile, "w") as writer:
writer.truncate()
for line in linelist:
writer.writelines(line)
elif dryrun is True:
for line in linelist:
print(line, end='')
else:
exit("""Unknown option specified to 'dryrun' argument,
Usage: dryrun=<True|False>.""")
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/pysed.py#L42-L70
|
concatenate several file remove header lines
|
python
|
def discard(self, s):
"""
Discard from original file.
"""
lines = s.splitlines(True)
for line in lines:
if line[-1] not in '\r\n':
if not self.warn:
logger.warning(
'partial line discard UNSUPPORTED; source map '
'generated will not match at the column level'
)
self.warn = True
else:
# simply increment row
self.row += 1
|
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/vlqsm.py#L92-L108
|
concatenate several file remove header lines
|
python
|
def generate_lines(self, infile):
""" Split file into lines
return dict with line=input, depth=n
"""
pound = '#'
for line in infile:
heading = self.hash_count(line)
indent = self.hash_count(line, pound=' ')
yield dict(line=line.strip(),
heading=heading,
indent=indent)
|
https://github.com/openbermuda/ripl/blob/4886b1a697e4b81c2202db9cb977609e034f8e70/ripl/md2slides.py#L44-L59
|
concatenate several file remove header lines
|
python
|
def clean(self):
""" Remove intermediate files created
"""
if not util.is_blank(self.catalog.catname) and os.path.exists(self.catalog.catname):
os.remove(self.catalog.catname)
|
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/imgclasses.py#L1443-L1447
|
concatenate several file remove header lines
|
python
|
def split_lines(lines):
"""
split a MagIC upload format file into lists.
the lists are split by the '>>>' lines between file_types.
"""
container = []
new_list = []
for line in lines:
if '>>>' in line:
container.append(new_list)
new_list = []
else:
new_list.append(line)
container.append(new_list)
return container
|
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/validate_upload2.py#L250-L264
|
concatenate several file remove header lines
|
python
|
def remove(self):
    """Remove this file.

    Returns 1 when an unlink was performed, None when the node neither
    exists nor is a symlink (nothing to do).
    """
    if not (self.exists() or self.islink()):
        return None
    self.fs.unlink(self.get_internal_path())
    return 1
|
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/FS.py#L3074-L3079
|
concatenate several file remove header lines
|
python
|
def _removeHeaderTag(header, tag):
"""Removes a tag from the beginning of a header string.
:param header: str
:param tag: str
:returns: (str, bool), header without the tag and a bool that indicates
wheter the tag was present.
"""
if header.startswith(tag):
tagPresent = True
header = header[len(tag):]
else:
tagPresent = False
return header, tagPresent
|
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/_proteindb_refactoring.py#L448-L461
|
concatenate several file remove header lines
|
python
|
def remove_header_search_paths(self, paths, target_name=None, configuration_name=None):
    """
    Removes the given search paths from the HEADER_SEARCH_PATHS section of the target on the configurations
    :param paths: A string or array of strings
    :param target_name: Target name or list of target names to remove the flag from or None for every target
    :param configuration_name: Configuration name to remove the flag from or None for every configuration
    :return: void
    """
    # Thin wrapper: delegates to the generic search-path remover, selecting
    # the HEADER_SEARCH_PATHS build-setting key.
    self.remove_search_paths(XCBuildConfigurationFlags.HEADER_SEARCH_PATHS, paths, target_name, configuration_name)
|
https://github.com/kronenthaler/mod-pbxproj/blob/8de3cbdd3210480ddbb1fa0f50a4f4ea87de6e71/pbxproj/pbxextensions/ProjectFlags.py#L129-L137
|
concatenate several file remove header lines
|
python
|
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
  """Check rules that are applicable to #include lines.

  Strings on #include lines are NOT removed from elided line, to make
  certain tasks easier. However, to prevent false positives, checks
  applicable to #include lines in CheckLanguage must be put here.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  fileinfo = FileInfo(filename)
  line = clean_lines.lines[linenum]

  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
  # Only do this check if the included header follows google naming
  # conventions. If not, assume that it's a 3rd party API that
  # requires special include conventions.
  #
  # We also make an exception for Lua headers, which follow google
  # naming convention but not the include convention.
  match = Match(r'#include\s*"([^/]+\.h)"', line)
  if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
    error(filename, linenum, 'build/include_subdir', 4,
          'Include the directory when naming .h files')

  # we shouldn't include a file more than once. actually, there are a
  # handful of instances where doing so is okay, but in general it's
  # not.
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    include = match.group(2)
    # Angle brackets (<...>) indicate a system header include.
    is_system = (match.group(1) == '<')
    duplicate_line = include_state.FindHeader(include)
    if duplicate_line >= 0:
      # Header already seen earlier in this file: report and stop here.
      error(filename, linenum, 'build/include', 4,
            '"%s" already included at %s:%s' %
            (include, filename, duplicate_line))
      return
    for extension in GetNonHeaderExtensions():
      if (include.endswith('.' + extension) and
          os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
        # Including an implementation (non-header) file from a different
        # directory/package is disallowed.
        error(filename, linenum, 'build/include', 4,
              'Do not include .' + extension + ' files from other packages')
        return
    if not _THIRD_PARTY_HEADERS_PATTERN.match(include):
      include_state.include_list[-1].append((include, linenum))

      # We want to ensure that headers appear in the right order:
      # 1) for foo.cc, foo.h  (preferred location)
      # 2) c system files
      # 3) cpp system files
      # 4) for foo.cc, foo.h  (deprecated location)
      # 5) other google headers
      #
      # We classify each include statement as one of those 5 types
      # using a number of techniques. The include_state object keeps
      # track of the highest type seen, and complains if we see a
      # lower type after that.
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, is_system))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
      if not include_state.IsInAlphabeticalOrder(
          clean_lines, linenum, canonical_include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)
      include_state.SetLastHeader(canonical_include)
|
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L4673-L4748
|
concatenate several file remove header lines
|
python
|
def replace_lines_in_files(search_string, replacement_line):
    """
    Finds lines containing the search string and replaces the whole line with
    the specified replacement string.

    Each selected file is backed up to "<path>.backup" before editing, and
    every replaced line is echoed to stdout.
    """
    # have the user select some files
    paths = _s.dialogs.MultipleFiles('DIS AND DAT|*.*')
    if paths == []: return

    # normalise the replacement once, outside the loops
    new_line = replacement_line.strip() + "\n"

    for path in paths:
        _shutil.copy(path, path + ".backup")
        lines = read_lines(path)
        for index, text in enumerate(lines):
            if text.find(search_string) >= 0:
                print(text)
                lines[index] = new_line
        write_to_file(path, join(lines, ''))

    return
|
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_functions.py#L1264-L1284
|
concatenate several file remove header lines
|
python
|
def fix_line_breaks(s):
    r"""
    Convert \r\n and \r to \n chars. Strip any leading or trailing whitespace
    on each line. Remove blank lines.
    """
    # splitlines() normalises every newline style; strip each line and
    # drop the ones that end up empty before re-joining with '\n'.
    stripped = (chunk.strip() for chunk in s.splitlines())
    return "\n".join(chunk for chunk in stripped if chunk)
|
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/glamkit_collections/utils.py#L69-L77
|
concatenate several file remove header lines
|
python
|
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around operators.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Don't try to do spacing checks for operator methods. Do this by
  # replacing the troublesome characters with something else,
  # preserving column position for all other characters.
  #
  # The replacement is done repeatedly to avoid false positives from
  # operators that call operators.
  while True:
    match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
    if match:
      # Mask the operator token with underscores of equal length so that
      # the column positions of everything else are preserved.
      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
    else:
      break

  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
  # Otherwise not. Note we only check for non-spaces on *both* sides;
  # sometimes people put non-spaces on one side when aligning ='s among
  # many lines (not that this is behavior that I approve of...)
  if ((Search(r'[\w.]=', line) or
       Search(r'=[\w.]', line))
      and not Search(r'\b(if|while|for) ', line)
      # Operators taken from [lex.operators] in C++11 standard.
      and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
      and not Search(r'operator=', line)):
    error(filename, linenum, 'whitespace/operators', 4,
          'Missing spaces around =')

  # It's ok not to have spaces around binary operators like + - * /, but if
  # there's too little whitespace, we get concerned. It's hard to tell,
  # though, so we punt on this one for now. TODO.

  # You should always have whitespace around binary operators.
  #
  # Check <= and >= first to avoid false positives with < and >, then
  # check non-include lines for spacing around < and >.
  #
  # If the operator is followed by a comma, assume it's be used in a
  # macro context and don't do any checks. This avoids false
  # positives.
  #
  # Note that && is not included here. Those are checked separately
  # in CheckRValueReference
  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))
  elif not Match(r'#.*include', line):
    # Look for < that is not surrounded by spaces. This is only
    # triggered if both sides are missing spaces, even though
    # technically should should flag if at least one side is missing a
    # space. This is done to avoid some false positives with shifts.
    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
    if match:
      (_, _, end_pos) = CloseExpression(
          clean_lines, linenum, len(match.group(1)))
      # end_pos <= -1 means the '<' never closes: it is a comparison
      # operator, not a template bracket.
      if end_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around <')

    # Look for > that is not surrounded by spaces. Similar to the
    # above, we only trigger if both sides are missing spaces to avoid
    # false positives with shifts.
    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
    if match:
      (_, _, start_pos) = ReverseCloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if start_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around >')

  # We allow no-spaces around << when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams)
  #
  # We also allow operators following an opening parenthesis, since
  # those tend to be macros that deal with operators.
  match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line)
  if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
      not (match.group(1) == 'operator' and match.group(2) == ';')):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around <<')

  # We allow no-spaces around >> for almost anything. This is because
  # C++11 allows ">>" to close nested templates, which accounts for
  # most cases when ">>" is not followed by a space.
  #
  # We still warn on ">>" followed by alpha character, because that is
  # likely due to ">>" being used for right shifts, e.g.:
  #   value >> alpha
  #
  # When ">>" is used to close templates, the alphanumeric letter that
  # follows would be part of an identifier, and there should still be
  # a space separating the template type and the identifier.
  #   type<type<type>> alpha
  match = Search(r'>>[a-zA-Z_]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around >>')

  # There shouldn't be space around unary operators
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4,
          'Extra space for operator %s' % match.group(1))
|
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L3127-L3239
|
concatenate several file remove header lines
|
python
|
def uncomment(path,
              regex,
              char='#',
              backup='.bak'):
    '''
    .. deprecated:: 0.17.0
       Use :py:func:`~salt.modules.file.replace` instead.

    Uncomment specified commented lines in a file

    path
        The full path to the file to be edited
    regex
        A regular expression used to find the lines that are to be uncommented.
        This regex should not include the comment character. A leading ``^``
        character will be stripped for convenience (for easily switching
        between comment() and uncomment()).
    char : ``#``
        The character to remove in order to uncomment a line
    backup : ``.bak``
        The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
        overwrite this backup

    CLI Example:

    .. code-block:: bash

        salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID'
    '''
    # Thin wrapper: comment_line() implements both directions; cmnt=False
    # selects "uncomment" (strip the leading comment character).
    return comment_line(path=path,
                        regex=regex,
                        char=char,
                        cmnt=False,
                        backup=backup)
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L1320-L1354
|
concatenate several file remove header lines
|
python
|
def line(self, line):
    """Returns list of strings split by input delimeter

    Argument:
        line - Input line to cut
    """
    # Remove empty strings in case of multiple instances of delimiter
    pieces = re.split(self.delimiter, line.rstrip())
    return [piece for piece in pieces if piece != '']
|
https://github.com/jpweiser/cuts/blob/5baf7f2e145045942ee8dcaccbc47f8f821fcb56/cuts/fields.py#L25-L32
|
concatenate several file remove header lines
|
python
|
def remove_leading_garbage_lines_from_reference_section(ref_sectn):
    """Sometimes, the first lines of the extracted references are completely
    blank or email addresses. These must be removed as they are not
    references.
    @param ref_sectn: (list) of strings - the reference section lines
    @return: (list) of strings - the reference section without leading
    blank lines or email addresses.

    Note: the list is modified in place (leading items popped) and also
    returned.
    """
    # Fix: the original used a `ur'...'` literal, which is Python-2-only
    # syntax (SyntaxError on Python 3). A plain raw string expresses the
    # identical ASCII pattern on both versions.
    p_email = re.compile(r'^\s*e\-?mail', re.UNICODE)
    # Pop leading lines that are all-whitespace or start with "e-mail"/"email".
    while ref_sectn and (ref_sectn[0].isspace() or p_email.match(ref_sectn[0])):
        ref_sectn.pop(0)
    return ref_sectn
|
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1364-L1375
|
concatenate several file remove header lines
|
python
|
def parse_headerline(self, line):
    """ Parses header lines

    Header example:
    Batch Info,2013-03-20T07:11:09.9053262-07:00,2013-03-20T07:12:55.5280967-07:00,2013-03-20T07:11:07.1047817-07:00,,,,,,,,,,,,,,
    Batch Data Path,D:\MassHunter\Data\130129\QuantResults\130129LS.batch.bin,,,,,,,,,,,,,,,,
    Analysis Time,3/20/2013 7:11 AM,Analyst Name,Administrator,,,,,,,,,,,,,,
    Report Time,3/20/2013 7:12 AM,Reporter Name,Administrator,,,,,,,,,,,,,,
    Last Calib Update,3/20/2013 7:11 AM,Batch State,Processed,,,,,,,,,,,,,,
    ,,,,,,,,,,,,,,,,,

    Return codes (per the visible branches):
        0  -> line consumed or ignored; caller should keep feeding lines
        -1 -> fatal: sequence table reached with no header data collected
    """
    if self._end_header == True:
        # Header already processed
        return 0

    if line.startswith(self.SEQUENCETABLE_KEY):
        # The sequence table marks the end of the header section.
        self._end_header = True
        if len(self._header) == 0:
            self.err("No header found", numline=self._numline)
            return -1
        return 0

    # CSV row -> list of stripped tokens; the first token selects the branch.
    splitted = [token.strip() for token in line.split(',')]

    # Batch Info,2013-03-20T07:11:09.9053262-07:00,2013-03-20T07:12:55.5280967-07:00,2013-03-20T07:11:07.1047817-07:00,,,,,,,,,,,,,,
    if splitted[0] == self.HEADERKEY_BATCHINFO:
        if self.HEADERKEY_BATCHINFO in self._header:
            self.warn("Header Batch Info already found. Discarding",
                      numline=self._numline, line=line)
            return 0

        self._header[self.HEADERKEY_BATCHINFO] = []
        # Collect every non-empty token after the key.
        for i in range(len(splitted) - 1):
            if splitted[i + 1]:
                self._header[self.HEADERKEY_BATCHINFO].append(splitted[i + 1])

    # Batch Data Path,D:\MassHunter\Data\130129\QuantResults\130129LS.batch.bin,,,,,,,,,,,,,,,,
    elif splitted[0] == self.HEADERKEY_BATCHDATAPATH:
        if self.HEADERKEY_BATCHDATAPATH in self._header:
            self.warn("Header Batch Data Path already found. Discarding",
                      numline=self._numline, line=line)
            return 0;
        if splitted[1]:
            self._header[self.HEADERKEY_BATCHDATAPATH] = splitted[1]
        else:
            self.warn("Batch Data Path not found or empty",
                      numline=self._numline, line=line)

    # Analysis Time,3/20/2013 7:11 AM,Analyst Name,Administrator,,,,,,,,,,,,,,
    elif splitted[0] == self.HEADERKEY_ANALYSISTIME:
        if splitted[1]:
            try:
                # Timestamps use 12-hour US format, e.g. "3/20/2013 7:11 AM".
                d = datetime.strptime(splitted[1], "%m/%d/%Y %I:%M %p")
                self._header[self.HEADERKEY_ANALYSISTIME] = d
            except ValueError:
                self.err("Invalid Analysis Time format",
                         numline=self._numline, line=line)
        else:
            self.warn("Analysis Time not found or empty",
                      numline=self._numline, line=line)
        # Analyst name rides on the same row, two tokens later.
        if splitted[2] and splitted[2] == self.HEADERKEY_ANALYSTNAME:
            if splitted[3]:
                self._header[self.HEADERKEY_ANALYSTNAME] = splitted[3]
            else:
                self.warn("Analyst Name not found or empty",
                          numline=self._numline, line=line)
        else:
            self.err("Analyst Name not found",
                     numline=self._numline, line=line)

    # Report Time,3/20/2013 7:12 AM,Reporter Name,Administrator,,,,,,,,,,,,,,
    elif splitted[0] == self.HEADERKEY_REPORTTIME:
        if splitted[1]:
            try:
                d = datetime.strptime(splitted[1], "%m/%d/%Y %I:%M %p")
                self._header[self.HEADERKEY_REPORTTIME] = d
            except ValueError:
                self.err("Invalid Report Time format",
                         numline=self._numline, line=line)
        else:
            self.warn("Report time not found or empty",
                      numline=self._numline, line=line)
        if splitted[2] and splitted[2] == self.HEADERKEY_REPORTERNAME:
            if splitted[3]:
                self._header[self.HEADERKEY_REPORTERNAME] = splitted[3]
            else:
                self.warn("Reporter Name not found or empty",
                          numline=self._numline, line=line)
        else:
            self.err("Reporter Name not found",
                     numline=self._numline, line=line)

    # Last Calib Update,3/20/2013 7:11 AM,Batch State,Processed,,,,,,,,,,,,,,
    elif splitted[0] == self.HEADERKEY_LASTCALIBRATION:
        if splitted[1]:
            try:
                d = datetime.strptime(splitted[1], "%m/%d/%Y %I:%M %p")
                self._header[self.HEADERKEY_LASTCALIBRATION] = d
            except ValueError:
                self.err("Invalid Last Calibration time format",
                         numline=self._numline, line=line)
        else:
            self.warn("Last Calibration time not found or empty",
                      numline=self._numline, line=line)
        if splitted[2] and splitted[2] == self.HEADERKEY_BATCHSTATE:
            if splitted[3]:
                self._header[self.HEADERKEY_BATCHSTATE] = splitted[3]
            else:
                self.warn("Batch state not found or empty",
                          numline=self._numline, line=line)
        else:
            self.err("Batch state not found",
                     numline=self._numline, line=line)

    return 0
|
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/exportimport/instruments/agilent/masshunter/quantitative.py#L158-L283
|
concatenate several file remove header lines
|
python
|
def line(state, host, name, line, present=True, replace=None, flags=None):
    '''
    Ensure lines in files using grep to locate and sed to replace.

    + name: target remote file to edit
    + line: string or regex matching the target line
    + present: whether the line should be in the file
    + replace: text to replace entire matching lines when ``present=True``
    + flags: list of flags to pass to sed when replacing/deleting

    Regex line matching:
        Unless line matches a line (starts with ^, ends $), pyinfra will wrap it such that
        it does, like: ``^.*LINE.*$``. This means we don't swap parts of lines out. To
        change bits of lines, see ``files.replace``.

    Regex line escaping:
        If matching special characters (eg a crontab line containing *), remember to escape
        it first using Python's ``re.escape``.
    '''
    match_line = line

    # Ensure we're matching a whole ^line$
    if not match_line.startswith('^'):
        match_line = '^.*{0}'.format(match_line)

    if not match_line.endswith('$'):
        match_line = '{0}.*$'.format(match_line)

    # Is there a matching line in this file?
    # NOTE(review): the branches below rely on find_in_file returning None
    # when the file does not exist and a (possibly empty) list of matching
    # lines otherwise -- confirm against the fact implementation.
    present_lines = host.fact.find_in_file(name, match_line)

    # If replace present, use that over the matching line
    if replace:
        line = replace
    # We must provide some kind of replace to sed_replace_command below
    else:
        replace = ''

    # Save commands for re-use in dynamic script when file not present at fact stage
    echo_command = 'echo "{0}" >> {1}'.format(line, name)
    sed_replace_command = sed_replace(
        name, match_line, replace,
        flags=flags,
    )

    # No line and we want it, append it
    if not present_lines and present:
        # If the file does not exist - it *might* be created, so we handle it
        # dynamically with a little script.
        if present_lines is None:
            yield '''
                # If the file now exists
                if [ -f "{target}" ]; then
                    # Grep for the line, sed if matches
                    (grep "{match_line}" "{target}" && {sed_replace_command}) || \
                    # Else echo
                    {echo_command}

                # No file, just echo
                else
                    {echo_command}
                fi
            '''.format(
                target=name,
                match_line=match_line,
                echo_command=echo_command,
                sed_replace_command=sed_replace_command,
            )

        # Otherwise the file exists and there is no matching line, so append it
        else:
            yield echo_command

    # Line(s) exists and we want to remove them, replace with nothing
    elif present_lines and not present:
        yield sed_replace(name, match_line, '', flags=flags)

    # Line(s) exists and we have want to ensure they're correct
    elif present_lines and present:
        # If any of lines are different, sed replace them
        if replace and any(line != replace for line in present_lines):
            yield sed_replace_command
|
https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/files.py#L83-L165
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.