repository_name (string, length 7-55) | func_path_in_repository (string, length 4-223) | func_name (string, length 1-134) | whole_func_string (string, length 75-104k) | language (string, 1 class) | func_code_string (string, length 75-104k) | func_code_tokens (sequence, length 19-28.4k) | func_documentation_string (string, length 1-46.9k) | func_documentation_tokens (sequence, length 1-1.97k) | split_name (string, 1 class) | func_code_url (string, length 87-315) |
---|---|---|---|---|---|---|---|---|---|---|
vsoch/helpme | helpme/utils/memory.py | get_pid | def get_pid(pid=None):
'''get_pid will return a pid of interest. First we use given variable,
then environmental variable PID, and then PID of running process
'''
if pid == None:
if os.environ.get("PID",None) != None:
pid = int(os.environ.get("PID"))
# Then use current running script as process
else:
pid = os.getpid()
print("pid is %s" %pid)
return pid | python | def get_pid(pid=None):
'''get_pid will return a pid of interest. First we use given variable,
then environmental variable PID, and then PID of running process
'''
if pid == None:
if os.environ.get("PID",None) != None:
pid = int(os.environ.get("PID"))
# Then use current running script as process
else:
pid = os.getpid()
print("pid is %s" %pid)
return pid | [
"def",
"get_pid",
"(",
"pid",
"=",
"None",
")",
":",
"if",
"pid",
"==",
"None",
":",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"\"PID\"",
",",
"None",
")",
"!=",
"None",
":",
"pid",
"=",
"int",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"\"PID\"",
")",
")",
"# Then use current running script as process",
"else",
":",
"pid",
"=",
"os",
".",
"getpid",
"(",
")",
"print",
"(",
"\"pid is %s\"",
"%",
"pid",
")",
"return",
"pid"
] | get_pid will return a pid of interest. First we use given variable,
then environmental variable PID, and then PID of running process | [
"get_pid",
"will",
"return",
"a",
"pid",
"of",
"interest",
".",
"First",
"we",
"use",
"given",
"variable",
"then",
"environmental",
"variable",
"PID",
"and",
"then",
"PID",
"of",
"running",
"process"
] | train | https://github.com/vsoch/helpme/blob/e609172260b10cddadb2d2023ab26da8082a9feb/helpme/utils/memory.py#L30-L41 |
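
A brief usage sketch for the `get_pid` record above, assuming the `helpme` package is installed so the function can be imported from `helpme.utils.memory`; the PID value `12345` is invented for illustration. It shows the resolution order the docstring describes: explicit argument first, then the `PID` environment variable, then the current process.

```python
import os

from helpme.utils.memory import get_pid

os.environ["PID"] = "12345"
print(get_pid())     # returns 12345, taken from the PID environment variable
print(get_pid(999))  # returns 999, an explicit argument takes precedence

del os.environ["PID"]
print(get_pid())     # returns the PID of the current Python process
```
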
vsoch/helpme | helpme/utils/memory.py | get_memory_usage | def get_memory_usage(pid=None,timeout=1):
'''get_memory_usage returns a dictionary of resident set size (rss) and virtual
memory size (vms) for a process of interest, for as long as the process is running
:param pid: the pid to use:
:param timeout: the timeout
:: notes
example:
sleep 3 & exec python -m memory "$!"
'''
rss = []
vms = []
# If no pid is provided, look for environment variable
pid = get_pid(pid)
process = psutil.Process(pid)
# Create lists of memory usage over time
print(process.status())
while process.status() == 'running':
mem = process.memory_info()
rss.append(mem.rss)
vms.append(mem.vms)
time.sleep(timeout)
# http://pythonhosted.org/psutil/#psutil.Process.memory_info
result = {"rss":rss,"vms":vms}
print(result) | python | def get_memory_usage(pid=None,timeout=1):
'''get_memory_usage returns a dictionary of resident set size (rss) and virtual
memory size (vms) for a process of interest, for as long as the process is running
:param pid: the pid to use:
:param timeout: the timeout
:: notes
example:
sleep 3 & exec python -m memory "$!"
'''
rss = []
vms = []
# If no pid is provided, look for environment variable
pid = get_pid(pid)
process = psutil.Process(pid)
# Create lists of memory usage over time
print(process.status())
while process.status() == 'running':
mem = process.memory_info()
rss.append(mem.rss)
vms.append(mem.vms)
time.sleep(timeout)
# http://pythonhosted.org/psutil/#psutil.Process.memory_info
result = {"rss":rss,"vms":vms}
print(result) | [
"def",
"get_memory_usage",
"(",
"pid",
"=",
"None",
",",
"timeout",
"=",
"1",
")",
":",
"rss",
"=",
"[",
"]",
"vms",
"=",
"[",
"]",
"# If no pid is provided, look for environment variable",
"pid",
"=",
"get_pid",
"(",
"pid",
")",
"process",
"=",
"psutil",
".",
"Process",
"(",
"pid",
")",
"# Create lists of memory usage over time",
"print",
"(",
"process",
".",
"status",
"(",
")",
")",
"while",
"process",
".",
"status",
"(",
")",
"==",
"'running'",
":",
"mem",
"=",
"process",
".",
"memory_info",
"(",
")",
"rss",
".",
"append",
"(",
"mem",
".",
"rss",
")",
"vms",
".",
"append",
"(",
"mem",
".",
"vms",
")",
"time",
".",
"sleep",
"(",
"timeout",
")",
"# http://pythonhosted.org/psutil/#psutil.Process.memory_info",
"result",
"=",
"{",
"\"rss\"",
":",
"rss",
",",
"\"vms\"",
":",
"vms",
"}",
"print",
"(",
"result",
")"
] | get_memory_usage returns a dictionary of resident set size (rss) and virtual
memory size (vms) for a process of interest, for as long as the process is running
:param pid: the pid to use:
:param timeout: the timeout
:: notes
example:
sleep 3 & exec python -m memory "$!" | [
"get_memory_usage",
"returns",
"a",
"dictionary",
"of",
"resident",
"set",
"size",
"(",
"rss",
")",
"and",
"virtual",
"memory",
"size",
"(",
"vms",
")",
"for",
"a",
"process",
"of",
"interest",
"for",
"as",
"long",
"as",
"the",
"process",
"is",
"running",
":",
"param",
"pid",
":",
"the",
"pid",
"to",
"use",
":",
":",
"param",
"timeout",
":",
"the",
"timeout",
"::",
"notes",
"example",
":",
"sleep",
"3",
"&",
"exec",
"python",
"-",
"m",
"memory",
"$!"
] | train | https://github.com/vsoch/helpme/blob/e609172260b10cddadb2d2023ab26da8082a9feb/helpme/utils/memory.py#L44-L69 |
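
The docstring's `sleep 3 & exec python -m memory "$!"` example points the sampler at a background shell job. Below is a self-contained sketch of the same polling idea using `psutil` directly, assuming a POSIX `sleep` binary is available; note that the recorded loop only continues while the status is exactly `'running'`, so a mostly sleeping child may yield few samples.

```python
import subprocess
import time

import psutil

child = subprocess.Popen(["sleep", "3"])      # short-lived child process to observe
process = psutil.Process(child.pid)

rss, vms = [], []
while child.poll() is None:                   # sample until the child exits
    try:
        info = process.memory_info()
    except psutil.NoSuchProcess:              # child exited between the two checks
        break
    rss.append(info.rss)                      # resident set size in bytes
    vms.append(info.vms)                      # virtual memory size in bytes
    time.sleep(1)

print({"rss": rss, "vms": vms})
```
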
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper.load_stored_routine | def load_stored_routine(self):
"""
Loads the stored routine into the instance of MySQL.
Returns the metadata of the stored routine if the stored routine is loaded successfully. Otherwise returns
False.
:rtype: dict[str,str]|bool
"""
try:
self._routine_name = os.path.splitext(os.path.basename(self._source_filename))[0]
if os.path.exists(self._source_filename):
if os.path.isfile(self._source_filename):
self._m_time = int(os.path.getmtime(self._source_filename))
else:
raise LoaderException("Unable to get mtime of file '{}'".format(self._source_filename))
else:
raise LoaderException("Source file '{}' does not exist".format(self._source_filename))
if self._pystratum_old_metadata:
self._pystratum_metadata = self._pystratum_old_metadata
load = self._must_reload()
if load:
self.__read_source_file()
self.__get_placeholders()
self._get_designation_type()
self._get_name()
self.__substitute_replace_pairs()
self._load_routine_file()
if self._designation_type == 'bulk_insert':
self._get_bulk_insert_table_columns_info()
self._get_routine_parameters_info()
self.__get_doc_block_parts_wrapper()
self.__save_shadow_copy()
self._update_metadata()
return self._pystratum_metadata
except Exception as exception:
self._log_exception(exception)
return False | python | def load_stored_routine(self):
"""
Loads the stored routine into the instance of MySQL.
Returns the metadata of the stored routine if the stored routine is loaded successfully. Otherwise returns
False.
:rtype: dict[str,str]|bool
"""
try:
self._routine_name = os.path.splitext(os.path.basename(self._source_filename))[0]
if os.path.exists(self._source_filename):
if os.path.isfile(self._source_filename):
self._m_time = int(os.path.getmtime(self._source_filename))
else:
raise LoaderException("Unable to get mtime of file '{}'".format(self._source_filename))
else:
raise LoaderException("Source file '{}' does not exist".format(self._source_filename))
if self._pystratum_old_metadata:
self._pystratum_metadata = self._pystratum_old_metadata
load = self._must_reload()
if load:
self.__read_source_file()
self.__get_placeholders()
self._get_designation_type()
self._get_name()
self.__substitute_replace_pairs()
self._load_routine_file()
if self._designation_type == 'bulk_insert':
self._get_bulk_insert_table_columns_info()
self._get_routine_parameters_info()
self.__get_doc_block_parts_wrapper()
self.__save_shadow_copy()
self._update_metadata()
return self._pystratum_metadata
except Exception as exception:
self._log_exception(exception)
return False | [
"def",
"load_stored_routine",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_routine_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"_source_filename",
")",
")",
"[",
"0",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"_source_filename",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"_source_filename",
")",
":",
"self",
".",
"_m_time",
"=",
"int",
"(",
"os",
".",
"path",
".",
"getmtime",
"(",
"self",
".",
"_source_filename",
")",
")",
"else",
":",
"raise",
"LoaderException",
"(",
"\"Unable to get mtime of file '{}'\"",
".",
"format",
"(",
"self",
".",
"_source_filename",
")",
")",
"else",
":",
"raise",
"LoaderException",
"(",
"\"Source file '{}' does not exist\"",
".",
"format",
"(",
"self",
".",
"_source_filename",
")",
")",
"if",
"self",
".",
"_pystratum_old_metadata",
":",
"self",
".",
"_pystratum_metadata",
"=",
"self",
".",
"_pystratum_old_metadata",
"load",
"=",
"self",
".",
"_must_reload",
"(",
")",
"if",
"load",
":",
"self",
".",
"__read_source_file",
"(",
")",
"self",
".",
"__get_placeholders",
"(",
")",
"self",
".",
"_get_designation_type",
"(",
")",
"self",
".",
"_get_name",
"(",
")",
"self",
".",
"__substitute_replace_pairs",
"(",
")",
"self",
".",
"_load_routine_file",
"(",
")",
"if",
"self",
".",
"_designation_type",
"==",
"'bulk_insert'",
":",
"self",
".",
"_get_bulk_insert_table_columns_info",
"(",
")",
"self",
".",
"_get_routine_parameters_info",
"(",
")",
"self",
".",
"__get_doc_block_parts_wrapper",
"(",
")",
"self",
".",
"__save_shadow_copy",
"(",
")",
"self",
".",
"_update_metadata",
"(",
")",
"return",
"self",
".",
"_pystratum_metadata",
"except",
"Exception",
"as",
"exception",
":",
"self",
".",
"_log_exception",
"(",
"exception",
")",
"return",
"False"
] | Loads the stored routine into the instance of MySQL.
Returns the metadata of the stored routine if the stored routine is loaded successfully. Otherwise returns
False.
:rtype: dict[str,str]|bool | [
"Loads",
"the",
"stored",
"routine",
"into",
"the",
"instance",
"of",
"MySQL",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L190-L242 |
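
`load_stored_routine` begins by deriving the routine name from the file name and taking the file's modification time for change detection (later stored as the `timestamp` metadata). A minimal standalone sketch of that preamble, using a throwaway temporary file as a stand-in for a real routine source:

```python
import os
import tempfile

with tempfile.NamedTemporaryFile(suffix=".sql", delete=False) as handle:
    source_filename = handle.name             # stand-in for a routine source file

if not os.path.isfile(source_filename):
    raise RuntimeError("Source file '{}' does not exist".format(source_filename))

routine_name = os.path.splitext(os.path.basename(source_filename))[0]
m_time = int(os.path.getmtime(source_filename))   # used for change detection
print(routine_name, m_time)

os.remove(source_filename)                    # clean up the throwaway file
```
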
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper.__read_source_file | def __read_source_file(self):
"""
Reads the file with the source of the stored routine.
"""
with open(self._source_filename, 'r', encoding=self._routine_file_encoding) as file:
self._routine_source_code = file.read()
self._routine_source_code_lines = self._routine_source_code.split("\n") | python | def __read_source_file(self):
"""
Reads the file with the source of the stored routine.
"""
with open(self._source_filename, 'r', encoding=self._routine_file_encoding) as file:
self._routine_source_code = file.read()
self._routine_source_code_lines = self._routine_source_code.split("\n") | [
"def",
"__read_source_file",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"_source_filename",
",",
"'r'",
",",
"encoding",
"=",
"self",
".",
"_routine_file_encoding",
")",
"as",
"file",
":",
"self",
".",
"_routine_source_code",
"=",
"file",
".",
"read",
"(",
")",
"self",
".",
"_routine_source_code_lines",
"=",
"self",
".",
"_routine_source_code",
".",
"split",
"(",
"\"\\n\"",
")"
] | Reads the file with the source of the stored routine. | [
"Reads",
"the",
"file",
"with",
"the",
"source",
"of",
"the",
"stored",
"routine",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L245-L252 |
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper.__save_shadow_copy | def __save_shadow_copy(self):
"""
Saves a copy of the stored routine source with pure SQL (if shadow directory is set).
"""
if not self.shadow_directory:
return
destination_filename = os.path.join(self.shadow_directory, self._routine_name) + '.sql'
if os.path.realpath(destination_filename) == os.path.realpath(self._source_filename):
raise LoaderException("Shadow copy will override routine source '{}'".format(self._source_filename))
# Remove the (read only) shadow file if it exists.
if os.path.exists(destination_filename):
os.remove(destination_filename)
# Write the shadow file.
with open(destination_filename, 'wt', encoding=self._routine_file_encoding) as handle:
handle.write(self._routine_source_code)
# Make the file read only.
mode = os.stat(self._source_filename)[stat.ST_MODE]
os.chmod(destination_filename, mode & ~stat.S_IWUSR & ~stat.S_IWGRP & ~stat.S_IWOTH) | python | def __save_shadow_copy(self):
"""
Saves a copy of the stored routine source with pure SQL (if shadow directory is set).
"""
if not self.shadow_directory:
return
destination_filename = os.path.join(self.shadow_directory, self._routine_name) + '.sql'
if os.path.realpath(destination_filename) == os.path.realpath(self._source_filename):
raise LoaderException("Shadow copy will override routine source '{}'".format(self._source_filename))
# Remove the (read only) shadow file if it exists.
if os.path.exists(destination_filename):
os.remove(destination_filename)
# Write the shadow file.
with open(destination_filename, 'wt', encoding=self._routine_file_encoding) as handle:
handle.write(self._routine_source_code)
# Make the file read only.
mode = os.stat(self._source_filename)[stat.ST_MODE]
os.chmod(destination_filename, mode & ~stat.S_IWUSR & ~stat.S_IWGRP & ~stat.S_IWOTH) | [
"def",
"__save_shadow_copy",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"shadow_directory",
":",
"return",
"destination_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"shadow_directory",
",",
"self",
".",
"_routine_name",
")",
"+",
"'.sql'",
"if",
"os",
".",
"path",
".",
"realpath",
"(",
"destination_filename",
")",
"==",
"os",
".",
"path",
".",
"realpath",
"(",
"self",
".",
"_source_filename",
")",
":",
"raise",
"LoaderException",
"(",
"\"Shadow copy will override routine source '{}'\"",
".",
"format",
"(",
"self",
".",
"_source_filename",
")",
")",
"# Remove the (read only) shadow file if it exists.",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"destination_filename",
")",
":",
"os",
".",
"remove",
"(",
"destination_filename",
")",
"# Write the shadow file.",
"with",
"open",
"(",
"destination_filename",
",",
"'wt'",
",",
"encoding",
"=",
"self",
".",
"_routine_file_encoding",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"self",
".",
"_routine_source_code",
")",
"# Make the file read only.",
"mode",
"=",
"os",
".",
"stat",
"(",
"self",
".",
"_source_filename",
")",
"[",
"stat",
".",
"ST_MODE",
"]",
"os",
".",
"chmod",
"(",
"destination_filename",
",",
"mode",
"&",
"~",
"stat",
".",
"S_IWUSR",
"&",
"~",
"stat",
".",
"S_IWGRP",
"&",
"~",
"stat",
".",
"S_IWOTH",
")"
] | Saves a copy of the stored routine source with pure SQL (if shadow directory is set). | [
"Saves",
"a",
"copy",
"of",
"the",
"stored",
"routine",
"source",
"with",
"pure",
"SQL",
"(",
"if",
"shadow",
"directory",
"is",
"set",
")",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L255-L277 |
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper.__substitute_replace_pairs | def __substitute_replace_pairs(self):
"""
Substitutes all replace pairs in the source of the stored routine.
"""
self._set_magic_constants()
routine_source = []
i = 0
for line in self._routine_source_code_lines:
self._replace['__LINE__'] = "'%d'" % (i + 1)
for search, replace in self._replace.items():
tmp = re.findall(search, line, re.IGNORECASE)
if tmp:
line = line.replace(tmp[0], replace)
routine_source.append(line)
i += 1
self._routine_source_code = "\n".join(routine_source) | python | def __substitute_replace_pairs(self):
"""
Substitutes all replace pairs in the source of the stored routine.
"""
self._set_magic_constants()
routine_source = []
i = 0
for line in self._routine_source_code_lines:
self._replace['__LINE__'] = "'%d'" % (i + 1)
for search, replace in self._replace.items():
tmp = re.findall(search, line, re.IGNORECASE)
if tmp:
line = line.replace(tmp[0], replace)
routine_source.append(line)
i += 1
self._routine_source_code = "\n".join(routine_source) | [
"def",
"__substitute_replace_pairs",
"(",
"self",
")",
":",
"self",
".",
"_set_magic_constants",
"(",
")",
"routine_source",
"=",
"[",
"]",
"i",
"=",
"0",
"for",
"line",
"in",
"self",
".",
"_routine_source_code_lines",
":",
"self",
".",
"_replace",
"[",
"'__LINE__'",
"]",
"=",
"\"'%d'\"",
"%",
"(",
"i",
"+",
"1",
")",
"for",
"search",
",",
"replace",
"in",
"self",
".",
"_replace",
".",
"items",
"(",
")",
":",
"tmp",
"=",
"re",
".",
"findall",
"(",
"search",
",",
"line",
",",
"re",
".",
"IGNORECASE",
")",
"if",
"tmp",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"tmp",
"[",
"0",
"]",
",",
"replace",
")",
"routine_source",
".",
"append",
"(",
"line",
")",
"i",
"+=",
"1",
"self",
".",
"_routine_source_code",
"=",
"\"\\n\"",
".",
"join",
"(",
"routine_source",
")"
] | Substitutes all replace pairs in the source of the stored routine. | [
"Substitutes",
"all",
"replace",
"pairs",
"in",
"the",
"source",
"of",
"the",
"stored",
"routine",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L280-L297 |
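
A self-contained sketch of the per-line substitution loop above, with a made-up replace table; `__LINE__` is refreshed on every line exactly as in the method, and only the first hit of each pattern per line is replaced.

```python
import re

replace = {"__ROUTINE__": "'my_routine'"}     # hypothetical replace pair
source_lines = ["select __ROUTINE__, __LINE__", "from dual"]

result = []
for i, line in enumerate(source_lines):
    replace["__LINE__"] = "'%d'" % (i + 1)    # magic constant refreshed per line
    for search, replacement in replace.items():
        hits = re.findall(search, line, re.IGNORECASE)
        if hits:
            line = line.replace(hits[0], replacement)
    result.append(line)

print("\n".join(result))
# select 'my_routine', '1'
# from dual
```
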
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper._log_exception | def _log_exception(self, exception):
"""
Logs an exception.
:param Exception exception: The exception.
:rtype: None
"""
self._io.error(str(exception).strip().split(os.linesep)) | python | def _log_exception(self, exception):
"""
Logs an exception.
:param Exception exception: The exception.
:rtype: None
"""
self._io.error(str(exception).strip().split(os.linesep)) | [
"def",
"_log_exception",
"(",
"self",
",",
"exception",
")",
":",
"self",
".",
"_io",
".",
"error",
"(",
"str",
"(",
"exception",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"os",
".",
"linesep",
")",
")"
] | Logs an exception.
:param Exception exception: The exception.
:rtype: None | [
"Logs",
"an",
"exception",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L300-L308 |
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper.__get_placeholders | def __get_placeholders(self):
"""
Extracts the placeholders from the stored routine source.
"""
ret = True
pattern = re.compile('(@[A-Za-z0-9_.]+(%(max-)?type)?@)')
matches = pattern.findall(self._routine_source_code)
placeholders = []
if len(matches) != 0:
for tmp in matches:
placeholder = tmp[0]
if placeholder.lower() not in self._replace_pairs:
raise LoaderException("Unknown placeholder '{0}' in file {1}".
format(placeholder, self._source_filename))
if placeholder not in placeholders:
placeholders.append(placeholder)
for placeholder in placeholders:
if placeholder not in self._replace:
self._replace[placeholder] = self._replace_pairs[placeholder.lower()]
return ret | python | def __get_placeholders(self):
"""
Extracts the placeholders from the stored routine source.
"""
ret = True
pattern = re.compile('(@[A-Za-z0-9_.]+(%(max-)?type)?@)')
matches = pattern.findall(self._routine_source_code)
placeholders = []
if len(matches) != 0:
for tmp in matches:
placeholder = tmp[0]
if placeholder.lower() not in self._replace_pairs:
raise LoaderException("Unknown placeholder '{0}' in file {1}".
format(placeholder, self._source_filename))
if placeholder not in placeholders:
placeholders.append(placeholder)
for placeholder in placeholders:
if placeholder not in self._replace:
self._replace[placeholder] = self._replace_pairs[placeholder.lower()]
return ret | [
"def",
"__get_placeholders",
"(",
"self",
")",
":",
"ret",
"=",
"True",
"pattern",
"=",
"re",
".",
"compile",
"(",
"'(@[A-Za-z0-9_.]+(%(max-)?type)?@)'",
")",
"matches",
"=",
"pattern",
".",
"findall",
"(",
"self",
".",
"_routine_source_code",
")",
"placeholders",
"=",
"[",
"]",
"if",
"len",
"(",
"matches",
")",
"!=",
"0",
":",
"for",
"tmp",
"in",
"matches",
":",
"placeholder",
"=",
"tmp",
"[",
"0",
"]",
"if",
"placeholder",
".",
"lower",
"(",
")",
"not",
"in",
"self",
".",
"_replace_pairs",
":",
"raise",
"LoaderException",
"(",
"\"Unknown placeholder '{0}' in file {1}\"",
".",
"format",
"(",
"placeholder",
",",
"self",
".",
"_source_filename",
")",
")",
"if",
"placeholder",
"not",
"in",
"placeholders",
":",
"placeholders",
".",
"append",
"(",
"placeholder",
")",
"for",
"placeholder",
"in",
"placeholders",
":",
"if",
"placeholder",
"not",
"in",
"self",
".",
"_replace",
":",
"self",
".",
"_replace",
"[",
"placeholder",
"]",
"=",
"self",
".",
"_replace_pairs",
"[",
"placeholder",
".",
"lower",
"(",
")",
"]",
"return",
"ret"
] | Extracts the placeholders from the stored routine source. | [
"Extracts",
"the",
"placeholders",
"from",
"the",
"stored",
"routine",
"source",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L321-L345 |
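
The pattern compiled above recognizes `@name@` placeholders, optionally suffixed with `%type` or `%max-type`. A quick standalone demonstration on a hypothetical SQL fragment, reusing the same regex:

```python
import re

sql = "insert into @my_schema.my_table@ values( @p_value%type@ )"
pattern = re.compile('(@[A-Za-z0-9_.]+(%(max-)?type)?@)')

placeholders = [match[0] for match in pattern.findall(sql)]
print(placeholders)
# ['@my_schema.my_table@', '@p_value%type@']
```
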
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper._get_designation_type | def _get_designation_type(self):
"""
Extracts the designation type of the stored routine.
"""
positions = self._get_specification_positions()
if positions[0] != -1 and positions[1] != -1:
pattern = re.compile(r'^\s*--\s+type\s*:\s*(\w+)\s*(.+)?\s*', re.IGNORECASE)
for line_number in range(positions[0], positions[1] + 1):
matches = pattern.findall(self._routine_source_code_lines[line_number])
if matches:
self._designation_type = matches[0][0].lower()
tmp = str(matches[0][1])
if self._designation_type == 'bulk_insert':
n = re.compile(r'([a-zA-Z0-9_]+)\s+([a-zA-Z0-9_,]+)', re.IGNORECASE)
info = n.findall(tmp)
if not info:
raise LoaderException('Expected: -- type: bulk_insert <table_name> <columns> in file {0}'.
format(self._source_filename))
self._table_name = info[0][0]
self._columns = str(info[0][1]).split(',')
elif self._designation_type == 'rows_with_key' or self._designation_type == 'rows_with_index':
self._columns = str(matches[0][1]).split(',')
else:
if matches[0][1]:
raise LoaderException('Expected: -- type: {}'.format(self._designation_type))
if not self._designation_type:
raise LoaderException("Unable to find the designation type of the stored routine in file {0}".
format(self._source_filename)) | python | def _get_designation_type(self):
"""
Extracts the designation type of the stored routine.
"""
positions = self._get_specification_positions()
if positions[0] != -1 and positions[1] != -1:
pattern = re.compile(r'^\s*--\s+type\s*:\s*(\w+)\s*(.+)?\s*', re.IGNORECASE)
for line_number in range(positions[0], positions[1] + 1):
matches = pattern.findall(self._routine_source_code_lines[line_number])
if matches:
self._designation_type = matches[0][0].lower()
tmp = str(matches[0][1])
if self._designation_type == 'bulk_insert':
n = re.compile(r'([a-zA-Z0-9_]+)\s+([a-zA-Z0-9_,]+)', re.IGNORECASE)
info = n.findall(tmp)
if not info:
raise LoaderException('Expected: -- type: bulk_insert <table_name> <columns> in file {0}'.
format(self._source_filename))
self._table_name = info[0][0]
self._columns = str(info[0][1]).split(',')
elif self._designation_type == 'rows_with_key' or self._designation_type == 'rows_with_index':
self._columns = str(matches[0][1]).split(',')
else:
if matches[0][1]:
raise LoaderException('Expected: -- type: {}'.format(self._designation_type))
if not self._designation_type:
raise LoaderException("Unable to find the designation type of the stored routine in file {0}".
format(self._source_filename)) | [
"def",
"_get_designation_type",
"(",
"self",
")",
":",
"positions",
"=",
"self",
".",
"_get_specification_positions",
"(",
")",
"if",
"positions",
"[",
"0",
"]",
"!=",
"-",
"1",
"and",
"positions",
"[",
"1",
"]",
"!=",
"-",
"1",
":",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r'^\\s*--\\s+type\\s*:\\s*(\\w+)\\s*(.+)?\\s*'",
",",
"re",
".",
"IGNORECASE",
")",
"for",
"line_number",
"in",
"range",
"(",
"positions",
"[",
"0",
"]",
",",
"positions",
"[",
"1",
"]",
"+",
"1",
")",
":",
"matches",
"=",
"pattern",
".",
"findall",
"(",
"self",
".",
"_routine_source_code_lines",
"[",
"line_number",
"]",
")",
"if",
"matches",
":",
"self",
".",
"_designation_type",
"=",
"matches",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"tmp",
"=",
"str",
"(",
"matches",
"[",
"0",
"]",
"[",
"1",
"]",
")",
"if",
"self",
".",
"_designation_type",
"==",
"'bulk_insert'",
":",
"n",
"=",
"re",
".",
"compile",
"(",
"r'([a-zA-Z0-9_]+)\\s+([a-zA-Z0-9_,]+)'",
",",
"re",
".",
"IGNORECASE",
")",
"info",
"=",
"n",
".",
"findall",
"(",
"tmp",
")",
"if",
"not",
"info",
":",
"raise",
"LoaderException",
"(",
"'Expected: -- type: bulk_insert <table_name> <columns> in file {0}'",
".",
"format",
"(",
"self",
".",
"_source_filename",
")",
")",
"self",
".",
"_table_name",
"=",
"info",
"[",
"0",
"]",
"[",
"0",
"]",
"self",
".",
"_columns",
"=",
"str",
"(",
"info",
"[",
"0",
"]",
"[",
"1",
"]",
")",
".",
"split",
"(",
"','",
")",
"elif",
"self",
".",
"_designation_type",
"==",
"'rows_with_key'",
"or",
"self",
".",
"_designation_type",
"==",
"'rows_with_index'",
":",
"self",
".",
"_columns",
"=",
"str",
"(",
"matches",
"[",
"0",
"]",
"[",
"1",
"]",
")",
".",
"split",
"(",
"','",
")",
"else",
":",
"if",
"matches",
"[",
"0",
"]",
"[",
"1",
"]",
":",
"raise",
"LoaderException",
"(",
"'Expected: -- type: {}'",
".",
"format",
"(",
"self",
".",
"_designation_type",
")",
")",
"if",
"not",
"self",
".",
"_designation_type",
":",
"raise",
"LoaderException",
"(",
"\"Unable to find the designation type of the stored routine in file {0}\"",
".",
"format",
"(",
"self",
".",
"_source_filename",
")",
")"
] | Extracts the designation type of the stored routine. | [
"Extracts",
"the",
"designation",
"type",
"of",
"the",
"stored",
"routine",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L348-L377 |
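
The designation is read from a `-- type:` comment in the routine's specification block; for `bulk_insert` it must also name a table and a comma-separated column list, as the error message above spells out. A sketch with a made-up header line, reusing the same first regex but splitting the tail with `str.split()` rather than the method's second regex, for brevity:

```python
import re

header = "-- type: bulk_insert my_table col_a,col_b,col_c"   # hypothetical header line
pattern = re.compile(r'^\s*--\s+type\s*:\s*(\w+)\s*(.+)?\s*', re.IGNORECASE)

designation, tail = pattern.findall(header)[0]
print(designation)        # bulk_insert
print(tail.split())       # ['my_table', 'col_a,col_b,col_c']
```
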
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper._get_specification_positions | def _get_specification_positions(self):
"""
Returns a tuple with the start and end line numbers of the stored routine specification.
:rtype: tuple
"""
start = -1
for (i, line) in enumerate(self._routine_source_code_lines):
if self._is_start_of_stored_routine(line):
start = i
end = -1
for (i, line) in enumerate(self._routine_source_code_lines):
if self._is_start_of_stored_routine_body(line):
end = i - 1
return start, end | python | def _get_specification_positions(self):
"""
Returns a tuple with the start and end line numbers of the stored routine specification.
:rtype: tuple
"""
start = -1
for (i, line) in enumerate(self._routine_source_code_lines):
if self._is_start_of_stored_routine(line):
start = i
end = -1
for (i, line) in enumerate(self._routine_source_code_lines):
if self._is_start_of_stored_routine_body(line):
end = i - 1
return start, end | [
"def",
"_get_specification_positions",
"(",
"self",
")",
":",
"start",
"=",
"-",
"1",
"for",
"(",
"i",
",",
"line",
")",
"in",
"enumerate",
"(",
"self",
".",
"_routine_source_code_lines",
")",
":",
"if",
"self",
".",
"_is_start_of_stored_routine",
"(",
"line",
")",
":",
"start",
"=",
"i",
"end",
"=",
"-",
"1",
"for",
"(",
"i",
",",
"line",
")",
"in",
"enumerate",
"(",
"self",
".",
"_routine_source_code_lines",
")",
":",
"if",
"self",
".",
"_is_start_of_stored_routine_body",
"(",
"line",
")",
":",
"end",
"=",
"i",
"-",
"1",
"return",
"start",
",",
"end"
] | Returns a tuple with the start and end line numbers of the stored routine specification.
:rtype: tuple | [
"Returns",
"a",
"tuple",
"with",
"the",
"start",
"and",
"end",
"line",
"numbers",
"of",
"the",
"stored",
"routine",
"specification",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L380-L396 |
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper.__get_doc_block_lines | def __get_doc_block_lines(self):
"""
Returns the start and end line of the DocBlock of the stored routine code.
"""
line1 = None
line2 = None
i = 0
for line in self._routine_source_code_lines:
if re.match(r'\s*/\*\*', line):
line1 = i
if re.match(r'\s*\*/', line):
line2 = i
if self._is_start_of_stored_routine(line):
break
i += 1
return line1, line2 | python | def __get_doc_block_lines(self):
"""
Returns the start and end line of the DocBlock of the stored routine code.
"""
line1 = None
line2 = None
i = 0
for line in self._routine_source_code_lines:
if re.match(r'\s*/\*\*', line):
line1 = i
if re.match(r'\s*\*/', line):
line2 = i
if self._is_start_of_stored_routine(line):
break
i += 1
return line1, line2 | [
"def",
"__get_doc_block_lines",
"(",
"self",
")",
":",
"line1",
"=",
"None",
"line2",
"=",
"None",
"i",
"=",
"0",
"for",
"line",
"in",
"self",
".",
"_routine_source_code_lines",
":",
"if",
"re",
".",
"match",
"(",
"r'\\s*/\\*\\*'",
",",
"line",
")",
":",
"line1",
"=",
"i",
"if",
"re",
".",
"match",
"(",
"r'\\s*\\*/'",
",",
"line",
")",
":",
"line2",
"=",
"i",
"if",
"self",
".",
"_is_start_of_stored_routine",
"(",
"line",
")",
":",
"break",
"i",
"+=",
"1",
"return",
"line1",
",",
"line2"
] | Returns the start and end line of the DOcBlock of the stored routine code. | [
"Returns",
"the",
"start",
"and",
"end",
"line",
"of",
"the",
"DOcBlock",
"of",
"the",
"stored",
"routine",
"code",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L422-L442 |
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper.__get_doc_block_parts_source | def __get_doc_block_parts_source(self):
"""
Extracts the DocBlock (in parts) from the source of the stored routine source.
"""
line1, line2 = self.__get_doc_block_lines()
if line1 is not None and line2 is not None and line1 <= line2:
doc_block = self._routine_source_code_lines[line1:line2 - line1 + 1]
else:
doc_block = list()
reflection = DocBlockReflection(doc_block)
self._doc_block_parts_source['description'] = reflection.get_description()
self._doc_block_parts_source['parameters'] = list()
for tag in reflection.get_tags('param'):
parts = re.match(r'^(@param)\s+(\w+)\s*(.+)?', tag, re.DOTALL)
if parts:
self._doc_block_parts_source['parameters'].append({'name': parts.group(2),
'description': parts.group(3)}) | python | def __get_doc_block_parts_source(self):
"""
Extracts the DocBlock (in parts) from the source of the stored routine source.
"""
line1, line2 = self.__get_doc_block_lines()
if line1 is not None and line2 is not None and line1 <= line2:
doc_block = self._routine_source_code_lines[line1:line2 - line1 + 1]
else:
doc_block = list()
reflection = DocBlockReflection(doc_block)
self._doc_block_parts_source['description'] = reflection.get_description()
self._doc_block_parts_source['parameters'] = list()
for tag in reflection.get_tags('param'):
parts = re.match(r'^(@param)\s+(\w+)\s*(.+)?', tag, re.DOTALL)
if parts:
self._doc_block_parts_source['parameters'].append({'name': parts.group(2),
'description': parts.group(3)}) | [
"def",
"__get_doc_block_parts_source",
"(",
"self",
")",
":",
"line1",
",",
"line2",
"=",
"self",
".",
"__get_doc_block_lines",
"(",
")",
"if",
"line1",
"is",
"not",
"None",
"and",
"line2",
"is",
"not",
"None",
"and",
"line1",
"<=",
"line2",
":",
"doc_block",
"=",
"self",
".",
"_routine_source_code_lines",
"[",
"line1",
":",
"line2",
"-",
"line1",
"+",
"1",
"]",
"else",
":",
"doc_block",
"=",
"list",
"(",
")",
"reflection",
"=",
"DocBlockReflection",
"(",
"doc_block",
")",
"self",
".",
"_doc_block_parts_source",
"[",
"'description'",
"]",
"=",
"reflection",
".",
"get_description",
"(",
")",
"self",
".",
"_doc_block_parts_source",
"[",
"'parameters'",
"]",
"=",
"list",
"(",
")",
"for",
"tag",
"in",
"reflection",
".",
"get_tags",
"(",
"'param'",
")",
":",
"parts",
"=",
"re",
".",
"match",
"(",
"r'^(@param)\\s+(\\w+)\\s*(.+)?'",
",",
"tag",
",",
"re",
".",
"DOTALL",
")",
"if",
"parts",
":",
"self",
".",
"_doc_block_parts_source",
"[",
"'parameters'",
"]",
".",
"append",
"(",
"{",
"'name'",
":",
"parts",
".",
"group",
"(",
"2",
")",
",",
"'description'",
":",
"parts",
".",
"group",
"(",
"3",
")",
"}",
")"
] | Extracts the DocBlock (in parts) from the source of the stored routine source. | [
"Extracts",
"the",
"DocBlock",
"(",
"in",
"parts",
")",
"from",
"the",
"source",
"of",
"the",
"stored",
"routine",
"source",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L445-L465 |
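
Each `@param` tag is taken apart with the `re.match` call shown above; a tiny standalone check of that pattern on a hypothetical DocBlock tag string:

```python
import re

tag = "@param p_id The id of the row to load."   # hypothetical DocBlock tag
parts = re.match(r'^(@param)\s+(\w+)\s*(.+)?', tag, re.DOTALL)

print(parts.group(2))   # p_id
print(parts.group(3))   # The id of the row to load.
```
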
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper.__get_doc_block_parts_wrapper | def __get_doc_block_parts_wrapper(self):
"""
Generates the DocBlock parts to be used by the wrapper generator.
"""
self.__get_doc_block_parts_source()
helper = self._get_data_type_helper()
parameters = list()
for parameter_info in self._parameters:
parameters.append(
{'parameter_name': parameter_info['name'],
'python_type': helper.column_type_to_python_type(parameter_info),
'data_type_descriptor': parameter_info['data_type_descriptor'],
'description': self.__get_parameter_doc_description(parameter_info['name'])})
self._doc_block_parts_wrapper['description'] = self._doc_block_parts_source['description']
self._doc_block_parts_wrapper['parameters'] = parameters | python | def __get_doc_block_parts_wrapper(self):
"""
Generates the DocBlock parts to be used by the wrapper generator.
"""
self.__get_doc_block_parts_source()
helper = self._get_data_type_helper()
parameters = list()
for parameter_info in self._parameters:
parameters.append(
{'parameter_name': parameter_info['name'],
'python_type': helper.column_type_to_python_type(parameter_info),
'data_type_descriptor': parameter_info['data_type_descriptor'],
'description': self.__get_parameter_doc_description(parameter_info['name'])})
self._doc_block_parts_wrapper['description'] = self._doc_block_parts_source['description']
self._doc_block_parts_wrapper['parameters'] = parameters | [
"def",
"__get_doc_block_parts_wrapper",
"(",
"self",
")",
":",
"self",
".",
"__get_doc_block_parts_source",
"(",
")",
"helper",
"=",
"self",
".",
"_get_data_type_helper",
"(",
")",
"parameters",
"=",
"list",
"(",
")",
"for",
"parameter_info",
"in",
"self",
".",
"_parameters",
":",
"parameters",
".",
"append",
"(",
"{",
"'parameter_name'",
":",
"parameter_info",
"[",
"'name'",
"]",
",",
"'python_type'",
":",
"helper",
".",
"column_type_to_python_type",
"(",
"parameter_info",
")",
",",
"'data_type_descriptor'",
":",
"parameter_info",
"[",
"'data_type_descriptor'",
"]",
",",
"'description'",
":",
"self",
".",
"__get_parameter_doc_description",
"(",
"parameter_info",
"[",
"'name'",
"]",
")",
"}",
")",
"self",
".",
"_doc_block_parts_wrapper",
"[",
"'description'",
"]",
"=",
"self",
".",
"_doc_block_parts_source",
"[",
"'description'",
"]",
"self",
".",
"_doc_block_parts_wrapper",
"[",
"'parameters'",
"]",
"=",
"parameters"
] | Generates the DocBlock parts to be used by the wrapper generator. | [
"Generates",
"the",
"DocBlock",
"parts",
"to",
"be",
"used",
"by",
"the",
"wrapper",
"generator",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L493-L510 |
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper._update_metadata | def _update_metadata(self):
"""
Updates the metadata of the stored routine.
"""
self._pystratum_metadata['routine_name'] = self._routine_name
self._pystratum_metadata['designation'] = self._designation_type
self._pystratum_metadata['table_name'] = self._table_name
self._pystratum_metadata['parameters'] = self._parameters
self._pystratum_metadata['columns'] = self._columns
self._pystratum_metadata['fields'] = self._fields
self._pystratum_metadata['column_types'] = self._columns_types
self._pystratum_metadata['timestamp'] = self._m_time
self._pystratum_metadata['replace'] = self._replace
self._pystratum_metadata['pydoc'] = self._doc_block_parts_wrapper | python | def _update_metadata(self):
"""
Updates the metadata of the stored routine.
"""
self._pystratum_metadata['routine_name'] = self._routine_name
self._pystratum_metadata['designation'] = self._designation_type
self._pystratum_metadata['table_name'] = self._table_name
self._pystratum_metadata['parameters'] = self._parameters
self._pystratum_metadata['columns'] = self._columns
self._pystratum_metadata['fields'] = self._fields
self._pystratum_metadata['column_types'] = self._columns_types
self._pystratum_metadata['timestamp'] = self._m_time
self._pystratum_metadata['replace'] = self._replace
self._pystratum_metadata['pydoc'] = self._doc_block_parts_wrapper | [
"def",
"_update_metadata",
"(",
"self",
")",
":",
"self",
".",
"_pystratum_metadata",
"[",
"'routine_name'",
"]",
"=",
"self",
".",
"_routine_name",
"self",
".",
"_pystratum_metadata",
"[",
"'designation'",
"]",
"=",
"self",
".",
"_designation_type",
"self",
".",
"_pystratum_metadata",
"[",
"'table_name'",
"]",
"=",
"self",
".",
"_table_name",
"self",
".",
"_pystratum_metadata",
"[",
"'parameters'",
"]",
"=",
"self",
".",
"_parameters",
"self",
".",
"_pystratum_metadata",
"[",
"'columns'",
"]",
"=",
"self",
".",
"_columns",
"self",
".",
"_pystratum_metadata",
"[",
"'fields'",
"]",
"=",
"self",
".",
"_fields",
"self",
".",
"_pystratum_metadata",
"[",
"'column_types'",
"]",
"=",
"self",
".",
"_columns_types",
"self",
".",
"_pystratum_metadata",
"[",
"'timestamp'",
"]",
"=",
"self",
".",
"_m_time",
"self",
".",
"_pystratum_metadata",
"[",
"'replace'",
"]",
"=",
"self",
".",
"_replace",
"self",
".",
"_pystratum_metadata",
"[",
"'pydoc'",
"]",
"=",
"self",
".",
"_doc_block_parts_wrapper"
] | Updates the metadata of the stored routine. | [
"Updates",
"the",
"metadata",
"of",
"the",
"stored",
"routine",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L547-L560 |
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper._set_magic_constants | def _set_magic_constants(self):
"""
Adds magic constants to replace list.
"""
real_path = os.path.realpath(self._source_filename)
self._replace['__FILE__'] = "'%s'" % real_path
self._replace['__ROUTINE__'] = "'%s'" % self._routine_name
self._replace['__DIR__'] = "'%s'" % os.path.dirname(real_path) | python | def _set_magic_constants(self):
"""
Adds magic constants to replace list.
"""
real_path = os.path.realpath(self._source_filename)
self._replace['__FILE__'] = "'%s'" % real_path
self._replace['__ROUTINE__'] = "'%s'" % self._routine_name
self._replace['__DIR__'] = "'%s'" % os.path.dirname(real_path) | [
"def",
"_set_magic_constants",
"(",
"self",
")",
":",
"real_path",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"self",
".",
"_source_filename",
")",
"self",
".",
"_replace",
"[",
"'__FILE__'",
"]",
"=",
"\"'%s'\"",
"%",
"real_path",
"self",
".",
"_replace",
"[",
"'__ROUTINE__'",
"]",
"=",
"\"'%s'\"",
"%",
"self",
".",
"_routine_name",
"self",
".",
"_replace",
"[",
"'__DIR__'",
"]",
"=",
"\"'%s'\"",
"%",
"os",
".",
"path",
".",
"dirname",
"(",
"real_path",
")"
] | Adds magic constants to replace list. | [
"Adds",
"magic",
"constants",
"to",
"replace",
"list",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L571-L579 |
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper._unset_magic_constants | def _unset_magic_constants(self):
"""
Removes magic constants from current replace list.
"""
if '__FILE__' in self._replace:
del self._replace['__FILE__']
if '__ROUTINE__' in self._replace:
del self._replace['__ROUTINE__']
if '__DIR__' in self._replace:
del self._replace['__DIR__']
if '__LINE__' in self._replace:
del self._replace['__LINE__'] | python | def _unset_magic_constants(self):
"""
Removes magic constants from current replace list.
"""
if '__FILE__' in self._replace:
del self._replace['__FILE__']
if '__ROUTINE__' in self._replace:
del self._replace['__ROUTINE__']
if '__DIR__' in self._replace:
del self._replace['__DIR__']
if '__LINE__' in self._replace:
del self._replace['__LINE__'] | [
"def",
"_unset_magic_constants",
"(",
"self",
")",
":",
"if",
"'__FILE__'",
"in",
"self",
".",
"_replace",
":",
"del",
"self",
".",
"_replace",
"[",
"'__FILE__'",
"]",
"if",
"'__ROUTINE__'",
"in",
"self",
".",
"_replace",
":",
"del",
"self",
".",
"_replace",
"[",
"'__ROUTINE__'",
"]",
"if",
"'__DIR__'",
"in",
"self",
".",
"_replace",
":",
"del",
"self",
".",
"_replace",
"[",
"'__DIR__'",
"]",
"if",
"'__LINE__'",
"in",
"self",
".",
"_replace",
":",
"del",
"self",
".",
"_replace",
"[",
"'__LINE__'",
"]"
] | Removes magic constants from current replace list. | [
"Removes",
"magic",
"constants",
"from",
"current",
"replace",
"list",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L582-L596 |
SetBased/py-stratum | pystratum/RoutineLoaderHelper.py | RoutineLoaderHelper._print_sql_with_error | def _print_sql_with_error(self, sql, error_line):
"""
Writes a SQL statement with a syntax error to the output. The line where the error occurs is highlighted.
:param str sql: The SQL statement.
:param int error_line: The line where the error occurs.
"""
if os.linesep in sql:
lines = sql.split(os.linesep)
digits = math.ceil(math.log(len(lines) + 1, 10))
i = 1
for line in lines:
if i == error_line:
self._io.text('<error>{0:{width}} {1}</error>'.format(i, line, width=digits, ))
else:
self._io.text('{0:{width}} {1}'.format(i, line, width=digits, ))
i += 1
else:
self._io.text(sql) | python | def _print_sql_with_error(self, sql, error_line):
"""
Writes a SQL statement with a syntax error to the output. The line where the error occurs is highlighted.
:param str sql: The SQL statement.
:param int error_line: The line where the error occurs.
"""
if os.linesep in sql:
lines = sql.split(os.linesep)
digits = math.ceil(math.log(len(lines) + 1, 10))
i = 1
for line in lines:
if i == error_line:
self._io.text('<error>{0:{width}} {1}</error>'.format(i, line, width=digits, ))
else:
self._io.text('{0:{width}} {1}'.format(i, line, width=digits, ))
i += 1
else:
self._io.text(sql) | [
"def",
"_print_sql_with_error",
"(",
"self",
",",
"sql",
",",
"error_line",
")",
":",
"if",
"os",
".",
"linesep",
"in",
"sql",
":",
"lines",
"=",
"sql",
".",
"split",
"(",
"os",
".",
"linesep",
")",
"digits",
"=",
"math",
".",
"ceil",
"(",
"math",
".",
"log",
"(",
"len",
"(",
"lines",
")",
"+",
"1",
",",
"10",
")",
")",
"i",
"=",
"1",
"for",
"line",
"in",
"lines",
":",
"if",
"i",
"==",
"error_line",
":",
"self",
".",
"_io",
".",
"text",
"(",
"'<error>{0:{width}} {1}</error>'",
".",
"format",
"(",
"i",
",",
"line",
",",
"width",
"=",
"digits",
",",
")",
")",
"else",
":",
"self",
".",
"_io",
".",
"text",
"(",
"'{0:{width}} {1}'",
".",
"format",
"(",
"i",
",",
"line",
",",
"width",
"=",
"digits",
",",
")",
")",
"i",
"+=",
"1",
"else",
":",
"self",
".",
"_io",
".",
"text",
"(",
"sql",
")"
] | Writes a SQL statement with a syntax error to the output. The line where the error occurs is highlighted.
:param str sql: The SQL statement.
:param int error_line: The line where the error occurs. | [
"Writes",
"a",
"SQL",
"statement",
"with",
"an",
"syntax",
"error",
"to",
"the",
"output",
".",
"The",
"line",
"where",
"the",
"error",
"occurs",
"is",
"highlighted",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/RoutineLoaderHelper.py#L599-L617 |
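
The width of the line-number column is derived from the line count via `math.log(..., 10)`. A plain-`print` sketch of the same numbered listing, marking the error line instead of styling it through the IO layer:

```python
import math
import os

sql = os.linesep.join(["select *", "frm my_table", "where id = 1"])   # typo on line 2
error_line = 2

lines = sql.split(os.linesep)
digits = math.ceil(math.log(len(lines) + 1, 10))
for i, line in enumerate(lines, start=1):
    marker = ">>" if i == error_line else "  "
    print('{0} {1:{width}} {2}'.format(marker, i, line, width=digits))
```
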
mjirik/io3d | io3d/dili.py | list_filter | def list_filter(lst, startswith=None, notstartswith=None,
contain=None, notcontain=None):
""" Keep in list items according to filter parameters.
:param lst: item list
:param startswith: keep items starting with
:param notstartswith: remove items starting with
:return:
"""
keeped = []
for item in lst:
keep = False
if startswith is not None:
if item.startswith(startswith):
keep = True
if notstartswith is not None:
if not item.startswith(notstartswith):
keep = True
if contain is not None:
if contain in item:
keep = True
if notcontain is not None:
if not notcontain in item:
keep = True
if keep:
keeped.append(item)
return keeped | python | def list_filter(lst, startswith=None, notstartswith=None,
contain=None, notcontain=None):
""" Keep in list items according to filter parameters.
:param lst: item list
:param startswith: keep items starting with
:param notstartswith: remove items starting with
:return:
"""
keeped = []
for item in lst:
keep = False
if startswith is not None:
if item.startswith(startswith):
keep = True
if notstartswith is not None:
if not item.startswith(notstartswith):
keep = True
if contain is not None:
if contain in item:
keep = True
if notcontain is not None:
if not notcontain in item:
keep = True
if keep:
keeped.append(item)
return keeped | [
"def",
"list_filter",
"(",
"lst",
",",
"startswith",
"=",
"None",
",",
"notstartswith",
"=",
"None",
",",
"contain",
"=",
"None",
",",
"notcontain",
"=",
"None",
")",
":",
"keeped",
"=",
"[",
"]",
"for",
"item",
"in",
"lst",
":",
"keep",
"=",
"False",
"if",
"startswith",
"is",
"not",
"None",
":",
"if",
"item",
".",
"startswith",
"(",
"startswith",
")",
":",
"keep",
"=",
"True",
"if",
"notstartswith",
"is",
"not",
"None",
":",
"if",
"not",
"item",
".",
"startswith",
"(",
"notstartswith",
")",
":",
"keep",
"=",
"True",
"if",
"contain",
"is",
"not",
"None",
":",
"if",
"contain",
"in",
"item",
":",
"keep",
"=",
"True",
"if",
"notcontain",
"is",
"not",
"None",
":",
"if",
"not",
"notcontain",
"in",
"item",
":",
"keep",
"=",
"True",
"if",
"keep",
":",
"keeped",
".",
"append",
"(",
"item",
")",
"return",
"keeped"
] | Keep in list items according to filter parameters.
:param lst: item list
:param startswith: keep items starting with
:param notstartswith: remove items starting with
:return: | [
"Keep",
"in",
"list",
"items",
"according",
"to",
"filter",
"parameters",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/dili.py#L41-L68 |
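
A usage sketch for `list_filter`, assuming the `io3d` package is installed so the function can be imported from `io3d.dili`; the file names are invented:

```python
from io3d.dili import list_filter

items = ["data_raw.csv", "data_clean.csv", "readme.txt", "notes.md"]

print(list_filter(items, startswith="data"))     # ['data_raw.csv', 'data_clean.csv']
print(list_filter(items, contain="clean"))       # ['data_clean.csv']
print(list_filter(items, notstartswith="data"))  # ['readme.txt', 'notes.md']
```
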
mjirik/io3d | io3d/dili.py | split_dict | def split_dict(dct, keys):
"""
Split dict into two subdicts based on keys.
:param dct:
:param keys:
:return: dict_in, dict_out
"""
if type(dct) == collections.OrderedDict:
dict_in = collections.OrderedDict()
dict_out = collections.OrderedDict()
else:
dict_in = {}
dict_out = {}
for key, value in dct.items:
if key in keys:
dict_in[key] = value
else:
dict_out[key] = value
return dict_in, dict_out | python | def split_dict(dct, keys):
"""
Split dict into two subdicts based on keys.
:param dct:
:param keys:
:return: dict_in, dict_out
"""
if type(dct) == collections.OrderedDict:
dict_in = collections.OrderedDict()
dict_out = collections.OrderedDict()
else:
dict_in = {}
dict_out = {}
for key, value in dct.items:
if key in keys:
dict_in[key] = value
else:
dict_out[key] = value
return dict_in, dict_out | [
"def",
"split_dict",
"(",
"dct",
",",
"keys",
")",
":",
"if",
"type",
"(",
"dct",
")",
"==",
"collections",
".",
"OrderedDict",
":",
"dict_in",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"dict_out",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"else",
":",
"dict_in",
"=",
"{",
"}",
"dict_out",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"dct",
".",
"items",
":",
"if",
"key",
"in",
"keys",
":",
"dict_in",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"dict_out",
"[",
"key",
"]",
"=",
"value",
"return",
"dict_in",
",",
"dict_out"
] | Split dict into two subdicts based on keys.
:param dct:
:param keys:
:return: dict_in, dict_out | [
"Split",
"dict",
"into",
"two",
"subdicts",
"based",
"on",
"keys",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/dili.py#L82-L102 |
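
Note that the recorded function iterates `dct.items` without calling it, which raises `TypeError` at runtime. The minimal sketch below shows the same idea with `dct.items()` actually invoked; it is a corrected stand-in, not the library's code:

```python
import collections

def split_dict_fixed(dct, keys):
    """Same splitting idea with dct.items() called; keeps OrderedDict vs plain dict."""
    dict_in, dict_out = type(dct)(), type(dct)()
    for key, value in dct.items():
        (dict_in if key in keys else dict_out)[key] = value
    return dict_in, dict_out

cfg = collections.OrderedDict([("a", 1), ("b", 2), ("c", 3)])
kept, rest = split_dict_fixed(cfg, ["a", "c"])
print(dict(kept))   # {'a': 1, 'c': 3}
print(dict(rest))   # {'b': 2}
```
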
mjirik/io3d | io3d/dili.py | recursive_update | def recursive_update(d, u):
"""
Dict recursive update.
Based on Alex Martelli code on stackoverflow
http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth?answertab=votes#tab-top
:param d: dict to update
:param u: dict with new data
:return:
"""
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = recursive_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d | python | def recursive_update(d, u):
"""
Dict recursive update.
Based on Alex Martelli code on stackoverflow
http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth?answertab=votes#tab-top
:param d: dict to update
:param u: dict with new data
:return:
"""
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = recursive_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d | [
"def",
"recursive_update",
"(",
"d",
",",
"u",
")",
":",
"for",
"k",
",",
"v",
"in",
"u",
".",
"iteritems",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"collections",
".",
"Mapping",
")",
":",
"r",
"=",
"recursive_update",
"(",
"d",
".",
"get",
"(",
"k",
",",
"{",
"}",
")",
",",
"v",
")",
"d",
"[",
"k",
"]",
"=",
"r",
"else",
":",
"d",
"[",
"k",
"]",
"=",
"u",
"[",
"k",
"]",
"return",
"d"
] | Dict recursive update.
Based on Alex Martelli code on stackoverflow
http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth?answertab=votes#tab-top
:param d: dict to update
:param u: dict with new data
:return: | [
"Dict",
"recursive",
"update",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/dili.py#L104-L121 |
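
The recorded helper uses the Python 2 idioms `u.iteritems()` and `collections.Mapping`. The sketch below is a Python 3 rendering of the same recursion, an adaptation rather than the library's code:

```python
from collections.abc import Mapping

def recursive_update_py3(d, u):
    """Recursively merge mapping u into mapping d, nested dicts included."""
    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = recursive_update_py3(d.get(k, {}), v)
        else:
            d[k] = v
    return d

base = {"io": {"path": "/tmp", "mode": "r"}, "debug": False}
update = {"io": {"mode": "w"}, "debug": True}
print(recursive_update_py3(base, update))
# {'io': {'path': '/tmp', 'mode': 'w'}, 'debug': True}
```
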
mjirik/io3d | io3d/dili.py | flatten_dict_join_keys | def flatten_dict_join_keys(dct, join_symbol=" "):
""" Flatten dict with defined key join symbol.
:param dct: dict to flatten
:param join_symbol: default value is " "
:return:
"""
return dict( flatten_dict(dct, join=lambda a,b:a+join_symbol+b) ) | python | def flatten_dict_join_keys(dct, join_symbol=" "):
""" Flatten dict with defined key join symbol.
:param dct: dict to flatten
:param join_symbol: default value is " "
:return:
"""
return dict( flatten_dict(dct, join=lambda a,b:a+join_symbol+b) ) | [
"def",
"flatten_dict_join_keys",
"(",
"dct",
",",
"join_symbol",
"=",
"\" \"",
")",
":",
"return",
"dict",
"(",
"flatten_dict",
"(",
"dct",
",",
"join",
"=",
"lambda",
"a",
",",
"b",
":",
"a",
"+",
"join_symbol",
"+",
"b",
")",
")"
] | Flatten dict with defined key join symbol.
:param dct: dict to flatten
:param join_symbol: default value is " "
:return: | [
"Flatten",
"dict",
"with",
"defined",
"key",
"join",
"symbol",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/dili.py#L128-L135 |
mjirik/io3d | io3d/dili.py | list_contains | def list_contains(list_of_strings, substring, return_true_false_array=False):
""" Get strings in list which contains substring.
"""
key_tf = [keyi.find(substring) != -1 for keyi in list_of_strings]
if return_true_false_array:
return key_tf
keys_to_remove = list_of_strings[key_tf]
return keys_to_remove | python | def list_contains(list_of_strings, substring, return_true_false_array=False):
""" Get strings in list which contains substring.
"""
key_tf = [keyi.find(substring) != -1 for keyi in list_of_strings]
if return_true_false_array:
return key_tf
keys_to_remove = list_of_strings[key_tf]
return keys_to_remove | [
"def",
"list_contains",
"(",
"list_of_strings",
",",
"substring",
",",
"return_true_false_array",
"=",
"False",
")",
":",
"key_tf",
"=",
"[",
"keyi",
".",
"find",
"(",
"substring",
")",
"!=",
"-",
"1",
"for",
"keyi",
"in",
"list_of_strings",
"]",
"if",
"return_true_false_array",
":",
"return",
"key_tf",
"keys_to_remove",
"=",
"list_of_strings",
"[",
"key_tf",
"]",
"return",
"keys_to_remove"
] | Get strings in list which contains substring. | [
"Get",
"strings",
"in",
"list",
"which",
"contains",
"substring",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/dili.py#L176-L184 |
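
`list_of_strings[key_tf]` indexes with a boolean mask, so despite the name the helper expects an array-like input (for example a NumPy array or a pandas Index); a plain Python list would raise `TypeError`. A small sketch of the masking idea it relies on:

```python
import numpy as np

keys = np.array(["time_start", "time_stop", "volume", "surface"])
mask = [key.find("time") != -1 for key in keys]   # True where the substring occurs

print(mask)        # [True, True, False, False]
print(keys[mask])  # ['time_start' 'time_stop']
```
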
mjirik/io3d | io3d/dili.py | df_drop_duplicates | def df_drop_duplicates(df, ignore_key_pattern="time"):
"""
Drop duplicates from dataframe ignore columns with keys containing defined pattern.
:param df:
:param noinfo_key_pattern:
:return:
"""
keys_to_remove = list_contains(df.keys(), ignore_key_pattern)
#key_tf = [key.find(noinfo_key_pattern) != -1 for key in df.keys()]
# keys_to_remove
# remove duplicates
ks = copy.copy(list(df.keys()))
for key in keys_to_remove:
ks.remove(key)
df = df.drop_duplicates(ks)
return df | python | def df_drop_duplicates(df, ignore_key_pattern="time"):
"""
Drop duplicates from dataframe ignore columns with keys containing defined pattern.
:param df:
:param noinfo_key_pattern:
:return:
"""
keys_to_remove = list_contains(df.keys(), ignore_key_pattern)
#key_tf = [key.find(noinfo_key_pattern) != -1 for key in df.keys()]
# keys_to_remove
# remove duplicates
ks = copy.copy(list(df.keys()))
for key in keys_to_remove:
ks.remove(key)
df = df.drop_duplicates(ks)
return df | [
"def",
"df_drop_duplicates",
"(",
"df",
",",
"ignore_key_pattern",
"=",
"\"time\"",
")",
":",
"keys_to_remove",
"=",
"list_contains",
"(",
"df",
".",
"keys",
"(",
")",
",",
"ignore_key_pattern",
")",
"#key_tf = [key.find(noinfo_key_pattern) != -1 for key in df.keys()]",
"# keys_to_remove",
"# remove duplicates",
"ks",
"=",
"copy",
".",
"copy",
"(",
"list",
"(",
"df",
".",
"keys",
"(",
")",
")",
")",
"for",
"key",
"in",
"keys_to_remove",
":",
"ks",
".",
"remove",
"(",
"key",
")",
"df",
"=",
"df",
".",
"drop_duplicates",
"(",
"ks",
")",
"return",
"df"
] | Drop duplicates from dataframe ignore columns with keys containing defined pattern.
:param df:
:param noinfo_key_pattern:
:return: | [
"Drop",
"duplicates",
"from",
"dataframe",
"ignore",
"columns",
"with",
"keys",
"containing",
"defined",
"pattern",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/dili.py#L187-L205 |
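
A sketch of the same de-duplication idea on a small made-up DataFrame: rows that differ only in a timing column collapse to one row. It calls pandas directly so it stays self-contained:

```python
import pandas as pd

df = pd.DataFrame({
    "patient": ["p1", "p1", "p2"],
    "volume": [10.0, 10.0, 12.5],
    "processing_time": [0.31, 0.35, 0.28],   # only this column differs for p1
})

keys = [key for key in df.keys() if "time" not in key]   # ignore timing columns
print(df.drop_duplicates(keys))                          # the second p1 row is dropped
```
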
mjirik/io3d | io3d/dili.py | ndarray_to_list_in_structure | def ndarray_to_list_in_structure(item, squeeze=True):
""" Change ndarray in structure of lists and dicts into lists.
"""
tp = type(item)
if tp == np.ndarray:
if squeeze:
item = item.squeeze()
item = item.tolist()
elif tp == list:
for i in range(len(item)):
item[i] = ndarray_to_list_in_structure(item[i])
elif tp == dict:
for lab in item:
item[lab] = ndarray_to_list_in_structure(item[lab])
return item | python | def ndarray_to_list_in_structure(item, squeeze=True):
""" Change ndarray in structure of lists and dicts into lists.
"""
tp = type(item)
if tp == np.ndarray:
if squeeze:
item = item.squeeze()
item = item.tolist()
elif tp == list:
for i in range(len(item)):
item[i] = ndarray_to_list_in_structure(item[i])
elif tp == dict:
for lab in item:
item[lab] = ndarray_to_list_in_structure(item[lab])
return item | [
"def",
"ndarray_to_list_in_structure",
"(",
"item",
",",
"squeeze",
"=",
"True",
")",
":",
"tp",
"=",
"type",
"(",
"item",
")",
"if",
"tp",
"==",
"np",
".",
"ndarray",
":",
"if",
"squeeze",
":",
"item",
"=",
"item",
".",
"squeeze",
"(",
")",
"item",
"=",
"item",
".",
"tolist",
"(",
")",
"elif",
"tp",
"==",
"list",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"item",
")",
")",
":",
"item",
"[",
"i",
"]",
"=",
"ndarray_to_list_in_structure",
"(",
"item",
"[",
"i",
"]",
")",
"elif",
"tp",
"==",
"dict",
":",
"for",
"lab",
"in",
"item",
":",
"item",
"[",
"lab",
"]",
"=",
"ndarray_to_list_in_structure",
"(",
"item",
"[",
"lab",
"]",
")",
"return",
"item"
] | Change ndarray in structure of lists and dicts into lists. | [
"Change",
"ndarray",
"in",
"structure",
"of",
"lists",
"and",
"dicts",
"into",
"lists",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/dili.py#L208-L224 |
mjirik/io3d | io3d/dili.py | dict_find_key | def dict_find_key(dd, value):
""" Find first suitable key in dict.
:param dd:
:param value:
:return:
"""
key = next(key for key, val in dd.items() if val == value)
return key | python | def dict_find_key(dd, value):
""" Find first suitable key in dict.
:param dd:
:param value:
:return:
"""
key = next(key for key, val in dd.items() if val == value)
return key | [
"def",
"dict_find_key",
"(",
"dd",
",",
"value",
")",
":",
"key",
"=",
"next",
"(",
"key",
"for",
"key",
",",
"val",
"in",
"dd",
".",
"items",
"(",
")",
"if",
"val",
"==",
"value",
")",
"return",
"key"
] | Find first suitable key in dict.
:param dd:
:param value:
:return: | [
"Find",
"first",
"suitable",
"key",
"in",
"dict",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/dili.py#L227-L235 |
mjirik/io3d | io3d/dili.py | sort_list_of_dicts | def sort_list_of_dicts(lst_of_dct, keys, reverse=False, **sort_args):
"""
Sort list of dicts by one or multiple keys.
If the key is not available, sort these to the end.
:param lst_of_dct: input structure. List of dicts.
:param keys: one or more keys in list
:param reverse:
:param sort_args:
:return:
"""
if type(keys) != list:
keys = [keys]
# dcmdir = lst_of_dct[:]
# lst_of_dct.sort(key=lambda x: [x[key] for key in keys], reverse=reverse, **sort_args)
lst_of_dct.sort(key=lambda x: [((False, x[key]) if key in x else (True, 0)) for key in keys], reverse=reverse, **sort_args)
return lst_of_dct | python | def sort_list_of_dicts(lst_of_dct, keys, reverse=False, **sort_args):
"""
Sort list of dicts by one or multiple keys.
If the key is not available, sort these to the end.
:param lst_of_dct: input structure. List of dicts.
:param keys: one or more keys in list
:param reverse:
:param sort_args:
:return:
"""
if type(keys) != list:
keys = [keys]
# dcmdir = lst_of_dct[:]
# lst_of_dct.sort(key=lambda x: [x[key] for key in keys], reverse=reverse, **sort_args)
lst_of_dct.sort(key=lambda x: [((False, x[key]) if key in x else (True, 0)) for key in keys], reverse=reverse, **sort_args)
return lst_of_dct | [
"def",
"sort_list_of_dicts",
"(",
"lst_of_dct",
",",
"keys",
",",
"reverse",
"=",
"False",
",",
"*",
"*",
"sort_args",
")",
":",
"if",
"type",
"(",
"keys",
")",
"!=",
"list",
":",
"keys",
"=",
"[",
"keys",
"]",
"# dcmdir = lst_of_dct[:]",
"# lst_of_dct.sort(key=lambda x: [x[key] for key in keys], reverse=reverse, **sort_args)",
"lst_of_dct",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"[",
"(",
"(",
"False",
",",
"x",
"[",
"key",
"]",
")",
"if",
"key",
"in",
"x",
"else",
"(",
"True",
",",
"0",
")",
")",
"for",
"key",
"in",
"keys",
"]",
",",
"reverse",
"=",
"reverse",
",",
"*",
"*",
"sort_args",
")",
"return",
"lst_of_dct"
] | Sort list of dicts by one or multiple keys.
If the key is not available, sort these to the end.
:param lst_of_dct: input structure. List of dicts.
:param keys: one or more keys in list
:param reverse:
:param sort_args:
:return: | [
"Sort",
"list",
"of",
"dicts",
"by",
"one",
"or",
"multiple",
"keys",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/dili.py#L238-L256 |
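A sketch of the sort above: the `(False, value)` / `(True, 0)` tuples built in the lambda push records that lack the key to the end.
```python
from io3d.dili import sort_list_of_dicts  # assumed import path

records = [{"SeriesNumber": 3}, {"AcquisitionTime": "10:01"}, {"SeriesNumber": 1}]
sort_list_of_dicts(records, "SeriesNumber")
# -> [{"SeriesNumber": 1}, {"SeriesNumber": 3}, {"AcquisitionTime": "10:01"}]
```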
mjirik/io3d | io3d/dili.py | ordered_dict_to_dict | def ordered_dict_to_dict(config):
"""
Use dict instead of ordered dict in structure.
"""
if type(config) == collections.OrderedDict:
config = dict(config)
if type(config) == list:
for i in range(0, len(config)):
config[i] = ordered_dict_to_dict(config[i])
elif type(config) == dict:
for key in config:
config[key] = ordered_dict_to_dict(config[key])
return config | python | def ordered_dict_to_dict(config):
"""
Use dict instead of ordered dict in structure.
"""
if type(config) == collections.OrderedDict:
config = dict(config)
if type(config) == list:
for i in range(0, len(config)):
config[i] = ordered_dict_to_dict(config[i])
elif type(config) == dict:
for key in config:
config[key] = ordered_dict_to_dict(config[key])
return config | [
"def",
"ordered_dict_to_dict",
"(",
"config",
")",
":",
"if",
"type",
"(",
"config",
")",
"==",
"collections",
".",
"OrderedDict",
":",
"config",
"=",
"dict",
"(",
"config",
")",
"if",
"type",
"(",
"config",
")",
"==",
"list",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"config",
")",
")",
":",
"config",
"[",
"i",
"]",
"=",
"ordered_dict_to_dict",
"(",
"config",
"[",
"i",
"]",
")",
"elif",
"type",
"(",
"config",
")",
"==",
"dict",
":",
"for",
"key",
"in",
"config",
":",
"config",
"[",
"key",
"]",
"=",
"ordered_dict_to_dict",
"(",
"config",
"[",
"key",
"]",
")",
"return",
"config"
] | Use dict instead of ordered dict in structure. | [
"Use",
"dict",
"instead",
"of",
"ordered",
"dict",
"in",
"structure",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/dili.py#L258-L272 |
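A minimal example of the conversion above; nested `OrderedDict` values are handled by the recursion into the dict branch.
```python
from collections import OrderedDict
from io3d.dili import ordered_dict_to_dict  # assumed import path

cfg = OrderedDict([("voxelsize_mm", OrderedDict([("z", 5.0)]))])
ordered_dict_to_dict(cfg)   # -> {"voxelsize_mm": {"z": 5.0}}
```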
SetBased/py-stratum | pystratum/command/WrapperCommand.py | WrapperCommand.run_command | def run_command(self, config_file):
"""
:param str config_file: The name of config file.
"""
config = configparser.ConfigParser()
config.read(config_file)
rdbms = config.get('database', 'rdbms').lower()
wrapper = self.create_routine_wrapper_generator(rdbms)
wrapper.main(config_file) | python | def run_command(self, config_file):
"""
:param str config_file: The name of config file.
"""
config = configparser.ConfigParser()
config.read(config_file)
rdbms = config.get('database', 'rdbms').lower()
wrapper = self.create_routine_wrapper_generator(rdbms)
wrapper.main(config_file) | [
"def",
"run_command",
"(",
"self",
",",
"config_file",
")",
":",
"config",
"=",
"configparser",
".",
"ConfigParser",
"(",
")",
"config",
".",
"read",
"(",
"config_file",
")",
"rdbms",
"=",
"config",
".",
"get",
"(",
"'database'",
",",
"'rdbms'",
")",
".",
"lower",
"(",
")",
"wrapper",
"=",
"self",
".",
"create_routine_wrapper_generator",
"(",
"rdbms",
")",
"wrapper",
".",
"main",
"(",
"config_file",
")"
] | :param str config_file: The name of config file. | [
":",
"param",
"str",
"config_file",
":",
"The",
"name",
"of",
"config",
"file",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/command/WrapperCommand.py#L38-L48 |
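A hedged sketch of driving `run_command`; the config file name and the way the command object is constructed are assumptions, only the `[database] rdbms` option is taken from the code above.
```python
# etc/stratum.cfg (placeholder name) would contain at least:
#   [database]
#   rdbms = mysql
cmd = WrapperCommand()              # construction details are an assumption
cmd.run_command('etc/stratum.cfg')  # dispatches to the MySQL wrapper generator
```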
SetBased/py-stratum | pystratum/command/WrapperCommand.py | WrapperCommand.create_routine_wrapper_generator | def create_routine_wrapper_generator(self, rdbms):
"""
Factory for creating a Constants objects (i.e. objects for generating a class with wrapper methods for calling
stored routines in a database).
:param str rdbms: The target RDBMS (i.e. mysql, mssql or pgsql).
:rtype: pystratum.RoutineWrapperGenerator.RoutineWrapperGenerator
"""
# Note: We load modules and classes dynamically such that on the end user's system only the required modules
# and other dependencies for the targeted RDBMS must be installed (and required modules and other
# dependencies for the other RDBMSs are not required).
if rdbms == 'mysql':
module = locate('pystratum_mysql.MySqlRoutineWrapperGenerator')
return module.MySqlRoutineWrapperGenerator(self.output)
if rdbms == 'mssql':
module = locate('pystratum_mssql.MsSqlRoutineWrapperGenerator')
return module.MsSqlRoutineWrapperGenerator(self.output)
if rdbms == 'pgsql':
module = locate('pystratum_pgsql.PgSqlRoutineWrapperGenerator')
return module.PgSqlRoutineWrapperGenerator(self.output)
raise Exception("Unknown RDBMS '{0!s}'.".format(rdbms)) | python | def create_routine_wrapper_generator(self, rdbms):
"""
Factory for creating a Constants objects (i.e. objects for generating a class with wrapper methods for calling
stored routines in a database).
:param str rdbms: The target RDBMS (i.e. mysql, mssql or pgsql).
:rtype: pystratum.RoutineWrapperGenerator.RoutineWrapperGenerator
"""
# Note: We load modules and classes dynamically such that on the end user's system only the required modules
# and other dependencies for the targeted RDBMS must be installed (and required modules and other
# dependencies for the other RDBMSs are not required).
if rdbms == 'mysql':
module = locate('pystratum_mysql.MySqlRoutineWrapperGenerator')
return module.MySqlRoutineWrapperGenerator(self.output)
if rdbms == 'mssql':
module = locate('pystratum_mssql.MsSqlRoutineWrapperGenerator')
return module.MsSqlRoutineWrapperGenerator(self.output)
if rdbms == 'pgsql':
module = locate('pystratum_pgsql.PgSqlRoutineWrapperGenerator')
return module.PgSqlRoutineWrapperGenerator(self.output)
raise Exception("Unknown RDBMS '{0!s}'.".format(rdbms)) | [
"def",
"create_routine_wrapper_generator",
"(",
"self",
",",
"rdbms",
")",
":",
"# Note: We load modules and classes dynamically such that on the end user's system only the required modules",
"# and other dependencies for the targeted RDBMS must be installed (and required modules and other",
"# dependencies for the other RDBMSs are not required).",
"if",
"rdbms",
"==",
"'mysql'",
":",
"module",
"=",
"locate",
"(",
"'pystratum_mysql.MySqlRoutineWrapperGenerator'",
")",
"return",
"module",
".",
"MySqlRoutineWrapperGenerator",
"(",
"self",
".",
"output",
")",
"if",
"rdbms",
"==",
"'mssql'",
":",
"module",
"=",
"locate",
"(",
"'pystratum_mssql.MsSqlRoutineWrapperGenerator'",
")",
"return",
"module",
".",
"MsSqlRoutineWrapperGenerator",
"(",
"self",
".",
"output",
")",
"if",
"rdbms",
"==",
"'pgsql'",
":",
"module",
"=",
"locate",
"(",
"'pystratum_pgsql.PgSqlRoutineWrapperGenerator'",
")",
"return",
"module",
".",
"PgSqlRoutineWrapperGenerator",
"(",
"self",
".",
"output",
")",
"raise",
"Exception",
"(",
"\"Unknown RDBMS '{0!s}'.\"",
".",
"format",
"(",
"rdbms",
")",
")"
] | Factory for creating a Constants objects (i.e. objects for generating a class with wrapper methods for calling
stored routines in a database).
:param str rdbms: The target RDBMS (i.e. mysql, mssql or pgsql).
:rtype: pystratum.RoutineWrapperGenerator.RoutineWrapperGenerator | [
"Factory",
"for",
"creating",
"a",
"Constants",
"objects",
"(",
"i",
".",
"e",
".",
"objects",
"for",
"generating",
"a",
"class",
"with",
"wrapper",
"methods",
"for",
"calling",
"stored",
"routines",
"in",
"a",
"database",
")",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/command/WrapperCommand.py#L51-L76 |
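The factory above leans on `locate` (presumably `pydoc.locate`) so that only the configured backend has to be installed; a small illustration of that lazy lookup, independent of pystratum itself:
```python
from pydoc import locate

# Resolves to the pystratum_mysql.MySqlRoutineWrapperGenerator module if the
# package is installed, or None otherwise; the class of the same name is then
# read from it, exactly as the method above does.
module = locate('pystratum_mysql.MySqlRoutineWrapperGenerator')
if module is not None:
    generator_cls = module.MySqlRoutineWrapperGenerator
```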
SetBased/py-stratum | pystratum/Connection.py | Connection._get_option | def _get_option(config, supplement, section, option, fallback=None):
"""
Reads an option for a configuration file.
:param configparser.ConfigParser config: The main config file.
:param configparser.ConfigParser supplement: The supplement config file.
:param str section: The name of the section op the option.
:param str option: The name of the option.
:param str|None fallback: The fallback value of the option if it is not set in either configuration files.
:rtype: str
:raise KeyError:
"""
if supplement:
return_value = supplement.get(section, option, fallback=config.get(section, option, fallback=fallback))
else:
return_value = config.get(section, option, fallback=fallback)
if fallback is None and return_value is None:
raise KeyError("Option '{0!s}' is not found in section '{1!s}'.".format(option, section))
return return_value | python | def _get_option(config, supplement, section, option, fallback=None):
"""
Reads an option for a configuration file.
:param configparser.ConfigParser config: The main config file.
:param configparser.ConfigParser supplement: The supplement config file.
:param str section: The name of the section op the option.
:param str option: The name of the option.
:param str|None fallback: The fallback value of the option if it is not set in either configuration files.
:rtype: str
:raise KeyError:
"""
if supplement:
return_value = supplement.get(section, option, fallback=config.get(section, option, fallback=fallback))
else:
return_value = config.get(section, option, fallback=fallback)
if fallback is None and return_value is None:
raise KeyError("Option '{0!s}' is not found in section '{1!s}'.".format(option, section))
return return_value | [
"def",
"_get_option",
"(",
"config",
",",
"supplement",
",",
"section",
",",
"option",
",",
"fallback",
"=",
"None",
")",
":",
"if",
"supplement",
":",
"return_value",
"=",
"supplement",
".",
"get",
"(",
"section",
",",
"option",
",",
"fallback",
"=",
"config",
".",
"get",
"(",
"section",
",",
"option",
",",
"fallback",
"=",
"fallback",
")",
")",
"else",
":",
"return_value",
"=",
"config",
".",
"get",
"(",
"section",
",",
"option",
",",
"fallback",
"=",
"fallback",
")",
"if",
"fallback",
"is",
"None",
"and",
"return_value",
"is",
"None",
":",
"raise",
"KeyError",
"(",
"\"Option '{0!s}' is not found in section '{1!s}'.\"",
".",
"format",
"(",
"option",
",",
"section",
")",
")",
"return",
"return_value"
] | Reads an option for a configuration file.
:param configparser.ConfigParser config: The main config file.
:param configparser.ConfigParser supplement: The supplement config file.
:param str section: The name of the section op the option.
:param str option: The name of the option.
:param str|None fallback: The fallback value of the option if it is not set in either configuration files.
:rtype: str
:raise KeyError: | [
"Reads",
"an",
"option",
"for",
"a",
"configuration",
"file",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/Connection.py#L28-L50 |
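A sketch of the precedence implemented above, built with plain `ConfigParser` objects; calling the helper through the class assumes it is exposed as a static method.
```python
from configparser import ConfigParser

main = ConfigParser()
main.read_string("[database]\nhost = prod.example.com\n")
supplement = ConfigParser()
supplement.read_string("[database]\nhost = localhost\n")

Connection._get_option(main, supplement, 'database', 'host')    # -> 'localhost' (supplement wins)
Connection._get_option(main, None, 'database', 'host')          # -> 'prod.example.com'
Connection._get_option(main, None, 'database', 'port', '3306')  # -> '3306' (fallback)
Connection._get_option(main, None, 'database', 'user')          # raises KeyError
```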
SetBased/py-stratum | pystratum/Connection.py | Connection._read_configuration | def _read_configuration(config_filename):
"""
Checks the supplement file.
:param str config_filename: The name of the configuration file.
:rtype: (configparser.ConfigParser,configparser.ConfigParser)
"""
config = ConfigParser()
config.read(config_filename)
if 'supplement' in config['database']:
path = os.path.dirname(config_filename) + '/' + config.get('database', 'supplement')
config_supplement = ConfigParser()
config_supplement.read(path)
else:
config_supplement = None
return config, config_supplement | python | def _read_configuration(config_filename):
"""
Checks the supplement file.
:param str config_filename: The name of the configuration file.
:rtype: (configparser.ConfigParser,configparser.ConfigParser)
"""
config = ConfigParser()
config.read(config_filename)
if 'supplement' in config['database']:
path = os.path.dirname(config_filename) + '/' + config.get('database', 'supplement')
config_supplement = ConfigParser()
config_supplement.read(path)
else:
config_supplement = None
return config, config_supplement | [
"def",
"_read_configuration",
"(",
"config_filename",
")",
":",
"config",
"=",
"ConfigParser",
"(",
")",
"config",
".",
"read",
"(",
"config_filename",
")",
"if",
"'supplement'",
"in",
"config",
"[",
"'database'",
"]",
":",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"config_filename",
")",
"+",
"'/'",
"+",
"config",
".",
"get",
"(",
"'database'",
",",
"'supplement'",
")",
"config_supplement",
"=",
"ConfigParser",
"(",
")",
"config_supplement",
".",
"read",
"(",
"path",
")",
"else",
":",
"config_supplement",
"=",
"None",
"return",
"config",
",",
"config_supplement"
] | Checks the supplement file.
:param str config_filename: The name of the configuration file.
:rtype: (configparser.ConfigParser,configparser.ConfigParser) | [
"Checks",
"the",
"supplement",
"file",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/Connection.py#L54-L72 |
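A sketch of the supplement mechanism; both file names are placeholders and the files must exist on disk.
```python
# etc/stratum.cfg (placeholder):
#   [database]
#   rdbms      = pgsql
#   supplement = credentials.cfg   # optional; resolved relative to the main file
config, config_supplement = Connection._read_configuration('etc/stratum.cfg')
# config_supplement is None when no 'supplement' option is present
```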
vsoch/helpme | helpme/action/record.py | record_asciinema | def record_asciinema():
'''a wrapper around generation of an asciinema.api.Api and a custom
recorder to pull out the input arguments to the Record from argparse.
The function generates a filename in advance and a return code
so we can check the final status.
'''
import asciinema.config as aconfig
from asciinema.api import Api
# Load the API class
cfg = aconfig.load()
api = Api(cfg.api_url, os.environ.get("USER"), cfg.install_id)
# Create dummy class to pass in as args
recorder = HelpMeRecord(api)
code = recorder.execute()
if code == 0 and os.path.exists(recorder.filename):
return recorder.filename
print('Problem generating %s, return code %s' %(recorder.filename, code)) | python | def record_asciinema():
'''a wrapper around generation of an asciinema.api.Api and a custom
recorder to pull out the input arguments to the Record from argparse.
The function generates a filename in advance and a return code
so we can check the final status.
'''
import asciinema.config as aconfig
from asciinema.api import Api
# Load the API class
cfg = aconfig.load()
api = Api(cfg.api_url, os.environ.get("USER"), cfg.install_id)
# Create dummy class to pass in as args
recorder = HelpMeRecord(api)
code = recorder.execute()
if code == 0 and os.path.exists(recorder.filename):
return recorder.filename
print('Problem generating %s, return code %s' %(recorder.filename, code)) | [
"def",
"record_asciinema",
"(",
")",
":",
"import",
"asciinema",
".",
"config",
"as",
"aconfig",
"from",
"asciinema",
".",
"api",
"import",
"Api",
"# Load the API class",
"cfg",
"=",
"aconfig",
".",
"load",
"(",
")",
"api",
"=",
"Api",
"(",
"cfg",
".",
"api_url",
",",
"os",
".",
"environ",
".",
"get",
"(",
"\"USER\"",
")",
",",
"cfg",
".",
"install_id",
")",
"# Create dummy class to pass in as args",
"recorder",
"=",
"HelpMeRecord",
"(",
"api",
")",
"code",
"=",
"recorder",
".",
"execute",
"(",
")",
"if",
"code",
"==",
"0",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"recorder",
".",
"filename",
")",
":",
"return",
"recorder",
".",
"filename",
"print",
"(",
"'Problem generating %s, return code %s'",
"%",
"(",
"recorder",
".",
"filename",
",",
"code",
")",
")"
] | a wrapper around generation of an asciinema.api.Api and a custom
recorder to pull out the input arguments to the Record from argparse.
The function generates a filename in advance and a return code
so we can check the final status. | [
"a",
"wrapper",
"around",
"generation",
"of",
"an",
"asciinema",
".",
"api",
".",
"Api",
"and",
"a",
"custom",
"recorder",
"to",
"pull",
"out",
"the",
"input",
"arguments",
"to",
"the",
"Record",
"from",
"argparse",
".",
"The",
"function",
"generates",
"a",
"filename",
"in",
"advance",
"and",
"a",
"return",
"code",
"so",
"we",
"can",
"check",
"the",
"final",
"status",
"."
] | train | https://github.com/vsoch/helpme/blob/e609172260b10cddadb2d2023ab26da8082a9feb/helpme/action/record.py#L80-L101 |
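A minimal sketch of calling the wrapper above; it returns the path of the recorded cast on success and implicitly returns None after printing the problem otherwise.
```python
asciicast = record_asciinema()
if asciicast is not None:
    print("terminal recording saved to", asciicast)
```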
bpannier/simpletr64 | simpletr64/actions/fritz.py | Fritz.sendWakeOnLan | def sendWakeOnLan(self, macAddress, lanInterfaceId=1, timeout=1):
"""Send a wake up package to a device specified by its MAC address.
:param str macAddress: MAC address in the form ``38:C9:86:26:7E:38``; be aware that the MAC address might
be case sensitive, depending on the router
:param int lanInterfaceId: the id of the LAN interface
:param float timeout: the timeout to wait for the action to be executed
:return: the amount of known hosts.
:rtype: int
.. seealso:: :meth:`~simpletr64.actions.Lan.getHostDetailsByMACAddress`
"""
namespace = Fritz.getServiceType("sendWakeOnLan") + str(lanInterfaceId)
uri = self.getControlURL(namespace)
self.execute(uri, namespace, "X_AVM-DE_WakeOnLANByMACAddress", timeout=timeout,
NewMACAddress=macAddress) | python | def sendWakeOnLan(self, macAddress, lanInterfaceId=1, timeout=1):
"""Send a wake up package to a device specified by its MAC address.
:param str macAddress: MAC address in the form ``38:C9:86:26:7E:38``; be aware that the MAC address might
be case sensitive, depending on the router
:param int lanInterfaceId: the id of the LAN interface
:param float timeout: the timeout to wait for the action to be executed
:return: the amount of known hosts.
:rtype: int
.. seealso:: :meth:`~simpletr64.actions.Lan.getHostDetailsByMACAddress`
"""
namespace = Fritz.getServiceType("sendWakeOnLan") + str(lanInterfaceId)
uri = self.getControlURL(namespace)
self.execute(uri, namespace, "X_AVM-DE_WakeOnLANByMACAddress", timeout=timeout,
NewMACAddress=macAddress) | [
"def",
"sendWakeOnLan",
"(",
"self",
",",
"macAddress",
",",
"lanInterfaceId",
"=",
"1",
",",
"timeout",
"=",
"1",
")",
":",
"namespace",
"=",
"Fritz",
".",
"getServiceType",
"(",
"\"sendWakeOnLan\"",
")",
"+",
"str",
"(",
"lanInterfaceId",
")",
"uri",
"=",
"self",
".",
"getControlURL",
"(",
"namespace",
")",
"self",
".",
"execute",
"(",
"uri",
",",
"namespace",
",",
"\"X_AVM-DE_WakeOnLANByMACAddress\"",
",",
"timeout",
"=",
"timeout",
",",
"NewMACAddress",
"=",
"macAddress",
")"
] | Send a wake up package to a device specified by its MAC address.
:param str macAddress: MAC address in the form ``38:C9:86:26:7E:38``; be aware that the MAC address might
be case sensitive, depending on the router
:param int lanInterfaceId: the id of the LAN interface
:param float timeout: the timeout to wait for the action to be executed
:return: the amount of known hosts.
:rtype: int
.. seealso:: :meth:`~simpletr64.actions.Lan.getHostDetailsByMACAddress` | [
"Send",
"a",
"wake",
"up",
"package",
"to",
"a",
"device",
"specified",
"by",
"its",
"MAC",
"address",
"."
] | train | https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/actions/fritz.py#L91-L107 |
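A hedged usage sketch; `fritz` is assumed to be an already initialised `Fritz` device object with its TR-64 definitions loaded, and the MAC address is the placeholder from the docstring.
```python
fritz.sendWakeOnLan("38:C9:86:26:7E:38")                               # default LAN interface
fritz.sendWakeOnLan("38:C9:86:26:7E:38", lanInterfaceId=2, timeout=2)  # explicit interface and timeout
```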
bpannier/simpletr64 | simpletr64/actions/fritz.py | Fritz.doUpdate | def doUpdate(self, timeout=1):
"""Do a software update of the Fritz Box if available.
:param float timeout: the timeout to wait for the action to be executed
:return: a list of if an update was available and the update state (bool, str)
:rtype: tuple(bool, str)
"""
namespace = Fritz.getServiceType("doUpdate")
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "X_AVM-DE_DoUpdate", timeout=timeout)
return results["NewUpgradeAvailable"], results["NewX_AVM-DE_UpdateState"] | python | def doUpdate(self, timeout=1):
"""Do a software update of the Fritz Box if available.
:param float timeout: the timeout to wait for the action to be executed
:return: a list of if an update was available and the update state (bool, str)
:rtype: tuple(bool, str)
"""
namespace = Fritz.getServiceType("doUpdate")
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "X_AVM-DE_DoUpdate", timeout=timeout)
return results["NewUpgradeAvailable"], results["NewX_AVM-DE_UpdateState"] | [
"def",
"doUpdate",
"(",
"self",
",",
"timeout",
"=",
"1",
")",
":",
"namespace",
"=",
"Fritz",
".",
"getServiceType",
"(",
"\"doUpdate\"",
")",
"uri",
"=",
"self",
".",
"getControlURL",
"(",
"namespace",
")",
"results",
"=",
"self",
".",
"execute",
"(",
"uri",
",",
"namespace",
",",
"\"X_AVM-DE_DoUpdate\"",
",",
"timeout",
"=",
"timeout",
")",
"return",
"results",
"[",
"\"NewUpgradeAvailable\"",
"]",
",",
"results",
"[",
"\"NewX_AVM-DE_UpdateState\"",
"]"
] | Do a software update of the Fritz Box if available.
:param float timeout: the timeout to wait for the action to be executed
:return: a list of if an update was available and the update state (bool, str)
:rtype: tuple(bool, str) | [
"Do",
"a",
"software",
"update",
"of",
"the",
"Fritz",
"Box",
"if",
"available",
"."
] | train | https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/actions/fritz.py#L109-L121 |
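A sketch of unpacking the returned tuple; `fritz` is the same assumed device object as above.
```python
available, state = fritz.doUpdate()
if available:
    print("firmware update triggered, state:", state)
```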
bpannier/simpletr64 | simpletr64/actions/fritz.py | Fritz.isOptimizedForIPTV | def isOptimizedForIPTV(self, wifiInterfaceId=1, timeout=1):
"""Return if the Wifi interface is optimized for IP TV
:param int wifiInterfaceId: the id of the Wifi interface
:param float timeout: the timeout to wait for the action to be executed
:return: if the Wifi interface is optimized for IP TV
:rtype: bool
.. seealso:: :meth:`~simpletr64.actions.Fritz.setOptimizedForIPTV`
"""
namespace = Fritz.getServiceType("isOptimizedForIPTV") + str(wifiInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "X_AVM-DE_GetIPTVOptimized", timeout=timeout)
return bool(int(results["NewX_AVM-DE_IPTVoptimize"])) | python | def isOptimizedForIPTV(self, wifiInterfaceId=1, timeout=1):
"""Return if the Wifi interface is optimized for IP TV
:param int wifiInterfaceId: the id of the Wifi interface
:param float timeout: the timeout to wait for the action to be executed
:return: if the Wifi interface is optimized for IP TV
:rtype: bool
.. seealso:: :meth:`~simpletr64.actions.Fritz.setOptimizedForIPTV`
"""
namespace = Fritz.getServiceType("isOptimizedForIPTV") + str(wifiInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "X_AVM-DE_GetIPTVOptimized", timeout=timeout)
return bool(int(results["NewX_AVM-DE_IPTVoptimize"])) | [
"def",
"isOptimizedForIPTV",
"(",
"self",
",",
"wifiInterfaceId",
"=",
"1",
",",
"timeout",
"=",
"1",
")",
":",
"namespace",
"=",
"Fritz",
".",
"getServiceType",
"(",
"\"isOptimizedForIPTV\"",
")",
"+",
"str",
"(",
"wifiInterfaceId",
")",
"uri",
"=",
"self",
".",
"getControlURL",
"(",
"namespace",
")",
"results",
"=",
"self",
".",
"execute",
"(",
"uri",
",",
"namespace",
",",
"\"X_AVM-DE_GetIPTVOptimized\"",
",",
"timeout",
"=",
"timeout",
")",
"return",
"bool",
"(",
"int",
"(",
"results",
"[",
"\"NewX_AVM-DE_IPTVoptimize\"",
"]",
")",
")"
] | Return if the Wifi interface is optimized for IP TV
:param int wifiInterfaceId: the id of the Wifi interface
:param float timeout: the timeout to wait for the action to be executed
:return: if the Wifi interface is optimized for IP TV
:rtype: bool
.. seealso:: :meth:`~simpletr64.actions.Fritz.setOptimizedForIPTV` | [
"Return",
"if",
"the",
"Wifi",
"interface",
"is",
"optimized",
"for",
"IP",
"TV"
] | train | https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/actions/fritz.py#L123-L138 |
bpannier/simpletr64 | simpletr64/actions/fritz.py | Fritz.setOptimizedForIPTV | def setOptimizedForIPTV(self, status, wifiInterfaceId=1, timeout=1):
"""Set if the Wifi interface is optimized for IP TV
:param bool status: set if Wifi interface should be optimized
:param int wifiInterfaceId: the id of the Wifi interface
:param float timeout: the timeout to wait for the action to be executed
.. seealso:: :meth:`~simpletr64.actions.Fritz.isOptimizedForIPTV`
"""
namespace = Fritz.getServiceType("setOptimizedForIPTV") + str(wifiInterfaceId)
uri = self.getControlURL(namespace)
if status:
setStatus = 1
else:
setStatus = 0
arguments = {"timeout": timeout, "NewX_AVM-DE_IPTVoptimize": setStatus}
self.execute(uri, namespace, "X_AVM-DE_SetIPTVOptimized", **arguments) | python | def setOptimizedForIPTV(self, status, wifiInterfaceId=1, timeout=1):
"""Set if the Wifi interface is optimized for IP TV
:param bool status: set if Wifi interface should be optimized
:param int wifiInterfaceId: the id of the Wifi interface
:param float timeout: the timeout to wait for the action to be executed
.. seealso:: :meth:`~simpletr64.actions.Fritz.isOptimizedForIPTV`
"""
namespace = Fritz.getServiceType("setOptimizedForIPTV") + str(wifiInterfaceId)
uri = self.getControlURL(namespace)
if status:
setStatus = 1
else:
setStatus = 0
arguments = {"timeout": timeout, "NewX_AVM-DE_IPTVoptimize": setStatus}
self.execute(uri, namespace, "X_AVM-DE_SetIPTVOptimized", **arguments) | [
"def",
"setOptimizedForIPTV",
"(",
"self",
",",
"status",
",",
"wifiInterfaceId",
"=",
"1",
",",
"timeout",
"=",
"1",
")",
":",
"namespace",
"=",
"Fritz",
".",
"getServiceType",
"(",
"\"setOptimizedForIPTV\"",
")",
"+",
"str",
"(",
"wifiInterfaceId",
")",
"uri",
"=",
"self",
".",
"getControlURL",
"(",
"namespace",
")",
"if",
"status",
":",
"setStatus",
"=",
"1",
"else",
":",
"setStatus",
"=",
"0",
"arguments",
"=",
"{",
"\"timeout\"",
":",
"timeout",
",",
"\"NewX_AVM-DE_IPTVoptimize\"",
":",
"setStatus",
"}",
"self",
".",
"execute",
"(",
"uri",
",",
"namespace",
",",
"\"X_AVM-DE_SetIPTVOptimized\"",
",",
"*",
"*",
"arguments",
")"
] | Set if the Wifi interface is optimized for IP TV
:param bool status: set if Wifi interface should be optimized
:param int wifiInterfaceId: the id of the Wifi interface
:param float timeout: the timeout to wait for the action to be executed
.. seealso:: :meth:`~simpletr64.actions.Fritz.isOptimizedForIPTV` | [
"Set",
"if",
"the",
"Wifi",
"interface",
"is",
"optimized",
"for",
"IP",
"TV"
] | train | https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/actions/fritz.py#L140-L159 |
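A sketch pairing the getter and setter from the two records above; `fritz` is the same assumed device object.
```python
if not fritz.isOptimizedForIPTV(wifiInterfaceId=1):
    fritz.setOptimizedForIPTV(True, wifiInterfaceId=1)
```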
bpannier/simpletr64 | simpletr64/actions/fritz.py | Fritz.getCallList | def getCallList(self, timeout=1):
"""Get the list of phone calls made
Example of a phone call result:
::
[{'Count': None, 'Name': None, 'CalledNumber': '030868709971', 'Numbertype': 'sip', 'Duration': '0:01',
'Caller': '015155255399', 'Called': 'SIP: 030868729971', 'Date': '02.01.14 13:14',
'Device': 'Anrufbeantworter','Path': None, 'Port': '40', 'Type': '1', 'Id': '15'}]
Types:
* 1 - answered
* 2 - missed
* 3 - outgoing
:param float timeout: the timeout to wait for the action to be executed
:return: the list of made phone calls
:rtype: list[dict[str: str]]
"""
namespace = Fritz.getServiceType("getCallList")
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "GetCallList")
# setup proxies
proxies = {}
if self.httpProxy:
proxies = {"https": self.httpProxy}
if self.httpsProxy:
proxies = {"http": self.httpsProxy}
# get the content
request = requests.get(results["NewCallListURL"], proxies=proxies, timeout=float(timeout))
if request.status_code != 200:
errorStr = DeviceTR64._extractErrorString(request)
raise ValueError('Could not get CPE definitions "' + results["NewCallListURL"] + '" : ' +
str(request.status_code) + ' - ' + request.reason + " -- " + errorStr)
# parse xml
try:
root = ET.fromstring(request.text.encode('utf-8'))
except Exception as e:
raise ValueError("Could not parse call list '" + results["NewCallListURL"] + "': " + str(e))
calls = []
for child in root.getchildren():
if child.tag.lower() == "call":
callParameters = {}
for callChild in child.getchildren():
callParameters[callChild.tag] = callChild.text
calls.append(callParameters)
return calls | python | def getCallList(self, timeout=1):
"""Get the list of phone calls made
Example of a phone call result:
::
[{'Count': None, 'Name': None, 'CalledNumber': '030868709971', 'Numbertype': 'sip', 'Duration': '0:01',
'Caller': '015155255399', 'Called': 'SIP: 030868729971', 'Date': '02.01.14 13:14',
'Device': 'Anrufbeantworter','Path': None, 'Port': '40', 'Type': '1', 'Id': '15'}]
Types:
* 1 - answered
* 2 - missed
* 3 - outgoing
:param float timeout: the timeout to wait for the action to be executed
:return: the list of made phone calls
:rtype: list[dict[str: str]]
"""
namespace = Fritz.getServiceType("getCallList")
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "GetCallList")
# setup proxies
proxies = {}
if self.httpProxy:
proxies = {"https": self.httpProxy}
if self.httpsProxy:
proxies = {"http": self.httpsProxy}
# get the content
request = requests.get(results["NewCallListURL"], proxies=proxies, timeout=float(timeout))
if request.status_code != 200:
errorStr = DeviceTR64._extractErrorString(request)
raise ValueError('Could not get CPE definitions "' + results["NewCallListURL"] + '" : ' +
str(request.status_code) + ' - ' + request.reason + " -- " + errorStr)
# parse xml
try:
root = ET.fromstring(request.text.encode('utf-8'))
except Exception as e:
raise ValueError("Could not parse call list '" + results["NewCallListURL"] + "': " + str(e))
calls = []
for child in root.getchildren():
if child.tag.lower() == "call":
callParameters = {}
for callChild in child.getchildren():
callParameters[callChild.tag] = callChild.text
calls.append(callParameters)
return calls | [
"def",
"getCallList",
"(",
"self",
",",
"timeout",
"=",
"1",
")",
":",
"namespace",
"=",
"Fritz",
".",
"getServiceType",
"(",
"\"getCallList\"",
")",
"uri",
"=",
"self",
".",
"getControlURL",
"(",
"namespace",
")",
"results",
"=",
"self",
".",
"execute",
"(",
"uri",
",",
"namespace",
",",
"\"GetCallList\"",
")",
"# setup proxies",
"proxies",
"=",
"{",
"}",
"if",
"self",
".",
"httpProxy",
":",
"proxies",
"=",
"{",
"\"https\"",
":",
"self",
".",
"httpProxy",
"}",
"if",
"self",
".",
"httpsProxy",
":",
"proxies",
"=",
"{",
"\"http\"",
":",
"self",
".",
"httpsProxy",
"}",
"# get the content",
"request",
"=",
"requests",
".",
"get",
"(",
"results",
"[",
"\"NewCallListURL\"",
"]",
",",
"proxies",
"=",
"proxies",
",",
"timeout",
"=",
"float",
"(",
"timeout",
")",
")",
"if",
"request",
".",
"status_code",
"!=",
"200",
":",
"errorStr",
"=",
"DeviceTR64",
".",
"_extractErrorString",
"(",
"request",
")",
"raise",
"ValueError",
"(",
"'Could not get CPE definitions \"'",
"+",
"results",
"[",
"\"NewCallListURL\"",
"]",
"+",
"'\" : '",
"+",
"str",
"(",
"request",
".",
"status_code",
")",
"+",
"' - '",
"+",
"request",
".",
"reason",
"+",
"\" -- \"",
"+",
"errorStr",
")",
"# parse xml",
"try",
":",
"root",
"=",
"ET",
".",
"fromstring",
"(",
"request",
".",
"text",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"\"Could not parse call list '\"",
"+",
"results",
"[",
"\"NewCallListURL\"",
"]",
"+",
"\"': \"",
"+",
"str",
"(",
"e",
")",
")",
"calls",
"=",
"[",
"]",
"for",
"child",
"in",
"root",
".",
"getchildren",
"(",
")",
":",
"if",
"child",
".",
"tag",
".",
"lower",
"(",
")",
"==",
"\"call\"",
":",
"callParameters",
"=",
"{",
"}",
"for",
"callChild",
"in",
"child",
".",
"getchildren",
"(",
")",
":",
"callParameters",
"[",
"callChild",
".",
"tag",
"]",
"=",
"callChild",
".",
"text",
"calls",
".",
"append",
"(",
"callParameters",
")",
"return",
"calls"
] | Get the list of phone calls made
Example of a phone call result:
::
[{'Count': None, 'Name': None, 'CalledNumber': '030868709971', 'Numbertype': 'sip', 'Duration': '0:01',
'Caller': '015155255399', 'Called': 'SIP: 030868729971', 'Date': '02.01.14 13:14',
'Device': 'Anrufbeantworter','Path': None, 'Port': '40', 'Type': '1', 'Id': '15'}]
Types:
* 1 - answered
* 2 - missed
* 3 - outgoing
:param float timeout: the timeout to wait for the action to be executed
:return: the list of made phone calls
:rtype: list[dict[str: str]] | [
"Get",
"the",
"list",
"of",
"phone",
"calls",
"made"
] | train | https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/actions/fritz.py#L161-L221 |
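A sketch filtering the returned list with the `Type` codes documented above (2 = missed); `fritz` is the same assumed device object.
```python
calls = fritz.getCallList(timeout=5)
missed = [c for c in calls if c.get("Type") == "2"]
for call in missed:
    print(call.get("Date"), call.get("Caller"))
```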
metachris/RPIO | source/RPIO/__init__.py | add_tcp_callback | def add_tcp_callback(port, callback, threaded_callback=False):
"""
Adds a unix socket server callback, which will be invoked when values
arrive from a connected socket client. The callback must accept two
parameters, eg. ``def callback(socket, msg)``.
"""
_rpio.add_tcp_callback(port, callback, threaded_callback) | python | def add_tcp_callback(port, callback, threaded_callback=False):
"""
Adds a unix socket server callback, which will be invoked when values
arrive from a connected socket client. The callback must accept two
parameters, eg. ``def callback(socket, msg)``.
"""
_rpio.add_tcp_callback(port, callback, threaded_callback) | [
"def",
"add_tcp_callback",
"(",
"port",
",",
"callback",
",",
"threaded_callback",
"=",
"False",
")",
":",
"_rpio",
".",
"add_tcp_callback",
"(",
"port",
",",
"callback",
",",
"threaded_callback",
")"
] | Adds a unix socket server callback, which will be invoked when values
arrive from a connected socket client. The callback must accept two
parameters, eg. ``def callback(socket, msg)``. | [
"Adds",
"a",
"unix",
"socket",
"server",
"callback",
"which",
"will",
"be",
"invoked",
"when",
"values",
"arrive",
"from",
"a",
"connected",
"socket",
"client",
".",
"The",
"callback",
"must",
"accept",
"two",
"parameters",
"eg",
".",
"def",
"callback",
"(",
"socket",
"msg",
")",
"."
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/__init__.py#L194-L200 |
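A minimal server-side sketch for the record above; port 8080 is an arbitrary choice.
```python
import RPIO

def handle_message(sock, msg):
    # msg is the text received from the connected client
    print("received:", msg)

RPIO.add_tcp_callback(8080, handle_message, threaded_callback=True)
RPIO.wait_for_interrupts()   # the same epoll loop also services the socket server
```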
metachris/RPIO | source/RPIO/__init__.py | add_interrupt_callback | def add_interrupt_callback(gpio_id, callback, edge='both', \
pull_up_down=PUD_OFF, threaded_callback=False, \
debounce_timeout_ms=None):
"""
Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread.
If debounce_timeout_ms is set, new interrupts will not be forwarded
until after the specified amount of milliseconds.
"""
_rpio.add_interrupt_callback(gpio_id, callback, edge, pull_up_down, \
threaded_callback, debounce_timeout_ms) | python | def add_interrupt_callback(gpio_id, callback, edge='both', \
pull_up_down=PUD_OFF, threaded_callback=False, \
debounce_timeout_ms=None):
"""
Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread.
If debounce_timeout_ms is set, new interrupts will not be forwarded
until after the specified amount of milliseconds.
"""
_rpio.add_interrupt_callback(gpio_id, callback, edge, pull_up_down, \
threaded_callback, debounce_timeout_ms) | [
"def",
"add_interrupt_callback",
"(",
"gpio_id",
",",
"callback",
",",
"edge",
"=",
"'both'",
",",
"pull_up_down",
"=",
"PUD_OFF",
",",
"threaded_callback",
"=",
"False",
",",
"debounce_timeout_ms",
"=",
"None",
")",
":",
"_rpio",
".",
"add_interrupt_callback",
"(",
"gpio_id",
",",
"callback",
",",
"edge",
",",
"pull_up_down",
",",
"threaded_callback",
",",
"debounce_timeout_ms",
")"
] | Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread.
If debounce_timeout_ms is set, new interrupts will not be forwarded
until after the specified amount of milliseconds. | [
"Add",
"a",
"callback",
"to",
"be",
"executed",
"when",
"the",
"value",
"on",
"gpio_id",
"changes",
"to",
"the",
"edge",
"specified",
"via",
"the",
"edge",
"parameter",
"(",
"default",
"=",
"both",
")",
"."
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/__init__.py#L203-L220 |
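A usage sketch for the record above; GPIO 17 and the 100 ms debounce are arbitrary values.
```python
import RPIO

def gpio_callback(gpio_id, value):
    print("gpio %s: %s" % (gpio_id, value))

RPIO.add_interrupt_callback(17, gpio_callback, edge='rising',
                            pull_up_down=RPIO.PUD_DOWN,
                            debounce_timeout_ms=100)
RPIO.wait_for_interrupts()
```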
metachris/RPIO | source/RPIO/__init__.py | wait_for_interrupts | def wait_for_interrupts(threaded=False, epoll_timeout=1):
"""
Blocking loop to listen for GPIO interrupts and distribute them to
associated callbacks. epoll_timeout is an easy way to shutdown the
blocking function. Per default the timeout is set to 1 second; if
`_is_waiting_for_interrupts` is set to False the loop will exit.
If an exception occurs while waiting for interrupts, the interrupt
gpio interfaces will be cleaned up (/sys/class/gpio unexports). In
this case all interrupts will be reset and you'd need to add the
callbacks again before using `wait_for_interrupts(..)` again.
If the argument `threaded` is True, wait_for_interrupts will be
started in a daemon Thread. To quit it, call
`RPIO.stop_waiting_for_interrupts()`.
"""
if threaded:
t = Thread(target=_rpio.wait_for_interrupts, args=(epoll_timeout,))
t.daemon = True
t.start()
else:
_rpio.wait_for_interrupts(epoll_timeout) | python | def wait_for_interrupts(threaded=False, epoll_timeout=1):
"""
Blocking loop to listen for GPIO interrupts and distribute them to
associated callbacks. epoll_timeout is an easy way to shutdown the
blocking function. Per default the timeout is set to 1 second; if
`_is_waiting_for_interrupts` is set to False the loop will exit.
If an exception occurs while waiting for interrupts, the interrupt
gpio interfaces will be cleaned up (/sys/class/gpio unexports). In
this case all interrupts will be reset and you'd need to add the
callbacks again before using `wait_for_interrupts(..)` again.
If the argument `threaded` is True, wait_for_interrupts will be
started in a daemon Thread. To quit it, call
`RPIO.stop_waiting_for_interrupts()`.
"""
if threaded:
t = Thread(target=_rpio.wait_for_interrupts, args=(epoll_timeout,))
t.daemon = True
t.start()
else:
_rpio.wait_for_interrupts(epoll_timeout) | [
"def",
"wait_for_interrupts",
"(",
"threaded",
"=",
"False",
",",
"epoll_timeout",
"=",
"1",
")",
":",
"if",
"threaded",
":",
"t",
"=",
"Thread",
"(",
"target",
"=",
"_rpio",
".",
"wait_for_interrupts",
",",
"args",
"=",
"(",
"epoll_timeout",
",",
")",
")",
"t",
".",
"daemon",
"=",
"True",
"t",
".",
"start",
"(",
")",
"else",
":",
"_rpio",
".",
"wait_for_interrupts",
"(",
"epoll_timeout",
")"
] | Blocking loop to listen for GPIO interrupts and distribute them to
associated callbacks. epoll_timeout is an easy way to shutdown the
blocking function. Per default the timeout is set to 1 second; if
`_is_waiting_for_interrupts` is set to False the loop will exit.
If an exception occurs while waiting for interrupts, the interrupt
gpio interfaces will be cleaned up (/sys/class/gpio unexports). In
this case all interrupts will be reset and you'd need to add the
callbacks again before using `wait_for_interrupts(..)` again.
If the argument `threaded` is True, wait_for_interrupts will be
started in a daemon Thread. To quit it, call
`RPIO.stop_waiting_for_interrupts()`. | [
"Blocking",
"loop",
"to",
"listen",
"for",
"GPIO",
"interrupts",
"and",
"distribute",
"them",
"to",
"associated",
"callbacks",
".",
"epoll_timeout",
"is",
"an",
"easy",
"way",
"to",
"shutdown",
"the",
"blocking",
"function",
".",
"Per",
"default",
"the",
"timeout",
"is",
"set",
"to",
"1",
"second",
";",
"if",
"_is_waiting_for_interrupts",
"is",
"set",
"to",
"False",
"the",
"loop",
"will",
"exit",
"."
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/__init__.py#L233-L254 |
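The threaded mode described above keeps the main thread free; a brief sketch:
```python
RPIO.wait_for_interrupts(threaded=True)   # epoll loop runs in a daemon thread
# ... main program keeps running ...
RPIO.stop_waiting_for_interrupts()        # named in the docstring as the way to quit
```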
metachris/RPIO | fabfile.py | build_gpio | def build_gpio():
""" Builds source with Python 2.7 and 3.2, and tests import """
with cd("/tmp/source/c_gpio"):
test = "import _GPIO; print(_GPIO.VERSION_GPIO)"
run("make gpio2.7 && cp build/_GPIO.so .")
run('sudo python2.7 -c "%s"' % test)
run("cp _GPIO.so ../RPIO/")
run("cp _GPIO.so ../RPIO/_GPIO27.so")
run("make gpio3.2 && cp build/_GPIO.so .")
run('sudo python3.2 -c "%s"' % test)
run("mv _GPIO.so ../RPIO/_GPIO32.so") | python | def build_gpio():
""" Builds source with Python 2.7 and 3.2, and tests import """
with cd("/tmp/source/c_gpio"):
test = "import _GPIO; print(_GPIO.VERSION_GPIO)"
run("make gpio2.7 && cp build/_GPIO.so .")
run('sudo python2.7 -c "%s"' % test)
run("cp _GPIO.so ../RPIO/")
run("cp _GPIO.so ../RPIO/_GPIO27.so")
run("make gpio3.2 && cp build/_GPIO.so .")
run('sudo python3.2 -c "%s"' % test)
run("mv _GPIO.so ../RPIO/_GPIO32.so") | [
"def",
"build_gpio",
"(",
")",
":",
"with",
"cd",
"(",
"\"/tmp/source/c_gpio\"",
")",
":",
"test",
"=",
"\"import _GPIO; print(_GPIO.VERSION_GPIO)\"",
"run",
"(",
"\"make gpio2.7 && cp build/_GPIO.so .\"",
")",
"run",
"(",
"'sudo python2.7 -c \"%s\"'",
"%",
"test",
")",
"run",
"(",
"\"cp _GPIO.so ../RPIO/\"",
")",
"run",
"(",
"\"cp _GPIO.so ../RPIO/_GPIO27.so\"",
")",
"run",
"(",
"\"make gpio3.2 && cp build/_GPIO.so .\"",
")",
"run",
"(",
"'sudo python3.2 -c \"%s\"'",
"%",
"test",
")",
"run",
"(",
"\"mv _GPIO.so ../RPIO/_GPIO32.so\"",
")"
] | Builds source with Python 2.7 and 3.2, and tests import | [
"Builds",
"source",
"with",
"Python",
"2",
".",
"7",
"and",
"3",
".",
"2",
"and",
"tests",
"import"
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/fabfile.py#L107-L117 |
metachris/RPIO | fabfile.py | build_pwm | def build_pwm():
""" Builds source with Python 2.7 and 3.2, and tests import """
with cd("/tmp/source/c_pwm"):
test = "import _PWM; print(_PWM.VERSION)"
run("make py2.7")
run('sudo python2.7 -c "%s"' % test)
run("cp _PWM.so ../RPIO/PWM/")
run("mv _PWM.so ../RPIO/PWM/_PWM27.so")
run("make py3.2")
run('python3.2 -c "%s"' % test)
run("mv _PWM.so ../RPIO/PWM/_PWM32.so") | python | def build_pwm():
""" Builds source with Python 2.7 and 3.2, and tests import """
with cd("/tmp/source/c_pwm"):
test = "import _PWM; print(_PWM.VERSION)"
run("make py2.7")
run('sudo python2.7 -c "%s"' % test)
run("cp _PWM.so ../RPIO/PWM/")
run("mv _PWM.so ../RPIO/PWM/_PWM27.so")
run("make py3.2")
run('python3.2 -c "%s"' % test)
run("mv _PWM.so ../RPIO/PWM/_PWM32.so") | [
"def",
"build_pwm",
"(",
")",
":",
"with",
"cd",
"(",
"\"/tmp/source/c_pwm\"",
")",
":",
"test",
"=",
"\"import _PWM; print(_PWM.VERSION)\"",
"run",
"(",
"\"make py2.7\"",
")",
"run",
"(",
"'sudo python2.7 -c \"%s\"'",
"%",
"test",
")",
"run",
"(",
"\"cp _PWM.so ../RPIO/PWM/\"",
")",
"run",
"(",
"\"mv _PWM.so ../RPIO/PWM/_PWM27.so\"",
")",
"run",
"(",
"\"make py3.2\"",
")",
"run",
"(",
"'python3.2 -c \"%s\"'",
"%",
"test",
")",
"run",
"(",
"\"mv _PWM.so ../RPIO/PWM/_PWM32.so\"",
")"
] | Builds source with Python 2.7 and 3.2, and tests import | [
"Builds",
"source",
"with",
"Python",
"2",
".",
"7",
"and",
"3",
".",
"2",
"and",
"tests",
"import"
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/fabfile.py#L120-L130 |
metachris/RPIO | fabfile.py | upload_to_pypi | def upload_to_pypi():
""" Upload sdist and bdist_eggs to pypi """
# One more safety input and then we are ready to go :)
x = prompt("Are you sure to upload the current version to pypi?")
if not x or not x.lower() in ["y", "yes"]:
return
local("rm -rf dist")
local("python setup.py sdist")
version = _get_cur_version()
fn = "RPIO-%s.tar.gz" % version
put("dist/%s" % fn, "/tmp/")
with cd("/tmp"):
run("tar -xf /tmp/%s" % fn)
with cd("/tmp/RPIO-%s" % version):
run("python2.6 setup.py bdist_egg upload")
run("python2.7 setup.py bdist_egg upload")
run("python3.2 setup.py bdist_egg upload")
local("python setup.py sdist upload") | python | def upload_to_pypi():
""" Upload sdist and bdist_eggs to pypi """
# One more safety input and then we are ready to go :)
x = prompt("Are you sure to upload the current version to pypi?")
if not x or not x.lower() in ["y", "yes"]:
return
local("rm -rf dist")
local("python setup.py sdist")
version = _get_cur_version()
fn = "RPIO-%s.tar.gz" % version
put("dist/%s" % fn, "/tmp/")
with cd("/tmp"):
run("tar -xf /tmp/%s" % fn)
with cd("/tmp/RPIO-%s" % version):
run("python2.6 setup.py bdist_egg upload")
run("python2.7 setup.py bdist_egg upload")
run("python3.2 setup.py bdist_egg upload")
local("python setup.py sdist upload") | [
"def",
"upload_to_pypi",
"(",
")",
":",
"# One more safety input and then we are ready to go :)",
"x",
"=",
"prompt",
"(",
"\"Are you sure to upload the current version to pypi?\"",
")",
"if",
"not",
"x",
"or",
"not",
"x",
".",
"lower",
"(",
")",
"in",
"[",
"\"y\"",
",",
"\"yes\"",
"]",
":",
"return",
"local",
"(",
"\"rm -rf dist\"",
")",
"local",
"(",
"\"python setup.py sdist\"",
")",
"version",
"=",
"_get_cur_version",
"(",
")",
"fn",
"=",
"\"RPIO-%s.tar.gz\"",
"%",
"version",
"put",
"(",
"\"dist/%s\"",
"%",
"fn",
",",
"\"/tmp/\"",
")",
"with",
"cd",
"(",
"\"/tmp\"",
")",
":",
"run",
"(",
"\"tar -xf /tmp/%s\"",
"%",
"fn",
")",
"with",
"cd",
"(",
"\"/tmp/RPIO-%s\"",
"%",
"version",
")",
":",
"run",
"(",
"\"python2.6 setup.py bdist_egg upload\"",
")",
"run",
"(",
"\"python2.7 setup.py bdist_egg upload\"",
")",
"run",
"(",
"\"python3.2 setup.py bdist_egg upload\"",
")",
"local",
"(",
"\"python setup.py sdist upload\"",
")"
] | Upload sdist and bdist_eggs to pypi | [
"Upload",
"sdist",
"and",
"bdist_eggs",
"to",
"pypi"
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/fabfile.py#L181-L199 |
metachris/RPIO | source/RPIO/_RPIO.py | _threaded_callback | def _threaded_callback(callback, *args):
"""
Internal wrapper to start a callback in threaded mode. Using the
daemon mode to not block the main thread from exiting.
"""
t = Thread(target=callback, args=args)
t.daemon = True
t.start() | python | def _threaded_callback(callback, *args):
"""
Internal wrapper to start a callback in threaded mode. Using the
daemon mode to not block the main thread from exiting.
"""
t = Thread(target=callback, args=args)
t.daemon = True
t.start() | [
"def",
"_threaded_callback",
"(",
"callback",
",",
"*",
"args",
")",
":",
"t",
"=",
"Thread",
"(",
"target",
"=",
"callback",
",",
"args",
"=",
"args",
")",
"t",
".",
"daemon",
"=",
"True",
"t",
".",
"start",
"(",
")"
] | Internal wrapper to start a callback in threaded mode. Using the
daemon mode to not block the main thread from exiting. | [
"Internal",
"wrapper",
"to",
"start",
"a",
"callback",
"in",
"threaded",
"mode",
".",
"Using",
"the",
"daemon",
"mode",
"to",
"not",
"block",
"the",
"main",
"thread",
"from",
"exiting",
"."
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/_RPIO.py#L48-L55 |
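A small illustration of the helper above: the wrapped callback runs in a daemon thread so the caller is not blocked.
```python
def on_edge(gpio_id, value):
    print(gpio_id, value)

_threaded_callback(on_edge, 17, 1)   # returns immediately; on_edge(17, 1) runs in a daemon thread
```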
metachris/RPIO | source/RPIO/_RPIO.py | Interruptor.add_tcp_callback | def add_tcp_callback(self, port, callback, threaded_callback=False):
"""
Adds a unix socket server callback, which will be invoked when values
arrive from a connected socket client. The callback must accept two
parameters, eg. ``def callback(socket, msg)``.
"""
if not callback:
raise AttributeError("No callback")
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind((_TCP_SOCKET_HOST, port))
serversocket.listen(1)
serversocket.setblocking(0)
self._epoll.register(serversocket.fileno(), select.EPOLLIN)
# Prepare the callback (wrap in Thread if needed)
cb = callback if not threaded_callback else \
partial(_threaded_callback, callback)
self._tcp_server_sockets[serversocket.fileno()] = (serversocket, cb)
debug("Socket server started at port %s and callback added." % port) | python | def add_tcp_callback(self, port, callback, threaded_callback=False):
"""
Adds a unix socket server callback, which will be invoked when values
arrive from a connected socket client. The callback must accept two
parameters, eg. ``def callback(socket, msg)``.
"""
if not callback:
raise AttributeError("No callback")
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind((_TCP_SOCKET_HOST, port))
serversocket.listen(1)
serversocket.setblocking(0)
self._epoll.register(serversocket.fileno(), select.EPOLLIN)
# Prepare the callback (wrap in Thread if needed)
cb = callback if not threaded_callback else \
partial(_threaded_callback, callback)
self._tcp_server_sockets[serversocket.fileno()] = (serversocket, cb)
debug("Socket server started at port %s and callback added." % port) | [
"def",
"add_tcp_callback",
"(",
"self",
",",
"port",
",",
"callback",
",",
"threaded_callback",
"=",
"False",
")",
":",
"if",
"not",
"callback",
":",
"raise",
"AttributeError",
"(",
"\"No callback\"",
")",
"serversocket",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"serversocket",
".",
"setsockopt",
"(",
"socket",
".",
"SOL_SOCKET",
",",
"socket",
".",
"SO_REUSEADDR",
",",
"1",
")",
"serversocket",
".",
"bind",
"(",
"(",
"_TCP_SOCKET_HOST",
",",
"port",
")",
")",
"serversocket",
".",
"listen",
"(",
"1",
")",
"serversocket",
".",
"setblocking",
"(",
"0",
")",
"self",
".",
"_epoll",
".",
"register",
"(",
"serversocket",
".",
"fileno",
"(",
")",
",",
"select",
".",
"EPOLLIN",
")",
"# Prepare the callback (wrap in Thread if needed)",
"cb",
"=",
"callback",
"if",
"not",
"threaded_callback",
"else",
"partial",
"(",
"_threaded_callback",
",",
"callback",
")",
"self",
".",
"_tcp_server_sockets",
"[",
"serversocket",
".",
"fileno",
"(",
")",
"]",
"=",
"(",
"serversocket",
",",
"cb",
")",
"debug",
"(",
"\"Socket server started at port %s and callback added.\"",
"%",
"port",
")"
] | Adds a unix socket server callback, which will be invoked when values
arrive from a connected socket client. The callback must accept two
parameters, eg. ``def callback(socket, msg)``. | [
"Adds",
"a",
"unix",
"socket",
"server",
"callback",
"which",
"will",
"be",
"invoked",
"when",
"values",
"arrive",
"from",
"a",
"connected",
"socket",
"client",
".",
"The",
"callback",
"must",
"accept",
"two",
"parameters",
"eg",
".",
"def",
"callback",
"(",
"socket",
"msg",
")",
"."
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/_RPIO.py#L91-L112 |
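The server socket registered above can be exercised with any TCP client; a minimal counterpart sketch (host and port are placeholders).
```python
import socket

client = socket.create_connection(("localhost", 8080))
client.sendall(b"hello\n")   # delivered to the registered callback as (socket, msg)
client.close()
```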
metachris/RPIO | source/RPIO/_RPIO.py | Interruptor.add_interrupt_callback | def add_interrupt_callback(self, gpio_id, callback, edge='both',
pull_up_down=_GPIO.PUD_OFF, threaded_callback=False,
debounce_timeout_ms=None):
"""
Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread.
"""
gpio_id = _GPIO.channel_to_gpio(gpio_id)
debug("Adding callback for GPIO %s" % gpio_id)
if not edge in ["falling", "rising", "both", "none"]:
raise AttributeError("'%s' is not a valid edge." % edge)
if not pull_up_down in [_GPIO.PUD_UP, _GPIO.PUD_DOWN, _GPIO.PUD_OFF]:
raise AttributeError("'%s' is not a valid pull_up_down." % edge)
# Make sure the gpio_id is valid
if not gpio_id in set(chain(RPIO.GPIO_LIST_R1, RPIO.GPIO_LIST_R2, \
RPIO.GPIO_LIST_R3)):
raise AttributeError("GPIO %s is not a valid gpio-id." % gpio_id)
# Require INPUT pin setup; and set the correct PULL_UPDN
if RPIO.gpio_function(int(gpio_id)) == RPIO.IN:
RPIO.set_pullupdn(gpio_id, pull_up_down)
else:
debug("- changing gpio function from %s to INPUT" % \
(GPIO_FUNCTIONS[RPIO.gpio_function(int(gpio_id))]))
RPIO.setup(gpio_id, RPIO.IN, pull_up_down)
# Prepare the callback (wrap in Thread if needed)
cb = callback if not threaded_callback else \
partial(_threaded_callback, callback)
# Prepare the /sys/class path of this gpio
path_gpio = "%sgpio%s/" % (_SYS_GPIO_ROOT, gpio_id)
# If initial callback for this GPIO then set everything up. Else make
# sure the edge detection is the same.
if gpio_id in self._map_gpioid_to_callbacks:
with open(path_gpio + "edge", "r") as f:
e = f.read().strip()
if e != edge:
raise AttributeError(("Cannot add callback for gpio %s:"
" edge detection '%s' not compatible with existing"
" edge detection '%s'.") % (gpio_id, edge, e))
# Check whether edge is the same, else throw Exception
debug("- kernel interface already setup for GPIO %s" % gpio_id)
self._map_gpioid_to_callbacks[gpio_id].append(cb)
else:
# If kernel interface already exists unexport first for clean setup
if os.path.exists(path_gpio):
if self._show_warnings:
warn("Kernel interface for GPIO %s already exists." % \
gpio_id)
debug("- unexporting kernel interface for GPIO %s" % gpio_id)
with open(_SYS_GPIO_ROOT + "unexport", "w") as f:
f.write("%s" % gpio_id)
time.sleep(0.1)
# Export kernel interface /sys/class/gpio/gpioN
with open(_SYS_GPIO_ROOT + "export", "w") as f:
f.write("%s" % gpio_id)
self._gpio_kernel_interfaces_created.append(gpio_id)
debug("- kernel interface exported for GPIO %s" % gpio_id)
# Configure gpio as input
with open(path_gpio + "direction", "w") as f:
f.write("in")
# Configure gpio edge detection
with open(path_gpio + "edge", "w") as f:
f.write(edge)
debug(("- kernel interface configured for GPIO %s "
"(edge='%s', pullupdn=%s)") % (gpio_id, edge, \
_PULL_UPDN[pull_up_down]))
# Open the gpio value stream and read the initial value
f = open(path_gpio + "value", 'r')
val_initial = f.read().strip()
debug("- inital gpio value: %s" % val_initial)
f.seek(0)
# Add callback info to the mapping dictionaries
self._map_fileno_to_file[f.fileno()] = f
self._map_fileno_to_gpioid[f.fileno()] = gpio_id
self._map_fileno_to_options[f.fileno()] = {
"debounce_timeout_s": debounce_timeout_ms / 1000.0 if \
debounce_timeout_ms else 0,
"interrupt_last": 0,
"edge": edge
}
self._map_gpioid_to_fileno[gpio_id] = f.fileno()
self._map_gpioid_to_callbacks[gpio_id] = [cb]
# Add to epoll
self._epoll.register(f.fileno(), select.EPOLLPRI | select.EPOLLERR) | python | def add_interrupt_callback(self, gpio_id, callback, edge='both',
pull_up_down=_GPIO.PUD_OFF, threaded_callback=False,
debounce_timeout_ms=None):
"""
Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread.
"""
gpio_id = _GPIO.channel_to_gpio(gpio_id)
debug("Adding callback for GPIO %s" % gpio_id)
if not edge in ["falling", "rising", "both", "none"]:
raise AttributeError("'%s' is not a valid edge." % edge)
if not pull_up_down in [_GPIO.PUD_UP, _GPIO.PUD_DOWN, _GPIO.PUD_OFF]:
raise AttributeError("'%s' is not a valid pull_up_down." % edge)
# Make sure the gpio_id is valid
if not gpio_id in set(chain(RPIO.GPIO_LIST_R1, RPIO.GPIO_LIST_R2, \
RPIO.GPIO_LIST_R3)):
raise AttributeError("GPIO %s is not a valid gpio-id." % gpio_id)
# Require INPUT pin setup; and set the correct PULL_UPDN
if RPIO.gpio_function(int(gpio_id)) == RPIO.IN:
RPIO.set_pullupdn(gpio_id, pull_up_down)
else:
debug("- changing gpio function from %s to INPUT" % \
(GPIO_FUNCTIONS[RPIO.gpio_function(int(gpio_id))]))
RPIO.setup(gpio_id, RPIO.IN, pull_up_down)
# Prepare the callback (wrap in Thread if needed)
cb = callback if not threaded_callback else \
partial(_threaded_callback, callback)
# Prepare the /sys/class path of this gpio
path_gpio = "%sgpio%s/" % (_SYS_GPIO_ROOT, gpio_id)
# If initial callback for this GPIO then set everything up. Else make
# sure the edge detection is the same.
if gpio_id in self._map_gpioid_to_callbacks:
with open(path_gpio + "edge", "r") as f:
e = f.read().strip()
if e != edge:
raise AttributeError(("Cannot add callback for gpio %s:"
" edge detection '%s' not compatible with existing"
" edge detection '%s'.") % (gpio_id, edge, e))
# Check whether edge is the same, else throw Exception
debug("- kernel interface already setup for GPIO %s" % gpio_id)
self._map_gpioid_to_callbacks[gpio_id].append(cb)
else:
# If kernel interface already exists unexport first for clean setup
if os.path.exists(path_gpio):
if self._show_warnings:
warn("Kernel interface for GPIO %s already exists." % \
gpio_id)
debug("- unexporting kernel interface for GPIO %s" % gpio_id)
with open(_SYS_GPIO_ROOT + "unexport", "w") as f:
f.write("%s" % gpio_id)
time.sleep(0.1)
# Export kernel interface /sys/class/gpio/gpioN
with open(_SYS_GPIO_ROOT + "export", "w") as f:
f.write("%s" % gpio_id)
self._gpio_kernel_interfaces_created.append(gpio_id)
debug("- kernel interface exported for GPIO %s" % gpio_id)
# Configure gpio as input
with open(path_gpio + "direction", "w") as f:
f.write("in")
# Configure gpio edge detection
with open(path_gpio + "edge", "w") as f:
f.write(edge)
debug(("- kernel interface configured for GPIO %s "
"(edge='%s', pullupdn=%s)") % (gpio_id, edge, \
_PULL_UPDN[pull_up_down]))
# Open the gpio value stream and read the initial value
f = open(path_gpio + "value", 'r')
val_initial = f.read().strip()
debug("- inital gpio value: %s" % val_initial)
f.seek(0)
# Add callback info to the mapping dictionaries
self._map_fileno_to_file[f.fileno()] = f
self._map_fileno_to_gpioid[f.fileno()] = gpio_id
self._map_fileno_to_options[f.fileno()] = {
"debounce_timeout_s": debounce_timeout_ms / 1000.0 if \
debounce_timeout_ms else 0,
"interrupt_last": 0,
"edge": edge
}
self._map_gpioid_to_fileno[gpio_id] = f.fileno()
self._map_gpioid_to_callbacks[gpio_id] = [cb]
# Add to epoll
self._epoll.register(f.fileno(), select.EPOLLPRI | select.EPOLLERR) | [
"def",
"add_interrupt_callback",
"(",
"self",
",",
"gpio_id",
",",
"callback",
",",
"edge",
"=",
"'both'",
",",
"pull_up_down",
"=",
"_GPIO",
".",
"PUD_OFF",
",",
"threaded_callback",
"=",
"False",
",",
"debounce_timeout_ms",
"=",
"None",
")",
":",
"gpio_id",
"=",
"_GPIO",
".",
"channel_to_gpio",
"(",
"gpio_id",
")",
"debug",
"(",
"\"Adding callback for GPIO %s\"",
"%",
"gpio_id",
")",
"if",
"not",
"edge",
"in",
"[",
"\"falling\"",
",",
"\"rising\"",
",",
"\"both\"",
",",
"\"none\"",
"]",
":",
"raise",
"AttributeError",
"(",
"\"'%s' is not a valid edge.\"",
"%",
"edge",
")",
"if",
"not",
"pull_up_down",
"in",
"[",
"_GPIO",
".",
"PUD_UP",
",",
"_GPIO",
".",
"PUD_DOWN",
",",
"_GPIO",
".",
"PUD_OFF",
"]",
":",
"raise",
"AttributeError",
"(",
"\"'%s' is not a valid pull_up_down.\"",
"%",
"edge",
")",
"# Make sure the gpio_id is valid",
"if",
"not",
"gpio_id",
"in",
"set",
"(",
"chain",
"(",
"RPIO",
".",
"GPIO_LIST_R1",
",",
"RPIO",
".",
"GPIO_LIST_R2",
",",
"RPIO",
".",
"GPIO_LIST_R3",
")",
")",
":",
"raise",
"AttributeError",
"(",
"\"GPIO %s is not a valid gpio-id.\"",
"%",
"gpio_id",
")",
"# Require INPUT pin setup; and set the correct PULL_UPDN",
"if",
"RPIO",
".",
"gpio_function",
"(",
"int",
"(",
"gpio_id",
")",
")",
"==",
"RPIO",
".",
"IN",
":",
"RPIO",
".",
"set_pullupdn",
"(",
"gpio_id",
",",
"pull_up_down",
")",
"else",
":",
"debug",
"(",
"\"- changing gpio function from %s to INPUT\"",
"%",
"(",
"GPIO_FUNCTIONS",
"[",
"RPIO",
".",
"gpio_function",
"(",
"int",
"(",
"gpio_id",
")",
")",
"]",
")",
")",
"RPIO",
".",
"setup",
"(",
"gpio_id",
",",
"RPIO",
".",
"IN",
",",
"pull_up_down",
")",
"# Prepare the callback (wrap in Thread if needed)",
"cb",
"=",
"callback",
"if",
"not",
"threaded_callback",
"else",
"partial",
"(",
"_threaded_callback",
",",
"callback",
")",
"# Prepare the /sys/class path of this gpio",
"path_gpio",
"=",
"\"%sgpio%s/\"",
"%",
"(",
"_SYS_GPIO_ROOT",
",",
"gpio_id",
")",
"# If initial callback for this GPIO then set everything up. Else make",
"# sure the edge detection is the same.",
"if",
"gpio_id",
"in",
"self",
".",
"_map_gpioid_to_callbacks",
":",
"with",
"open",
"(",
"path_gpio",
"+",
"\"edge\"",
",",
"\"r\"",
")",
"as",
"f",
":",
"e",
"=",
"f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"if",
"e",
"!=",
"edge",
":",
"raise",
"AttributeError",
"(",
"(",
"\"Cannot add callback for gpio %s:\"",
"\" edge detection '%s' not compatible with existing\"",
"\" edge detection '%s'.\"",
")",
"%",
"(",
"gpio_id",
",",
"edge",
",",
"e",
")",
")",
"# Check whether edge is the same, else throw Exception",
"debug",
"(",
"\"- kernel interface already setup for GPIO %s\"",
"%",
"gpio_id",
")",
"self",
".",
"_map_gpioid_to_callbacks",
"[",
"gpio_id",
"]",
".",
"append",
"(",
"cb",
")",
"else",
":",
"# If kernel interface already exists unexport first for clean setup",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path_gpio",
")",
":",
"if",
"self",
".",
"_show_warnings",
":",
"warn",
"(",
"\"Kernel interface for GPIO %s already exists.\"",
"%",
"gpio_id",
")",
"debug",
"(",
"\"- unexporting kernel interface for GPIO %s\"",
"%",
"gpio_id",
")",
"with",
"open",
"(",
"_SYS_GPIO_ROOT",
"+",
"\"unexport\"",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"%s\"",
"%",
"gpio_id",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"# Export kernel interface /sys/class/gpio/gpioN",
"with",
"open",
"(",
"_SYS_GPIO_ROOT",
"+",
"\"export\"",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"%s\"",
"%",
"gpio_id",
")",
"self",
".",
"_gpio_kernel_interfaces_created",
".",
"append",
"(",
"gpio_id",
")",
"debug",
"(",
"\"- kernel interface exported for GPIO %s\"",
"%",
"gpio_id",
")",
"# Configure gpio as input",
"with",
"open",
"(",
"path_gpio",
"+",
"\"direction\"",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"in\"",
")",
"# Configure gpio edge detection",
"with",
"open",
"(",
"path_gpio",
"+",
"\"edge\"",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"edge",
")",
"debug",
"(",
"(",
"\"- kernel interface configured for GPIO %s \"",
"\"(edge='%s', pullupdn=%s)\"",
")",
"%",
"(",
"gpio_id",
",",
"edge",
",",
"_PULL_UPDN",
"[",
"pull_up_down",
"]",
")",
")",
"# Open the gpio value stream and read the initial value",
"f",
"=",
"open",
"(",
"path_gpio",
"+",
"\"value\"",
",",
"'r'",
")",
"val_initial",
"=",
"f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"debug",
"(",
"\"- inital gpio value: %s\"",
"%",
"val_initial",
")",
"f",
".",
"seek",
"(",
"0",
")",
"# Add callback info to the mapping dictionaries",
"self",
".",
"_map_fileno_to_file",
"[",
"f",
".",
"fileno",
"(",
")",
"]",
"=",
"f",
"self",
".",
"_map_fileno_to_gpioid",
"[",
"f",
".",
"fileno",
"(",
")",
"]",
"=",
"gpio_id",
"self",
".",
"_map_fileno_to_options",
"[",
"f",
".",
"fileno",
"(",
")",
"]",
"=",
"{",
"\"debounce_timeout_s\"",
":",
"debounce_timeout_ms",
"/",
"1000.0",
"if",
"debounce_timeout_ms",
"else",
"0",
",",
"\"interrupt_last\"",
":",
"0",
",",
"\"edge\"",
":",
"edge",
"}",
"self",
".",
"_map_gpioid_to_fileno",
"[",
"gpio_id",
"]",
"=",
"f",
".",
"fileno",
"(",
")",
"self",
".",
"_map_gpioid_to_callbacks",
"[",
"gpio_id",
"]",
"=",
"[",
"cb",
"]",
"# Add to epoll",
"self",
".",
"_epoll",
".",
"register",
"(",
"f",
".",
"fileno",
"(",
")",
",",
"select",
".",
"EPOLLPRI",
"|",
"select",
".",
"EPOLLERR",
")"
] | Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread. | [
"Add",
"a",
"callback",
"to",
"be",
"executed",
"when",
"the",
"value",
"on",
"gpio_id",
"changes",
"to",
"the",
"edge",
"specified",
"via",
"the",
"edge",
"parameter",
"(",
"default",
"=",
"both",
")",
"."
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/_RPIO.py#L114-L217 |
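A minimal usage sketch for the interrupt API in this entry. It assumes RPIO re-exports add_interrupt_callback and wait_for_interrupts as module-level helpers (this excerpt only shows the Interruptor methods) and that RPIO.PUD_UP is the pull-up constant; GPIO 17 is purely illustrative. As the dispatch code in the _handle_interrupt entry below shows, callbacks receive the gpio id and the value as an integer.

import RPIO

def on_edge(gpio_id, value):
    # value is 0 or 1 (the sysfs string is converted to int before dispatch)
    print("GPIO %s is now %s" % (gpio_id, value))

# Falling-edge callback with an internal pull-up, run in its own thread,
# ignoring bounces that arrive within 30 ms of the previous interrupt.
RPIO.add_interrupt_callback(17, on_edge, edge='falling',
                            pull_up_down=RPIO.PUD_UP,
                            threaded_callback=True,
                            debounce_timeout_ms=30)
RPIO.wait_for_interrupts()  # blocking epoll loop (see the wait_for_interrupts entry below)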
metachris/RPIO | source/RPIO/_RPIO.py | Interruptor.del_interrupt_callback | def del_interrupt_callback(self, gpio_id):
""" Delete all interrupt callbacks from a certain gpio """
debug("- removing interrupts on gpio %s" % gpio_id)
gpio_id = _GPIO.channel_to_gpio(gpio_id)
fileno = self._map_gpioid_to_fileno[gpio_id]
# 1. Remove from epoll
self._epoll.unregister(fileno)
# 2. Cache the file
f = self._map_fileno_to_file[fileno]
# 3. Remove from maps
del self._map_fileno_to_file[fileno]
del self._map_fileno_to_gpioid[fileno]
del self._map_fileno_to_options[fileno]
del self._map_gpioid_to_fileno[gpio_id]
del self._map_gpioid_to_callbacks[gpio_id]
# 4. Close file last in case of IOError
f.close() | python | def del_interrupt_callback(self, gpio_id):
""" Delete all interrupt callbacks from a certain gpio """
debug("- removing interrupts on gpio %s" % gpio_id)
gpio_id = _GPIO.channel_to_gpio(gpio_id)
fileno = self._map_gpioid_to_fileno[gpio_id]
# 1. Remove from epoll
self._epoll.unregister(fileno)
# 2. Cache the file
f = self._map_fileno_to_file[fileno]
# 3. Remove from maps
del self._map_fileno_to_file[fileno]
del self._map_fileno_to_gpioid[fileno]
del self._map_fileno_to_options[fileno]
del self._map_gpioid_to_fileno[gpio_id]
del self._map_gpioid_to_callbacks[gpio_id]
# 4. Close file last in case of IOError
f.close() | [
"def",
"del_interrupt_callback",
"(",
"self",
",",
"gpio_id",
")",
":",
"debug",
"(",
"\"- removing interrupts on gpio %s\"",
"%",
"gpio_id",
")",
"gpio_id",
"=",
"_GPIO",
".",
"channel_to_gpio",
"(",
"gpio_id",
")",
"fileno",
"=",
"self",
".",
"_map_gpioid_to_fileno",
"[",
"gpio_id",
"]",
"# 1. Remove from epoll",
"self",
".",
"_epoll",
".",
"unregister",
"(",
"fileno",
")",
"# 2. Cache the file",
"f",
"=",
"self",
".",
"_map_fileno_to_file",
"[",
"fileno",
"]",
"# 3. Remove from maps",
"del",
"self",
".",
"_map_fileno_to_file",
"[",
"fileno",
"]",
"del",
"self",
".",
"_map_fileno_to_gpioid",
"[",
"fileno",
"]",
"del",
"self",
".",
"_map_fileno_to_options",
"[",
"fileno",
"]",
"del",
"self",
".",
"_map_gpioid_to_fileno",
"[",
"gpio_id",
"]",
"del",
"self",
".",
"_map_gpioid_to_callbacks",
"[",
"gpio_id",
"]",
"# 4. Close file last in case of IOError",
"f",
".",
"close",
"(",
")"
] | Delete all interrupt callbacks from a certain gpio | [
"Delete",
"all",
"interrupt",
"callbacks",
"from",
"a",
"certain",
"gpio"
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/_RPIO.py#L219-L239 |
metachris/RPIO | source/RPIO/_RPIO.py | Interruptor._handle_interrupt | def _handle_interrupt(self, fileno, val):
""" Internally distributes interrupts to all attached callbacks """
val = int(val)
# Filter invalid edge values (sometimes 1 comes in when edge=falling)
edge = self._map_fileno_to_options[fileno]["edge"]
if (edge == 'rising' and val == 0) or (edge == 'falling' and val == 1):
return
# If user activated debounce for this callback, check timing now
debounce = self._map_fileno_to_options[fileno]["debounce_timeout_s"]
if debounce:
t = time.time()
t_last = self._map_fileno_to_options[fileno]["interrupt_last"]
if t - t_last < debounce:
debug("- don't start interrupt callback due to debouncing")
return
self._map_fileno_to_options[fileno]["interrupt_last"] = t
# Start the callback(s) now
gpio_id = self._map_fileno_to_gpioid[fileno]
if gpio_id in self._map_gpioid_to_callbacks:
for cb in self._map_gpioid_to_callbacks[gpio_id]:
cb(gpio_id, val) | python | def _handle_interrupt(self, fileno, val):
""" Internally distributes interrupts to all attached callbacks """
val = int(val)
# Filter invalid edge values (sometimes 1 comes in when edge=falling)
edge = self._map_fileno_to_options[fileno]["edge"]
if (edge == 'rising' and val == 0) or (edge == 'falling' and val == 1):
return
# If user activated debounce for this callback, check timing now
debounce = self._map_fileno_to_options[fileno]["debounce_timeout_s"]
if debounce:
t = time.time()
t_last = self._map_fileno_to_options[fileno]["interrupt_last"]
if t - t_last < debounce:
debug("- don't start interrupt callback due to debouncing")
return
self._map_fileno_to_options[fileno]["interrupt_last"] = t
# Start the callback(s) now
gpio_id = self._map_fileno_to_gpioid[fileno]
if gpio_id in self._map_gpioid_to_callbacks:
for cb in self._map_gpioid_to_callbacks[gpio_id]:
cb(gpio_id, val) | [
"def",
"_handle_interrupt",
"(",
"self",
",",
"fileno",
",",
"val",
")",
":",
"val",
"=",
"int",
"(",
"val",
")",
"# Filter invalid edge values (sometimes 1 comes in when edge=falling)",
"edge",
"=",
"self",
".",
"_map_fileno_to_options",
"[",
"fileno",
"]",
"[",
"\"edge\"",
"]",
"if",
"(",
"edge",
"==",
"'rising'",
"and",
"val",
"==",
"0",
")",
"or",
"(",
"edge",
"==",
"'falling'",
"and",
"val",
"==",
"1",
")",
":",
"return",
"# If user activated debounce for this callback, check timing now",
"debounce",
"=",
"self",
".",
"_map_fileno_to_options",
"[",
"fileno",
"]",
"[",
"\"debounce_timeout_s\"",
"]",
"if",
"debounce",
":",
"t",
"=",
"time",
".",
"time",
"(",
")",
"t_last",
"=",
"self",
".",
"_map_fileno_to_options",
"[",
"fileno",
"]",
"[",
"\"interrupt_last\"",
"]",
"if",
"t",
"-",
"t_last",
"<",
"debounce",
":",
"debug",
"(",
"\"- don't start interrupt callback due to debouncing\"",
")",
"return",
"self",
".",
"_map_fileno_to_options",
"[",
"fileno",
"]",
"[",
"\"interrupt_last\"",
"]",
"=",
"t",
"# Start the callback(s) now",
"gpio_id",
"=",
"self",
".",
"_map_fileno_to_gpioid",
"[",
"fileno",
"]",
"if",
"gpio_id",
"in",
"self",
".",
"_map_gpioid_to_callbacks",
":",
"for",
"cb",
"in",
"self",
".",
"_map_gpioid_to_callbacks",
"[",
"gpio_id",
"]",
":",
"cb",
"(",
"gpio_id",
",",
"val",
")"
] | Internally distributes interrupts to all attached callbacks | [
"Internally",
"distributes",
"interrupts",
"to",
"all",
"attached",
"callbacks"
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/_RPIO.py#L241-L264 |
metachris/RPIO | source/RPIO/_RPIO.py | Interruptor.wait_for_interrupts | def wait_for_interrupts(self, epoll_timeout=1):
"""
Blocking loop to listen for GPIO interrupts and distribute them to
associated callbacks. epoll_timeout is an easy way to shutdown the
blocking function. Per default the timeout is set to 1 second; if
`_is_waiting_for_interrupts` is set to False the loop will exit.
If an exception occurs while waiting for interrupts, the interrupt
gpio interfaces will be cleaned up (/sys/class/gpio unexports). In
this case all interrupts will be reset and you'd need to add the
callbacks again before using `wait_for_interrupts(..)` again.
"""
self._is_waiting_for_interrupts = True
while self._is_waiting_for_interrupts:
events = self._epoll.poll(epoll_timeout)
for fileno, event in events:
debug("- epoll event on fd %s: %s" % (fileno, event))
if fileno in self._tcp_server_sockets:
# New client connection to socket server
serversocket, cb = self._tcp_server_sockets[fileno]
connection, address = serversocket.accept()
connection.setblocking(0)
f = connection.fileno()
self._epoll.register(f, select.EPOLLIN)
self._tcp_client_sockets[f] = (connection, cb)
elif event & select.EPOLLIN:
# Input from TCP socket
socket, cb = self._tcp_client_sockets[fileno]
content = socket.recv(1024)
if not content or not content.strip():
# No content means quitting
self.close_tcp_client(fileno)
else:
sock, cb = self._tcp_client_sockets[fileno]
cb(self._tcp_client_sockets[fileno][0], \
content.strip())
elif event & select.EPOLLHUP:
# TCP Socket Hangup
self.close_tcp_client(fileno)
elif event & select.EPOLLPRI:
# GPIO interrupts
f = self._map_fileno_to_file[fileno]
# read() is workaround for not getting new values
# with read(1)
val = f.read().strip()
f.seek(0)
self._handle_interrupt(fileno, val) | python | def wait_for_interrupts(self, epoll_timeout=1):
"""
Blocking loop to listen for GPIO interrupts and distribute them to
associated callbacks. epoll_timeout is an easy way to shutdown the
blocking function. Per default the timeout is set to 1 second; if
`_is_waiting_for_interrupts` is set to False the loop will exit.
If an exception occurs while waiting for interrupts, the interrupt
gpio interfaces will be cleaned up (/sys/class/gpio unexports). In
this case all interrupts will be reset and you'd need to add the
callbacks again before using `wait_for_interrupts(..)` again.
"""
self._is_waiting_for_interrupts = True
while self._is_waiting_for_interrupts:
events = self._epoll.poll(epoll_timeout)
for fileno, event in events:
debug("- epoll event on fd %s: %s" % (fileno, event))
if fileno in self._tcp_server_sockets:
# New client connection to socket server
serversocket, cb = self._tcp_server_sockets[fileno]
connection, address = serversocket.accept()
connection.setblocking(0)
f = connection.fileno()
self._epoll.register(f, select.EPOLLIN)
self._tcp_client_sockets[f] = (connection, cb)
elif event & select.EPOLLIN:
# Input from TCP socket
socket, cb = self._tcp_client_sockets[fileno]
content = socket.recv(1024)
if not content or not content.strip():
# No content means quitting
self.close_tcp_client(fileno)
else:
sock, cb = self._tcp_client_sockets[fileno]
cb(self._tcp_client_sockets[fileno][0], \
content.strip())
elif event & select.EPOLLHUP:
# TCP Socket Hangup
self.close_tcp_client(fileno)
elif event & select.EPOLLPRI:
# GPIO interrupts
f = self._map_fileno_to_file[fileno]
# read() is workaround for not getting new values
# with read(1)
val = f.read().strip()
f.seek(0)
self._handle_interrupt(fileno, val) | [
"def",
"wait_for_interrupts",
"(",
"self",
",",
"epoll_timeout",
"=",
"1",
")",
":",
"self",
".",
"_is_waiting_for_interrupts",
"=",
"True",
"while",
"self",
".",
"_is_waiting_for_interrupts",
":",
"events",
"=",
"self",
".",
"_epoll",
".",
"poll",
"(",
"epoll_timeout",
")",
"for",
"fileno",
",",
"event",
"in",
"events",
":",
"debug",
"(",
"\"- epoll event on fd %s: %s\"",
"%",
"(",
"fileno",
",",
"event",
")",
")",
"if",
"fileno",
"in",
"self",
".",
"_tcp_server_sockets",
":",
"# New client connection to socket server",
"serversocket",
",",
"cb",
"=",
"self",
".",
"_tcp_server_sockets",
"[",
"fileno",
"]",
"connection",
",",
"address",
"=",
"serversocket",
".",
"accept",
"(",
")",
"connection",
".",
"setblocking",
"(",
"0",
")",
"f",
"=",
"connection",
".",
"fileno",
"(",
")",
"self",
".",
"_epoll",
".",
"register",
"(",
"f",
",",
"select",
".",
"EPOLLIN",
")",
"self",
".",
"_tcp_client_sockets",
"[",
"f",
"]",
"=",
"(",
"connection",
",",
"cb",
")",
"elif",
"event",
"&",
"select",
".",
"EPOLLIN",
":",
"# Input from TCP socket",
"socket",
",",
"cb",
"=",
"self",
".",
"_tcp_client_sockets",
"[",
"fileno",
"]",
"content",
"=",
"socket",
".",
"recv",
"(",
"1024",
")",
"if",
"not",
"content",
"or",
"not",
"content",
".",
"strip",
"(",
")",
":",
"# No content means quitting",
"self",
".",
"close_tcp_client",
"(",
"fileno",
")",
"else",
":",
"sock",
",",
"cb",
"=",
"self",
".",
"_tcp_client_sockets",
"[",
"fileno",
"]",
"cb",
"(",
"self",
".",
"_tcp_client_sockets",
"[",
"fileno",
"]",
"[",
"0",
"]",
",",
"content",
".",
"strip",
"(",
")",
")",
"elif",
"event",
"&",
"select",
".",
"EPOLLHUP",
":",
"# TCP Socket Hangup",
"self",
".",
"close_tcp_client",
"(",
"fileno",
")",
"elif",
"event",
"&",
"select",
".",
"EPOLLPRI",
":",
"# GPIO interrupts",
"f",
"=",
"self",
".",
"_map_fileno_to_file",
"[",
"fileno",
"]",
"# read() is workaround for not getting new values",
"# with read(1)",
"val",
"=",
"f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"f",
".",
"seek",
"(",
"0",
")",
"self",
".",
"_handle_interrupt",
"(",
"fileno",
",",
"val",
")"
] | Blocking loop to listen for GPIO interrupts and distribute them to
associated callbacks. epoll_timeout is an easy way to shutdown the
blocking function. Per default the timeout is set to 1 second; if
`_is_waiting_for_interrupts` is set to False the loop will exit.
If an exception occurs while waiting for interrupts, the interrupt
gpio interfaces will be cleaned up (/sys/class/gpio unexports). In
this case all interrupts will be reset and you'd need to add the
callbacks again before using `wait_for_interrupts(..)` again. | [
"Blocking",
"loop",
"to",
"listen",
"for",
"GPIO",
"interrupts",
"and",
"distribute",
"them",
"to",
"associated",
"callbacks",
".",
"epoll_timeout",
"is",
"an",
"easy",
"way",
"to",
"shutdown",
"the",
"blocking",
"function",
".",
"Per",
"default",
"the",
"timeout",
"is",
"set",
"to",
"1",
"second",
";",
"if",
"_is_waiting_for_interrupts",
"is",
"set",
"to",
"False",
"the",
"loop",
"will",
"exit",
"."
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/_RPIO.py#L273-L322 |
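Because the poll above times out every second (epoll_timeout=1) and re-checks _is_waiting_for_interrupts on each pass, the blocking loop can be stopped cooperatively from another thread. A rough sketch, assuming Interruptor() can be constructed without arguments (its constructor is not part of this excerpt); GPIO 17 and the sleep are illustrative.

import threading
import time
from RPIO._RPIO import Interruptor

def on_edge(gpio_id, value):
    pass  # handle the interrupt here

interruptor = Interruptor()  # assumed no-argument constructor
interruptor.add_interrupt_callback(17, on_edge)

worker = threading.Thread(target=interruptor.wait_for_interrupts,
                          kwargs={'epoll_timeout': 1})
worker.start()
time.sleep(10)                                   # ... application work ...
interruptor._is_waiting_for_interrupts = False   # loop exits within ~1 second
worker.join()
interruptor.cleanup_interfaces()                 # unexport the /sys/class/gpio entries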
metachris/RPIO | source/RPIO/_RPIO.py | Interruptor.cleanup_interfaces | def cleanup_interfaces(self):
"""
Removes all /sys/class/gpio/gpioN interfaces that this script created,
and deletes callback bindings. Should be used after using interrupts.
"""
debug("Cleaning up interfaces...")
for gpio_id in self._gpio_kernel_interfaces_created:
# Close the value-file and remove interrupt bindings
self.del_interrupt_callback(gpio_id)
# Remove the kernel GPIO interface
debug("- unexporting GPIO %s" % gpio_id)
with open(_SYS_GPIO_ROOT + "unexport", "w") as f:
f.write("%s" % gpio_id)
# Reset list of created interfaces
self._gpio_kernel_interfaces_created = [] | python | def cleanup_interfaces(self):
"""
Removes all /sys/class/gpio/gpioN interfaces that this script created,
and deletes callback bindings. Should be used after using interrupts.
"""
debug("Cleaning up interfaces...")
for gpio_id in self._gpio_kernel_interfaces_created:
# Close the value-file and remove interrupt bindings
self.del_interrupt_callback(gpio_id)
# Remove the kernel GPIO interface
debug("- unexporting GPIO %s" % gpio_id)
with open(_SYS_GPIO_ROOT + "unexport", "w") as f:
f.write("%s" % gpio_id)
# Reset list of created interfaces
self._gpio_kernel_interfaces_created = [] | [
"def",
"cleanup_interfaces",
"(",
"self",
")",
":",
"debug",
"(",
"\"Cleaning up interfaces...\"",
")",
"for",
"gpio_id",
"in",
"self",
".",
"_gpio_kernel_interfaces_created",
":",
"# Close the value-file and remove interrupt bindings",
"self",
".",
"del_interrupt_callback",
"(",
"gpio_id",
")",
"# Remove the kernel GPIO interface",
"debug",
"(",
"\"- unexporting GPIO %s\"",
"%",
"gpio_id",
")",
"with",
"open",
"(",
"_SYS_GPIO_ROOT",
"+",
"\"unexport\"",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"%s\"",
"%",
"gpio_id",
")",
"# Reset list of created interfaces",
"self",
".",
"_gpio_kernel_interfaces_created",
"=",
"[",
"]"
] | Removes all /sys/class/gpio/gpioN interfaces that this script created,
and deletes callback bindings. Should be used after using interrupts. | [
"Removes",
"all",
"/",
"sys",
"/",
"class",
"/",
"gpio",
"/",
"gpioN",
"interfaces",
"that",
"this",
"script",
"created",
"and",
"deletes",
"callback",
"bindings",
".",
"Should",
"be",
"used",
"after",
"using",
"interrupts",
"."
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/_RPIO.py#L331-L347 |
metachris/RPIO | source/RPIO/_RPIO.py | Interruptor.cleanup_tcpsockets | def cleanup_tcpsockets(self):
"""
Closes all TCP connections and then the socket servers
"""
for fileno in self._tcp_client_sockets.keys():
self.close_tcp_client(fileno)
for fileno, items in self._tcp_server_sockets.items():
socket, cb = items
debug("- _cleanup server socket connection (fd %s)" % fileno)
self._epoll.unregister(fileno)
socket.close()
self._tcp_server_sockets = {} | python | def cleanup_tcpsockets(self):
"""
Closes all TCP connections and then the socket servers
"""
for fileno in self._tcp_client_sockets.keys():
self.close_tcp_client(fileno)
for fileno, items in self._tcp_server_sockets.items():
socket, cb = items
debug("- _cleanup server socket connection (fd %s)" % fileno)
self._epoll.unregister(fileno)
socket.close()
self._tcp_server_sockets = {} | [
"def",
"cleanup_tcpsockets",
"(",
"self",
")",
":",
"for",
"fileno",
"in",
"self",
".",
"_tcp_client_sockets",
".",
"keys",
"(",
")",
":",
"self",
".",
"close_tcp_client",
"(",
"fileno",
")",
"for",
"fileno",
",",
"items",
"in",
"self",
".",
"_tcp_server_sockets",
".",
"items",
"(",
")",
":",
"socket",
",",
"cb",
"=",
"items",
"debug",
"(",
"\"- _cleanup server socket connection (fd %s)\"",
"%",
"fileno",
")",
"self",
".",
"_epoll",
".",
"unregister",
"(",
"fileno",
")",
"socket",
".",
"close",
"(",
")",
"self",
".",
"_tcp_server_sockets",
"=",
"{",
"}"
] | Closes all TCP connections and then the socket servers | [
"Closes",
"all",
"TCP",
"connections",
"and",
"then",
"the",
"socket",
"servers"
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/_RPIO.py#L349-L360 |
metachris/RPIO | source/RPIO/PWM/__init__.py | add_channel_pulse | def add_channel_pulse(dma_channel, gpio, start, width):
"""
Add a pulse for a specific GPIO to a dma channel subcycle. `start` and
`width` are multiples of the pulse-width increment granularity.
"""
return _PWM.add_channel_pulse(dma_channel, gpio, start, width) | python | def add_channel_pulse(dma_channel, gpio, start, width):
"""
Add a pulse for a specific GPIO to a dma channel subcycle. `start` and
`width` are multiples of the pulse-width increment granularity.
"""
return _PWM.add_channel_pulse(dma_channel, gpio, start, width) | [
"def",
"add_channel_pulse",
"(",
"dma_channel",
",",
"gpio",
",",
"start",
",",
"width",
")",
":",
"return",
"_PWM",
".",
"add_channel_pulse",
"(",
"dma_channel",
",",
"gpio",
",",
"start",
",",
"width",
")"
] | Add a pulse for a specific GPIO to a dma channel subcycle. `start` and
`width` are multiples of the pulse-width increment granularity. | [
"Add",
"a",
"pulse",
"for",
"a",
"specific",
"GPIO",
"to",
"a",
"dma",
"channel",
"subcycle",
".",
"start",
"and",
"width",
"are",
"multiples",
"of",
"the",
"pulse",
"-",
"width",
"increment",
"granularity",
"."
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/PWM/__init__.py#L110-L115 |
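A short sketch of the low-level channel API that add_channel_pulse belongs to. init_channel(dma_channel, subcycle_time_us) is taken from the set_servo entry below; PWM.setup() is assumed to be the module's one-time initializer and is not shown in this excerpt, and the 10 us default increment is also an assumption.

from RPIO import PWM

PWM.setup()                 # assumed one-time initialization of the DMA machinery
PWM.init_channel(0, 20000)  # DMA channel 0 with a 20 ms subcycle
# One pulse on GPIO 17 starting at increment 0 and lasting 100 increments
# (about 1000 us with the assumed 10 us granularity), repeated every subcycle.
PWM.add_channel_pulse(0, 17, start=0, width=100)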
metachris/RPIO | source/RPIO/PWM/__init__.py | Servo.set_servo | def set_servo(self, gpio, pulse_width_us):
"""
Sets a pulse-width on a gpio to repeat every subcycle
(by default every 20ms).
"""
# Make sure we can set the exact pulse_width_us
_pulse_incr_us = _PWM.get_pulse_incr_us()
if pulse_width_us % _pulse_incr_us:
# No clean division possible
raise AttributeError(("Pulse width increment granularity %sus "
"cannot divide a pulse-time of %sus") % (_pulse_incr_us,
pulse_width_us))
# Initialize channel if not already done, else check subcycle time
if _PWM.is_channel_initialized(self._dma_channel):
_subcycle_us = _PWM.get_channel_subcycle_time_us(self._dma_channel)
if _subcycle_us != self._subcycle_time_us:
raise AttributeError(("Error: DMA channel %s is setup with a "
"subcycle_time of %sus (instead of %sus)") % \
(self._dma_channel, _subcycle_us,
self._subcycle_time_us))
else:
init_channel(self._dma_channel, self._subcycle_time_us)
# Add pulse for this GPIO
add_channel_pulse(self._dma_channel, gpio, 0, \
int(pulse_width_us / _pulse_incr_us)) | python | def set_servo(self, gpio, pulse_width_us):
"""
Sets a pulse-width on a gpio to repeat every subcycle
(by default every 20ms).
"""
# Make sure we can set the exact pulse_width_us
_pulse_incr_us = _PWM.get_pulse_incr_us()
if pulse_width_us % _pulse_incr_us:
# No clean division possible
raise AttributeError(("Pulse width increment granularity %sus "
"cannot divide a pulse-time of %sus") % (_pulse_incr_us,
pulse_width_us))
# Initialize channel if not already done, else check subcycle time
if _PWM.is_channel_initialized(self._dma_channel):
_subcycle_us = _PWM.get_channel_subcycle_time_us(self._dma_channel)
if _subcycle_us != self._subcycle_time_us:
raise AttributeError(("Error: DMA channel %s is setup with a "
"subcycle_time of %sus (instead of %sus)") % \
(self._dma_channel, _subcycle_us,
self._subcycle_time_us))
else:
init_channel(self._dma_channel, self._subcycle_time_us)
# Add pulse for this GPIO
add_channel_pulse(self._dma_channel, gpio, 0, \
int(pulse_width_us / _pulse_incr_us)) | [
"def",
"set_servo",
"(",
"self",
",",
"gpio",
",",
"pulse_width_us",
")",
":",
"# Make sure we can set the exact pulse_width_us",
"_pulse_incr_us",
"=",
"_PWM",
".",
"get_pulse_incr_us",
"(",
")",
"if",
"pulse_width_us",
"%",
"_pulse_incr_us",
":",
"# No clean division possible",
"raise",
"AttributeError",
"(",
"(",
"\"Pulse width increment granularity %sus \"",
"\"cannot divide a pulse-time of %sus\"",
")",
"%",
"(",
"_pulse_incr_us",
",",
"pulse_width_us",
")",
")",
"# Initialize channel if not already done, else check subcycle time",
"if",
"_PWM",
".",
"is_channel_initialized",
"(",
"self",
".",
"_dma_channel",
")",
":",
"_subcycle_us",
"=",
"_PWM",
".",
"get_channel_subcycle_time_us",
"(",
"self",
".",
"_dma_channel",
")",
"if",
"_subcycle_us",
"!=",
"self",
".",
"_subcycle_time_us",
":",
"raise",
"AttributeError",
"(",
"(",
"\"Error: DMA channel %s is setup with a \"",
"\"subcycle_time of %sus (instead of %sus)\"",
")",
"%",
"(",
"self",
".",
"_dma_channel",
",",
"_subcycle_us",
",",
"self",
".",
"_subcycle_time_us",
")",
")",
"else",
":",
"init_channel",
"(",
"self",
".",
"_dma_channel",
",",
"self",
".",
"_subcycle_time_us",
")",
"# Add pulse for this GPIO",
"add_channel_pulse",
"(",
"self",
".",
"_dma_channel",
",",
"gpio",
",",
"0",
",",
"int",
"(",
"pulse_width_us",
"/",
"_pulse_incr_us",
")",
")"
] | Sets a pulse-width on a gpio to repeat every subcycle
(by default every 20ms). | [
"Sets",
"a",
"pulse",
"-",
"width",
"on",
"a",
"gpio",
"to",
"repeat",
"every",
"subcycle",
"(",
"by",
"default",
"every",
"20ms",
")",
"."
] | train | https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/PWM/__init__.py#L190-L216 |
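Typical use of the Servo wrapper documented above, assuming its constructor works with defaults (the DMA channel and 20 ms subcycle are set in __init__, which is not part of this excerpt). The pulse width must be a multiple of the pulse-width increment, otherwise set_servo raises AttributeError as shown in the code.

from RPIO import PWM

servo = PWM.Servo()        # assumed default constructor
servo.set_servo(18, 1200)  # 1200 us pulse on GPIO 18, repeated every 20 ms subcycle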
macbre/sql-metadata | sql_metadata.py | unique | def unique(_list):
"""
Makes the list have unique items only and maintains the order
list(set()) won't provide that
:type _list list
:rtype: list
"""
ret = []
for item in _list:
if item not in ret:
ret.append(item)
return ret | python | def unique(_list):
"""
Makes the list have unique items only and maintains the order
list(set()) won't provide that
:type _list list
:rtype: list
"""
ret = []
for item in _list:
if item not in ret:
ret.append(item)
return ret | [
"def",
"unique",
"(",
"_list",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"item",
"in",
"_list",
":",
"if",
"item",
"not",
"in",
"ret",
":",
"ret",
".",
"append",
"(",
"item",
")",
"return",
"ret"
] | Makes the list have unique items only and maintains the order
list(set()) won't provide that
:type _list list
:rtype: list | [
"Makes",
"the",
"list",
"have",
"unique",
"items",
"only",
"and",
"maintains",
"the",
"order"
] | train | https://github.com/macbre/sql-metadata/blob/4b7b4ae0a961d568075aefe78535cf5aee74583c/sql_metadata.py#L12-L27 |
macbre/sql-metadata | sql_metadata.py | preprocess_query | def preprocess_query(query):
"""
Perform initial query cleanup
:type query str
:rtype str
"""
# 1. remove aliases
# FROM `dimension_wikis` `dw`
# INNER JOIN `fact_wam_scores` `fwN`
query = re.sub(r'(\s(FROM|JOIN)\s`[^`]+`)\s`[^`]+`', r'\1', query, flags=re.IGNORECASE)
# 2. `database`.`table` notation -> database.table
query = re.sub(r'`([^`]+)`\.`([^`]+)`', r'\1.\2', query)
# 2. database.table notation -> table
# query = re.sub(r'([a-z_0-9]+)\.([a-z_0-9]+)', r'\2', query, flags=re.IGNORECASE)
return query | python | def preprocess_query(query):
"""
Perform initial query cleanup
:type query str
:rtype str
"""
# 1. remove aliases
# FROM `dimension_wikis` `dw`
# INNER JOIN `fact_wam_scores` `fwN`
query = re.sub(r'(\s(FROM|JOIN)\s`[^`]+`)\s`[^`]+`', r'\1', query, flags=re.IGNORECASE)
# 2. `database`.`table` notation -> database.table
query = re.sub(r'`([^`]+)`\.`([^`]+)`', r'\1.\2', query)
# 2. database.table notation -> table
# query = re.sub(r'([a-z_0-9]+)\.([a-z_0-9]+)', r'\2', query, flags=re.IGNORECASE)
return query | [
"def",
"preprocess_query",
"(",
"query",
")",
":",
"# 1. remove aliases",
"# FROM `dimension_wikis` `dw`",
"# INNER JOIN `fact_wam_scores` `fwN`",
"query",
"=",
"re",
".",
"sub",
"(",
"r'(\\s(FROM|JOIN)\\s`[^`]+`)\\s`[^`]+`'",
",",
"r'\\1'",
",",
"query",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"# 2. `database`.`table` notation -> database.table",
"query",
"=",
"re",
".",
"sub",
"(",
"r'`([^`]+)`\\.`([^`]+)`'",
",",
"r'\\1.\\2'",
",",
"query",
")",
"# 2. database.table notation -> table",
"# query = re.sub(r'([a-z_0-9]+)\\.([a-z_0-9]+)', r'\\2', query, flags=re.IGNORECASE)",
"return",
"query"
] | Perform initial query cleanup
:type query str
:rtype str | [
"Perform",
"initial",
"query",
"cleanup"
] | train | https://github.com/macbre/sql-metadata/blob/4b7b4ae0a961d568075aefe78535cf5aee74583c/sql_metadata.py#L30-L48 |
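The two rewrites performed above, shown as a quick check; this assumes the module is importable as sql_metadata (the file lives at the repository root).

from sql_metadata import preprocess_query

# the alias after a backticked table name is dropped
assert preprocess_query("SELECT * FROM `dimension_wikis` `dw`") == \
    "SELECT * FROM `dimension_wikis`"
# `database`.`table` becomes database.table
assert preprocess_query("SELECT * FROM `wiki`.`pages`") == \
    "SELECT * FROM wiki.pages"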
macbre/sql-metadata | sql_metadata.py | get_query_tokens | def get_query_tokens(query):
"""
:type query str
:rtype: list[sqlparse.sql.Token]
"""
query = preprocess_query(query)
parsed = sqlparse.parse(query)
# handle empty queries (#12)
if not parsed:
return []
tokens = TokenList(parsed[0].tokens).flatten()
# print([(token.value, token.ttype) for token in tokens])
return [token for token in tokens if token.ttype is not Whitespace] | python | def get_query_tokens(query):
"""
:type query str
:rtype: list[sqlparse.sql.Token]
"""
query = preprocess_query(query)
parsed = sqlparse.parse(query)
# handle empty queries (#12)
if not parsed:
return []
tokens = TokenList(parsed[0].tokens).flatten()
# print([(token.value, token.ttype) for token in tokens])
return [token for token in tokens if token.ttype is not Whitespace] | [
"def",
"get_query_tokens",
"(",
"query",
")",
":",
"query",
"=",
"preprocess_query",
"(",
"query",
")",
"parsed",
"=",
"sqlparse",
".",
"parse",
"(",
"query",
")",
"# handle empty queries (#12)",
"if",
"not",
"parsed",
":",
"return",
"[",
"]",
"tokens",
"=",
"TokenList",
"(",
"parsed",
"[",
"0",
"]",
".",
"tokens",
")",
".",
"flatten",
"(",
")",
"# print([(token.value, token.ttype) for token in tokens])",
"return",
"[",
"token",
"for",
"token",
"in",
"tokens",
"if",
"token",
".",
"ttype",
"is",
"not",
"Whitespace",
"]"
] | :type query str
:rtype: list[sqlparse.sql.Token] | [
":",
"type",
"query",
"str",
":",
"rtype",
":",
"list",
"[",
"sqlparse",
".",
"sql",
".",
"Token",
"]"
] | train | https://github.com/macbre/sql-metadata/blob/4b7b4ae0a961d568075aefe78535cf5aee74583c/sql_metadata.py#L51-L66 |
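get_query_tokens simply exposes sqlparse's flattened token stream with whitespace tokens removed; a small inspection sketch under the same import assumption (exact token types depend on the installed sqlparse version, so no output is asserted).

from sql_metadata import get_query_tokens

for token in get_query_tokens("SELECT id FROM users LIMIT 5"):
    print(token.ttype, repr(token.value))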
macbre/sql-metadata | sql_metadata.py | get_query_columns | def get_query_columns(query):
"""
:type query str
:rtype: list[str]
"""
columns = []
last_keyword = None
last_token = None
# print(preprocess_query(query))
# these keywords should not change the state of a parser
# and not "reset" previously found SELECT keyword
keywords_ignored = ['AS', 'AND', 'OR', 'IN', 'IS', 'NOT', 'NOT NULL', 'LIKE', 'CASE', 'WHEN']
# these function should be ignored
# and not "reset" previously found SELECT keyword
functions_ignored = ['COUNT', 'MIN', 'MAX', 'FROM_UNIXTIME', 'DATE_FORMAT', 'CAST', 'CONVERT']
for token in get_query_tokens(query):
if token.is_keyword and token.value.upper() not in keywords_ignored:
# keep the name of the last keyword, e.g. SELECT, FROM, WHERE, (ORDER) BY
last_keyword = token.value.upper()
# print('keyword', last_keyword)
elif token.ttype is Name:
# analyze the name tokens, column names and where condition values
if last_keyword in ['SELECT', 'WHERE', 'BY', 'ON'] \
and last_token.value.upper() not in ['AS']:
# print(last_keyword, last_token, token.value)
if token.value.upper() not in functions_ignored:
if str(last_token) == '.':
# print('DOT', last_token, columns[-1])
# we have table.column notation example
# append column name to the last entry of columns
# as it is a table name in fact
table_name = columns[-1]
columns[-1] = '{}.{}'.format(table_name, token)
else:
columns.append(str(token.value))
elif last_keyword in ['INTO'] and last_token.ttype is Punctuation:
# INSERT INTO `foo` (col1, `col2`) VALUES (..)
# print(last_keyword, token, last_token)
columns.append(str(token.value).strip('`'))
elif token.ttype is Wildcard:
# handle * wildcard in SELECT part, but ignore count(*)
# print(last_keyword, last_token, token.value)
if last_keyword == 'SELECT' and last_token.value != '(':
if str(last_token) == '.':
# handle SELECT foo.*
table_name = columns[-1]
columns[-1] = '{}.{}'.format(table_name, str(token))
else:
columns.append(str(token.value))
last_token = token
return unique(columns) | python | def get_query_columns(query):
"""
:type query str
:rtype: list[str]
"""
columns = []
last_keyword = None
last_token = None
# print(preprocess_query(query))
# these keywords should not change the state of a parser
# and not "reset" previously found SELECT keyword
keywords_ignored = ['AS', 'AND', 'OR', 'IN', 'IS', 'NOT', 'NOT NULL', 'LIKE', 'CASE', 'WHEN']
# these function should be ignored
# and not "reset" previously found SELECT keyword
functions_ignored = ['COUNT', 'MIN', 'MAX', 'FROM_UNIXTIME', 'DATE_FORMAT', 'CAST', 'CONVERT']
for token in get_query_tokens(query):
if token.is_keyword and token.value.upper() not in keywords_ignored:
# keep the name of the last keyword, e.g. SELECT, FROM, WHERE, (ORDER) BY
last_keyword = token.value.upper()
# print('keyword', last_keyword)
elif token.ttype is Name:
# analyze the name tokens, column names and where condition values
if last_keyword in ['SELECT', 'WHERE', 'BY', 'ON'] \
and last_token.value.upper() not in ['AS']:
# print(last_keyword, last_token, token.value)
if token.value.upper() not in functions_ignored:
if str(last_token) == '.':
# print('DOT', last_token, columns[-1])
# we have table.column notation example
# append column name to the last entry of columns
# as it is a table name in fact
table_name = columns[-1]
columns[-1] = '{}.{}'.format(table_name, token)
else:
columns.append(str(token.value))
elif last_keyword in ['INTO'] and last_token.ttype is Punctuation:
# INSERT INTO `foo` (col1, `col2`) VALUES (..)
# print(last_keyword, token, last_token)
columns.append(str(token.value).strip('`'))
elif token.ttype is Wildcard:
# handle * wildcard in SELECT part, but ignore count(*)
# print(last_keyword, last_token, token.value)
if last_keyword == 'SELECT' and last_token.value != '(':
if str(last_token) == '.':
# handle SELECT foo.*
table_name = columns[-1]
columns[-1] = '{}.{}'.format(table_name, str(token))
else:
columns.append(str(token.value))
last_token = token
return unique(columns) | [
"def",
"get_query_columns",
"(",
"query",
")",
":",
"columns",
"=",
"[",
"]",
"last_keyword",
"=",
"None",
"last_token",
"=",
"None",
"# print(preprocess_query(query))",
"# these keywords should not change the state of a parser",
"# and not \"reset\" previously found SELECT keyword",
"keywords_ignored",
"=",
"[",
"'AS'",
",",
"'AND'",
",",
"'OR'",
",",
"'IN'",
",",
"'IS'",
",",
"'NOT'",
",",
"'NOT NULL'",
",",
"'LIKE'",
",",
"'CASE'",
",",
"'WHEN'",
"]",
"# these function should be ignored",
"# and not \"reset\" previously found SELECT keyword",
"functions_ignored",
"=",
"[",
"'COUNT'",
",",
"'MIN'",
",",
"'MAX'",
",",
"'FROM_UNIXTIME'",
",",
"'DATE_FORMAT'",
",",
"'CAST'",
",",
"'CONVERT'",
"]",
"for",
"token",
"in",
"get_query_tokens",
"(",
"query",
")",
":",
"if",
"token",
".",
"is_keyword",
"and",
"token",
".",
"value",
".",
"upper",
"(",
")",
"not",
"in",
"keywords_ignored",
":",
"# keep the name of the last keyword, e.g. SELECT, FROM, WHERE, (ORDER) BY",
"last_keyword",
"=",
"token",
".",
"value",
".",
"upper",
"(",
")",
"# print('keyword', last_keyword)",
"elif",
"token",
".",
"ttype",
"is",
"Name",
":",
"# analyze the name tokens, column names and where condition values",
"if",
"last_keyword",
"in",
"[",
"'SELECT'",
",",
"'WHERE'",
",",
"'BY'",
",",
"'ON'",
"]",
"and",
"last_token",
".",
"value",
".",
"upper",
"(",
")",
"not",
"in",
"[",
"'AS'",
"]",
":",
"# print(last_keyword, last_token, token.value)",
"if",
"token",
".",
"value",
".",
"upper",
"(",
")",
"not",
"in",
"functions_ignored",
":",
"if",
"str",
"(",
"last_token",
")",
"==",
"'.'",
":",
"# print('DOT', last_token, columns[-1])",
"# we have table.column notation example",
"# append column name to the last entry of columns",
"# as it is a table name in fact",
"table_name",
"=",
"columns",
"[",
"-",
"1",
"]",
"columns",
"[",
"-",
"1",
"]",
"=",
"'{}.{}'",
".",
"format",
"(",
"table_name",
",",
"token",
")",
"else",
":",
"columns",
".",
"append",
"(",
"str",
"(",
"token",
".",
"value",
")",
")",
"elif",
"last_keyword",
"in",
"[",
"'INTO'",
"]",
"and",
"last_token",
".",
"ttype",
"is",
"Punctuation",
":",
"# INSERT INTO `foo` (col1, `col2`) VALUES (..)",
"# print(last_keyword, token, last_token)",
"columns",
".",
"append",
"(",
"str",
"(",
"token",
".",
"value",
")",
".",
"strip",
"(",
"'`'",
")",
")",
"elif",
"token",
".",
"ttype",
"is",
"Wildcard",
":",
"# handle * wildcard in SELECT part, but ignore count(*)",
"# print(last_keyword, last_token, token.value)",
"if",
"last_keyword",
"==",
"'SELECT'",
"and",
"last_token",
".",
"value",
"!=",
"'('",
":",
"if",
"str",
"(",
"last_token",
")",
"==",
"'.'",
":",
"# handle SELECT foo.*",
"table_name",
"=",
"columns",
"[",
"-",
"1",
"]",
"columns",
"[",
"-",
"1",
"]",
"=",
"'{}.{}'",
".",
"format",
"(",
"table_name",
",",
"str",
"(",
"token",
")",
")",
"else",
":",
"columns",
".",
"append",
"(",
"str",
"(",
"token",
".",
"value",
")",
")",
"last_token",
"=",
"token",
"return",
"unique",
"(",
"columns",
")"
] | :type query str
:rtype: list[str] | [
":",
"type",
"query",
"str",
":",
"rtype",
":",
"list",
"[",
"str",
"]"
] | train | https://github.com/macbre/sql-metadata/blob/4b7b4ae0a961d568075aefe78535cf5aee74583c/sql_metadata.py#L69-L128 |
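A worked example of the column extraction above, under the same import assumption: table-prefixed names are kept as table.column, count(*) is skipped, and a prefixed wildcard such as foo.* is reported.

from sql_metadata import get_query_columns

print(get_query_columns("SELECT test, id FROM foo WHERE id = 3"))
# ['test', 'id']
print(get_query_columns(
    "SELECT foo.* FROM foo INNER JOIN bar ON foo.id = bar.foo_id"))
# ['foo.*', 'foo.id', 'bar.foo_id']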
macbre/sql-metadata | sql_metadata.py | get_query_tables | def get_query_tables(query):
"""
:type query str
:rtype: list[str]
"""
tables = []
last_keyword = None
last_token = None
table_syntax_keywords = [
# SELECT queries
'FROM', 'WHERE', 'JOIN', 'INNER JOIN', 'LEFT JOIN', 'RIGHT JOIN', 'ON',
# INSERT queries
'INTO', 'VALUES',
# UPDATE queries
'UPDATE', 'SET',
# Hive queries
'TABLE', # INSERT TABLE
]
# print(query, get_query_tokens(query))
for token in get_query_tokens(query):
# print([token, token.ttype, last_token, last_keyword])
if token.is_keyword and token.value.upper() in table_syntax_keywords:
# keep the name of the last keyword, the next one can be a table name
last_keyword = token.value.upper()
# print('keyword', last_keyword)
elif str(token) == '(':
# reset the last_keyword for INSERT `foo` VALUES(id, bar) ...
last_keyword = None
elif token.is_keyword and str(token) in ['FORCE', 'ORDER']:
# reset the last_keyword for "SELECT x FORCE INDEX" queries and "SELECT x ORDER BY"
last_keyword = None
elif token.is_keyword and str(token) == 'SELECT' and last_keyword in ['INTO', 'TABLE']:
# reset the last_keyword for "INSERT INTO SELECT" and "INSERT TABLE SELECT" queries
last_keyword = None
elif token.ttype is Name or token.is_keyword:
# print([last_keyword, last_token, token.value])
# analyze the name tokens, column names and where condition values
if last_keyword in ['FROM', 'JOIN', 'INNER JOIN', 'LEFT JOIN', 'RIGHT JOIN',
'INTO', 'UPDATE', 'TABLE'] \
and last_token not in ['AS'] \
and token.value not in ['AS', 'SELECT']:
if last_token == '.':
# we have database.table notation example
# append table name to the last entry of tables
# as it is a database name in fact
database_name = tables[-1]
tables[-1] = '{}.{}'.format(database_name, token)
last_keyword = None
elif last_token not in [',', last_keyword]:
# it's not a list of tables, e.g. SELECT * FROM foo, bar
# hence, it can be the case of alias without AS, e.g. SELECT * FROM foo bar
pass
else:
table_name = str(token.value.strip('`'))
tables.append(table_name)
last_token = token.value.upper()
return unique(tables) | python | def get_query_tables(query):
"""
:type query str
:rtype: list[str]
"""
tables = []
last_keyword = None
last_token = None
table_syntax_keywords = [
# SELECT queries
'FROM', 'WHERE', 'JOIN', 'INNER JOIN', 'LEFT JOIN', 'RIGHT JOIN', 'ON',
# INSERT queries
'INTO', 'VALUES',
# UPDATE queries
'UPDATE', 'SET',
# Hive queries
'TABLE', # INSERT TABLE
]
# print(query, get_query_tokens(query))
for token in get_query_tokens(query):
# print([token, token.ttype, last_token, last_keyword])
if token.is_keyword and token.value.upper() in table_syntax_keywords:
# keep the name of the last keyword, the next one can be a table name
last_keyword = token.value.upper()
# print('keyword', last_keyword)
elif str(token) == '(':
# reset the last_keyword for INSERT `foo` VALUES(id, bar) ...
last_keyword = None
elif token.is_keyword and str(token) in ['FORCE', 'ORDER']:
# reset the last_keyword for "SELECT x FORCE INDEX" queries and "SELECT x ORDER BY"
last_keyword = None
elif token.is_keyword and str(token) == 'SELECT' and last_keyword in ['INTO', 'TABLE']:
# reset the last_keyword for "INSERT INTO SELECT" and "INSERT TABLE SELECT" queries
last_keyword = None
elif token.ttype is Name or token.is_keyword:
# print([last_keyword, last_token, token.value])
# analyze the name tokens, column names and where condition values
if last_keyword in ['FROM', 'JOIN', 'INNER JOIN', 'LEFT JOIN', 'RIGHT JOIN',
'INTO', 'UPDATE', 'TABLE'] \
and last_token not in ['AS'] \
and token.value not in ['AS', 'SELECT']:
if last_token == '.':
# we have database.table notation example
# append table name to the last entry of tables
# as it is a database name in fact
database_name = tables[-1]
tables[-1] = '{}.{}'.format(database_name, token)
last_keyword = None
elif last_token not in [',', last_keyword]:
# it's not a list of tables, e.g. SELECT * FROM foo, bar
# hence, it can be the case of alias without AS, e.g. SELECT * FROM foo bar
pass
else:
table_name = str(token.value.strip('`'))
tables.append(table_name)
last_token = token.value.upper()
return unique(tables) | [
"def",
"get_query_tables",
"(",
"query",
")",
":",
"tables",
"=",
"[",
"]",
"last_keyword",
"=",
"None",
"last_token",
"=",
"None",
"table_syntax_keywords",
"=",
"[",
"# SELECT queries",
"'FROM'",
",",
"'WHERE'",
",",
"'JOIN'",
",",
"'INNER JOIN'",
",",
"'LEFT JOIN'",
",",
"'RIGHT JOIN'",
",",
"'ON'",
",",
"# INSERT queries",
"'INTO'",
",",
"'VALUES'",
",",
"# UPDATE queries",
"'UPDATE'",
",",
"'SET'",
",",
"# Hive queries",
"'TABLE'",
",",
"# INSERT TABLE",
"]",
"# print(query, get_query_tokens(query))",
"for",
"token",
"in",
"get_query_tokens",
"(",
"query",
")",
":",
"# print([token, token.ttype, last_token, last_keyword])",
"if",
"token",
".",
"is_keyword",
"and",
"token",
".",
"value",
".",
"upper",
"(",
")",
"in",
"table_syntax_keywords",
":",
"# keep the name of the last keyword, the next one can be a table name",
"last_keyword",
"=",
"token",
".",
"value",
".",
"upper",
"(",
")",
"# print('keyword', last_keyword)",
"elif",
"str",
"(",
"token",
")",
"==",
"'('",
":",
"# reset the last_keyword for INSERT `foo` VALUES(id, bar) ...",
"last_keyword",
"=",
"None",
"elif",
"token",
".",
"is_keyword",
"and",
"str",
"(",
"token",
")",
"in",
"[",
"'FORCE'",
",",
"'ORDER'",
"]",
":",
"# reset the last_keyword for \"SELECT x FORCE INDEX\" queries and \"SELECT x ORDER BY\"",
"last_keyword",
"=",
"None",
"elif",
"token",
".",
"is_keyword",
"and",
"str",
"(",
"token",
")",
"==",
"'SELECT'",
"and",
"last_keyword",
"in",
"[",
"'INTO'",
",",
"'TABLE'",
"]",
":",
"# reset the last_keyword for \"INSERT INTO SELECT\" and \"INSERT TABLE SELECT\" queries",
"last_keyword",
"=",
"None",
"elif",
"token",
".",
"ttype",
"is",
"Name",
"or",
"token",
".",
"is_keyword",
":",
"# print([last_keyword, last_token, token.value])",
"# analyze the name tokens, column names and where condition values",
"if",
"last_keyword",
"in",
"[",
"'FROM'",
",",
"'JOIN'",
",",
"'INNER JOIN'",
",",
"'LEFT JOIN'",
",",
"'RIGHT JOIN'",
",",
"'INTO'",
",",
"'UPDATE'",
",",
"'TABLE'",
"]",
"and",
"last_token",
"not",
"in",
"[",
"'AS'",
"]",
"and",
"token",
".",
"value",
"not",
"in",
"[",
"'AS'",
",",
"'SELECT'",
"]",
":",
"if",
"last_token",
"==",
"'.'",
":",
"# we have database.table notation example",
"# append table name to the last entry of tables",
"# as it is a database name in fact",
"database_name",
"=",
"tables",
"[",
"-",
"1",
"]",
"tables",
"[",
"-",
"1",
"]",
"=",
"'{}.{}'",
".",
"format",
"(",
"database_name",
",",
"token",
")",
"last_keyword",
"=",
"None",
"elif",
"last_token",
"not",
"in",
"[",
"','",
",",
"last_keyword",
"]",
":",
"# it's not a list of tables, e.g. SELECT * FROM foo, bar",
"# hence, it can be the case of alias without AS, e.g. SELECT * FROM foo bar",
"pass",
"else",
":",
"table_name",
"=",
"str",
"(",
"token",
".",
"value",
".",
"strip",
"(",
"'`'",
")",
")",
"tables",
".",
"append",
"(",
"table_name",
")",
"last_token",
"=",
"token",
".",
"value",
".",
"upper",
"(",
")",
"return",
"unique",
"(",
"tables",
")"
] | :type query str
:rtype: list[str] | [
":",
"type",
"query",
"str",
":",
"rtype",
":",
"list",
"[",
"str",
"]"
] | train | https://github.com/macbre/sql-metadata/blob/4b7b4ae0a961d568075aefe78535cf5aee74583c/sql_metadata.py#L131-L193 |
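A worked example of the table extraction above, same import assumption: database.table notation is preserved and aliases given without AS are ignored.

from sql_metadata import get_query_tables

print(get_query_tables("SELECT * FROM product_a.users"))
# ['product_a.users']
print(get_query_tables(
    "SELECT * FROM foo f INNER JOIN bar ON foo.id = bar.foo_id"))
# ['foo', 'bar']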
macbre/sql-metadata | sql_metadata.py | get_query_limit_and_offset | def get_query_limit_and_offset(query):
"""
:type query str
:rtype: (int, int)
"""
limit = None
offset = None
last_keyword = None
last_token = None
# print(query)
for token in get_query_tokens(query):
# print([token, token.ttype, last_keyword])
if token.is_keyword and token.value.upper() in ['LIMIT', 'OFFSET']:
last_keyword = token.value.upper()
elif token.ttype is Number.Integer:
# print([token, last_keyword, last_token_was_integer])
if last_keyword == 'LIMIT':
# LIMIT <limit>
limit = int(token.value)
last_keyword = None
elif last_keyword == 'OFFSET':
# OFFSET <offset>
offset = int(token.value)
last_keyword = None
elif last_token and last_token.ttype is Punctuation:
# LIMIT <offset>,<limit>
offset = limit
limit = int(token.value)
last_token = token
if limit is None:
return None
return limit, offset or 0 | python | def get_query_limit_and_offset(query):
"""
:type query str
:rtype: (int, int)
"""
limit = None
offset = None
last_keyword = None
last_token = None
# print(query)
for token in get_query_tokens(query):
# print([token, token.ttype, last_keyword])
if token.is_keyword and token.value.upper() in ['LIMIT', 'OFFSET']:
last_keyword = token.value.upper()
elif token.ttype is Number.Integer:
# print([token, last_keyword, last_token_was_integer])
if last_keyword == 'LIMIT':
# LIMIT <limit>
limit = int(token.value)
last_keyword = None
elif last_keyword == 'OFFSET':
# OFFSET <offset>
offset = int(token.value)
last_keyword = None
elif last_token and last_token.ttype is Punctuation:
# LIMIT <offset>,<limit>
offset = limit
limit = int(token.value)
last_token = token
if limit is None:
return None
return limit, offset or 0 | [
"def",
"get_query_limit_and_offset",
"(",
"query",
")",
":",
"limit",
"=",
"None",
"offset",
"=",
"None",
"last_keyword",
"=",
"None",
"last_token",
"=",
"None",
"# print(query)",
"for",
"token",
"in",
"get_query_tokens",
"(",
"query",
")",
":",
"# print([token, token.ttype, last_keyword])",
"if",
"token",
".",
"is_keyword",
"and",
"token",
".",
"value",
".",
"upper",
"(",
")",
"in",
"[",
"'LIMIT'",
",",
"'OFFSET'",
"]",
":",
"last_keyword",
"=",
"token",
".",
"value",
".",
"upper",
"(",
")",
"elif",
"token",
".",
"ttype",
"is",
"Number",
".",
"Integer",
":",
"# print([token, last_keyword, last_token_was_integer])",
"if",
"last_keyword",
"==",
"'LIMIT'",
":",
"# LIMIT <limit>",
"limit",
"=",
"int",
"(",
"token",
".",
"value",
")",
"last_keyword",
"=",
"None",
"elif",
"last_keyword",
"==",
"'OFFSET'",
":",
"# OFFSET <offset>",
"offset",
"=",
"int",
"(",
"token",
".",
"value",
")",
"last_keyword",
"=",
"None",
"elif",
"last_token",
"and",
"last_token",
".",
"ttype",
"is",
"Punctuation",
":",
"# LIMIT <offset>,<limit>",
"offset",
"=",
"limit",
"limit",
"=",
"int",
"(",
"token",
".",
"value",
")",
"last_token",
"=",
"token",
"if",
"limit",
"is",
"None",
":",
"return",
"None",
"return",
"limit",
",",
"offset",
"or",
"0"
] | :type query str
:rtype: (int, int) | [
":",
"type",
"query",
"str",
":",
"rtype",
":",
"(",
"int",
"int",
")"
] | train | https://github.com/macbre/sql-metadata/blob/4b7b4ae0a961d568075aefe78535cf5aee74583c/sql_metadata.py#L196-L232 |
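Both LIMIT forms are handled, returning a (limit, offset) tuple; a quick check under the same import assumption.

from sql_metadata import get_query_limit_and_offset

assert get_query_limit_and_offset("SELECT * FROM foo") is None
assert get_query_limit_and_offset("SELECT * FROM foo LIMIT 10 OFFSET 5") == (10, 5)
assert get_query_limit_and_offset("SELECT * FROM foo LIMIT 5, 10") == (10, 5)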
macbre/sql-metadata | sql_metadata.py | normalize_likes | def normalize_likes(sql):
"""
Normalize and wrap LIKE statements
:type sql str
:rtype: str
"""
sql = sql.replace('%', '')
# LIKE '%bot'
sql = re.sub(r"LIKE '[^\']+'", 'LIKE X', sql)
# or all_groups LIKE X or all_groups LIKE X
matches = re.finditer(r'(or|and) [^\s]+ LIKE X', sql, flags=re.IGNORECASE)
matches = [match.group(0) for match in matches] if matches else None
if matches:
for match in set(matches):
sql = re.sub(r'(\s?' + re.escape(match) + ')+', ' ' + match + ' ...', sql)
return sql | python | def normalize_likes(sql):
"""
Normalize and wrap LIKE statements
:type sql str
:rtype: str
"""
sql = sql.replace('%', '')
# LIKE '%bot'
sql = re.sub(r"LIKE '[^\']+'", 'LIKE X', sql)
# or all_groups LIKE X or all_groups LIKE X
matches = re.finditer(r'(or|and) [^\s]+ LIKE X', sql, flags=re.IGNORECASE)
matches = [match.group(0) for match in matches] if matches else None
if matches:
for match in set(matches):
sql = re.sub(r'(\s?' + re.escape(match) + ')+', ' ' + match + ' ...', sql)
return sql | [
"def",
"normalize_likes",
"(",
"sql",
")",
":",
"sql",
"=",
"sql",
".",
"replace",
"(",
"'%'",
",",
"''",
")",
"# LIKE '%bot'",
"sql",
"=",
"re",
".",
"sub",
"(",
"r\"LIKE '[^\\']+'\"",
",",
"'LIKE X'",
",",
"sql",
")",
"# or all_groups LIKE X or all_groups LIKE X",
"matches",
"=",
"re",
".",
"finditer",
"(",
"r'(or|and) [^\\s]+ LIKE X'",
",",
"sql",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"matches",
"=",
"[",
"match",
".",
"group",
"(",
"0",
")",
"for",
"match",
"in",
"matches",
"]",
"if",
"matches",
"else",
"None",
"if",
"matches",
":",
"for",
"match",
"in",
"set",
"(",
"matches",
")",
":",
"sql",
"=",
"re",
".",
"sub",
"(",
"r'(\\s?'",
"+",
"re",
".",
"escape",
"(",
"match",
")",
"+",
"')+'",
",",
"' '",
"+",
"match",
"+",
"' ...'",
",",
"sql",
")",
"return",
"sql"
] | Normalize and wrap LIKE statements
:type sql str
:rtype: str | [
"Normalize",
"and",
"wrap",
"LIKE",
"statements"
] | train | https://github.com/macbre/sql-metadata/blob/4b7b4ae0a961d568075aefe78535cf5aee74583c/sql_metadata.py#L236-L256 |
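What the LIKE normalization does, under the same import assumption; repeated conditions on the same column are collapsed with an ellipsis.

from sql_metadata import normalize_likes

assert normalize_likes("SELECT * FROM t WHERE name LIKE '%bot%'") == \
    "SELECT * FROM t WHERE name LIKE X"
assert normalize_likes(
    "SELECT * FROM t WHERE a LIKE '%x%' OR a LIKE '%y%' OR a LIKE '%z%'") == \
    "SELECT * FROM t WHERE a LIKE X OR a LIKE X ..."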
macbre/sql-metadata | sql_metadata.py | generalize_sql | def generalize_sql(sql):
"""
Removes most variables from an SQL query and replaces them with X or N for numbers.
Based on Mediawiki's DatabaseBase::generalizeSQL
:type sql str|None
:rtype: str
"""
if sql is None:
return None
# multiple spaces
sql = re.sub(r'\s{2,}', ' ', sql)
# MW comments
# e.g. /* CategoryDataService::getMostVisited N.N.N.N */
sql = remove_comments_from_sql(sql)
# handle LIKE statements
sql = normalize_likes(sql)
sql = re.sub(r"\\\\", '', sql)
sql = re.sub(r"\\'", '', sql)
sql = re.sub(r'\\"', '', sql)
sql = re.sub(r"'[^\']*'", 'X', sql)
sql = re.sub(r'"[^\"]*"', 'X', sql)
# All newlines, tabs, etc replaced by single space
sql = re.sub(r'\s+', ' ', sql)
# All numbers => N
sql = re.sub(r'-?[0-9]+', 'N', sql)
# WHERE foo IN ('880987','882618','708228','522330')
sql = re.sub(r' (IN|VALUES)\s*\([^,]+,[^)]+\)', ' \\1 (XYZ)', sql, flags=re.IGNORECASE)
return sql.strip() | python | def generalize_sql(sql):
"""
Removes most variables from an SQL query and replaces them with X or N for numbers.
Based on Mediawiki's DatabaseBase::generalizeSQL
:type sql str|None
:rtype: str
"""
if sql is None:
return None
# multiple spaces
sql = re.sub(r'\s{2,}', ' ', sql)
# MW comments
# e.g. /* CategoryDataService::getMostVisited N.N.N.N */
sql = remove_comments_from_sql(sql)
# handle LIKE statements
sql = normalize_likes(sql)
sql = re.sub(r"\\\\", '', sql)
sql = re.sub(r"\\'", '', sql)
sql = re.sub(r'\\"', '', sql)
sql = re.sub(r"'[^\']*'", 'X', sql)
sql = re.sub(r'"[^\"]*"', 'X', sql)
# All newlines, tabs, etc replaced by single space
sql = re.sub(r'\s+', ' ', sql)
# All numbers => N
sql = re.sub(r'-?[0-9]+', 'N', sql)
# WHERE foo IN ('880987','882618','708228','522330')
sql = re.sub(r' (IN|VALUES)\s*\([^,]+,[^)]+\)', ' \\1 (XYZ)', sql, flags=re.IGNORECASE)
return sql.strip() | [
"def",
"generalize_sql",
"(",
"sql",
")",
":",
"if",
"sql",
"is",
"None",
":",
"return",
"None",
"# multiple spaces",
"sql",
"=",
"re",
".",
"sub",
"(",
"r'\\s{2,}'",
",",
"' '",
",",
"sql",
")",
"# MW comments",
"# e.g. /* CategoryDataService::getMostVisited N.N.N.N */",
"sql",
"=",
"remove_comments_from_sql",
"(",
"sql",
")",
"# handle LIKE statements",
"sql",
"=",
"normalize_likes",
"(",
"sql",
")",
"sql",
"=",
"re",
".",
"sub",
"(",
"r\"\\\\\\\\\"",
",",
"''",
",",
"sql",
")",
"sql",
"=",
"re",
".",
"sub",
"(",
"r\"\\\\'\"",
",",
"''",
",",
"sql",
")",
"sql",
"=",
"re",
".",
"sub",
"(",
"r'\\\\\"'",
",",
"''",
",",
"sql",
")",
"sql",
"=",
"re",
".",
"sub",
"(",
"r\"'[^\\']*'\"",
",",
"'X'",
",",
"sql",
")",
"sql",
"=",
"re",
".",
"sub",
"(",
"r'\"[^\\\"]*\"'",
",",
"'X'",
",",
"sql",
")",
"# All newlines, tabs, etc replaced by single space",
"sql",
"=",
"re",
".",
"sub",
"(",
"r'\\s+'",
",",
"' '",
",",
"sql",
")",
"# All numbers => N",
"sql",
"=",
"re",
".",
"sub",
"(",
"r'-?[0-9]+'",
",",
"'N'",
",",
"sql",
")",
"# WHERE foo IN ('880987','882618','708228','522330')",
"sql",
"=",
"re",
".",
"sub",
"(",
"r' (IN|VALUES)\\s*\\([^,]+,[^)]+\\)'",
",",
"' \\\\1 (XYZ)'",
",",
"sql",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"return",
"sql",
".",
"strip",
"(",
")"
] | Removes most variables from an SQL query and replaces them with X or N for numbers.
Based on Mediawiki's DatabaseBase::generalizeSQL
:type sql str|None
:rtype: str | [
"Removes",
"most",
"variables",
"from",
"an",
"SQL",
"query",
"and",
"replaces",
"them",
"with",
"X",
"or",
"N",
"for",
"numbers",
"."
] | train | https://github.com/macbre/sql-metadata/blob/4b7b4ae0a961d568075aefe78535cf5aee74583c/sql_metadata.py#L269-L306 |
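A short sketch of generalize_sql in use. The sample query is invented; the expected output follows from the substitutions in the body above (string literals become X, numbers become N, multi-value IN lists collapse to (XYZ)), assuming remove_comments_from_sql and normalize_likes leave a comment-free, LIKE-free query untouched.

from sql_metadata import generalize_sql

sql = "SELECT * FROM product WHERE id = 42 AND name = 'Dune' AND category_id IN (1, 2, 3)"
print(generalize_sql(sql))
# expected: SELECT * FROM product WHERE id = N AND name = X AND category_id IN (XYZ)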
biocore/deblur | deblur/parallel_deblur.py | deblur_system_call | def deblur_system_call(params, input_fp):
"""Build deblur command for subprocess.
Parameters
----------
params: list of str
parameter settings to pass to deblur CLI
input_fp : str
name of the input fasta file to deblur
Returns
-------
stdout: string
process output directed to standard output
stderr: string
process output directed to standard error
return_value: integer
return code from process
"""
logger = logging.getLogger(__name__)
logger.debug('[%s] deblur system call params %s, input_fp %s' %
(mp.current_process().name, params, input_fp))
# construct command
script_name = "deblur"
script_subprogram = "workflow"
command = [script_name,
script_subprogram,
'--seqs-fp', input_fp,
'--is-worker-thread',
'--keep-tmp-files']
command.extend(params)
logger.debug('[%s] running command %s' % (mp.current_process().name,
command))
return _system_call(command) | python | def deblur_system_call(params, input_fp):
"""Build deblur command for subprocess.
Parameters
----------
params: list of str
parameter settings to pass to deblur CLI
input_fp : str
name of the input fasta file to deblur
Returns
-------
stdout: string
process output directed to standard output
stderr: string
process output directed to standard error
return_value: integer
return code from process
"""
logger = logging.getLogger(__name__)
logger.debug('[%s] deblur system call params %s, input_fp %s' %
(mp.current_process().name, params, input_fp))
# construct command
script_name = "deblur"
script_subprogram = "workflow"
command = [script_name,
script_subprogram,
'--seqs-fp', input_fp,
'--is-worker-thread',
'--keep-tmp-files']
command.extend(params)
logger.debug('[%s] running command %s' % (mp.current_process().name,
command))
return _system_call(command) | [
"def",
"deblur_system_call",
"(",
"params",
",",
"input_fp",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"'[%s] deblur system call params %s, input_fp %s'",
"%",
"(",
"mp",
".",
"current_process",
"(",
")",
".",
"name",
",",
"params",
",",
"input_fp",
")",
")",
"# construct command",
"script_name",
"=",
"\"deblur\"",
"script_subprogram",
"=",
"\"workflow\"",
"command",
"=",
"[",
"script_name",
",",
"script_subprogram",
",",
"'--seqs-fp'",
",",
"input_fp",
",",
"'--is-worker-thread'",
",",
"'--keep-tmp-files'",
"]",
"command",
".",
"extend",
"(",
"params",
")",
"logger",
".",
"debug",
"(",
"'[%s] running command %s'",
"%",
"(",
"mp",
".",
"current_process",
"(",
")",
".",
"name",
",",
"command",
")",
")",
"return",
"_system_call",
"(",
"command",
")"
] | Build deblur command for subprocess.
Parameters
----------
params: list of str
parameter settings to pass to deblur CLI
input_fp : str
name of the input fasta file to deblur
Returns
-------
stdout: string
process output directed to standard output
stderr: string
process output directed to standard error
return_value: integer
return code from process | [
"Build",
"deblur",
"command",
"for",
"subprocess",
"."
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/parallel_deblur.py#L17-L53 |
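A hedged sketch of calling deblur_system_call directly; the extra CLI flags and the input filename are hypothetical, and the deblur executable must be on PATH.

# params holds additional flags passed through to the spawned 'deblur workflow' process
params = ['--output-dir', 'worker_out', '--trim-length', '150']
stdout, stderr, return_value = deblur_system_call(params, 'sample1.trim.derep.fa')
if return_value != 0:
    print(stderr)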
biocore/deblur | deblur/parallel_deblur.py | run_functor | def run_functor(functor, *args, **kwargs):
"""
Given a functor, run it and return its result. We can use this with
multiprocessing.map and map it over a list of job functors to do them.
Handles getting more than multiprocessing's pitiful exception output
This function was derived from:
http://stackoverflow.com/a/16618842/19741
This code was adopted from the American Gut project:
https://github.com/biocore/American-Gut/blob/master/americangut/parallel.py
"""
try:
# This is where you do your actual work
return functor(*args, **kwargs)
except Exception:
# Put all exception text into an exception and raise that
raise Exception("".join(traceback.format_exception(*sys.exc_info()))) | python | def run_functor(functor, *args, **kwargs):
"""
Given a functor, run it and return its result. We can use this with
multiprocessing.map and map it over a list of job functors to do them.
Handles getting more than multiprocessing's pitiful exception output
This function was derived from:
http://stackoverflow.com/a/16618842/19741
This code was adopted from the American Gut project:
https://github.com/biocore/American-Gut/blob/master/americangut/parallel.py
"""
try:
# This is where you do your actual work
return functor(*args, **kwargs)
except Exception:
# Put all exception text into an exception and raise that
raise Exception("".join(traceback.format_exception(*sys.exc_info()))) | [
"def",
"run_functor",
"(",
"functor",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"# This is where you do your actual work",
"return",
"functor",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
":",
"# Put all exception text into an exception and raise that",
"raise",
"Exception",
"(",
"\"\"",
".",
"join",
"(",
"traceback",
".",
"format_exception",
"(",
"*",
"sys",
".",
"exc_info",
"(",
")",
")",
")",
")"
] | Given a functor, run it and return its result. We can use this with
multiprocessing.map and map it over a list of job functors to do them.
Handles getting more than multiprocessing's pitiful exception output
This function was derived from:
http://stackoverflow.com/a/16618842/19741
This code was adopted from the American Gut project:
https://github.com/biocore/American-Gut/blob/master/americangut/parallel.py | [
"Given",
"a",
"functor",
"run",
"it",
"and",
"return",
"its",
"result",
".",
"We",
"can",
"use",
"this",
"with",
"multiprocessing",
".",
"map",
"and",
"map",
"it",
"over",
"a",
"list",
"of",
"job",
"functors",
"to",
"do",
"them",
"."
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/parallel_deblur.py#L56-L74 |
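run_functor just forwards to the wrapped callable, so the useful sketch is the functools.partial pattern used later in this module; the square function is illustrative only.

from functools import partial

def square(x):
    return x * x

# baking run_functor in means worker exceptions arrive with a full traceback string
functor = partial(run_functor, square)
print(functor(7))  # 49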
biocore/deblur | deblur/parallel_deblur.py | parallel_deblur | def parallel_deblur(inputs, params,
pos_ref_db_fp, neg_ref_dp_fp, jobs_to_start=1):
"""Dispatch execution over a pool of processors
This code was adopted from the American Gut project:
https://github.com/biocore/American-Gut/blob/master/americangut/parallel.py
Parameters
----------
inputs : iterable of str
File paths to input per-sample sequence files
params : list of str
list of CLI parameters supplied to the deblur workflow
(argv - first 2 are 'deblur','workflow' and are ignored)
pos_ref_db_fp : list of str
the indexed positive (16s) sortmerna database
(created in the main thread)
neg_ref_db_fp : list of str
the indexed negative (artifacts) sortmerna database
(created in the main thread)
jobs_to_start : int, optional
The number of processors on the local system to use
Returns
-------
all_result_paths : list
list of expected output files
"""
logger = logging.getLogger(__name__)
logger.info('parallel deblur started for %d inputs' % len(inputs))
# remove the irrelevant parameters
remove_param_list = ['-O', '--jobs-to-start', '--seqs-fp',
'--pos-ref-db-fp', '--neg-ref-db-fp']
skipnext = False
newparams = []
for carg in params[2:]:
if skipnext:
skipnext = False
continue
if carg in remove_param_list:
skipnext = True
continue
newparams.append(carg)
# add the ref_db_fp (since it may be not present in the
# original command parameters)
if pos_ref_db_fp:
new_pos_ref_db_fp = ','.join(pos_ref_db_fp)
newparams.append('--pos-ref-db-fp')
newparams.append(new_pos_ref_db_fp)
if neg_ref_dp_fp:
new_neg_ref_db_fp = ','.join(neg_ref_dp_fp)
newparams.append('--neg-ref-db-fp')
newparams.append(new_neg_ref_db_fp)
logger.debug('ready for functor %s' % newparams)
functor = partial(run_functor, deblur_system_call, newparams)
logger.debug('ready for pool %d jobs' % jobs_to_start)
pool = mp.Pool(processes=jobs_to_start)
logger.debug('almost running...')
for stdout, stderr, es in pool.map(functor, inputs):
if es != 0:
raise RuntimeError("stdout: %s\nstderr: %s\nexit: %d" % (stdout,
stderr,
es)) | python | def parallel_deblur(inputs, params,
pos_ref_db_fp, neg_ref_dp_fp, jobs_to_start=1):
"""Dispatch execution over a pool of processors
This code was adopted from the American Gut project:
https://github.com/biocore/American-Gut/blob/master/americangut/parallel.py
Parameters
----------
inputs : iterable of str
File paths to input per-sample sequence files
params : list of str
list of CLI parameters supplied to the deblur workflow
(argv - first 2 are 'deblur','workflow' and are ignored)
pos_ref_db_fp : list of str
the indexed positive (16s) sortmerna database
(created in the main thread)
neg_ref_db_fp : list of str
the indexed negative (artifacts) sortmerna database
(created in the main thread)
jobs_to_start : int, optional
The number of processors on the local system to use
Returns
-------
all_result_paths : list
list of expected output files
"""
logger = logging.getLogger(__name__)
logger.info('parallel deblur started for %d inputs' % len(inputs))
# remove the irrelevant parameters
remove_param_list = ['-O', '--jobs-to-start', '--seqs-fp',
'--pos-ref-db-fp', '--neg-ref-db-fp']
skipnext = False
newparams = []
for carg in params[2:]:
if skipnext:
skipnext = False
continue
if carg in remove_param_list:
skipnext = True
continue
newparams.append(carg)
# add the ref_db_fp (since it may be not present in the
# original command parameters)
if pos_ref_db_fp:
new_pos_ref_db_fp = ','.join(pos_ref_db_fp)
newparams.append('--pos-ref-db-fp')
newparams.append(new_pos_ref_db_fp)
if neg_ref_dp_fp:
new_neg_ref_db_fp = ','.join(neg_ref_dp_fp)
newparams.append('--neg-ref-db-fp')
newparams.append(new_neg_ref_db_fp)
logger.debug('ready for functor %s' % newparams)
functor = partial(run_functor, deblur_system_call, newparams)
logger.debug('ready for pool %d jobs' % jobs_to_start)
pool = mp.Pool(processes=jobs_to_start)
logger.debug('almost running...')
for stdout, stderr, es in pool.map(functor, inputs):
if es != 0:
raise RuntimeError("stdout: %s\nstderr: %s\nexit: %d" % (stdout,
stderr,
es)) | [
"def",
"parallel_deblur",
"(",
"inputs",
",",
"params",
",",
"pos_ref_db_fp",
",",
"neg_ref_dp_fp",
",",
"jobs_to_start",
"=",
"1",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"'parallel deblur started for %d inputs'",
"%",
"len",
"(",
"inputs",
")",
")",
"# remove the irrelevant parameters",
"remove_param_list",
"=",
"[",
"'-O'",
",",
"'--jobs-to-start'",
",",
"'--seqs-fp'",
",",
"'--pos-ref-db-fp'",
",",
"'--neg-ref-db-fp'",
"]",
"skipnext",
"=",
"False",
"newparams",
"=",
"[",
"]",
"for",
"carg",
"in",
"params",
"[",
"2",
":",
"]",
":",
"if",
"skipnext",
":",
"skipnext",
"=",
"False",
"continue",
"if",
"carg",
"in",
"remove_param_list",
":",
"skipnext",
"=",
"True",
"continue",
"newparams",
".",
"append",
"(",
"carg",
")",
"# add the ref_db_fp (since it may be not present in the",
"# original command parameters)",
"if",
"pos_ref_db_fp",
":",
"new_pos_ref_db_fp",
"=",
"','",
".",
"join",
"(",
"pos_ref_db_fp",
")",
"newparams",
".",
"append",
"(",
"'--pos-ref-db-fp'",
")",
"newparams",
".",
"append",
"(",
"new_pos_ref_db_fp",
")",
"if",
"neg_ref_dp_fp",
":",
"new_neg_ref_db_fp",
"=",
"','",
".",
"join",
"(",
"neg_ref_dp_fp",
")",
"newparams",
".",
"append",
"(",
"'--neg-ref-db-fp'",
")",
"newparams",
".",
"append",
"(",
"new_neg_ref_db_fp",
")",
"logger",
".",
"debug",
"(",
"'ready for functor %s'",
"%",
"newparams",
")",
"functor",
"=",
"partial",
"(",
"run_functor",
",",
"deblur_system_call",
",",
"newparams",
")",
"logger",
".",
"debug",
"(",
"'ready for pool %d jobs'",
"%",
"jobs_to_start",
")",
"pool",
"=",
"mp",
".",
"Pool",
"(",
"processes",
"=",
"jobs_to_start",
")",
"logger",
".",
"debug",
"(",
"'almost running...'",
")",
"for",
"stdout",
",",
"stderr",
",",
"es",
"in",
"pool",
".",
"map",
"(",
"functor",
",",
"inputs",
")",
":",
"if",
"es",
"!=",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"stdout: %s\\nstderr: %s\\nexit: %d\"",
"%",
"(",
"stdout",
",",
"stderr",
",",
"es",
")",
")"
] | Dispatch execution over a pool of processors
This code was adopted from the American Gut project:
https://github.com/biocore/American-Gut/blob/master/americangut/parallel.py
Parameters
----------
inputs : iterable of str
File paths to input per-sample sequence files
params : list of str
list of CLI parameters supplied to the deblur workflow
(argv - first 2 are 'deblur','workflow' and are ignored)
pos_ref_db_fp : list of str
the indexed positive (16s) sortmerna database
(created in the main thread)
neg_ref_db_fp : list of str
the indexed negative (artifacts) sortmerna database
(created in the main thread)
jobs_to_start : int, optional
The number of processors on the local system to use
Returns
-------
all_result_paths : list
list of expected output files | [
"Dispatch",
"execution",
"over",
"a",
"pool",
"of",
"processors"
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/parallel_deblur.py#L77-L142 |
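A hedged sketch of driving parallel_deblur; every path and CLI argument is invented, and the indexed databases are assumed to have been built beforehand (for example with build_index_sortmerna).

inputs = ['split/S1.fa', 'split/S2.fa']   # hypothetical per-sample fasta files
argv = ['deblur', 'workflow', '--seqs-fp', 'all.fa',
        '--output-dir', 'out', '--trim-length', '150']
parallel_deblur(inputs, argv,
                pos_ref_db_fp=['idx/88_otus'],      # hypothetical index prefixes
                neg_ref_dp_fp=['idx/artifacts'],
                jobs_to_start=2)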
biocore/deblur | deblur/workflow.py | sequence_generator | def sequence_generator(input_fp):
"""Yield (id, sequence) from an input file
Parameters
----------
input_fp : filepath
A filepath, which can be any valid fasta or fastq file within the
limitations of scikit-bio's IO registry.
Notes
-----
The use of this method is a stopgap to replicate the existing `parse_fasta`
functionality while at the same time allowing for fastq support.
Raises
------
skbio.io.FormatIdentificationWarning
If the format of the input file cannot be determined.
Returns
-------
(str, str)
The ID and sequence.
"""
logger = logging.getLogger(__name__)
kw = {}
if sniff_fasta(input_fp)[0]:
format = 'fasta'
elif sniff_fastq(input_fp)[0]:
format = 'fastq'
kw['variant'] = _get_fastq_variant(input_fp)
else:
# usually happens when the fasta file is empty
# so need to return no sequences (and warn)
msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
logger.warn(msg)
warnings.warn(msg, UserWarning)
return
# some of the test code is using file paths, some is using StringIO.
if isinstance(input_fp, io.TextIOBase):
input_fp.seek(0)
for record in skbio.read(input_fp, format=format, **kw):
yield (record.metadata['id'], str(record)) | python | def sequence_generator(input_fp):
"""Yield (id, sequence) from an input file
Parameters
----------
input_fp : filepath
A filepath, which can be any valid fasta or fastq file within the
limitations of scikit-bio's IO registry.
Notes
-----
The use of this method is a stopgap to replicate the existing `parse_fasta`
functionality while at the same time allowing for fastq support.
Raises
------
skbio.io.FormatIdentificationWarning
If the format of the input file cannot be determined.
Returns
-------
(str, str)
The ID and sequence.
"""
logger = logging.getLogger(__name__)
kw = {}
if sniff_fasta(input_fp)[0]:
format = 'fasta'
elif sniff_fastq(input_fp)[0]:
format = 'fastq'
kw['variant'] = _get_fastq_variant(input_fp)
else:
# usually happens when the fasta file is empty
# so need to return no sequences (and warn)
msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
logger.warn(msg)
warnings.warn(msg, UserWarning)
return
# some of the test code is using file paths, some is using StringIO.
if isinstance(input_fp, io.TextIOBase):
input_fp.seek(0)
for record in skbio.read(input_fp, format=format, **kw):
yield (record.metadata['id'], str(record)) | [
"def",
"sequence_generator",
"(",
"input_fp",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"kw",
"=",
"{",
"}",
"if",
"sniff_fasta",
"(",
"input_fp",
")",
"[",
"0",
"]",
":",
"format",
"=",
"'fasta'",
"elif",
"sniff_fastq",
"(",
"input_fp",
")",
"[",
"0",
"]",
":",
"format",
"=",
"'fastq'",
"kw",
"[",
"'variant'",
"]",
"=",
"_get_fastq_variant",
"(",
"input_fp",
")",
"else",
":",
"# usually happens when the fasta file is empty",
"# so need to return no sequences (and warn)",
"msg",
"=",
"\"input file %s does not appear to be FASTA or FASTQ\"",
"%",
"input_fp",
"logger",
".",
"warn",
"(",
"msg",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"UserWarning",
")",
"return",
"# some of the test code is using file paths, some is using StringIO.",
"if",
"isinstance",
"(",
"input_fp",
",",
"io",
".",
"TextIOBase",
")",
":",
"input_fp",
".",
"seek",
"(",
"0",
")",
"for",
"record",
"in",
"skbio",
".",
"read",
"(",
"input_fp",
",",
"format",
"=",
"format",
",",
"*",
"*",
"kw",
")",
":",
"yield",
"(",
"record",
".",
"metadata",
"[",
"'id'",
"]",
",",
"str",
"(",
"record",
")",
")"
] | Yield (id, sequence) from an input file
Parameters
----------
input_fp : filepath
A filepath, which can be any valid fasta or fastq file within the
limitations of scikit-bio's IO registry.
Notes
-----
The use of this method is a stopgap to replicate the existing `parse_fasta`
functionality while at the same time allowing for fastq support.
Raises
------
skbio.io.FormatIdentificationWarning
If the format of the input file cannot be determined.
Returns
-------
(str, str)
The ID and sequence. | [
"Yield",
"(",
"id",
"sequence",
")",
"from",
"an",
"input",
"file"
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L54-L100 |
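A small sketch of consuming sequence_generator; 'reads.fq' is a hypothetical FASTQ path that must exist for the format sniffers above to succeed.

lengths = {}
for seq_id, seq in sequence_generator('reads.fq'):
    lengths[seq_id] = len(seq)
print(len(lengths), 'records read')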
biocore/deblur | deblur/workflow.py | trim_seqs | def trim_seqs(input_seqs, trim_len, left_trim_len):
"""Trim FASTA sequences to specified length.
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format
trim_len : int
Sequence trimming length. Specify a value of -1 to disable trimming.
left_trim_len : int
Sequence trimming from the 5' end. A value of 0 will disable this trim.
Returns
-------
Generator of (str, str)
The trimmed sequences in (label, sequence) format
"""
# counters for the number of trimmed and total sequences
logger = logging.getLogger(__name__)
okseqs = 0
totseqs = 0
if trim_len < -1:
raise ValueError("Invalid trim_len: %d" % trim_len)
for label, seq in input_seqs:
totseqs += 1
if trim_len == -1:
okseqs += 1
yield label, seq
elif len(seq) >= trim_len:
okseqs += 1
yield label, seq[left_trim_len:trim_len]
if okseqs < 0.01*totseqs:
logger = logging.getLogger(__name__)
errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
'than the trim length (%d). ' \
'Are you using the correct -t trim length?' \
% (totseqs-okseqs, totseqs, trim_len)
logger.warn(errmsg)
warnings.warn(errmsg, UserWarning)
else:
logger.debug('trimmed to length %d (%d / %d remaining)'
% (trim_len, okseqs, totseqs)) | python | def trim_seqs(input_seqs, trim_len, left_trim_len):
"""Trim FASTA sequences to specified length.
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format
trim_len : int
Sequence trimming length. Specify a value of -1 to disable trimming.
left_trim_len : int
Sequence trimming from the 5' end. A value of 0 will disable this trim.
Returns
-------
Generator of (str, str)
The trimmed sequences in (label, sequence) format
"""
# counters for the number of trimmed and total sequences
logger = logging.getLogger(__name__)
okseqs = 0
totseqs = 0
if trim_len < -1:
raise ValueError("Invalid trim_len: %d" % trim_len)
for label, seq in input_seqs:
totseqs += 1
if trim_len == -1:
okseqs += 1
yield label, seq
elif len(seq) >= trim_len:
okseqs += 1
yield label, seq[left_trim_len:trim_len]
if okseqs < 0.01*totseqs:
logger = logging.getLogger(__name__)
errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
'than the trim length (%d). ' \
'Are you using the correct -t trim length?' \
% (totseqs-okseqs, totseqs, trim_len)
logger.warn(errmsg)
warnings.warn(errmsg, UserWarning)
else:
logger.debug('trimmed to length %d (%d / %d remaining)'
% (trim_len, okseqs, totseqs)) | [
"def",
"trim_seqs",
"(",
"input_seqs",
",",
"trim_len",
",",
"left_trim_len",
")",
":",
"# counters for the number of trimmed and total sequences",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"okseqs",
"=",
"0",
"totseqs",
"=",
"0",
"if",
"trim_len",
"<",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Invalid trim_len: %d\"",
"%",
"trim_len",
")",
"for",
"label",
",",
"seq",
"in",
"input_seqs",
":",
"totseqs",
"+=",
"1",
"if",
"trim_len",
"==",
"-",
"1",
":",
"okseqs",
"+=",
"1",
"yield",
"label",
",",
"seq",
"elif",
"len",
"(",
"seq",
")",
">=",
"trim_len",
":",
"okseqs",
"+=",
"1",
"yield",
"label",
",",
"seq",
"[",
"left_trim_len",
":",
"trim_len",
"]",
"if",
"okseqs",
"<",
"0.01",
"*",
"totseqs",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"errmsg",
"=",
"'Vast majority of sequences (%d / %d) are shorter '",
"'than the trim length (%d). '",
"'Are you using the correct -t trim length?'",
"%",
"(",
"totseqs",
"-",
"okseqs",
",",
"totseqs",
",",
"trim_len",
")",
"logger",
".",
"warn",
"(",
"errmsg",
")",
"warnings",
".",
"warn",
"(",
"errmsg",
",",
"UserWarning",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'trimmed to length %d (%d / %d remaining)'",
"%",
"(",
"trim_len",
",",
"okseqs",
",",
"totseqs",
")",
")"
] | Trim FASTA sequences to specified length.
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format
trim_len : int
Sequence trimming length. Specify a value of -1 to disable trimming.
left_trim_len : int
Sequence trimming from the 5' end. A value of 0 will disable this trim.
Returns
-------
Generator of (str, str)
The trimmed sequences in (label, sequence) format | [
"Trim",
"FASTA",
"sequences",
"to",
"specified",
"length",
"."
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L103-L150 |
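trim_seqs is a plain generator over (label, sequence) pairs, so it can be exercised without any files; the expected result follows directly from the slicing logic above.

seqs = [('seq1', 'ACGTACGTAC'), ('seq2', 'ACGT')]
trimmed = list(trim_seqs(seqs, trim_len=8, left_trim_len=2))
print(trimmed)  # [('seq1', 'GTACGT')]; seq2 is dropped for being shorter than 8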
biocore/deblur | deblur/workflow.py | dereplicate_seqs | def dereplicate_seqs(seqs_fp,
output_fp,
min_size=2,
use_log=False,
threads=1):
"""Dereplicate FASTA sequences and remove singletons using VSEARCH.
Parameters
----------
seqs_fp : string
filepath to FASTA sequence file
output_fp : string
file path to dereplicated sequences (FASTA format)
min_size : integer, optional
discard sequences with an abundance value smaller
than integer
use_log: boolean, optional
save the vsearch logfile as well (to output_fp.log)
default=False
threads : int, optional
number of threads to use (0 for all available)
"""
logger = logging.getLogger(__name__)
logger.info('dereplicate seqs file %s' % seqs_fp)
log_name = "%s.log" % output_fp
params = ['vsearch', '--derep_fulllength', seqs_fp,
'--output', output_fp, '--sizeout',
'--fasta_width', '0', '--minuniquesize', str(min_size),
'--quiet', '--threads', str(threads)]
if use_log:
params.extend(['--log', log_name])
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('Problem running vsearch dereplication on file %s' %
seqs_fp)
logger.debug('parameters used:\n%s' % params)
logger.debug('stdout: %s' % sout)
logger.debug('stderr: %s' % serr)
return | python | def dereplicate_seqs(seqs_fp,
output_fp,
min_size=2,
use_log=False,
threads=1):
"""Dereplicate FASTA sequences and remove singletons using VSEARCH.
Parameters
----------
seqs_fp : string
filepath to FASTA sequence file
output_fp : string
file path to dereplicated sequences (FASTA format)
min_size : integer, optional
discard sequences with an abundance value smaller
than integer
use_log: boolean, optional
save the vsearch logfile as well (to output_fp.log)
default=False
threads : int, optional
number of threads to use (0 for all available)
"""
logger = logging.getLogger(__name__)
logger.info('dereplicate seqs file %s' % seqs_fp)
log_name = "%s.log" % output_fp
params = ['vsearch', '--derep_fulllength', seqs_fp,
'--output', output_fp, '--sizeout',
'--fasta_width', '0', '--minuniquesize', str(min_size),
'--quiet', '--threads', str(threads)]
if use_log:
params.extend(['--log', log_name])
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('Problem running vsearch dereplication on file %s' %
seqs_fp)
logger.debug('parameters used:\n%s' % params)
logger.debug('stdout: %s' % sout)
logger.debug('stderr: %s' % serr)
return | [
"def",
"dereplicate_seqs",
"(",
"seqs_fp",
",",
"output_fp",
",",
"min_size",
"=",
"2",
",",
"use_log",
"=",
"False",
",",
"threads",
"=",
"1",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"'dereplicate seqs file %s'",
"%",
"seqs_fp",
")",
"log_name",
"=",
"\"%s.log\"",
"%",
"output_fp",
"params",
"=",
"[",
"'vsearch'",
",",
"'--derep_fulllength'",
",",
"seqs_fp",
",",
"'--output'",
",",
"output_fp",
",",
"'--sizeout'",
",",
"'--fasta_width'",
",",
"'0'",
",",
"'--minuniquesize'",
",",
"str",
"(",
"min_size",
")",
",",
"'--quiet'",
",",
"'--threads'",
",",
"str",
"(",
"threads",
")",
"]",
"if",
"use_log",
":",
"params",
".",
"extend",
"(",
"[",
"'--log'",
",",
"log_name",
"]",
")",
"sout",
",",
"serr",
",",
"res",
"=",
"_system_call",
"(",
"params",
")",
"if",
"not",
"res",
"==",
"0",
":",
"logger",
".",
"error",
"(",
"'Problem running vsearch dereplication on file %s'",
"%",
"seqs_fp",
")",
"logger",
".",
"debug",
"(",
"'parameters used:\\n%s'",
"%",
"params",
")",
"logger",
".",
"debug",
"(",
"'stdout: %s'",
"%",
"sout",
")",
"logger",
".",
"debug",
"(",
"'stderr: %s'",
"%",
"serr",
")",
"return"
] | Dereplicate FASTA sequences and remove singletons using VSEARCH.
Parameters
----------
seqs_fp : string
filepath to FASTA sequence file
output_fp : string
file path to dereplicated sequences (FASTA format)
min_size : integer, optional
discard sequences with an abundance value smaller
than integer
use_log: boolean, optional
save the vsearch logfile as well (to output_fp.log)
default=False
threads : int, optional
number of threads to use (0 for all available) | [
"Dereplicate",
"FASTA",
"sequences",
"and",
"remove",
"singletons",
"using",
"VSEARCH",
"."
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L153-L193 |
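A hedged sketch of calling dereplicate_seqs; vsearch must be on PATH and both file paths are hypothetical.

dereplicate_seqs('sample1.trim.fa', 'sample1.trim.derep.fa',
                 min_size=2, use_log=True, threads=1)
# on success, sample1.trim.derep.fa holds the unique sequences (singletons removed)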
biocore/deblur | deblur/workflow.py | build_index_sortmerna | def build_index_sortmerna(ref_fp, working_dir):
"""Build a SortMeRNA index for all reference databases.
Parameters
----------
ref_fp: tuple
filepaths to FASTA reference databases
working_dir: string
working directory path where to store the indexed database
Returns
-------
all_db: tuple
filepaths to SortMeRNA indexed reference databases
"""
logger = logging.getLogger(__name__)
logger.info('build_index_sortmerna files %s to'
' dir %s' % (ref_fp, working_dir))
all_db = []
for db in ref_fp:
fasta_dir, fasta_filename = split(db)
index_basename = splitext(fasta_filename)[0]
db_output = join(working_dir, index_basename)
logger.debug('processing file %s into location %s' % (db, db_output))
params = ['indexdb_rna', '--ref', '%s,%s' %
(db, db_output), '--tmpdir', working_dir]
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('Problem running indexdb_rna on file %s to dir %s. '
'database not indexed' % (db, db_output))
logger.debug('stdout: %s' % sout)
logger.debug('stderr: %s' % serr)
logger.critical('execution halted')
raise RuntimeError('Cannot index database file %s' % db)
logger.debug('file %s indexed' % db)
all_db.append(db_output)
return all_db | python | def build_index_sortmerna(ref_fp, working_dir):
"""Build a SortMeRNA index for all reference databases.
Parameters
----------
ref_fp: tuple
filepaths to FASTA reference databases
working_dir: string
working directory path where to store the indexed database
Returns
-------
all_db: tuple
filepaths to SortMeRNA indexed reference databases
"""
logger = logging.getLogger(__name__)
logger.info('build_index_sortmerna files %s to'
' dir %s' % (ref_fp, working_dir))
all_db = []
for db in ref_fp:
fasta_dir, fasta_filename = split(db)
index_basename = splitext(fasta_filename)[0]
db_output = join(working_dir, index_basename)
logger.debug('processing file %s into location %s' % (db, db_output))
params = ['indexdb_rna', '--ref', '%s,%s' %
(db, db_output), '--tmpdir', working_dir]
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('Problem running indexdb_rna on file %s to dir %s. '
'database not indexed' % (db, db_output))
logger.debug('stdout: %s' % sout)
logger.debug('stderr: %s' % serr)
logger.critical('execution halted')
raise RuntimeError('Cannot index database file %s' % db)
logger.debug('file %s indexed' % db)
all_db.append(db_output)
return all_db | [
"def",
"build_index_sortmerna",
"(",
"ref_fp",
",",
"working_dir",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"'build_index_sortmerna files %s to'",
"' dir %s'",
"%",
"(",
"ref_fp",
",",
"working_dir",
")",
")",
"all_db",
"=",
"[",
"]",
"for",
"db",
"in",
"ref_fp",
":",
"fasta_dir",
",",
"fasta_filename",
"=",
"split",
"(",
"db",
")",
"index_basename",
"=",
"splitext",
"(",
"fasta_filename",
")",
"[",
"0",
"]",
"db_output",
"=",
"join",
"(",
"working_dir",
",",
"index_basename",
")",
"logger",
".",
"debug",
"(",
"'processing file %s into location %s'",
"%",
"(",
"db",
",",
"db_output",
")",
")",
"params",
"=",
"[",
"'indexdb_rna'",
",",
"'--ref'",
",",
"'%s,%s'",
"%",
"(",
"db",
",",
"db_output",
")",
",",
"'--tmpdir'",
",",
"working_dir",
"]",
"sout",
",",
"serr",
",",
"res",
"=",
"_system_call",
"(",
"params",
")",
"if",
"not",
"res",
"==",
"0",
":",
"logger",
".",
"error",
"(",
"'Problem running indexdb_rna on file %s to dir %s. '",
"'database not indexed'",
"%",
"(",
"db",
",",
"db_output",
")",
")",
"logger",
".",
"debug",
"(",
"'stdout: %s'",
"%",
"sout",
")",
"logger",
".",
"debug",
"(",
"'stderr: %s'",
"%",
"serr",
")",
"logger",
".",
"critical",
"(",
"'execution halted'",
")",
"raise",
"RuntimeError",
"(",
"'Cannot index database file %s'",
"%",
"db",
")",
"logger",
".",
"debug",
"(",
"'file %s indexed'",
"%",
"db",
")",
"all_db",
".",
"append",
"(",
"db_output",
")",
"return",
"all_db"
] | Build a SortMeRNA index for all reference databases.
Parameters
----------
ref_fp: tuple
filepaths to FASTA reference databases
working_dir: string
working directory path where to store the indexed database
Returns
-------
all_db: tuple
filepaths to SortMeRNA indexed reference databases | [
"Build",
"a",
"SortMeRNA",
"index",
"for",
"all",
"reference",
"databases",
"."
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L196-L232 |
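A hedged sketch of building the indexes; the reference fasta paths and working directory are invented, and indexdb_rna must be installed.

ref_fps = ('db/88_otus.fasta', 'db/artifacts.fasta')   # hypothetical reference files
indexed = build_index_sortmerna(ref_fps, working_dir='tmp_idx')
print(indexed)  # per the code above: ['tmp_idx/88_otus', 'tmp_idx/artifacts']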
biocore/deblur | deblur/workflow.py | filter_minreads_samples_from_table | def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
"""Filter samples from biom table that have less than
minreads reads total
Parameters
----------
table : biom.Table
the biom table to filter
minreads : int (optional)
the minimal number of reads in a sample in order to keep it
inplace : bool (optional)
if True, filter the biom table in place, if false create a new copy
Returns
-------
table : biom.Table
the filtered biom table
"""
logger = logging.getLogger(__name__)
logger.debug('filter_minreads_started. minreads=%d' % minreads)
samp_sum = table.sum(axis='sample')
samp_ids = table.ids(axis='sample')
bad_samples = samp_ids[samp_sum < minreads]
if len(bad_samples) > 0:
logger.warn('removed %d samples with reads per sample<%d'
% (len(bad_samples), minreads))
table = table.filter(bad_samples, axis='sample',
inplace=inplace, invert=True)
else:
logger.debug('all samples contain > %d reads' % minreads)
return table | python | def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
"""Filter samples from biom table that have less than
minreads reads total
Parameters
----------
table : biom.Table
the biom table to filter
minreads : int (optional)
the minimal number of reads in a sample in order to keep it
inplace : bool (optional)
if True, filter the biom table in place, if false create a new copy
Returns
-------
table : biom.Table
the filtered biom table
"""
logger = logging.getLogger(__name__)
logger.debug('filter_minreads_started. minreads=%d' % minreads)
samp_sum = table.sum(axis='sample')
samp_ids = table.ids(axis='sample')
bad_samples = samp_ids[samp_sum < minreads]
if len(bad_samples) > 0:
logger.warn('removed %d samples with reads per sample<%d'
% (len(bad_samples), minreads))
table = table.filter(bad_samples, axis='sample',
inplace=inplace, invert=True)
else:
logger.debug('all samples contain > %d reads' % minreads)
return table | [
"def",
"filter_minreads_samples_from_table",
"(",
"table",
",",
"minreads",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"'filter_minreads_started. minreads=%d'",
"%",
"minreads",
")",
"samp_sum",
"=",
"table",
".",
"sum",
"(",
"axis",
"=",
"'sample'",
")",
"samp_ids",
"=",
"table",
".",
"ids",
"(",
"axis",
"=",
"'sample'",
")",
"bad_samples",
"=",
"samp_ids",
"[",
"samp_sum",
"<",
"minreads",
"]",
"if",
"len",
"(",
"bad_samples",
")",
">",
"0",
":",
"logger",
".",
"warn",
"(",
"'removed %d samples with reads per sample<%d'",
"%",
"(",
"len",
"(",
"bad_samples",
")",
",",
"minreads",
")",
")",
"table",
"=",
"table",
".",
"filter",
"(",
"bad_samples",
",",
"axis",
"=",
"'sample'",
",",
"inplace",
"=",
"inplace",
",",
"invert",
"=",
"True",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'all samples contain > %d reads'",
"%",
"minreads",
")",
"return",
"table"
] | Filter samples from biom table that have less than
minreads reads total
Parameters
----------
table : biom.Table
the biom table to filter
minreads : int (optional)
the minimal number of reads in a sample in order to keep it
inplace : bool (optional)
if True, filter the biom table in place, if false create a new copy
Returns
-------
table : biom.Table
the filtered biom table | [
"Filter",
"samples",
"from",
"biom",
"table",
"that",
"have",
"less",
"than",
"minreads",
"reads",
"total"
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L235-L265 |
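filter_minreads_samples_from_table only needs an in-memory biom.Table, so a self-contained sketch is possible; the toy counts are invented.

import numpy as np
from biom import Table

# rows are observations (sequences), columns are samples
data = np.array([[0, 5],
                 [1, 10]])
table = Table(data, ['AACG', 'TTGG'], ['S1', 'S2'])
filtered = filter_minreads_samples_from_table(table, minreads=3, inplace=False)
print(filtered.ids(axis='sample'))  # only 'S2' remains; S1 has 1 read in total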
biocore/deblur | deblur/workflow.py | fasta_from_biom | def fasta_from_biom(table, fasta_file_name):
'''Save sequences from a biom table to a fasta file
Parameters
----------
table : biom.Table
The biom table containing the sequences
fasta_file_name : str
Name of the fasta output file
'''
logger = logging.getLogger(__name__)
logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
with open(fasta_file_name, 'w') as f:
for cseq in table.ids(axis='observation'):
f.write('>%s\n%s\n' % (cseq, cseq))
logger.info('saved biom table sequences to fasta file %s' % fasta_file_name) | python | def fasta_from_biom(table, fasta_file_name):
'''Save sequences from a biom table to a fasta file
Parameters
----------
table : biom.Table
The biom table containing the sequences
fasta_file_name : str
Name of the fasta output file
'''
logger = logging.getLogger(__name__)
logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
with open(fasta_file_name, 'w') as f:
for cseq in table.ids(axis='observation'):
f.write('>%s\n%s\n' % (cseq, cseq))
logger.info('saved biom table sequences to fasta file %s' % fasta_file_name) | [
"def",
"fasta_from_biom",
"(",
"table",
",",
"fasta_file_name",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"'saving biom table sequences to fasta file %s'",
"%",
"fasta_file_name",
")",
"with",
"open",
"(",
"fasta_file_name",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"cseq",
"in",
"table",
".",
"ids",
"(",
"axis",
"=",
"'observation'",
")",
":",
"f",
".",
"write",
"(",
"'>%s\\n%s\\n'",
"%",
"(",
"cseq",
",",
"cseq",
")",
")",
"logger",
".",
"info",
"(",
"'saved biom table sequences to fasta file %s'",
"%",
"fasta_file_name",
")"
] | Save sequences from a biom table to a fasta file
Parameters
----------
table : biom.Table
The biom table containing the sequences
fasta_file_name : str
Name of the fasta output file | [
"Save",
"sequences",
"from",
"a",
"biom",
"table",
"to",
"a",
"fasta",
"file"
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L268-L284 |
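A small sketch of fasta_from_biom with a toy table; note the observation ids are themselves the sequences, which is exactly what the writer above relies on.

import numpy as np
from biom import Table

table = Table(np.array([[3, 1], [0, 7]]), ['AACG', 'TTGG'], ['S1', 'S2'])
fasta_from_biom(table, 'observed.seqs.fa')
# observed.seqs.fa now lists >AACG / AACG and >TTGG / TTGG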
biocore/deblur | deblur/workflow.py | remove_artifacts_from_biom_table | def remove_artifacts_from_biom_table(table_filename,
fasta_filename,
ref_fp,
biom_table_dir,
ref_db_fp,
threads=1,
verbose=False,
sim_thresh=None,
coverage_thresh=None):
"""Remove artifacts from a biom table using SortMeRNA
Parameters
----------
table : str
name of the biom table file
fasta_filename : str
the fasta file containing all the sequences of the biom table
Returns
-------
tmp_files : list of str
The temp files created during the artifact removal step
"""
logger = logging.getLogger(__name__)
logger.info('getting 16s sequences from the biom table')
# remove artifacts from the fasta file. output is in clean_fp fasta file
clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
working_dir=biom_table_dir,
ref_db_fp=ref_db_fp,
negate=False, threads=threads,
verbose=verbose,
sim_thresh=sim_thresh,
coverage_thresh=coverage_thresh)
if clean_fp is None:
logger.warn("No clean sequences in %s" % fasta_filename)
return tmp_files
logger.debug('removed artifacts from sequences input %s'
' to output %s' % (fasta_filename, clean_fp))
# read the clean fasta file
good_seqs = {s for _, s in sequence_generator(clean_fp)}
logger.debug('loaded %d sequences from cleaned biom table'
' fasta file' % len(good_seqs))
logger.debug('loading biom table %s' % table_filename)
table = load_table(table_filename)
# filter and save the artifact biom table
artifact_table = table.filter(list(good_seqs),
axis='observation', inplace=False,
invert=True)
# remove the samples with 0 reads
filter_minreads_samples_from_table(artifact_table)
output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
write_biom_table(artifact_table, output_nomatch_fp)
logger.info('wrote artifact only filtered biom table to %s'
% output_nomatch_fp)
# and save the reference-non-hit fasta file
output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
# filter and save the only 16s biom table
table.filter(list(good_seqs), axis='observation')
# remove the samples with 0 reads
filter_minreads_samples_from_table(table)
output_fp = join(biom_table_dir, 'reference-hit.biom')
write_biom_table(table, output_fp)
logger.info('wrote 16s filtered biom table to %s' % output_fp)
# and save the reference-hit fasta file
output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
fasta_from_biom(table, output_match_fasta_fp)
# we also don't need the cleaned fasta file
tmp_files.append(clean_fp)
return tmp_files | python | def remove_artifacts_from_biom_table(table_filename,
fasta_filename,
ref_fp,
biom_table_dir,
ref_db_fp,
threads=1,
verbose=False,
sim_thresh=None,
coverage_thresh=None):
"""Remove artifacts from a biom table using SortMeRNA
Parameters
----------
table : str
name of the biom table file
fasta_filename : str
the fasta file containing all the sequences of the biom table
Returns
-------
tmp_files : list of str
The temp files created during the artifact removal step
"""
logger = logging.getLogger(__name__)
logger.info('getting 16s sequences from the biom table')
# remove artifacts from the fasta file. output is in clean_fp fasta file
clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
working_dir=biom_table_dir,
ref_db_fp=ref_db_fp,
negate=False, threads=threads,
verbose=verbose,
sim_thresh=sim_thresh,
coverage_thresh=coverage_thresh)
if clean_fp is None:
logger.warn("No clean sequences in %s" % fasta_filename)
return tmp_files
logger.debug('removed artifacts from sequences input %s'
' to output %s' % (fasta_filename, clean_fp))
# read the clean fasta file
good_seqs = {s for _, s in sequence_generator(clean_fp)}
logger.debug('loaded %d sequences from cleaned biom table'
' fasta file' % len(good_seqs))
logger.debug('loading biom table %s' % table_filename)
table = load_table(table_filename)
# filter and save the artifact biom table
artifact_table = table.filter(list(good_seqs),
axis='observation', inplace=False,
invert=True)
# remove the samples with 0 reads
filter_minreads_samples_from_table(artifact_table)
output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
write_biom_table(artifact_table, output_nomatch_fp)
logger.info('wrote artifact only filtered biom table to %s'
% output_nomatch_fp)
# and save the reference-non-hit fasta file
output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
# filter and save the only 16s biom table
table.filter(list(good_seqs), axis='observation')
# remove the samples with 0 reads
filter_minreads_samples_from_table(table)
output_fp = join(biom_table_dir, 'reference-hit.biom')
write_biom_table(table, output_fp)
logger.info('wrote 16s filtered biom table to %s' % output_fp)
# and save the reference-hit fasta file
output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
fasta_from_biom(table, output_match_fasta_fp)
# we also don't need the cleaned fasta file
tmp_files.append(clean_fp)
return tmp_files | [
"def",
"remove_artifacts_from_biom_table",
"(",
"table_filename",
",",
"fasta_filename",
",",
"ref_fp",
",",
"biom_table_dir",
",",
"ref_db_fp",
",",
"threads",
"=",
"1",
",",
"verbose",
"=",
"False",
",",
"sim_thresh",
"=",
"None",
",",
"coverage_thresh",
"=",
"None",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"'getting 16s sequences from the biom table'",
")",
"# remove artifacts from the fasta file. output is in clean_fp fasta file",
"clean_fp",
",",
"num_seqs_left",
",",
"tmp_files",
"=",
"remove_artifacts_seqs",
"(",
"fasta_filename",
",",
"ref_fp",
",",
"working_dir",
"=",
"biom_table_dir",
",",
"ref_db_fp",
"=",
"ref_db_fp",
",",
"negate",
"=",
"False",
",",
"threads",
"=",
"threads",
",",
"verbose",
"=",
"verbose",
",",
"sim_thresh",
"=",
"sim_thresh",
",",
"coverage_thresh",
"=",
"coverage_thresh",
")",
"if",
"clean_fp",
"is",
"None",
":",
"logger",
".",
"warn",
"(",
"\"No clean sequences in %s\"",
"%",
"fasta_filename",
")",
"return",
"tmp_files",
"logger",
".",
"debug",
"(",
"'removed artifacts from sequences input %s'",
"' to output %s'",
"%",
"(",
"fasta_filename",
",",
"clean_fp",
")",
")",
"# read the clean fasta file",
"good_seqs",
"=",
"{",
"s",
"for",
"_",
",",
"s",
"in",
"sequence_generator",
"(",
"clean_fp",
")",
"}",
"logger",
".",
"debug",
"(",
"'loaded %d sequences from cleaned biom table'",
"' fasta file'",
"%",
"len",
"(",
"good_seqs",
")",
")",
"logger",
".",
"debug",
"(",
"'loading biom table %s'",
"%",
"table_filename",
")",
"table",
"=",
"load_table",
"(",
"table_filename",
")",
"# filter and save the artifact biom table",
"artifact_table",
"=",
"table",
".",
"filter",
"(",
"list",
"(",
"good_seqs",
")",
",",
"axis",
"=",
"'observation'",
",",
"inplace",
"=",
"False",
",",
"invert",
"=",
"True",
")",
"# remove the samples with 0 reads",
"filter_minreads_samples_from_table",
"(",
"artifact_table",
")",
"output_nomatch_fp",
"=",
"join",
"(",
"biom_table_dir",
",",
"'reference-non-hit.biom'",
")",
"write_biom_table",
"(",
"artifact_table",
",",
"output_nomatch_fp",
")",
"logger",
".",
"info",
"(",
"'wrote artifact only filtered biom table to %s'",
"%",
"output_nomatch_fp",
")",
"# and save the reference-non-hit fasta file",
"output_nomatch_fasta_fp",
"=",
"join",
"(",
"biom_table_dir",
",",
"'reference-non-hit.seqs.fa'",
")",
"fasta_from_biom",
"(",
"artifact_table",
",",
"output_nomatch_fasta_fp",
")",
"# filter and save the only 16s biom table",
"table",
".",
"filter",
"(",
"list",
"(",
"good_seqs",
")",
",",
"axis",
"=",
"'observation'",
")",
"# remove the samples with 0 reads",
"filter_minreads_samples_from_table",
"(",
"table",
")",
"output_fp",
"=",
"join",
"(",
"biom_table_dir",
",",
"'reference-hit.biom'",
")",
"write_biom_table",
"(",
"table",
",",
"output_fp",
")",
"logger",
".",
"info",
"(",
"'wrote 16s filtered biom table to %s'",
"%",
"output_fp",
")",
"# and save the reference-non-hit fasta file",
"output_match_fasta_fp",
"=",
"join",
"(",
"biom_table_dir",
",",
"'reference-hit.seqs.fa'",
")",
"fasta_from_biom",
"(",
"table",
",",
"output_match_fasta_fp",
")",
"# we also don't need the cleaned fasta file",
"tmp_files",
".",
"append",
"(",
"clean_fp",
")",
"return",
"tmp_files"
] | Remove artifacts from a biom table using SortMeRNA
Parameters
----------
table : str
name of the biom table file
fasta_filename : str
the fasta file containing all the sequences of the biom table
Returns
-------
tmp_files : list of str
The temp files created during the artifact removal step | [
"Remove",
"artifacts",
"from",
"a",
"biom",
"table",
"using",
"SortMeRNA"
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L287-L363 |
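A hedged sketch of remove_artifacts_from_biom_table; every path is hypothetical and the indexed sortmerna database is assumed to exist already.

tmp_files = remove_artifacts_from_biom_table(
    table_filename='out/all.biom',
    fasta_filename='out/all.seqs.fa',
    ref_fp=('db/88_otus.fasta',),
    biom_table_dir='out',
    ref_db_fp=('tmp_idx/88_otus',),
    threads=2)
# out/ now holds reference-hit and reference-non-hit biom tables plus matching fasta files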
biocore/deblur | deblur/workflow.py | remove_artifacts_seqs | def remove_artifacts_seqs(seqs_fp,
ref_fp,
working_dir,
ref_db_fp,
negate=False,
threads=1,
verbose=False,
sim_thresh=None,
coverage_thresh=None):
"""Remove artifacts from FASTA file using SortMeRNA.
Parameters
----------
seqs_fp: string
file path to FASTA input sequence file
ref_fp: tuple
file path(s) to FASTA database file
working_dir: string
working directory path
ref_db_fp: tuple
file path(s) to indexed FASTA database
negate: boolean, optional
if True, discard all input sequences aligning
to reference database
threads: integer, optional
number of threads to use for SortMeRNA
verbose: boolean, optional
If true, output SortMeRNA errors
sim_thresh: float, optional
The minimal similarity threshold (between 0 and 1)
for keeping the sequence
if None, the default values used are 0.65 for negate=False,
0.95 for negate=True
coverage_thresh: float, optional
The minimal coverage threshold (between 0 and 1)
for alignments for keeping the sequence
if None, the default values used are 0.5 for negate=False,
0.95 for negate=True
Returns
-------
output_fp : str
Name of the artifact removed fasta file
okseqs : int
The number of sequences left after artifact removal
tmp_files : list of str
Names of the tmp files created
"""
logger = logging.getLogger(__name__)
logger.info('remove_artifacts_seqs file %s' % seqs_fp)
if stat(seqs_fp).st_size == 0:
logger.warn('file %s has size 0, continuing' % seqs_fp)
return None, 0, []
if coverage_thresh is None:
if negate:
coverage_thresh = 0.95 * 100
else:
coverage_thresh = 0.5 * 100
if sim_thresh is None:
if negate:
sim_thresh = 0.95 * 100
else:
sim_thresh = 0.65 * 100
# the minimal average bitscore per nucleotide
bitscore_thresh = 0.65
output_fp = join(working_dir,
"%s.no_artifacts" % basename(seqs_fp))
blast_output = join(working_dir,
'%s.sortmerna' % basename(seqs_fp))
aligned_seq_ids = set()
for i, db in enumerate(ref_fp):
logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
% (db, working_dir, ref_db_fp[i], seqs_fp))
# run SortMeRNA
# we use -e 100 to remove E-value based filtering by sortmerna
# since we use bitscore/identity/coverage filtering instead
params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
(db, ref_db_fp[i]),
'--aligned', blast_output, '--blast', '3', '--best', '1',
'--print_all_reads', '-v', '-e', '100']
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('sortmerna error on file %s' % seqs_fp)
logger.error('stdout : %s' % sout)
logger.error('stderr : %s' % serr)
return output_fp, 0, []
blast_output_filename = '%s.blast' % blast_output
with open(blast_output_filename, 'r') as bfl:
for line in bfl:
line = line.strip().split('\t')
# if * means no match
if line[1] == '*':
continue
# check if % identity[2] and coverage[13] are large enough
if (float(line[2]) >= sim_thresh) and \
(float(line[13]) >= coverage_thresh) and \
(float(line[11]) >= bitscore_thresh * len(line[0])):
aligned_seq_ids.add(line[0])
if negate:
def op(x): return x not in aligned_seq_ids
else:
def op(x): return x in aligned_seq_ids
# if negate = False, only output sequences
# matching to at least one of the databases
totalseqs = 0
okseqs = 0
badseqs = 0
with open(output_fp, 'w') as out_f:
for label, seq in sequence_generator(seqs_fp):
totalseqs += 1
label = label.split()[0]
if op(label):
out_f.write(">%s\n%s\n" % (label, seq))
okseqs += 1
else:
badseqs += 1
logger.info('total sequences %d, passing sequences %d, '
'failing sequences %d' % (totalseqs, okseqs, badseqs))
return output_fp, okseqs, [blast_output_filename] | python | def remove_artifacts_seqs(seqs_fp,
ref_fp,
working_dir,
ref_db_fp,
negate=False,
threads=1,
verbose=False,
sim_thresh=None,
coverage_thresh=None):
"""Remove artifacts from FASTA file using SortMeRNA.
Parameters
----------
seqs_fp: string
file path to FASTA input sequence file
ref_fp: tuple
file path(s) to FASTA database file
working_dir: string
working directory path
ref_db_fp: tuple
file path(s) to indexed FASTA database
negate: boolean, optional
if True, discard all input sequences aligning
to reference database
threads: integer, optional
number of threads to use for SortMeRNA
verbose: boolean, optional
If true, output SortMeRNA errors
sim_thresh: float, optional
The minimal similarity threshold (between 0 and 1)
for keeping the sequence
if None, the default values used are 0.65 for negate=False,
0.95 for negate=True
coverage_thresh: float, optional
The minimal coverage threshold (between 0 and 1)
for alignments for keeping the sequence
if None, the default values used are 0.5 for negate=False,
0.95 for negate=True
Returns
-------
output_fp : str
Name of the artifact removed fasta file
okseqs : int
The number of sequences left after artifact removal
tmp_files : list of str
Names of the tmp files created
"""
logger = logging.getLogger(__name__)
logger.info('remove_artifacts_seqs file %s' % seqs_fp)
if stat(seqs_fp).st_size == 0:
logger.warn('file %s has size 0, continuing' % seqs_fp)
return None, 0, []
if coverage_thresh is None:
if negate:
coverage_thresh = 0.95 * 100
else:
coverage_thresh = 0.5 * 100
if sim_thresh is None:
if negate:
sim_thresh = 0.95 * 100
else:
sim_thresh = 0.65 * 100
# the minimal average bitscore per nucleotide
bitscore_thresh = 0.65
output_fp = join(working_dir,
"%s.no_artifacts" % basename(seqs_fp))
blast_output = join(working_dir,
'%s.sortmerna' % basename(seqs_fp))
aligned_seq_ids = set()
for i, db in enumerate(ref_fp):
logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
% (db, working_dir, ref_db_fp[i], seqs_fp))
# run SortMeRNA
# we use -e 100 to remove E-value based filtering by sortmerna
# since we use bitscore/identity/coverage filtering instead
params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
(db, ref_db_fp[i]),
'--aligned', blast_output, '--blast', '3', '--best', '1',
'--print_all_reads', '-v', '-e', '100']
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('sortmerna error on file %s' % seqs_fp)
logger.error('stdout : %s' % sout)
logger.error('stderr : %s' % serr)
return output_fp, 0, []
blast_output_filename = '%s.blast' % blast_output
with open(blast_output_filename, 'r') as bfl:
for line in bfl:
line = line.strip().split('\t')
# if * means no match
if line[1] == '*':
continue
# check if % identity[2] and coverage[13] are large enough
if (float(line[2]) >= sim_thresh) and \
(float(line[13]) >= coverage_thresh) and \
(float(line[11]) >= bitscore_thresh * len(line[0])):
aligned_seq_ids.add(line[0])
if negate:
def op(x): return x not in aligned_seq_ids
else:
def op(x): return x in aligned_seq_ids
# if negate = False, only output sequences
# matching to at least one of the databases
totalseqs = 0
okseqs = 0
badseqs = 0
with open(output_fp, 'w') as out_f:
for label, seq in sequence_generator(seqs_fp):
totalseqs += 1
label = label.split()[0]
if op(label):
out_f.write(">%s\n%s\n" % (label, seq))
okseqs += 1
else:
badseqs += 1
logger.info('total sequences %d, passing sequences %d, '
'failing sequences %d' % (totalseqs, okseqs, badseqs))
return output_fp, okseqs, [blast_output_filename] | [
"def",
"remove_artifacts_seqs",
"(",
"seqs_fp",
",",
"ref_fp",
",",
"working_dir",
",",
"ref_db_fp",
",",
"negate",
"=",
"False",
",",
"threads",
"=",
"1",
",",
"verbose",
"=",
"False",
",",
"sim_thresh",
"=",
"None",
",",
"coverage_thresh",
"=",
"None",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"'remove_artifacts_seqs file %s'",
"%",
"seqs_fp",
")",
"if",
"stat",
"(",
"seqs_fp",
")",
".",
"st_size",
"==",
"0",
":",
"logger",
".",
"warn",
"(",
"'file %s has size 0, continuing'",
"%",
"seqs_fp",
")",
"return",
"None",
",",
"0",
",",
"[",
"]",
"if",
"coverage_thresh",
"is",
"None",
":",
"if",
"negate",
":",
"coverage_thresh",
"=",
"0.95",
"*",
"100",
"else",
":",
"coverage_thresh",
"=",
"0.5",
"*",
"100",
"if",
"sim_thresh",
"is",
"None",
":",
"if",
"negate",
":",
"sim_thresh",
"=",
"0.95",
"*",
"100",
"else",
":",
"sim_thresh",
"=",
"0.65",
"*",
"100",
"# the minimal average bitscore per nucleotide",
"bitscore_thresh",
"=",
"0.65",
"output_fp",
"=",
"join",
"(",
"working_dir",
",",
"\"%s.no_artifacts\"",
"%",
"basename",
"(",
"seqs_fp",
")",
")",
"blast_output",
"=",
"join",
"(",
"working_dir",
",",
"'%s.sortmerna'",
"%",
"basename",
"(",
"seqs_fp",
")",
")",
"aligned_seq_ids",
"=",
"set",
"(",
")",
"for",
"i",
",",
"db",
"in",
"enumerate",
"(",
"ref_fp",
")",
":",
"logger",
".",
"debug",
"(",
"'running on ref_fp %s working dir %s refdb_fp %s seqs %s'",
"%",
"(",
"db",
",",
"working_dir",
",",
"ref_db_fp",
"[",
"i",
"]",
",",
"seqs_fp",
")",
")",
"# run SortMeRNA",
"# we use -e 100 to remove E-value based filtering by sortmerna",
"# since we use bitscore/identity/coverage filtering instead",
"params",
"=",
"[",
"'sortmerna'",
",",
"'--reads'",
",",
"seqs_fp",
",",
"'--ref'",
",",
"'%s,%s'",
"%",
"(",
"db",
",",
"ref_db_fp",
"[",
"i",
"]",
")",
",",
"'--aligned'",
",",
"blast_output",
",",
"'--blast'",
",",
"'3'",
",",
"'--best'",
",",
"'1'",
",",
"'--print_all_reads'",
",",
"'-v'",
",",
"'-e'",
",",
"'100'",
"]",
"sout",
",",
"serr",
",",
"res",
"=",
"_system_call",
"(",
"params",
")",
"if",
"not",
"res",
"==",
"0",
":",
"logger",
".",
"error",
"(",
"'sortmerna error on file %s'",
"%",
"seqs_fp",
")",
"logger",
".",
"error",
"(",
"'stdout : %s'",
"%",
"sout",
")",
"logger",
".",
"error",
"(",
"'stderr : %s'",
"%",
"serr",
")",
"return",
"output_fp",
",",
"0",
",",
"[",
"]",
"blast_output_filename",
"=",
"'%s.blast'",
"%",
"blast_output",
"with",
"open",
"(",
"blast_output_filename",
",",
"'r'",
")",
"as",
"bfl",
":",
"for",
"line",
"in",
"bfl",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"# if * means no match",
"if",
"line",
"[",
"1",
"]",
"==",
"'*'",
":",
"continue",
"# check if % identity[2] and coverage[13] are large enough",
"if",
"(",
"float",
"(",
"line",
"[",
"2",
"]",
")",
">=",
"sim_thresh",
")",
"and",
"(",
"float",
"(",
"line",
"[",
"13",
"]",
")",
">=",
"coverage_thresh",
")",
"and",
"(",
"float",
"(",
"line",
"[",
"11",
"]",
")",
">=",
"bitscore_thresh",
"*",
"len",
"(",
"line",
"[",
"0",
"]",
")",
")",
":",
"aligned_seq_ids",
".",
"add",
"(",
"line",
"[",
"0",
"]",
")",
"if",
"negate",
":",
"def",
"op",
"(",
"x",
")",
":",
"return",
"x",
"not",
"in",
"aligned_seq_ids",
"else",
":",
"def",
"op",
"(",
"x",
")",
":",
"return",
"x",
"in",
"aligned_seq_ids",
"# if negate = False, only output sequences",
"# matching to at least one of the databases",
"totalseqs",
"=",
"0",
"okseqs",
"=",
"0",
"badseqs",
"=",
"0",
"with",
"open",
"(",
"output_fp",
",",
"'w'",
")",
"as",
"out_f",
":",
"for",
"label",
",",
"seq",
"in",
"sequence_generator",
"(",
"seqs_fp",
")",
":",
"totalseqs",
"+=",
"1",
"label",
"=",
"label",
".",
"split",
"(",
")",
"[",
"0",
"]",
"if",
"op",
"(",
"label",
")",
":",
"out_f",
".",
"write",
"(",
"\">%s\\n%s\\n\"",
"%",
"(",
"label",
",",
"seq",
")",
")",
"okseqs",
"+=",
"1",
"else",
":",
"badseqs",
"+=",
"1",
"logger",
".",
"info",
"(",
"'total sequences %d, passing sequences %d, '",
"'failing sequences %d'",
"%",
"(",
"totalseqs",
",",
"okseqs",
",",
"badseqs",
")",
")",
"return",
"output_fp",
",",
"okseqs",
",",
"[",
"blast_output_filename",
"]"
] | Remove artifacts from FASTA file using SortMeRNA.
Parameters
----------
seqs_fp: string
file path to FASTA input sequence file
ref_fp: tuple
file path(s) to FASTA database file
working_dir: string
working directory path
ref_db_fp: tuple
file path(s) to indexed FASTA database
negate: boolean, optional
if True, discard all input sequences aligning
to reference database
threads: integer, optional
number of threads to use for SortMeRNA
verbose: boolean, optional
If true, output SortMeRNA errors
sim_thresh: float, optional
The minimal similarity threshold (between 0 and 1)
for keeping the sequence
if None, the default values used are 0.65 for negate=False,
0.95 for negate=True
coverage_thresh: float, optional
The minimal coverage threshold (between 0 and 1)
for alignments for keeping the sequence
if None, the default values used are 0.5 for negate=False,
0.95 for negate=True
Returns
-------
output_fp : str
Name of the artifact removed fasta file
okseqs : int
The number of sequences left after artifact removal
tmp_files : list of str
Names of the tmp files created | [
"Remove",
"artifacts",
"from",
"FASTA",
"file",
"using",
"SortMeRNA",
"."
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L366-L493 |
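A minimal usage sketch for remove_artifacts_seqs, assuming the deblur package and the SortMeRNA binary are installed; the fasta and index paths below are illustrative placeholders, not files shipped with the project.
from deblur.workflow import remove_artifacts_seqs

# Placeholder inputs: dereplicated reads plus one reference fasta and a
# pre-built SortMeRNA index for it.
output_fp, n_kept, tmp_files = remove_artifacts_seqs(
    seqs_fp='sample1.trim.derep',
    ref_fp=('ref/reference.fa',),
    working_dir='working',
    ref_db_fp=('ref/reference.idx',),
    negate=False,            # per the docstring: True discards reads that align
    threads=1,
    sim_thresh=0.65,
    coverage_thresh=0.5)
print('%d sequences kept, written to %s' % (n_kept, output_fp))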
biocore/deblur | deblur/workflow.py | multiple_sequence_alignment | def multiple_sequence_alignment(seqs_fp, threads=1):
"""Perform multiple sequence alignment on FASTA file using MAFFT.
Parameters
----------
seqs_fp: string
filepath to FASTA file for multiple sequence alignment
threads: integer, optional
number of threads to use. 0 to use all threads
Returns
-------
msa_fp : str
name of output alignment file or None if error encountered
"""
logger = logging.getLogger(__name__)
logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
# for mafft we use -1 to denote all threads and not 0
if threads == 0:
threads = -1
if stat(seqs_fp).st_size == 0:
logger.warning('msa failed. file %s has no reads' % seqs_fp)
return None
msa_fp = seqs_fp + '.msa'
params = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
'--thread', str(threads), seqs_fp]
sout, serr, res = _system_call(params, stdoutfilename=msa_fp)
if not res == 0:
logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
logger.debug('stderr : %s' % serr)
return None
return msa_fp | python | def multiple_sequence_alignment(seqs_fp, threads=1):
"""Perform multiple sequence alignment on FASTA file using MAFFT.
Parameters
----------
seqs_fp: string
filepath to FASTA file for multiple sequence alignment
threads: integer, optional
number of threads to use. 0 to use all threads
Returns
-------
msa_fp : str
name of output alignment file or None if error encountered
"""
logger = logging.getLogger(__name__)
logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
# for mafft we use -1 to denote all threads and not 0
if threads == 0:
threads = -1
if stat(seqs_fp).st_size == 0:
logger.warning('msa failed. file %s has no reads' % seqs_fp)
return None
msa_fp = seqs_fp + '.msa'
params = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
'--thread', str(threads), seqs_fp]
sout, serr, res = _system_call(params, stdoutfilename=msa_fp)
if not res == 0:
logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
logger.debug('stderr : %s' % serr)
return None
return msa_fp | [
"def",
"multiple_sequence_alignment",
"(",
"seqs_fp",
",",
"threads",
"=",
"1",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"'multiple_sequence_alignment seqs file %s'",
"%",
"seqs_fp",
")",
"# for mafft we use -1 to denote all threads and not 0",
"if",
"threads",
"==",
"0",
":",
"threads",
"=",
"-",
"1",
"if",
"stat",
"(",
"seqs_fp",
")",
".",
"st_size",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"'msa failed. file %s has no reads'",
"%",
"seqs_fp",
")",
"return",
"None",
"msa_fp",
"=",
"seqs_fp",
"+",
"'.msa'",
"params",
"=",
"[",
"'mafft'",
",",
"'--quiet'",
",",
"'--preservecase'",
",",
"'--parttree'",
",",
"'--auto'",
",",
"'--thread'",
",",
"str",
"(",
"threads",
")",
",",
"seqs_fp",
"]",
"sout",
",",
"serr",
",",
"res",
"=",
"_system_call",
"(",
"params",
",",
"stdoutfilename",
"=",
"msa_fp",
")",
"if",
"not",
"res",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"'msa failed for file %s (maybe only 1 read?)'",
"%",
"seqs_fp",
")",
"logger",
".",
"debug",
"(",
"'stderr : %s'",
"%",
"serr",
")",
"return",
"None",
"return",
"msa_fp"
] | Perform multiple sequence alignment on FASTA file using MAFFT.
Parameters
----------
seqs_fp: string
filepath to FASTA file for multiple sequence alignment
threads: integer, optional
number of threads to use. 0 to use all threads
Returns
-------
msa_fp : str
name of output alignment file or None if error encountered | [
"Perform",
"multiple",
"sequence",
"alignment",
"on",
"FASTA",
"file",
"using",
"MAFFT",
"."
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L496-L529 |
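A short usage sketch for multiple_sequence_alignment; it assumes MAFFT is on the PATH and the input filename is a placeholder.
from deblur.workflow import multiple_sequence_alignment

msa_fp = multiple_sequence_alignment('sample1.trim.derep.no_artifacts', threads=1)
if msa_fp is None:
    # empty input file or MAFFT failure (for example only a single read)
    print('alignment skipped or failed')
else:
    print('alignment written to %s' % msa_fp)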
biocore/deblur | deblur/workflow.py | remove_chimeras_denovo_from_seqs | def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
"""Remove chimeras de novo using UCHIME (VSEARCH implementation).
Parameters
----------
seqs_fp: string
file path to FASTA input sequence file
working_dir: string
working directory path in which the chimera-free output file is written
threads : int
number of threads (0 for all cores)
Returns
-------
output_fp
the chimera removed fasta file name
"""
logger = logging.getLogger(__name__)
logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
'to working dir %s' % (seqs_fp, working_dir))
output_fp = join(
working_dir, "%s.no_chimeras" % basename(seqs_fp))
# we use the parameters dn=0.000001, xn=1000, minh=10000000
# so 1 mismatch in the A/B region will cancel it being labeled as chimera
# and ~3 unique reads in each region will make it a chimera if
# no mismatches
params = ['vsearch', '--uchime_denovo', seqs_fp,
'--nonchimeras', output_fp,
'-dn', '0.000001', '-xn', '1000',
'-minh', '10000000', '--mindiffs', '5',
'--fasta_width', '0', '--threads', str(threads)]
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('problem with chimera removal for file %s' % seqs_fp)
logger.debug('stdout : %s' % sout)
logger.debug('stderr : %s' % serr)
return output_fp | python | def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
"""Remove chimeras de novo using UCHIME (VSEARCH implementation).
Parameters
----------
seqs_fp: string
file path to FASTA input sequence file
working_dir: string
working directory path in which the chimera-free output file is written
threads : int
number of threads (0 for all cores)
Returns
-------
output_fp
the chimera removed fasta file name
"""
logger = logging.getLogger(__name__)
logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
'to working dir %s' % (seqs_fp, working_dir))
output_fp = join(
working_dir, "%s.no_chimeras" % basename(seqs_fp))
# we use the parameters dn=0.000001, xn=1000, minh=10000000
# so 1 mismatch in the A/B region will cancel it being labeled as chimera
# and ~3 unique reads in each region will make it a chimera if
# no mismatches
params = ['vsearch', '--uchime_denovo', seqs_fp,
'--nonchimeras', output_fp,
'-dn', '0.000001', '-xn', '1000',
'-minh', '10000000', '--mindiffs', '5',
'--fasta_width', '0', '--threads', str(threads)]
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('problem with chimera removal for file %s' % seqs_fp)
logger.debug('stdout : %s' % sout)
logger.debug('stderr : %s' % serr)
return output_fp | [
"def",
"remove_chimeras_denovo_from_seqs",
"(",
"seqs_fp",
",",
"working_dir",
",",
"threads",
"=",
"1",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"'remove_chimeras_denovo_from_seqs seqs file %s'",
"'to working dir %s'",
"%",
"(",
"seqs_fp",
",",
"working_dir",
")",
")",
"output_fp",
"=",
"join",
"(",
"working_dir",
",",
"\"%s.no_chimeras\"",
"%",
"basename",
"(",
"seqs_fp",
")",
")",
"# we use the parameters dn=0.000001, xn=1000, minh=10000000",
"# so 1 mismatch in the A/B region will cancel it being labeled as chimera",
"# and ~3 unique reads in each region will make it a chimera if",
"# no mismatches",
"params",
"=",
"[",
"'vsearch'",
",",
"'--uchime_denovo'",
",",
"seqs_fp",
",",
"'--nonchimeras'",
",",
"output_fp",
",",
"'-dn'",
",",
"'0.000001'",
",",
"'-xn'",
",",
"'1000'",
",",
"'-minh'",
",",
"'10000000'",
",",
"'--mindiffs'",
",",
"'5'",
",",
"'--fasta_width'",
",",
"'0'",
",",
"'--threads'",
",",
"str",
"(",
"threads",
")",
"]",
"sout",
",",
"serr",
",",
"res",
"=",
"_system_call",
"(",
"params",
")",
"if",
"not",
"res",
"==",
"0",
":",
"logger",
".",
"error",
"(",
"'problem with chimera removal for file %s'",
"%",
"seqs_fp",
")",
"logger",
".",
"debug",
"(",
"'stdout : %s'",
"%",
"sout",
")",
"logger",
".",
"debug",
"(",
"'stderr : %s'",
"%",
"serr",
")",
"return",
"output_fp"
] | Remove chimeras de novo using UCHIME (VSEARCH implementation).
Parameters
----------
seqs_fp: string
file path to FASTA input sequence file
working_dir: string
working directory path in which the chimera-free output file is written
threads : int
number of threads (0 for all cores)
Returns
-------
output_fp
the chimera removed fasta file name | [
"Remove",
"chimeras",
"de",
"novo",
"using",
"UCHIME",
"(",
"VSEARCH",
"implementation",
")",
"."
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L532-L570 |
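A usage sketch for remove_chimeras_denovo_from_seqs, the de novo chimera filter above; vsearch must be installed and the paths are placeholders.
from deblur.workflow import remove_chimeras_denovo_from_seqs

clean_fp = remove_chimeras_denovo_from_seqs('sample1.deblur', 'working', threads=1)
print('chimera-free sequences written to %s' % clean_fp)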
biocore/deblur | deblur/workflow.py | split_sequence_file_on_sample_ids_to_files | def split_sequence_file_on_sample_ids_to_files(seqs,
outdir):
"""Split FASTA file on sample IDs.
Parameters
----------
seqs: file handler
file handler to demultiplexed FASTA file
outdir: string
dirpath to output split FASTA files
"""
logger = logging.getLogger(__name__)
logger.info('split_sequence_file_on_sample_ids_to_files'
' for file %s into dir %s' % (seqs, outdir))
outputs = {}
for bits in sequence_generator(seqs):
sample = sample_id_from_read_id(bits[0])
if sample not in outputs:
outputs[sample] = open(join(outdir, sample + '.fasta'), 'w')
outputs[sample].write(">%s\n%s\n" % (bits[0], bits[1]))
for sample in outputs:
outputs[sample].close()
logger.info('split to %d files' % len(outputs)) | python | def split_sequence_file_on_sample_ids_to_files(seqs,
outdir):
"""Split FASTA file on sample IDs.
Parameters
----------
seqs: file handler
file handler to demultiplexed FASTA file
outdir: string
dirpath to output split FASTA files
"""
logger = logging.getLogger(__name__)
logger.info('split_sequence_file_on_sample_ids_to_files'
' for file %s into dir %s' % (seqs, outdir))
outputs = {}
for bits in sequence_generator(seqs):
sample = sample_id_from_read_id(bits[0])
if sample not in outputs:
outputs[sample] = open(join(outdir, sample + '.fasta'), 'w')
outputs[sample].write(">%s\n%s\n" % (bits[0], bits[1]))
for sample in outputs:
outputs[sample].close()
logger.info('split to %d files' % len(outputs)) | [
"def",
"split_sequence_file_on_sample_ids_to_files",
"(",
"seqs",
",",
"outdir",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"'split_sequence_file_on_sample_ids_to_files'",
"' for file %s into dir %s'",
"%",
"(",
"seqs",
",",
"outdir",
")",
")",
"outputs",
"=",
"{",
"}",
"for",
"bits",
"in",
"sequence_generator",
"(",
"seqs",
")",
":",
"sample",
"=",
"sample_id_from_read_id",
"(",
"bits",
"[",
"0",
"]",
")",
"if",
"sample",
"not",
"in",
"outputs",
":",
"outputs",
"[",
"sample",
"]",
"=",
"open",
"(",
"join",
"(",
"outdir",
",",
"sample",
"+",
"'.fasta'",
")",
",",
"'w'",
")",
"outputs",
"[",
"sample",
"]",
".",
"write",
"(",
"\">%s\\n%s\\n\"",
"%",
"(",
"bits",
"[",
"0",
"]",
",",
"bits",
"[",
"1",
"]",
")",
")",
"for",
"sample",
"in",
"outputs",
":",
"outputs",
"[",
"sample",
"]",
".",
"close",
"(",
")",
"logger",
".",
"info",
"(",
"'split to %d files'",
"%",
"len",
"(",
"outputs",
")",
")"
] | Split FASTA file on sample IDs.
Parameters
----------
seqs: file handler
file handler to demultiplexed FASTA file
outdir: string
dirpath to output split FASTA files | [
"Split",
"FASTA",
"file",
"on",
"sample",
"IDs",
"."
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L596-L621 |
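A sketch of split_sequence_file_on_sample_ids_to_files splitting a demultiplexed fasta into per-sample files; read labels are assumed to follow the sampleid_readid convention expected by sample_id_from_read_id, and the paths are placeholders.
import os
from deblur.workflow import split_sequence_file_on_sample_ids_to_files

os.makedirs('per_sample', exist_ok=True)
with open('all_samples.fna') as seqs_f:
    split_sequence_file_on_sample_ids_to_files(seqs_f, 'per_sample')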
biocore/deblur | deblur/workflow.py | write_biom_table | def write_biom_table(table, biom_fp):
"""Write BIOM table to file.
Parameters
----------
table: biom.table
an instance of a BIOM table
biom_fp: string
filepath to output BIOM table
"""
logger = logging.getLogger(__name__)
logger.debug('write_biom_table to file %s' % biom_fp)
with biom_open(biom_fp, 'w') as f:
table.to_hdf5(h5grp=f, generated_by="deblur")
logger.debug('wrote to BIOM file %s' % biom_fp) | python | def write_biom_table(table, biom_fp):
"""Write BIOM table to file.
Parameters
----------
table: biom.table
an instance of a BIOM table
biom_fp: string
filepath to output BIOM table
"""
logger = logging.getLogger(__name__)
logger.debug('write_biom_table to file %s' % biom_fp)
with biom_open(biom_fp, 'w') as f:
table.to_hdf5(h5grp=f, generated_by="deblur")
logger.debug('wrote to BIOM file %s' % biom_fp) | [
"def",
"write_biom_table",
"(",
"table",
",",
"biom_fp",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"'write_biom_table to file %s'",
"%",
"biom_fp",
")",
"with",
"biom_open",
"(",
"biom_fp",
",",
"'w'",
")",
"as",
"f",
":",
"table",
".",
"to_hdf5",
"(",
"h5grp",
"=",
"f",
",",
"generated_by",
"=",
"\"deblur\"",
")",
"logger",
".",
"debug",
"(",
"'wrote to BIOM file %s'",
"%",
"biom_fp",
")"
] | Write BIOM table to file.
Parameters
----------
table: biom.table
an instance of a BIOM table
biom_fp: string
filepath to output BIOM table | [
"Write",
"BIOM",
"table",
"to",
"file",
"."
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L624-L638 |
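A small self-contained example of write_biom_table; it needs the biom-format package with HDF5 (h5py) support, and the table contents below are made up.
import numpy as np
from biom.table import Table
from deblur.workflow import write_biom_table

# Two observations (sequences) by two samples, dense counts.
data = np.array([[5, 0],
                 [3, 2]])
table = Table(data, observation_ids=['ACGT', 'TTGA'], sample_ids=['S1', 'S2'])
write_biom_table(table, 'example.biom')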
biocore/deblur | deblur/workflow.py | get_files_for_table | def get_files_for_table(input_dir,
file_end='.trim.derep.no_artifacts'
'.msa.deblur.no_chimeras'):
"""Get a list of files to add to the output table
Parameters:
-----------
input_dir : string
name of the directory containing the deblurred fasta files
file_end : string
the ending of all the fasta files to be added to the table
(default '.fasta.trim.derep.no_artifacts.msa.deblur.no_chimeras')
Returns
-------
names : list of tuples of (string,string)
list of tuples of:
name of fasta files to be added to the biom table
sampleid (file names without the file_end and path)
"""
logger = logging.getLogger(__name__)
logger.debug('get_files_for_table input dir %s, '
'file-ending %s' % (input_dir, file_end))
names = []
for cfile in glob(join(input_dir, "*%s" % file_end)):
if not isfile(cfile):
continue
sample_id = basename(cfile)[:-len(file_end)]
sample_id = os.path.splitext(sample_id)[0]
names.append((cfile, sample_id))
logger.debug('found %d files' % len(names))
return names | python | def get_files_for_table(input_dir,
file_end='.trim.derep.no_artifacts'
'.msa.deblur.no_chimeras'):
"""Get a list of files to add to the output table
Parameters:
-----------
input_dir : string
name of the directory containing the deblurred fasta files
file_end : string
the ending of all the fasta files to be added to the table
(default '.fasta.trim.derep.no_artifacts.msa.deblur.no_chimeras')
Returns
-------
names : list of tuples of (string,string)
list of tuples of:
name of fasta files to be added to the biom table
sampleid (file names without the file_end and path)
"""
logger = logging.getLogger(__name__)
logger.debug('get_files_for_table input dir %s, '
'file-ending %s' % (input_dir, file_end))
names = []
for cfile in glob(join(input_dir, "*%s" % file_end)):
if not isfile(cfile):
continue
sample_id = basename(cfile)[:-len(file_end)]
sample_id = os.path.splitext(sample_id)[0]
names.append((cfile, sample_id))
logger.debug('found %d files' % len(names))
return names | [
"def",
"get_files_for_table",
"(",
"input_dir",
",",
"file_end",
"=",
"'.trim.derep.no_artifacts'",
"'.msa.deblur.no_chimeras'",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"'get_files_for_table input dir %s, '",
"'file-ending %s'",
"%",
"(",
"input_dir",
",",
"file_end",
")",
")",
"names",
"=",
"[",
"]",
"for",
"cfile",
"in",
"glob",
"(",
"join",
"(",
"input_dir",
",",
"\"*%s\"",
"%",
"file_end",
")",
")",
":",
"if",
"not",
"isfile",
"(",
"cfile",
")",
":",
"continue",
"sample_id",
"=",
"basename",
"(",
"cfile",
")",
"[",
":",
"-",
"len",
"(",
"file_end",
")",
"]",
"sample_id",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"sample_id",
")",
"[",
"0",
"]",
"names",
".",
"append",
"(",
"(",
"cfile",
",",
"sample_id",
")",
")",
"logger",
".",
"debug",
"(",
"'found %d files'",
"%",
"len",
"(",
"names",
")",
")",
"return",
"names"
] | Get a list of files to add to the output table
Parameters:
-----------
input_dir : string
name of the directory containing the deblurred fasta files
file_end : string
the ending of all the fasta files to be added to the table
(default '.fasta.trim.derep.no_artifacts.msa.deblur.no_chimeras')
Returns
-------
names : list of tuples of (string,string)
list of tuples of:
name of fasta files to be added to the biom table
sampleid (file names without the file_end and path) | [
"Get",
"a",
"list",
"of",
"files",
"to",
"add",
"to",
"the",
"output",
"table"
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L641-L673 |
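A sketch of get_files_for_table collecting the deblurred per-sample files for table construction; 'working' is a placeholder directory and the default file ending is used.
from deblur.workflow import get_files_for_table

for fasta_fp, sample_id in get_files_for_table('working'):
    print(sample_id, '->', fasta_fp)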
biocore/deblur | deblur/workflow.py | create_otu_table | def create_otu_table(output_fp, deblurred_list,
outputfasta_fp=None, minreads=0):
"""Create a biom table out of all files in a directory
Parameters
----------
output_fp : string
filepath to output BIOM table
deblurred_list : list of (str, str)
list of file names (including path), sampleid of all deblurred
fasta files to add to the table
outputfasta_fp : str, optional
name of output fasta file (of all sequences in the table) or None
to not write
minreads : int, optional
minimal number of reads per bacterial sequence in order to write
it to the biom table and fasta file or 0 to write all
"""
logger = logging.getLogger(__name__)
logger.info('create_otu_table for %d samples, '
'into output table %s' % (len(deblurred_list), output_fp))
# the regexp for finding the number of reads of a sequence
sizeregexp = re.compile('(?<=size=)\w+')
seqdict = {}
seqlist = []
sampset = set()
samplist = []
# arbitrary size for the sparse results matrix so we won't run out of space
obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
# load the sequences from all samples into a sparse matrix
sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
for (cfilename, csampleid) in deblurred_list:
if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
csampleid = csampleid.rsplit('.', 1)[0]
# test if sample has already been processed
if csampleid in sampset:
warnings.warn('sample %s already in table!' % csampleid, UserWarning)
logger.error('sample %s already in table!' % csampleid)
continue
sampset.add(csampleid)
samplist.append(csampleid)
csampidx = len(sampset)-1
# read the fasta file and add to the matrix
for chead, cseq in sequence_generator(cfilename):
cseq = cseq.upper()
if cseq not in seqdict:
seqdict[cseq] = len(seqlist)
seqlist.append(cseq)
cseqidx = seqdict[cseq]
cfreq = float(sizeregexp.search(chead).group(0))
try:
obs[cseqidx, csampidx] += cfreq
except IndexError:
# exception means we ran out of space - add more OTUs
shape = obs.shape
obs.resize((shape[0]*2, shape[1]))
obs[cseqidx, csampidx] = cfreq
logger.info('for output biom table loaded %d samples, %d unique sequences'
% (len(samplist), len(seqlist)))
# and now make the sparse matrix the real size
obs.resize((len(seqlist), len(samplist)))
# do the minimal reads per otu filtering
if minreads > 0:
readsperotu = obs.sum(axis=1)
keep = np.where(readsperotu >= minreads)[0]
logger.info('keeping %d (out of %d sequences) with >=%d reads' %
(len(keep), len(seqlist), minreads))
obs = obs[keep, :]
seqlist = list(np.array(seqlist)[keep])
logger.debug('filtering completed')
# convert the matrix to a biom table
table = Table(obs.tocsr(), seqlist, samplist,
observation_metadata=None,
sample_metadata=None, table_id=None,
generated_by="deblur",
create_date=datetime.now().isoformat())
logger.debug('converted to biom table')
# remove samples with 0 reads
filter_minreads_samples_from_table(table)
# save the merged otu table
write_biom_table(table, output_fp)
logger.info('saved to biom file %s' % output_fp)
# and save the fasta file
if outputfasta_fp is not None:
logger.debug('saving fasta file')
with open(outputfasta_fp, 'w') as f:
for cseq in seqlist:
f.write('>%s\n%s\n' % (cseq, cseq))
logger.info('saved sequence fasta file to %s' % outputfasta_fp) | python | def create_otu_table(output_fp, deblurred_list,
outputfasta_fp=None, minreads=0):
"""Create a biom table out of all files in a directory
Parameters
----------
output_fp : string
filepath to output BIOM table
deblurred_list : list of (str, str)
list of file names (including path), sampleid of all deblurred
fasta files to add to the table
outputfasta_fp : str, optional
name of output fasta file (of all sequences in the table) or None
to not write
minreads : int, optional
minimal number of reads per bacterial sequence in order to write
it to the biom table and fasta file or 0 to write all
"""
logger = logging.getLogger(__name__)
logger.info('create_otu_table for %d samples, '
'into output table %s' % (len(deblurred_list), output_fp))
# the regexp for finding the number of reads of a sequence
sizeregexp = re.compile('(?<=size=)\w+')
seqdict = {}
seqlist = []
sampset = set()
samplist = []
# arbitrary size for the sparse results matrix so we won't run out of space
obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)
# load the sequences from all samples into a sparse matrix
sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
for (cfilename, csampleid) in deblurred_list:
if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
csampleid = csampleid.rsplit('.', 1)[0]
# test if sample has already been processed
if csampleid in sampset:
warnings.warn('sample %s already in table!' % csampleid, UserWarning)
logger.error('sample %s already in table!' % csampleid)
continue
sampset.add(csampleid)
samplist.append(csampleid)
csampidx = len(sampset)-1
# read the fasta file and add to the matrix
for chead, cseq in sequence_generator(cfilename):
cseq = cseq.upper()
if cseq not in seqdict:
seqdict[cseq] = len(seqlist)
seqlist.append(cseq)
cseqidx = seqdict[cseq]
cfreq = float(sizeregexp.search(chead).group(0))
try:
obs[cseqidx, csampidx] += cfreq
except IndexError:
# exception means we ran out of space - add more OTUs
shape = obs.shape
obs.resize((shape[0]*2, shape[1]))
obs[cseqidx, csampidx] = cfreq
logger.info('for output biom table loaded %d samples, %d unique sequences'
% (len(samplist), len(seqlist)))
# and now make the sparse matrix the real size
obs.resize((len(seqlist), len(samplist)))
# do the minimal reads per otu filtering
if minreads > 0:
readsperotu = obs.sum(axis=1)
keep = np.where(readsperotu >= minreads)[0]
logger.info('keeping %d (out of %d sequences) with >=%d reads' %
(len(keep), len(seqlist), minreads))
obs = obs[keep, :]
seqlist = list(np.array(seqlist)[keep])
logger.debug('filtering completed')
# convert the matrix to a biom table
table = Table(obs.tocsr(), seqlist, samplist,
observation_metadata=None,
sample_metadata=None, table_id=None,
generated_by="deblur",
create_date=datetime.now().isoformat())
logger.debug('converted to biom table')
# remove samples with 0 reads
filter_minreads_samples_from_table(table)
# save the merged otu table
write_biom_table(table, output_fp)
logger.info('saved to biom file %s' % output_fp)
# and save the fasta file
if outputfasta_fp is not None:
logger.debug('saving fasta file')
with open(outputfasta_fp, 'w') as f:
for cseq in seqlist:
f.write('>%s\n%s\n' % (cseq, cseq))
logger.info('saved sequence fasta file to %s' % outputfasta_fp) | [
"def",
"create_otu_table",
"(",
"output_fp",
",",
"deblurred_list",
",",
"outputfasta_fp",
"=",
"None",
",",
"minreads",
"=",
"0",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"'create_otu_table for %d samples, '",
"'into output table %s'",
"%",
"(",
"len",
"(",
"deblurred_list",
")",
",",
"output_fp",
")",
")",
"# the regexp for finding the number of reads of a sequence",
"sizeregexp",
"=",
"re",
".",
"compile",
"(",
"'(?<=size=)\\w+'",
")",
"seqdict",
"=",
"{",
"}",
"seqlist",
"=",
"[",
"]",
"sampset",
"=",
"set",
"(",
")",
"samplist",
"=",
"[",
"]",
"# arbitrary size for the sparse results matrix so we won't run out of space",
"obs",
"=",
"scipy",
".",
"sparse",
".",
"dok_matrix",
"(",
"(",
"int",
"(",
"1E9",
")",
",",
"len",
"(",
"deblurred_list",
")",
")",
",",
"dtype",
"=",
"np",
".",
"double",
")",
"# load the sequences from all samples into a sprase matrix",
"sneaking_extensions",
"=",
"{",
"'fasta'",
",",
"'fastq'",
",",
"'fna'",
",",
"'fq'",
",",
"'fa'",
"}",
"for",
"(",
"cfilename",
",",
"csampleid",
")",
"in",
"deblurred_list",
":",
"if",
"csampleid",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"-",
"1",
"]",
"in",
"sneaking_extensions",
":",
"csampleid",
"=",
"csampleid",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
"# test if sample has already been processed",
"if",
"csampleid",
"in",
"sampset",
":",
"warnings",
".",
"warn",
"(",
"'sample %s already in table!'",
",",
"UserWarning",
")",
"logger",
".",
"error",
"(",
"'sample %s already in table!'",
"%",
"csampleid",
")",
"continue",
"sampset",
".",
"add",
"(",
"csampleid",
")",
"samplist",
".",
"append",
"(",
"csampleid",
")",
"csampidx",
"=",
"len",
"(",
"sampset",
")",
"-",
"1",
"# read the fasta file and add to the matrix",
"for",
"chead",
",",
"cseq",
"in",
"sequence_generator",
"(",
"cfilename",
")",
":",
"cseq",
"=",
"cseq",
".",
"upper",
"(",
")",
"if",
"cseq",
"not",
"in",
"seqdict",
":",
"seqdict",
"[",
"cseq",
"]",
"=",
"len",
"(",
"seqlist",
")",
"seqlist",
".",
"append",
"(",
"cseq",
")",
"cseqidx",
"=",
"seqdict",
"[",
"cseq",
"]",
"cfreq",
"=",
"float",
"(",
"sizeregexp",
".",
"search",
"(",
"chead",
")",
".",
"group",
"(",
"0",
")",
")",
"try",
":",
"obs",
"[",
"cseqidx",
",",
"csampidx",
"]",
"+=",
"cfreq",
"except",
"IndexError",
":",
"# exception means we ran out of space - add more OTUs",
"shape",
"=",
"obs",
".",
"shape",
"obs",
".",
"resize",
"(",
"(",
"shape",
"[",
"0",
"]",
"*",
"2",
",",
"shape",
"[",
"1",
"]",
")",
")",
"obs",
"[",
"cseqidx",
",",
"csampidx",
"]",
"=",
"cfreq",
"logger",
".",
"info",
"(",
"'for output biom table loaded %d samples, %d unique sequences'",
"%",
"(",
"len",
"(",
"samplist",
")",
",",
"len",
"(",
"seqlist",
")",
")",
")",
"# and now make the sparse matrix the real size",
"obs",
".",
"resize",
"(",
"(",
"len",
"(",
"seqlist",
")",
",",
"len",
"(",
"samplist",
")",
")",
")",
"# do the minimal reads per otu filtering",
"if",
"minreads",
">",
"0",
":",
"readsperotu",
"=",
"obs",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"keep",
"=",
"np",
".",
"where",
"(",
"readsperotu",
">=",
"minreads",
")",
"[",
"0",
"]",
"logger",
".",
"info",
"(",
"'keeping %d (out of %d sequences) with >=%d reads'",
"%",
"(",
"len",
"(",
"keep",
")",
",",
"len",
"(",
"seqlist",
")",
",",
"minreads",
")",
")",
"obs",
"=",
"obs",
"[",
"keep",
",",
":",
"]",
"seqlist",
"=",
"list",
"(",
"np",
".",
"array",
"(",
"seqlist",
")",
"[",
"keep",
"]",
")",
"logger",
".",
"debug",
"(",
"'filtering completed'",
")",
"# convert the matrix to a biom table",
"table",
"=",
"Table",
"(",
"obs",
".",
"tocsr",
"(",
")",
",",
"seqlist",
",",
"samplist",
",",
"observation_metadata",
"=",
"None",
",",
"sample_metadata",
"=",
"None",
",",
"table_id",
"=",
"None",
",",
"generated_by",
"=",
"\"deblur\"",
",",
"create_date",
"=",
"datetime",
".",
"now",
"(",
")",
".",
"isoformat",
"(",
")",
")",
"logger",
".",
"debug",
"(",
"'converted to biom table'",
")",
"# remove samples with 0 reads",
"filter_minreads_samples_from_table",
"(",
"table",
")",
"# save the merged otu table",
"write_biom_table",
"(",
"table",
",",
"output_fp",
")",
"logger",
".",
"info",
"(",
"'saved to biom file %s'",
"%",
"output_fp",
")",
"# and save the fasta file",
"if",
"outputfasta_fp",
"is",
"not",
"None",
":",
"logger",
".",
"debug",
"(",
"'saving fasta file'",
")",
"with",
"open",
"(",
"outputfasta_fp",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"cseq",
"in",
"seqlist",
":",
"f",
".",
"write",
"(",
"'>%s\\n%s\\n'",
"%",
"(",
"cseq",
",",
"cseq",
")",
")",
"logger",
".",
"info",
"(",
"'saved sequence fasta file to %s'",
"%",
"outputfasta_fp",
")"
] | Create a biom table out of all files in a directory
Parameters
----------
output_fp : string
filepath to output BIOM table
deblurred_list : list of (str, str)
list of file names (including path), sampleid of all deblurred
fasta files to add to the table
outputfasta_fp : str, optional
name of output fasta file (of all sequences in the table) or None
to not write
minreads : int, optional
minimal number of reads per bacterial sequence in order to write
it to the biom table and fasta file or 0 to write all | [
"Create",
"a",
"biom",
"table",
"out",
"of",
"all",
"files",
"in",
"a",
"directory"
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L676-L774 |
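A sketch combining get_files_for_table and create_otu_table to produce the final BIOM table plus a fasta file of all kept sequences; paths and the minreads value are placeholders.
from deblur.workflow import get_files_for_table, create_otu_table

deblurred = get_files_for_table('working')
create_otu_table('all_samples.biom', deblurred,
                 outputfasta_fp='all_samples.seqs.fa', minreads=0)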
biocore/deblur | deblur/workflow.py | launch_workflow | def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
indel_prob, indel_max, trim_length, left_trim_length,
min_size, ref_fp, ref_db_fp, threads_per_sample=1,
sim_thresh=None, coverage_thresh=None):
"""Launch full deblur workflow for a single post split-libraries fasta file
Parameters
----------
seqs_fp: string
a post split library fasta file for deblurring
working_dir: string
working directory path
mean_error: float
mean error for original sequence estimate
error_dist: list
list of error probabilities for each hamming distance
indel_prob: float
insertion/deletion (indel) probability
indel_max: integer
maximal indel number
trim_length: integer
sequence trim length
left_trim_length: integer
trim the first n reads
min_size: integer
upper limit on sequence abundance (discard sequences below limit)
ref_fp: tuple
filepath(s) to FASTA reference database for artifact removal
ref_db_fp: tuple
filepath(s) to SortMeRNA indexed database for artifact removal
threads_per_sample: integer, optional
number of threads to use for SortMeRNA/mafft/vsearch
(0 for max available)
sim_thresh: float, optional
the minimal similarity for a sequence to the database.
if None, take the defaults (0.65 for negate=False,
0.95 for negate=True)
coverage_thresh: float, optional
the minimal coverage for alignment of a sequence to the database.
if None, take the defaults (0.3 for negate=False, 0.95 for negate=True)
Return
------
output_no_chimers_fp : string
filepath to fasta file with no chimeras of None if error encountered
"""
logger = logging.getLogger(__name__)
logger.info('--------------------------------------------------------')
logger.info('launch_workflow for file %s' % seqs_fp)
# Step 1: Trim sequences to specified length
output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
with open(output_trim_fp, 'w') as out_f:
for label, seq in trim_seqs(
input_seqs=sequence_generator(seqs_fp),
trim_len=trim_length,
left_trim_len=left_trim_length):
out_f.write(">%s\n%s\n" % (label, seq))
# Step 2: Dereplicate sequences
output_derep_fp = join(working_dir,
"%s.derep" % basename(output_trim_fp))
dereplicate_seqs(seqs_fp=output_trim_fp,
output_fp=output_derep_fp,
min_size=min_size, threads=threads_per_sample)
# Step 3: Remove artifacts
output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
ref_fp=ref_fp,
working_dir=working_dir,
ref_db_fp=ref_db_fp,
negate=True,
threads=threads_per_sample,
sim_thresh=sim_thresh)
if not output_artif_fp:
warnings.warn('Problem removing artifacts from file %s' %
seqs_fp, UserWarning)
logger.warning('remove artifacts failed, aborting')
return None
# Step 4: Multiple sequence alignment
if num_seqs_left > 1:
output_msa_fp = join(working_dir,
"%s.msa" % basename(output_artif_fp))
alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
threads=threads_per_sample)
if not alignment:
warnings.warn('Problem performing multiple sequence alignment '
'on file %s' % seqs_fp, UserWarning)
logger.warning('msa failed. aborting')
return None
elif num_seqs_left == 1:
# only one sequence after remove artifacts (but could be many reads)
# no need to run MSA - just use the pre-msa file as input for next step
output_msa_fp = output_artif_fp
else:
err_msg = ('No sequences left after artifact removal in '
'file %s' % seqs_fp)
warnings.warn(err_msg, UserWarning)
logger.warning(err_msg)
return None
# Step 5: Launch deblur
output_deblur_fp = join(working_dir,
"%s.deblur" % basename(output_msa_fp))
with open(output_deblur_fp, 'w') as f:
seqs = deblur(sequence_generator(output_msa_fp), mean_error,
error_dist, indel_prob, indel_max)
if seqs is None:
warnings.warn('multiple sequence alignment file %s contains '
'no sequences' % output_msa_fp, UserWarning)
logger.warn('no sequences returned from deblur for file %s' %
output_msa_fp)
return None
for s in seqs:
# remove '-' from aligned sequences
s.sequence = s.sequence.replace('-', '')
f.write(s.to_fasta())
# Step 6: Chimera removal
output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
output_deblur_fp, working_dir, threads=threads_per_sample)
logger.info('finished processing file')
return output_no_chimeras_fp | python | def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
indel_prob, indel_max, trim_length, left_trim_length,
min_size, ref_fp, ref_db_fp, threads_per_sample=1,
sim_thresh=None, coverage_thresh=None):
"""Launch full deblur workflow for a single post split-libraries fasta file
Parameters
----------
seqs_fp: string
a post split library fasta file for deblurring
working_dir: string
working directory path
mean_error: float
mean error for original sequence estimate
error_dist: list
list of error probabilities for each hamming distance
indel_prob: float
insertion/deletion (indel) probability
indel_max: integer
maximal indel number
trim_length: integer
sequence trim length
left_trim_length: integer
trim the first n reads
min_size: integer
upper limit on sequence abundance (discard sequences below limit)
ref_fp: tuple
filepath(s) to FASTA reference database for artifact removal
ref_db_fp: tuple
filepath(s) to SortMeRNA indexed database for artifact removal
threads_per_sample: integer, optional
number of threads to use for SortMeRNA/mafft/vsearch
(0 for max available)
sim_thresh: float, optional
the minimal similarity for a sequence to the database.
if None, take the defaults (0.65 for negate=False,
0.95 for negate=True)
coverage_thresh: float, optional
the minimal coverage for alignment of a sequence to the database.
if None, take the defaults (0.3 for negate=False, 0.95 for negate=True)
Return
------
output_no_chimeras_fp : string
filepath to fasta file with no chimeras, or None if error encountered
"""
logger = logging.getLogger(__name__)
logger.info('--------------------------------------------------------')
logger.info('launch_workflow for file %s' % seqs_fp)
# Step 1: Trim sequences to specified length
output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
with open(output_trim_fp, 'w') as out_f:
for label, seq in trim_seqs(
input_seqs=sequence_generator(seqs_fp),
trim_len=trim_length,
left_trim_len=left_trim_length):
out_f.write(">%s\n%s\n" % (label, seq))
# Step 2: Dereplicate sequences
output_derep_fp = join(working_dir,
"%s.derep" % basename(output_trim_fp))
dereplicate_seqs(seqs_fp=output_trim_fp,
output_fp=output_derep_fp,
min_size=min_size, threads=threads_per_sample)
# Step 3: Remove artifacts
output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
ref_fp=ref_fp,
working_dir=working_dir,
ref_db_fp=ref_db_fp,
negate=True,
threads=threads_per_sample,
sim_thresh=sim_thresh)
if not output_artif_fp:
warnings.warn('Problem removing artifacts from file %s' %
seqs_fp, UserWarning)
logger.warning('remove artifacts failed, aborting')
return None
# Step 4: Multiple sequence alignment
if num_seqs_left > 1:
output_msa_fp = join(working_dir,
"%s.msa" % basename(output_artif_fp))
alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
threads=threads_per_sample)
if not alignment:
warnings.warn('Problem performing multiple sequence alignment '
'on file %s' % seqs_fp, UserWarning)
logger.warning('msa failed. aborting')
return None
elif num_seqs_left == 1:
# only one sequence after remove artifacts (but could be many reads)
# no need to run MSA - just use the pre-msa file as input for next step
output_msa_fp = output_artif_fp
else:
err_msg = ('No sequences left after artifact removal in '
'file %s' % seqs_fp)
warnings.warn(err_msg, UserWarning)
logger.warning(err_msg)
return None
# Step 5: Launch deblur
output_deblur_fp = join(working_dir,
"%s.deblur" % basename(output_msa_fp))
with open(output_deblur_fp, 'w') as f:
seqs = deblur(sequence_generator(output_msa_fp), mean_error,
error_dist, indel_prob, indel_max)
if seqs is None:
warnings.warn('multiple sequence alignment file %s contains '
'no sequences' % output_msa_fp, UserWarning)
logger.warn('no sequences returned from deblur for file %s' %
output_msa_fp)
return None
for s in seqs:
# remove '-' from aligned sequences
s.sequence = s.sequence.replace('-', '')
f.write(s.to_fasta())
# Step 6: Chimera removal
output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
output_deblur_fp, working_dir, threads=threads_per_sample)
logger.info('finished processing file')
return output_no_chimeras_fp | [
"def",
"launch_workflow",
"(",
"seqs_fp",
",",
"working_dir",
",",
"mean_error",
",",
"error_dist",
",",
"indel_prob",
",",
"indel_max",
",",
"trim_length",
",",
"left_trim_length",
",",
"min_size",
",",
"ref_fp",
",",
"ref_db_fp",
",",
"threads_per_sample",
"=",
"1",
",",
"sim_thresh",
"=",
"None",
",",
"coverage_thresh",
"=",
"None",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"'--------------------------------------------------------'",
")",
"logger",
".",
"info",
"(",
"'launch_workflow for file %s'",
"%",
"seqs_fp",
")",
"# Step 1: Trim sequences to specified length",
"output_trim_fp",
"=",
"join",
"(",
"working_dir",
",",
"\"%s.trim\"",
"%",
"basename",
"(",
"seqs_fp",
")",
")",
"with",
"open",
"(",
"output_trim_fp",
",",
"'w'",
")",
"as",
"out_f",
":",
"for",
"label",
",",
"seq",
"in",
"trim_seqs",
"(",
"input_seqs",
"=",
"sequence_generator",
"(",
"seqs_fp",
")",
",",
"trim_len",
"=",
"trim_length",
",",
"left_trim_len",
"=",
"left_trim_length",
")",
":",
"out_f",
".",
"write",
"(",
"\">%s\\n%s\\n\"",
"%",
"(",
"label",
",",
"seq",
")",
")",
"# Step 2: Dereplicate sequences",
"output_derep_fp",
"=",
"join",
"(",
"working_dir",
",",
"\"%s.derep\"",
"%",
"basename",
"(",
"output_trim_fp",
")",
")",
"dereplicate_seqs",
"(",
"seqs_fp",
"=",
"output_trim_fp",
",",
"output_fp",
"=",
"output_derep_fp",
",",
"min_size",
"=",
"min_size",
",",
"threads",
"=",
"threads_per_sample",
")",
"# Step 3: Remove artifacts",
"output_artif_fp",
",",
"num_seqs_left",
",",
"_",
"=",
"remove_artifacts_seqs",
"(",
"seqs_fp",
"=",
"output_derep_fp",
",",
"ref_fp",
"=",
"ref_fp",
",",
"working_dir",
"=",
"working_dir",
",",
"ref_db_fp",
"=",
"ref_db_fp",
",",
"negate",
"=",
"True",
",",
"threads",
"=",
"threads_per_sample",
",",
"sim_thresh",
"=",
"sim_thresh",
")",
"if",
"not",
"output_artif_fp",
":",
"warnings",
".",
"warn",
"(",
"'Problem removing artifacts from file %s'",
"%",
"seqs_fp",
",",
"UserWarning",
")",
"logger",
".",
"warning",
"(",
"'remove artifacts failed, aborting'",
")",
"return",
"None",
"# Step 4: Multiple sequence alignment",
"if",
"num_seqs_left",
">",
"1",
":",
"output_msa_fp",
"=",
"join",
"(",
"working_dir",
",",
"\"%s.msa\"",
"%",
"basename",
"(",
"output_artif_fp",
")",
")",
"alignment",
"=",
"multiple_sequence_alignment",
"(",
"seqs_fp",
"=",
"output_artif_fp",
",",
"threads",
"=",
"threads_per_sample",
")",
"if",
"not",
"alignment",
":",
"warnings",
".",
"warn",
"(",
"'Problem performing multiple sequence alignment '",
"'on file %s'",
"%",
"seqs_fp",
",",
"UserWarning",
")",
"logger",
".",
"warning",
"(",
"'msa failed. aborting'",
")",
"return",
"None",
"elif",
"num_seqs_left",
"==",
"1",
":",
"# only one sequence after remove artifacts (but could be many reads)",
"# no need to run MSA - just use the pre-msa file as input for next step",
"output_msa_fp",
"=",
"output_artif_fp",
"else",
":",
"err_msg",
"=",
"(",
"'No sequences left after artifact removal in '",
"'file %s'",
"%",
"seqs_fp",
")",
"warnings",
".",
"warn",
"(",
"err_msg",
",",
"UserWarning",
")",
"logger",
".",
"warning",
"(",
"err_msg",
")",
"return",
"None",
"# Step 5: Launch deblur",
"output_deblur_fp",
"=",
"join",
"(",
"working_dir",
",",
"\"%s.deblur\"",
"%",
"basename",
"(",
"output_msa_fp",
")",
")",
"with",
"open",
"(",
"output_deblur_fp",
",",
"'w'",
")",
"as",
"f",
":",
"seqs",
"=",
"deblur",
"(",
"sequence_generator",
"(",
"output_msa_fp",
")",
",",
"mean_error",
",",
"error_dist",
",",
"indel_prob",
",",
"indel_max",
")",
"if",
"seqs",
"is",
"None",
":",
"warnings",
".",
"warn",
"(",
"'multiple sequence alignment file %s contains '",
"'no sequences'",
"%",
"output_msa_fp",
",",
"UserWarning",
")",
"logger",
".",
"warn",
"(",
"'no sequences returned from deblur for file %s'",
"%",
"output_msa_fp",
")",
"return",
"None",
"for",
"s",
"in",
"seqs",
":",
"# remove '-' from aligned sequences",
"s",
".",
"sequence",
"=",
"s",
".",
"sequence",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
"f",
".",
"write",
"(",
"s",
".",
"to_fasta",
"(",
")",
")",
"# Step 6: Chimera removal",
"output_no_chimeras_fp",
"=",
"remove_chimeras_denovo_from_seqs",
"(",
"output_deblur_fp",
",",
"working_dir",
",",
"threads",
"=",
"threads_per_sample",
")",
"logger",
".",
"info",
"(",
"'finished processing file'",
")",
"return",
"output_no_chimeras_fp"
] | Launch full deblur workflow for a single post split-libraries fasta file
Parameters
----------
seqs_fp: string
a post split library fasta file for deblurring
working_dir: string
working directory path
mean_error: float
mean error for original sequence estimate
error_dist: list
list of error probabilities for each hamming distance
indel_prob: float
insertion/deletion (indel) probability
indel_max: integer
maximal indel number
trim_length: integer
sequence trim length
left_trim_length: integer
trim the first n reads
min_size: integer
upper limit on sequence abundance (discard sequences below limit)
ref_fp: tuple
filepath(s) to FASTA reference database for artifact removal
ref_db_fp: tuple
filepath(s) to SortMeRNA indexed database for artifact removal
threads_per_sample: integer, optional
number of threads to use for SortMeRNA/mafft/vsearch
(0 for max available)
sim_thresh: float, optional
the minimal similarity for a sequence to the database.
if None, take the defaults (0.65 for negate=False,
0.95 for negate=True)
coverage_thresh: float, optional
the minimal coverage for alignment of a sequence to the database.
if None, take the defaults (0.3 for negate=False, 0.95 for negate=True)
Return
------
output_no_chimeras_fp : string
filepath to fasta file with no chimeras, or None if error encountered
"Launch",
"full",
"deblur",
"workflow",
"for",
"a",
"single",
"post",
"split",
"-",
"libraries",
"fasta",
"file"
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L777-L895 |
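An end-to-end sketch of launch_workflow for one post split-libraries fasta file; it assumes SortMeRNA, MAFFT and VSEARCH are installed, error_dist=None falls through to the default error profile inside deblur(), and every path and parameter value here is an illustrative placeholder.
from deblur.workflow import start_log, launch_workflow

start_log(filename='deblur.example.log')
out_fp = launch_workflow(
    seqs_fp='sample1.fna',
    working_dir='working',
    mean_error=0.005,
    error_dist=None,            # None -> default error profile in deblur()
    indel_prob=0.01,
    indel_max=3,
    trim_length=150,
    left_trim_length=0,
    min_size=2,
    ref_fp=('ref/reference.fa',),
    ref_db_fp=('ref/reference.idx',),
    threads_per_sample=1)
if out_fp is None:
    print('workflow aborted, see the log file')
else:
    print('chimera-free reads written to %s' % out_fp)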
biocore/deblur | deblur/workflow.py | start_log | def start_log(level=logging.DEBUG, filename=None):
"""start the logger for the run
Parameters
----------
level : int, optional
logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
filename : str, optional
name of the filename to save the log to or
None (default) to use deblur.log.TIMESTAMP
"""
if filename is None:
tstr = time.ctime()
tstr = tstr.replace(' ', '.')
tstr = tstr.replace(':', '.')
filename = 'deblur.log.%s' % tstr
logging.basicConfig(filename=filename, level=level,
format='%(levelname)s(%(thread)d)'
'%(asctime)s:%(message)s')
logger = logging.getLogger(__name__)
logger.info('*************************')
logger.info('deblurring started') | python | def start_log(level=logging.DEBUG, filename=None):
"""start the logger for the run
Parameters
----------
level : int, optional
logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
filename : str, optional
name of the filename to save the log to or
None (default) to use deblur.log.TIMESTAMP
"""
if filename is None:
tstr = time.ctime()
tstr = tstr.replace(' ', '.')
tstr = tstr.replace(':', '.')
filename = 'deblur.log.%s' % tstr
logging.basicConfig(filename=filename, level=level,
format='%(levelname)s(%(thread)d)'
'%(asctime)s:%(message)s')
logger = logging.getLogger(__name__)
logger.info('*************************')
logger.info('deblurring started') | [
"def",
"start_log",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
",",
"filename",
"=",
"None",
")",
":",
"if",
"filename",
"is",
"None",
":",
"tstr",
"=",
"time",
".",
"ctime",
"(",
")",
"tstr",
"=",
"tstr",
".",
"replace",
"(",
"' '",
",",
"'.'",
")",
"tstr",
"=",
"tstr",
".",
"replace",
"(",
"':'",
",",
"'.'",
")",
"filename",
"=",
"'deblur.log.%s'",
"%",
"tstr",
"logging",
".",
"basicConfig",
"(",
"filename",
"=",
"filename",
",",
"level",
"=",
"level",
",",
"format",
"=",
"'%(levelname)s(%(thread)d)'",
"'%(asctime)s:%(message)s'",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"'*************************'",
")",
"logger",
".",
"info",
"(",
"'deblurring started'",
")"
] | start the logger for the run
Parameters
----------
level : int, optional
logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
filename : str, optional
name of the filename to save the log to or
None (default) to use deblur.log.TIMESTAMP | [
"start",
"the",
"logger",
"for",
"the",
"run"
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L898-L919 |
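A one-liner showing start_log; with filename=None a timestamped deblur.log file is created in the current directory.
import logging
from deblur.workflow import start_log

start_log(level=logging.INFO, filename='deblur.example.log')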
biocore/deblur | deblur/workflow.py | _system_call | def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exit status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value | python | def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exit status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value | [
"def",
"_system_call",
"(",
"cmd",
",",
"stdoutfilename",
"=",
"None",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"'system call: %s'",
"%",
"cmd",
")",
"if",
"stdoutfilename",
":",
"with",
"open",
"(",
"stdoutfilename",
",",
"'w'",
")",
"as",
"f",
":",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"universal_newlines",
"=",
"True",
",",
"shell",
"=",
"False",
",",
"stdout",
"=",
"f",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"else",
":",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"universal_newlines",
"=",
"True",
",",
"shell",
"=",
"False",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"# Communicate pulls all stdout/stderr from the PIPEs",
"# This call blocks until the command is done",
"stdout",
",",
"stderr",
"=",
"proc",
".",
"communicate",
"(",
")",
"return_value",
"=",
"proc",
".",
"returncode",
"return",
"stdout",
",",
"stderr",
",",
"return_value"
] | Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exit status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license. | [
"Execute",
"the",
"command",
"cmd",
"Parameters",
"----------",
"cmd",
":",
"str",
"The",
"string",
"containing",
"the",
"command",
"to",
"be",
"run",
".",
"stdoutfilename",
":",
"str",
"Name",
"of",
"the",
"file",
"to",
"save",
"stdout",
"to",
"or",
"None",
"(",
"default",
")",
"to",
"not",
"save",
"to",
"file",
"stderrfilename",
":",
"str",
"Name",
"of",
"the",
"file",
"to",
"save",
"stderr",
"to",
"or",
"None",
"(",
"default",
")",
"to",
"not",
"save",
"to",
"file"
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L922-L965 |
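A sketch of the _system_call subprocess wrapper above; note that cmd is a list of arguments (shell=False), and the command shown is only a placeholder.
from deblur.workflow import _system_call

sout, serr, ret = _system_call(['vsearch', '--version'])
if ret != 0:
    print('command failed: %s' % serr)
else:
    print(sout)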
biocore/deblur | deblur/deblurring.py | get_sequences | def get_sequences(input_seqs):
"""Returns a list of Sequences
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format
Returns
-------
list of Sequence
Raises
------
ValueError
If no sequences were found in `input_seqs`
If all the sequences do not have the same length either aligned or
unaligned.
"""
try:
seqs = [Sequence(id, seq) for id, seq in input_seqs]
except Exception:
seqs = []
if len(seqs) == 0:
logger = logging.getLogger(__name__)
logger.warn('No sequences found in fasta file!')
return None
# Check that all the sequence lengths (aligned and unaligned are the same)
aligned_lengths = set(s.length for s in seqs)
unaligned_lengths = set(s.unaligned_length for s in seqs)
if len(aligned_lengths) != 1 or len(unaligned_lengths) != 1:
raise ValueError(
"Not all sequence have the same length. Aligned lengths: %s, "
"sequence lengths: %s"
% (", ".join(map(str, aligned_lengths)),
", ".join(map(str, unaligned_lengths))))
seqs = sorted(seqs, key=attrgetter('frequency'), reverse=True)
return seqs | python | def get_sequences(input_seqs):
"""Returns a list of Sequences
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format
Returns
-------
list of Sequence
Raises
------
ValueError
If no sequences were found in `input_seqs`
If all the sequences do not have the same length either aligned or
unaligned.
"""
try:
seqs = [Sequence(id, seq) for id, seq in input_seqs]
except Exception:
seqs = []
if len(seqs) == 0:
logger = logging.getLogger(__name__)
logger.warn('No sequences found in fasta file!')
return None
# Check that all the sequence lengths (aligned and unaligned are the same)
aligned_lengths = set(s.length for s in seqs)
unaligned_lengths = set(s.unaligned_length for s in seqs)
if len(aligned_lengths) != 1 or len(unaligned_lengths) != 1:
raise ValueError(
"Not all sequence have the same length. Aligned lengths: %s, "
"sequence lengths: %s"
% (", ".join(map(str, aligned_lengths)),
", ".join(map(str, unaligned_lengths))))
seqs = sorted(seqs, key=attrgetter('frequency'), reverse=True)
return seqs | [
"def",
"get_sequences",
"(",
"input_seqs",
")",
":",
"try",
":",
"seqs",
"=",
"[",
"Sequence",
"(",
"id",
",",
"seq",
")",
"for",
"id",
",",
"seq",
"in",
"input_seqs",
"]",
"except",
"Exception",
":",
"seqs",
"=",
"[",
"]",
"if",
"len",
"(",
"seqs",
")",
"==",
"0",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"warn",
"(",
"'No sequences found in fasta file!'",
")",
"return",
"None",
"# Check that all the sequence lengths (aligned and unaligned are the same)",
"aligned_lengths",
"=",
"set",
"(",
"s",
".",
"length",
"for",
"s",
"in",
"seqs",
")",
"unaligned_lengths",
"=",
"set",
"(",
"s",
".",
"unaligned_length",
"for",
"s",
"in",
"seqs",
")",
"if",
"len",
"(",
"aligned_lengths",
")",
"!=",
"1",
"or",
"len",
"(",
"unaligned_lengths",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Not all sequence have the same length. Aligned lengths: %s, \"",
"\"sequence lengths: %s\"",
"%",
"(",
"\", \"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"aligned_lengths",
")",
")",
",",
"\", \"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"unaligned_lengths",
")",
")",
")",
")",
"seqs",
"=",
"sorted",
"(",
"seqs",
",",
"key",
"=",
"attrgetter",
"(",
"'frequency'",
")",
",",
"reverse",
"=",
"True",
")",
"return",
"seqs"
] | Returns a list of Sequences
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format
Returns
-------
list of Sequence
Raises
------
ValueError
If no sequences were found in `input_seqs`
If all the sequences do not have the same length either aligned or
unaligned. | [
"Returns",
"a",
"list",
"of",
"Sequences"
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/deblurring.py#L27-L68 |
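A small example of get_sequences; labels carry the abundance in the size=N format (as produced by dereplication) and all input sequences must have the same length. The attribute names used below (sequence, frequency) are the ones referenced elsewhere in this file.
from deblur.deblurring import get_sequences

reads = [('seq1;size=10;', 'ACGTACGT'),
         ('seq2;size=3;', 'ACGTACGA')]
seqs = get_sequences(reads)  # sorted by frequency, highest first
for s in seqs:
    print(s.sequence, s.frequency)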
biocore/deblur | deblur/deblurring.py | deblur | def deblur(input_seqs, mean_error=0.005,
error_dist=None,
indel_prob=0.01, indel_max=3):
"""Deblur the reads
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format. The label
should include the sequence count in the 'size=X' format.
mean_error : float, optional
The mean illumina error, used for original sequence estimate.
Default: 0.005
error_dist : list of float, optional
A list of error probabilities. The length of the list determines the
amount of hamming distances taken into account. Default: None, use
the default error profile (from get_default_error_profile() )
indel_prob : float, optional
Indel probability (same for N indels). Default: 0.01
indel_max : int, optional
The maximal number of indels expected by errors. Default: 3
Returns
-------
list of Sequence
The deblurred sequences
Notes
-----
mean_error is used only for normalizing the peak height before deblurring.
The array 'error_dist' represents the error distribution, where
Xi = max frequency of error hamming. The length of this array - 1 limits
the hamming distance taken into account, i.e. if the length of `error_dist`
is 10, sequences up to 10 - 1 = 9 hamming distance will be taken into
account
"""
logger = logging.getLogger(__name__)
if error_dist is None:
error_dist = get_default_error_profile()
logger.debug('Using error profile %s' % error_dist)
# Get the sequences
seqs = get_sequences(input_seqs)
if seqs is None:
logger.warn('no sequences deblurred')
return None
logger.info('deblurring %d sequences' % len(seqs))
# fix the original frequencies of each read error using the
# mean error profile
mod_factor = pow((1 - mean_error), seqs[0].unaligned_length)
error_dist = np.array(error_dist) / mod_factor
max_h_dist = len(error_dist) - 1
for seq_i in seqs:
# no need to remove neighbors if freq. is <=0
if seq_i.frequency <= 0:
continue
# Correct for the fact that many reads are expected to be mutated
num_err = error_dist * seq_i.frequency
# if it's low level, just continue
if num_err[1] < 0.1:
continue
# Compare to all other sequences and calculate hamming dist
seq_i_len = len(seq_i.sequence.rstrip('-'))
for seq_j in seqs:
# Ignore current sequence
if seq_i == seq_j:
continue
# Calculate the hamming distance
h_dist = np.count_nonzero(np.not_equal(seq_i.np_sequence,
seq_j.np_sequence))
# If far away, don't need to correct
if h_dist > max_h_dist:
continue
# Close, so lets calculate exact distance
# We stop checking in the shortest sequence after removing trailing
# indels. We need to do this in order to avoid double counting
# the insertions/deletions
length = min(seq_i_len, len(seq_j.sequence.rstrip('-')))
sub_seq_i = seq_i.np_sequence[:length]
sub_seq_j = seq_j.np_sequence[:length]
mask = (sub_seq_i != sub_seq_j)
# find all indels
mut_is_indel = np.logical_or(sub_seq_i[mask] == 4,
sub_seq_j[mask] == 4)
num_indels = mut_is_indel.sum()
if num_indels > 0:
# need to account for indel in one sequence not solved in the other
# (so we have '-' at the end. Need to ignore it in the total count)
h_dist = np.count_nonzero(np.not_equal(seq_i.np_sequence[:length],
seq_j.np_sequence[:length]))
num_substitutions = h_dist - num_indels
correction_value = num_err[num_substitutions]
if num_indels > indel_max:
correction_value = 0
elif num_indels > 0:
# remove errors due to (PCR?) indels (saw in 22 mock mixture)
correction_value = correction_value * indel_prob
# met all the criteria - so correct the frequency of the neighbor
seq_j.frequency -= correction_value
result = [s for s in seqs if round(s.frequency) > 0]
logger.info('%d unique sequences left following deblurring' % len(result))
return result | python | def deblur(input_seqs, mean_error=0.005,
error_dist=None,
indel_prob=0.01, indel_max=3):
"""Deblur the reads
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format. The label
should include the sequence count in the 'size=X' format.
mean_error : float, optional
The mean illumina error, used for original sequence estimate.
Default: 0.005
error_dist : list of float, optional
A list of error probabilities. The length of the list determines the
amount of hamming distances taken into account. Default: None, use
the default error profile (from get_default_error_profile() )
indel_prob : float, optional
Indel probability (same for N indels). Default: 0.01
indel_max : int, optional
The maximal number of indels expected by errors. Default: 3
Returns
-------
list of Sequence
The deblurred sequences
Notes
-----
mean_error is used only for normalizing the peak height before deblurring.
The array 'error_dist' represents the error distribution, where
Xi = max frequency of error hamming. The length of this array - 1 limits
the hamming distance taken into account, i.e. if the length of `error_dist`
is 10, sequences up to 10 - 1 = 9 hamming distance will be taken into
account
"""
logger = logging.getLogger(__name__)
if error_dist is None:
error_dist = get_default_error_profile()
logger.debug('Using error profile %s' % error_dist)
# Get the sequences
seqs = get_sequences(input_seqs)
if seqs is None:
logger.warn('no sequences deblurred')
return None
logger.info('deblurring %d sequences' % len(seqs))
# fix the original frequencies of each read error using the
# mean error profile
mod_factor = pow((1 - mean_error), seqs[0].unaligned_length)
error_dist = np.array(error_dist) / mod_factor
max_h_dist = len(error_dist) - 1
for seq_i in seqs:
# no need to remove neighbors if freq. is <=0
if seq_i.frequency <= 0:
continue
# Correct for the fact that many reads are expected to be mutated
num_err = error_dist * seq_i.frequency
# if it's low level, just continue
if num_err[1] < 0.1:
continue
# Compare to all other sequences and calculate hamming dist
seq_i_len = len(seq_i.sequence.rstrip('-'))
for seq_j in seqs:
# Ignore current sequence
if seq_i == seq_j:
continue
# Calculate the hamming distance
h_dist = np.count_nonzero(np.not_equal(seq_i.np_sequence,
seq_j.np_sequence))
# If far away, don't need to correct
if h_dist > max_h_dist:
continue
# Close, so lets calculate exact distance
# We stop checking in the shortest sequence after removing trailing
# indels. We need to do this in order to avoid double counting
# the insertions/deletions
length = min(seq_i_len, len(seq_j.sequence.rstrip('-')))
sub_seq_i = seq_i.np_sequence[:length]
sub_seq_j = seq_j.np_sequence[:length]
mask = (sub_seq_i != sub_seq_j)
# find all indels
mut_is_indel = np.logical_or(sub_seq_i[mask] == 4,
sub_seq_j[mask] == 4)
num_indels = mut_is_indel.sum()
if num_indels > 0:
# need to account for indel in one sequence not solved in the other
# (so we have '-' at the end. Need to ignore it in the total count)
h_dist = np.count_nonzero(np.not_equal(seq_i.np_sequence[:length],
seq_j.np_sequence[:length]))
num_substitutions = h_dist - num_indels
correction_value = num_err[num_substitutions]
if num_indels > indel_max:
correction_value = 0
elif num_indels > 0:
# remove errors due to (PCR?) indels (saw in 22 mock mixture)
correction_value = correction_value * indel_prob
# met all the criteria - so correct the frequency of the neighbor
seq_j.frequency -= correction_value
result = [s for s in seqs if round(s.frequency) > 0]
logger.info('%d unique sequences left following deblurring' % len(result))
return result | [
"def",
"deblur",
"(",
"input_seqs",
",",
"mean_error",
"=",
"0.005",
",",
"error_dist",
"=",
"None",
",",
"indel_prob",
"=",
"0.01",
",",
"indel_max",
"=",
"3",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"if",
"error_dist",
"is",
"None",
":",
"error_dist",
"=",
"get_default_error_profile",
"(",
")",
"logger",
".",
"debug",
"(",
"'Using error profile %s'",
"%",
"error_dist",
")",
"# Get the sequences",
"seqs",
"=",
"get_sequences",
"(",
"input_seqs",
")",
"if",
"seqs",
"is",
"None",
":",
"logger",
".",
"warn",
"(",
"'no sequences deblurred'",
")",
"return",
"None",
"logger",
".",
"info",
"(",
"'deblurring %d sequences'",
"%",
"len",
"(",
"seqs",
")",
")",
"# fix the original frequencies of each read error using the",
"# mean error profile",
"mod_factor",
"=",
"pow",
"(",
"(",
"1",
"-",
"mean_error",
")",
",",
"seqs",
"[",
"0",
"]",
".",
"unaligned_length",
")",
"error_dist",
"=",
"np",
".",
"array",
"(",
"error_dist",
")",
"/",
"mod_factor",
"max_h_dist",
"=",
"len",
"(",
"error_dist",
")",
"-",
"1",
"for",
"seq_i",
"in",
"seqs",
":",
"# no need to remove neighbors if freq. is <=0",
"if",
"seq_i",
".",
"frequency",
"<=",
"0",
":",
"continue",
"# Correct for the fact that many reads are expected to be mutated",
"num_err",
"=",
"error_dist",
"*",
"seq_i",
".",
"frequency",
"# if it's low level, just continue",
"if",
"num_err",
"[",
"1",
"]",
"<",
"0.1",
":",
"continue",
"# Compare to all other sequences and calculate hamming dist",
"seq_i_len",
"=",
"len",
"(",
"seq_i",
".",
"sequence",
".",
"rstrip",
"(",
"'-'",
")",
")",
"for",
"seq_j",
"in",
"seqs",
":",
"# Ignore current sequence",
"if",
"seq_i",
"==",
"seq_j",
":",
"continue",
"# Calculate the hamming distance",
"h_dist",
"=",
"np",
".",
"count_nonzero",
"(",
"np",
".",
"not_equal",
"(",
"seq_i",
".",
"np_sequence",
",",
"seq_j",
".",
"np_sequence",
")",
")",
"# If far away, don't need to correct",
"if",
"h_dist",
">",
"max_h_dist",
":",
"continue",
"# Close, so lets calculate exact distance",
"# We stop checking in the shortest sequence after removing trailing",
"# indels. We need to do this in order to avoid double counting",
"# the insertions/deletions",
"length",
"=",
"min",
"(",
"seq_i_len",
",",
"len",
"(",
"seq_j",
".",
"sequence",
".",
"rstrip",
"(",
"'-'",
")",
")",
")",
"sub_seq_i",
"=",
"seq_i",
".",
"np_sequence",
"[",
":",
"length",
"]",
"sub_seq_j",
"=",
"seq_j",
".",
"np_sequence",
"[",
":",
"length",
"]",
"mask",
"=",
"(",
"sub_seq_i",
"!=",
"sub_seq_j",
")",
"# find all indels",
"mut_is_indel",
"=",
"np",
".",
"logical_or",
"(",
"sub_seq_i",
"[",
"mask",
"]",
"==",
"4",
",",
"sub_seq_j",
"[",
"mask",
"]",
"==",
"4",
")",
"num_indels",
"=",
"mut_is_indel",
".",
"sum",
"(",
")",
"if",
"num_indels",
">",
"0",
":",
"# need to account for indel in one sequence not solved in the other",
"# (so we have '-' at the end. Need to ignore it in the total count)",
"h_dist",
"=",
"np",
".",
"count_nonzero",
"(",
"np",
".",
"not_equal",
"(",
"seq_i",
".",
"np_sequence",
"[",
":",
"length",
"]",
",",
"seq_j",
".",
"np_sequence",
"[",
":",
"length",
"]",
")",
")",
"num_substitutions",
"=",
"h_dist",
"-",
"num_indels",
"correction_value",
"=",
"num_err",
"[",
"num_substitutions",
"]",
"if",
"num_indels",
">",
"indel_max",
":",
"correction_value",
"=",
"0",
"elif",
"num_indels",
">",
"0",
":",
"# remove errors due to (PCR?) indels (saw in 22 mock mixture)",
"correction_value",
"=",
"correction_value",
"*",
"indel_prob",
"# met all the criteria - so correct the frequency of the neighbor",
"seq_j",
".",
"frequency",
"-=",
"correction_value",
"result",
"=",
"[",
"s",
"for",
"s",
"in",
"seqs",
"if",
"round",
"(",
"s",
".",
"frequency",
")",
">",
"0",
"]",
"logger",
".",
"info",
"(",
"'%d unique sequences left following deblurring'",
"%",
"len",
"(",
"result",
")",
")",
"return",
"result"
] | Deblur the reads
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format. The label
should include the sequence count in the 'size=X' format.
mean_error : float, optional
The mean illumina error, used for original sequence estimate.
Default: 0.005
error_dist : list of float, optional
A list of error probabilities. The length of the list determines the
amount of hamming distances taken into account. Default: None, use
the default error profile (from get_default_error_profile() )
indel_prob : float, optional
Indel probability (same for N indels). Default: 0.01
indel_max : int, optional
The maximal number of indels expected by errors. Default: 3
Returns
-------
list of Sequence
The deblurred sequences
Notes
-----
mean_error is used only for normalizing the peak height before deblurring.
The array 'error_dist' represents the error distribution, where
Xi = max frequency of error hamming. The length of this array - 1 limits
the hamming distance taken into account, i.e. if the length of `error_dist`
is 10, sequences up to 10 - 1 = 9 hamming distance will be taken into
account | [
"Deblur",
"the",
"reads"
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/deblurring.py#L71-L189 |
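A minimal usage sketch for the deblur() record above may help make the expected input concrete. Everything here is illustrative: the labels, read counts and sequences are invented, and the import path is assumed from the func_path_in_repository field (deblur/deblurring.py).

import logging

from deblur.deblurring import deblur

logging.basicConfig(level=logging.INFO)

# (label, sequence) pairs; each label carries its read count in 'size=X' form
# and all sequences share the same aligned length (gaps would be '-').
input_seqs = [
    ("seq1;size=100;", "ACGTACGTAC"),
    ("seq2;size=5;", "ACGTACGTAT"),   # one mismatch away from seq1
    ("seq3;size=1;", "AGGTACGTAT"),   # two mismatches away from seq1
]

result = deblur(input_seqs, mean_error=0.005, indel_prob=0.01, indel_max=3)
if result is not None:
    for seq in result:
        print(seq.label, round(seq.frequency))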
biocore/deblur | deblur/sequence.py | Sequence.to_fasta | def to_fasta(self):
"""Returns a string with the sequence in fasta format
Returns
-------
str
The FASTA representation of the sequence
"""
prefix, suffix = re.split('(?<=size=)\w+', self.label, maxsplit=1)
new_count = int(round(self.frequency))
new_label = "%s%d%s" % (prefix, new_count, suffix)
return ">%s\n%s\n" % (new_label, self.sequence) | python | def to_fasta(self):
"""Returns a string with the sequence in fasta format
Returns
-------
str
The FASTA representation of the sequence
"""
prefix, suffix = re.split('(?<=size=)\w+', self.label, maxsplit=1)
new_count = int(round(self.frequency))
new_label = "%s%d%s" % (prefix, new_count, suffix)
return ">%s\n%s\n" % (new_label, self.sequence) | [
"def",
"to_fasta",
"(",
"self",
")",
":",
"prefix",
",",
"suffix",
"=",
"re",
".",
"split",
"(",
"'(?<=size=)\\w+'",
",",
"self",
".",
"label",
",",
"maxsplit",
"=",
"1",
")",
"new_count",
"=",
"int",
"(",
"round",
"(",
"self",
".",
"frequency",
")",
")",
"new_label",
"=",
"\"%s%d%s\"",
"%",
"(",
"prefix",
",",
"new_count",
",",
"suffix",
")",
"return",
"\">%s\\n%s\\n\"",
"%",
"(",
"new_label",
",",
"self",
".",
"sequence",
")"
] | Returns a string with the sequence in fasta format
Returns
-------
str
The FASTA representation of the sequence | [
"Returns",
"a",
"string",
"with",
"the",
"sequence",
"in",
"fasta",
"format"
] | train | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/sequence.py#L58-L69 |
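The label rewrite performed by to_fasta() is easy to miss in the record above, so here is a standalone illustration of the same regex and formatting; the label and frequency values are invented.

import re

label = "seq42;size=100;"
frequency = 87.6  # e.g. a frequency left over after deblurring

# Replace the digits that follow 'size=' with the rounded frequency,
# exactly as Sequence.to_fasta() does above.
prefix, suffix = re.split(r'(?<=size=)\w+', label, maxsplit=1)
new_label = "%s%d%s" % (prefix, int(round(frequency)), suffix)
print(">" + new_label)  # -> >seq42;size=88;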
asyncee/django-easy-select2 | easy_select2/widgets.py | Select2Mixin.render_select2_options_code | def render_select2_options_code(self, options, id_):
"""Render options for select2."""
output = []
for key, value in options.items():
if isinstance(value, (dict, list)):
value = json.dumps(value)
output.append("data-{name}='{value}'".format(
name=key,
value=mark_safe(value)))
return mark_safe(' '.join(output)) | python | def render_select2_options_code(self, options, id_):
"""Render options for select2."""
output = []
for key, value in options.items():
if isinstance(value, (dict, list)):
value = json.dumps(value)
output.append("data-{name}='{value}'".format(
name=key,
value=mark_safe(value)))
return mark_safe(' '.join(output)) | [
"def",
"render_select2_options_code",
"(",
"self",
",",
"options",
",",
"id_",
")",
":",
"output",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"options",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"(",
"dict",
",",
"list",
")",
")",
":",
"value",
"=",
"json",
".",
"dumps",
"(",
"value",
")",
"output",
".",
"append",
"(",
"\"data-{name}='{value}'\"",
".",
"format",
"(",
"name",
"=",
"key",
",",
"value",
"=",
"mark_safe",
"(",
"value",
")",
")",
")",
"return",
"mark_safe",
"(",
"' '",
".",
"join",
"(",
"output",
")",
")"
] | Render options for select2. | [
"Render",
"options",
"for",
"select2",
"."
] | train | https://github.com/asyncee/django-easy-select2/blob/f81bbaa91d0266029be7ef6d075d85f13273e3a5/easy_select2/widgets.py#L76-L85 |
asyncee/django-easy-select2 | easy_select2/widgets.py | Select2Mixin.render_js_code | def render_js_code(self, id_, *args, **kwargs):
"""Render html container for Select2 widget with options."""
if id_:
options = self.render_select2_options_code(
dict(self.get_options()), id_)
return mark_safe(self.html.format(id=id_, options=options))
return u'' | python | def render_js_code(self, id_, *args, **kwargs):
"""Render html container for Select2 widget with options."""
if id_:
options = self.render_select2_options_code(
dict(self.get_options()), id_)
return mark_safe(self.html.format(id=id_, options=options))
return u'' | [
"def",
"render_js_code",
"(",
"self",
",",
"id_",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"id_",
":",
"options",
"=",
"self",
".",
"render_select2_options_code",
"(",
"dict",
"(",
"self",
".",
"get_options",
"(",
")",
")",
",",
"id_",
")",
"return",
"mark_safe",
"(",
"self",
".",
"html",
".",
"format",
"(",
"id",
"=",
"id_",
",",
"options",
"=",
"options",
")",
")",
"return",
"u''"
] | Render html container for Select2 widget with options. | [
"Render",
"html",
"container",
"for",
"Select2",
"widget",
"with",
"options",
"."
] | train | https://github.com/asyncee/django-easy-select2/blob/f81bbaa91d0266029be7ef6d075d85f13273e3a5/easy_select2/widgets.py#L87-L93 |
asyncee/django-easy-select2 | easy_select2/widgets.py | Select2Mixin.render | def render(self, name, value, attrs=None, **kwargs):
"""
Extend base class's `render` method by appending
javascript inline text to html output.
"""
output = super(Select2Mixin, self).render(
name, value, attrs=attrs, **kwargs)
id_ = attrs['id']
output += self.render_js_code(
id_, name, value, attrs=attrs, **kwargs)
return mark_safe(output) | python | def render(self, name, value, attrs=None, **kwargs):
"""
Extend base class's `render` method by appending
javascript inline text to html output.
"""
output = super(Select2Mixin, self).render(
name, value, attrs=attrs, **kwargs)
id_ = attrs['id']
output += self.render_js_code(
id_, name, value, attrs=attrs, **kwargs)
return mark_safe(output) | [
"def",
"render",
"(",
"self",
",",
"name",
",",
"value",
",",
"attrs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"output",
"=",
"super",
"(",
"Select2Mixin",
",",
"self",
")",
".",
"render",
"(",
"name",
",",
"value",
",",
"attrs",
"=",
"attrs",
",",
"*",
"*",
"kwargs",
")",
"id_",
"=",
"attrs",
"[",
"'id'",
"]",
"output",
"+=",
"self",
".",
"render_js_code",
"(",
"id_",
",",
"name",
",",
"value",
",",
"attrs",
"=",
"attrs",
",",
"*",
"*",
"kwargs",
")",
"return",
"mark_safe",
"(",
"output",
")"
] | Extend base class's `render` method by appending
javascript inline text to html output. | [
"Extend",
"base",
"class",
"s",
"render",
"method",
"by",
"appending",
"javascript",
"inline",
"text",
"to",
"html",
"output",
"."
] | train | https://github.com/asyncee/django-easy-select2/blob/f81bbaa91d0266029be7ef6d075d85f13273e3a5/easy_select2/widgets.py#L95-L105 |
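The three Select2Mixin methods above split the work into serializing widget options as data-* attributes, wrapping them in an HTML container, and appending that container to the normal widget output. The option-serialization step can be shown without Django at all; the options dict below is invented and Django's mark_safe wrapping is left out.

import json

options = {'width': '250px', 'placeholder': 'Pick a value', 'ajax': {'url': '/search/'}}

# Same transformation as render_select2_options_code(): dict/list values are
# JSON-encoded, and every pair becomes a data-<name>='<value>' attribute.
parts = []
for key, value in options.items():
    if isinstance(value, (dict, list)):
        value = json.dumps(value)
    parts.append("data-{name}='{value}'".format(name=key, value=value))
print(' '.join(parts))
# data-width='250px' data-placeholder='Pick a value' data-ajax='{"url": "/search/"}'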
asyncee/django-easy-select2 | docs/source/_ext/djangodocs.py | visit_console_html | def visit_console_html(self, node):
"""Generate HTML for the console directive."""
if self.builder.name in ('djangohtml', 'json') and node['win_console_text']:
# Put a mark on the document object signaling the fact the directive
# has been used on it.
self.document._console_directive_used_flag = True
uid = node['uid']
self.body.append('''\
<div class="console-block" id="console-block-%(id)s">
<input class="c-tab-unix" id="c-tab-%(id)s-unix" type="radio" name="console-%(id)s" checked>
<label for="c-tab-%(id)s-unix" title="Linux/macOS">/</label>
<input class="c-tab-win" id="c-tab-%(id)s-win" type="radio" name="console-%(id)s">
<label for="c-tab-%(id)s-win" title="Windows"></label>
<section class="c-content-unix" id="c-content-%(id)s-unix">\n''' % {'id': uid})
try:
self.visit_literal_block(node)
except nodes.SkipNode:
pass
self.body.append('</section>\n')
self.body.append('<section class="c-content-win" id="c-content-%(id)s-win">\n' % {'id': uid})
win_text = node['win_console_text']
highlight_args = {'force': True}
if 'linenos' in node:
linenos = node['linenos']
else:
linenos = win_text.count('\n') >= self.highlightlinenothreshold - 1
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(
win_text, 'doscon', warn=warner, linenos=linenos, **highlight_args
)
self.body.append(highlighted)
self.body.append('</section>\n')
self.body.append('</div>\n')
raise nodes.SkipNode
else:
self.visit_literal_block(node) | python | def visit_console_html(self, node):
"""Generate HTML for the console directive."""
if self.builder.name in ('djangohtml', 'json') and node['win_console_text']:
# Put a mark on the document object signaling the fact the directive
# has been used on it.
self.document._console_directive_used_flag = True
uid = node['uid']
self.body.append('''\
<div class="console-block" id="console-block-%(id)s">
<input class="c-tab-unix" id="c-tab-%(id)s-unix" type="radio" name="console-%(id)s" checked>
<label for="c-tab-%(id)s-unix" title="Linux/macOS">/</label>
<input class="c-tab-win" id="c-tab-%(id)s-win" type="radio" name="console-%(id)s">
<label for="c-tab-%(id)s-win" title="Windows"></label>
<section class="c-content-unix" id="c-content-%(id)s-unix">\n''' % {'id': uid})
try:
self.visit_literal_block(node)
except nodes.SkipNode:
pass
self.body.append('</section>\n')
self.body.append('<section class="c-content-win" id="c-content-%(id)s-win">\n' % {'id': uid})
win_text = node['win_console_text']
highlight_args = {'force': True}
if 'linenos' in node:
linenos = node['linenos']
else:
linenos = win_text.count('\n') >= self.highlightlinenothreshold - 1
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(
win_text, 'doscon', warn=warner, linenos=linenos, **highlight_args
)
self.body.append(highlighted)
self.body.append('</section>\n')
self.body.append('</div>\n')
raise nodes.SkipNode
else:
self.visit_literal_block(node) | [
"def",
"visit_console_html",
"(",
"self",
",",
"node",
")",
":",
"if",
"self",
".",
"builder",
".",
"name",
"in",
"(",
"'djangohtml'",
",",
"'json'",
")",
"and",
"node",
"[",
"'win_console_text'",
"]",
":",
"# Put a mark on the document object signaling the fact the directive",
"# has been used on it.",
"self",
".",
"document",
".",
"_console_directive_used_flag",
"=",
"True",
"uid",
"=",
"node",
"[",
"'uid'",
"]",
"self",
".",
"body",
".",
"append",
"(",
"'''\\\n<div class=\"console-block\" id=\"console-block-%(id)s\">\n<input class=\"c-tab-unix\" id=\"c-tab-%(id)s-unix\" type=\"radio\" name=\"console-%(id)s\" checked>\n<label for=\"c-tab-%(id)s-unix\" title=\"Linux/macOS\">/</label>\n<input class=\"c-tab-win\" id=\"c-tab-%(id)s-win\" type=\"radio\" name=\"console-%(id)s\">\n<label for=\"c-tab-%(id)s-win\" title=\"Windows\"></label>\n<section class=\"c-content-unix\" id=\"c-content-%(id)s-unix\">\\n'''",
"%",
"{",
"'id'",
":",
"uid",
"}",
")",
"try",
":",
"self",
".",
"visit_literal_block",
"(",
"node",
")",
"except",
"nodes",
".",
"SkipNode",
":",
"pass",
"self",
".",
"body",
".",
"append",
"(",
"'</section>\\n'",
")",
"self",
".",
"body",
".",
"append",
"(",
"'<section class=\"c-content-win\" id=\"c-content-%(id)s-win\">\\n'",
"%",
"{",
"'id'",
":",
"uid",
"}",
")",
"win_text",
"=",
"node",
"[",
"'win_console_text'",
"]",
"highlight_args",
"=",
"{",
"'force'",
":",
"True",
"}",
"if",
"'linenos'",
"in",
"node",
":",
"linenos",
"=",
"node",
"[",
"'linenos'",
"]",
"else",
":",
"linenos",
"=",
"win_text",
".",
"count",
"(",
"'\\n'",
")",
">=",
"self",
".",
"highlightlinenothreshold",
"-",
"1",
"def",
"warner",
"(",
"msg",
")",
":",
"self",
".",
"builder",
".",
"warn",
"(",
"msg",
",",
"(",
"self",
".",
"builder",
".",
"current_docname",
",",
"node",
".",
"line",
")",
")",
"highlighted",
"=",
"self",
".",
"highlighter",
".",
"highlight_block",
"(",
"win_text",
",",
"'doscon'",
",",
"warn",
"=",
"warner",
",",
"linenos",
"=",
"linenos",
",",
"*",
"*",
"highlight_args",
")",
"self",
".",
"body",
".",
"append",
"(",
"highlighted",
")",
"self",
".",
"body",
".",
"append",
"(",
"'</section>\\n'",
")",
"self",
".",
"body",
".",
"append",
"(",
"'</div>\\n'",
")",
"raise",
"nodes",
".",
"SkipNode",
"else",
":",
"self",
".",
"visit_literal_block",
"(",
"node",
")"
] | Generate HTML for the console directive. | [
"Generate",
"HTML",
"for",
"the",
"console",
"directive",
"."
] | train | https://github.com/asyncee/django-easy-select2/blob/f81bbaa91d0266029be7ef6d075d85f13273e3a5/docs/source/_ext/djangodocs.py#L364-L403 |
asyncee/django-easy-select2 | easy_select2/utils.py | select2_modelform_meta | def select2_modelform_meta(model,
meta_fields=None,
widgets=None,
attrs=None,
**kwargs):
"""
Return `Meta` class with Select2-enabled widgets for fields
with choices (e.g. ForeignKey, CharField, etc) for use with
ModelForm.
Arguments:
model - a model class to create `Meta` class for.
meta_fields - dictionary with `Meta` class fields, for
example, {'fields': ['id', 'name']}
attrs - select2 widget attributes (width, for example),
must be of type `dict`.
**kwargs - will be merged with meta_fields.
"""
widgets = widgets or {}
meta_fields = meta_fields or {}
# TODO: assert attrs is of type `dict`
for field in model._meta.fields:
if isinstance(field, ForeignKey) or field.choices:
widgets.update({field.name: Select2(select2attrs=attrs)})
for field in model._meta.many_to_many:
widgets.update({field.name: Select2Multiple(select2attrs=attrs)})
meta_fields.update({
'model': model,
'widgets': widgets,
})
if 'exclude' not in kwargs and 'fields' not in kwargs:
meta_fields.update({'exclude': []})
meta_fields.update(**kwargs)
meta = type('Meta', (object,), meta_fields)
return meta | python | def select2_modelform_meta(model,
meta_fields=None,
widgets=None,
attrs=None,
**kwargs):
"""
Return `Meta` class with Select2-enabled widgets for fields
with choices (e.g. ForeignKey, CharField, etc) for use with
ModelForm.
Arguments:
model - a model class to create `Meta` class for.
meta_fields - dictionary with `Meta` class fields, for
example, {'fields': ['id', 'name']}
attrs - select2 widget attributes (width, for example),
must be of type `dict`.
**kwargs - will be merged with meta_fields.
"""
widgets = widgets or {}
meta_fields = meta_fields or {}
# TODO: assert attrs is of type `dict`
for field in model._meta.fields:
if isinstance(field, ForeignKey) or field.choices:
widgets.update({field.name: Select2(select2attrs=attrs)})
for field in model._meta.many_to_many:
widgets.update({field.name: Select2Multiple(select2attrs=attrs)})
meta_fields.update({
'model': model,
'widgets': widgets,
})
if 'exclude' not in kwargs and 'fields' not in kwargs:
meta_fields.update({'exclude': []})
meta_fields.update(**kwargs)
meta = type('Meta', (object,), meta_fields)
return meta | [
"def",
"select2_modelform_meta",
"(",
"model",
",",
"meta_fields",
"=",
"None",
",",
"widgets",
"=",
"None",
",",
"attrs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"widgets",
"=",
"widgets",
"or",
"{",
"}",
"meta_fields",
"=",
"meta_fields",
"or",
"{",
"}",
"# TODO: assert attrs is of type `dict`",
"for",
"field",
"in",
"model",
".",
"_meta",
".",
"fields",
":",
"if",
"isinstance",
"(",
"field",
",",
"ForeignKey",
")",
"or",
"field",
".",
"choices",
":",
"widgets",
".",
"update",
"(",
"{",
"field",
".",
"name",
":",
"Select2",
"(",
"select2attrs",
"=",
"attrs",
")",
"}",
")",
"for",
"field",
"in",
"model",
".",
"_meta",
".",
"many_to_many",
":",
"widgets",
".",
"update",
"(",
"{",
"field",
".",
"name",
":",
"Select2Multiple",
"(",
"select2attrs",
"=",
"attrs",
")",
"}",
")",
"meta_fields",
".",
"update",
"(",
"{",
"'model'",
":",
"model",
",",
"'widgets'",
":",
"widgets",
",",
"}",
")",
"if",
"'exclude'",
"not",
"in",
"kwargs",
"and",
"'fields'",
"not",
"in",
"kwargs",
":",
"meta_fields",
".",
"update",
"(",
"{",
"'exclude'",
":",
"[",
"]",
"}",
")",
"meta_fields",
".",
"update",
"(",
"*",
"*",
"kwargs",
")",
"meta",
"=",
"type",
"(",
"'Meta'",
",",
"(",
"object",
",",
")",
",",
"meta_fields",
")",
"return",
"meta"
] | Return `Meta` class with Select2-enabled widgets for fields
with choices (e.g. ForeignKey, CharField, etc) for use with
ModelForm.
Arguments:
model - a model class to create `Meta` class for.
meta_fields - dictionary with `Meta` class fields, for
example, {'fields': ['id', 'name']}
attrs - select2 widget attributes (width, for example),
must be of type `dict`.
**kwargs - will be merged with meta_fields. | [
"Return",
"Meta",
"class",
"with",
"Select2",
"-",
"enabled",
"widgets",
"for",
"fields",
"with",
"choices",
"(",
"e",
".",
"g",
".",
"ForeignKey",
"CharField",
"etc",
")",
"for",
"use",
"with",
"ModelForm",
"."
] | train | https://github.com/asyncee/django-easy-select2/blob/f81bbaa91d0266029be7ef6d075d85f13273e3a5/easy_select2/utils.py#L10-L49 |
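A short sketch of how select2_modelform_meta() is typically used inside a ModelForm. The Book model, the myapp package and the 500px width are placeholders; the import path is taken from the module path easy_select2/utils.py shown in this record.

from django import forms
from easy_select2.utils import select2_modelform_meta

from myapp.models import Book  # hypothetical model with FK / choice fields

class BookForm(forms.ModelForm):
    # Meta gets Select2 / Select2Multiple widgets for every FK, choice and M2M field.
    Meta = select2_modelform_meta(Book, attrs={'width': '500px'})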
asyncee/django-easy-select2 | easy_select2/utils.py | select2_modelform | def select2_modelform(
model, attrs=None, form_class=es2_forms.FixedModelForm):
"""
Return ModelForm class for model with select2 widgets.
Arguments:
attrs: select2 widget attributes (width, for example) of type `dict`.
form_class: modelform base class, `forms.ModelForm` by default.
::
SomeModelForm = select2_modelform(models.SomeModelBanner)
is the same as::
class SomeModelForm(forms.ModelForm):
Meta = select2_modelform_meta(models.SomeModelForm)
"""
classname = '%sForm' % model._meta.object_name
meta = select2_modelform_meta(model, attrs=attrs)
return type(classname, (form_class,), {'Meta': meta}) | python | def select2_modelform(
model, attrs=None, form_class=es2_forms.FixedModelForm):
"""
Return ModelForm class for model with select2 widgets.
Arguments:
attrs: select2 widget attributes (width, for example) of type `dict`.
form_class: modelform base class, `forms.ModelForm` by default.
::
SomeModelForm = select2_modelform(models.SomeModelBanner)
is the same as::
class SomeModelForm(forms.ModelForm):
Meta = select2_modelform_meta(models.SomeModelForm)
"""
classname = '%sForm' % model._meta.object_name
meta = select2_modelform_meta(model, attrs=attrs)
return type(classname, (form_class,), {'Meta': meta}) | [
"def",
"select2_modelform",
"(",
"model",
",",
"attrs",
"=",
"None",
",",
"form_class",
"=",
"es2_forms",
".",
"FixedModelForm",
")",
":",
"classname",
"=",
"'%sForm'",
"%",
"model",
".",
"_meta",
".",
"object_name",
"meta",
"=",
"select2_modelform_meta",
"(",
"model",
",",
"attrs",
"=",
"attrs",
")",
"return",
"type",
"(",
"classname",
",",
"(",
"form_class",
",",
")",
",",
"{",
"'Meta'",
":",
"meta",
"}",
")"
] | Return ModelForm class for model with select2 widgets.
Arguments:
attrs: select2 widget attributes (width, for example) of type `dict`.
form_class: modelform base class, `forms.ModelForm` by default.
::
SomeModelForm = select2_modelform(models.SomeModelBanner)
is the same as::
class SomeModelForm(forms.ModelForm):
Meta = select2_modelform_meta(models.SomeModelForm) | [
"Return",
"ModelForm",
"class",
"for",
"model",
"with",
"select2",
"widgets",
"."
] | train | https://github.com/asyncee/django-easy-select2/blob/f81bbaa91d0266029be7ef6d075d85f13273e3a5/easy_select2/utils.py#L52-L72 |
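And the generated form class is usually handed straight to the Django admin, roughly like this (model and app names are again placeholders):

from django.contrib import admin
from easy_select2.utils import select2_modelform

from myapp.models import Book  # hypothetical model

BookForm = select2_modelform(Book, attrs={'width': '250px'})

class BookAdmin(admin.ModelAdmin):
    form = BookForm

admin.site.register(Book, BookAdmin)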
scour-project/scour | scour/svg_transform.py | Lexer.lex | def lex(self, text):
""" Yield (token_type, str_data) tokens.
The last token will be (EOF, None) where EOF is the singleton object
defined in this module.
"""
for match in self.regex.finditer(text):
for name, _ in self.lexicon:
m = match.group(name)
if m is not None:
yield (name, m)
break
yield (EOF, None) | python | def lex(self, text):
""" Yield (token_type, str_data) tokens.
The last token will be (EOF, None) where EOF is the singleton object
defined in this module.
"""
for match in self.regex.finditer(text):
for name, _ in self.lexicon:
m = match.group(name)
if m is not None:
yield (name, m)
break
yield (EOF, None) | [
"def",
"lex",
"(",
"self",
",",
"text",
")",
":",
"for",
"match",
"in",
"self",
".",
"regex",
".",
"finditer",
"(",
"text",
")",
":",
"for",
"name",
",",
"_",
"in",
"self",
".",
"lexicon",
":",
"m",
"=",
"match",
".",
"group",
"(",
"name",
")",
"if",
"m",
"is",
"not",
"None",
":",
"yield",
"(",
"name",
",",
"m",
")",
"break",
"yield",
"(",
"EOF",
",",
"None",
")"
] | Yield (token_type, str_data) tokens.
The last token will be (EOF, None) where EOF is the singleton object
defined in this module. | [
"Yield",
"(",
"token_type",
"str_data",
")",
"tokens",
"."
] | train | https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/svg_transform.py#L105-L117 |
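A small sketch of consuming the token stream from Lexer.lex(). The Lexer constructor is not part of this record, so the helper below just takes an already-built lexer; EOF is assumed to be importable from the same module (scour/svg_transform.py), since the docstring says it is defined there.

from scour.svg_transform import EOF

def dump_tokens(lexer, text):
    """Print every (token_type, data) pair until the (EOF, None) sentinel."""
    for token_type, data in lexer.lex(text):
        if token_type is EOF:
            break
        print(token_type, repr(data))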
scour-project/scour | scour/svg_transform.py | SVGTransformationParser.parse | def parse(self, text):
""" Parse a string of SVG transform="" data.
"""
gen = self.lexer.lex(text)
next_val_fn = partial(next, *(gen,))
commands = []
token = next_val_fn()
while token[0] is not EOF:
command, token = self.rule_svg_transform(next_val_fn, token)
commands.append(command)
return commands | python | def parse(self, text):
""" Parse a string of SVG transform="" data.
"""
gen = self.lexer.lex(text)
next_val_fn = partial(next, *(gen,))
commands = []
token = next_val_fn()
while token[0] is not EOF:
command, token = self.rule_svg_transform(next_val_fn, token)
commands.append(command)
return commands | [
"def",
"parse",
"(",
"self",
",",
"text",
")",
":",
"gen",
"=",
"self",
".",
"lexer",
".",
"lex",
"(",
"text",
")",
"next_val_fn",
"=",
"partial",
"(",
"next",
",",
"*",
"(",
"gen",
",",
")",
")",
"commands",
"=",
"[",
"]",
"token",
"=",
"next_val_fn",
"(",
")",
"while",
"token",
"[",
"0",
"]",
"is",
"not",
"EOF",
":",
"command",
",",
"token",
"=",
"self",
".",
"rule_svg_transform",
"(",
"next_val_fn",
",",
"token",
")",
"commands",
".",
"append",
"(",
"command",
")",
"return",
"commands"
] | Parse a string of SVG transform="" data. | [
"Parse",
"a",
"string",
"of",
"SVG",
"transform",
"=",
"data",
"."
] | train | https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/svg_transform.py#L154-L165 |
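A hedged usage sketch for the transform parser. Only parse() is documented in this record; the no-argument constructor and the exact shape of the returned command list are assumptions.

from scour.svg_transform import SVGTransformationParser

parser = SVGTransformationParser()
commands = parser.parse("translate(30,-40) rotate(45)")
# Expected to be a list with one entry per transform command,
# e.g. something like [['translate', [30.0, -40.0]], ['rotate', [45.0]]].
print(commands)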
scour-project/scour | scour/svg_regex.py | SVGPathParser.parse | def parse(self, text):
""" Parse a string of SVG <path> data.
"""
gen = self.lexer.lex(text)
next_val_fn = partial(next, *(gen,))
token = next_val_fn()
return self.rule_svg_path(next_val_fn, token) | python | def parse(self, text):
""" Parse a string of SVG <path> data.
"""
gen = self.lexer.lex(text)
next_val_fn = partial(next, *(gen,))
token = next_val_fn()
return self.rule_svg_path(next_val_fn, token) | [
"def",
"parse",
"(",
"self",
",",
"text",
")",
":",
"gen",
"=",
"self",
".",
"lexer",
".",
"lex",
"(",
"text",
")",
"next_val_fn",
"=",
"partial",
"(",
"next",
",",
"*",
"(",
"gen",
",",
")",
")",
"token",
"=",
"next_val_fn",
"(",
")",
"return",
"self",
".",
"rule_svg_path",
"(",
"next_val_fn",
",",
"token",
")"
] | Parse a string of SVG <path> data. | [
"Parse",
"a",
"string",
"of",
"SVG",
"<path",
">",
"data",
"."
] | train | https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/svg_regex.py#L154-L160 |
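The same pattern applies to the path-data parser; again the constructor and the output shape are assumptions, only parse() itself is shown above.

from scour.svg_regex import SVGPathParser

path_parser = SVGPathParser()
segments = path_parser.parse("M 10 10 L 90 90 Z")
print(segments)  # one entry per path command (moveto, lineto, closepath)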
scour-project/scour | scour/scour.py | findElementsWithId | def findElementsWithId(node, elems=None):
"""
Returns all elements with id attributes
"""
if elems is None:
elems = {}
id = node.getAttribute('id')
if id != '':
elems[id] = node
if node.hasChildNodes():
for child in node.childNodes:
# from http://www.w3.org/TR/DOM-Level-2-Core/idl-definitions.html
# we are only really interested in nodes of type Element (1)
if child.nodeType == Node.ELEMENT_NODE:
findElementsWithId(child, elems)
return elems | python | def findElementsWithId(node, elems=None):
"""
Returns all elements with id attributes
"""
if elems is None:
elems = {}
id = node.getAttribute('id')
if id != '':
elems[id] = node
if node.hasChildNodes():
for child in node.childNodes:
# from http://www.w3.org/TR/DOM-Level-2-Core/idl-definitions.html
# we are only really interested in nodes of type Element (1)
if child.nodeType == Node.ELEMENT_NODE:
findElementsWithId(child, elems)
return elems | [
"def",
"findElementsWithId",
"(",
"node",
",",
"elems",
"=",
"None",
")",
":",
"if",
"elems",
"is",
"None",
":",
"elems",
"=",
"{",
"}",
"id",
"=",
"node",
".",
"getAttribute",
"(",
"'id'",
")",
"if",
"id",
"!=",
"''",
":",
"elems",
"[",
"id",
"]",
"=",
"node",
"if",
"node",
".",
"hasChildNodes",
"(",
")",
":",
"for",
"child",
"in",
"node",
".",
"childNodes",
":",
"# from http://www.w3.org/TR/DOM-Level-2-Core/idl-definitions.html",
"# we are only really interested in nodes of type Element (1)",
"if",
"child",
".",
"nodeType",
"==",
"Node",
".",
"ELEMENT_NODE",
":",
"findElementsWithId",
"(",
"child",
",",
"elems",
")",
"return",
"elems"
] | Returns all elements with id attributes | [
"Returns",
"all",
"elements",
"with",
"id",
"attributes"
] | train | https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L524-L539 |
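An end-to-end sketch of findElementsWithId() on a tiny parsed SVG document; the SVG markup is invented.

import xml.dom.minidom

from scour.scour import findElementsWithId

svg = ('<svg xmlns="http://www.w3.org/2000/svg">'
       '<defs><linearGradient id="grad1"/></defs>'
       '<rect id="box" fill="url(#grad1)"/></svg>')
doc = xml.dom.minidom.parseString(svg)

elems = findElementsWithId(doc.documentElement)
print(sorted(elems))  # ['box', 'grad1']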
scour-project/scour | scour/scour.py | findReferencedElements | def findReferencedElements(node, ids=None):
"""
Returns IDs of all referenced elements
- node is the node at which to start the search.
- returns a map which has the id as key and
each value is a list of nodes
Currently looks at 'xlink:href' and all attributes in 'referencingProps'
"""
global referencingProps
if ids is None:
ids = {}
# TODO: input argument ids is clunky here (see below how it is called)
# GZ: alternative to passing dict, use **kwargs
# if this node is a style element, parse its text into CSS
if node.nodeName == 'style' and node.namespaceURI == NS['SVG']:
# one stretch of text, please! (we could use node.normalize(), but
# this actually modifies the node, and we don't want to keep
# whitespace around if there's any)
stylesheet = "".join([child.nodeValue for child in node.childNodes])
if stylesheet != '':
cssRules = parseCssString(stylesheet)
for rule in cssRules:
for propname in rule['properties']:
propval = rule['properties'][propname]
findReferencingProperty(node, propname, propval, ids)
return ids
# else if xlink:href is set, then grab the id
href = node.getAttributeNS(NS['XLINK'], 'href')
if href != '' and len(href) > 1 and href[0] == '#':
# we remove the hash mark from the beginning of the id
id = href[1:]
if id in ids:
ids[id].append(node)
else:
ids[id] = [node]
# now get all style properties and the fill, stroke, filter attributes
styles = node.getAttribute('style').split(';')
for style in styles:
propval = style.split(':')
if len(propval) == 2:
prop = propval[0].strip()
val = propval[1].strip()
findReferencingProperty(node, prop, val, ids)
for attr in referencingProps:
val = node.getAttribute(attr).strip()
if not val:
continue
findReferencingProperty(node, attr, val, ids)
if node.hasChildNodes():
for child in node.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
findReferencedElements(child, ids)
return ids | python | def findReferencedElements(node, ids=None):
"""
Returns IDs of all referenced elements
- node is the node at which to start the search.
- returns a map which has the id as key and
each value is a list of nodes
Currently looks at 'xlink:href' and all attributes in 'referencingProps'
"""
global referencingProps
if ids is None:
ids = {}
# TODO: input argument ids is clunky here (see below how it is called)
# GZ: alternative to passing dict, use **kwargs
# if this node is a style element, parse its text into CSS
if node.nodeName == 'style' and node.namespaceURI == NS['SVG']:
# one stretch of text, please! (we could use node.normalize(), but
# this actually modifies the node, and we don't want to keep
# whitespace around if there's any)
stylesheet = "".join([child.nodeValue for child in node.childNodes])
if stylesheet != '':
cssRules = parseCssString(stylesheet)
for rule in cssRules:
for propname in rule['properties']:
propval = rule['properties'][propname]
findReferencingProperty(node, propname, propval, ids)
return ids
# else if xlink:href is set, then grab the id
href = node.getAttributeNS(NS['XLINK'], 'href')
if href != '' and len(href) > 1 and href[0] == '#':
# we remove the hash mark from the beginning of the id
id = href[1:]
if id in ids:
ids[id].append(node)
else:
ids[id] = [node]
# now get all style properties and the fill, stroke, filter attributes
styles = node.getAttribute('style').split(';')
for style in styles:
propval = style.split(':')
if len(propval) == 2:
prop = propval[0].strip()
val = propval[1].strip()
findReferencingProperty(node, prop, val, ids)
for attr in referencingProps:
val = node.getAttribute(attr).strip()
if not val:
continue
findReferencingProperty(node, attr, val, ids)
if node.hasChildNodes():
for child in node.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
findReferencedElements(child, ids)
return ids | [
"def",
"findReferencedElements",
"(",
"node",
",",
"ids",
"=",
"None",
")",
":",
"global",
"referencingProps",
"if",
"ids",
"is",
"None",
":",
"ids",
"=",
"{",
"}",
"# TODO: input argument ids is clunky here (see below how it is called)",
"# GZ: alternative to passing dict, use **kwargs",
"# if this node is a style element, parse its text into CSS",
"if",
"node",
".",
"nodeName",
"==",
"'style'",
"and",
"node",
".",
"namespaceURI",
"==",
"NS",
"[",
"'SVG'",
"]",
":",
"# one stretch of text, please! (we could use node.normalize(), but",
"# this actually modifies the node, and we don't want to keep",
"# whitespace around if there's any)",
"stylesheet",
"=",
"\"\"",
".",
"join",
"(",
"[",
"child",
".",
"nodeValue",
"for",
"child",
"in",
"node",
".",
"childNodes",
"]",
")",
"if",
"stylesheet",
"!=",
"''",
":",
"cssRules",
"=",
"parseCssString",
"(",
"stylesheet",
")",
"for",
"rule",
"in",
"cssRules",
":",
"for",
"propname",
"in",
"rule",
"[",
"'properties'",
"]",
":",
"propval",
"=",
"rule",
"[",
"'properties'",
"]",
"[",
"propname",
"]",
"findReferencingProperty",
"(",
"node",
",",
"propname",
",",
"propval",
",",
"ids",
")",
"return",
"ids",
"# else if xlink:href is set, then grab the id",
"href",
"=",
"node",
".",
"getAttributeNS",
"(",
"NS",
"[",
"'XLINK'",
"]",
",",
"'href'",
")",
"if",
"href",
"!=",
"''",
"and",
"len",
"(",
"href",
")",
">",
"1",
"and",
"href",
"[",
"0",
"]",
"==",
"'#'",
":",
"# we remove the hash mark from the beginning of the id",
"id",
"=",
"href",
"[",
"1",
":",
"]",
"if",
"id",
"in",
"ids",
":",
"ids",
"[",
"id",
"]",
".",
"append",
"(",
"node",
")",
"else",
":",
"ids",
"[",
"id",
"]",
"=",
"[",
"node",
"]",
"# now get all style properties and the fill, stroke, filter attributes",
"styles",
"=",
"node",
".",
"getAttribute",
"(",
"'style'",
")",
".",
"split",
"(",
"';'",
")",
"for",
"style",
"in",
"styles",
":",
"propval",
"=",
"style",
".",
"split",
"(",
"':'",
")",
"if",
"len",
"(",
"propval",
")",
"==",
"2",
":",
"prop",
"=",
"propval",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"val",
"=",
"propval",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"findReferencingProperty",
"(",
"node",
",",
"prop",
",",
"val",
",",
"ids",
")",
"for",
"attr",
"in",
"referencingProps",
":",
"val",
"=",
"node",
".",
"getAttribute",
"(",
"attr",
")",
".",
"strip",
"(",
")",
"if",
"not",
"val",
":",
"continue",
"findReferencingProperty",
"(",
"node",
",",
"attr",
",",
"val",
",",
"ids",
")",
"if",
"node",
".",
"hasChildNodes",
"(",
")",
":",
"for",
"child",
"in",
"node",
".",
"childNodes",
":",
"if",
"child",
".",
"nodeType",
"==",
"Node",
".",
"ELEMENT_NODE",
":",
"findReferencedElements",
"(",
"child",
",",
"ids",
")",
"return",
"ids"
] | Returns IDs of all referenced elements
- node is the node at which to start the search.
- returns a map which has the id as key and
each value is a list of nodes
Currently looks at 'xlink:href' and all attributes in 'referencingProps' | [
"Returns",
"IDs",
"of",
"all",
"referenced",
"elements",
"-",
"node",
"is",
"the",
"node",
"at",
"which",
"to",
"start",
"the",
"search",
".",
"-",
"returns",
"a",
"map",
"which",
"has",
"the",
"id",
"as",
"key",
"and",
"each",
"value",
"is",
"is",
"a",
"list",
"of",
"nodes"
] | train | https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L545-L604 |
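The companion lookup, findReferencedElements(), maps each referenced id to the nodes that reference it. Same invented SVG snippet as in the previous sketch, plus one extra gradient that nothing points at:

import xml.dom.minidom

from scour.scour import findReferencedElements

svg = ('<svg xmlns="http://www.w3.org/2000/svg">'
       '<defs><linearGradient id="grad1"/><linearGradient id="unused"/></defs>'
       '<rect id="box" fill="url(#grad1)"/></svg>')
doc = xml.dom.minidom.parseString(svg)

refs = findReferencedElements(doc.documentElement)
print({rid: [n.nodeName for n in nodes] for rid, nodes in refs.items()})
# {'grad1': ['rect']} -- 'unused' and 'box' are never referenced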
scour-project/scour | scour/scour.py | removeUnreferencedElements | def removeUnreferencedElements(doc, keepDefs):
"""
Removes all unreferenced elements except for <svg>, <font>, <metadata>, <title>, and <desc>.
Also vacuums the defs of any non-referenced renderable elements.
Returns the number of unreferenced elements removed from the document.
"""
global _num_elements_removed
num = 0
# Remove certain unreferenced elements outside of defs
removeTags = ['linearGradient', 'radialGradient', 'pattern']
identifiedElements = findElementsWithId(doc.documentElement)
referencedIDs = findReferencedElements(doc.documentElement)
for id in identifiedElements:
if id not in referencedIDs:
goner = identifiedElements[id]
if (goner is not None and goner.nodeName in removeTags
and goner.parentNode is not None
and goner.parentNode.tagName != 'defs'):
goner.parentNode.removeChild(goner)
num += 1
_num_elements_removed += 1
if not keepDefs:
# Remove most unreferenced elements inside defs
defs = doc.documentElement.getElementsByTagName('defs')
for aDef in defs:
elemsToRemove = removeUnusedDefs(doc, aDef)
for elem in elemsToRemove:
elem.parentNode.removeChild(elem)
_num_elements_removed += 1
num += 1
return num | python | def removeUnreferencedElements(doc, keepDefs):
"""
Removes all unreferenced elements except for <svg>, <font>, <metadata>, <title>, and <desc>.
Also vacuums the defs of any non-referenced renderable elements.
Returns the number of unreferenced elements removed from the document.
"""
global _num_elements_removed
num = 0
# Remove certain unreferenced elements outside of defs
removeTags = ['linearGradient', 'radialGradient', 'pattern']
identifiedElements = findElementsWithId(doc.documentElement)
referencedIDs = findReferencedElements(doc.documentElement)
for id in identifiedElements:
if id not in referencedIDs:
goner = identifiedElements[id]
if (goner is not None and goner.nodeName in removeTags
and goner.parentNode is not None
and goner.parentNode.tagName != 'defs'):
goner.parentNode.removeChild(goner)
num += 1
_num_elements_removed += 1
if not keepDefs:
# Remove most unreferenced elements inside defs
defs = doc.documentElement.getElementsByTagName('defs')
for aDef in defs:
elemsToRemove = removeUnusedDefs(doc, aDef)
for elem in elemsToRemove:
elem.parentNode.removeChild(elem)
_num_elements_removed += 1
num += 1
return num | [
"def",
"removeUnreferencedElements",
"(",
"doc",
",",
"keepDefs",
")",
":",
"global",
"_num_elements_removed",
"num",
"=",
"0",
"# Remove certain unreferenced elements outside of defs",
"removeTags",
"=",
"[",
"'linearGradient'",
",",
"'radialGradient'",
",",
"'pattern'",
"]",
"identifiedElements",
"=",
"findElementsWithId",
"(",
"doc",
".",
"documentElement",
")",
"referencedIDs",
"=",
"findReferencedElements",
"(",
"doc",
".",
"documentElement",
")",
"for",
"id",
"in",
"identifiedElements",
":",
"if",
"id",
"not",
"in",
"referencedIDs",
":",
"goner",
"=",
"identifiedElements",
"[",
"id",
"]",
"if",
"(",
"goner",
"is",
"not",
"None",
"and",
"goner",
".",
"nodeName",
"in",
"removeTags",
"and",
"goner",
".",
"parentNode",
"is",
"not",
"None",
"and",
"goner",
".",
"parentNode",
".",
"tagName",
"!=",
"'defs'",
")",
":",
"goner",
".",
"parentNode",
".",
"removeChild",
"(",
"goner",
")",
"num",
"+=",
"1",
"_num_elements_removed",
"+=",
"1",
"if",
"not",
"keepDefs",
":",
"# Remove most unreferenced elements inside defs",
"defs",
"=",
"doc",
".",
"documentElement",
".",
"getElementsByTagName",
"(",
"'defs'",
")",
"for",
"aDef",
"in",
"defs",
":",
"elemsToRemove",
"=",
"removeUnusedDefs",
"(",
"doc",
",",
"aDef",
")",
"for",
"elem",
"in",
"elemsToRemove",
":",
"elem",
".",
"parentNode",
".",
"removeChild",
"(",
"elem",
")",
"_num_elements_removed",
"+=",
"1",
"num",
"+=",
"1",
"return",
"num"
] | Removes all unreferenced elements except for <svg>, <font>, <metadata>, <title>, and <desc>.
Also vacuums the defs of any non-referenced renderable elements.
Returns the number of unreferenced elements removed from the document. | [
"Removes",
"all",
"unreferenced",
"elements",
"except",
"for",
"<svg",
">",
"<font",
">",
"<metadata",
">",
"<title",
">",
"and",
"<desc",
">",
".",
"Also",
"vacuums",
"the",
"defs",
"of",
"any",
"non",
"-",
"referenced",
"renderable",
"elements",
"."
] | train | https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L656-L690 |
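Building on the same kind of document, removeUnreferencedElements() should drop the unreferenced gradient from <defs> (when keepDefs is False) while keeping the referenced one. The count printed below is expected rather than guaranteed here, because removeUnusedDefs() is not part of this record.

import xml.dom.minidom

from scour.scour import removeUnreferencedElements

svg = ('<svg xmlns="http://www.w3.org/2000/svg">'
       '<defs><linearGradient id="grad1"/><linearGradient id="unused"/></defs>'
       '<rect fill="url(#grad1)"/></svg>')
doc = xml.dom.minidom.parseString(svg)

removed = removeUnreferencedElements(doc, keepDefs=False)
print(removed)                       # expected: 1 (the 'unused' gradient)
print(doc.documentElement.toxml())   # 'grad1' is still there, 'unused' is gone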
scour-project/scour | scour/scour.py | shortenIDs | def shortenIDs(doc, prefix, unprotectedElements=None):
"""
Shortens ID names used in the document. ID names referenced the most often are assigned the
shortest ID names.
If the list unprotectedElements is provided, only IDs from this list will be shortened.
Returns the number of bytes saved by shortening ID names in the document.
"""
num = 0
identifiedElements = findElementsWithId(doc.documentElement)
if unprotectedElements is None:
unprotectedElements = identifiedElements
referencedIDs = findReferencedElements(doc.documentElement)
# Make idList (list of idnames) sorted by reference count
# descending, so the highest reference count is first.
# First check that there's actually a defining element for the current ID name.
# (Cyn: I've seen documents with #id references but no element with that ID!)
idList = [(len(referencedIDs[rid]), rid) for rid in referencedIDs
if rid in unprotectedElements]
idList.sort(reverse=True)
idList = [rid for count, rid in idList]
# Add unreferenced IDs to end of idList in arbitrary order
idList.extend([rid for rid in unprotectedElements if rid not in idList])
curIdNum = 1
for rid in idList:
curId = intToID(curIdNum, prefix)
# First make sure that *this* element isn't already using
# the ID name we want to give it.
if curId != rid:
# Then, skip ahead if the new ID is already in identifiedElement.
while curId in identifiedElements:
curIdNum += 1
curId = intToID(curIdNum, prefix)
# Then go rename it.
num += renameID(doc, rid, curId, identifiedElements, referencedIDs)
curIdNum += 1
return num | python | def shortenIDs(doc, prefix, unprotectedElements=None):
"""
Shortens ID names used in the document. ID names referenced the most often are assigned the
shortest ID names.
If the list unprotectedElements is provided, only IDs from this list will be shortened.
Returns the number of bytes saved by shortening ID names in the document.
"""
num = 0
identifiedElements = findElementsWithId(doc.documentElement)
if unprotectedElements is None:
unprotectedElements = identifiedElements
referencedIDs = findReferencedElements(doc.documentElement)
# Make idList (list of idnames) sorted by reference count
# descending, so the highest reference count is first.
# First check that there's actually a defining element for the current ID name.
# (Cyn: I've seen documents with #id references but no element with that ID!)
idList = [(len(referencedIDs[rid]), rid) for rid in referencedIDs
if rid in unprotectedElements]
idList.sort(reverse=True)
idList = [rid for count, rid in idList]
# Add unreferenced IDs to end of idList in arbitrary order
idList.extend([rid for rid in unprotectedElements if rid not in idList])
curIdNum = 1
for rid in idList:
curId = intToID(curIdNum, prefix)
# First make sure that *this* element isn't already using
# the ID name we want to give it.
if curId != rid:
# Then, skip ahead if the new ID is already in identifiedElement.
while curId in identifiedElements:
curIdNum += 1
curId = intToID(curIdNum, prefix)
# Then go rename it.
num += renameID(doc, rid, curId, identifiedElements, referencedIDs)
curIdNum += 1
return num | [
"def",
"shortenIDs",
"(",
"doc",
",",
"prefix",
",",
"unprotectedElements",
"=",
"None",
")",
":",
"num",
"=",
"0",
"identifiedElements",
"=",
"findElementsWithId",
"(",
"doc",
".",
"documentElement",
")",
"if",
"unprotectedElements",
"is",
"None",
":",
"unprotectedElements",
"=",
"identifiedElements",
"referencedIDs",
"=",
"findReferencedElements",
"(",
"doc",
".",
"documentElement",
")",
"# Make idList (list of idnames) sorted by reference count",
"# descending, so the highest reference count is first.",
"# First check that there's actually a defining element for the current ID name.",
"# (Cyn: I've seen documents with #id references but no element with that ID!)",
"idList",
"=",
"[",
"(",
"len",
"(",
"referencedIDs",
"[",
"rid",
"]",
")",
",",
"rid",
")",
"for",
"rid",
"in",
"referencedIDs",
"if",
"rid",
"in",
"unprotectedElements",
"]",
"idList",
".",
"sort",
"(",
"reverse",
"=",
"True",
")",
"idList",
"=",
"[",
"rid",
"for",
"count",
",",
"rid",
"in",
"idList",
"]",
"# Add unreferenced IDs to end of idList in arbitrary order",
"idList",
".",
"extend",
"(",
"[",
"rid",
"for",
"rid",
"in",
"unprotectedElements",
"if",
"rid",
"not",
"in",
"idList",
"]",
")",
"curIdNum",
"=",
"1",
"for",
"rid",
"in",
"idList",
":",
"curId",
"=",
"intToID",
"(",
"curIdNum",
",",
"prefix",
")",
"# First make sure that *this* element isn't already using",
"# the ID name we want to give it.",
"if",
"curId",
"!=",
"rid",
":",
"# Then, skip ahead if the new ID is already in identifiedElement.",
"while",
"curId",
"in",
"identifiedElements",
":",
"curIdNum",
"+=",
"1",
"curId",
"=",
"intToID",
"(",
"curIdNum",
",",
"prefix",
")",
"# Then go rename it.",
"num",
"+=",
"renameID",
"(",
"doc",
",",
"rid",
",",
"curId",
",",
"identifiedElements",
",",
"referencedIDs",
")",
"curIdNum",
"+=",
"1",
"return",
"num"
] | Shortens ID names used in the document. ID names referenced the most often are assigned the
shortest ID names.
If the list unprotectedElements is provided, only IDs from this list will be shortened.
Returns the number of bytes saved by shortening ID names in the document. | [
"Shortens",
"ID",
"names",
"used",
"in",
"the",
"document",
".",
"ID",
"names",
"referenced",
"the",
"most",
"often",
"are",
"assigned",
"the",
"shortest",
"ID",
"names",
".",
"If",
"the",
"list",
"unprotectedElements",
"is",
"provided",
"only",
"IDs",
"from",
"this",
"list",
"will",
"be",
"shortened",
"."
] | train | https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L693-L735 |
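shortenIDs() renames ids to spreadsheet-style names (most-referenced first) and rewrites every reference to match; a sketch with one long gradient id, again on an invented document:

import xml.dom.minidom

from scour.scour import shortenIDs

svg = ('<svg xmlns="http://www.w3.org/2000/svg">'
       '<defs><linearGradient id="myVeryLongGradientName"/></defs>'
       '<rect fill="url(#myVeryLongGradientName)"/></svg>')
doc = xml.dom.minidom.parseString(svg)

saved = shortenIDs(doc, prefix='')
print(saved)                         # bytes saved by the rename
print(doc.documentElement.toxml())   # the gradient is now id="a", fill="url(#a)"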
scour-project/scour | scour/scour.py | intToID | def intToID(idnum, prefix):
"""
Returns the ID name for the given ID number, spreadsheet-style, i.e. from a to z,
then from aa to az, ba to bz, etc., until zz.
"""
rid = ''
while idnum > 0:
idnum -= 1
rid = chr((idnum % 26) + ord('a')) + rid
idnum = int(idnum / 26)
return prefix + rid | python | def intToID(idnum, prefix):
"""
Returns the ID name for the given ID number, spreadsheet-style, i.e. from a to z,
then from aa to az, ba to bz, etc., until zz.
"""
rid = ''
while idnum > 0:
idnum -= 1
rid = chr((idnum % 26) + ord('a')) + rid
idnum = int(idnum / 26)
return prefix + rid | [
"def",
"intToID",
"(",
"idnum",
",",
"prefix",
")",
":",
"rid",
"=",
"''",
"while",
"idnum",
">",
"0",
":",
"idnum",
"-=",
"1",
"rid",
"=",
"chr",
"(",
"(",
"idnum",
"%",
"26",
")",
"+",
"ord",
"(",
"'a'",
")",
")",
"+",
"rid",
"idnum",
"=",
"int",
"(",
"idnum",
"/",
"26",
")",
"return",
"prefix",
"+",
"rid"
] | Returns the ID name for the given ID number, spreadsheet-style, i.e. from a to z,
then from aa to az, ba to bz, etc., until zz. | [
"Returns",
"the",
"ID",
"name",
"for",
"the",
"given",
"ID",
"number",
"spreadsheet",
"-",
"style",
"i",
".",
"e",
".",
"from",
"a",
"to",
"z",
"then",
"from",
"aa",
"to",
"az",
"ba",
"to",
"bz",
"etc",
".",
"until",
"zz",
"."
] | train | https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L738-L750 |
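The spreadsheet-style numbering that intToID() implements is easiest to see with a few worked values:

from scour.scour import intToID

for n in (1, 2, 26, 27, 52, 53, 702):
    print(n, intToID(n, ''))
# 1 a, 2 b, 26 z, 27 aa, 52 az, 53 ba, 702 zz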
scour-project/scour | scour/scour.py | renameID | def renameID(doc, idFrom, idTo, identifiedElements, referencedIDs):
"""
Changes the ID name from idFrom to idTo, on the declaring element
as well as all references in the document doc.
Updates identifiedElements and referencedIDs.
Does not handle the case where idTo is already the ID name
of another element in doc.
Returns the number of bytes saved by this replacement.
"""
num = 0
definingNode = identifiedElements[idFrom]
definingNode.setAttribute("id", idTo)
del identifiedElements[idFrom]
identifiedElements[idTo] = definingNode
num += len(idFrom) - len(idTo)
# Update references to renamed node
referringNodes = referencedIDs.get(idFrom)
if referringNodes is not None:
# Look for the idFrom ID name in each of the referencing elements,
# exactly like findReferencedElements would.
# Cyn: Duplicated processing!
for node in referringNodes:
# if this node is a style element, parse its text into CSS
if node.nodeName == 'style' and node.namespaceURI == NS['SVG']:
# node.firstChild will be either a CDATA or a Text node now
if node.firstChild is not None:
# concatenate the value of all children, in case
# there's a CDATASection node surrounded by whitespace
# nodes
# (node.normalize() will NOT work here, it only acts on Text nodes)
oldValue = "".join([child.nodeValue for child in node.childNodes])
# not going to reparse the whole thing
newValue = oldValue.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
newValue = newValue.replace("url(#'" + idFrom + "')", 'url(#' + idTo + ')')
newValue = newValue.replace('url(#"' + idFrom + '")', 'url(#' + idTo + ')')
# and now replace all the children with this new stylesheet.
# again, this is in case the stylesheet was a CDATASection
node.childNodes[:] = [node.ownerDocument.createTextNode(newValue)]
num += len(oldValue) - len(newValue)
# if xlink:href is set to #idFrom, then change the id
href = node.getAttributeNS(NS['XLINK'], 'href')
if href == '#' + idFrom:
node.setAttributeNS(NS['XLINK'], 'href', '#' + idTo)
num += len(idFrom) - len(idTo)
# if the style has url(#idFrom), then change the id
styles = node.getAttribute('style')
if styles != '':
newValue = styles.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
newValue = newValue.replace("url('#" + idFrom + "')", 'url(#' + idTo + ')')
newValue = newValue.replace('url("#' + idFrom + '")', 'url(#' + idTo + ')')
node.setAttribute('style', newValue)
num += len(styles) - len(newValue)
# now try the fill, stroke, filter attributes
for attr in referencingProps:
oldValue = node.getAttribute(attr)
if oldValue != '':
newValue = oldValue.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
newValue = newValue.replace("url('#" + idFrom + "')", 'url(#' + idTo + ')')
newValue = newValue.replace('url("#' + idFrom + '")', 'url(#' + idTo + ')')
node.setAttribute(attr, newValue)
num += len(oldValue) - len(newValue)
del referencedIDs[idFrom]
referencedIDs[idTo] = referringNodes
return num | python | def renameID(doc, idFrom, idTo, identifiedElements, referencedIDs):
"""
Changes the ID name from idFrom to idTo, on the declaring element
as well as all references in the document doc.
Updates identifiedElements and referencedIDs.
Does not handle the case where idTo is already the ID name
of another element in doc.
Returns the number of bytes saved by this replacement.
"""
num = 0
definingNode = identifiedElements[idFrom]
definingNode.setAttribute("id", idTo)
del identifiedElements[idFrom]
identifiedElements[idTo] = definingNode
num += len(idFrom) - len(idTo)
# Update references to renamed node
referringNodes = referencedIDs.get(idFrom)
if referringNodes is not None:
# Look for the idFrom ID name in each of the referencing elements,
# exactly like findReferencedElements would.
# Cyn: Duplicated processing!
for node in referringNodes:
# if this node is a style element, parse its text into CSS
if node.nodeName == 'style' and node.namespaceURI == NS['SVG']:
# node.firstChild will be either a CDATA or a Text node now
if node.firstChild is not None:
# concatenate the value of all children, in case
# there's a CDATASection node surrounded by whitespace
# nodes
# (node.normalize() will NOT work here, it only acts on Text nodes)
oldValue = "".join([child.nodeValue for child in node.childNodes])
# not going to reparse the whole thing
newValue = oldValue.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
newValue = newValue.replace("url(#'" + idFrom + "')", 'url(#' + idTo + ')')
newValue = newValue.replace('url(#"' + idFrom + '")', 'url(#' + idTo + ')')
# and now replace all the children with this new stylesheet.
# again, this is in case the stylesheet was a CDATASection
node.childNodes[:] = [node.ownerDocument.createTextNode(newValue)]
num += len(oldValue) - len(newValue)
# if xlink:href is set to #idFrom, then change the id
href = node.getAttributeNS(NS['XLINK'], 'href')
if href == '#' + idFrom:
node.setAttributeNS(NS['XLINK'], 'href', '#' + idTo)
num += len(idFrom) - len(idTo)
# if the style has url(#idFrom), then change the id
styles = node.getAttribute('style')
if styles != '':
newValue = styles.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
newValue = newValue.replace("url('#" + idFrom + "')", 'url(#' + idTo + ')')
newValue = newValue.replace('url("#' + idFrom + '")', 'url(#' + idTo + ')')
node.setAttribute('style', newValue)
num += len(styles) - len(newValue)
# now try the fill, stroke, filter attributes
for attr in referencingProps:
oldValue = node.getAttribute(attr)
if oldValue != '':
newValue = oldValue.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
newValue = newValue.replace("url('#" + idFrom + "')", 'url(#' + idTo + ')')
newValue = newValue.replace('url("#' + idFrom + '")', 'url(#' + idTo + ')')
node.setAttribute(attr, newValue)
num += len(oldValue) - len(newValue)
del referencedIDs[idFrom]
referencedIDs[idTo] = referringNodes
return num | [
"def",
"renameID",
"(",
"doc",
",",
"idFrom",
",",
"idTo",
",",
"identifiedElements",
",",
"referencedIDs",
")",
":",
"num",
"=",
"0",
"definingNode",
"=",
"identifiedElements",
"[",
"idFrom",
"]",
"definingNode",
".",
"setAttribute",
"(",
"\"id\"",
",",
"idTo",
")",
"del",
"identifiedElements",
"[",
"idFrom",
"]",
"identifiedElements",
"[",
"idTo",
"]",
"=",
"definingNode",
"num",
"+=",
"len",
"(",
"idFrom",
")",
"-",
"len",
"(",
"idTo",
")",
"# Update references to renamed node",
"referringNodes",
"=",
"referencedIDs",
".",
"get",
"(",
"idFrom",
")",
"if",
"referringNodes",
"is",
"not",
"None",
":",
"# Look for the idFrom ID name in each of the referencing elements,",
"# exactly like findReferencedElements would.",
"# Cyn: Duplicated processing!",
"for",
"node",
"in",
"referringNodes",
":",
"# if this node is a style element, parse its text into CSS",
"if",
"node",
".",
"nodeName",
"==",
"'style'",
"and",
"node",
".",
"namespaceURI",
"==",
"NS",
"[",
"'SVG'",
"]",
":",
"# node.firstChild will be either a CDATA or a Text node now",
"if",
"node",
".",
"firstChild",
"is",
"not",
"None",
":",
"# concatenate the value of all children, in case",
"# there's a CDATASection node surrounded by whitespace",
"# nodes",
"# (node.normalize() will NOT work here, it only acts on Text nodes)",
"oldValue",
"=",
"\"\"",
".",
"join",
"(",
"[",
"child",
".",
"nodeValue",
"for",
"child",
"in",
"node",
".",
"childNodes",
"]",
")",
"# not going to reparse the whole thing",
"newValue",
"=",
"oldValue",
".",
"replace",
"(",
"'url(#'",
"+",
"idFrom",
"+",
"')'",
",",
"'url(#'",
"+",
"idTo",
"+",
"')'",
")",
"newValue",
"=",
"newValue",
".",
"replace",
"(",
"\"url(#'\"",
"+",
"idFrom",
"+",
"\"')\"",
",",
"'url(#'",
"+",
"idTo",
"+",
"')'",
")",
"newValue",
"=",
"newValue",
".",
"replace",
"(",
"'url(#\"'",
"+",
"idFrom",
"+",
"'\")'",
",",
"'url(#'",
"+",
"idTo",
"+",
"')'",
")",
"# and now replace all the children with this new stylesheet.",
"# again, this is in case the stylesheet was a CDATASection",
"node",
".",
"childNodes",
"[",
":",
"]",
"=",
"[",
"node",
".",
"ownerDocument",
".",
"createTextNode",
"(",
"newValue",
")",
"]",
"num",
"+=",
"len",
"(",
"oldValue",
")",
"-",
"len",
"(",
"newValue",
")",
"# if xlink:href is set to #idFrom, then change the id",
"href",
"=",
"node",
".",
"getAttributeNS",
"(",
"NS",
"[",
"'XLINK'",
"]",
",",
"'href'",
")",
"if",
"href",
"==",
"'#'",
"+",
"idFrom",
":",
"node",
".",
"setAttributeNS",
"(",
"NS",
"[",
"'XLINK'",
"]",
",",
"'href'",
",",
"'#'",
"+",
"idTo",
")",
"num",
"+=",
"len",
"(",
"idFrom",
")",
"-",
"len",
"(",
"idTo",
")",
"# if the style has url(#idFrom), then change the id",
"styles",
"=",
"node",
".",
"getAttribute",
"(",
"'style'",
")",
"if",
"styles",
"!=",
"''",
":",
"newValue",
"=",
"styles",
".",
"replace",
"(",
"'url(#'",
"+",
"idFrom",
"+",
"')'",
",",
"'url(#'",
"+",
"idTo",
"+",
"')'",
")",
"newValue",
"=",
"newValue",
".",
"replace",
"(",
"\"url('#\"",
"+",
"idFrom",
"+",
"\"')\"",
",",
"'url(#'",
"+",
"idTo",
"+",
"')'",
")",
"newValue",
"=",
"newValue",
".",
"replace",
"(",
"'url(\"#'",
"+",
"idFrom",
"+",
"'\")'",
",",
"'url(#'",
"+",
"idTo",
"+",
"')'",
")",
"node",
".",
"setAttribute",
"(",
"'style'",
",",
"newValue",
")",
"num",
"+=",
"len",
"(",
"styles",
")",
"-",
"len",
"(",
"newValue",
")",
"# now try the fill, stroke, filter attributes",
"for",
"attr",
"in",
"referencingProps",
":",
"oldValue",
"=",
"node",
".",
"getAttribute",
"(",
"attr",
")",
"if",
"oldValue",
"!=",
"''",
":",
"newValue",
"=",
"oldValue",
".",
"replace",
"(",
"'url(#'",
"+",
"idFrom",
"+",
"')'",
",",
"'url(#'",
"+",
"idTo",
"+",
"')'",
")",
"newValue",
"=",
"newValue",
".",
"replace",
"(",
"\"url('#\"",
"+",
"idFrom",
"+",
"\"')\"",
",",
"'url(#'",
"+",
"idTo",
"+",
"')'",
")",
"newValue",
"=",
"newValue",
".",
"replace",
"(",
"'url(\"#'",
"+",
"idFrom",
"+",
"'\")'",
",",
"'url(#'",
"+",
"idTo",
"+",
"')'",
")",
"node",
".",
"setAttribute",
"(",
"attr",
",",
"newValue",
")",
"num",
"+=",
"len",
"(",
"oldValue",
")",
"-",
"len",
"(",
"newValue",
")",
"del",
"referencedIDs",
"[",
"idFrom",
"]",
"referencedIDs",
"[",
"idTo",
"]",
"=",
"referringNodes",
"return",
"num"
] | Changes the ID name from idFrom to idTo, on the declaring element
as well as all references in the document doc.
Updates identifiedElements and referencedIDs.
Does not handle the case where idTo is already the ID name
of another element in doc.
Returns the number of bytes saved by this replacement. | [
"Changes",
"the",
"ID",
"name",
"from",
"idFrom",
"to",
"idTo",
"on",
"the",
"declaring",
"element",
"as",
"well",
"as",
"all",
"references",
"in",
"the",
"document",
"doc",
"."
] | train | https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L753-L828 |
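The renameID row ends here. As a quick illustration of how this function might be driven, the following is a hedged usage sketch, not part of the dataset or the scour sources. It assumes that findElementsWithId returns a mapping of id -> declaring element and that findReferencedElements returns a mapping of id -> list of referring nodes (which is how renameID indexes into them above), and that all three names are importable from scour.scour.

import xml.dom.minidom

# Assumed import path; renameID is defined in scour/scour.py, and the two
# helpers are the ones named in the function body and its comments.
from scour.scour import renameID, findElementsWithId, findReferencedElements

svg = '''<svg xmlns="http://www.w3.org/2000/svg"
     xmlns:xlink="http://www.w3.org/1999/xlink">
  <defs><linearGradient id="gradient1"/></defs>
  <rect width="10" height="10" fill="url(#gradient1)"/>
</svg>'''

doc = xml.dom.minidom.parseString(svg)
identifiedElements = findElementsWithId(doc.documentElement)   # id -> declaring element (assumed shape)
referencedIDs = findReferencedElements(doc.documentElement)    # id -> referring nodes (assumed shape)

# Rename "gradient1" to "a": the <linearGradient> id and the fill="url(#gradient1)"
# reference are both rewritten, and the return value is the number of bytes saved.
saved = renameID(doc, 'gradient1', 'a', identifiedElements, referencedIDs)
print(saved)
print(doc.documentElement.toxml())

The returned count is simply the character savings from the declaring id plus every rewritten url(#...) or xlink:href reference, which callers can use to report how much the rename shrank the document.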
scour-project/scour | scour/scour.py | unprotected_ids | def unprotected_ids(doc, options):
u"""Returns a list of unprotected IDs within the document doc."""
identifiedElements = findElementsWithId(doc.documentElement)
if not (options.protect_ids_noninkscape or
options.protect_ids_list or
options.protect_ids_prefix):
return identifiedElements
if options.protect_ids_list:
protect_ids_list = options.protect_ids_list.split(",")
if options.protect_ids_prefix:
protect_ids_prefixes = options.protect_ids_prefix.split(",")
for id in list(identifiedElements):
protected = False
if options.protect_ids_noninkscape and not id[-1].isdigit():
protected = True
if options.protect_ids_list and id in protect_ids_list:
protected = True
if options.protect_ids_prefix:
for prefix in protect_ids_prefixes:
if id.startswith(prefix):
protected = True
if protected:
del identifiedElements[id]
return identifiedElements | python | def unprotected_ids(doc, options):
u"""Returns a list of unprotected IDs within the document doc."""
identifiedElements = findElementsWithId(doc.documentElement)
if not (options.protect_ids_noninkscape or
options.protect_ids_list or
options.protect_ids_prefix):
return identifiedElements
if options.protect_ids_list:
protect_ids_list = options.protect_ids_list.split(",")
if options.protect_ids_prefix:
protect_ids_prefixes = options.protect_ids_prefix.split(",")
for id in list(identifiedElements):
protected = False
if options.protect_ids_noninkscape and not id[-1].isdigit():
protected = True
if options.protect_ids_list and id in protect_ids_list:
protected = True
if options.protect_ids_prefix:
for prefix in protect_ids_prefixes:
if id.startswith(prefix):
protected = True
if protected:
del identifiedElements[id]
return identifiedElements | [
"def",
"unprotected_ids",
"(",
"doc",
",",
"options",
")",
":",
"identifiedElements",
"=",
"findElementsWithId",
"(",
"doc",
".",
"documentElement",
")",
"if",
"not",
"(",
"options",
".",
"protect_ids_noninkscape",
"or",
"options",
".",
"protect_ids_list",
"or",
"options",
".",
"protect_ids_prefix",
")",
":",
"return",
"identifiedElements",
"if",
"options",
".",
"protect_ids_list",
":",
"protect_ids_list",
"=",
"options",
".",
"protect_ids_list",
".",
"split",
"(",
"\",\"",
")",
"if",
"options",
".",
"protect_ids_prefix",
":",
"protect_ids_prefixes",
"=",
"options",
".",
"protect_ids_prefix",
".",
"split",
"(",
"\",\"",
")",
"for",
"id",
"in",
"list",
"(",
"identifiedElements",
")",
":",
"protected",
"=",
"False",
"if",
"options",
".",
"protect_ids_noninkscape",
"and",
"not",
"id",
"[",
"-",
"1",
"]",
".",
"isdigit",
"(",
")",
":",
"protected",
"=",
"True",
"if",
"options",
".",
"protect_ids_list",
"and",
"id",
"in",
"protect_ids_list",
":",
"protected",
"=",
"True",
"if",
"options",
".",
"protect_ids_prefix",
":",
"for",
"prefix",
"in",
"protect_ids_prefixes",
":",
"if",
"id",
".",
"startswith",
"(",
"prefix",
")",
":",
"protected",
"=",
"True",
"if",
"protected",
":",
"del",
"identifiedElements",
"[",
"id",
"]",
"return",
"identifiedElements"
] | u"""Returns a list of unprotected IDs within the document doc. | [
"u",
"Returns",
"a",
"list",
"of",
"unprotected",
"IDs",
"within",
"the",
"document",
"doc",
"."
] | train | https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L831-L854 |
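A similarly hedged sketch for unprotected_ids follows. In a real run the options object comes from scour's own option parsing; SimpleNamespace below only stands in for the three protect_ids_* attributes the function actually reads. Note that, judging from the per-key deletions above, the return value is the id -> element mapping produced by findElementsWithId with protected entries removed, rather than a plain list.

import xml.dom.minidom
from types import SimpleNamespace

from scour.scour import unprotected_ids   # assumed import path

svg = '''<svg xmlns="http://www.w3.org/2000/svg">
  <rect id="rect1" width="1" height="1"/>
  <rect id="logo" width="2" height="2"/>
</svg>'''

doc = xml.dom.minidom.parseString(svg)
options = SimpleNamespace(
    protect_ids_noninkscape=True,   # protect IDs that do not end in a digit
    protect_ids_list=None,          # no explicit comma-separated ID list
    protect_ids_prefix=None,        # no protected prefixes
)

# "logo" ends in a letter, so the noninkscape rule marks it protected;
# "rect1" ends in a digit and remains fair game for renaming or removal.
print(sorted(unprotected_ids(doc, options)))   # expected: ['rect1']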