Field                    Type            Values / lengths
hexsha                   stringlengths   40 – 40
repo                     stringlengths   5 – 121
path                     stringlengths   4 – 227
license                  sequence
language                 stringclasses   1 value
identifier               stringlengths   1 – 107
return_type              stringlengths   2 – 237
original_string          stringlengths   75 – 13.4k
original_docstring       stringlengths   13 – 12.9k
docstring                stringlengths   13 – 2.57k
docstring_tokens         sequence
code                     stringlengths   23 – 1.88k
code_tokens              sequence
short_docstring          stringlengths   1 – 1.32k
short_docstring_tokens   sequence
comment                  sequence
parameters               list
docstring_params         dict
code_with_imports        stringlengths   23 – 1.88k
idxs                     int64           0 – 611k
cluster                  int64           0 – 1.02k
7540540e177a568c94169af73b1747f4da9025d7
akersten/cute-kitchen
src/router/annotations.py
[ "MIT" ]
Python
route_login_required
<not_specific>
def route_login_required():
    """
    Decorates a function to indicate the requirement for a logged-in session.
    :return: The decorated function.
    """
    def decorator(fn):
        fn.route_login_required = True
        return fn

    return decorator
Decorates a function to indicate the requirement for a logged-in session. :return: The decorated function.
Decorates a function to indicate the requirement for a logged-in session.
[ "Decorates", "a", "function", "to", "indicate", "the", "requirement", "for", "a", "logged", "-", "in", "session", "." ]
def route_login_required():
    def decorator(fn):
        fn.route_login_required = True
        return fn

    return decorator
[ "def", "route_login_required", "(", ")", ":", "def", "decorator", "(", "fn", ")", ":", "fn", ".", "route_login_required", "=", "True", "return", "fn", "return", "decorator" ]
Decorates a function to indicate the requirement for a logged-in session.
[ "Decorates", "a", "function", "to", "indicate", "the", "requirement", "for", "a", "logged", "-", "in", "session", "." ]
[ "\"\"\"\n Decorates a function to indicate the requirement for a logged-in session.\n :return: The decorated function.\n \"\"\"" ]
[]
{ "returns": [ { "docstring": "The decorated function.", "docstring_tokens": [ "The", "decorated", "function", "." ], "type": null } ], "raises": [], "params": [], "outlier_params": [], "others": [] }
def route_login_required():
    def decorator(fn):
        fn.route_login_required = True
        return fn

    return decorator
0
56
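A quick sketch of how this row's decorator behaves once applied; the handler name below is illustrative, not part of the row:

# The decorator only tags the function; a router is expected to read the flag.
@route_login_required()
def account_page():
    return "secret"

assert getattr(account_page, "route_login_required", False) is True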
c5b891ac420cbeffb65019d562bc382e7bf6367b
MacFarlaneBro/compose-mode
compose_mode/generate.py
[ "MIT" ]
Python
fix_restart
<not_specific>
def fix_restart(restart_config):
    """ Fix output of docker-compose.

    docker-compose's "show config" mechanism--the internals of which we use to
    merge configs--doesn't actually return valid configurations for the
    "restart" property as they convert it to an internal representation which
    they then forget to convert back to the yaml format. We do that by hand
    here.
    """
    try:
        mrc = restart_config['MaximumRetryCount']
    except TypeError:
        name = restart_config
    else:
        name = restart_config['Name']

    if name in ['always', 'unless-stopped', 'no']:
        return name
    else:
        return '{}:{}'.format(name, mrc)
Fix output of docker-compose. docker-compose's "show config" mechanism--the internals of which we use to merge configs--doesn't actually return valid configurations for the "restart" property as they convert it to an internal representation which they then forget to convert back to the yaml format. We do that by hand here.
Fix output of docker-compose.
[ "Fix", "output", "of", "docker", "-", "compose", "." ]
def fix_restart(restart_config):
    try:
        mrc = restart_config['MaximumRetryCount']
    except TypeError:
        name = restart_config
    else:
        name = restart_config['Name']

    if name in ['always', 'unless-stopped', 'no']:
        return name
    else:
        return '{}:{}'.format(name, mrc)
[ "def", "fix_restart", "(", "restart_config", ")", ":", "try", ":", "mrc", "=", "restart_config", "[", "'MaximumRetryCount'", "]", "except", "TypeError", ":", "name", "=", "restart_config", "else", ":", "name", "=", "restart_config", "[", "'Name'", "]", "if", "name", "in", "[", "'always'", ",", "'unless-stopped'", ",", "'no'", "]", ":", "return", "name", "else", ":", "return", "'{}:{}'", ".", "format", "(", "name", ",", "mrc", ")" ]
Fix output of docker-compose.
[ "Fix", "output", "of", "docker", "-", "compose", "." ]
[ "\"\"\" Fix output of docker-compose.\n\n docker-compose's \"show config\" mechanism--the internals of which we use to\n merge configs--doesn't actually return valid configurations for the\n \"restart\" property as they convert it to an internal representation which\n they then forget to convert back to the yaml format. We do that by hand\n here.\n \"\"\"" ]
[ { "param": "restart_config", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "restart_config", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def fix_restart(restart_config):
    try:
        mrc = restart_config['MaximumRetryCount']
    except TypeError:
        name = restart_config
    else:
        name = restart_config['Name']

    if name in ['always', 'unless-stopped', 'no']:
        return name
    else:
        return '{}:{}'.format(name, mrc)
1
921
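A hedged usage sketch for this row; the inputs mirror the two shapes the function handles (docker-compose's internal dict, or a plain policy string):

# Dict form carries Name/MaximumRetryCount; plain strings pass through.
assert fix_restart({'Name': 'on-failure', 'MaximumRetryCount': 5}) == 'on-failure:5'
assert fix_restart('always') == 'always'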
65ef5aa7f6bb80020bfccdca5945fcdda9118ad3
netceteragroup/da4e
assembler.py
[ "MIT" ]
Python
_read_file_and_join_lines
<not_specific>
def _read_file_and_join_lines(file_name):
    '''
    Reads all contents of a file and appends each line into one ',' separated string.
    Leading and trailing whitespace is removed. Lines having a '#' character as first non-
    whitespace character are considered as comment and are therefore ignored in the output.
    @param fileName: the file to read
    '''
    def _normalizeString(line):
        return line.replace('\n', '').strip()

    def _isComment(line):
        return not line.startswith("#")

    def _isEmptyLine(line):
        return line != ""

    with open(file_name, 'r') as file:
        return ','.join(filter(_isEmptyLine, filter(_isComment, map(_normalizeString, file.readlines()))))
Reads all contents of a file and appends each line into one ',' separated string. Leading and trailing whitespace is removed. Lines having a '#' character as first non- whitespace character are considered as comment and are therefore ignored in the output. @param fileName: the file to read
Reads all contents of a file and appends each line into one ',' separated string. Leading and trailing whitespace is removed. Lines having a '#' character as first non whitespace character are considered as comment and are therefore ignored in the output.
[ "Reads", "all", "contents", "of", "a", "file", "and", "appends", "each", "line", "into", "one", "'", "'", "separated", "string", ".", "Leading", "and", "trailing", "whitespace", "is", "removed", ".", "Lines", "having", "a", "'", "#", "'", "character", "as", "first", "non", "whitespace", "character", "are", "considered", "as", "comment", "and", "are", "therefore", "ignored", "in", "the", "output", "." ]
def _read_file_and_join_lines(file_name):
    def _normalizeString(line):
        return line.replace('\n', '').strip()

    def _isComment(line):
        return not line.startswith("#")

    def _isEmptyLine(line):
        return line != ""

    with open(file_name, 'r') as file:
        return ','.join(filter(_isEmptyLine, filter(_isComment, map(_normalizeString, file.readlines()))))
[ "def", "_read_file_and_join_lines", "(", "file_name", ")", ":", "def", "_normalizeString", "(", "line", ")", ":", "return", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", ".", "strip", "(", ")", "def", "_isComment", "(", "line", ")", ":", "return", "not", "line", ".", "startswith", "(", "\"#\"", ")", "def", "_isEmptyLine", "(", "line", ")", ":", "return", "line", "!=", "\"\"", "with", "open", "(", "file_name", ",", "'r'", ")", "as", "file", ":", "return", "','", ".", "join", "(", "filter", "(", "_isEmptyLine", ",", "filter", "(", "_isComment", ",", "map", "(", "_normalizeString", ",", "file", ".", "readlines", "(", ")", ")", ")", ")", ")" ]
Reads all contents of a file and appends each line into one ',' separated string.
[ "Reads", "all", "contents", "of", "a", "file", "and", "appends", "each", "line", "into", "one", "'", "'", "separated", "string", "." ]
[ "'''\n Reads all contents of a file and appends each line into one ',' separated string. \n Leading and trailing whitespace is removed. Lines having a '#' character as first non-\n whitespace character are considered as comment and are therefore ignored in the output. \n @param fileName: the file to read\n '''" ]
[ { "param": "file_name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "file_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "fileName", "type": null, "docstring": "the file to read", "docstring_tokens": [ "the", "file", "to", "read" ], "default": null, "is_optional": false } ], "others": [] }
def _read_file_and_join_lines(file_name):
    def _normalizeString(line):
        return line.replace('\n', '').strip()

    def _isComment(line):
        return not line.startswith("#")

    def _isEmptyLine(line):
        return line != ""

    with open(file_name, 'r') as file:
        return ','.join(filter(_isEmptyLine, filter(_isComment, map(_normalizeString, file.readlines()))))
2
312
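A small illustrative run (file name assumed). Note the inner helpers are named inversely (_isComment returns True for non-comment lines), yet the composed filters do match the docstring:

with open("hosts.txt", "w") as f:  # assumed file name
    f.write("alpha\n# a comment\n  beta  \n\n")

print(_read_file_and_join_lines("hosts.txt"))  # -> alpha,beta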
4ba9d06164e542a8b6d024e64d49a8773a1ff650
WolfgangFahl/pyOnlineSpreadSheetEditing
onlinespreadsheet/tablequery.py
[ "Apache-2.0" ]
Python
match
<not_specific>
def match(pattern:str,string:str):
    '''
    re match search for the given pattern with ignore case
    '''
    return re.search(pattern=pattern, string=string, flags=re.IGNORECASE)
re match search for the given pattern with ignore case
re match search for the given pattern with ignore case
[ "re", "match", "search", "for", "the", "given", "pattern", "with", "ignore", "case" ]
def match(pattern:str,string:str):
    return re.search(pattern=pattern, string=string, flags=re.IGNORECASE)
[ "def", "match", "(", "pattern", ":", "str", ",", "string", ":", "str", ")", ":", "return", "re", ".", "search", "(", "pattern", "=", "pattern", ",", "string", "=", "string", ",", "flags", "=", "re", ".", "IGNORECASE", ")" ]
re match search for the given pattern with ignore case
[ "re", "match", "search", "for", "the", "given", "pattern", "with", "ignore", "case" ]
[ "'''\n re match search for the given pattern with ignore case\n '''" ]
[ { "param": "pattern", "type": "str" }, { "param": "string", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "pattern", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "string", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re

def match(pattern:str,string:str):
    return re.search(pattern=pattern, string=string, flags=re.IGNORECASE)
3
994
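Worth noting for this row: despite the name, the helper wraps re.search rather than re.match, so the pattern may occur anywhere in the string. A minimal check:

assert match("world", "Hello WORLD") is not None   # case-insensitive search
assert match("^world", "Hello WORLD") is None      # anchor shows it is not re.match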
ed05c8d76b4e4beff6fca1cee61e2aadf3732abe
tomerten/PyElegantSDDS
pyelegantsdds/elegantrun.py
[ "MIT" ]
Python
write_parallel_run_script
null
def write_parallel_run_script(sif):
    """
    Method to generate parallel elegant run
    script.

    Parameters:
    -----------
    sif: str
        path to singularity container

    """
    bashstrlist = [
        "#!/bin/bash",
        "pele={}".format(sif),
        'cmd="bash temp_run_pelegant.sh"',
        "",
        "$pele $cmd $1",
    ]
    bashstr = "\n".join(bashstrlist)

    # write to file
    with open("run_pelegant.sh", "w") as f:
        f.write(bashstr)
Method to generate parallel elegant run script. Parameters: ----------- sif: str path to singularity container
Method to generate parallel elegant run script. str path to singularity container
[ "Method", "to", "generate", "parallel", "elegant", "run", "script", ".", "str", "path", "to", "singularity", "container" ]
def write_parallel_run_script(sif):
    bashstrlist = [
        "#!/bin/bash",
        "pele={}".format(sif),
        'cmd="bash temp_run_pelegant.sh"',
        "",
        "$pele $cmd $1",
    ]
    bashstr = "\n".join(bashstrlist)

    with open("run_pelegant.sh", "w") as f:
        f.write(bashstr)
[ "def", "write_parallel_run_script", "(", "sif", ")", ":", "bashstrlist", "=", "[", "\"#!/bin/bash\"", ",", "\"pele={}\"", ".", "format", "(", "sif", ")", ",", "'cmd=\"bash temp_run_pelegant.sh\"'", ",", "\"\"", ",", "\"$pele $cmd $1\"", ",", "]", "bashstr", "=", "\"\\n\"", ".", "join", "(", "bashstrlist", ")", "with", "open", "(", "\"run_pelegant.sh\"", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "bashstr", ")" ]
Method to generate parallel elegant run script.
[ "Method", "to", "generate", "parallel", "elegant", "run", "script", "." ]
[ "\"\"\"\n Method to generate parallel elegant run\n script.\n\n Parameters:\n -----------\n sif: str\n path to singularity container\n\n \"\"\"", "# write to file" ]
[ { "param": "sif", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "sif", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def write_parallel_run_script(sif):
    bashstrlist = [
        "#!/bin/bash",
        "pele={}".format(sif),
        'cmd="bash temp_run_pelegant.sh"',
        "",
        "$pele $cmd $1",
    ]
    bashstr = "\n".join(bashstrlist)

    with open("run_pelegant.sh", "w") as f:
        f.write(bashstr)
5
673
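An illustrative call for this row; the container path is an assumption. It writes run_pelegant.sh into the current working directory:

write_parallel_run_script("/containers/elegant.sif")  # assumed .sif path
# run_pelegant.sh now contains:
# #!/bin/bash
# pele=/containers/elegant.sif
# cmd="bash temp_run_pelegant.sh"
#
# $pele $cmd $1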
bf09424b7fd16830410dec98a364876163fae903
erikagardini/InferringMusicAndVisualArtStyleEvolution
python/utilities.py
[ "MIT" ]
Python
find_min_max_indexes
<not_specific>
def find_min_max_indexes(dates):
    """
    Get the index of the min value and the max value (centroids are not considered)
    Args:
        [ndarray int] dates: dates matrix

    Returns:
        [float] index_min: index of the min value
        [int] index_max: index of the max value
    """
    min = dates[0]
    index_min = 0
    max = dates[0]
    index_max = 0

    for i in range(0, dates.shape[0]):
        if dates[i] < min and dates[i] != -1:
            min = dates[i]
            index_min = i
        if dates[i] > max and dates[i] != -1:
            max = dates[i]
            index_max = i

    return index_min, index_max
Get the index of the min value and the max value (centroids are not considered) Args: [ndarray int] dates: dates matrix Returns: [float] index_min: index of the min value [int] index_max: index of the max value
Get the index of the min value and the max value (centroids are not considered)
[ "Get", "the", "index", "of", "the", "min", "value", "and", "the", "max", "value", "(", "centroids", "are", "not", "considered", ")" ]
def find_min_max_indexes(dates):
    min = dates[0]
    index_min = 0
    max = dates[0]
    index_max = 0

    for i in range(0, dates.shape[0]):
        if dates[i] < min and dates[i] != -1:
            min = dates[i]
            index_min = i
        if dates[i] > max and dates[i] != -1:
            max = dates[i]
            index_max = i

    return index_min, index_max
[ "def", "find_min_max_indexes", "(", "dates", ")", ":", "min", "=", "dates", "[", "0", "]", "index_min", "=", "0", "max", "=", "dates", "[", "0", "]", "index_max", "=", "0", "for", "i", "in", "range", "(", "0", ",", "dates", ".", "shape", "[", "0", "]", ")", ":", "if", "dates", "[", "i", "]", "<", "min", "and", "dates", "[", "i", "]", "!=", "-", "1", ":", "min", "=", "dates", "[", "i", "]", "index_min", "=", "i", "if", "dates", "[", "i", "]", ">", "max", "and", "dates", "[", "i", "]", "!=", "-", "1", ":", "max", "=", "dates", "[", "i", "]", "index_max", "=", "i", "return", "index_min", ",", "index_max" ]
Get the index of the min value and the max value (centroids are not considered)
[ "Get", "the", "index", "of", "the", "min", "value", "and", "the", "max", "value", "(", "centroids", "are", "not", "considered", ")" ]
[ "\"\"\"\n Get the index of the min value and the max value (centroids are not considered)\n Args:\n [ndarray int] dates: dates matrix\n\n Returns:\n [float] index_min: index of the min value\n [int] index_max: index of the max value\n \"\"\"" ]
[ { "param": "dates", "type": null } ]
{ "returns": [ { "docstring": "[float] index_min: index of the min value\n[int] index_max: index of the max value", "docstring_tokens": [ "[", "float", "]", "index_min", ":", "index", "of", "the", "min", "value", "[", "int", "]", "index_max", ":", "index", "of", "the", "max", "value" ], "type": null } ], "raises": [], "params": [ { "identifier": "dates", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "[ndarray int] dates", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "others": [] }
def find_min_max_indexes(dates):
    min = dates[0]
    index_min = 0
    max = dates[0]
    index_max = 0

    for i in range(0, dates.shape[0]):
        if dates[i] < min and dates[i] != -1:
            min = dates[i]
            index_min = i
        if dates[i] > max and dates[i] != -1:
            max = dates[i]
            index_max = i

    return index_min, index_max
6
721
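A minimal sketch with a NumPy array (the .shape access implies ndarray input); -1 entries, used for centroids per the docstring, are skipped:

import numpy as np

dates = np.array([1900, -1, 1850, 1960])
assert find_min_max_indexes(dates) == (2, 3)  # min at index 2, max at index 3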
8eae126dbf8a8baeab1e2e976d363b1bcf02863a
muvr/muvr-ml
sensorcnn/dataset/utils.py
[ "BSD-3-Clause" ]
Python
csv_file_iterator
null
def csv_file_iterator(root_directory):
    """Returns a generator (iterator) of absolute file paths for CSV files in a given directory"""
    for root_path, _, files in os.walk(root_directory, followlinks=True):
        for f in files:
            if f.endswith("csv"):
                yield os.path.join(root_path, f)
Returns a generator (iterator) of absolute file paths for CSV files in a given directory
Returns a generator (iterator) of absolute file paths for CSV files in a given directory
[ "Returns", "a", "generator", "(", "iterator", ")", "of", "absolute", "file", "paths", "for", "CSV", "files", "in", "a", "given", "directory" ]
def csv_file_iterator(root_directory):
    for root_path, _, files in os.walk(root_directory, followlinks=True):
        for f in files:
            if f.endswith("csv"):
                yield os.path.join(root_path, f)
[ "def", "csv_file_iterator", "(", "root_directory", ")", ":", "for", "root_path", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "root_directory", ",", "followlinks", "=", "True", ")", ":", "for", "f", "in", "files", ":", "if", "f", ".", "endswith", "(", "\"csv\"", ")", ":", "yield", "os", ".", "path", ".", "join", "(", "root_path", ",", "f", ")" ]
Returns a generator (iterator) of absolute file paths for CSV files in a given directory
[ "Returns", "a", "generator", "(", "iterator", ")", "of", "absolute", "file", "paths", "for", "CSV", "files", "in", "a", "given", "directory" ]
[ "\"\"\"Returns a generator (iterator) of absolute file paths for CSV files in a given directory\"\"\"" ]
[ { "param": "root_directory", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "root_directory", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os

def csv_file_iterator(root_directory):
    for root_path, _, files in os.walk(root_directory, followlinks=True):
        for f in files:
            if f.endswith("csv"):
                yield os.path.join(root_path, f)
7
671
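A usage sketch; "/data" is an assumed directory. The yielded paths are only absolute when root_directory itself is absolute, since os.walk echoes back the root it was given:

for path in csv_file_iterator("/data"):  # assumed directory
    print(path)  # e.g. /data/session1/accel.csv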
4c8f7cefbccbd3770a96057fbf07a756c0ce334d
intrlocutr/nightcoreify
nightcorei.py
[ "0BSD" ]
Python
create_tags
list
def create_tags(tags: list) -> list:
    """Prepares tags for a new upload. Keeps as many old tags as possible while adding a "nightcore" tag."""
    to_add = 'nightcore'

    # The total number of characters in YouTube video tags can't exceed 400.
    # We're adding the "nightcore" tag, so we'll only keep this many characters of the original tags.
    target_len = 400 - len(to_add)

    new_tags = []
    length = 0

    # Keep tags up until they can no longer fit within our target.
    for tag in tags:
        length += len(tag)
        if length < target_len:
            new_tags.append(tag)
        else:
            break

    new_tags.append(to_add)
    return new_tags
Prepares tags for a new upload. Keeps as many old tags as possible while adding a "nightcore" tag.
Prepares tags for a new upload. Keeps as many old tags as possible while adding a "nightcore" tag.
[ "Prepares", "tags", "for", "a", "new", "upload", ".", "Keeps", "as", "many", "old", "tags", "as", "possible", "while", "adding", "a", "\"", "nightcore", "\"", "tag", "." ]
def create_tags(tags: list) -> list:
    to_add = 'nightcore'
    target_len = 400 - len(to_add)
    new_tags = []
    length = 0
    for tag in tags:
        length += len(tag)
        if length < target_len:
            new_tags.append(tag)
        else:
            break
    new_tags.append(to_add)
    return new_tags
[ "def", "create_tags", "(", "tags", ":", "list", ")", "->", "list", ":", "to_add", "=", "'nightcore'", "target_len", "=", "400", "-", "len", "(", "to_add", ")", "new_tags", "=", "[", "]", "length", "=", "0", "for", "tag", "in", "tags", ":", "length", "+=", "len", "(", "tag", ")", "if", "length", "<", "target_len", ":", "new_tags", ".", "append", "(", "tag", ")", "else", ":", "break", "new_tags", ".", "append", "(", "to_add", ")", "return", "new_tags" ]
Prepares tags for a new upload.
[ "Prepares", "tags", "for", "a", "new", "upload", "." ]
[ "\"\"\"Prepares tags for a new upload. Keeps as many old tags as possible while adding a \"nightcore\" tag.\"\"\"", "# The total number of characters in YouTube video tags can't exceed 400.", "# We're adding the \"nightcore\" tag, so we'll only keep this many characters of the original tags.", "# Keep tags up until they can no longer fit within our target." ]
[ { "param": "tags", "type": "list" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "tags", "type": "list", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def create_tags(tags: list) -> list:
    to_add = 'nightcore'
    target_len = 400 - len(to_add)
    new_tags = []
    length = 0
    for tag in tags:
        length += len(tag)
        if length < target_len:
            new_tags.append(tag)
        else:
            break
    new_tags.append(to_add)
    return new_tags
8
208
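Two illustrative calls showing the 400-character budget described in the comments:

assert create_tags(["remix", "edm"]) == ["remix", "edm", "nightcore"]
assert create_tags(["x" * 400]) == ["nightcore"]  # oversized tag is dropped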
903fc63fede8ca3f8a8dc0b5f4ca496131e1a105
JustKowalski/sound_separation
datasets/fuss/reverberate_and_mix.py
[ "Apache-2.0" ]
Python
write_item_dict
null
def write_item_dict(item_dict, item_file, separate=False):
    """Write tab separated source/rir lists in files for train/validate/eval."""
    if not separate:
        with open(item_file, 'w') as f:
            for subfolder in item_dict:
                for example in item_dict[subfolder]:
                    line = '\t'.join([example] + item_dict[subfolder][example])
                    f.write(line + '\n')
    else:
        for subfolder in item_dict:
            item_base, item_ext = item_file.split('.')
            item_file_sub = item_base + '_' + subfolder + '.' + item_ext
            with open(item_file_sub, 'w') as f:
                for example in item_dict[subfolder]:
                    line = '\t'.join([example] + item_dict[subfolder][example])
                    f.write(line + '\n')
Write tab separated source/rir lists in files for train/validate/eval.
Write tab separated source/rir lists in files for train/validate/eval.
[ "Write", "tab", "separated", "source", "/", "rir", "lists", "in", "files", "for", "train", "/", "validate", "/", "eval", "." ]
def write_item_dict(item_dict, item_file, separate=False):
    if not separate:
        with open(item_file, 'w') as f:
            for subfolder in item_dict:
                for example in item_dict[subfolder]:
                    line = '\t'.join([example] + item_dict[subfolder][example])
                    f.write(line + '\n')
    else:
        for subfolder in item_dict:
            item_base, item_ext = item_file.split('.')
            item_file_sub = item_base + '_' + subfolder + '.' + item_ext
            with open(item_file_sub, 'w') as f:
                for example in item_dict[subfolder]:
                    line = '\t'.join([example] + item_dict[subfolder][example])
                    f.write(line + '\n')
[ "def", "write_item_dict", "(", "item_dict", ",", "item_file", ",", "separate", "=", "False", ")", ":", "if", "not", "separate", ":", "with", "open", "(", "item_file", ",", "'w'", ")", "as", "f", ":", "for", "subfolder", "in", "item_dict", ":", "for", "example", "in", "item_dict", "[", "subfolder", "]", ":", "line", "=", "'\\t'", ".", "join", "(", "[", "example", "]", "+", "item_dict", "[", "subfolder", "]", "[", "example", "]", ")", "f", ".", "write", "(", "line", "+", "'\\n'", ")", "else", ":", "for", "subfolder", "in", "item_dict", ":", "item_base", ",", "item_ext", "=", "item_file", ".", "split", "(", "'.'", ")", "item_file_sub", "=", "item_base", "+", "'_'", "+", "subfolder", "+", "'.'", "+", "item_ext", "with", "open", "(", "item_file_sub", ",", "'w'", ")", "as", "f", ":", "for", "example", "in", "item_dict", "[", "subfolder", "]", ":", "line", "=", "'\\t'", ".", "join", "(", "[", "example", "]", "+", "item_dict", "[", "subfolder", "]", "[", "example", "]", ")", "f", ".", "write", "(", "line", "+", "'\\n'", ")" ]
Write tab separated source/rir lists in files for train/validate/eval.
[ "Write", "tab", "separated", "source", "/", "rir", "lists", "in", "files", "for", "train", "/", "validate", "/", "eval", "." ]
[ "\"\"\"Write tab separated source/rir lists in files for train/validate/eval.\"\"\"" ]
[ { "param": "item_dict", "type": null }, { "param": "item_file", "type": null }, { "param": "separate", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "item_dict", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "item_file", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "separate", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def write_item_dict(item_dict, item_file, separate=False):
    if not separate:
        with open(item_file, 'w') as f:
            for subfolder in item_dict:
                for example in item_dict[subfolder]:
                    line = '\t'.join([example] + item_dict[subfolder][example])
                    f.write(line + '\n')
    else:
        for subfolder in item_dict:
            item_base, item_ext = item_file.split('.')
            item_file_sub = item_base + '_' + subfolder + '.' + item_ext
            with open(item_file_sub, 'w') as f:
                for example in item_dict[subfolder]:
                    line = '\t'.join([example] + item_dict[subfolder][example])
                    f.write(line + '\n')
9
677
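A hedged sketch of the expected input shape, inferred from how the function indexes item_dict (subfolder -> example -> list of strings); file names are illustrative:

items = {"train": {"ex1": ["src1.wav", "rir1.wav"]}}
write_item_dict(items, "items.txt", separate=True)  # writes items_train.txt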
8f34f83cd4a60a8d8e72e31e4fde3b9e819b893f
BAMWelDX/weldx
weldx/tests/test_time.py
[ "BSD-3-Clause" ]
Python
_transform_array
<not_specific>
def _transform_array(data, is_array, is_scalar):
    """Transform an array into a scalar, single value array or return in unmodified."""
    if not is_array:
        return data[0]
    if is_scalar:
        return [data[0]]
    return data
Transform an array into a scalar, single value array or return in unmodified.
Transform an array into a scalar, single value array or return in unmodified.
[ "Transform", "an", "array", "into", "a", "scalar", "single", "value", "array", "or", "return", "in", "unmodified", "." ]
def _transform_array(data, is_array, is_scalar):
    if not is_array:
        return data[0]
    if is_scalar:
        return [data[0]]
    return data
[ "def", "_transform_array", "(", "data", ",", "is_array", ",", "is_scalar", ")", ":", "if", "not", "is_array", ":", "return", "data", "[", "0", "]", "if", "is_scalar", ":", "return", "[", "data", "[", "0", "]", "]", "return", "data" ]
Transform an array into a scalar, single value array or return in unmodified.
[ "Transform", "an", "array", "into", "a", "scalar", "single", "value", "array", "or", "return", "in", "unmodified", "." ]
[ "\"\"\"Transform an array into a scalar, single value array or return in unmodified.\"\"\"" ]
[ { "param": "data", "type": null }, { "param": "is_array", "type": null }, { "param": "is_scalar", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "is_array", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "is_scalar", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _transform_array(data, is_array, is_scalar):
    if not is_array:
        return data[0]
    if is_scalar:
        return [data[0]]
    return data
10
524
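The two flags select the output shape; a quick run through all three branches:

assert _transform_array([1, 2, 3], is_array=False, is_scalar=False) == 1
assert _transform_array([1, 2, 3], is_array=True, is_scalar=True) == [1]
assert _transform_array([1, 2, 3], is_array=True, is_scalar=False) == [1, 2, 3]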
cc94e3893f2982011fc08d2790d839d5a009c4f5
EmersonAires/Introducao_a_ciencia_da_computacao_com_Python
Exercicios_Resolvidos/Parte 2/Semana 1/exercicio_12_1.py
[ "MIT" ]
Python
extrair_coluna
<not_specific>
def extrair_coluna(b_mat, col):
    '''retorna uma lista com os elementos da coluna indicada'''
    coluna_b_mat = []
    for linha in b_mat:
        elemento = linha[col]
        coluna_b_mat.append(elemento)
    return coluna_b_mat
retorna uma lista com os elementos da coluna indicada
retorna uma lista com os elementos da coluna indicada
[ "retorna", "uma", "lista", "com", "os", "elementos", "da", "coluna", "indicada" ]
def extrair_coluna(b_mat, col):
    coluna_b_mat = []
    for linha in b_mat:
        elemento = linha[col]
        coluna_b_mat.append(elemento)
    return coluna_b_mat
[ "def", "extrair_coluna", "(", "b_mat", ",", "col", ")", ":", "coluna_b_mat", "=", "[", "]", "for", "linha", "in", "b_mat", ":", "elemento", "=", "linha", "[", "col", "]", "coluna_b_mat", ".", "append", "(", "elemento", ")", "return", "coluna_b_mat" ]
retorna uma lista com os elementos da coluna indicada
[ "retorna", "uma", "lista", "com", "os", "elementos", "da", "coluna", "indicada" ]
[ "'''retorna uma lista com os elementos da coluna indicada'''" ]
[ { "param": "b_mat", "type": null }, { "param": "col", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "b_mat", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "col", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def extrair_coluna(b_mat, col):
    coluna_b_mat = []
    for linha in b_mat:
        elemento = linha[col]
        coluna_b_mat.append(elemento)
    return coluna_b_mat
11
667
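The docstring is Portuguese ("returns a list with the elements of the indicated column"); a minimal call on a row-major matrix:

m = [[1, 2], [3, 4], [5, 6]]
assert extrair_coluna(m, 1) == [2, 4, 6]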
5bb08e1a378e746bf276fdb1704d41f5409d7d65
trisongz/file_io
fileio/core/py_utils.py
[ "MIT" ]
Python
temporary_assignment
null
def temporary_assignment(obj, attr, value):
    """Temporarily assign obj.attr to value."""
    original = getattr(obj, attr)
    setattr(obj, attr, value)
    try:
        yield
    finally:
        setattr(obj, attr, original)
Temporarily assign obj.attr to value.
Temporarily assign obj.attr to value.
[ "Temporarily", "assign", "obj", ".", "attr", "to", "value", "." ]
def temporary_assignment(obj, attr, value):
    original = getattr(obj, attr)
    setattr(obj, attr, value)
    try:
        yield
    finally:
        setattr(obj, attr, original)
[ "def", "temporary_assignment", "(", "obj", ",", "attr", ",", "value", ")", ":", "original", "=", "getattr", "(", "obj", ",", "attr", ")", "setattr", "(", "obj", ",", "attr", ",", "value", ")", "try", ":", "yield", "finally", ":", "setattr", "(", "obj", ",", "attr", ",", "original", ")" ]
Temporarily assign obj.attr to value.
[ "Temporarily", "assign", "obj", ".", "attr", "to", "value", "." ]
[ "\"\"\"Temporarily assign obj.attr to value.\"\"\"" ]
[ { "param": "obj", "type": null }, { "param": "attr", "type": null }, { "param": "value", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "obj", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "attr", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "value", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def temporary_assignment(obj, attr, value):
    original = getattr(obj, attr)
    setattr(obj, attr, value)
    try:
        yield
    finally:
        setattr(obj, attr, original)
12
748
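The bare yield implies this function is meant to run under contextlib.contextmanager (the decorator was presumably stripped during extraction); a sketch of the intended use:

import contextlib

class Cfg:
    debug = False

with contextlib.contextmanager(temporary_assignment)(Cfg, "debug", True):
    assert Cfg.debug is True
assert Cfg.debug is False  # restored on exit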
63484325eaa962f8f517d4993495bc314d2325ed
alchem0x2A/vasp-interactive-test
examples/ex11_k8s_minimal.py
[ "MIT" ]
Python
_thread_calculate
<not_specific>
def _thread_calculate(atoms, energy):
    """A threaded version of atoms.get_potential_energy. Energy is a one-member list
    ideas taken from https://wiki.fysik.dtu.dk/ase/_modules/ase/neb.html#NEB
    """
    energy[0] = atoms.get_potential_energy()
    return
A threaded version of atoms.get_potential_energy. Energy is a one-member list ideas taken from https://wiki.fysik.dtu.dk/ase/_modules/ase/neb.html#NEB
A threaded version of atoms.get_potential_energy.
[ "A", "threaded", "version", "of", "atoms", ".", "get_potential_energy", "." ]
def _thread_calculate(atoms, energy):
    energy[0] = atoms.get_potential_energy()
    return
[ "def", "_thread_calculate", "(", "atoms", ",", "energy", ")", ":", "energy", "[", "0", "]", "=", "atoms", ".", "get_potential_energy", "(", ")", "return" ]
A threaded version of atoms.get_potential_energy.
[ "A", "threaded", "version", "of", "atoms", ".", "get_potential_energy", "." ]
[ "\"\"\"A threaded version of atoms.get_potential_energy. Energy is a one-member list\n ideas taken from https://wiki.fysik.dtu.dk/ase/_modules/ase/neb.html#NEB\n \"\"\"" ]
[ { "param": "atoms", "type": null }, { "param": "energy", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "atoms", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "energy", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _thread_calculate(atoms, energy):
    energy[0] = atoms.get_potential_energy()
    return
13
315
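The one-member list acts as an out-parameter so the result survives the thread; a sketch where `atoms` is assumed to be an ASE Atoms object with an attached calculator:

import threading

energy = [None]
t = threading.Thread(target=_thread_calculate, args=(atoms, energy))  # atoms assumed
t.start()
t.join()
print(energy[0])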
e71a7da7f51510edc97fee6d78a7582b46d166d7
seung-lab/mapbuffer
mapbuffer/mapbuffer.py
[ "BSD-3-Clause" ]
Python
eytzinger_sort
<not_specific>
def eytzinger_sort(inpt, output, i = 0, k = 1):
    """
    Takes an ascendingly sorted input and
    an equal sized output buffer into which to
    rewrite the input in eytzinger order.

    Modified from:
    https://algorithmica.org/en/eytzinger
    """
    if k <= len(inpt):
        i = eytzinger_sort(inpt, output, i, 2 * k)
        output[k - 1] = inpt[i]
        i += 1
        i = eytzinger_sort(inpt, output, i, 2 * k + 1)
    return i
Takes an ascendingly sorted input and an equal sized output buffer into which to rewrite the input in eytzinger order. Modified from: https://algorithmica.org/en/eytzinger
Takes an ascendingly sorted input and an equal sized output buffer into which to rewrite the input in eytzinger order.
[ "Takes", "an", "ascendingly", "sorted", "input", "and", "an", "equal", "sized", "output", "buffer", "into", "which", "to", "rewrite", "the", "input", "in", "eytzinger", "order", "." ]
def eytzinger_sort(inpt, output, i = 0, k = 1):
    if k <= len(inpt):
        i = eytzinger_sort(inpt, output, i, 2 * k)
        output[k - 1] = inpt[i]
        i += 1
        i = eytzinger_sort(inpt, output, i, 2 * k + 1)
    return i
[ "def", "eytzinger_sort", "(", "inpt", ",", "output", ",", "i", "=", "0", ",", "k", "=", "1", ")", ":", "if", "k", "<=", "len", "(", "inpt", ")", ":", "i", "=", "eytzinger_sort", "(", "inpt", ",", "output", ",", "i", ",", "2", "*", "k", ")", "output", "[", "k", "-", "1", "]", "=", "inpt", "[", "i", "]", "i", "+=", "1", "i", "=", "eytzinger_sort", "(", "inpt", ",", "output", ",", "i", ",", "2", "*", "k", "+", "1", ")", "return", "i" ]
Takes an ascendingly sorted input and an equal sized output buffer into which to rewrite the input in eytzinger order.
[ "Takes", "an", "ascendingly", "sorted", "input", "and", "an", "equal", "sized", "output", "buffer", "into", "which", "to", "rewrite", "the", "input", "in", "eytzinger", "order", "." ]
[ "\"\"\"\n Takes an ascendingly sorted input and \n an equal sized output buffer into which to \n rewrite the input in eytzinger order.\n\n Modified from:\n https://algorithmica.org/en/eytzinger\n \"\"\"" ]
[ { "param": "inpt", "type": null }, { "param": "output", "type": null }, { "param": "i", "type": null }, { "param": "k", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "inpt", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "output", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "i", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "k", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def eytzinger_sort(inpt, output, i = 0, k = 1):
    if k <= len(inpt):
        i = eytzinger_sort(inpt, output, i, 2 * k)
        output[k - 1] = inpt[i]
        i += 1
        i = eytzinger_sort(inpt, output, i, 2 * k + 1)
    return i
14
993
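For a sorted 7-element input, the in-order recursion produces the implicit-BST (Eytzinger) layout, which places the median at index 0 for cache-friendly binary search:

data = [1, 2, 3, 4, 5, 6, 7]
out = [0] * len(data)
eytzinger_sort(data, out)
assert out == [4, 2, 6, 1, 3, 5, 7]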
b2984037c6a8ed370b786cafaa04ec91d894c19a
GHzytp/PyLorentz
GUI/PyLorentz_GUI.py
[ "BSD-3-Clause" ]
Python
change_visibility
None
def change_visibility(window: sg.Window, elem_val_list: List[Tuple[sg.Element, Any]]) -> None:
    """ Take a list of element keys and change
    visibility of the element.

    Args:
        window : The element representing the main GUI window.
        elem_val_list : The list of elements with values whose
            state is to be changed.

    Returns:
        None
    """
    for elem_key, val in elem_val_list:
        window[elem_key].Update(visible=val)
Take a list of element keys and change visibility of the element. Args: window : The element representing the main GUI window. elem_val_list : The list of elements with values whose state is to be changed. Returns: None
Take a list of element keys and change visibility of the element.
[ "Take", "a", "list", "of", "element", "keys", "and", "change", "visibility", "of", "the", "element", "." ]
def change_visibility(window: sg.Window, elem_val_list: List[Tuple[sg.Element, Any]]) -> None:
    for elem_key, val in elem_val_list:
        window[elem_key].Update(visible=val)
[ "def", "change_visibility", "(", "window", ":", "sg", ".", "Window", ",", "elem_val_list", ":", "List", "[", "Tuple", "[", "sg", ".", "Element", ",", "Any", "]", "]", ")", "->", "None", ":", "for", "elem_key", ",", "val", "in", "elem_val_list", ":", "window", "[", "elem_key", "]", ".", "Update", "(", "visible", "=", "val", ")" ]
Take a list of element keys and change visibility of the element.
[ "Take", "a", "list", "of", "element", "keys", "and", "change", "visibility", "of", "the", "element", "." ]
[ "\"\"\" Take a list of element keys and change\n visibility of the element.\n\n Args:\n window : The element representing the main GUI window.\n elem_val_list : The list of elements with values whose\n state is to be changed.\n\n Returns:\n None\n \"\"\"" ]
[ { "param": "window", "type": "sg.Window" }, { "param": "elem_val_list", "type": "List[Tuple[sg.Element, Any]]" } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "window", "type": "sg.Window", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "elem_val_list", "type": "List[Tuple[sg.Element, Any]]", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "window ", "type": null, "docstring": "The element representing the main GUI window.", "docstring_tokens": [ "The", "element", "representing", "the", "main", "GUI", "window", "." ], "default": null, "is_optional": null }, { "identifier": "elem_val_list ", "type": null, "docstring": "The list of elements with values whose\nstate is to be changed.", "docstring_tokens": [ "The", "list", "of", "elements", "with", "values", "whose", "state", "is", "to", "be", "changed", "." ], "default": null, "is_optional": null } ], "others": [] }
def change_visibility(window: sg.Window, elem_val_list: List[Tuple[sg.Element, Any]]) -> None:
    for elem_key, val in elem_val_list:
        window[elem_key].Update(visible=val)
15
239
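A sketch assuming PySimpleGUI as `sg` (per the type hints); the element keys are illustrative:

import PySimpleGUI as sg

window = sg.Window("demo", [[sg.Text("plot", key="-PLOT-"),
                             sg.Text("help", key="-HELP-")]], finalize=True)
change_visibility(window, [("-PLOT-", False), ("-HELP-", True)])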
61d3a295b3c1cedde8c1eab628b02734cf1c0cd5
BensonGathu/neighborhood
virtual/lib/python3.8/site-packages/PIL/ImageChops.py
[ "Unlicense" ]
Python
multiply
<not_specific>
def multiply(image1, image2):
    """
    Superimposes two images on top of each other.

    If you multiply an image with a solid black image, the result is black. If
    you multiply with a solid white image, the image is unaffected.

    .. code-block:: python

        out = image1 * image2 / MAX

    :rtype: :py:class:`~PIL.Image.Image`
    """
    image1.load()
    image2.load()
    return image1._new(image1.im.chop_multiply(image2.im))
Superimposes two images on top of each other. If you multiply an image with a solid black image, the result is black. If you multiply with a solid white image, the image is unaffected. .. code-block:: python out = image1 * image2 / MAX :rtype: :py:class:`~PIL.Image.Image`
Superimposes two images on top of each other. If you multiply an image with a solid black image, the result is black. If you multiply with a solid white image, the image is unaffected. code-block:: python out = image1 * image2 / MAX
[ "Superimposes", "two", "images", "on", "top", "of", "each", "other", ".", "If", "you", "multiply", "an", "image", "with", "a", "solid", "black", "image", "the", "result", "is", "black", ".", "If", "you", "multiply", "with", "a", "solid", "white", "image", "the", "image", "is", "unaffected", ".", "code", "-", "block", "::", "python", "out", "=", "image1", "*", "image2", "/", "MAX" ]
def multiply(image1, image2):
    image1.load()
    image2.load()
    return image1._new(image1.im.chop_multiply(image2.im))
[ "def", "multiply", "(", "image1", ",", "image2", ")", ":", "image1", ".", "load", "(", ")", "image2", ".", "load", "(", ")", "return", "image1", ".", "_new", "(", "image1", ".", "im", ".", "chop_multiply", "(", "image2", ".", "im", ")", ")" ]
Superimposes two images on top of each other.
[ "Superimposes", "two", "images", "on", "top", "of", "each", "other", "." ]
[ "\"\"\"\n Superimposes two images on top of each other.\n\n If you multiply an image with a solid black image, the result is black. If\n you multiply with a solid white image, the image is unaffected.\n\n .. code-block:: python\n\n out = image1 * image2 / MAX\n\n :rtype: :py:class:`~PIL.Image.Image`\n \"\"\"" ]
[ { "param": "image1", "type": null }, { "param": "image2", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": ":py:class:`~PIL.Image.Image`" } ], "raises": [], "params": [ { "identifier": "image1", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "image2", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def multiply(image1, image2):
    image1.load()
    image2.load()
    return image1._new(image1.im.chop_multiply(image2.im))
16
804
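The white-image identity from the docstring, checked through PIL's public ImageChops API rather than the internal _new/chop_multiply pair:

from PIL import Image, ImageChops

img = Image.new("L", (2, 2), 128)
white = Image.new("L", (2, 2), 255)
assert list(ImageChops.multiply(img, white).getdata()) == [128] * 4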
2d8da20bf2fb315a41e314223d05cc62ea9dcae5
DeepanshS/mrsimulator-ui
app/sims/method/__init__.py
[ "BSD-3-Clause" ]
Python
generate_sidepanel
<not_specific>
def generate_sidepanel(method, index):
    """Generate scrollable side panel listing for methods"""
    title = html.B(f"Method {index}", className="")

    # method name
    name = html.Div(method["name"], className="")

    # method channel(s)
    channels = ", ".join(method["channels"])
    channels = html.Div(f"Channel: {channels}")

    # n dimensions
    n_dim = len(method["spectral_dimensions"])
    n_dim = html.Div(f"Dimensions: {n_dim}")

    a_tag = html.A([title, name, channels, n_dim])

    # The H6(index) only shows for smaller screen sizes.
    return html.Li(
        [html.H6(index), html.Div(a_tag)],
        # draggable="true",
        className="list-group-item",
        # id={"type": "select-method-index", "index": index},
    )
Generate scrollable side panel listing for methods
Generate scrollable side panel listing for methods
[ "Generate", "scrollable", "side", "panel", "listing", "for", "methods" ]
def generate_sidepanel(method, index):
    title = html.B(f"Method {index}", className="")
    name = html.Div(method["name"], className="")
    channels = ", ".join(method["channels"])
    channels = html.Div(f"Channel: {channels}")
    n_dim = len(method["spectral_dimensions"])
    n_dim = html.Div(f"Dimensions: {n_dim}")
    a_tag = html.A([title, name, channels, n_dim])
    return html.Li(
        [html.H6(index), html.Div(a_tag)],
        className="list-group-item",
    )
[ "def", "generate_sidepanel", "(", "method", ",", "index", ")", ":", "title", "=", "html", ".", "B", "(", "f\"Method {index}\"", ",", "className", "=", "\"\"", ")", "name", "=", "html", ".", "Div", "(", "method", "[", "\"name\"", "]", ",", "className", "=", "\"\"", ")", "channels", "=", "\", \"", ".", "join", "(", "method", "[", "\"channels\"", "]", ")", "channels", "=", "html", ".", "Div", "(", "f\"Channel: {channels}\"", ")", "n_dim", "=", "len", "(", "method", "[", "\"spectral_dimensions\"", "]", ")", "n_dim", "=", "html", ".", "Div", "(", "f\"Dimensions: {n_dim}\"", ")", "a_tag", "=", "html", ".", "A", "(", "[", "title", ",", "name", ",", "channels", ",", "n_dim", "]", ")", "return", "html", ".", "Li", "(", "[", "html", ".", "H6", "(", "index", ")", ",", "html", ".", "Div", "(", "a_tag", ")", "]", ",", "className", "=", "\"list-group-item\"", ",", ")" ]
Generate scrollable side panel listing for methods
[ "Generate", "scrollable", "side", "panel", "listing", "for", "methods" ]
[ "\"\"\"Generate scrollable side panel listing for methods\"\"\"", "# method name", "# method channel(s)", "# n dimensions", "# The H6(index) only shows for smaller screen sizes.", "# draggable=\"true\",", "# id={\"type\": \"select-method-index\", \"index\": index}," ]
[ { "param": "method", "type": null }, { "param": "index", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "method", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "index", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import html

def generate_sidepanel(method, index):
    title = html.B(f"Method {index}", className="")
    name = html.Div(method["name"], className="")
    channels = ", ".join(method["channels"])
    channels = html.Div(f"Channel: {channels}")
    n_dim = len(method["spectral_dimensions"])
    n_dim = html.Div(f"Dimensions: {n_dim}")
    a_tag = html.A([title, name, channels, n_dim])
    return html.Li(
        [html.H6(index), html.Div(a_tag)],
        className="list-group-item",
    )
18
196
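One caveat on this row: the auto-inferred `import html` in code_with_imports resolves to the stdlib module, whereas the source repo uses Dash's html components. A hedged sketch of the intended call (dict values illustrative):

from dash import html  # assumed, per the Dash-based source repo

method = {"name": "BlochDecay", "channels": ["1H"], "spectral_dimensions": [{}]}
item = generate_sidepanel(method, 0)  # an html.Li component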
6b1bbc653af085e7c5303ff686cc116c3b15f540
QuantumJack/GNN
lib/net_gen.py
[ "Apache-2.0" ]
Python
__generate_nets
<not_specific>
def __generate_nets(gnw_path, main_netfile_path, net_size, num_nets_per_size, outdir_path):
    """ Generate random network modules extracted from main_net_filename
    Args:
        gnw_path: path to GeneNetWeaver ".jar" file.
        main_netfile_path: filepath for the main network used for module extraction.
        net_size: size of the extracted modules.
        num_nets_per_size: number of modules to be extracted for each size.
        outdir_path: directory to store extracted network modules.
    """
    if not os.path.exists(outdir_path):
        os.mkdir(outdir_path)

    command = ("java -jar {!s} --extract -c data/settings.txt --input-net {!s}"
               " --input-net-format=0 --random-seed --greedy-selection --subnet-size={!s}"
               " --num-subnets={!s} --output-net-format=0 --output-path={!s}"
               ).format(gnw_path, main_netfile_path, net_size, num_nets_per_size, outdir_path)
    subprocess.run([command], shell=True)

    file_prefix = os.path.basename(main_netfile_path)[:-4]
    generated_files = glob.glob("{!s}/{!s}*.tsv".format(outdir_path, file_prefix))
    return generated_files
Generate random network modules extracted from main_net_filename Args: gnw_path: path to GeneNetWeaver ".jar" file. main_netfile_path: filepath for the main network used for module extraction. net_size: size of the extracted modules. num_nets_per_size: number of modules to be extracted for each size. outdir_path: directory to store extracted network modules.
Generate random network modules extracted from main_net_filename
[ "Generate", "random", "network", "modules", "extracted", "from", "main_net_filename" ]
def __generate_nets(gnw_path, main_netfile_path, net_size, num_nets_per_size, outdir_path):
    if not os.path.exists(outdir_path):
        os.mkdir(outdir_path)

    command = ("java -jar {!s} --extract -c data/settings.txt --input-net {!s}"
               " --input-net-format=0 --random-seed --greedy-selection --subnet-size={!s}"
               " --num-subnets={!s} --output-net-format=0 --output-path={!s}"
               ).format(gnw_path, main_netfile_path, net_size, num_nets_per_size, outdir_path)
    subprocess.run([command], shell=True)

    file_prefix = os.path.basename(main_netfile_path)[:-4]
    generated_files = glob.glob("{!s}/{!s}*.tsv".format(outdir_path, file_prefix))
    return generated_files
[ "def", "__generate_nets", "(", "gnw_path", ",", "main_netfile_path", ",", "net_size", ",", "num_nets_per_size", ",", "outdir_path", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "outdir_path", ")", ":", "os", ".", "mkdir", "(", "outdir_path", ")", "command", "=", "(", "\"java -jar {!s} --extract -c data/settings.txt --input-net {!s}\"", "\" --input-net-format=0 --random-seed --greedy-selection --subnet-size={!s}\"", "\" --num-subnets={!s} --output-net-format=0 --output-path={!s}\"", ")", ".", "format", "(", "gnw_path", ",", "main_netfile_path", ",", "net_size", ",", "num_nets_per_size", ",", "outdir_path", ")", "subprocess", ".", "run", "(", "[", "command", "]", ",", "shell", "=", "True", ")", "file_prefix", "=", "os", ".", "path", ".", "basename", "(", "main_netfile_path", ")", "[", ":", "-", "4", "]", "generated_files", "=", "glob", ".", "glob", "(", "\"{!s}/{!s}*.tsv\"", ".", "format", "(", "outdir_path", ",", "file_prefix", ")", ")", "return", "generated_files" ]
Generate random network modules extracted from main_net_filename
[ "Generate", "random", "network", "modules", "extracted", "from", "main_net_filename" ]
[ "\"\"\" Generate random network modules extracted from main_net_filename\n Args:\n gnw_path: path to GeneNetWeaver \".jar\" file.\n main_netfile_path: filepath for the main network used for module extraction.\n net_size: size of the extracted modules.\n num_nets_per_size: number of modules to be extracted for each size.\n outdir_path: directory to store extracted network modules.\n \"\"\"" ]
[ { "param": "gnw_path", "type": null }, { "param": "main_netfile_path", "type": null }, { "param": "net_size", "type": null }, { "param": "num_nets_per_size", "type": null }, { "param": "outdir_path", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "gnw_path", "type": null, "docstring": "path to GeneNetWeaver \".jar\" file.", "docstring_tokens": [ "path", "to", "GeneNetWeaver", "\"", ".", "jar", "\"", "file", "." ], "default": null, "is_optional": null }, { "identifier": "main_netfile_path", "type": null, "docstring": "filepath for the main network used for module extraction.", "docstring_tokens": [ "filepath", "for", "the", "main", "network", "used", "for", "module", "extraction", "." ], "default": null, "is_optional": null }, { "identifier": "net_size", "type": null, "docstring": "size of the extracted modules.", "docstring_tokens": [ "size", "of", "the", "extracted", "modules", "." ], "default": null, "is_optional": null }, { "identifier": "num_nets_per_size", "type": null, "docstring": "number of modules to be extracted for each size.", "docstring_tokens": [ "number", "of", "modules", "to", "be", "extracted", "for", "each", "size", "." ], "default": null, "is_optional": null }, { "identifier": "outdir_path", "type": null, "docstring": "directory to store extracted network modules.", "docstring_tokens": [ "directory", "to", "store", "extracted", "network", "modules", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import subprocess
import os
import glob

def __generate_nets(gnw_path, main_netfile_path, net_size, num_nets_per_size, outdir_path):
    if not os.path.exists(outdir_path):
        os.mkdir(outdir_path)

    command = ("java -jar {!s} --extract -c data/settings.txt --input-net {!s}"
               " --input-net-format=0 --random-seed --greedy-selection --subnet-size={!s}"
               " --num-subnets={!s} --output-net-format=0 --output-path={!s}"
               ).format(gnw_path, main_netfile_path, net_size, num_nets_per_size, outdir_path)
    subprocess.run([command], shell=True)

    file_prefix = os.path.basename(main_netfile_path)[:-4]
    generated_files = glob.glob("{!s}/{!s}*.tsv".format(outdir_path, file_prefix))
    return generated_files
20
539
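An illustrative call (all paths assumed); note the function also expects a data/settings.txt relative to the working directory, and shells out to GeneNetWeaver before globbing the .tsv modules it produced:

modules = __generate_nets(
    "gnw-3.1b.jar",       # assumed GeneNetWeaver jar
    "nets/ecoli.tsv",     # assumed main network file
    net_size=50,
    num_nets_per_size=10,
    outdir_path="modules_out",
)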
5258c905652f8e689e7f69238996a5b2f5d95f0d
clbarnes/pyboolnet
boolnet/misc.py
[ "MIT" ]
Python
prod
<not_specific>
def prod(numbers):
    """
    Find the product of a sequence

    :param numbers: Sequence of numbers
    :return: Their product
    """
    ret = 1
    for number in numbers:
        ret *= number
    return ret
Find the product of a sequence :param numbers: Sequence of numbers :return: Their product
Find the product of a sequence
[ "Find", "the", "product", "of", "a", "sequence" ]
def prod(numbers):
    ret = 1
    for number in numbers:
        ret *= number
    return ret
[ "def", "prod", "(", "numbers", ")", ":", "ret", "=", "1", "for", "number", "in", "numbers", ":", "ret", "*=", "number", "return", "ret" ]
Find the product of a sequence
[ "Find", "the", "product", "of", "a", "sequence" ]
[ "\"\"\"\n Find the product of a sequence\n\n :param numbers: Sequence of numbers\n :return: Their product\n \"\"\"" ]
[ { "param": "numbers", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "numbers", "type": null, "docstring": "Sequence of numbers", "docstring_tokens": [ "Sequence", "of", "numbers" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def prod(numbers):
    ret = 1
    for number in numbers:
        ret *= number
    return ret
23
413
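Behaviorally this matches math.prod from Python 3.8+, including the empty product:

assert prod([2, 3, 4]) == 24
assert prod([]) == 1  # empty product is 1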
664e3c76f043a3063bee9e66192f6daf1dad61e8
sullivanmj/pywemo
pywemo/ouimeaux_device/api/rules_db.py
[ "MIT" ]
Python
_unpack_db
<not_specific>
def _unpack_db(content, db_file):
    """Unpack the sqlite database from a .zip file content."""
    zip_contents = io.BytesIO(content)
    with zipfile.ZipFile(zip_contents) as zip_file:
        inner_file_name = zip_file.namelist()[0]
        with zip_file.open(inner_file_name) as zipped_db_file:
            db_file.write(zipped_db_file.read())
            db_file.flush()
            return inner_file_name
    raise RuntimeError("Could not find database within zip file")
Unpack the sqlite database from a .zip file content.
Unpack the sqlite database from a .zip file content.
[ "Unpack", "the", "sqlite", "database", "from", "a", ".", "zip", "file", "content", "." ]
def _unpack_db(content, db_file):
    zip_contents = io.BytesIO(content)
    with zipfile.ZipFile(zip_contents) as zip_file:
        inner_file_name = zip_file.namelist()[0]
        with zip_file.open(inner_file_name) as zipped_db_file:
            db_file.write(zipped_db_file.read())
            db_file.flush()
            return inner_file_name
    raise RuntimeError("Could not find database within zip file")
[ "def", "_unpack_db", "(", "content", ",", "db_file", ")", ":", "zip_contents", "=", "io", ".", "BytesIO", "(", "content", ")", "with", "zipfile", ".", "ZipFile", "(", "zip_contents", ")", "as", "zip_file", ":", "inner_file_name", "=", "zip_file", ".", "namelist", "(", ")", "[", "0", "]", "with", "zip_file", ".", "open", "(", "inner_file_name", ")", "as", "zipped_db_file", ":", "db_file", ".", "write", "(", "zipped_db_file", ".", "read", "(", ")", ")", "db_file", ".", "flush", "(", ")", "return", "inner_file_name", "raise", "RuntimeError", "(", "\"Could not find database within zip file\"", ")" ]
Unpack the sqlite database from a .zip file content.
[ "Unpack", "the", "sqlite", "database", "from", "a", ".", "zip", "file", "content", "." ]
[ "\"\"\"Unpack the sqlite database from a .zip file content.\"\"\"" ]
[ { "param": "content", "type": null }, { "param": "db_file", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "content", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "db_file", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import zipfile
import io

def _unpack_db(content, db_file):
    zip_contents = io.BytesIO(content)
    with zipfile.ZipFile(zip_contents) as zip_file:
        inner_file_name = zip_file.namelist()[0]
        with zip_file.open(inner_file_name) as zipped_db_file:
            db_file.write(zipped_db_file.read())
            db_file.flush()
            return inner_file_name
    raise RuntimeError("Could not find database within zip file")
24
619
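A round-trip sketch: build a zip in memory, then unpack it into a temp file. The trailing raise is effectively unreachable, since an empty archive makes namelist()[0] raise IndexError first:

import io, zipfile, tempfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as z:
    z.writestr("rules.db", b"not really sqlite")  # stand-in payload

with tempfile.NamedTemporaryFile() as tmp:
    assert _unpack_db(buf.getvalue(), tmp) == "rules.db"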
ae5462bd222a5c5bd4c72139814f3a36c73c6486
4con/hello-gn
build/android/apksize.py
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
Python
print_human_readable_size_info
null
def print_human_readable_size_info(apk):
    """Prints size information in human readable format.

    Args:
        apk: ApkSizeInfo object
    """
    files = apk.processed_files

    logging.critical('Stats for files as they exist within the apk:')
    for ext in files:
        logging.critical(' %-8s %s bytes in %s files', ext,
                         files[ext]['compressed_bytes'], files[ext]['number'])
    logging.critical('--------------------------------------')
    logging.critical(
        'All Files: %s bytes in %s files', apk.compressed_size, apk.total_files)
    logging.critical('APK Size: %s', apk.apk_size)
    logging.critical('APK overhead: %s', apk.apk_size - apk.compressed_size)
    logging.critical('--------------------------------------')

    logging.critical('Stats for files when extracted from the apk:')
    for ext in files:
        logging.critical(' %-8s %s bytes in %s files', ext,
                         files[ext]['uncompressed_bytes'], files[ext]['number'])
    logging.critical('--------------------------------------')
    logging.critical(
        'All Files: %s bytes in %s files', apk.uncompressed_size, apk.total_files)
Prints size information in human readable format. Args: apk: ApkSizeInfo object
Prints size information in human readable format.
[ "Prints", "size", "information", "in", "human", "readable", "format", "." ]
def print_human_readable_size_info(apk):
    files = apk.processed_files

    logging.critical('Stats for files as they exist within the apk:')
    for ext in files:
        logging.critical(' %-8s %s bytes in %s files', ext,
                         files[ext]['compressed_bytes'], files[ext]['number'])
    logging.critical('--------------------------------------')
    logging.critical(
        'All Files: %s bytes in %s files', apk.compressed_size, apk.total_files)
    logging.critical('APK Size: %s', apk.apk_size)
    logging.critical('APK overhead: %s', apk.apk_size - apk.compressed_size)
    logging.critical('--------------------------------------')

    logging.critical('Stats for files when extracted from the apk:')
    for ext in files:
        logging.critical(' %-8s %s bytes in %s files', ext,
                         files[ext]['uncompressed_bytes'], files[ext]['number'])
    logging.critical('--------------------------------------')
    logging.critical(
        'All Files: %s bytes in %s files', apk.uncompressed_size, apk.total_files)
[ "def", "print_human_readable_size_info", "(", "apk", ")", ":", "files", "=", "apk", ".", "processed_files", "logging", ".", "critical", "(", "'Stats for files as they exist within the apk:'", ")", "for", "ext", "in", "files", ":", "logging", ".", "critical", "(", "' %-8s %s bytes in %s files'", ",", "ext", ",", "files", "[", "ext", "]", "[", "'compressed_bytes'", "]", ",", "files", "[", "ext", "]", "[", "'number'", "]", ")", "logging", ".", "critical", "(", "'--------------------------------------'", ")", "logging", ".", "critical", "(", "'All Files: %s bytes in %s files'", ",", "apk", ".", "compressed_size", ",", "apk", ".", "total_files", ")", "logging", ".", "critical", "(", "'APK Size: %s'", ",", "apk", ".", "apk_size", ")", "logging", ".", "critical", "(", "'APK overhead: %s'", ",", "apk", ".", "apk_size", "-", "apk", ".", "compressed_size", ")", "logging", ".", "critical", "(", "'--------------------------------------'", ")", "logging", ".", "critical", "(", "'Stats for files when extracted from the apk:'", ")", "for", "ext", "in", "files", ":", "logging", ".", "critical", "(", "' %-8s %s bytes in %s files'", ",", "ext", ",", "files", "[", "ext", "]", "[", "'uncompressed_bytes'", "]", ",", "files", "[", "ext", "]", "[", "'number'", "]", ")", "logging", ".", "critical", "(", "'--------------------------------------'", ")", "logging", ".", "critical", "(", "'All Files: %s bytes in %s files'", ",", "apk", ".", "uncompressed_size", ",", "apk", ".", "total_files", ")" ]
Prints size information in human readable format.
[ "Prints", "size", "information", "in", "human", "readable", "format", "." ]
[ "\"\"\"Prints size information in human readable format.\n\n Args:\n apk: ApkSizeInfo object\n \"\"\"" ]
[ { "param": "apk", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "apk", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import logging def print_human_readable_size_info(apk): files = apk.processed_files logging.critical('Stats for files as they exist within the apk:') for ext in files: logging.critical(' %-8s %s bytes in %s files', ext, files[ext]['compressed_bytes'], files[ext]['number']) logging.critical('--------------------------------------') logging.critical( 'All Files: %s bytes in %s files', apk.compressed_size, apk.total_files) logging.critical('APK Size: %s', apk.apk_size) logging.critical('APK overhead: %s', apk.apk_size - apk.compressed_size) logging.critical('--------------------------------------') logging.critical('Stats for files when extracted from the apk:') for ext in files: logging.critical(' %-8s %s bytes in %s files', ext, files[ext]['uncompressed_bytes'], files[ext]['number']) logging.critical('--------------------------------------') logging.critical( 'All Files: %s bytes in %s files', apk.uncompressed_size, apk.total_files)
25
803
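A minimal call sketch for `print_human_readable_size_info`. The real `ApkSizeInfo` class is not part of the row, so a `SimpleNamespace` carrying just the attributes the function reads stands in for it (an assumption for illustration only).

import logging
from types import SimpleNamespace

logging.basicConfig(level=logging.CRITICAL)

apk = SimpleNamespace(
    processed_files={".dex": {"compressed_bytes": 1000, "uncompressed_bytes": 2500, "number": 1}},
    compressed_size=1000,
    uncompressed_size=2500,
    total_files=1,
    apk_size=1200,
)
print_human_readable_size_info(apk)  # hypothetical stand-in for an ApkSizeInfo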
cfb9c7e09aba8ed937f808b341f34b7eee06405b
finex-dev/finex
finex/ratios.py
[ "BSD-3-Clause" ]
Python
roce
<not_specific>
def roce(net_income, preferred_dividends, average_common_equity): """Computes return on common equity. Parameters ---------- net_income : int or float Net income preferred_dividends : int or float Preferred dividends average_common_equity : int or float Average common equity Returns ------- out : int or float Return on common equity """ return (net_income - preferred_dividends) / average_common_equity
Computes return on common equity. Parameters ---------- net_income : int or float Net income preferred_dividends : int or float Preferred dividends average_common_equity : int or float Average common equity Returns ------- out : int or float Return on common equity
Computes return on common equity. Parameters net_income : int or float Net income preferred_dividends : int or float Preferred dividends average_common_equity : int or float Average common equity Returns out : int or float Return on common equity
[ "Computes", "return", "on", "common", "equity", ".", "Parameters", "net_income", ":", "int", "or", "float", "Net", "income", "preferred_dividends", ":", "int", "or", "float", "Preferred", "dividends", "average_common_equity", ":", "int", "or", "float", "Average", "common", "equity", "Returns", "out", ":", "int", "or", "float", "Return", "on", "common", "equity" ]
def roce(net_income, preferred_dividends, average_common_equity): return (net_income - preferred_dividends) / average_common_equity
[ "def", "roce", "(", "net_income", ",", "preferred_dividends", ",", "average_common_equity", ")", ":", "return", "(", "net_income", "-", "preferred_dividends", ")", "/", "average_common_equity" ]
Computes return on common equity.
[ "Computes", "return", "on", "common", "equity", "." ]
[ "\"\"\"Computes return on common equity.\n\n Parameters\n ----------\n net_income : int or float\n Net income\n preferred_dividends : int or float\n Preferred dividends\n average_common_equity : int or float\n Average common equity\n\n Returns\n -------\n out : int or float\n Return on common equity\n \"\"\"" ]
[ { "param": "net_income", "type": null }, { "param": "preferred_dividends", "type": null }, { "param": "average_common_equity", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "net_income", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "preferred_dividends", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "average_common_equity", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def roce(net_income, preferred_dividends, average_common_equity): return (net_income - preferred_dividends) / average_common_equity
26
306
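A quick worked example of the `roce` ratio above: with net income 120, preferred dividends 20, and average common equity 500, return on common equity is (120 - 20) / 500 = 0.2.

print(roce(120, 20, 500))  # -> 0.2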
177f8f7c5c114b75e46eb141489e8ed3694972a9
guesswhohaha/learntools
learntools/core/asserts.py
[ "Apache-2.0" ]
Python
assert_has_columns
null
def assert_has_columns(df, cols, name="dataframe", strict=False): """Assert that the given dataframe contains columns with the given names. If strict is True, then assert it has *only* those columns. """ for col in cols: assert col in df.columns, "Expected {} to have column `{}`".format( name, col ) if strict: for col in df.columns: msg = "Unexpected column in {}: `{}`".format(name, col) assert col in cols, msg
Assert that the given dataframe contains columns with the given names. If strict is True, then assert it has *only* those columns.
Assert that the given dataframe contains columns with the given names. If strict is True, then assert it has *only* those columns.
[ "Assert", "that", "the", "given", "dataframe", "contains", "columns", "with", "the", "given", "names", ".", "If", "strict", "is", "True", "then", "assert", "it", "has", "*", "only", "*", "those", "columns", "." ]
def assert_has_columns(df, cols, name="dataframe", strict=False): for col in cols: assert col in df.columns, "Expected {} to have column `{}`".format( name, col ) if strict: for col in df.columns: msg = "Unexpected column in {}: `{}`".format(name, col) assert col in cols, msg
[ "def", "assert_has_columns", "(", "df", ",", "cols", ",", "name", "=", "\"dataframe\"", ",", "strict", "=", "False", ")", ":", "for", "col", "in", "cols", ":", "assert", "col", "in", "df", ".", "columns", ",", "\"Expected {} to have column `{}`\"", ".", "format", "(", "name", ",", "col", ")", "if", "strict", ":", "for", "col", "in", "df", ".", "columns", ":", "msg", "=", "\"Unexpected column in {}: `{}`\"", ".", "format", "(", "name", ",", "col", ")", "assert", "col", "in", "cols", ",", "msg" ]
Assert that the given dataframe contains columns with the given names.
[ "Assert", "that", "the", "given", "dataframe", "contains", "columns", "with", "the", "given", "names", "." ]
[ "\"\"\"Assert that the given dataframe contains columns with the given names.\n If strict is True, then assert it has *only* those columns.\n \"\"\"" ]
[ { "param": "df", "type": null }, { "param": "cols", "type": null }, { "param": "name", "type": null }, { "param": "strict", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "df", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "cols", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "strict", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def assert_has_columns(df, cols, name="dataframe", strict=False): for col in cols: assert col in df.columns, "Expected {} to have column `{}`".format( name, col ) if strict: for col in df.columns: msg = "Unexpected column in {}: `{}`".format(name, col) assert col in cols, msg
27
419
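A short sketch exercising `assert_has_columns`; it assumes pandas is available, which the row itself does not import.

import pandas as pd

df = pd.DataFrame({"a": [1], "b": [2]})
assert_has_columns(df, ["a", "b"])  # passes
assert_has_columns(df, ["a"])       # passes: extra columns allowed by default
try:
    assert_has_columns(df, ["a"], strict=True)
except AssertionError as err:
    print(err)  # Unexpected column in dataframe: `b`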
bb9bc874cfb0cf7a29aef5ba2b188b6dea437065
maurogaravello/Pirati
plot.py
[ "MIT" ]
Python
check_negative
<not_specific>
def check_negative(value): """ check if a value is negative""" ivalue = int(value) if ivalue <= 0: raise argparse.ArgumentTypeError("%s is an invalid entry" % value) return ivalue
check if a value is negative
check if a value is negative
[ "check", "if", "a", "value", "is", "negative" ]
def check_negative(value): ivalue = int(value) if ivalue <= 0: raise argparse.ArgumentTypeError("%s is an invalid entry" % value) return ivalue
[ "def", "check_negative", "(", "value", ")", ":", "ivalue", "=", "int", "(", "value", ")", "if", "ivalue", "<=", "0", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "\"%s is an invalid entry\"", "%", "value", ")", "return", "ivalue" ]
check if a value is negative
[ "check", "if", "a", "value", "is", "negative" ]
[ "\"\"\" check if a value is negative\"\"\"" ]
[ { "param": "value", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "value", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import argparse def check_negative(value): ivalue = int(value) if ivalue <= 0: raise argparse.ArgumentTypeError("%s is an invalid entry" % value) return ivalue
28
856
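A usage sketch for `check_negative` as an argparse type converter. Note that despite the source docstring ("check if a value is negative"), the function actually rejects zero and negative inputs and returns a positive int.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--count", type=check_negative)

print(parser.parse_args(["--count", "5"]).count)  # -> 5
# parser.parse_args(["--count", "-1"]) would exit with "-1 is an invalid entry"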
642f0f99428865d2e704e0a3535fd3e4cd3f9e98
larsOhne/pvdn
pvdn/detection/train.py
[ "CC0-1.0" ]
Python
print_end_summary
null
def print_end_summary(epochs: int, output_dir: str, best_epochs: dict, best_metrics: dict): """ Outputs the final summary to the terminal. :param epochs: number of epochs the model has been trained :type epochs: int :param output_dir: directory where the results are written to :type output_dir: str :param best_epochs: dictionary with metrics as keys, and epoch number of the model that has the best performance in it as value :type best_epochs: dict :param best_metrics: dictionary storing the best result for each metric :type best_metrics: dict """ print(f"\nTraining completed successfully for {epochs} epochs.\n" f"Logs written to {output_dir}.") best_str = [f"{k}: {v:4f} (epoch {best_epochs[k]})" for k, v in best_metrics.items()] print(f"-------------- Best --------------") for s in best_str: print(s)
Outputs the final summary to the terminal. :param epochs: number of epochs the model has been trained :type epochs: int :param output_dir: directory where the results are written to :type output_dir: str :param best_epochs: dictionary with metrics as keys, and epoch number of the model that has the best performance in it as value :type best_epochs: dict :param best_metrics: dictionary storing the best result for each metric :type best_metrics: dict
Outputs the final summary to the terminal.
[ "Outputs", "the", "final", "summary", "to", "the", "terminal", "." ]
def print_end_summary(epochs: int, output_dir: str, best_epochs: dict, best_metrics: dict): print(f"\nTraining completed successfully for {epochs} epochs.\n" f"Logs written to {output_dir}.") best_str = [f"{k}: {v:4f} (epoch {best_epochs[k]})" for k, v in best_metrics.items()] print(f"-------------- Best --------------") for s in best_str: print(s)
[ "def", "print_end_summary", "(", "epochs", ":", "int", ",", "output_dir", ":", "str", ",", "best_epochs", ":", "dict", ",", "best_metrics", ":", "dict", ")", ":", "print", "(", "f\"\\nTraining completed successfully for {epochs} epochs.\\n\"", "f\"Logs written to {output_dir}.\"", ")", "best_str", "=", "[", "f\"{k}: {v:4f} (epoch {best_epochs[k]})\"", "for", "k", ",", "v", "in", "best_metrics", ".", "items", "(", ")", "]", "print", "(", "f\"-------------- Best --------------\"", ")", "for", "s", "in", "best_str", ":", "print", "(", "s", ")" ]
Outputs the final summary to the terminal.
[ "Outputs", "the", "final", "summary", "to", "the", "terminal", "." ]
[ "\"\"\"\n Outputs the final summary to the terminal.\n :param epochs: number of epochs the model has been trained\n :type epochs: int\n :param output_dir: directory where the results are written to\n :type output_dir: str\n :param best_epochs: dictionary with metrics as keys, and epoch number of\n the model that has the best performance in it as value\n :type best_epochs: dict\n :param best_metrics: dictionary storing the best result for each metric\n :type best_metrics: dict\n \"\"\"" ]
[ { "param": "epochs", "type": "int" }, { "param": "output_dir", "type": "str" }, { "param": "best_epochs", "type": "dict" }, { "param": "best_metrics", "type": "dict" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "epochs", "type": "int", "docstring": "number of epochs the model has been trained", "docstring_tokens": [ "number", "of", "epochs", "the", "model", "has", "been", "trained" ], "default": null, "is_optional": null }, { "identifier": "output_dir", "type": "str", "docstring": "directory where the results are written to", "docstring_tokens": [ "directory", "where", "the", "results", "are", "written", "to" ], "default": null, "is_optional": null }, { "identifier": "best_epochs", "type": "dict", "docstring": "dictionary with metrics as keys, and epoch number of\nthe model that has the best performance in it as value", "docstring_tokens": [ "dictionary", "with", "metrics", "as", "keys", "and", "epoch", "number", "of", "the", "model", "that", "has", "the", "best", "performance", "in", "it", "as", "value" ], "default": null, "is_optional": null }, { "identifier": "best_metrics", "type": "dict", "docstring": "dictionary storing the best result for each metric", "docstring_tokens": [ "dictionary", "storing", "the", "best", "result", "for", "each", "metric" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def print_end_summary(epochs: int, output_dir: str, best_epochs: dict, best_metrics: dict): print(f"\nTraining completed successfully for {epochs} epochs.\n" f"Logs written to {output_dir}.") best_str = [f"{k}: {v:4f} (epoch {best_epochs[k]})" for k, v in best_metrics.items()] print(f"-------------- Best --------------") for s in best_str: print(s)
29
22
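A self-contained call of `print_end_summary` with invented metrics, purely to show the expected shapes of the two dicts:

best_epochs = {"f1": 7, "precision": 4}
best_metrics = {"f1": 0.91, "precision": 0.88}
print_end_summary(epochs=10, output_dir="./runs/exp1",
                  best_epochs=best_epochs, best_metrics=best_metrics)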
8a51d4283261aa0674abb481617734ad2aaf10f1
j19sch/pytest-logfest
pytest_logfest/plugin.py
[ "MIT" ]
Python
fxt_function_logger
null
def fxt_function_logger(request, module_logger, session_filememoryhandler): """ Yields a logger, child of the module logger and named the name of the function. Adds records for test started, setup error, test fail, and test ended. """ logger = module_logger.getChild(request.node.name) logger.info("TEST STARTED") yield logger try: if request.node.rep_setup.failed: logger.warning("SETUP ERROR") except AttributeError: pass try: if request.node.rep_call.failed: logger.warning("TEST FAIL") except AttributeError: pass logger.info("TEST ENDED\n") session_filememoryhandler.flush_with_filter_on_info()
Yields a logger, child of the module logger and named the name of the function. Adds records for test started, setup error, test fail, and test ended.
Yields a logger, child of the module logger and named the name of the function. Adds records for test started, setup error, test fail, and test ended.
[ "Yields", "a", "logger", "child", "of", "the", "module", "logger", "and", "named", "the", "name", "of", "the", "function", ".", "Adds", "records", "for", "test", "started", "setup", "error", "test", "fail", "and", "test", "ended", "." ]
def fxt_function_logger(request, module_logger, session_filememoryhandler): logger = module_logger.getChild(request.node.name) logger.info("TEST STARTED") yield logger try: if request.node.rep_setup.failed: logger.warning("SETUP ERROR") except AttributeError: pass try: if request.node.rep_call.failed: logger.warning("TEST FAIL") except AttributeError: pass logger.info("TEST ENDED\n") session_filememoryhandler.flush_with_filter_on_info()
[ "def", "fxt_function_logger", "(", "request", ",", "module_logger", ",", "session_filememoryhandler", ")", ":", "logger", "=", "module_logger", ".", "getChild", "(", "request", ".", "node", ".", "name", ")", "logger", ".", "info", "(", "\"TEST STARTED\"", ")", "yield", "logger", "try", ":", "if", "request", ".", "node", ".", "rep_setup", ".", "failed", ":", "logger", ".", "warning", "(", "\"SETUP ERROR\"", ")", "except", "AttributeError", ":", "pass", "try", ":", "if", "request", ".", "node", ".", "rep_call", ".", "failed", ":", "logger", ".", "warning", "(", "\"TEST FAIL\"", ")", "except", "AttributeError", ":", "pass", "logger", ".", "info", "(", "\"TEST ENDED\\n\"", ")", "session_filememoryhandler", ".", "flush_with_filter_on_info", "(", ")" ]
Yields a logger, child of the module logger and named the name of the function.
[ "Yields", "a", "logger", "child", "of", "the", "module", "logger", "and", "named", "the", "name", "of", "the", "function", "." ]
[ "\"\"\"\n Yields a logger, child of the module logger and named the name of the function.\n Adds records for test started, setup error, test fail, and test ended.\n\n \"\"\"" ]
[ { "param": "request", "type": null }, { "param": "module_logger", "type": null }, { "param": "session_filememoryhandler", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "request", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "module_logger", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "session_filememoryhandler", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def fxt_function_logger(request, module_logger, session_filememoryhandler): logger = module_logger.getChild(request.node.name) logger.info("TEST STARTED") yield logger try: if request.node.rep_setup.failed: logger.warning("SETUP ERROR") except AttributeError: pass try: if request.node.rep_call.failed: logger.warning("TEST FAIL") except AttributeError: pass logger.info("TEST ENDED\n") session_filememoryhandler.flush_with_filter_on_info()
30
301
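The row above is a pytest fixture, so a usage sketch is a test that requests it. The name under which the fixture is registered is not visible in the row, so `function_logger` below is an assumed name.

# test_example.py (hypothetical test module)
def test_something(function_logger):  # fixture name assumed
    function_logger.info("doing work")
    assert 1 + 1 == 2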
4d978bbc81c4e00039d1224dd4790f6483e5af9b
abugler/SMLFinalProject
runners/utils.py
[ "MIT" ]
Python
disp_script
null
def disp_script(spec): """ Displays the arguments for a script in a readable fashion in logging. Args: spec (dict): Dictionary containing script parameters. """ logging.info( f"\n" f" Running {spec['script']} with args:\n" f" config: {spec['config']}\n" f" run_in: {spec['run_in']}\n" f" num_gpus: {spec['num_gpus']}\n" f" blocking: {spec['blocking']}\n" )
Displays the arguments for a script in a readable fashion in logging. Args: spec (dict): Dictionary containing script parameters.
Displays the arguments for a script in a readable fashion in logging.
[ "Displays", "the", "arguments", "for", "a", "script", "in", "a", "readable", "fashion", "in", "logging", "." ]
def disp_script(spec): logging.info( f"\n" f" Running {spec['script']} with args:\n" f" config: {spec['config']}\n" f" run_in: {spec['run_in']}\n" f" num_gpus: {spec['num_gpus']}\n" f" blocking: {spec['blocking']}\n" )
[ "def", "disp_script", "(", "spec", ")", ":", "logging", ".", "info", "(", "f\"\\n\"", "f\" Running {spec['script']} with args:\\n\"", "f\" config: {spec['config']}\\n\"", "f\" run_in: {spec['run_in']}\\n\"", "f\" num_gpus: {spec['num_gpus']}\\n\"", "f\" blocking: {spec['blocking']}\\n\"", ")" ]
Displays the arguments for a script in a readable fashion in logging.
[ "Displays", "the", "arguments", "for", "a", "script", "in", "a", "readable", "fashion", "in", "logging", "." ]
[ "\"\"\"\n Displays the arguments for a script in a readable fashion in\n logging.\n \n Args:\n spec (dict): Dictionary containing script parameters.\n \"\"\"" ]
[ { "param": "spec", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "spec", "type": null, "docstring": "Dictionary containing script parameters.", "docstring_tokens": [ "Dictionary", "containing", "script", "parameters", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
import logging def disp_script(spec): logging.info( f"\n" f" Running {spec['script']} with args:\n" f" config: {spec['config']}\n" f" run_in: {spec['run_in']}\n" f" num_gpus: {spec['num_gpus']}\n" f" blocking: {spec['blocking']}\n" )
31
1017
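Calling `disp_script` only requires a dict carrying the keys its f-string reads; a minimal sketch:

import logging

logging.basicConfig(level=logging.INFO)
spec = {"script": "train.py", "config": "base.yml",
        "run_in": "docker", "num_gpus": 1, "blocking": True}
disp_script(spec)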
bcc04efb1ef7d3258b74f184ec0c0258e4e8c5fa
elyase/altair
altair/expr/core.py
[ "BSD-3-Clause" ]
Python
_js_repr
<not_specific>
def _js_repr(val): """Return a javascript-safe string representation of val""" if val is True: return 'true' elif val is False: return 'false' elif val is None: return 'null' else: return repr(val)
Return a javascript-safe string representation of val
Return a javascript-safe string representation of val
[ "Return", "a", "javascript", "-", "safe", "string", "representation", "of", "val" ]
def _js_repr(val): if val is True: return 'true' elif val is False: return 'false' elif val is None: return 'null' else: return repr(val)
[ "def", "_js_repr", "(", "val", ")", ":", "if", "val", "is", "True", ":", "return", "'true'", "elif", "val", "is", "False", ":", "return", "'false'", "elif", "val", "is", "None", ":", "return", "'null'", "else", ":", "return", "repr", "(", "val", ")" ]
Return a javascript-safe string representation of val
[ "Return", "a", "javascript", "-", "safe", "string", "representation", "of", "val" ]
[ "\"\"\"Return a javascript-safe string representation of val\"\"\"" ]
[ { "param": "val", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "val", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _js_repr(val): if val is True: return 'true' elif val is False: return 'false' elif val is None: return 'null' else: return repr(val)
32
126
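A quick demonstration of the `_js_repr` mapping above:

print(_js_repr(True), _js_repr(False), _js_repr(None), _js_repr(3.5))
# -> true false null 3.5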
892fd63e452f2cae6fe22559ccb6584141ee4b92
Bielorusse/Erdorbit_random
script.py
[ "MIT" ]
Python
multiplyArrayByScalar
<not_specific>
def multiplyArrayByScalar(inputArray, scalar): ''' Multiplying each component of a 2D array by a scalar - Inputs: inputArray input array scal input scalar - Outputs: outputArray output array ''' outputArray = [] for j in range(len(inputArray)): outputArray.append(inputArray[j]) for k in range(len(inputArray[j])): outputArray[j][k] = inputArray[j][k] * scalar return outputArray
Multiplying each component of a 2D array by a scalar - Inputs: inputArray input array scal input scalar - Outputs: outputArray output array
Multiplying each component of a 2D array by a scalar Inputs: inputArray input array scal input scalar Outputs: outputArray output array
[ "Multiplying", "each", "component", "of", "a", "2D", "array", "by", "a", "scalar", "Inputs", ":", "inputArray", "input", "array", "scal", "input", "scalar", "Outputs", ":", "outputArray", "output", "array" ]
def multiplyArrayByScalar(inputArray, scalar): outputArray = [] for j in range(len(inputArray)): outputArray.append(inputArray[j]) for k in range(len(inputArray[j])): outputArray[j][k] = inputArray[j][k] * scalar return outputArray
[ "def", "multiplyArrayByScalar", "(", "inputArray", ",", "scalar", ")", ":", "outputArray", "=", "[", "]", "for", "j", "in", "range", "(", "len", "(", "inputArray", ")", ")", ":", "outputArray", ".", "append", "(", "inputArray", "[", "j", "]", ")", "for", "k", "in", "range", "(", "len", "(", "inputArray", "[", "j", "]", ")", ")", ":", "outputArray", "[", "j", "]", "[", "k", "]", "=", "inputArray", "[", "j", "]", "[", "k", "]", "*", "scalar", "return", "outputArray" ]
Multiplying each component of a 2D array by a scalar Inputs: inputArray input array scal input scalar Outputs: outputArray output array
[ "Multiplying", "each", "component", "of", "a", "2D", "array", "by", "a", "scalar", "Inputs", ":", "inputArray", "input", "array", "scal", "input", "scalar", "Outputs", ":", "outputArray", "output", "array" ]
[ "'''\n\tMultiplying each component of a 2D array by a scalar\n \t- Inputs:\n inputArray input array\n scal input scalar\n \t- Outputs:\n \t\t outputArray output array\n '''" ]
[ { "param": "inputArray", "type": null }, { "param": "scalar", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "inputArray", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "scalar", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def multiplyArrayByScalar(inputArray, scalar): outputArray = [] for j in range(len(inputArray)): outputArray.append(inputArray[j]) for k in range(len(inputArray[j])): outputArray[j][k] = inputArray[j][k] * scalar return outputArray
33
1004
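A usage sketch for `multiplyArrayByScalar` that also shows a caveat in the extracted code: the inner lists are appended by reference before being overwritten, so the input array is mutated in place and the result aliases it.

data = [[1, 2], [3, 4]]
result = multiplyArrayByScalar(data, 10)
print(result)                # -> [[10, 20], [30, 40]]
print(data)                  # -> [[10, 20], [30, 40]] (input mutated too)
print(result[0] is data[0])  # -> True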
950f789d70c9ff60ddf38726d7b8992d103f289c
google-research/pyreach
pyreach/common/base/debug.py
[ "Apache-2.0" ]
Python
debug
None
def debug(msg: str) -> None: """Write debug message to stderr. Debug message consists file name, line number and function name of the calling routine. Args: msg: a custom message. """ parent_frame = inspect.stack()[1] file = parent_frame[1] line = parent_frame[2] func = parent_frame[3] sys.stderr.write("{}:{}-{}: {}\n".format(file, line, func, msg)) sys.stderr.flush()
Write debug message to stderr. Debug message consists file name, line number and function name of the calling routine. Args: msg: a custom message.
Write debug message to stderr. Debug message consists file name, line number and function name of the calling routine.
[ "Write", "debug", "message", "to", "stderr", ".", "Debug", "message", "consists", "file", "name", "line", "number", "and", "function", "name", "of", "the", "calling", "routine", "." ]
def debug(msg: str) -> None: parent_frame = inspect.stack()[1] file = parent_frame[1] line = parent_frame[2] func = parent_frame[3] sys.stderr.write("{}:{}-{}: {}\n".format(file, line, func, msg)) sys.stderr.flush()
[ "def", "debug", "(", "msg", ":", "str", ")", "->", "None", ":", "parent_frame", "=", "inspect", ".", "stack", "(", ")", "[", "1", "]", "file", "=", "parent_frame", "[", "1", "]", "line", "=", "parent_frame", "[", "2", "]", "func", "=", "parent_frame", "[", "3", "]", "sys", ".", "stderr", ".", "write", "(", "\"{}:{}-{}: {}\\n\"", ".", "format", "(", "file", ",", "line", ",", "func", ",", "msg", ")", ")", "sys", ".", "stderr", ".", "flush", "(", ")" ]
Write debug message to stderr.
[ "Write", "debug", "message", "to", "stderr", "." ]
[ "\"\"\"Write debug message to stderr.\n\n Debug message consists file name, line number and function name\n of the calling routine.\n\n Args:\n msg: a custom message.\n \"\"\"" ]
[ { "param": "msg", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "msg", "type": "str", "docstring": "a custom message.", "docstring_tokens": [ "a", "custom", "message", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import inspect import sys def debug(msg: str) -> None: parent_frame = inspect.stack()[1] file = parent_frame[1] line = parent_frame[2] func = parent_frame[3] sys.stderr.write("{}:{}-{}: {}\n".format(file, line, func, msg)) sys.stderr.flush()
34
504
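Calling the `debug` helper above from any function writes the caller's context to stderr, for example:

def do_work():
    debug("starting work")  # stderr: <file>:<line>-do_work: starting work

do_work()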
9a6a097e06c1ab31ad5a3c4d60545ff85a3c54d4
cookbrite/ebs-deploy
ebs_deploy/__init__.py
[ "MIT" ]
Python
override_scaling
<not_specific>
def override_scaling(option_settings, min_size, max_size): """ takes the merged option_settings and injects custom min/max autoscaling sizes """ match_namespace = "aws:autoscaling:asg" match_keys = {"MinSize": min_size, "MaxSize": max_size} copied_option_settings = [] for (namespace, key, value) in option_settings: new_option = (namespace, key, value) if match_namespace == namespace and key in match_keys: new_option = (namespace, key, match_keys[key]) copied_option_settings.append(new_option) return copied_option_settings
takes the merged option_settings and injects custom min/max autoscaling sizes
takes the merged option_settings and injects custom min/max autoscaling sizes
[ "takes", "the", "merged", "option_settings", "and", "injects", "custom", "min", "/", "max", "autoscaling", "sizes" ]
def override_scaling(option_settings, min_size, max_size): match_namespace = "aws:autoscaling:asg" match_keys = {"MinSize": min_size, "MaxSize": max_size} copied_option_settings = [] for (namespace, key, value) in option_settings: new_option = (namespace, key, value) if match_namespace == namespace and key in match_keys: new_option = (namespace, key, match_keys[key]) copied_option_settings.append(new_option) return copied_option_settings
[ "def", "override_scaling", "(", "option_settings", ",", "min_size", ",", "max_size", ")", ":", "match_namespace", "=", "\"aws:autoscaling:asg\"", "match_keys", "=", "{", "\"MinSize\"", ":", "min_size", ",", "\"MaxSize\"", ":", "max_size", "}", "copied_option_settings", "=", "[", "]", "for", "(", "namespace", ",", "key", ",", "value", ")", "in", "option_settings", ":", "new_option", "=", "(", "namespace", ",", "key", ",", "value", ")", "if", "match_namespace", "==", "namespace", "and", "key", "in", "match_keys", ":", "new_option", "=", "(", "namespace", ",", "key", ",", "match_keys", "[", "key", "]", ")", "copied_option_settings", ".", "append", "(", "new_option", ")", "return", "copied_option_settings" ]
takes the merged option_settings and injects custom min/max autoscaling sizes
[ "takes", "the", "merged", "option_settings", "and", "injects", "custom", "min", "/", "max", "autoscaling", "sizes" ]
[ "\"\"\" takes the merged option_settings and injects custom min/max autoscaling sizes \"\"\"" ]
[ { "param": "option_settings", "type": null }, { "param": "min_size", "type": null }, { "param": "max_size", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "option_settings", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "min_size", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "max_size", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def override_scaling(option_settings, min_size, max_size): match_namespace = "aws:autoscaling:asg" match_keys = {"MinSize": min_size, "MaxSize": max_size} copied_option_settings = [] for (namespace, key, value) in option_settings: new_option = (namespace, key, value) if match_namespace == namespace and key in match_keys: new_option = (namespace, key, match_keys[key]) copied_option_settings.append(new_option) return copied_option_settings
35
744
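A sketch of `override_scaling` on a small option list; the namespace and key names follow the Elastic Beanstalk convention the code matches against.

options = [
    ("aws:autoscaling:asg", "MinSize", "1"),
    ("aws:autoscaling:asg", "MaxSize", "4"),
    ("aws:elasticbeanstalk:environment", "EnvironmentType", "LoadBalanced"),
]
print(override_scaling(options, min_size="2", max_size="8"))
# MinSize becomes "2", MaxSize becomes "8", the unrelated option is untouched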
440b71cde91480a4acc2ad27be6b5c2c2c2604ee
iamholger/professor
pyext/professor2/ipol.py
[ "MIT" ]
Python
mk_ipolinputs
<not_specific>
def mk_ipolinputs(params): """ Make sorted run name and parameter name & value lists, suitable for passing to prof.Ipol params is a dict (actually, prefer OrderedDict) of run_names -> param_vals, as returned from read_rundata """ runs = sorted(params.keys()) if not runs: return runs, [], [[]] paramnames = params[runs[0]].keys() paramslist = [[params[run][pn] for pn in paramnames] for run in runs] return runs, paramnames, paramslist
Make sorted run name and parameter name & value lists, suitable for passing to prof.Ipol params is a dict (actually, prefer OrderedDict) of run_names -> param_vals, as returned from read_rundata
Make sorted run name and parameter name & value lists, suitable for passing to prof.Ipol params is a dict (actually, prefer OrderedDict) of run_names -> param_vals, as returned from read_rundata
[ "Make", "sorted", "run", "name", "and", "parameter", "name", "&", "value", "lists", "suitable", "for", "passing", "to", "prof", ".", "Ipol", "params", "is", "a", "dict", "(", "actually", "prefer", "OrderedDict", ")", "of", "run_names", "-", ">", "param_vals", "as", "returned", "from", "read_rundata" ]
def mk_ipolinputs(params): runs = sorted(params.keys()) if not runs: return runs, [], [[]] paramnames = params[runs[0]].keys() paramslist = [[params[run][pn] for pn in paramnames] for run in runs] return runs, paramnames, paramslist
[ "def", "mk_ipolinputs", "(", "params", ")", ":", "runs", "=", "sorted", "(", "params", ".", "keys", "(", ")", ")", "if", "not", "runs", ":", "return", "runs", ",", "[", "]", ",", "[", "[", "]", "]", "paramnames", "=", "params", "[", "runs", "[", "0", "]", "]", ".", "keys", "(", ")", "paramslist", "=", "[", "[", "params", "[", "run", "]", "[", "pn", "]", "for", "pn", "in", "paramnames", "]", "for", "run", "in", "runs", "]", "return", "runs", ",", "paramnames", ",", "paramslist" ]
Make sorted run name and parameter name & value lists, suitable for passing to prof.Ipol params is a dict (actually, prefer OrderedDict) of run_names -> param_vals, as returned from read_rundata
[ "Make", "sorted", "run", "name", "and", "parameter", "name", "&", "value", "lists", "suitable", "for", "passing", "to", "prof", ".", "Ipol", "params", "is", "a", "dict", "(", "actually", "prefer", "OrderedDict", ")", "of", "run_names", "-", ">", "param_vals", "as", "returned", "from", "read_rundata" ]
[ "\"\"\"\n Make sorted run name and parameter name & value lists, suitable for passing to prof.Ipol\n\n params is a dict (actually, prefer OrderedDict) of run_names -> param_vals,\n as returned from read_rundata\n \"\"\"" ]
[ { "param": "params", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "params", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def mk_ipolinputs(params): runs = sorted(params.keys()) if not runs: return runs, [], [[]] paramnames = params[runs[0]].keys() paramslist = [[params[run][pn] for pn in paramnames] for run in runs] return runs, paramnames, paramslist
36
703
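A usage sketch for `mk_ipolinputs`, using OrderedDicts as its docstring suggests; the run and parameter names are invented.

from collections import OrderedDict

params = OrderedDict([
    ("run_b", OrderedDict([("mass", 1.0), ("width", 0.1)])),
    ("run_a", OrderedDict([("mass", 2.0), ("width", 0.2)])),
])
runs, names, values = mk_ipolinputs(params)
print(runs)         # -> ['run_a', 'run_b']
print(list(names))  # -> ['mass', 'width']
print(values)       # -> [[2.0, 0.2], [1.0, 0.1]]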
6ba8475b25a768b98f6094c5b04ba522fbead45a
Hyperparticle/lct-master
charles-university/statistical-nlp/assignment-3/tag.py
[ "MIT" ]
Python
open_text
<not_specific>
def open_text(filename): """Reads a text line by line, applies light preprocessing, and returns an array of words and tags""" with open(filename, encoding='iso-8859-2') as f: content = f.readlines() preprocess = lambda word: tuple(word.strip().rsplit('/', 1)) return [preprocess(word) for word in content]
Reads a text line by line, applies light preprocessing, and returns an array of words and tags
Reads a text line by line, applies light preprocessing, and returns an array of words and tags
[ "Reads", "a", "text", "line", "by", "line", "applies", "light", "preprocessing", "and", "returns", "an", "array", "of", "words", "and", "tags" ]
def open_text(filename): with open(filename, encoding='iso-8859-2') as f: content = f.readlines() preprocess = lambda word: tuple(word.strip().rsplit('/', 1)) return [preprocess(word) for word in content]
[ "def", "open_text", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "encoding", "=", "'iso-8859-2'", ")", "as", "f", ":", "content", "=", "f", ".", "readlines", "(", ")", "preprocess", "=", "lambda", "word", ":", "tuple", "(", "word", ".", "strip", "(", ")", ".", "rsplit", "(", "'/'", ",", "1", ")", ")", "return", "[", "preprocess", "(", "word", ")", "for", "word", "in", "content", "]" ]
Reads a text line by line, applies light preprocessing, and returns an array of words and tags
[ "Reads", "a", "text", "line", "by", "line", "applies", "light", "preprocessing", "and", "returns", "an", "array", "of", "words", "and", "tags" ]
[ "\"\"\"Reads a text line by line, applies light preprocessing, and returns an array of words and tags\"\"\"" ]
[ { "param": "filename", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "filename", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def open_text(filename): with open(filename, encoding='iso-8859-2') as f: content = f.readlines() preprocess = lambda word: tuple(word.strip().rsplit('/', 1)) return [preprocess(word) for word in content]
37
628
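A self-contained sketch for `open_text`; it first writes a tiny word/tag file, since the function expects ISO-8859-2 text with one `word/tag` token per line.

with open("sample.ptg", "w", encoding="iso-8859-2") as f:
    f.write("dog/NN\nruns/VBZ\n")

print(open_text("sample.ptg"))  # -> [('dog', 'NN'), ('runs', 'VBZ')]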
9b15eca2a02be787e6c3a05fff838aba8bbda7f9
jschmidtnj/CS115
hw5.py
[ "MIT" ]
Python
fast_lucas
<not_specific>
def fast_lucas(n): '''Returns the nth Lucas number using the memoization technique shown in class and lab. The Lucas numbers are as follows: [2, 1, 3, 4, 7, 11, ...]''' def lucasMemo(n, memo): if n in memo: return memo[n] if n == 0: result = 2 elif n == 1: result = 1 else: result = lucasMemo(n-1, memo) + lucasMemo(n-2, memo) memo[n] = result return result return lucasMemo(n, {})
Returns the nth Lucas number using the memoization technique shown in class and lab. The Lucas numbers are as follows: [2, 1, 3, 4, 7, 11, ...]
Returns the nth Lucas number using the memoization technique shown in class and lab.
[ "Returns", "the", "nth", "Lucas", "number", "using", "the", "memoization", "technique", "shown", "in", "class", "and", "lab", "." ]
def fast_lucas(n): def lucasMemo(n, memo): if n in memo: return memo[n] if n == 0: result = 2 elif n == 1: result = 1 else: result = lucasMemo(n-1, memo) + lucasMemo(n-2, memo) memo[n] = result return result return lucasMemo(n, {})
[ "def", "fast_lucas", "(", "n", ")", ":", "def", "lucasMemo", "(", "n", ",", "memo", ")", ":", "if", "n", "in", "memo", ":", "return", "memo", "[", "n", "]", "if", "n", "==", "0", ":", "result", "=", "2", "elif", "n", "==", "1", ":", "result", "=", "1", "else", ":", "result", "=", "lucasMemo", "(", "n", "-", "1", ",", "memo", ")", "+", "lucasMemo", "(", "n", "-", "2", ",", "memo", ")", "memo", "[", "n", "]", "=", "result", "return", "result", "return", "lucasMemo", "(", "n", ",", "{", "}", ")" ]
Returns the nth Lucas number using the memoization technique shown in class and lab.
[ "Returns", "the", "nth", "Lucas", "number", "using", "the", "memoization", "technique", "shown", "in", "class", "and", "lab", "." ]
[ "'''Returns the nth Lucas number using the memoization technique\n shown in class and lab. The Lucas numbers are as follows:\n [2, 1, 3, 4, 7, 11, ...]'''" ]
[ { "param": "n", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "n", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def fast_lucas(n): def lucasMemo(n, memo): if n in memo: return memo[n] if n == 0: result = 2 elif n == 1: result = 1 else: result = lucasMemo(n-1, memo) + lucasMemo(n-2, memo) memo[n] = result return result return lucasMemo(n, {})
38
371
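A quick check of `fast_lucas` against the sequence listed in its docstring:

print([fast_lucas(n) for n in range(6)])  # -> [2, 1, 3, 4, 7, 11]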
4b509323441ad1cfa76f24e0b712b509f5f23a01
fangohr/oommf-python
vision/oommf/mifgen.py
[ "BSD-2-Clause" ]
Python
_save_mif
<not_specific>
def _save_mif(sim_object, target): """ save_mif(sim_object) Function takes a simulation instance and then saves the mif to file in order to run a simulation, and also += 1 to the N_mifs in order to keep track of multiple simulation steps, ie if a parameter will change. Parameters ---------- sim_object : instance A simulation instance. Returns ------- string The path to the mif file """ path = sim_object.name + "_" + str(sim_object.t) + "_" + str(target) + ".mif" f = open(path, 'w') f.write(sim_object.mif) return path
save_mif(sim_object) Function takes a simulation instance and then saves the mif to file in order to run a simulation, and also += 1 to the N_mifs in order to keep track of multiple simulation steps, ie if a parameter will change. Parameters ---------- sim_object : instance A simulation instance. Returns ------- string The path to the mif file
save_mif(sim_object) Function takes a simulation instance and then saves the mif to file in order to run a simulation, and also += 1 to the N_mifs in order to keep track of multiple simulation steps, ie if a parameter will change. Parameters sim_object : instance A simulation instance. Returns string The path to the mif file
[ "save_mif", "(", "sim_object", ")", "Function", "takes", "a", "simulation", "instance", "and", "then", "saves", "the", "mif", "to", "file", "in", "order", "to", "run", "a", "simulation", "and", "also", "+", "=", "1", "to", "the", "N_mifs", "in", "order", "to", "keep", "track", "of", "multiple", "simulation", "steps", "ie", "if", "a", "parameter", "will", "change", ".", "Parameters", "sim_object", ":", "instance", "A", "simulation", "instance", ".", "Returns", "string", "The", "path", "to", "the", "mif", "file" ]
def _save_mif(sim_object, target): path = sim_object.name + "_" + str(sim_object.t) + "_" + str(target) + ".mif" f = open(path, 'w') f.write(sim_object.mif) return path
[ "def", "_save_mif", "(", "sim_object", ",", "target", ")", ":", "path", "=", "sim_object", ".", "name", "+", "\"_\"", "+", "str", "(", "sim_object", ".", "t", ")", "+", "\"_\"", "+", "str", "(", "target", ")", "+", "\".mif\"", "f", "=", "open", "(", "path", ",", "'w'", ")", "f", ".", "write", "(", "sim_object", ".", "mif", ")", "return", "path" ]
save_mif(sim_object) Function takes a simulation instance and then saves the mif to file in order to run a simulation, and also += 1 to the N_mifs in order to keep track of multiple simulation steps, ie if a parameter will change.
[ "save_mif", "(", "sim_object", ")", "Function", "takes", "a", "simulation", "instance", "and", "then", "saves", "the", "mif", "to", "file", "in", "order", "to", "run", "a", "simulation", "and", "also", "+", "=", "1", "to", "the", "N_mifs", "in", "order", "to", "keep", "track", "of", "multiple", "simulation", "steps", "ie", "if", "a", "parameter", "will", "change", "." ]
[ "\"\"\"\n save_mif(sim_object)\n\n Function takes a simulation instance and then saves the mif to file in\n order to run a simulation, and also += 1 to the N_mifs in order to keep\n track of multiple simulation steps, ie if a parameter will change.\n \n Parameters\n ----------\n sim_object : instance\n A simulation instance.\n \n Returns\n -------\n string\n The path to the mif file\n \"\"\"" ]
[ { "param": "sim_object", "type": null }, { "param": "target", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "sim_object", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "target", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _save_mif(sim_object, target): path = sim_object.name + "_" + str(sim_object.t) + "_" + str(target) + ".mif" f = open(path, 'w') f.write(sim_object.mif) return path
39
157
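A call sketch for `_save_mif` with a stand-in simulation object, since the real simulation class is not part of the row. Note that the extracted code leaves the opened file handle to be closed by garbage collection rather than closing it explicitly.

from types import SimpleNamespace

sim = SimpleNamespace(name="demo", t=0, mif="# MIF 2.1\n")  # hypothetical stand-in
path = _save_mif(sim, target=1)
print(path)  # -> demo_0_1.mif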
fdbaead8c186a6baca4ad86b5dbf22f4c5e3e1b2
masenf/metaforward
src/metaforward.py
[ "BSD-2-Clause" ]
Python
shadowed_attributes_from_bases
<not_specific>
def shadowed_attributes_from_bases(mcs, bases, dct=None): """ Collect attributes from a base class and the dct of a class under construction. :param bases: sequence of types :param dct: optional dict of attributes for a class under construction :return: set of attributes which should not be proxied """ shadowed_attributes = set() if dct is not None: shadowed_attributes.update(dct.keys()) for base in bases: shadowed_attributes.update(dir(base)) return shadowed_attributes
Collect attributes from a base class and the dct of a class under construction. :param bases: sequence of types :param dct: optional dict of attributes for a class under construction :return: set of attributes which should not be proxied
Collect attributes from a base class and the dct of a class under construction.
[ "Collect", "attributes", "from", "a", "base", "class", "and", "the", "dct", "of", "a", "class", "under", "construction", "." ]
def shadowed_attributes_from_bases(mcs, bases, dct=None): shadowed_attributes = set() if dct is not None: shadowed_attributes.update(dct.keys()) for base in bases: shadowed_attributes.update(dir(base)) return shadowed_attributes
[ "def", "shadowed_attributes_from_bases", "(", "mcs", ",", "bases", ",", "dct", "=", "None", ")", ":", "shadowed_attributes", "=", "set", "(", ")", "if", "dct", "is", "not", "None", ":", "shadowed_attributes", ".", "update", "(", "dct", ".", "keys", "(", ")", ")", "for", "base", "in", "bases", ":", "shadowed_attributes", ".", "update", "(", "dir", "(", "base", ")", ")", "return", "shadowed_attributes" ]
Collect attributes from a base class and the dct of a class under construction.
[ "Collect", "attributes", "from", "a", "base", "class", "and", "the", "dct", "of", "a", "class", "under", "construction", "." ]
[ "\"\"\"\n Collect attributes from a base class and the dct of a class under construction.\n\n :param bases: sequence of types\n :param dct: optional dict of attributes for a class under construction\n :return: set of attributes which should not be proxied\n \"\"\"" ]
[ { "param": "mcs", "type": null }, { "param": "bases", "type": null }, { "param": "dct", "type": null } ]
{ "returns": [ { "docstring": "set of attributes which should not be proxied", "docstring_tokens": [ "set", "of", "attributes", "which", "should", "not", "be", "proxied" ], "type": null } ], "raises": [], "params": [ { "identifier": "mcs", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "bases", "type": null, "docstring": "sequence of types", "docstring_tokens": [ "sequence", "of", "types" ], "default": null, "is_optional": null }, { "identifier": "dct", "type": null, "docstring": "optional dict of attributes for a class under construction", "docstring_tokens": [ "optional", "dict", "of", "attributes", "for", "a", "class", "under", "construction" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def shadowed_attributes_from_bases(mcs, bases, dct=None): shadowed_attributes = set() if dct is not None: shadowed_attributes.update(dct.keys()) for base in bases: shadowed_attributes.update(dir(base)) return shadowed_attributes
40
867
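The function above reads like a metaclass classmethod (its first parameter is `mcs`, which the body never uses), so a direct-call sketch can pass any placeholder for it:

class Base:
    def ping(self):
        return "pong"

shadowed = shadowed_attributes_from_bases(None, (Base,), dct={"extra": 1})
print("ping" in shadowed, "extra" in shadowed)  # -> True True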
efb7d56381ffb678d115f43d13a2057ef73b85d9
nuxy/bittrex_autotrader
bittrex_autotrader/__main__.py
[ "MIT" ]
Python
_list_of_dict_filter_by
<not_specific>
def _list_of_dict_filter_by(data, key, value): """ Returns list of dictionary items filtered by key/value. Args: data (dict): Data to filter. key (str): Dictionary key search. value (str): Dictionary key value match. Returns: list """ return [ item for i, item in enumerate(data) if data[i].get(key) == value ]
Returns list of dictionary items filtered by key/value. Args: data (dict): Data to filter. key (str): Dictionary key search. value (str): Dictionary key value match. Returns: list
Returns list of dictionary items filtered by key/value.
[ "Returns", "list", "of", "dictionary", "items", "filtered", "by", "key", "/", "value", "." ]
def _list_of_dict_filter_by(data, key, value): return [ item for i, item in enumerate(data) if data[i].get(key) == value ]
[ "def", "_list_of_dict_filter_by", "(", "data", ",", "key", ",", "value", ")", ":", "return", "[", "item", "for", "i", ",", "item", "in", "enumerate", "(", "data", ")", "if", "data", "[", "i", "]", ".", "get", "(", "key", ")", "==", "value", "]" ]
Returns list of dictionary items filtered by key/value.
[ "Returns", "list", "of", "dictionary", "items", "filtered", "by", "key", "/", "value", "." ]
[ "\"\"\"\n Returns list of dictionary items filtered by key/value.\n\n Args:\n data (dict):\n Data to filter.\n key (str):\n Dictionary key search.\n value (str):\n Dictionary key value match.\n\n Returns:\n list\n \"\"\"" ]
[ { "param": "data", "type": null }, { "param": "key", "type": null }, { "param": "value", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": "Data to filter.", "docstring_tokens": [ "Data", "to", "filter", "." ], "default": null, "is_optional": false }, { "identifier": "key", "type": null, "docstring": "Dictionary key search.", "docstring_tokens": [ "Dictionary", "key", "search", "." ], "default": null, "is_optional": false }, { "identifier": "value", "type": null, "docstring": "Dictionary key value match.", "docstring_tokens": [ "Dictionary", "key", "value", "match", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def _list_of_dict_filter_by(data, key, value): return [ item for i, item in enumerate(data) if data[i].get(key) == value ]
41
411
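A filtering sketch for `_list_of_dict_filter_by`, with invented order records:

orders = [{"Exchange": "BTC-LTC", "Quantity": 2},
          {"Exchange": "BTC-ETH", "Quantity": 5}]
print(_list_of_dict_filter_by(orders, "Exchange", "BTC-ETH"))
# -> [{'Exchange': 'BTC-ETH', 'Quantity': 5}]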
95e101001497dcd38c9653f10ac6de81199769ab
d-chambers/OpenSarToolkit
ost/helpers/scihub.py
[ "MIT" ]
Python
connect
<not_specific>
def connect(uname=None, pword=None, base_url="https://apihub.copernicus.eu/apihub"): """Generates an opener for the Copernicus apihub/dhus :param uname: username of Copernicus' scihub :type uname: str :param pword: password of Copernicus' scihub :type pword: str :param base_url: :return: an urllib opener instance for Copernicus' scihub :rtype: opener object """ if not uname: print( " If you do not have a Copernicus Scihub user" " account go to: https://scihub.copernicus.eu" ) uname = input(" Your Copernicus Scihub Username:") if not pword: pword = getpass.getpass(" Your Copernicus Scihub Password:") # create opener manager = urllib.request.HTTPPasswordMgrWithDefaultRealm() manager.add_password(None, base_url, uname, pword) handler = urllib.request.HTTPBasicAuthHandler(manager) opener = urllib.request.build_opener(handler) return opener
Generates an opener for the Copernicus apihub/dhus :param uname: username of Copernicus' scihub :type uname: str :param pword: password of Copernicus' scihub :type pword: str :param base_url: :return: an urllib opener instance for Copernicus' scihub :rtype: opener object
Generates an opener for the Copernicus apihub/dhus
[ "Generates", "an", "opener", "for", "the", "Copernicus", "apihub", "/", "dhus" ]
def connect(uname=None, pword=None, base_url="https://apihub.copernicus.eu/apihub"): if not uname: print( " If you do not have a Copernicus Scihub user" " account go to: https://scihub.copernicus.eu" ) uname = input(" Your Copernicus Scihub Username:") if not pword: pword = getpass.getpass(" Your Copernicus Scihub Password:") manager = urllib.request.HTTPPasswordMgrWithDefaultRealm() manager.add_password(None, base_url, uname, pword) handler = urllib.request.HTTPBasicAuthHandler(manager) opener = urllib.request.build_opener(handler) return opener
[ "def", "connect", "(", "uname", "=", "None", ",", "pword", "=", "None", ",", "base_url", "=", "\"https://apihub.copernicus.eu/apihub\"", ")", ":", "if", "not", "uname", ":", "print", "(", "\" If you do not have a Copernicus Scihub user\"", "\" account go to: https://scihub.copernicus.eu\"", ")", "uname", "=", "input", "(", "\" Your Copernicus Scihub Username:\"", ")", "if", "not", "pword", ":", "pword", "=", "getpass", ".", "getpass", "(", "\" Your Copernicus Scihub Password:\"", ")", "manager", "=", "urllib", ".", "request", ".", "HTTPPasswordMgrWithDefaultRealm", "(", ")", "manager", ".", "add_password", "(", "None", ",", "base_url", ",", "uname", ",", "pword", ")", "handler", "=", "urllib", ".", "request", ".", "HTTPBasicAuthHandler", "(", "manager", ")", "opener", "=", "urllib", ".", "request", ".", "build_opener", "(", "handler", ")", "return", "opener" ]
Generates an opener for the Copernicus apihub/dhus
[ "Generates", "an", "opener", "for", "the", "Copernicus", "apihub", "/", "dhus" ]
[ "\"\"\"Generates an opener for the Copernicus apihub/dhus\n\n\n :param uname: username of Copernicus' scihub\n :type uname: str\n :param pword: password of Copernicus' scihub\n :type pword: str\n :param base_url:\n :return: an urllib opener instance for Copernicus' scihub\n :rtype: opener object\n \"\"\"", "# create opener" ]
[ { "param": "uname", "type": null }, { "param": "pword", "type": null }, { "param": "base_url", "type": null } ]
{ "returns": [ { "docstring": "an urllib opener instance for Copernicus' scihub", "docstring_tokens": [ "an", "urllib", "opener", "instance", "for", "Copernicus", "'", "scihub" ], "type": "opener object" } ], "raises": [], "params": [ { "identifier": "uname", "type": null, "docstring": "username of Copernicus' scihub", "docstring_tokens": [ "username", "of", "Copernicus", "'", "scihub" ], "default": null, "is_optional": null }, { "identifier": "pword", "type": null, "docstring": "password of Copernicus' scihub", "docstring_tokens": [ "password", "of", "Copernicus", "'", "scihub" ], "default": null, "is_optional": null }, { "identifier": "base_url", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import getpass import urllib def connect(uname=None, pword=None, base_url="https://apihub.copernicus.eu/apihub"): if not uname: print( " If you do not have a Copernicus Scihub user" " account go to: https://scihub.copernicus.eu" ) uname = input(" Your Copernicus Scihub Username:") if not pword: pword = getpass.getpass(" Your Copernicus Scihub Password:") manager = urllib.request.HTTPPasswordMgrWithDefaultRealm() manager.add_password(None, base_url, uname, pword) handler = urllib.request.HTTPBasicAuthHandler(manager) opener = urllib.request.build_opener(handler) return opener
43
125
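Building the opener returned by `connect` needs no network access; the credentials below are placeholders and the opener is only constructed, never used for a request.

opener = connect(uname="demo_user", pword="demo_pass")  # placeholder credentials
print(type(opener).__name__)  # -> OpenerDirector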
32e3f226d9e450744b37a06b1b5e8904828a3571
beli302/Pitches
virtual/lib/python3.6/site-packages/flask_uploads.py
[ "MIT" ]
Python
patch_request_class
<not_specific>
def patch_request_class(app, size=64 * 1024 * 1024): """ By default, Flask will accept uploads to an arbitrary size. While Werkzeug switches uploads from memory to a temporary file when they hit 500 KiB, it's still possible for someone to overload your disk space with a gigantic file. This patches the app's request class's `~werkzeug.BaseRequest.max_content_length` attribute so that any upload larger than the given size is rejected with an HTTP error. .. note:: In Flask 0.6, you can do this by setting the `MAX_CONTENT_LENGTH` setting, without patching the request class. To emulate this behavior, you can pass `None` as the size (you must pass it explicitly). That is the best way to call this function, as it won't break the Flask 0.6 functionality if it exists. .. versionchanged:: 0.1.1 :param app: The app to patch the request class of. :param size: The maximum size to accept, in bytes. The default is 64 MiB. If it is `None`, the app's `MAX_CONTENT_LENGTH` configuration setting will be used to patch. """ if size is None: if isinstance(app.request_class.__dict__['max_content_length'], property): return size = app.config.get('MAX_CONTENT_LENGTH') reqclass = app.request_class patched = type(reqclass.__name__, (reqclass,), {'max_content_length': size}) app.request_class = patched
By default, Flask will accept uploads to an arbitrary size. While Werkzeug switches uploads from memory to a temporary file when they hit 500 KiB, it's still possible for someone to overload your disk space with a gigantic file. This patches the app's request class's `~werkzeug.BaseRequest.max_content_length` attribute so that any upload larger than the given size is rejected with an HTTP error. .. note:: In Flask 0.6, you can do this by setting the `MAX_CONTENT_LENGTH` setting, without patching the request class. To emulate this behavior, you can pass `None` as the size (you must pass it explicitly). That is the best way to call this function, as it won't break the Flask 0.6 functionality if it exists. .. versionchanged:: 0.1.1 :param app: The app to patch the request class of. :param size: The maximum size to accept, in bytes. The default is 64 MiB. If it is `None`, the app's `MAX_CONTENT_LENGTH` configuration setting will be used to patch.
By default, Flask will accept uploads to an arbitrary size. While Werkzeug switches uploads from memory to a temporary file when they hit 500 KiB, it's still possible for someone to overload your disk space with a gigantic file. This patches the app's request class's `~werkzeug.BaseRequest.max_content_length` attribute so that any upload larger than the given size is rejected with an HTTP error. In Flask 0.6, you can do this by setting the `MAX_CONTENT_LENGTH` setting, without patching the request class. To emulate this behavior, you can pass `None` as the size (you must pass it explicitly). That is the best way to call this function, as it won't break the Flask 0.6 functionality if it exists.
[ "By", "default", "Flask", "will", "accept", "uploads", "to", "an", "arbitrary", "size", ".", "While", "Werkzeug", "switches", "uploads", "from", "memory", "to", "a", "temporary", "file", "when", "they", "hit", "500", "KiB", "it", "'", "s", "still", "possible", "for", "someone", "to", "overload", "your", "disk", "space", "with", "a", "gigantic", "file", ".", "This", "patches", "the", "app", "'", "s", "request", "class", "'", "s", "`", "~werkzeug", ".", "BaseRequest", ".", "max_content_length", "`", "attribute", "so", "that", "any", "upload", "larger", "than", "the", "given", "size", "is", "rejected", "with", "an", "HTTP", "error", ".", "In", "Flask", "0", ".", "6", "you", "can", "do", "this", "by", "setting", "the", "`", "MAX_CONTENT_LENGTH", "`", "setting", "without", "patching", "the", "request", "class", ".", "To", "emulate", "this", "behavior", "you", "can", "pass", "`", "None", "`", "as", "the", "size", "(", "you", "must", "pass", "it", "explicitly", ")", ".", "That", "is", "the", "best", "way", "to", "call", "this", "function", "as", "it", "won", "'", "t", "break", "the", "Flask", "0", ".", "6", "functionality", "if", "it", "exists", "." ]
def patch_request_class(app, size=64 * 1024 * 1024): if size is None: if isinstance(app.request_class.__dict__['max_content_length'], property): return size = app.config.get('MAX_CONTENT_LENGTH') reqclass = app.request_class patched = type(reqclass.__name__, (reqclass,), {'max_content_length': size}) app.request_class = patched
[ "def", "patch_request_class", "(", "app", ",", "size", "=", "64", "*", "1024", "*", "1024", ")", ":", "if", "size", "is", "None", ":", "if", "isinstance", "(", "app", ".", "request_class", ".", "__dict__", "[", "'max_content_length'", "]", ",", "property", ")", ":", "return", "size", "=", "app", ".", "config", ".", "get", "(", "'MAX_CONTENT_LENGTH'", ")", "reqclass", "=", "app", ".", "request_class", "patched", "=", "type", "(", "reqclass", ".", "__name__", ",", "(", "reqclass", ",", ")", ",", "{", "'max_content_length'", ":", "size", "}", ")", "app", ".", "request_class", "=", "patched" ]
By default, Flask will accept uploads to an arbitrary size.
[ "By", "default", "Flask", "will", "accept", "uploads", "to", "an", "arbitrary", "size", "." ]
[ "\"\"\"\n By default, Flask will accept uploads to an arbitrary size. While Werkzeug\n switches uploads from memory to a temporary file when they hit 500 KiB,\n it's still possible for someone to overload your disk space with a\n gigantic file.\n\n This patches the app's request class's\n `~werkzeug.BaseRequest.max_content_length` attribute so that any upload\n larger than the given size is rejected with an HTTP error.\n\n .. note::\n\n In Flask 0.6, you can do this by setting the `MAX_CONTENT_LENGTH`\n setting, without patching the request class. To emulate this behavior,\n you can pass `None` as the size (you must pass it explicitly). That is\n the best way to call this function, as it won't break the Flask 0.6\n functionality if it exists.\n\n .. versionchanged:: 0.1.1\n\n :param app: The app to patch the request class of.\n :param size: The maximum size to accept, in bytes. The default is 64 MiB.\n If it is `None`, the app's `MAX_CONTENT_LENGTH` configuration\n setting will be used to patch.\n \"\"\"" ]
[ { "param": "app", "type": null }, { "param": "size", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "app", "type": null, "docstring": "The app to patch the request class of.", "docstring_tokens": [ "The", "app", "to", "patch", "the", "request", "class", "of", "." ], "default": null, "is_optional": null }, { "identifier": "size", "type": null, "docstring": "The maximum size to accept, in bytes. The default is 64 MiB.\nIf it is `None`, the app's `MAX_CONTENT_LENGTH` configuration\nsetting will be used to patch.", "docstring_tokens": [ "The", "maximum", "size", "to", "accept", "in", "bytes", ".", "The", "default", "is", "64", "MiB", ".", "If", "it", "is", "`", "None", "`", "the", "app", "'", "s", "`", "MAX_CONTENT_LENGTH", "`", "configuration", "setting", "will", "be", "used", "to", "patch", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def patch_request_class(app, size=64 * 1024 * 1024): if size is None: if isinstance(app.request_class.__dict__['max_content_length'], property): return size = app.config.get('MAX_CONTENT_LENGTH') reqclass = app.request_class patched = type(reqclass.__name__, (reqclass,), {'max_content_length': size}) app.request_class = patched
44
142
303426d5ee7004b9dd1a7767a2408579d6fb2033
neptune-ml/sacred
sacred/commandline_options.py
[ "MIT" ]
Python
apply
null
def apply(cls, args, run): """Add priority info for this run.""" try: priority = float(args) except ValueError: raise ValueError( "The PRIORITY argument must be a number! " "(but was '{}')".format(args) ) run.meta_info["priority"] = priority
Add priority info for this run.
Add priority info for this run.
[ "Add", "priority", "info", "for", "this", "run", "." ]
def apply(cls, args, run): try: priority = float(args) except ValueError: raise ValueError( "The PRIORITY argument must be a number! " "(but was '{}')".format(args) ) run.meta_info["priority"] = priority
[ "def", "apply", "(", "cls", ",", "args", ",", "run", ")", ":", "try", ":", "priority", "=", "float", "(", "args", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"The PRIORITY argument must be a number! \"", "\"(but was '{}')\"", ".", "format", "(", "args", ")", ")", "run", ".", "meta_info", "[", "\"priority\"", "]", "=", "priority" ]
Add priority info for this run.
[ "Add", "priority", "info", "for", "this", "run", "." ]
[ "\"\"\"Add priority info for this run.\"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "args", "type": null }, { "param": "run", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "args", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "run", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def apply(cls, args, run): try: priority = float(args) except ValueError: raise ValueError( "The PRIORITY argument must be a number! " "(but was '{}')".format(args) ) run.meta_info["priority"] = priority
45
194
866a30195e6c69b24064d40dcd0432c7464bdea9
tvhahn/Tool-Wear-Files
load_data.py
[ "MIT" ]
Python
tool_no_apply
<not_specific>
def tool_no_apply(cols): """Gets the tool number from the PMC signal Explanation =========== Same explanation as in the cut_signal_apply function """ pmc = cols[0] if (pmc - 64) > 0: return int(pmc - 64) else: return int(pmc)
Gets the tool number from the PMC signal Explanation =========== Same explanation as in the cut_signal_apply function
Gets the tool number from the PMC signal Explanation Same explanation as in the cut_signal_apply function
[ "Gets", "the", "tool", "number", "from", "the", "PMC", "signal", "Explanation", "Same", "explanation", "as", "in", "the", "cut_signal_apply", "function" ]
def tool_no_apply(cols): pmc = cols[0] if (pmc - 64) > 0: return int(pmc - 64) else: return int(pmc)
[ "def", "tool_no_apply", "(", "cols", ")", ":", "pmc", "=", "cols", "[", "0", "]", "if", "(", "pmc", "-", "64", ")", ">", "0", ":", "return", "int", "(", "pmc", "-", "64", ")", "else", ":", "return", "int", "(", "pmc", ")" ]
Gets the tool number from the PMC signal Explanation
[ "Gets", "the", "tool", "number", "from", "the", "PMC", "signal", "Explanation" ]
[ "\"\"\"Gets the tool number from the PMC signal\n \n Explanation\n ===========\n Same explanation as in the cut_signal_apply function\n\n \"\"\"" ]
[ { "param": "cols", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cols", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def tool_no_apply(cols): pmc = cols[0] if (pmc - 64) > 0: return int(pmc - 64) else: return int(pmc)
46
531
323c8a621d742c6892970c147bed4e6c27b1d7f0
pypyjs/pypy
pypy/module/cpyext/import_.py
[ "Apache-2.0", "OpenSSL" ]
Python
PyImport_Import
<not_specific>
def PyImport_Import(space, w_name): """ This is a higher-level interface that calls the current "import hook function". It invokes the __import__() function from the __builtins__ of the current globals. This means that the import is done using whatever import hooks are installed in the current environment, e.g. by rexec or ihooks. Always uses absolute imports.""" caller = space.getexecutioncontext().gettopframe_nohidden() # Get the builtins from current globals if caller is not None: w_globals = caller.w_globals w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) else: # No globals -- use standard builtins, and fake globals w_builtin = space.getbuiltinmodule('__builtin__') w_globals = space.newdict() space.setitem(w_globals, space.wrap("__builtins__"), w_builtin) # Get the __import__ function from the builtins if space.isinstance_w(w_builtin, space.w_dict): w_import = space.getitem(w_builtin, space.wrap("__import__")) else: w_import = space.getattr(w_builtin, space.wrap("__import__")) # Call the __import__ function with the proper argument list # Always use absolute import here. return space.call_function(w_import, w_name, w_globals, w_globals, space.newlist([space.wrap("__doc__")]))
This is a higher-level interface that calls the current "import hook function". It invokes the __import__() function from the __builtins__ of the current globals. This means that the import is done using whatever import hooks are installed in the current environment, e.g. by rexec or ihooks. Always uses absolute imports.
This is a higher-level interface that calls the current "import hook function". It invokes the __import__() function from the __builtins__ of the current globals. This means that the import is done using whatever import hooks are installed in the current environment, e.g. by rexec or ihooks. Always uses absolute imports.
[ "This", "is", "a", "higher", "-", "level", "interface", "that", "calls", "the", "current", "\"", "import", "hook", "function", "\"", ".", "It", "invokes", "the", "__import__", "()", "function", "from", "the", "__builtins__", "of", "the", "current", "globals", ".", "This", "means", "that", "the", "import", "is", "done", "using", "whatever", "import", "hooks", "are", "installed", "in", "the", "current", "environment", "e", ".", "g", ".", "by", "rexec", "or", "ihooks", ".", "Always", "uses", "absolute", "imports", "." ]
def PyImport_Import(space, w_name): caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: w_globals = caller.w_globals w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) else: w_builtin = space.getbuiltinmodule('__builtin__') w_globals = space.newdict() space.setitem(w_globals, space.wrap("__builtins__"), w_builtin) if space.isinstance_w(w_builtin, space.w_dict): w_import = space.getitem(w_builtin, space.wrap("__import__")) else: w_import = space.getattr(w_builtin, space.wrap("__import__")) return space.call_function(w_import, w_name, w_globals, w_globals, space.newlist([space.wrap("__doc__")]))
[ "def", "PyImport_Import", "(", "space", ",", "w_name", ")", ":", "caller", "=", "space", ".", "getexecutioncontext", "(", ")", ".", "gettopframe_nohidden", "(", ")", "if", "caller", "is", "not", "None", ":", "w_globals", "=", "caller", ".", "w_globals", "w_builtin", "=", "space", ".", "getitem", "(", "w_globals", ",", "space", ".", "wrap", "(", "'__builtins__'", ")", ")", "else", ":", "w_builtin", "=", "space", ".", "getbuiltinmodule", "(", "'__builtin__'", ")", "w_globals", "=", "space", ".", "newdict", "(", ")", "space", ".", "setitem", "(", "w_globals", ",", "space", ".", "wrap", "(", "\"__builtins__\"", ")", ",", "w_builtin", ")", "if", "space", ".", "isinstance_w", "(", "w_builtin", ",", "space", ".", "w_dict", ")", ":", "w_import", "=", "space", ".", "getitem", "(", "w_builtin", ",", "space", ".", "wrap", "(", "\"__import__\"", ")", ")", "else", ":", "w_import", "=", "space", ".", "getattr", "(", "w_builtin", ",", "space", ".", "wrap", "(", "\"__import__\"", ")", ")", "return", "space", ".", "call_function", "(", "w_import", ",", "w_name", ",", "w_globals", ",", "w_globals", ",", "space", ".", "newlist", "(", "[", "space", ".", "wrap", "(", "\"__doc__\"", ")", "]", ")", ")" ]
This is a higher-level interface that calls the current "import hook function".
[ "This", "is", "a", "higher", "-", "level", "interface", "that", "calls", "the", "current", "\"", "import", "hook", "function", "\"", "." ]
[ "\"\"\"\n This is a higher-level interface that calls the current \"import hook function\".\n It invokes the __import__() function from the __builtins__ of the\n current globals. This means that the import is done using whatever import hooks\n are installed in the current environment, e.g. by rexec or ihooks.\n\n Always uses absolute imports.\"\"\"", "# Get the builtins from current globals", "# No globals -- use standard builtins, and fake globals", "# Get the __import__ function from the builtins", "# Call the __import__ function with the proper argument list", "# Always use absolute import here." ]
[ { "param": "space", "type": null }, { "param": "w_name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "space", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "w_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def PyImport_Import(space, w_name): caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: w_globals = caller.w_globals w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) else: w_builtin = space.getbuiltinmodule('__builtin__') w_globals = space.newdict() space.setitem(w_globals, space.wrap("__builtins__"), w_builtin) if space.isinstance_w(w_builtin, space.w_dict): w_import = space.getitem(w_builtin, space.wrap("__import__")) else: w_import = space.getattr(w_builtin, space.wrap("__import__")) return space.call_function(w_import, w_name, w_globals, w_globals, space.newlist([space.wrap("__doc__")]))
47
54
f134b7410f030c009eefe67e740f832d9cbaa2f8
MirkoLedda/polyoligo
src/polyoligo/lib_utils.py
[ "BSD-2-Clause" ]
Python
seconds_to_hms
<not_specific>
def seconds_to_hms(t): """Formats a datetime.timedelta object to a HH:MM:SS string.""" hours, remainder = divmod(t.total_seconds(), 3600) minutes, seconds = divmod(remainder, 60) hours, minutes, seconds = int(hours), int(minutes), int(seconds) if hours < 10: hours = "0%s" % int(hours) if minutes < 10: minutes = "0%s" % minutes if seconds < 10: seconds = "0%s" % seconds return "%s:%s:%s" % (hours, minutes, seconds)
Formats a datetime.timedelta object to a HH:MM:SS string.
Formats a datetime.timedelta object to a HH:MM:SS string.
[ "Formats", "a", "datetime", ".", "timedelta", "object", "to", "a", "HH", ":", "MM", ":", "SS", "string", "." ]
def seconds_to_hms(t): hours, remainder = divmod(t.total_seconds(), 3600) minutes, seconds = divmod(remainder, 60) hours, minutes, seconds = int(hours), int(minutes), int(seconds) if hours < 10: hours = "0%s" % int(hours) if minutes < 10: minutes = "0%s" % minutes if seconds < 10: seconds = "0%s" % seconds return "%s:%s:%s" % (hours, minutes, seconds)
[ "def", "seconds_to_hms", "(", "t", ")", ":", "hours", ",", "remainder", "=", "divmod", "(", "t", ".", "total_seconds", "(", ")", ",", "3600", ")", "minutes", ",", "seconds", "=", "divmod", "(", "remainder", ",", "60", ")", "hours", ",", "minutes", ",", "seconds", "=", "int", "(", "hours", ")", ",", "int", "(", "minutes", ")", ",", "int", "(", "seconds", ")", "if", "hours", "<", "10", ":", "hours", "=", "\"0%s\"", "%", "int", "(", "hours", ")", "if", "minutes", "<", "10", ":", "minutes", "=", "\"0%s\"", "%", "minutes", "if", "seconds", "<", "10", ":", "seconds", "=", "\"0%s\"", "%", "seconds", "return", "\"%s:%s:%s\"", "%", "(", "hours", ",", "minutes", ",", "seconds", ")" ]
Formats a datetime.timedelta object to a HH:MM:SS string.
[ "Formats", "a", "datetime", ".", "timedelta", "object", "to", "a", "HH", ":", "MM", ":", "SS", "string", "." ]
[ "\"\"\"Formats a datetime.timedelta object to a HH:MM:SS string.\"\"\"" ]
[ { "param": "t", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "t", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def seconds_to_hms(t): hours, remainder = divmod(t.total_seconds(), 3600) minutes, seconds = divmod(remainder, 60) hours, minutes, seconds = int(hours), int(minutes), int(seconds) if hours < 10: hours = "0%s" % int(hours) if minutes < 10: minutes = "0%s" % minutes if seconds < 10: seconds = "0%s" % seconds return "%s:%s:%s" % (hours, minutes, seconds)
48
814
d37711957c262fbe690050937109571e502cf3e6
Pandaaaa906/scrapy-playwright
scrapy_playwright/headers.py
[ "BSD-3-Clause" ]
Python
use_playwright_headers
dict
async def use_playwright_headers( browser_type: str, playwright_request: PlaywrightRequest, scrapy_headers: Headers, ) -> dict: """Return headers from the Playwright request, unaltered""" return playwright_request.headers
Return headers from the Playwright request, unaltered
Return headers from the Playwright request, unaltered
[ "Return", "headers", "from", "the", "Playwright", "request", "unaltered" ]
async def use_playwright_headers( browser_type: str, playwright_request: PlaywrightRequest, scrapy_headers: Headers, ) -> dict: return playwright_request.headers
[ "async", "def", "use_playwright_headers", "(", "browser_type", ":", "str", ",", "playwright_request", ":", "PlaywrightRequest", ",", "scrapy_headers", ":", "Headers", ",", ")", "->", "dict", ":", "return", "playwright_request", ".", "headers" ]
Return headers from the Playwright request, unaltered
[ "Return", "headers", "from", "the", "Playwright", "request", "unaltered" ]
[ "\"\"\"Return headers from the Playwright request, unaltered\"\"\"" ]
[ { "param": "browser_type", "type": "str" }, { "param": "playwright_request", "type": "PlaywrightRequest" }, { "param": "scrapy_headers", "type": "Headers" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "browser_type", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "playwright_request", "type": "PlaywrightRequest", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "scrapy_headers", "type": "Headers", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
async def use_playwright_headers( browser_type: str, playwright_request: PlaywrightRequest, scrapy_headers: Headers, ) -> dict: return playwright_request.headers
49
759
35a98f36b10113ace2ee5dac283ea5b6e9f91f5e
threefoldtech/jumpscaleX_core
install/threesdk/shell.py
[ "Apache-2.0" ]
Python
eval_code
<not_specific>
def eval_code(stmts, locals_=None, globals_=None): """ a helper function to ignore incomplete syntax erros when evaluating code while typing incomplete lines, e.g.: j.clien... """ if not stmts: return try: code = compile(stmts, filename=__name__, mode="eval") except SyntaxError: return try: return eval(code, globals_, locals_) except: return
a helper function to ignore incomplete syntax erros when evaluating code while typing incomplete lines, e.g.: j.clien...
a helper function to ignore incomplete syntax erros when evaluating code while typing incomplete lines, e.g.: j.clien
[ "a", "helper", "function", "to", "ignore", "incomplete", "syntax", "erros", "when", "evaluating", "code", "while", "typing", "incomplete", "lines", "e", ".", "g", ".", ":", "j", ".", "clien" ]
def eval_code(stmts, locals_=None, globals_=None): if not stmts: return try: code = compile(stmts, filename=__name__, mode="eval") except SyntaxError: return try: return eval(code, globals_, locals_) except: return
[ "def", "eval_code", "(", "stmts", ",", "locals_", "=", "None", ",", "globals_", "=", "None", ")", ":", "if", "not", "stmts", ":", "return", "try", ":", "code", "=", "compile", "(", "stmts", ",", "filename", "=", "__name__", ",", "mode", "=", "\"eval\"", ")", "except", "SyntaxError", ":", "return", "try", ":", "return", "eval", "(", "code", ",", "globals_", ",", "locals_", ")", "except", ":", "return" ]
a helper function to ignore incomplete syntax erros when evaluating code while typing incomplete lines, e.g.
[ "a", "helper", "function", "to", "ignore", "incomplete", "syntax", "erros", "when", "evaluating", "code", "while", "typing", "incomplete", "lines", "e", ".", "g", "." ]
[ "\"\"\"\n a helper function to ignore incomplete syntax erros when evaluating code\n while typing incomplete lines, e.g.: j.clien...\n \"\"\"" ]
[ { "param": "stmts", "type": null }, { "param": "locals_", "type": null }, { "param": "globals_", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "stmts", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "locals_", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "globals_", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def eval_code(stmts, locals_=None, globals_=None): if not stmts: return try: code = compile(stmts, filename=__name__, mode="eval") except SyntaxError: return try: return eval(code, globals_, locals_) except: return
51
545
3d22d48525dfc4451cbff275e76eaf0f861d2502
Allthemighty/SataniaBot
src/modules/misc/misc_util.py
[ "MIT" ]
Python
simple_check
<not_specific>
def simple_check(author, channel): """ A predicate used in the wait_for() function, to ensure the user input can only come from the user who activated the command, and in the same channel. :param author: Author object :param channel: Channel object :return: Check function """ def check(message): return message.author == author and message.channel == channel return check
A predicate used in the wait_for() function, to ensure the user input can only come from the user who activated the command, and in the same channel. :param author: Author object :param channel: Channel object :return: Check function
A predicate used in the wait_for() function, to ensure the user input can only come from the user who activated the command, and in the same channel.
[ "A", "predicate", "used", "in", "the", "wait_for", "()", "function", "to", "ensure", "the", "user", "input", "can", "only", "come", "from", "the", "user", "who", "activated", "the", "command", "and", "in", "the", "same", "channel", "." ]
def simple_check(author, channel): def check(message): return message.author == author and message.channel == channel return check
[ "def", "simple_check", "(", "author", ",", "channel", ")", ":", "def", "check", "(", "message", ")", ":", "return", "message", ".", "author", "==", "author", "and", "message", ".", "channel", "==", "channel", "return", "check" ]
A predicate used in the wait_for() function, to ensure the user input can only come from the user who activated the command, and in the same channel.
[ "A", "predicate", "used", "in", "the", "wait_for", "()", "function", "to", "ensure", "the", "user", "input", "can", "only", "come", "from", "the", "user", "who", "activated", "the", "command", "and", "in", "the", "same", "channel", "." ]
[ "\"\"\"\n A predicate used in the wait_for() function, to ensure the user input can only come\n from the user who activated the command, and in the same channel.\n :param author: Author object\n :param channel: Channel object\n :return: Check function\n \"\"\"" ]
[ { "param": "author", "type": null }, { "param": "channel", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "author", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "channel", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def simple_check(author, channel): def check(message): return message.author == author and message.channel == channel return check
52
443
ecd832b296a9381f4398c16dcef906a078177dd4
cmu-sei/usersim
tasks/frequency.py
[ "BSL-1.0" ]
Python
parameters
<not_specific>
def parameters(cls): """ Returns a dictionary with human-readable descriptions of required arguments for the Frequency task. Returns: dict of dicts: Configuration dictionary with the keys 'required' and 'optional', where values are dicts containing the required and optional parameters and their descriptions for the Frequency task, respectively. """ params = {'required': {'task': 'task| the configuration of another task', 'frequency': 'number| positive decimal number - avg number of triggers per hour', 'repetitions': 'int| non-negative integer - 0 for unlimited'}, 'optional': {}} return params
Returns a dictionary with human-readable descriptions of required arguments for the Frequency task. Returns: dict of dicts: Configuration dictionary with the keys 'required' and 'optional', where values are dicts containing the required and optional parameters and their descriptions for the Frequency task, respectively.
Returns a dictionary with human-readable descriptions of required arguments for the Frequency task.
[ "Returns", "a", "dictionary", "with", "human", "-", "readable", "descriptions", "of", "required", "arguments", "for", "the", "Frequency", "task", "." ]
def parameters(cls): params = {'required': {'task': 'task| the configuration of another task', 'frequency': 'number| positive decimal number - avg number of triggers per hour', 'repetitions': 'int| non-negative integer - 0 for unlimited'}, 'optional': {}} return params
[ "def", "parameters", "(", "cls", ")", ":", "params", "=", "{", "'required'", ":", "{", "'task'", ":", "'task| the configuration of another task'", ",", "'frequency'", ":", "'number| positive decimal number - avg number of triggers per hour'", ",", "'repetitions'", ":", "'int| non-negative integer - 0 for unlimited'", "}", ",", "'optional'", ":", "{", "}", "}", "return", "params" ]
Returns a dictionary with human-readable descriptions of required arguments for the Frequency task.
[ "Returns", "a", "dictionary", "with", "human", "-", "readable", "descriptions", "of", "required", "arguments", "for", "the", "Frequency", "task", "." ]
[ "\"\"\" Returns a dictionary with human-readable descriptions of required arguments for the Frequency task.\n\n Returns:\n dict of dicts: Configuration dictionary with the keys 'required' and 'optional', where values are dicts\n containing the required and optional parameters and their descriptions for the Frequency task,\n respectively.\n \"\"\"" ]
[ { "param": "cls", "type": null } ]
{ "returns": [ { "docstring": "dict of dicts: Configuration dictionary with the keys 'required' and 'optional', where values are dicts\ncontaining the required and optional parameters and their descriptions for the Frequency task,\nrespectively.", "docstring_tokens": [ "dict", "of", "dicts", ":", "Configuration", "dictionary", "with", "the", "keys", "'", "required", "'", "and", "'", "optional", "'", "where", "values", "are", "dicts", "containing", "the", "required", "and", "optional", "parameters", "and", "their", "descriptions", "for", "the", "Frequency", "task", "respectively", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def parameters(cls): params = {'required': {'task': 'task| the configuration of another task', 'frequency': 'number| positive decimal number - avg number of triggers per hour', 'repetitions': 'int| non-negative integer - 0 for unlimited'}, 'optional': {}} return params
53
85
3a0d16ef8d51a02e0c9568006f90794aa64c9e5c
russhughes/TurtlePlotBot
lib/oledui.py
[ "MIT" ]
Python
_screen_shot
null
def _screen_shot(uio, *_): """ _screen_shot Append a text screen shot of the OLED display in the screenshot.txt file. This can be triggered by a call to the routine or by long pressing a button on the Pin 0 any time the UI is waiting for a button press. Each screen shot starts with a line consisting of the word "BEGIN". Each row of the display is represented as a line of '.' and 'X' characters where a dark pixel is represented by a '.' and each light pixel is represented by a 'X'. The screenshot ends with a line consisting of the word "END". The screenshot.txt file can contain multiple screenshots. """ print("Writing screenshot... ") with open('screenshots.txt', 'a') as output: print('BEGIN', file=output) for row in range(uio.display.height): for col in range(uio.display.width): if uio.display.pixel(col, row): print('X', sep="", end="", file=output) else: print(".", sep="", end="", file=output) print("", file=output) print("END", file=output) print("done.")
_screen_shot Append a text screen shot of the OLED display in the screenshot.txt file. This can be triggered by a call to the routine or by long pressing a button on the Pin 0 any time the UI is waiting for a button press. Each screen shot starts with a line consisting of the word "BEGIN". Each row of the display is represented as a line of '.' and 'X' characters where a dark pixel is represented by a '.' and each light pixel is represented by a 'X'. The screenshot ends with a line consisting of the word "END". The screenshot.txt file can contain multiple screenshots.
_screen_shot Append a text screen shot of the OLED display in the screenshot.txt file. This can be triggered by a call to the routine or by long pressing a button on the Pin 0 any time the UI is waiting for a button press. Each screen shot starts with a line consisting of the word "BEGIN".
[ "_screen_shot", "Append", "a", "text", "screen", "shot", "of", "the", "OLED", "display", "in", "the", "screenshot", ".", "txt", "file", ".", "This", "can", "be", "triggered", "by", "a", "call", "to", "the", "routine", "or", "by", "long", "pressing", "a", "button", "on", "the", "Pin", "0", "any", "time", "the", "UI", "is", "waiting", "for", "a", "button", "press", ".", "Each", "screen", "shot", "starts", "with", "a", "line", "consisting", "of", "the", "word", "\"", "BEGIN", "\"", "." ]
def _screen_shot(uio, *_): print("Writing screenshot... ") with open('screenshots.txt', 'a') as output: print('BEGIN', file=output) for row in range(uio.display.height): for col in range(uio.display.width): if uio.display.pixel(col, row): print('X', sep="", end="", file=output) else: print(".", sep="", end="", file=output) print("", file=output) print("END", file=output) print("done.")
[ "def", "_screen_shot", "(", "uio", ",", "*", "_", ")", ":", "print", "(", "\"Writing screenshot... \"", ")", "with", "open", "(", "'screenshots.txt'", ",", "'a'", ")", "as", "output", ":", "print", "(", "'BEGIN'", ",", "file", "=", "output", ")", "for", "row", "in", "range", "(", "uio", ".", "display", ".", "height", ")", ":", "for", "col", "in", "range", "(", "uio", ".", "display", ".", "width", ")", ":", "if", "uio", ".", "display", ".", "pixel", "(", "col", ",", "row", ")", ":", "print", "(", "'X'", ",", "sep", "=", "\"\"", ",", "end", "=", "\"\"", ",", "file", "=", "output", ")", "else", ":", "print", "(", "\".\"", ",", "sep", "=", "\"\"", ",", "end", "=", "\"\"", ",", "file", "=", "output", ")", "print", "(", "\"\"", ",", "file", "=", "output", ")", "print", "(", "\"END\"", ",", "file", "=", "output", ")", "print", "(", "\"done.\"", ")" ]
_screen_shot Append a text screen shot of the OLED display in the screenshot.txt file.
[ "_screen_shot", "Append", "a", "text", "screen", "shot", "of", "the", "OLED", "display", "in", "the", "screenshot", ".", "txt", "file", "." ]
[ "\"\"\"\n _screen_shot\n\n Append a text screen shot of the OLED display in the screenshot.txt\n file. This can be triggered by a call to the routine or by long\n pressing a button on the Pin 0 any time the UI is waiting for a\n button press.\n\n Each screen shot starts with a line consisting of the word \"BEGIN\".\n Each row of the display is represented as a line of '.' and 'X'\n characters where a dark pixel is represented by a '.' and each light\n pixel is represented by a 'X'. The screenshot ends with a line\n consisting of the word \"END\". The screenshot.txt file can contain\n multiple screenshots.\n \"\"\"" ]
[ { "param": "uio", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "uio", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _screen_shot(uio, *_): print("Writing screenshot... ") with open('screenshots.txt', 'a') as output: print('BEGIN', file=output) for row in range(uio.display.height): for col in range(uio.display.width): if uio.display.pixel(col, row): print('X', sep="", end="", file=output) else: print(".", sep="", end="", file=output) print("", file=output) print("END", file=output) print("done.")
54
918
2e79e2b725ac9e7e5c08e4e2dcbc552df0a4a442
ooici/coi-services
ion/agents/platform/test/helper.py
[ "BSD-2-Clause" ]
Python
using_actual_rsn_oms_endpoint
<not_specific>
def using_actual_rsn_oms_endpoint(cls): """ Determines whether we are testing against the actual RSN OMS endpoint. This is based on looking up the "USING_ACTUAL_RSN_OMS_ENDPOINT" environment variable, which normally will only be defined as convenient while doing local tests. See OOIION-1352. """ return "yes" == os.getenv('USING_ACTUAL_RSN_OMS_ENDPOINT')
Determines whether we are testing against the actual RSN OMS endpoint. This is based on looking up the "USING_ACTUAL_RSN_OMS_ENDPOINT" environment variable, which normally will only be defined as convenient while doing local tests. See OOIION-1352.
Determines whether we are testing against the actual RSN OMS endpoint. This is based on looking up the "USING_ACTUAL_RSN_OMS_ENDPOINT" environment variable, which normally will only be defined as convenient while doing local tests.
[ "Determines", "whether", "we", "are", "testing", "against", "the", "actual", "RSN", "OMS", "endpoint", ".", "This", "is", "based", "on", "looking", "up", "the", "\"", "USING_ACTUAL_RSN_OMS_ENDPOINT", "\"", "environment", "variable", "which", "normally", "will", "only", "be", "defined", "as", "convenient", "while", "doing", "local", "tests", "." ]
def using_actual_rsn_oms_endpoint(cls): return "yes" == os.getenv('USING_ACTUAL_RSN_OMS_ENDPOINT')
[ "def", "using_actual_rsn_oms_endpoint", "(", "cls", ")", ":", "return", "\"yes\"", "==", "os", ".", "getenv", "(", "'USING_ACTUAL_RSN_OMS_ENDPOINT'", ")" ]
Determines whether we are testing against the actual RSN OMS endpoint.
[ "Determines", "whether", "we", "are", "testing", "against", "the", "actual", "RSN", "OMS", "endpoint", "." ]
[ "\"\"\"\n Determines whether we are testing against the actual RSN OMS endpoint.\n This is based on looking up the \"USING_ACTUAL_RSN_OMS_ENDPOINT\"\n environment variable, which normally will only be defined as\n convenient while doing local tests. See OOIION-1352.\n \"\"\"" ]
[ { "param": "cls", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def using_actual_rsn_oms_endpoint(cls): return "yes" == os.getenv('USING_ACTUAL_RSN_OMS_ENDPOINT')
55
657
cc179d17e4e35730d499aba1ba8c9c604fca0dad
crahal/lit_reviewer
src/pubmed_functions.py
[ "MIT" ]
Python
return_pag
<not_specific>
def return_pag(paper): """ Return the pagination from the paper nest""" try: return paper['MedlineCitation']['Article']['Pagination']['MedlinePgn'] except KeyError: return None
Return the pagination from the paper nest
Return the pagination from the paper nest
[ "Return", "the", "pagination", "from", "the", "paper", "nest" ]
def return_pag(paper): try: return paper['MedlineCitation']['Article']['Pagination']['MedlinePgn'] except KeyError: return None
[ "def", "return_pag", "(", "paper", ")", ":", "try", ":", "return", "paper", "[", "'MedlineCitation'", "]", "[", "'Article'", "]", "[", "'Pagination'", "]", "[", "'MedlinePgn'", "]", "except", "KeyError", ":", "return", "None" ]
Return the pagination from the paper nest
[ "Return", "the", "pagination", "from", "the", "paper", "nest" ]
[ "\"\"\" Return the pagination from the paper nest\"\"\"" ]
[ { "param": "paper", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "paper", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def return_pag(paper): try: return paper['MedlineCitation']['Article']['Pagination']['MedlinePgn'] except KeyError: return None
56
359
2e17d39b74ad622e3ac0ae25d5f59143463bfa85
rafaelcn/cryptography
aes/src/aes/common.py
[ "MIT" ]
Python
add_round_key
null
def add_round_key(state, key): """ Add the given key to the state using XOR. """ for i in range(4): for j in range(4): state[i][j] ^= key[i][j]
Add the given key to the state using XOR.
Add the given key to the state using XOR.
[ "Add", "the", "given", "key", "to", "the", "state", "using", "XOR", "." ]
def add_round_key(state, key): for i in range(4): for j in range(4): state[i][j] ^= key[i][j]
[ "def", "add_round_key", "(", "state", ",", "key", ")", ":", "for", "i", "in", "range", "(", "4", ")", ":", "for", "j", "in", "range", "(", "4", ")", ":", "state", "[", "i", "]", "[", "j", "]", "^=", "key", "[", "i", "]", "[", "j", "]" ]
Add the given key to the state using XOR.
[ "Add", "the", "given", "key", "to", "the", "state", "using", "XOR", "." ]
[ "\"\"\"\n Add the given key to the state using XOR.\n \"\"\"" ]
[ { "param": "state", "type": null }, { "param": "key", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "state", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "key", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def add_round_key(state, key): for i in range(4): for j in range(4): state[i][j] ^= key[i][j]
57
727
e18bd85473e5921ae439f6af3476d616402d6ba0
waikato-datamining/video-frame-processor
src/vfp/_processor.py
[ "MIT" ]
Python
decode_fourcc
<not_specific>
def decode_fourcc(cc): """ Turns the float into a four letter codec string. Taken from here: https://stackoverflow.com/a/49138893/4698227 :param cc: the codec as float :type cc: float :return: the codec string :rtype: str """ return "".join([chr((int(cc) >> 8 * i) & 0xFF) for i in range(4)])
Turns the float into a four letter codec string. Taken from here: https://stackoverflow.com/a/49138893/4698227 :param cc: the codec as float :type cc: float :return: the codec string :rtype: str
Turns the float into a four letter codec string.
[ "Turns", "the", "float", "into", "a", "four", "letter", "codec", "string", "." ]
def decode_fourcc(cc): return "".join([chr((int(cc) >> 8 * i) & 0xFF) for i in range(4)])
[ "def", "decode_fourcc", "(", "cc", ")", ":", "return", "\"\"", ".", "join", "(", "[", "chr", "(", "(", "int", "(", "cc", ")", ">>", "8", "*", "i", ")", "&", "0xFF", ")", "for", "i", "in", "range", "(", "4", ")", "]", ")" ]
Turns the float into a four letter codec string.
[ "Turns", "the", "float", "into", "a", "four", "letter", "codec", "string", "." ]
[ "\"\"\"\n Turns the float into a four letter codec string.\n Taken from here:\n https://stackoverflow.com/a/49138893/4698227\n :param cc: the codec as float\n :type cc: float\n :return: the codec string\n :rtype: str\n \"\"\"" ]
[ { "param": "cc", "type": null } ]
{ "returns": [ { "docstring": "the codec string", "docstring_tokens": [ "the", "codec", "string" ], "type": "str" } ], "raises": [], "params": [ { "identifier": "cc", "type": null, "docstring": "the codec as float", "docstring_tokens": [ "the", "codec", "as", "float" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def decode_fourcc(cc): return "".join([chr((int(cc) >> 8 * i) & 0xFF) for i in range(4)])
58
316
3378601f401cc65b610da88dac290768b028faa7
mwhit74/sau
sau/sau.py
[ "MIT" ]
Python
ac1_mx
<not_specific>
def ac1_mx(w, l, x): """Moment at x - Beam simply supported - Uniformly dist. loads Calculates the moment in the beam at any location, x, along the beam due to a uniformly distributed load. m = w*x/2*(l-x) Args: w (float): uniformly distributed load l (float): length of beam between supports E (float): modulus of elasticity I (float): section modulus x (float): distance along beam from left support Returns: m (tuple(float, str)): maximum positive moment at midspan Notes: 1. Consistent units are the responsibility of the user. 3. For maximum positive moment use x = L/2. """ m = w*x/2.*(l-x) text = (f'm = w*x/2*(l-x) \n' + f'm = {w:.2f}*{x:.2f}/2*({l:.2f}-{x:.2f}) \n' + f'm = {m:.2f}') return m, text
Moment at x - Beam simply supported - Uniformly dist. loads Calculates the moment in the beam at any location, x, along the beam due to a uniformly distributed load. m = w*x/2*(l-x) Args: w (float): uniformly distributed load l (float): length of beam between supports E (float): modulus of elasticity I (float): section modulus x (float): distance along beam from left support Returns: m (tuple(float, str)): maximum positive moment at midspan Notes: 1. Consistent units are the responsibility of the user. 3. For maximum positive moment use x = L/2.
Moment at x - Beam simply supported - Uniformly dist. loads Calculates the moment in the beam at any location, x, along the beam due to a uniformly distributed load.
[ "Moment", "at", "x", "-", "Beam", "simply", "supported", "-", "Uniformly", "dist", ".", "loads", "Calculates", "the", "moment", "in", "the", "beam", "at", "any", "location", "x", "along", "the", "beam", "due", "to", "a", "uniformly", "distributed", "load", "." ]
def ac1_mx(w, l, x): m = w*x/2.*(l-x) text = (f'm = w*x/2*(l-x) \n' + f'm = {w:.2f}*{x:.2f}/2*({l:.2f}-{x:.2f}) \n' + f'm = {m:.2f}') return m, text
[ "def", "ac1_mx", "(", "w", ",", "l", ",", "x", ")", ":", "m", "=", "w", "*", "x", "/", "2.", "*", "(", "l", "-", "x", ")", "text", "=", "(", "f'm = w*x/2*(l-x) \\n'", "+", "f'm = {w:.2f}*{x:.2f}/2*({l:.2f}-{x:.2f}) \\n'", "+", "f'm = {m:.2f}'", ")", "return", "m", ",", "text" ]
Moment at x - Beam simply supported - Uniformly dist.
[ "Moment", "at", "x", "-", "Beam", "simply", "supported", "-", "Uniformly", "dist", "." ]
[ "\"\"\"Moment at x - Beam simply supported - Uniformly dist. loads\n \n Calculates the moment in the beam at any location, x, along the\n beam due to a uniformly distributed load.\n \n m = w*x/2*(l-x)\n \n Args:\n w (float): uniformly distributed load\n \n l (float): length of beam between supports\n \n E (float): modulus of elasticity\n \n I (float): section modulus\n \n x (float): distance along beam from left support\n \n Returns:\n m (tuple(float, str)): maximum positive moment at midspan\n \n Notes:\n 1. Consistent units are the responsibility of the user.\n 3. For maximum positive moment use x = L/2.\n \"\"\"" ]
[ { "param": "w", "type": null }, { "param": "l", "type": null }, { "param": "x", "type": null } ]
{ "returns": [ { "docstring": "m (tuple(float, str)): maximum positive moment at midspan", "docstring_tokens": [ "m", "(", "tuple", "(", "float", "str", "))", ":", "maximum", "positive", "moment", "at", "midspan" ], "type": null } ], "raises": [], "params": [ { "identifier": "w", "type": null, "docstring": "uniformly distributed load", "docstring_tokens": [ "uniformly", "distributed", "load" ], "default": null, "is_optional": false }, { "identifier": "l", "type": null, "docstring": "length of beam between supports", "docstring_tokens": [ "length", "of", "beam", "between", "supports" ], "default": null, "is_optional": false }, { "identifier": "x", "type": null, "docstring": "distance along beam from left support", "docstring_tokens": [ "distance", "along", "beam", "from", "left", "support" ], "default": null, "is_optional": false } ], "outlier_params": [ { "identifier": "E", "type": null, "docstring": "modulus of elasticity", "docstring_tokens": [ "modulus", "of", "elasticity" ], "default": null, "is_optional": false }, { "identifier": "I", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false } ], "others": [] }
def ac1_mx(w, l, x): m = w*x/2.*(l-x) text = (f'm = w*x/2*(l-x) \n' + f'm = {w:.2f}*{x:.2f}/2*({l:.2f}-{x:.2f}) \n' + f'm = {m:.2f}') return m, text
59
185
85c81e489f3398ee5c56508f8cd0e49341a902fc
amole-arup/eng_utilities
eng_utilities/section_utilities.py
[ "MIT" ]
Python
deck_props
<not_specific>
def deck_props(DD, DR, B, TT, TB): """Properties of a trapezoidal deck when given deck depth (DD), rib depth (DR) rib spacing (B), rib width at top (TT), rib width at bottom (TB) - P is the length of the deck on the underside (topside is ignored) - A is the cross-sectional area of concrete per rib - D_AVE is the average depth of concrete NB At the moment no flexural properties are calculated""" A = DD * B + 0.5 * (TT + TB) * DR P = B - TT + TB + ((TT - TB)**2 + 4 * DR**2)**0.5 return {'P': P, 'A': A, 'D_AVE': A / B}
Properties of a trapezoidal deck when given deck depth (DD), rib depth (DR) rib spacing (B), rib width at top (TT), rib width at bottom (TB) - P is the length of the deck on the underside (topside is ignored) - A is the cross-sectional area of concrete per rib - D_AVE is the average depth of concrete NB At the moment no flexural properties are calculated
Properties of a trapezoidal deck when given deck depth (DD), rib depth (DR) rib spacing (B), rib width at top (TT), rib width at bottom (TB) P is the length of the deck on the underside (topside is ignored) A is the cross-sectional area of concrete per rib D_AVE is the average depth of concrete NB At the moment no flexural properties are calculated
[ "Properties", "of", "a", "trapezoidal", "deck", "when", "given", "deck", "depth", "(", "DD", ")", "rib", "depth", "(", "DR", ")", "rib", "spacing", "(", "B", ")", "rib", "width", "at", "top", "(", "TT", ")", "rib", "width", "at", "bottom", "(", "TB", ")", "P", "is", "the", "length", "of", "the", "deck", "on", "the", "underside", "(", "topside", "is", "ignored", ")", "A", "is", "the", "cross", "-", "sectional", "area", "of", "concrete", "per", "rib", "D_AVE", "is", "the", "average", "depth", "of", "concrete", "NB", "At", "the", "moment", "no", "flexural", "properties", "are", "calculated" ]
def deck_props(DD, DR, B, TT, TB): A = DD * B + 0.5 * (TT + TB) * DR P = B - TT + TB + ((TT - TB)**2 + 4 * DR**2)**0.5 return {'P': P, 'A': A, 'D_AVE': A / B}
[ "def", "deck_props", "(", "DD", ",", "DR", ",", "B", ",", "TT", ",", "TB", ")", ":", "A", "=", "DD", "*", "B", "+", "0.5", "*", "(", "TT", "+", "TB", ")", "*", "DR", "P", "=", "B", "-", "TT", "+", "TB", "+", "(", "(", "TT", "-", "TB", ")", "**", "2", "+", "4", "*", "DR", "**", "2", ")", "**", "0.5", "return", "{", "'P'", ":", "P", ",", "'A'", ":", "A", ",", "'D_AVE'", ":", "A", "/", "B", "}" ]
Properties of a trapezoidal deck when given deck depth (DD), rib depth (DR) rib spacing (B), rib width at top (TT), rib width at bottom (TB) P is the length of the deck on the underside (topside is ignored) A is the cross-sectional area of concrete per rib D_AVE is the average depth of concrete
[ "Properties", "of", "a", "trapezoidal", "deck", "when", "given", "deck", "depth", "(", "DD", ")", "rib", "depth", "(", "DR", ")", "rib", "spacing", "(", "B", ")", "rib", "width", "at", "top", "(", "TT", ")", "rib", "width", "at", "bottom", "(", "TB", ")", "P", "is", "the", "length", "of", "the", "deck", "on", "the", "underside", "(", "topside", "is", "ignored", ")", "A", "is", "the", "cross", "-", "sectional", "area", "of", "concrete", "per", "rib", "D_AVE", "is", "the", "average", "depth", "of", "concrete" ]
[ "\"\"\"Properties of a trapezoidal deck when given deck depth (DD), rib depth (DR)\n rib spacing (B), rib width at top (TT), rib width at bottom (TB)\n - P is the length of the deck on the underside (topside is ignored)\n - A is the cross-sectional area of concrete per rib\n - D_AVE is the average depth of concrete\n \n NB At the moment no flexural properties are calculated\"\"\"" ]
[ { "param": "DD", "type": null }, { "param": "DR", "type": null }, { "param": "B", "type": null }, { "param": "TT", "type": null }, { "param": "TB", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "DD", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "DR", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "B", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "TT", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "TB", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def deck_props(DD, DR, B, TT, TB): A = DD * B + 0.5 * (TT + TB) * DR P = B - TT + TB + ((TT - TB)**2 + 4 * DR**2)**0.5 return {'P': P, 'A': A, 'D_AVE': A / B}
60
30
4158684ebdf6a0f7e38126105f4c44de5ba48d04
turnoutnow/game-analytics-pipeline
source/demo/publish_data.py
[ "MIT-0" ]
Python
send_record_batch
null
def send_record_batch(kinesis_client, stream_name, raw_records): """Send a batch of records to Amazon Kinesis.""" # Translate input records into the format needed by the boto3 SDK formatted_records = [] for rec in raw_records: formatted_records.append({'PartitionKey': rec['event']['event_id'], 'Data': json.dumps(rec)}) kinesis_client.put_records(StreamName=stream_name, Records=formatted_records) print('Sent %d records to stream %s.' % (len(formatted_records), stream_name))
Send a batch of records to Amazon Kinesis.
Send a batch of records to Amazon Kinesis.
[ "Send", "a", "batch", "of", "records", "to", "Amazon", "Kinesis", "." ]
def send_record_batch(kinesis_client, stream_name, raw_records): formatted_records = [] for rec in raw_records: formatted_records.append({'PartitionKey': rec['event']['event_id'], 'Data': json.dumps(rec)}) kinesis_client.put_records(StreamName=stream_name, Records=formatted_records) print('Sent %d records to stream %s.' % (len(formatted_records), stream_name))
[ "def", "send_record_batch", "(", "kinesis_client", ",", "stream_name", ",", "raw_records", ")", ":", "formatted_records", "=", "[", "]", "for", "rec", "in", "raw_records", ":", "formatted_records", ".", "append", "(", "{", "'PartitionKey'", ":", "rec", "[", "'event'", "]", "[", "'event_id'", "]", ",", "'Data'", ":", "json", ".", "dumps", "(", "rec", ")", "}", ")", "kinesis_client", ".", "put_records", "(", "StreamName", "=", "stream_name", ",", "Records", "=", "formatted_records", ")", "print", "(", "'Sent %d records to stream %s.'", "%", "(", "len", "(", "formatted_records", ")", ",", "stream_name", ")", ")" ]
Send a batch of records to Amazon Kinesis.
[ "Send", "a", "batch", "of", "records", "to", "Amazon", "Kinesis", "." ]
[ "\"\"\"Send a batch of records to Amazon Kinesis.\"\"\"", "# Translate input records into the format needed by the boto3 SDK" ]
[ { "param": "kinesis_client", "type": null }, { "param": "stream_name", "type": null }, { "param": "raw_records", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "kinesis_client", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "stream_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "raw_records", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import json def send_record_batch(kinesis_client, stream_name, raw_records): formatted_records = [] for rec in raw_records: formatted_records.append({'PartitionKey': rec['event']['event_id'], 'Data': json.dumps(rec)}) kinesis_client.put_records(StreamName=stream_name, Records=formatted_records) print('Sent %d records to stream %s.' % (len(formatted_records), stream_name))
61
111
21cda7ead5143cd6dba8fbe256ccb44e0a991113
baajur/Anakin
tools/external_converter_v2/parser/tensorflow/run_pb.py
[ "Apache-2.0" ]
Python
convert_name_tf2ak
<not_specific>
def convert_name_tf2ak(tf_name, perfix='record_'): ''' conver tf name to ak name :param tf_name: :param perfix: :return: ''' ak_name = tf_name[:] for index, x in enumerate(tf_name): if x == '/': ak_name = ak_name[:index] + '_' + ak_name[index + 1:] return perfix + ak_name
conver tf name to ak name :param tf_name: :param perfix: :return:
conver tf name to ak name
[ "conver", "tf", "name", "to", "ak", "name" ]
def convert_name_tf2ak(tf_name, perfix='record_'): ak_name = tf_name[:] for index, x in enumerate(tf_name): if x == '/': ak_name = ak_name[:index] + '_' + ak_name[index + 1:] return perfix + ak_name
[ "def", "convert_name_tf2ak", "(", "tf_name", ",", "perfix", "=", "'record_'", ")", ":", "ak_name", "=", "tf_name", "[", ":", "]", "for", "index", ",", "x", "in", "enumerate", "(", "tf_name", ")", ":", "if", "x", "==", "'/'", ":", "ak_name", "=", "ak_name", "[", ":", "index", "]", "+", "'_'", "+", "ak_name", "[", "index", "+", "1", ":", "]", "return", "perfix", "+", "ak_name" ]
conver tf name to ak name
[ "conver", "tf", "name", "to", "ak", "name" ]
[ "'''\n conver tf name to ak name\n :param tf_name:\n :param perfix:\n :return:\n '''" ]
[ { "param": "tf_name", "type": null }, { "param": "perfix", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "tf_name", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "perfix", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def convert_name_tf2ak(tf_name, perfix='record_'): ak_name = tf_name[:] for index, x in enumerate(tf_name): if x == '/': ak_name = ak_name[:index] + '_' + ak_name[index + 1:] return perfix + ak_name
62
80

Dataset Card for "thevault-filtered"

Filtered version of The Vault (function). Restricted to Python only, then:

  • Light AST filtering for self-contained functions
  • Embedded with CodeBERT, clustered with k-means into 1024 clusters, and the clusters then manually skimmed for seemingly uninformative functions (a sketch of this pipeline follows after this list).
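
For reference, a minimal sketch of that pipeline under stated assumptions: the checkpoint name ("microsoft/codebert-base"), the mean pooling, the batch size, and the MiniBatchKMeans settings are guesses, since the card does not pin them down, and "records" stands in for the raw rows before filtering.

  import ast

  import numpy as np
  import torch
  from sklearn.cluster import MiniBatchKMeans
  from transformers import AutoModel, AutoTokenizer

  def is_self_contained(code):
      # Light AST check: keep snippets that parse to a single top-level function.
      try:
          tree = ast.parse(code)
      except SyntaxError:
          return False
      return len(tree.body) == 1 and isinstance(
          tree.body[0], (ast.FunctionDef, ast.AsyncFunctionDef))

  tokenizer = AutoTokenizer.from_pretrained("microsoft/codebert-base")
  model = AutoModel.from_pretrained("microsoft/codebert-base").eval()

  @torch.no_grad()
  def embed(batch):
      # Mean-pooled last-layer CodeBERT states over the non-padding tokens.
      enc = tokenizer(batch, padding=True, truncation=True,
                      max_length=512, return_tensors="pt")
      hidden = model(**enc).last_hidden_state             # (B, T, 768)
      mask = enc["attention_mask"].unsqueeze(-1).float()  # (B, T, 1)
      return ((hidden * mask).sum(1) / mask.sum(1)).numpy()

  codes = [r["code"] for r in records if is_self_contained(r["code"])]
  embeddings = np.concatenate([embed(codes[i:i + 32])
                               for i in range(0, len(codes), 32)])

  # 1024 clusters, as in the card; each cluster is then skimmed by hand.
  km = MiniBatchKMeans(n_clusters=1024, random_state=0)
  labels = km.fit_predict(embeddings)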

The clusters excluded and their reasons are as follows:

  excluded = [
    4, # Biochem stuff? Decompiled code
    9, # Empty functions
    33, # Empty functions
    34, # UI stuff, just returns arguments
    37, # Empty functions
    40, # Empty functions
    42, # Empty functions
    44, # _namespace_SIO stuff
    55, # Trivial, e.g. add(a, b) = a + b
    66, # find_by class methods
    67, # Mostly methods, seems not very informative
    77, # openapi_types, returns a fixed dictionary
    78, # Minimal, method stuff
    83, # Locale configuration
    87, # Just returns argument
    101, # Incomplete
    102, # Class methods
    108, # openapi_types
    156, # Empty functions
    164, # Trivial, function aliases
    168, # Class methods
    172, # Empty functions
    173, # Class methods
    175, # Class methods
    181, # Empty functions
    182, # Fixed API stuff
    190, # Fixed specific stuff
    197, # from_dictionary class methods
    198, # Empty functions
    234, # Unimplemented
    246, # Fixed specific stuff
    277, # Empty functions
    280, # Empty functions
    282, # Empty functions
    287, # Trivial, e.g. helloWorld()
    299, # Mostly unfinished
    304, # Empty functions
    310, # Fixed API stuff
    313, # Just modifies globals
    320, # Empty functions
    329, # Takes a credentials object, and runs methods on it
    332, # MangoPi bot
    334, # Empty
    338, # namespace_SIO nonsense
    339, # fn(x) = x
    363, # Empty functions
    370, # Empty
    379, # Empty
    388, # Empty
    392, # Empty functions
    393, # Fixed lists
    409, # Fixed dictionaries
    416, # Aliases to print
    428, # Empty functions
    437, # Empty functions
    444, # Empty
    454, # Mostly just calls methods on arguments
    463, # Mostly just calls methods on arguments
    470, # Fixed dictionaries
    474, # Mostly fixed printing
    465, # OpenAPI fixed dictionaries
    476, # Empty
    477, # Fixed dictionaries
    491, # Trivial
    494, # Lots of fixed string stuff
    496, # Empty
    511, # Empty
    518, # OpenAPI
    521, # Fixed API stuff
    536, # Empty
    540, # Fixed API stuff
    553, # Empty
    555, # Empty
    564, # Empty
    566, # Empty
    568, # cls methods
    573, # Mostly fixed dict stuff
    574, # namespace_SO stuff, more biochem?
    582, # namespace_SO stuff, more biochem?
    602, # Fixed lists
    608, # Mostly cls methods
    617, # Mostly cls methods
    629, # cls methods, fixed lists
    641, # Fixed API stuff
    642, # Empty
    647, # Windows API stuff
    648, # Jupyter stuff
    649, # mostly fixed dicts
    652, # Empty
    660, # Empty
    665, # cls methods
    666, # Empty
    672, # Empty
    680, # fixed dicts
    682, # Empty
    686, # Empty
    687, # Fixed lists elements_sequence
    692, # cls methods
    693, # ASCII art
    704, # Empty
    709, # mqtt send message
    712, # Empty
    715, # Fixed data recoding
    717, # Empty
    722, # cls methods
    725, # cls methods
    734, # cls methods
    737, # Empty
    741, # Trivial cls methods
    742, # Empty
    745, # Fixed strings
    752, # Empty
    758, # Mostly fixed printing
    768, # Empty
    783, # Empty
    784, # Mostly fixed dicts
    802, # Fixed printing
    806, # Empty
    821, # Empty
    824, # stuff like load_performance_win_x64_win_x64_vs2017_settings
    825, # Trivial
    835, # Empty
    851, # Empty
    862, # Empty
    876, # Trivial
    878, # Empty
    887, # Empty
    888, # Mostly fixed dicts
    890, # Mostly fixed dicts
    893, # Empty
    898, # cls methods
    899, # Fixed ['str'] stuff
    906, # Auto-generated or something
    912, # Empty
    924, # Empty
    933, # namespace_SO biochem stuff
    938, # Trivial
    959, # Mostly fixed printing
    963, # API-specific
    965, # cls methods
    967, # cls methods
    970, # Mostly fixed printing
    971, # cls methods
    972, # cls methods
    973, # Empty
    979, # cls methods
    982, # Empty
    983, # Empty
    989, # cls methods
    990, # API specific
    1007, # API specific
    1014, # Empty
]
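
With cluster labels attached to each row (the cluster field visible in the records above), applying the exclusion list is a single filter. The snippet below assumes ds is an already-loaded datasets.Dataset carrying that field; the dataset path is deliberately not spelled out here.

  excluded_set = set(excluded)  # O(1) membership tests instead of list scans
  filtered = ds.filter(lambda row: row["cluster"] not in excluded_set)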

MIT licensed, like the original dataset
