hexsha
stringlengths
40
40
repo
stringlengths
5
121
path
stringlengths
4
227
license
sequence
language
stringclasses
1 value
identifier
stringlengths
1
107
return_type
stringlengths
2
237
original_string
stringlengths
75
13.4k
original_docstring
stringlengths
13
12.9k
docstring
stringlengths
13
2.57k
docstring_tokens
sequence
code
stringlengths
23
1.88k
code_tokens
sequence
short_docstring
stringlengths
1
1.32k
short_docstring_tokens
sequence
comment
sequence
parameters
list
docstring_params
dict
code_with_imports
stringlengths
23
1.88k
idxs
int64
0
611k
cluster
int64
0
1.02k
4f6cdd0017f8c497e4643df0d01eea199daf5873
zjuchenll/dataflow-multithreading-on-tia
tools/parameters/interconnect_parameters.py
[ "MIT" ]
Python
from_dictionary
<not_specific>
def from_dictionary(cls, dictionary): """ Instantiate an InterconnectParameters wrapper from a dictionary. :param dictionary: loaded from a configuration file or elsewhere :return: new InterconnectParameters instance """ # Filter the dictionary with only parameters necessary for the initializer. key_filter_set = {"router_type", "num_router_sources", "num_router_destinations", "num_input_channels", "num_output_channels", "router_buffer_depth", "num_physical_planes"} filtered_interconnect_dictionary = {key: dictionary[key] for key in key_filter_set} # Unpack the dictionary into the initializer. return cls(**filtered_interconnect_dictionary)
Instantiate an InterconnectParameters wrapper from a dictionary. :param dictionary: loaded from a configuration file or elsewhere :return: new InterconnectParameters instance
Instantiate an InterconnectParameters wrapper from a dictionary.
[ "Instantiate", "an", "InterconnectParameters", "wrapper", "from", "a", "dictionary", "." ]
def from_dictionary(cls, dictionary): key_filter_set = {"router_type", "num_router_sources", "num_router_destinations", "num_input_channels", "num_output_channels", "router_buffer_depth", "num_physical_planes"} filtered_interconnect_dictionary = {key: dictionary[key] for key in key_filter_set} return cls(**filtered_interconnect_dictionary)
[ "def", "from_dictionary", "(", "cls", ",", "dictionary", ")", ":", "key_filter_set", "=", "{", "\"router_type\"", ",", "\"num_router_sources\"", ",", "\"num_router_destinations\"", ",", "\"num_input_channels\"", ",", "\"num_output_channels\"", ",", "\"router_buffer_depth\"", ",", "\"num_physical_planes\"", "}", "filtered_interconnect_dictionary", "=", "{", "key", ":", "dictionary", "[", "key", "]", "for", "key", "in", "key_filter_set", "}", "return", "cls", "(", "**", "filtered_interconnect_dictionary", ")" ]
Instantiate an InterconnectParameters wrapper from a dictionary.
[ "Instantiate", "an", "InterconnectParameters", "wrapper", "from", "a", "dictionary", "." ]
[ "\"\"\"\n Instantiate an InterconnectParameters wrapper from a dictionary.\n\n :param dictionary: loaded from a configuration file or elsewhere\n :return: new InterconnectParameters instance\n \"\"\"", "# Filter the dictionary with only parameters necessary for the initializer.", "# Unpack the dictionary into the initializer." ]
[ { "param": "cls", "type": null }, { "param": "dictionary", "type": null } ]
{ "returns": [ { "docstring": "new InterconnectParameters instance", "docstring_tokens": [ "new", "InterconnectParameters", "instance" ], "type": null } ], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "dictionary", "type": null, "docstring": "loaded from a configuration file or elsewhere", "docstring_tokens": [ "loaded", "from", "a", "configuration", "file", "or", "elsewhere" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def from_dictionary(cls, dictionary): key_filter_set = {"router_type", "num_router_sources", "num_router_destinations", "num_input_channels", "num_output_channels", "router_buffer_depth", "num_physical_planes"} filtered_interconnect_dictionary = {key: dictionary[key] for key in key_filter_set} return cls(**filtered_interconnect_dictionary)
109
942
2afd93bd348a4c24ebc88321858edb1334f39b73
formalabstracts/CNL-CIC
2parser/primitive.py
[ "MIT" ]
Python
line
<not_specific>
def line(etok): """String giving the starting line number of Etok.""" raw = etok.raw if raw: return f'line={raw[0].lineno}.' return ''
String giving the starting line number of Etok.
String giving the starting line number of Etok.
[ "String", "giving", "the", "starting", "line", "number", "of", "Etok", "." ]
def line(etok): raw = etok.raw if raw: return f'line={raw[0].lineno}.' return ''
[ "def", "line", "(", "etok", ")", ":", "raw", "=", "etok", ".", "raw", "if", "raw", ":", "return", "f'line={raw[0].lineno}.'", "return", "''" ]
String giving the starting line number of Etok.
[ "String", "giving", "the", "starting", "line", "number", "of", "Etok", "." ]
[ "\"\"\"String giving the starting line number of Etok.\"\"\"" ]
[ { "param": "etok", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "etok", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def line(etok): raw = etok.raw if raw: return f'line={raw[0].lineno}.' return ''
111
71
033c246f3c0efded7a0f6eb2fcfd6c78a07f462f
SolarSPELL-Main/DLMS
build_automation/content_management/utils.py
[ "MIT" ]
Python
calc_sha256
<not_specific>
def calc_sha256(input_file): """ Calculate the SHA-256 checksum for the given file object. :param input_file: Input file for which the SHA-256 should be calculated. """ sha256_ctxt = hashlib.sha256() bytes_data = input_file.read(4096) while bytes_data != b"": sha256_ctxt.update(bytes_data) bytes_data = input_file.read(4096) input_file.seek(0) return sha256_ctxt.hexdigest()
Calculate the SHA-256 checksum for the given file object. :param input_file: Input file for which the SHA-256 should be calculated.
Calculate the SHA-256 checksum for the given file object.
[ "Calculate", "the", "SHA", "-", "256", "checksum", "for", "the", "given", "file", "object", "." ]
def calc_sha256(input_file): sha256_ctxt = hashlib.sha256() bytes_data = input_file.read(4096) while bytes_data != b"": sha256_ctxt.update(bytes_data) bytes_data = input_file.read(4096) input_file.seek(0) return sha256_ctxt.hexdigest()
[ "def", "calc_sha256", "(", "input_file", ")", ":", "sha256_ctxt", "=", "hashlib", ".", "sha256", "(", ")", "bytes_data", "=", "input_file", ".", "read", "(", "4096", ")", "while", "bytes_data", "!=", "b\"\"", ":", "sha256_ctxt", ".", "update", "(", "bytes_data", ")", "bytes_data", "=", "input_file", ".", "read", "(", "4096", ")", "input_file", ".", "seek", "(", "0", ")", "return", "sha256_ctxt", ".", "hexdigest", "(", ")" ]
Calculate the SHA-256 checksum for the given file object.
[ "Calculate", "the", "SHA", "-", "256", "checksum", "for", "the", "given", "file", "object", "." ]
[ "\"\"\"\n Calculate the SHA-256 checksum for the given file object.\n :param input_file: Input file for which the SHA-256 should be calculated.\n \"\"\"" ]
[ { "param": "input_file", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "input_file", "type": null, "docstring": "Input file for which the SHA-256 should be calculated.", "docstring_tokens": [ "Input", "file", "for", "which", "the", "SHA", "-", "256", "should", "be", "calculated", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import hashlib def calc_sha256(input_file): sha256_ctxt = hashlib.sha256() bytes_data = input_file.read(4096) while bytes_data != b"": sha256_ctxt.update(bytes_data) bytes_data = input_file.read(4096) input_file.seek(0) return sha256_ctxt.hexdigest()
112
224
42dfe57cceb6f27b443b2c64f85015fa20cf635d
MatteoLacki/rta
rta/preprocessing.py
[ "BSD-2-Clause" ]
Python
split
<not_specific>
def split(L, cond): """Split the DataFrame accoring to the count in the condition. Things with cond > 1 are wrong, == 1 are right. """ wrong = L[ L.index.isin(cond[cond > 1].index) ] good = L[ L.index.isin(cond[cond == 1].index) ] return good, wrong
Split the DataFrame accoring to the count in the condition. Things with cond > 1 are wrong, == 1 are right.
Split the DataFrame accoring to the count in the condition. Things with cond > 1 are wrong, == 1 are right.
[ "Split", "the", "DataFrame", "accoring", "to", "the", "count", "in", "the", "condition", ".", "Things", "with", "cond", ">", "1", "are", "wrong", "==", "1", "are", "right", "." ]
def split(L, cond): wrong = L[ L.index.isin(cond[cond > 1].index) ] good = L[ L.index.isin(cond[cond == 1].index) ] return good, wrong
[ "def", "split", "(", "L", ",", "cond", ")", ":", "wrong", "=", "L", "[", "L", ".", "index", ".", "isin", "(", "cond", "[", "cond", ">", "1", "]", ".", "index", ")", "]", "good", "=", "L", "[", "L", ".", "index", ".", "isin", "(", "cond", "[", "cond", "==", "1", "]", ".", "index", ")", "]", "return", "good", ",", "wrong" ]
Split the DataFrame accoring to the count in the condition.
[ "Split", "the", "DataFrame", "accoring", "to", "the", "count", "in", "the", "condition", "." ]
[ "\"\"\"Split the DataFrame accoring to the count in the condition.\n\n Things with cond > 1 are wrong, == 1 are right.\n \"\"\"" ]
[ { "param": "L", "type": null }, { "param": "cond", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "L", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "cond", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def split(L, cond): wrong = L[ L.index.isin(cond[cond > 1].index) ] good = L[ L.index.isin(cond[cond == 1].index) ] return good, wrong
113
620
a961d896c6e2d71c24a99ad719aecf084c13e02f
Salazar769/Ciclo-1-python
modulo.py
[ "CC0-1.0" ]
Python
modificarHorario
<not_specific>
def modificarHorario(dataFrame, cursoPorCambiar, listaPeriodosNuevos): """Esta funcion recibe el dataframe en uso, el nombre del curso a modificar y la lista de periodos va a ocupar ahora el curso. retorna el dataframe actualizado """ indices = dataFrame["Cursos"].to_list() posicionPorCambiar = indices.index(cursoPorCambiar) periodosDisponibles = ["","Per1","Per2","Per3","Per4","Per5","Per6","Per7","Per8","Per9","Per10","Per11","Per12","Per13","Per14","Per15","Per16","Per17","Per18","Per19","Per20","Per21","Per22","Per23","Per24","Per25","Per26","Per27","Per28","Per29","Per30","Per31","Per32","Per33","Per34","Per35","Per36","Per37","Per38","Per39","Per40","Per41","Per42","Per43","Per44","Per45","Per46","Per47","Per48","Per49","Per50"] size = len(dataFrame) #cantidad de cursos actuales nuevo = [True if i in listaPeriodosNuevos else False for i in periodosDisponibles] nuevo[0] = cursoPorCambiar dataFrame.loc[posicionPorCambiar]= nuevo return dataFrame
Esta funcion recibe el dataframe en uso, el nombre del curso a modificar y la lista de periodos va a ocupar ahora el curso. retorna el dataframe actualizado
Esta funcion recibe el dataframe en uso, el nombre del curso a modificar y la lista de periodos va a ocupar ahora el curso. retorna el dataframe actualizado
[ "Esta", "funcion", "recibe", "el", "dataframe", "en", "uso", "el", "nombre", "del", "curso", "a", "modificar", "y", "la", "lista", "de", "periodos", "va", "a", "ocupar", "ahora", "el", "curso", ".", "retorna", "el", "dataframe", "actualizado" ]
def modificarHorario(dataFrame, cursoPorCambiar, listaPeriodosNuevos): indices = dataFrame["Cursos"].to_list() posicionPorCambiar = indices.index(cursoPorCambiar) periodosDisponibles = ["","Per1","Per2","Per3","Per4","Per5","Per6","Per7","Per8","Per9","Per10","Per11","Per12","Per13","Per14","Per15","Per16","Per17","Per18","Per19","Per20","Per21","Per22","Per23","Per24","Per25","Per26","Per27","Per28","Per29","Per30","Per31","Per32","Per33","Per34","Per35","Per36","Per37","Per38","Per39","Per40","Per41","Per42","Per43","Per44","Per45","Per46","Per47","Per48","Per49","Per50"] size = len(dataFrame) nuevo = [True if i in listaPeriodosNuevos else False for i in periodosDisponibles] nuevo[0] = cursoPorCambiar dataFrame.loc[posicionPorCambiar]= nuevo return dataFrame
[ "def", "modificarHorario", "(", "dataFrame", ",", "cursoPorCambiar", ",", "listaPeriodosNuevos", ")", ":", "indices", "=", "dataFrame", "[", "\"Cursos\"", "]", ".", "to_list", "(", ")", "posicionPorCambiar", "=", "indices", ".", "index", "(", "cursoPorCambiar", ")", "periodosDisponibles", "=", "[", "\"\"", ",", "\"Per1\"", ",", "\"Per2\"", ",", "\"Per3\"", ",", "\"Per4\"", ",", "\"Per5\"", ",", "\"Per6\"", ",", "\"Per7\"", ",", "\"Per8\"", ",", "\"Per9\"", ",", "\"Per10\"", ",", "\"Per11\"", ",", "\"Per12\"", ",", "\"Per13\"", ",", "\"Per14\"", ",", "\"Per15\"", ",", "\"Per16\"", ",", "\"Per17\"", ",", "\"Per18\"", ",", "\"Per19\"", ",", "\"Per20\"", ",", "\"Per21\"", ",", "\"Per22\"", ",", "\"Per23\"", ",", "\"Per24\"", ",", "\"Per25\"", ",", "\"Per26\"", ",", "\"Per27\"", ",", "\"Per28\"", ",", "\"Per29\"", ",", "\"Per30\"", ",", "\"Per31\"", ",", "\"Per32\"", ",", "\"Per33\"", ",", "\"Per34\"", ",", "\"Per35\"", ",", "\"Per36\"", ",", "\"Per37\"", ",", "\"Per38\"", ",", "\"Per39\"", ",", "\"Per40\"", ",", "\"Per41\"", ",", "\"Per42\"", ",", "\"Per43\"", ",", "\"Per44\"", ",", "\"Per45\"", ",", "\"Per46\"", ",", "\"Per47\"", ",", "\"Per48\"", ",", "\"Per49\"", ",", "\"Per50\"", "]", "size", "=", "len", "(", "dataFrame", ")", "nuevo", "=", "[", "True", "if", "i", "in", "listaPeriodosNuevos", "else", "False", "for", "i", "in", "periodosDisponibles", "]", "nuevo", "[", "0", "]", "=", "cursoPorCambiar", "dataFrame", ".", "loc", "[", "posicionPorCambiar", "]", "=", "nuevo", "return", "dataFrame" ]
Esta funcion recibe el dataframe en uso, el nombre del curso a modificar y la lista de periodos va a ocupar ahora el curso.
[ "Esta", "funcion", "recibe", "el", "dataframe", "en", "uso", "el", "nombre", "del", "curso", "a", "modificar", "y", "la", "lista", "de", "periodos", "va", "a", "ocupar", "ahora", "el", "curso", "." ]
[ "\"\"\"Esta funcion recibe el dataframe en uso, el nombre del curso a modificar y la lista de periodos va a ocupar ahora el curso.\r\n retorna el dataframe actualizado\r\n \"\"\"", "#cantidad de cursos actuales\r" ]
[ { "param": "dataFrame", "type": null }, { "param": "cursoPorCambiar", "type": null }, { "param": "listaPeriodosNuevos", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dataFrame", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "cursoPorCambiar", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "listaPeriodosNuevos", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def modificarHorario(dataFrame, cursoPorCambiar, listaPeriodosNuevos): indices = dataFrame["Cursos"].to_list() posicionPorCambiar = indices.index(cursoPorCambiar) periodosDisponibles = ["","Per1","Per2","Per3","Per4","Per5","Per6","Per7","Per8","Per9","Per10","Per11","Per12","Per13","Per14","Per15","Per16","Per17","Per18","Per19","Per20","Per21","Per22","Per23","Per24","Per25","Per26","Per27","Per28","Per29","Per30","Per31","Per32","Per33","Per34","Per35","Per36","Per37","Per38","Per39","Per40","Per41","Per42","Per43","Per44","Per45","Per46","Per47","Per48","Per49","Per50"] size = len(dataFrame) nuevo = [True if i in listaPeriodosNuevos else False for i in periodosDisponibles] nuevo[0] = cursoPorCambiar dataFrame.loc[posicionPorCambiar]= nuevo return dataFrame
114
895
f8de74c6b919b82dc36f8903246956eae4e00749
vincefn/pyvkfft
pyvkfft/scripts/pyvkfft_test.py
[ "MIT" ]
Python
name_next_file
<not_specific>
def name_next_file(pattern="pyvkfft-test%04d.html"): """ Find the first unused name for a file, starting at i=1 :param pattern: the pattern for the file name. :return: the filename """ lsdir = os.listdir() for i in range(1001, 1999): if pattern % i not in lsdir: return pattern % i raise RuntimeError("name_next_file: '%s' files all used from 1001 to 1998. Maybe cleanup ?" % pattern)
Find the first unused name for a file, starting at i=1 :param pattern: the pattern for the file name. :return: the filename
Find the first unused name for a file, starting at i=1
[ "Find", "the", "first", "unused", "name", "for", "a", "file", "starting", "at", "i", "=", "1" ]
def name_next_file(pattern="pyvkfft-test%04d.html"): lsdir = os.listdir() for i in range(1001, 1999): if pattern % i not in lsdir: return pattern % i raise RuntimeError("name_next_file: '%s' files all used from 1001 to 1998. Maybe cleanup ?" % pattern)
[ "def", "name_next_file", "(", "pattern", "=", "\"pyvkfft-test%04d.html\"", ")", ":", "lsdir", "=", "os", ".", "listdir", "(", ")", "for", "i", "in", "range", "(", "1001", ",", "1999", ")", ":", "if", "pattern", "%", "i", "not", "in", "lsdir", ":", "return", "pattern", "%", "i", "raise", "RuntimeError", "(", "\"name_next_file: '%s' files all used from 1001 to 1998. Maybe cleanup ?\"", "%", "pattern", ")" ]
Find the first unused name for a file, starting at i=1
[ "Find", "the", "first", "unused", "name", "for", "a", "file", "starting", "at", "i", "=", "1" ]
[ "\"\"\"\n Find the first unused name for a file, starting at i=1\n :param pattern: the pattern for the file name.\n :return: the filename\n \"\"\"" ]
[ { "param": "pattern", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "pattern", "type": null, "docstring": "the pattern for the file name.", "docstring_tokens": [ "the", "pattern", "for", "the", "file", "name", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def name_next_file(pattern="pyvkfft-test%04d.html"): lsdir = os.listdir() for i in range(1001, 1999): if pattern % i not in lsdir: return pattern % i raise RuntimeError("name_next_file: '%s' files all used from 1001 to 1998. Maybe cleanup ?" % pattern)
115
322
87ca097a645b48edbcefcb038c230c6a4ea137ee
mustafaneguib/spatial-information-programming-analysis-project
assign2_modules.py
[ "BSD-4-Clause-UC" ]
Python
build_html_component_without_title
<not_specific>
def build_html_component_without_title(html_string): """ This function builds the html string for a component. :param html_string: html_string of the component :param title: Title of the html component :return: html_string """ html = """ <div class="row"> <div class="col-md-12"> <p> {0} </p> </div> </div> """.format(html_string) return html
This function builds the html string for a component. :param html_string: html_string of the component :param title: Title of the html component :return: html_string
This function builds the html string for a component.
[ "This", "function", "builds", "the", "html", "string", "for", "a", "component", "." ]
def build_html_component_without_title(html_string): html = """ <div class="row"> <div class="col-md-12"> <p> {0} </p> </div> </div> """.format(html_string) return html
[ "def", "build_html_component_without_title", "(", "html_string", ")", ":", "html", "=", "\"\"\"\n \n <div class=\"row\">\n <div class=\"col-md-12\">\n <p>\n {0}\n </p>\n </div>\n </div>\n \n \"\"\"", ".", "format", "(", "html_string", ")", "return", "html" ]
This function builds the html string for a component.
[ "This", "function", "builds", "the", "html", "string", "for", "a", "component", "." ]
[ "\"\"\"\n This function builds the html string for a component.\n :param html_string: html_string of the component\n :param title: Title of the html component\n :return: html_string\n \"\"\"" ]
[ { "param": "html_string", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "html_string", "type": null, "docstring": "html_string of the component", "docstring_tokens": [ "html_string", "of", "the", "component" ], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "title", "type": null, "docstring": "Title of the html component", "docstring_tokens": [ "Title", "of", "the", "html", "component" ], "default": null, "is_optional": null } ], "others": [] }
def build_html_component_without_title(html_string): html = """ <div class="row"> <div class="col-md-12"> <p> {0} </p> </div> </div> """.format(html_string) return html
116
220
2b83d531db3363cb8d16823e0c9f3d9d8932e3b2
diego-hermida/ClimateChangeApp
telegram_bot/main.py
[ "MIT" ]
Python
green_bold
str
def green_bold(msg: str) -> str: """ Given an 'str' object, wraps it between ANSI green & bold escape characters. :param msg: Message to be wrapped. :return: The same message, which will be displayed as green & bold by the terminal. """ return '\u001b[1;32m%s\u001b[0m' % msg
Given an 'str' object, wraps it between ANSI green & bold escape characters. :param msg: Message to be wrapped. :return: The same message, which will be displayed as green & bold by the terminal.
Given an 'str' object, wraps it between ANSI green & bold escape characters.
[ "Given", "an", "'", "str", "'", "object", "wraps", "it", "between", "ANSI", "green", "&", "bold", "escape", "characters", "." ]
def green_bold(msg: str) -> str: return '\u001b[1;32m%s\u001b[0m' % msg
[ "def", "green_bold", "(", "msg", ":", "str", ")", "->", "str", ":", "return", "'\\u001b[1;32m%s\\u001b[0m'", "%", "msg" ]
Given an 'str' object, wraps it between ANSI green & bold escape characters.
[ "Given", "an", "'", "str", "'", "object", "wraps", "it", "between", "ANSI", "green", "&", "bold", "escape", "characters", "." ]
[ "\"\"\"\n Given an 'str' object, wraps it between ANSI green & bold escape characters.\n :param msg: Message to be wrapped.\n :return: The same message, which will be displayed as green & bold by the terminal.\n \"\"\"" ]
[ { "param": "msg", "type": "str" } ]
{ "returns": [ { "docstring": "The same message, which will be displayed as green & bold by the terminal.", "docstring_tokens": [ "The", "same", "message", "which", "will", "be", "displayed", "as", "green", "&", "bold", "by", "the", "terminal", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "msg", "type": "str", "docstring": "Message to be wrapped.", "docstring_tokens": [ "Message", "to", "be", "wrapped", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def green_bold(msg: str) -> str: return '\u001b[1;32m%s\u001b[0m' % msg
117
305
a54be27a4d3b2dfecd26b39462bcf25d1e591fc7
ClimateCompatibleGrowth/scorm_package
src/scorm_package/scorm.py
[ "MIT" ]
Python
retrieve_file_paths
<not_specific>
def retrieve_file_paths(dirName): """ Retrieves the filepath for the directory being zipped. """ # setup file paths variable filePaths = [] # Read all directory, subdirectories and file lists for root, _, files in os.walk(dirName): for filename in files: # Create the full filepath by using os module. filePath = os.path.join(root, filename) filePaths.append(filePath) # return all paths return filePaths
Retrieves the filepath for the directory being zipped.
Retrieves the filepath for the directory being zipped.
[ "Retrieves", "the", "filepath", "for", "the", "directory", "being", "zipped", "." ]
def retrieve_file_paths(dirName): filePaths = [] for root, _, files in os.walk(dirName): for filename in files: filePath = os.path.join(root, filename) filePaths.append(filePath) return filePaths
[ "def", "retrieve_file_paths", "(", "dirName", ")", ":", "filePaths", "=", "[", "]", "for", "root", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "dirName", ")", ":", "for", "filename", "in", "files", ":", "filePath", "=", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "filePaths", ".", "append", "(", "filePath", ")", "return", "filePaths" ]
Retrieves the filepath for the directory being zipped.
[ "Retrieves", "the", "filepath", "for", "the", "directory", "being", "zipped", "." ]
[ "\"\"\"\n Retrieves the filepath for the directory being zipped.\n \"\"\"", "# setup file paths variable", "# Read all directory, subdirectories and file lists", "# Create the full filepath by using os module.", "# return all paths" ]
[ { "param": "dirName", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dirName", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def retrieve_file_paths(dirName): filePaths = [] for root, _, files in os.walk(dirName): for filename in files: filePath = os.path.join(root, filename) filePaths.append(filePath) return filePaths
118
270
6bf0b7f3f63f9cea47b6089d3a52bb5325a16bfa
mernst/cozy
cozy/common.py
[ "Apache-2.0" ]
Python
my_caller
<not_specific>
def my_caller(up=0): """ Returns a FrameInfo object describing the caller of the function that called my_caller. You might care about these properties of the FrameInfo object: .filename .function .lineno The `up` parameter can be used to look farther up the call stack. For instance, up=1 returns info about the caller of the caller of the function that called my_caller. """ stack = inspect.stack() return stack[up+2]
Returns a FrameInfo object describing the caller of the function that called my_caller. You might care about these properties of the FrameInfo object: .filename .function .lineno The `up` parameter can be used to look farther up the call stack. For instance, up=1 returns info about the caller of the caller of the function that called my_caller.
Returns a FrameInfo object describing the caller of the function that called my_caller. You might care about these properties of the FrameInfo object: .filename .function .lineno The `up` parameter can be used to look farther up the call stack. For instance, up=1 returns info about the caller of the caller of the function that called my_caller.
[ "Returns", "a", "FrameInfo", "object", "describing", "the", "caller", "of", "the", "function", "that", "called", "my_caller", ".", "You", "might", "care", "about", "these", "properties", "of", "the", "FrameInfo", "object", ":", ".", "filename", ".", "function", ".", "lineno", "The", "`", "up", "`", "parameter", "can", "be", "used", "to", "look", "farther", "up", "the", "call", "stack", ".", "For", "instance", "up", "=", "1", "returns", "info", "about", "the", "caller", "of", "the", "caller", "of", "the", "function", "that", "called", "my_caller", "." ]
def my_caller(up=0): stack = inspect.stack() return stack[up+2]
[ "def", "my_caller", "(", "up", "=", "0", ")", ":", "stack", "=", "inspect", ".", "stack", "(", ")", "return", "stack", "[", "up", "+", "2", "]" ]
Returns a FrameInfo object describing the caller of the function that called my_caller.
[ "Returns", "a", "FrameInfo", "object", "describing", "the", "caller", "of", "the", "function", "that", "called", "my_caller", "." ]
[ "\"\"\"\n Returns a FrameInfo object describing the caller of the function that\n called my_caller.\n\n You might care about these properties of the FrameInfo object:\n .filename\n .function\n .lineno\n\n The `up` parameter can be used to look farther up the call stack. For\n instance, up=1 returns info about the caller of the caller of the function\n that called my_caller.\n \"\"\"" ]
[ { "param": "up", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "up", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import inspect def my_caller(up=0): stack = inspect.stack() return stack[up+2]
119
503
399facab691f0c8d28ecb51ef347628b1d95e281
cobymc/altimeter
altimeter/core/resource/resource_spec.py
[ "MIT" ]
Python
generate_id
str
def generate_id( cls: Type["ResourceSpec"], short_resource_id: str, context: Dict[str, Any] ) -> str: """Generate a full id for this type given a short_resource_id. Args: short_resource_id: short resource id for this resource context: contains auxiliary information which can be passed through the parse process. Returns: full resource id string """ return f"{cls.type_name}:{short_resource_id}"
Generate a full id for this type given a short_resource_id. Args: short_resource_id: short resource id for this resource context: contains auxiliary information which can be passed through the parse process. Returns: full resource id string
Generate a full id for this type given a short_resource_id.
[ "Generate", "a", "full", "id", "for", "this", "type", "given", "a", "short_resource_id", "." ]
def generate_id( cls: Type["ResourceSpec"], short_resource_id: str, context: Dict[str, Any] ) -> str: return f"{cls.type_name}:{short_resource_id}"
[ "def", "generate_id", "(", "cls", ":", "Type", "[", "\"ResourceSpec\"", "]", ",", "short_resource_id", ":", "str", ",", "context", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "str", ":", "return", "f\"{cls.type_name}:{short_resource_id}\"" ]
Generate a full id for this type given a short_resource_id.
[ "Generate", "a", "full", "id", "for", "this", "type", "given", "a", "short_resource_id", "." ]
[ "\"\"\"Generate a full id for this type given a short_resource_id.\n\n Args:\n short_resource_id: short resource id for this resource\n context: contains auxiliary information which can be passed through the parse process.\n\n Returns:\n full resource id string\n \"\"\"" ]
[ { "param": "cls", "type": "Type[\"ResourceSpec\"]" }, { "param": "short_resource_id", "type": "str" }, { "param": "context", "type": "Dict[str, Any]" } ]
{ "returns": [ { "docstring": "full resource id string", "docstring_tokens": [ "full", "resource", "id", "string" ], "type": null } ], "raises": [], "params": [ { "identifier": "cls", "type": "Type[\"ResourceSpec\"]", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "short_resource_id", "type": "str", "docstring": "short resource id for this resource", "docstring_tokens": [ "short", "resource", "id", "for", "this", "resource" ], "default": null, "is_optional": null }, { "identifier": "context", "type": "Dict[str, Any]", "docstring": "contains auxiliary information which can be passed through the parse process.", "docstring_tokens": [ "contains", "auxiliary", "information", "which", "can", "be", "passed", "through", "the", "parse", "process", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def generate_id( cls: Type["ResourceSpec"], short_resource_id: str, context: Dict[str, Any] ) -> str: return f"{cls.type_name}:{short_resource_id}"
120
630
eccc77c5d424c27701a538e4bc03a9dba7e2a7f4
brutalic/pynet_brutal
class6/library/eos_switchport.py
[ "Apache-2.0" ]
Python
create
null
def create(module): """Creates a new instance of switchport on the node """ name = module.attributes['name'] module.log('Invoked create for eos_switchport[%s]' % name) module.node.api('switchports').create(name)
Creates a new instance of switchport on the node
Creates a new instance of switchport on the node
[ "Creates", "a", "new", "instance", "of", "switchport", "on", "the", "node" ]
def create(module): name = module.attributes['name'] module.log('Invoked create for eos_switchport[%s]' % name) module.node.api('switchports').create(name)
[ "def", "create", "(", "module", ")", ":", "name", "=", "module", ".", "attributes", "[", "'name'", "]", "module", ".", "log", "(", "'Invoked create for eos_switchport[%s]'", "%", "name", ")", "module", ".", "node", ".", "api", "(", "'switchports'", ")", ".", "create", "(", "name", ")" ]
Creates a new instance of switchport on the node
[ "Creates", "a", "new", "instance", "of", "switchport", "on", "the", "node" ]
[ "\"\"\"Creates a new instance of switchport on the node\n \"\"\"" ]
[ { "param": "module", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "module", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def create(module): name = module.attributes['name'] module.log('Invoked create for eos_switchport[%s]' % name) module.node.api('switchports').create(name)
121
819
c5e6c8821a5dd9fd84b67c0b3853decddb290e9e
balanced/billy
billy/utils/generic.py
[ "MIT" ]
Python
round_down_cent
<not_specific>
def round_down_cent(amount): """Round down money value in cent (drop float points), for example, 5.66666 cents will be rounded to 5 cents :param amount: the money amount in cent to be rounded :return: the rounded money amount """ return int(amount)
Round down money value in cent (drop float points), for example, 5.66666 cents will be rounded to 5 cents :param amount: the money amount in cent to be rounded :return: the rounded money amount
Round down money value in cent (drop float points), for example, 5.66666 cents will be rounded to 5 cents
[ "Round", "down", "money", "value", "in", "cent", "(", "drop", "float", "points", ")", "for", "example", "5", ".", "66666", "cents", "will", "be", "rounded", "to", "5", "cents" ]
def round_down_cent(amount): return int(amount)
[ "def", "round_down_cent", "(", "amount", ")", ":", "return", "int", "(", "amount", ")" ]
Round down money value in cent (drop float points), for example, 5.66666 cents will be rounded to 5 cents
[ "Round", "down", "money", "value", "in", "cent", "(", "drop", "float", "points", ")", "for", "example", "5", ".", "66666", "cents", "will", "be", "rounded", "to", "5", "cents" ]
[ "\"\"\"Round down money value in cent (drop float points), for example, 5.66666\n cents will be rounded to 5 cents\n\n :param amount: the money amount in cent to be rounded\n :return: the rounded money amount\n \"\"\"" ]
[ { "param": "amount", "type": null } ]
{ "returns": [ { "docstring": "the rounded money amount", "docstring_tokens": [ "the", "rounded", "money", "amount" ], "type": null } ], "raises": [], "params": [ { "identifier": "amount", "type": null, "docstring": "the money amount in cent to be rounded", "docstring_tokens": [ "the", "money", "amount", "in", "cent", "to", "be", "rounded" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def round_down_cent(amount): return int(amount)
122
529
15f69d04b5582c34498855c2b7321daab237bb7d
MSLNZ/GTC
GTC/function.py
[ "MIT" ]
Python
complex_to_seq
<not_specific>
def complex_to_seq(z): """Transform a complex number into a 4-element sequence :arg z: a number If ``z = x + yj``, then an array of the form ``[[x,-y],[y,x]]`` can be used to represent ``z`` in matrix computations. **Examples**:: >>> import numpy >>> z = 1 + 2j >>> function.complex_to_seq(z) (1.0, -2.0, 2.0, 1.0) >>> m = numpy.array( function.complex_to_seq(z) ) >>> m.shape = (2,2) >>> print( m ) [[ 1. -2.] [ 2. 1.]] """ z = complex(z) return (z.real,-z.imag,z.imag,z.real)
Transform a complex number into a 4-element sequence :arg z: a number If ``z = x + yj``, then an array of the form ``[[x,-y],[y,x]]`` can be used to represent ``z`` in matrix computations. **Examples**:: >>> import numpy >>> z = 1 + 2j >>> function.complex_to_seq(z) (1.0, -2.0, 2.0, 1.0) >>> m = numpy.array( function.complex_to_seq(z) ) >>> m.shape = (2,2) >>> print( m ) [[ 1. -2.] [ 2. 1.]]
Transform a complex number into a 4-element sequence
[ "Transform", "a", "complex", "number", "into", "a", "4", "-", "element", "sequence" ]
def complex_to_seq(z): z = complex(z) return (z.real,-z.imag,z.imag,z.real)
[ "def", "complex_to_seq", "(", "z", ")", ":", "z", "=", "complex", "(", "z", ")", "return", "(", "z", ".", "real", ",", "-", "z", ".", "imag", ",", "z", ".", "imag", ",", "z", ".", "real", ")" ]
Transform a complex number into a 4-element sequence
[ "Transform", "a", "complex", "number", "into", "a", "4", "-", "element", "sequence" ]
[ "\"\"\"Transform a complex number into a 4-element sequence\r\n\r\n :arg z: a number\r\n\r\n If ``z = x + yj``, then an array of the form ``[[x,-y],[y,x]]`` \r\n can be used to represent ``z`` in matrix computations. \r\n\r\n **Examples**::\r\n >>> import numpy\r\n >>> z = 1 + 2j\r\n >>> function.complex_to_seq(z)\r\n (1.0, -2.0, 2.0, 1.0)\r\n \r\n >>> m = numpy.array( function.complex_to_seq(z) )\r\n >>> m.shape = (2,2)\r\n >>> print( m )\r\n [[ 1. -2.]\r\n [ 2. 1.]]\r\n \r\n \"\"\"" ]
[ { "param": "z", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "z", "type": null, "docstring": "a number\nIf ``z = x + yj``, then an array of the form ``[[x,-y],[y,x]]``\ncan be used to represent ``z`` in matrix computations.\n\n\n\n", "docstring_tokens": [ "a", "number", "If", "`", "`", "z", "=", "x", "+", "yj", "`", "`", "then", "an", "array", "of", "the", "form", "`", "`", "[[", "x", "-", "y", "]", "[", "y", "x", "]]", "`", "`", "can", "be", "used", "to", "represent", "`", "`", "z", "`", "`", "in", "matrix", "computations", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def complex_to_seq(z): z = complex(z) return (z.real,-z.imag,z.imag,z.real)
124
247
fd1a8736f23ff155b16658aaea6e727940d557a3
keatonb/astropy
astropy/utils/parsing.py
[ "BSD-3-Clause" ]
Python
_patch_get_caller_module_dict
<not_specific>
def _patch_get_caller_module_dict(module): """Temporarily replace the module's get_caller_module_dict. This is a function inside ``ply.lex`` and ``ply.yacc`` (each has a copy) that is used to retrieve the caller's local symbols. Here, we patch the function to instead retrieve the grandparent's local symbols to account for a wrapper layer. """ original = module.get_caller_module_dict @functools.wraps(original) def wrapper(levels): # Add 2, not 1, because the wrapper itself adds another level return original(levels + 2) module.get_caller_module_dict = wrapper yield module.get_caller_module_dict = original
Temporarily replace the module's get_caller_module_dict. This is a function inside ``ply.lex`` and ``ply.yacc`` (each has a copy) that is used to retrieve the caller's local symbols. Here, we patch the function to instead retrieve the grandparent's local symbols to account for a wrapper layer.
Temporarily replace the module's get_caller_module_dict.
[ "Temporarily", "replace", "the", "module", "'", "s", "get_caller_module_dict", "." ]
def _patch_get_caller_module_dict(module): original = module.get_caller_module_dict @functools.wraps(original) def wrapper(levels): return original(levels + 2) module.get_caller_module_dict = wrapper yield module.get_caller_module_dict = original
[ "def", "_patch_get_caller_module_dict", "(", "module", ")", ":", "original", "=", "module", ".", "get_caller_module_dict", "@", "functools", ".", "wraps", "(", "original", ")", "def", "wrapper", "(", "levels", ")", ":", "return", "original", "(", "levels", "+", "2", ")", "module", ".", "get_caller_module_dict", "=", "wrapper", "yield", "module", ".", "get_caller_module_dict", "=", "original" ]
Temporarily replace the module's get_caller_module_dict.
[ "Temporarily", "replace", "the", "module", "'", "s", "get_caller_module_dict", "." ]
[ "\"\"\"Temporarily replace the module's get_caller_module_dict.\n\n This is a function inside ``ply.lex`` and ``ply.yacc`` (each has a copy)\n that is used to retrieve the caller's local symbols. Here, we patch the\n function to instead retrieve the grandparent's local symbols to account\n for a wrapper layer.\n \"\"\"", "# Add 2, not 1, because the wrapper itself adds another level" ]
[ { "param": "module", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "module", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import functools def _patch_get_caller_module_dict(module): original = module.get_caller_module_dict @functools.wraps(original) def wrapper(levels): return original(levels + 2) module.get_caller_module_dict = wrapper yield module.get_caller_module_dict = original
125
10
dad41c3a739529e7e8fe74ae8ee23579c2f82d3f
ankhoudary12/covid19_etl
dags/covid_19_etl.py
[ "MIT" ]
Python
clear_local_raw_covid_data
None
def clear_local_raw_covid_data(path: str) -> None: """Clear raw covid csvs from specified path. Args: path (str): path to delete files """ files = os.listdir(path) if files: for file in files: os.remove(os.path.join(path, file))
Clear raw covid csvs from specified path. Args: path (str): path to delete files
Clear raw covid csvs from specified path.
[ "Clear", "raw", "covid", "csvs", "from", "specified", "path", "." ]
def clear_local_raw_covid_data(path: str) -> None: files = os.listdir(path) if files: for file in files: os.remove(os.path.join(path, file))
[ "def", "clear_local_raw_covid_data", "(", "path", ":", "str", ")", "->", "None", ":", "files", "=", "os", ".", "listdir", "(", "path", ")", "if", "files", ":", "for", "file", "in", "files", ":", "os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "path", ",", "file", ")", ")" ]
Clear raw covid csvs from specified path.
[ "Clear", "raw", "covid", "csvs", "from", "specified", "path", "." ]
[ "\"\"\"Clear raw covid csvs from specified path.\n\n Args:\n path (str): path to delete files\n \"\"\"" ]
[ { "param": "path", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "path", "type": "str", "docstring": "path to delete files", "docstring_tokens": [ "path", "to", "delete", "files" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
import os def clear_local_raw_covid_data(path: str) -> None: files = os.listdir(path) if files: for file in files: os.remove(os.path.join(path, file))
126
671
079e8d8d31269aeb014b278ab5096adb8d48f46b
slavesocieties/ssda-nlp
ssda_nlp/relationships.py
[ "MIT" ]
Python
build_new_person
<not_specific>
def build_new_person(people_df, next_id, person_type): ''' appends a row representing a new person to an existing people df people_df: df containing all entities labeled as people in the entry with unique ids volume_metadata: metadata for the volume that the entry comes from, built by retrieve_volume_metadata person_type: type of person being added, e.g. "principal" or "cleric" returns: updated df with new person added and updated next available id ''' curr_entry = next_id[next_id.find('-') + 1:next_id.find('P') - 1] person_name = "Unknown " + person_type person_number = next_id[next_id.find('P') + 1:] person_number = int(person_number) people_df = people_df.append({"entry_no": curr_entry, "pred_entity": person_name, "pred_label": "PER", "unique_id": next_id}, ignore_index=True) next_id = next_id[:next_id.find('P') + 1] + str(person_number + 1) return people_df, next_id
appends a row representing a new person to an existing people df people_df: df containing all entities labeled as people in the entry with unique ids volume_metadata: metadata for the volume that the entry comes from, built by retrieve_volume_metadata person_type: type of person being added, e.g. "principal" or "cleric" returns: updated df with new person added and updated next available id
appends a row representing a new person to an existing people df people_df: df containing all entities labeled as people in the entry with unique ids volume_metadata: metadata for the volume that the entry comes from, built by retrieve_volume_metadata person_type: type of person being added, e.g. "principal" or "cleric" updated df with new person added and updated next available id
[ "appends", "a", "row", "representing", "a", "new", "person", "to", "an", "existing", "people", "df", "people_df", ":", "df", "containing", "all", "entities", "labeled", "as", "people", "in", "the", "entry", "with", "unique", "ids", "volume_metadata", ":", "metadata", "for", "the", "volume", "that", "the", "entry", "comes", "from", "built", "by", "retrieve_volume_metadata", "person_type", ":", "type", "of", "person", "being", "added", "e", ".", "g", ".", "\"", "principal", "\"", "or", "\"", "cleric", "\"", "updated", "df", "with", "new", "person", "added", "and", "updated", "next", "available", "id" ]
def build_new_person(people_df, next_id, person_type): curr_entry = next_id[next_id.find('-') + 1:next_id.find('P') - 1] person_name = "Unknown " + person_type person_number = next_id[next_id.find('P') + 1:] person_number = int(person_number) people_df = people_df.append({"entry_no": curr_entry, "pred_entity": person_name, "pred_label": "PER", "unique_id": next_id}, ignore_index=True) next_id = next_id[:next_id.find('P') + 1] + str(person_number + 1) return people_df, next_id
[ "def", "build_new_person", "(", "people_df", ",", "next_id", ",", "person_type", ")", ":", "curr_entry", "=", "next_id", "[", "next_id", ".", "find", "(", "'-'", ")", "+", "1", ":", "next_id", ".", "find", "(", "'P'", ")", "-", "1", "]", "person_name", "=", "\"Unknown \"", "+", "person_type", "person_number", "=", "next_id", "[", "next_id", ".", "find", "(", "'P'", ")", "+", "1", ":", "]", "person_number", "=", "int", "(", "person_number", ")", "people_df", "=", "people_df", ".", "append", "(", "{", "\"entry_no\"", ":", "curr_entry", ",", "\"pred_entity\"", ":", "person_name", ",", "\"pred_label\"", ":", "\"PER\"", ",", "\"unique_id\"", ":", "next_id", "}", ",", "ignore_index", "=", "True", ")", "next_id", "=", "next_id", "[", ":", "next_id", ".", "find", "(", "'P'", ")", "+", "1", "]", "+", "str", "(", "person_number", "+", "1", ")", "return", "people_df", ",", "next_id" ]
appends a row representing a new person to an existing people df people_df: df containing all entities labeled as people in the entry with unique ids volume_metadata: metadata for the volume that the entry comes from, built by retrieve_volume_metadata person_type: type of person being added, e.g.
[ "appends", "a", "row", "representing", "a", "new", "person", "to", "an", "existing", "people", "df", "people_df", ":", "df", "containing", "all", "entities", "labeled", "as", "people", "in", "the", "entry", "with", "unique", "ids", "volume_metadata", ":", "metadata", "for", "the", "volume", "that", "the", "entry", "comes", "from", "built", "by", "retrieve_volume_metadata", "person_type", ":", "type", "of", "person", "being", "added", "e", ".", "g", "." ]
[ "'''\n appends a row representing a new person to an existing people df\n people_df: df containing all entities labeled as people in the entry with unique ids\n volume_metadata: metadata for the volume that the entry comes from, built by retrieve_volume_metadata\n person_type: type of person being added, e.g. \"principal\" or \"cleric\"\n\n returns: updated df with new person added and updated next available id\n '''" ]
[ { "param": "people_df", "type": null }, { "param": "next_id", "type": null }, { "param": "person_type", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "people_df", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "next_id", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "person_type", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def build_new_person(people_df, next_id, person_type): curr_entry = next_id[next_id.find('-') + 1:next_id.find('P') - 1] person_name = "Unknown " + person_type person_number = next_id[next_id.find('P') + 1:] person_number = int(person_number) people_df = people_df.append({"entry_no": curr_entry, "pred_entity": person_name, "pred_label": "PER", "unique_id": next_id}, ignore_index=True) next_id = next_id[:next_id.find('P') + 1] + str(person_number + 1) return people_df, next_id
127
111
26dd55b6a0f612234e711d2e9da51f482146a66d
ian-bartholomew/machine-learning
classification/decision_tree/decision_tree_binary.py
[ "MIT" ]
Python
error_reduction
<not_specific>
def error_reduction(error_before_split, error_after_split): """ Purpose: Compute the difference between error before and after split Input : Error before split and error after split Output : Difference between error before and after split """ return (error_before_split - error_after_split)
Purpose: Compute the difference between error before and after split Input : Error before split and error after split Output : Difference between error before and after split
Compute the difference between error before and after split Input : Error before split and error after split Output : Difference between error before and after split
[ "Compute", "the", "difference", "between", "error", "before", "and", "after", "split", "Input", ":", "Error", "before", "split", "and", "error", "after", "split", "Output", ":", "Difference", "between", "error", "before", "and", "after", "split" ]
def error_reduction(error_before_split, error_after_split): return (error_before_split - error_after_split)
[ "def", "error_reduction", "(", "error_before_split", ",", "error_after_split", ")", ":", "return", "(", "error_before_split", "-", "error_after_split", ")" ]
Purpose: Compute the difference between error before and after split Input : Error before split and error after split Output : Difference between error before and after split
[ "Purpose", ":", "Compute", "the", "difference", "between", "error", "before", "and", "after", "split", "Input", ":", "Error", "before", "split", "and", "error", "after", "split", "Output", ":", "Difference", "between", "error", "before", "and", "after", "split" ]
[ "\"\"\"\r\n Purpose: Compute the difference between error before and after split\r\n Input : Error before split and error after split\r\n Output : Difference between error before and after split\r\n \"\"\"" ]
[ { "param": "error_before_split", "type": null }, { "param": "error_after_split", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "error_before_split", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "error_after_split", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def error_reduction(error_before_split, error_after_split): return (error_before_split - error_after_split)
128
306
c5a885594689cfda35e49a7fad738e21e2d73d02
aweandreverence/django-htk
utils/data_structures/general.py
[ "MIT" ]
Python
filter_dict
<not_specific>
def filter_dict(d, keys): """Returns a subset of dictionary `d` with keys from `keys` """ filtered = {} for key in keys: filtered[key] = d.get(key) return filtered
Returns a subset of dictionary `d` with keys from `keys`
Returns a subset of dictionary `d` with keys from `keys`
[ "Returns", "a", "subset", "of", "dictionary", "`", "d", "`", "with", "keys", "from", "`", "keys", "`" ]
def filter_dict(d, keys): filtered = {} for key in keys: filtered[key] = d.get(key) return filtered
[ "def", "filter_dict", "(", "d", ",", "keys", ")", ":", "filtered", "=", "{", "}", "for", "key", "in", "keys", ":", "filtered", "[", "key", "]", "=", "d", ".", "get", "(", "key", ")", "return", "filtered" ]
Returns a subset of dictionary `d` with keys from `keys`
[ "Returns", "a", "subset", "of", "dictionary", "`", "d", "`", "with", "keys", "from", "`", "keys", "`" ]
[ "\"\"\"Returns a subset of dictionary `d` with keys from `keys`\n \"\"\"" ]
[ { "param": "d", "type": null }, { "param": "keys", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "d", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "keys", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def filter_dict(d, keys): filtered = {} for key in keys: filtered[key] = d.get(key) return filtered
129
47
8fa90889cd9fae41026d28fea6f8322c68e01fb2
PhilippJunk/homelette
homelette/routines.py
[ "MIT" ]
Python
_remove_files
None
def _remove_files(*args: str) -> None: ''' Remove files given after modelling procedure. Called by Children classes after model generation in order to clean up temporary files or unneccessary output files. Uses glob.glob internally and can therefore handle wildcards. Parameters ---------- *args : str Filenames or queries for filenames that will be deleted. Returns ------- None ''' for query in args: for file in glob.glob(query): os.remove(file)
Remove files given after modelling procedure. Called by Children classes after model generation in order to clean up temporary files or unneccessary output files. Uses glob.glob internally and can therefore handle wildcards. Parameters ---------- *args : str Filenames or queries for filenames that will be deleted. Returns ------- None
Remove files given after modelling procedure. Called by Children classes after model generation in order to clean up temporary files or unneccessary output files. Uses glob.glob internally and can therefore handle wildcards. Parameters args : str Filenames or queries for filenames that will be deleted. Returns None
[ "Remove", "files", "given", "after", "modelling", "procedure", ".", "Called", "by", "Children", "classes", "after", "model", "generation", "in", "order", "to", "clean", "up", "temporary", "files", "or", "unneccessary", "output", "files", ".", "Uses", "glob", ".", "glob", "internally", "and", "can", "therefore", "handle", "wildcards", ".", "Parameters", "args", ":", "str", "Filenames", "or", "queries", "for", "filenames", "that", "will", "be", "deleted", ".", "Returns", "None" ]
def _remove_files(*args: str) -> None: for query in args: for file in glob.glob(query): os.remove(file)
[ "def", "_remove_files", "(", "*", "args", ":", "str", ")", "->", "None", ":", "for", "query", "in", "args", ":", "for", "file", "in", "glob", ".", "glob", "(", "query", ")", ":", "os", ".", "remove", "(", "file", ")" ]
Remove files given after modelling procedure.
[ "Remove", "files", "given", "after", "modelling", "procedure", "." ]
[ "'''\n Remove files given after modelling procedure.\n\n Called by Children classes after model generation in order to clean up\n temporary files or unneccessary output files.\n Uses glob.glob internally and can therefore handle wildcards.\n\n Parameters\n ----------\n *args : str\n Filenames or queries for filenames that will be deleted.\n\n Returns\n -------\n None\n '''" ]
[ { "param": "args", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "args", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os import glob def _remove_files(*args: str) -> None: for query in args: for file in glob.glob(query): os.remove(file)
130
127
3707dc49c15865574597d3e7688934cb1492836f
mrbot-ai/deep_qa
deep_qa/data/instances/text_classification/tuple_instance.py
[ "Apache-2.0" ]
Python
read_from_line
<not_specific>
def read_from_line(cls, line: str): """ Reads a TupleInstances from a line. The format has one of four options: (1) [subject]###[predicate]###[object1]... (2) [sentence index][tab][subject]###[predicate]###[object1]... (3) [subject]###[predicate]###[object1]...[tab][label] (4) [sentence index][tab][subject]###[predicate]###[object1]...[tab][label] Objects are optional, and can vary in number. This makes the acceptable number of slots per tuple, 2 or more. """ fields = line.split("\t") if len(fields) == 1: # Case 1 tuple_string = fields[0] index = None label = None elif len(fields) == 2: if fields[0].isdigit(): # Case 2 index = int(fields[0]) tuple_string = fields[1] label = None else: # Case 3 tuple_string = fields[0] index = None label = fields[2].strip() == "1" else: # Case 4 index = int(fields[0]) tuple_string = fields[1] label = fields[2].strip() == "1" tuple_fields = tuple_string.split('###') if len(tuple_fields) < 2: raise RuntimeError("Unexpected number of fields in tuple: " + tuple_string) return cls(tuple_fields, label=label, index=index)
Reads a TupleInstances from a line. The format has one of four options: (1) [subject]###[predicate]###[object1]... (2) [sentence index][tab][subject]###[predicate]###[object1]... (3) [subject]###[predicate]###[object1]...[tab][label] (4) [sentence index][tab][subject]###[predicate]###[object1]...[tab][label] Objects are optional, and can vary in number. This makes the acceptable number of slots per tuple, 2 or more.
Reads a TupleInstances from a line. Objects are optional, and can vary in number. This makes the acceptable number of slots per tuple, 2 or more.
[ "Reads", "a", "TupleInstances", "from", "a", "line", ".", "Objects", "are", "optional", "and", "can", "vary", "in", "number", ".", "This", "makes", "the", "acceptable", "number", "of", "slots", "per", "tuple", "2", "or", "more", "." ]
def read_from_line(cls, line: str): fields = line.split("\t") if len(fields) == 1: tuple_string = fields[0] index = None label = None elif len(fields) == 2: if fields[0].isdigit(): index = int(fields[0]) tuple_string = fields[1] label = None else: tuple_string = fields[0] index = None label = fields[2].strip() == "1" else: index = int(fields[0]) tuple_string = fields[1] label = fields[2].strip() == "1" tuple_fields = tuple_string.split('###') if len(tuple_fields) < 2: raise RuntimeError("Unexpected number of fields in tuple: " + tuple_string) return cls(tuple_fields, label=label, index=index)
[ "def", "read_from_line", "(", "cls", ",", "line", ":", "str", ")", ":", "fields", "=", "line", ".", "split", "(", "\"\\t\"", ")", "if", "len", "(", "fields", ")", "==", "1", ":", "tuple_string", "=", "fields", "[", "0", "]", "index", "=", "None", "label", "=", "None", "elif", "len", "(", "fields", ")", "==", "2", ":", "if", "fields", "[", "0", "]", ".", "isdigit", "(", ")", ":", "index", "=", "int", "(", "fields", "[", "0", "]", ")", "tuple_string", "=", "fields", "[", "1", "]", "label", "=", "None", "else", ":", "tuple_string", "=", "fields", "[", "0", "]", "index", "=", "None", "label", "=", "fields", "[", "2", "]", ".", "strip", "(", ")", "==", "\"1\"", "else", ":", "index", "=", "int", "(", "fields", "[", "0", "]", ")", "tuple_string", "=", "fields", "[", "1", "]", "label", "=", "fields", "[", "2", "]", ".", "strip", "(", ")", "==", "\"1\"", "tuple_fields", "=", "tuple_string", ".", "split", "(", "'###'", ")", "if", "len", "(", "tuple_fields", ")", "<", "2", ":", "raise", "RuntimeError", "(", "\"Unexpected number of fields in tuple: \"", "+", "tuple_string", ")", "return", "cls", "(", "tuple_fields", ",", "label", "=", "label", ",", "index", "=", "index", ")" ]
Reads a TupleInstances from a line.
[ "Reads", "a", "TupleInstances", "from", "a", "line", "." ]
[ "\"\"\"\n Reads a TupleInstances from a line. The format has one of four options:\n\n (1) [subject]###[predicate]###[object1]...\n (2) [sentence index][tab][subject]###[predicate]###[object1]...\n (3) [subject]###[predicate]###[object1]...[tab][label]\n (4) [sentence index][tab][subject]###[predicate]###[object1]...[tab][label]\n\n Objects are optional, and can vary in number. This makes the acceptable number of slots per\n tuple, 2 or more.\n \"\"\"", "# Case 1", "# Case 2", "# Case 3", "# Case 4" ]
[ { "param": "cls", "type": null }, { "param": "line", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "line", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def read_from_line(cls, line: str): fields = line.split("\t") if len(fields) == 1: tuple_string = fields[0] index = None label = None elif len(fields) == 2: if fields[0].isdigit(): index = int(fields[0]) tuple_string = fields[1] label = None else: tuple_string = fields[0] index = None label = fields[2].strip() == "1" else: index = int(fields[0]) tuple_string = fields[1] label = fields[2].strip() == "1" tuple_fields = tuple_string.split('###') if len(tuple_fields) < 2: raise RuntimeError("Unexpected number of fields in tuple: " + tuple_string) return cls(tuple_fields, label=label, index=index)
131
756
a50c6a0056a6b4fd3f66568c6d36a3bbd51b0dd9
bce-toolkit/BCE
bce/logic/balancer/model.py
[ "BSD-3-Clause" ]
Python
_convert_unknown_id_to_symbol
<not_specific>
def _convert_unknown_id_to_symbol(unknown_id): """Get the symbol of an unknown. :type unknown_id: int :param unknown_id: The ID of the unknown. :rtype : str :return: The symbol. """ # If the |unknown_id| is zero, just returns |PROTECT_HEADER|a. if unknown_id == 0: return "a" # Initialize alphabet table. ch_table = "abcdefghijklmnopqrstuvwxyz" ch_table_len = len(ch_table) # Convert decimal to 26 ary. cur_id = unknown_id r = "" while cur_id != 0: r = ch_table[cur_id % ch_table_len] + r cur_id = int(cur_id / ch_table_len) return r
Get the symbol of an unknown. :type unknown_id: int :param unknown_id: The ID of the unknown. :rtype : str :return: The symbol.
Get the symbol of an unknown.
[ "Get", "the", "symbol", "of", "an", "unknown", "." ]
def _convert_unknown_id_to_symbol(unknown_id): if unknown_id == 0: return "a" ch_table = "abcdefghijklmnopqrstuvwxyz" ch_table_len = len(ch_table) cur_id = unknown_id r = "" while cur_id != 0: r = ch_table[cur_id % ch_table_len] + r cur_id = int(cur_id / ch_table_len) return r
[ "def", "_convert_unknown_id_to_symbol", "(", "unknown_id", ")", ":", "if", "unknown_id", "==", "0", ":", "return", "\"a\"", "ch_table", "=", "\"abcdefghijklmnopqrstuvwxyz\"", "ch_table_len", "=", "len", "(", "ch_table", ")", "cur_id", "=", "unknown_id", "r", "=", "\"\"", "while", "cur_id", "!=", "0", ":", "r", "=", "ch_table", "[", "cur_id", "%", "ch_table_len", "]", "+", "r", "cur_id", "=", "int", "(", "cur_id", "/", "ch_table_len", ")", "return", "r" ]
Get the symbol of an unknown.
[ "Get", "the", "symbol", "of", "an", "unknown", "." ]
[ "\"\"\"Get the symbol of an unknown.\n\n :type unknown_id: int\n :param unknown_id: The ID of the unknown.\n :rtype : str\n :return: The symbol.\n \"\"\"", "# If the |unknown_id| is zero, just returns |PROTECT_HEADER|a.", "# Initialize alphabet table.", "# Convert decimal to 26 ary." ]
[ { "param": "unknown_id", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": "str" } ], "raises": [], "params": [ { "identifier": "unknown_id", "type": null, "docstring": "The ID of the unknown.", "docstring_tokens": [ "The", "ID", "of", "the", "unknown", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _convert_unknown_id_to_symbol(unknown_id): if unknown_id == 0: return "a" ch_table = "abcdefghijklmnopqrstuvwxyz" ch_table_len = len(ch_table) cur_id = unknown_id r = "" while cur_id != 0: r = ch_table[cur_id % ch_table_len] + r cur_id = int(cur_id / ch_table_len) return r
132
700
1fe799da6cddb25113e43e0da619a723468d1013
logr4y/tgscrape
tgscrape.py
[ "MIT" ]
Python
guess_if_last
<not_specific>
def guess_if_last(lmsg): """Guesses if message is the last one in a group""" msg_day = lmsg['datetime'].split('T')[0] msg_day = datetime.datetime.strptime(msg_day, '%Y-%m-%d') check_day = datetime.datetime.today() - datetime.timedelta(days=1) if msg_day >= check_day: return True return False
Guesses if message is the last one in a group
Guesses if message is the last one in a group
[ "Guesses", "if", "message", "is", "the", "last", "one", "in", "a", "group" ]
def guess_if_last(lmsg): msg_day = lmsg['datetime'].split('T')[0] msg_day = datetime.datetime.strptime(msg_day, '%Y-%m-%d') check_day = datetime.datetime.today() - datetime.timedelta(days=1) if msg_day >= check_day: return True return False
[ "def", "guess_if_last", "(", "lmsg", ")", ":", "msg_day", "=", "lmsg", "[", "'datetime'", "]", ".", "split", "(", "'T'", ")", "[", "0", "]", "msg_day", "=", "datetime", ".", "datetime", ".", "strptime", "(", "msg_day", ",", "'%Y-%m-%d'", ")", "check_day", "=", "datetime", ".", "datetime", ".", "today", "(", ")", "-", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "if", "msg_day", ">=", "check_day", ":", "return", "True", "return", "False" ]
Guesses if message is the last one in a group
[ "Guesses", "if", "message", "is", "the", "last", "one", "in", "a", "group" ]
[ "\"\"\"Guesses if message is the last one in a group\"\"\"" ]
[ { "param": "lmsg", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "lmsg", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import datetime def guess_if_last(lmsg): msg_day = lmsg['datetime'].split('T')[0] msg_day = datetime.datetime.strptime(msg_day, '%Y-%m-%d') check_day = datetime.datetime.today() - datetime.timedelta(days=1) if msg_day >= check_day: return True return False
133
205
3107f63b7aef96fe1421e4ac30a8e7b0303215c7
ByRellex/sinstruments
sinstruments/simulators/icepap.py
[ "MIT" ]
Python
_cmd_result
<not_specific>
def _cmd_result(cmd_match): """retrieve the command error message prefix from the command line""" groups = cmd_match.groupdict() # replace None values with '' groups_str = dict([(k, ("" if v is None else v)) for k, v in groups.items()]) groups_str["instr"] = groups_str["instr"].upper() cmd_err = "{addr}{broadcast}{is_query}{instr}".format(**groups_str) return cmd_err
retrieve the command error message prefix from the command line
retrieve the command error message prefix from the command line
[ "retrieve", "the", "command", "error", "message", "prefix", "from", "the", "command", "line" ]
def _cmd_result(cmd_match): groups = cmd_match.groupdict() groups_str = dict([(k, ("" if v is None else v)) for k, v in groups.items()]) groups_str["instr"] = groups_str["instr"].upper() cmd_err = "{addr}{broadcast}{is_query}{instr}".format(**groups_str) return cmd_err
[ "def", "_cmd_result", "(", "cmd_match", ")", ":", "groups", "=", "cmd_match", ".", "groupdict", "(", ")", "groups_str", "=", "dict", "(", "[", "(", "k", ",", "(", "\"\"", "if", "v", "is", "None", "else", "v", ")", ")", "for", "k", ",", "v", "in", "groups", ".", "items", "(", ")", "]", ")", "groups_str", "[", "\"instr\"", "]", "=", "groups_str", "[", "\"instr\"", "]", ".", "upper", "(", ")", "cmd_err", "=", "\"{addr}{broadcast}{is_query}{instr}\"", ".", "format", "(", "**", "groups_str", ")", "return", "cmd_err" ]
retrieve the command error message prefix from the command line
[ "retrieve", "the", "command", "error", "message", "prefix", "from", "the", "command", "line" ]
[ "\"\"\"retrieve the command error message prefix from the command line\"\"\"", "# replace None values with ''" ]
[ { "param": "cmd_match", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cmd_match", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _cmd_result(cmd_match): groups = cmd_match.groupdict() groups_str = dict([(k, ("" if v is None else v)) for k, v in groups.items()]) groups_str["instr"] = groups_str["instr"].upper() cmd_err = "{addr}{broadcast}{is_query}{instr}".format(**groups_str) return cmd_err
134
934
5acf79fa3353bf8da526d4e500f619b0a8ab92e9
RhuaYuri/radio
projeto_pesquisa/Lib/site-packages/flyingcircus/base.py
[ "MIT" ]
Python
median
<not_specific>
def median( seq, force_sort=True): """ Compute the median of a numeric sequence. For iterative computation see: - `flyingcircus.next_median()` - `flyingcircus.next_medoid_and_median()` - `flyingcircus.i_median()` - `flyingcircus.i_medoid_and_median()` - `flyingcircus.i_median_and_median_abs_dev()` This is roughly comparable to `statistics.median()`. If more than one among median, medoid, quantile, quantiloid is needed, it is more efficient to sort the items prior to calling and then perform the multiple required calle with `force_sort` set to False. Args: seq (Sequence[Number]): The input items. force_sort (bool): Force sorting of the input items. If the items are already sorted, this can be safely set to False. Otherwise, it should be set to True. Returns: result (Number): The median of the items. Examples: >>> items = range(0, 52, 2) >>> median(items) 25.0 >>> statistics.median(items) == median(items) True See Also: - flyingcircus.median_and_median_abs_dev() - flyingcircus.next_median() - flyingcircus.next_medoid_and_median() - flyingcircus.i_median() - flyingcircus.i_medoid_and_median() - flyingcircus.i_median_and_median_abs_dev() - flyingcircus.medoid() - flyingcircus.quantile() - flyingcircus.interquantilic_range() - flyingcircus.sym_interquantilic_range() - flyingcircus.median_abs_dev() """ n = len(seq) i = n // 2 sorted_items = sorted(seq) if force_sort else seq if not (n % 2) and sorted_items[i - 1] != sorted_items[i]: median_ = (sorted_items[i - 1] + sorted_items[i]) / 2 else: median_ = sorted_items[i] return median_
Compute the median of a numeric sequence. For iterative computation see: - `flyingcircus.next_median()` - `flyingcircus.next_medoid_and_median()` - `flyingcircus.i_median()` - `flyingcircus.i_medoid_and_median()` - `flyingcircus.i_median_and_median_abs_dev()` This is roughly comparable to `statistics.median()`. If more than one among median, medoid, quantile, quantiloid is needed, it is more efficient to sort the items prior to calling and then perform the multiple required calle with `force_sort` set to False. Args: seq (Sequence[Number]): The input items. force_sort (bool): Force sorting of the input items. If the items are already sorted, this can be safely set to False. Otherwise, it should be set to True. Returns: result (Number): The median of the items. Examples: >>> items = range(0, 52, 2) >>> median(items) 25.0 >>> statistics.median(items) == median(items) True See Also: - flyingcircus.median_and_median_abs_dev() - flyingcircus.next_median() - flyingcircus.next_medoid_and_median() - flyingcircus.i_median() - flyingcircus.i_medoid_and_median() - flyingcircus.i_median_and_median_abs_dev() - flyingcircus.medoid() - flyingcircus.quantile() - flyingcircus.interquantilic_range() - flyingcircus.sym_interquantilic_range() - flyingcircus.median_abs_dev()
Compute the median of a numeric sequence. This is roughly comparable to `statistics.median()`. If more than one among median, medoid, quantile, quantiloid is needed, it is more efficient to sort the items prior to calling and then perform the multiple required calle with `force_sort` set to False.
[ "Compute", "the", "median", "of", "a", "numeric", "sequence", ".", "This", "is", "roughly", "comparable", "to", "`", "statistics", ".", "median", "()", "`", ".", "If", "more", "than", "one", "among", "median", "medoid", "quantile", "quantiloid", "is", "needed", "it", "is", "more", "efficient", "to", "sort", "the", "items", "prior", "to", "calling", "and", "then", "perform", "the", "multiple", "required", "calle", "with", "`", "force_sort", "`", "set", "to", "False", "." ]
def median( seq, force_sort=True): n = len(seq) i = n // 2 sorted_items = sorted(seq) if force_sort else seq if not (n % 2) and sorted_items[i - 1] != sorted_items[i]: median_ = (sorted_items[i - 1] + sorted_items[i]) / 2 else: median_ = sorted_items[i] return median_
[ "def", "median", "(", "seq", ",", "force_sort", "=", "True", ")", ":", "n", "=", "len", "(", "seq", ")", "i", "=", "n", "//", "2", "sorted_items", "=", "sorted", "(", "seq", ")", "if", "force_sort", "else", "seq", "if", "not", "(", "n", "%", "2", ")", "and", "sorted_items", "[", "i", "-", "1", "]", "!=", "sorted_items", "[", "i", "]", ":", "median_", "=", "(", "sorted_items", "[", "i", "-", "1", "]", "+", "sorted_items", "[", "i", "]", ")", "/", "2", "else", ":", "median_", "=", "sorted_items", "[", "i", "]", "return", "median_" ]
Compute the median of a numeric sequence.
[ "Compute", "the", "median", "of", "a", "numeric", "sequence", "." ]
[ "\"\"\"\n Compute the median of a numeric sequence.\n\n For iterative computation see:\n - `flyingcircus.next_median()`\n - `flyingcircus.next_medoid_and_median()`\n - `flyingcircus.i_median()`\n - `flyingcircus.i_medoid_and_median()`\n - `flyingcircus.i_median_and_median_abs_dev()`\n\n This is roughly comparable to `statistics.median()`.\n\n If more than one among median, medoid, quantile, quantiloid is needed,\n it is more efficient to sort the items prior to calling and then\n perform the multiple required calle with `force_sort` set to False.\n\n Args:\n seq (Sequence[Number]): The input items.\n force_sort (bool): Force sorting of the input items.\n If the items are already sorted, this can be safely set to False.\n Otherwise, it should be set to True.\n\n Returns:\n result (Number): The median of the items.\n\n Examples:\n >>> items = range(0, 52, 2)\n >>> median(items)\n 25.0\n >>> statistics.median(items) == median(items)\n True\n\n See Also:\n - flyingcircus.median_and_median_abs_dev()\n - flyingcircus.next_median()\n - flyingcircus.next_medoid_and_median()\n - flyingcircus.i_median()\n - flyingcircus.i_medoid_and_median()\n - flyingcircus.i_median_and_median_abs_dev()\n - flyingcircus.medoid()\n - flyingcircus.quantile()\n - flyingcircus.interquantilic_range()\n - flyingcircus.sym_interquantilic_range()\n - flyingcircus.median_abs_dev()\n \"\"\"" ]
[ { "param": "seq", "type": null }, { "param": "force_sort", "type": null } ]
{ "returns": [ { "docstring": "result (Number): The median of the items.", "docstring_tokens": [ "result", "(", "Number", ")", ":", "The", "median", "of", "the", "items", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "seq", "type": null, "docstring": "The input items.", "docstring_tokens": [ "The", "input", "items", "." ], "default": null, "is_optional": false }, { "identifier": "force_sort", "type": null, "docstring": "Force sorting of the input items.\nIf the items are already sorted, this can be safely set to False.\nOtherwise, it should be set to True.", "docstring_tokens": [ "Force", "sorting", "of", "the", "input", "items", ".", "If", "the", "items", "are", "already", "sorted", "this", "can", "be", "safely", "set", "to", "False", ".", "Otherwise", "it", "should", "be", "set", "to", "True", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [ { "identifier": "examples", "docstring": null, "docstring_tokens": [ "None" ] } ] }
def median( seq, force_sort=True): n = len(seq) i = n // 2 sorted_items = sorted(seq) if force_sort else seq if not (n % 2) and sorted_items[i - 1] != sorted_items[i]: median_ = (sorted_items[i - 1] + sorted_items[i]) / 2 else: median_ = sorted_items[i] return median_
135
131
a5b9563a7eb8e6b70fd19da335fda92520e4aadd
MPI-MAPSS/MAPSS
mapss/static/packages/arches/arches/app/utils/flatten_dict.py
[ "CC0-1.0" ]
Python
flatten_dict
<not_specific>
def flatten_dict(nested_dict, separator="_"): """ Flatten dict object with nested keys into a single level. Args: nested_dict: A nested dict object. separator: a string used to denote hierarchical Returns: The flattened dict object if successful, None otherwise. """ out = {} def flatten(x, name=""): if type(x) is dict: for a in x: flatten(x[a], name + a + separator) elif type(x) is list: i = 0 for a in x: flatten(a, name + str(i) + separator) i += 1 else: out[name[:-1]] = x flatten(nested_dict) return out
Flatten dict object with nested keys into a single level. Args: nested_dict: A nested dict object. separator: a string used to denote hierarchical Returns: The flattened dict object if successful, None otherwise.
Flatten dict object with nested keys into a single level.
[ "Flatten", "dict", "object", "with", "nested", "keys", "into", "a", "single", "level", "." ]
def flatten_dict(nested_dict, separator="_"): out = {} def flatten(x, name=""): if type(x) is dict: for a in x: flatten(x[a], name + a + separator) elif type(x) is list: i = 0 for a in x: flatten(a, name + str(i) + separator) i += 1 else: out[name[:-1]] = x flatten(nested_dict) return out
[ "def", "flatten_dict", "(", "nested_dict", ",", "separator", "=", "\"_\"", ")", ":", "out", "=", "{", "}", "def", "flatten", "(", "x", ",", "name", "=", "\"\"", ")", ":", "if", "type", "(", "x", ")", "is", "dict", ":", "for", "a", "in", "x", ":", "flatten", "(", "x", "[", "a", "]", ",", "name", "+", "a", "+", "separator", ")", "elif", "type", "(", "x", ")", "is", "list", ":", "i", "=", "0", "for", "a", "in", "x", ":", "flatten", "(", "a", ",", "name", "+", "str", "(", "i", ")", "+", "separator", ")", "i", "+=", "1", "else", ":", "out", "[", "name", "[", ":", "-", "1", "]", "]", "=", "x", "flatten", "(", "nested_dict", ")", "return", "out" ]
Flatten dict object with nested keys into a single level.
[ "Flatten", "dict", "object", "with", "nested", "keys", "into", "a", "single", "level", "." ]
[ "\"\"\"\n Flatten dict object with nested keys into a single level.\n Args:\n nested_dict: A nested dict object.\n separator: a string used to denote hierarchical\n Returns:\n The flattened dict object if successful, None otherwise.\n \"\"\"" ]
[ { "param": "nested_dict", "type": null }, { "param": "separator", "type": null } ]
{ "returns": [ { "docstring": "The flattened dict object if successful, None otherwise.", "docstring_tokens": [ "The", "flattened", "dict", "object", "if", "successful", "None", "otherwise", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "nested_dict", "type": null, "docstring": "A nested dict object.", "docstring_tokens": [ "A", "nested", "dict", "object", "." ], "default": null, "is_optional": null }, { "identifier": "separator", "type": null, "docstring": "a string used to denote hierarchical", "docstring_tokens": [ "a", "string", "used", "to", "denote", "hierarchical" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def flatten_dict(nested_dict, separator="_"): out = {} def flatten(x, name=""): if type(x) is dict: for a in x: flatten(x[a], name + a + separator) elif type(x) is list: i = 0 for a in x: flatten(a, name + str(i) + separator) i += 1 else: out[name[:-1]] = x flatten(nested_dict) return out
136
92
6deb97b7f83a3c2505dff29dbe9b8f02a411e7f2
ftnext/python-as-pyconjp-staff
timetable_maker/main.py
[ "MIT" ]
Python
handle_existing_worksheet
<not_specific>
def handle_existing_worksheet(spreadsheet, worksheet_name, do_overwrite): """Return worksheet when overwrite, otherwise stash and return None""" worksheet = spreadsheet.worksheet(worksheet_name) if do_overwrite: worksheet.clear() return worksheet new_title = f"{worksheet_name}_stash_{datetime.now():%Y%m%d_%H%M%S}" worksheet.update_title(new_title) return None
Return worksheet when overwrite, otherwise stash and return None
Return worksheet when overwrite, otherwise stash and return None
[ "Return", "worksheet", "when", "overwrite", "otherwise", "stash", "and", "return", "None" ]
def handle_existing_worksheet(spreadsheet, worksheet_name, do_overwrite): worksheet = spreadsheet.worksheet(worksheet_name) if do_overwrite: worksheet.clear() return worksheet new_title = f"{worksheet_name}_stash_{datetime.now():%Y%m%d_%H%M%S}" worksheet.update_title(new_title) return None
[ "def", "handle_existing_worksheet", "(", "spreadsheet", ",", "worksheet_name", ",", "do_overwrite", ")", ":", "worksheet", "=", "spreadsheet", ".", "worksheet", "(", "worksheet_name", ")", "if", "do_overwrite", ":", "worksheet", ".", "clear", "(", ")", "return", "worksheet", "new_title", "=", "f\"{worksheet_name}_stash_{datetime.now():%Y%m%d_%H%M%S}\"", "worksheet", ".", "update_title", "(", "new_title", ")", "return", "None" ]
Return worksheet when overwrite, otherwise stash and return None
[ "Return", "worksheet", "when", "overwrite", "otherwise", "stash", "and", "return", "None" ]
[ "\"\"\"Return worksheet when overwrite, otherwise stash and return None\"\"\"" ]
[ { "param": "spreadsheet", "type": null }, { "param": "worksheet_name", "type": null }, { "param": "do_overwrite", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "spreadsheet", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "worksheet_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "do_overwrite", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import datetime def handle_existing_worksheet(spreadsheet, worksheet_name, do_overwrite): worksheet = spreadsheet.worksheet(worksheet_name) if do_overwrite: worksheet.clear() return worksheet new_title = f"{worksheet_name}_stash_{datetime.now():%Y%m%d_%H%M%S}" worksheet.update_title(new_title) return None
139
926
2ba5cb3b10e94082f14c51c17f6d4f9cc693c19b
buffbob/titanic
src/titanic/go1_utils.py
[ "MIT" ]
Python
process_digit_data
<not_specific>
def process_digit_data(df): """ df(a DataFrame) is num x 784 scale and convert to num x 28 x 28 return an array """ df = df/255 vals = df.values.reshape(-1,28,28,1) return vals
df(a DataFrame) is num x 784 scale and convert to num x 28 x 28 return an array
df(a DataFrame) is num x 784 scale and convert to num x 28 x 28 return an array
[ "df", "(", "a", "DataFrame", ")", "is", "num", "x", "784", "scale", "and", "convert", "to", "num", "x", "28", "x", "28", "return", "an", "array" ]
def process_digit_data(df): df = df/255 vals = df.values.reshape(-1,28,28,1) return vals
[ "def", "process_digit_data", "(", "df", ")", ":", "df", "=", "df", "/", "255", "vals", "=", "df", ".", "values", ".", "reshape", "(", "-", "1", ",", "28", ",", "28", ",", "1", ")", "return", "vals" ]
df(a DataFrame) is num x 784 scale and convert to num x 28 x 28 return an array
[ "df", "(", "a", "DataFrame", ")", "is", "num", "x", "784", "scale", "and", "convert", "to", "num", "x", "28", "x", "28", "return", "an", "array" ]
[ "\"\"\"\n df(a DataFrame) is num x 784\n scale and convert to num x 28 x 28\n return an array\n \"\"\"" ]
[ { "param": "df", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "df", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def process_digit_data(df): df = df/255 vals = df.values.reshape(-1,28,28,1) return vals
140
247
2dc10fa28137004d12712d67664389ed355f338e
Jeffro80/Convert_Expenses
convertExpenses.py
[ "CC-BY-3.0" ]
Python
weekly_to_annual
<not_specific>
def weekly_to_annual(weekly): """Convert weekly amount to annual cost. Args: weekly (float): Weekly cost. Returns: (float): Annual cost. """ return weekly * 52
Convert weekly amount to annual cost. Args: weekly (float): Weekly cost. Returns: (float): Annual cost.
Convert weekly amount to annual cost.
[ "Convert", "weekly", "amount", "to", "annual", "cost", "." ]
def weekly_to_annual(weekly): return weekly * 52
[ "def", "weekly_to_annual", "(", "weekly", ")", ":", "return", "weekly", "*", "52" ]
Convert weekly amount to annual cost.
[ "Convert", "weekly", "amount", "to", "annual", "cost", "." ]
[ "\"\"\"Convert weekly amount to annual cost.\n \n Args:\n weekly (float): Weekly cost.\n \n Returns:\n (float): Annual cost.\n \"\"\"" ]
[ { "param": "weekly", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": "(float)" } ], "raises": [], "params": [ { "identifier": "weekly", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def weekly_to_annual(weekly): return weekly * 52
141
529
d24b56bd00d99dc1d7abe4594e5cfb75ce250077
arccode/factory
py/test/fixture/arduino.py
[ "BSD-3-Clause" ]
Python
_Blink
null
def _Blink(arduino, times=1, interval_secs=0.1): """Blinks LED light in Arduino board. This is an example of how to use ArduinoController object. """ for nth in range(times): if nth: time.sleep(interval_secs) if not arduino.SendExpectReceive('H', 'H', retry=2): raise Exception('Failed to send command "H"') time.sleep(interval_secs) if not arduino.SendExpectReceive('L', 'L', retry=2): raise Exception('Failed to send command "L"')
Blinks LED light in Arduino board. This is an example of how to use ArduinoController object.
Blinks LED light in Arduino board. This is an example of how to use ArduinoController object.
[ "Blinks", "LED", "light", "in", "Arduino", "board", ".", "This", "is", "an", "example", "of", "how", "to", "use", "ArduinoController", "object", "." ]
def _Blink(arduino, times=1, interval_secs=0.1): for nth in range(times): if nth: time.sleep(interval_secs) if not arduino.SendExpectReceive('H', 'H', retry=2): raise Exception('Failed to send command "H"') time.sleep(interval_secs) if not arduino.SendExpectReceive('L', 'L', retry=2): raise Exception('Failed to send command "L"')
[ "def", "_Blink", "(", "arduino", ",", "times", "=", "1", ",", "interval_secs", "=", "0.1", ")", ":", "for", "nth", "in", "range", "(", "times", ")", ":", "if", "nth", ":", "time", ".", "sleep", "(", "interval_secs", ")", "if", "not", "arduino", ".", "SendExpectReceive", "(", "'H'", ",", "'H'", ",", "retry", "=", "2", ")", ":", "raise", "Exception", "(", "'Failed to send command \"H\"'", ")", "time", ".", "sleep", "(", "interval_secs", ")", "if", "not", "arduino", ".", "SendExpectReceive", "(", "'L'", ",", "'L'", ",", "retry", "=", "2", ")", ":", "raise", "Exception", "(", "'Failed to send command \"L\"'", ")" ]
Blinks LED light in Arduino board.
[ "Blinks", "LED", "light", "in", "Arduino", "board", "." ]
[ "\"\"\"Blinks LED light in Arduino board.\n\n This is an example of how to use ArduinoController object.\n \"\"\"" ]
[ { "param": "arduino", "type": null }, { "param": "times", "type": null }, { "param": "interval_secs", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "arduino", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "times", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "interval_secs", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import time def _Blink(arduino, times=1, interval_secs=0.1): for nth in range(times): if nth: time.sleep(interval_secs) if not arduino.SendExpectReceive('H', 'H', retry=2): raise Exception('Failed to send command "H"') time.sleep(interval_secs) if not arduino.SendExpectReceive('L', 'L', retry=2): raise Exception('Failed to send command "L"')
142
233
4bc5ce9a85413586a08c49651984f0ab6f940317
kevlar1818/mipster
mipster.py
[ "MIT" ]
Python
parse_cmd
<not_specific>
def parse_cmd(line): '''takes a string and breaks it into a command name and its arguments''' line = re.sub('^\w+:', '', line) line = re.sub('#.*', '', line) # handle in-line comments line = re.sub('[,\(\)]', ' ', line) # handle commas and parens return re.split('\s+', line.strip())
takes a string and breaks it into a command name and its arguments
takes a string and breaks it into a command name and its arguments
[ "takes", "a", "string", "and", "breaks", "it", "into", "a", "command", "name", "and", "its", "arguments" ]
def parse_cmd(line): line = re.sub('^\w+:', '', line) line = re.sub('#.*', '', line) line = re.sub('[,\(\)]', ' ', line) return re.split('\s+', line.strip())
[ "def", "parse_cmd", "(", "line", ")", ":", "line", "=", "re", ".", "sub", "(", "'^\\w+:'", ",", "''", ",", "line", ")", "line", "=", "re", ".", "sub", "(", "'#.*'", ",", "''", ",", "line", ")", "line", "=", "re", ".", "sub", "(", "'[,\\(\\)]'", ",", "' '", ",", "line", ")", "return", "re", ".", "split", "(", "'\\s+'", ",", "line", ".", "strip", "(", ")", ")" ]
takes a string and breaks it into a command name and its arguments
[ "takes", "a", "string", "and", "breaks", "it", "into", "a", "command", "name", "and", "its", "arguments" ]
[ "'''takes a string and breaks it into a command name and its arguments'''", "# handle in-line comments", "# handle commas and parens" ]
[ { "param": "line", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "line", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def parse_cmd(line): line = re.sub('^\w+:', '', line) line = re.sub('#.*', '', line) line = re.sub('[,\(\)]', ' ', line) return re.split('\s+', line.strip())
143
589
b41a05c1ac470df22125490ae349e0dec66bd31c
NervanaSystems/ngraph-python
examples/deepspeech/deepspeech.py
[ "Apache-2.0" ]
Python
decode_outputs
<not_specific>
def decode_outputs(probs, inds, decoder): """ Decode from network probabilities and compute CER Arguments: probs: Tensor of character probabilities inds: List of character indices for ground truth decoder: instance of a Decoder Returns: Tuple of (ground truth transcript, decoded transcript, CER) """ ground_truth = decoder.process_string(decoder.convert_to_string(inds), remove_repetitions=False) decoded_string = decoder.decode(probs) cer = decoder.cer(ground_truth, decoded_string) / float(len(ground_truth)) return ground_truth, decoded_string, cer
Decode from network probabilities and compute CER Arguments: probs: Tensor of character probabilities inds: List of character indices for ground truth decoder: instance of a Decoder Returns: Tuple of (ground truth transcript, decoded transcript, CER)
Decode from network probabilities and compute CER
[ "Decode", "from", "network", "probabilities", "and", "compute", "CER" ]
def decode_outputs(probs, inds, decoder): ground_truth = decoder.process_string(decoder.convert_to_string(inds), remove_repetitions=False) decoded_string = decoder.decode(probs) cer = decoder.cer(ground_truth, decoded_string) / float(len(ground_truth)) return ground_truth, decoded_string, cer
[ "def", "decode_outputs", "(", "probs", ",", "inds", ",", "decoder", ")", ":", "ground_truth", "=", "decoder", ".", "process_string", "(", "decoder", ".", "convert_to_string", "(", "inds", ")", ",", "remove_repetitions", "=", "False", ")", "decoded_string", "=", "decoder", ".", "decode", "(", "probs", ")", "cer", "=", "decoder", ".", "cer", "(", "ground_truth", ",", "decoded_string", ")", "/", "float", "(", "len", "(", "ground_truth", ")", ")", "return", "ground_truth", ",", "decoded_string", ",", "cer" ]
Decode from network probabilities and compute CER
[ "Decode", "from", "network", "probabilities", "and", "compute", "CER" ]
[ "\"\"\"\n Decode from network probabilities and compute CER\n Arguments:\n probs: Tensor of character probabilities\n inds: List of character indices for ground truth\n decoder: instance of a Decoder\n\n Returns:\n Tuple of (ground truth transcript, decoded transcript, CER)\n \"\"\"" ]
[ { "param": "probs", "type": null }, { "param": "inds", "type": null }, { "param": "decoder", "type": null } ]
{ "returns": [ { "docstring": "Tuple of (ground truth transcript, decoded transcript, CER)", "docstring_tokens": [ "Tuple", "of", "(", "ground", "truth", "transcript", "decoded", "transcript", "CER", ")" ], "type": null } ], "raises": [], "params": [ { "identifier": "probs", "type": null, "docstring": "Tensor of character probabilities", "docstring_tokens": [ "Tensor", "of", "character", "probabilities" ], "default": null, "is_optional": null }, { "identifier": "inds", "type": null, "docstring": "List of character indices for ground truth", "docstring_tokens": [ "List", "of", "character", "indices", "for", "ground", "truth" ], "default": null, "is_optional": null }, { "identifier": "decoder", "type": null, "docstring": "instance of a Decoder", "docstring_tokens": [ "instance", "of", "a", "Decoder" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def decode_outputs(probs, inds, decoder): ground_truth = decoder.process_string(decoder.convert_to_string(inds), remove_repetitions=False) decoded_string = decoder.decode(probs) cer = decoder.cer(ground_truth, decoded_string) / float(len(ground_truth)) return ground_truth, decoded_string, cer
144
257
f7e07e8a5c6379c2f966fd23d5e9c11c4e99552b
bwhmather/python-jpake
jpake/__init__.py
[ "BSD-3-Clause" ]
Python
pascal
<not_specific>
def pascal(s): """ Encode a byte string as a pascal string with a big-endian header """ if len(s) >= 2**16: raise ValueError("cannot encode value greater than (2^8)^(2^16)") return len(s).to_bytes(2, 'big') + s
Encode a byte string as a pascal string with a big-endian header
Encode a byte string as a pascal string with a big-endian header
[ "Encode", "a", "byte", "string", "as", "a", "pascal", "string", "with", "a", "big", "-", "endian", "header" ]
def pascal(s): if len(s) >= 2**16: raise ValueError("cannot encode value greater than (2^8)^(2^16)") return len(s).to_bytes(2, 'big') + s
[ "def", "pascal", "(", "s", ")", ":", "if", "len", "(", "s", ")", ">=", "2", "**", "16", ":", "raise", "ValueError", "(", "\"cannot encode value greater than (2^8)^(2^16)\"", ")", "return", "len", "(", "s", ")", ".", "to_bytes", "(", "2", ",", "'big'", ")", "+", "s" ]
Encode a byte string as a pascal string with a big-endian header
[ "Encode", "a", "byte", "string", "as", "a", "pascal", "string", "with", "a", "big", "-", "endian", "header" ]
[ "\"\"\"\n Encode a byte string as a pascal string with a big-endian header\n \"\"\"" ]
[ { "param": "s", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "s", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def pascal(s): if len(s) >= 2**16: raise ValueError("cannot encode value greater than (2^8)^(2^16)") return len(s).to_bytes(2, 'big') + s
145
874
bc1c7538ee455539c4f5c750aebdb98ef2a3d10a
Robpol86/general
convert_music.py
[ "MIT" ]
Python
error
null
def error(message, code=1): """Prints an error message to stderr and exits with a status of 1 by default.""" if message: print('ERROR: {}'.format(message), file=sys.stderr) else: print(file=sys.stderr) sys.exit(code)
Prints an error message to stderr and exits with a status of 1 by default.
Prints an error message to stderr and exits with a status of 1 by default.
[ "Prints", "an", "error", "message", "to", "stderr", "and", "exits", "with", "a", "status", "of", "1", "by", "default", "." ]
def error(message, code=1): if message: print('ERROR: {}'.format(message), file=sys.stderr) else: print(file=sys.stderr) sys.exit(code)
[ "def", "error", "(", "message", ",", "code", "=", "1", ")", ":", "if", "message", ":", "print", "(", "'ERROR: {}'", ".", "format", "(", "message", ")", ",", "file", "=", "sys", ".", "stderr", ")", "else", ":", "print", "(", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "code", ")" ]
Prints an error message to stderr and exits with a status of 1 by default.
[ "Prints", "an", "error", "message", "to", "stderr", "and", "exits", "with", "a", "status", "of", "1", "by", "default", "." ]
[ "\"\"\"Prints an error message to stderr and exits with a status of 1 by default.\"\"\"" ]
[ { "param": "message", "type": null }, { "param": "code", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "message", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "code", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import sys def error(message, code=1): if message: print('ERROR: {}'.format(message), file=sys.stderr) else: print(file=sys.stderr) sys.exit(code)
146
317
53de0bddc21359194be6ccbf61eecf40d0263deb
Omritk/warriorframework
warhorn/source/utils.py
[ "Apache-2.0" ]
Python
words
null
def words(import_actions): """ This function gives out the words by splitting the lines :Arguments: 1. import_actions (list) = list of the lines starting from 'import' :Returns: list = list of words in the line """ line_stream = iter(import_actions) for line in line_stream: for word in line.split(): yield word
This function gives out the words by splitting the lines :Arguments: 1. import_actions (list) = list of the lines starting from 'import' :Returns: list = list of words in the line
This function gives out the words by splitting the lines
[ "This", "function", "gives", "out", "the", "words", "by", "splitting", "the", "lines" ]
def words(import_actions): line_stream = iter(import_actions) for line in line_stream: for word in line.split(): yield word
[ "def", "words", "(", "import_actions", ")", ":", "line_stream", "=", "iter", "(", "import_actions", ")", "for", "line", "in", "line_stream", ":", "for", "word", "in", "line", ".", "split", "(", ")", ":", "yield", "word" ]
This function gives out the words by splitting the lines
[ "This", "function", "gives", "out", "the", "words", "by", "splitting", "the", "lines" ]
[ "\"\"\" This function gives out the words by splitting the lines\n\n :Arguments:\n\n 1. import_actions (list) = list of the lines starting from 'import'\n\n :Returns:\n\n list = list of words in the line\n\n \"\"\"" ]
[ { "param": "import_actions", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "import_actions", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [ { "identifier": "Arguments", "docstring": "1. import_actions (list) = list of the lines starting from 'import'", "docstring_tokens": [ "1", ".", "import_actions", "(", "list", ")", "=", "list", "of", "the", "lines", "starting", "from", "'", "import", "'" ] }, { "identifier": "Returns", "docstring": "list = list of words in the line", "docstring_tokens": [ "list", "=", "list", "of", "words", "in", "the", "line" ] } ] }
def words(import_actions): line_stream = iter(import_actions) for line in line_stream: for word in line.split(): yield word
147
122
417b92b4da60e1680d5399326786ea34d3efffd1
MTrajK/python-projects
SureDone API/suredone_upload.py
[ "MIT" ]
Python
construct_input_file_path
<not_specific>
def construct_input_file_path(args, logger): """Gets the input file value from the command line and construct the path. Parameters: args: Object with command line arguments (input file path) logger: Function used for logging Returns: Input file path. """ if args.input_file == None: raise Exception('Missing input file path.') # absolute path if os.path.isabs(args.input_file): return args.input_file # relative path if sys.platform == 'linux' or sys.platform == 'linux2': # Linux input_file = ['~'] elif sys.platform == 'win32': # Windows input_file = ['~', 'Downloads'] input_file.append(args.input_file) return os.path.expanduser(os.sep.join(input_file))
Gets the input file value from the command line and construct the path. Parameters: args: Object with command line arguments (input file path) logger: Function used for logging Returns: Input file path.
Gets the input file value from the command line and construct the path.
[ "Gets", "the", "input", "file", "value", "from", "the", "command", "line", "and", "construct", "the", "path", "." ]
def construct_input_file_path(args, logger): if args.input_file == None: raise Exception('Missing input file path.') if os.path.isabs(args.input_file): return args.input_file if sys.platform == 'linux' or sys.platform == 'linux2': input_file = ['~'] elif sys.platform == 'win32': input_file = ['~', 'Downloads'] input_file.append(args.input_file) return os.path.expanduser(os.sep.join(input_file))
[ "def", "construct_input_file_path", "(", "args", ",", "logger", ")", ":", "if", "args", ".", "input_file", "==", "None", ":", "raise", "Exception", "(", "'Missing input file path.'", ")", "if", "os", ".", "path", ".", "isabs", "(", "args", ".", "input_file", ")", ":", "return", "args", ".", "input_file", "if", "sys", ".", "platform", "==", "'linux'", "or", "sys", ".", "platform", "==", "'linux2'", ":", "input_file", "=", "[", "'~'", "]", "elif", "sys", ".", "platform", "==", "'win32'", ":", "input_file", "=", "[", "'~'", ",", "'Downloads'", "]", "input_file", ".", "append", "(", "args", ".", "input_file", ")", "return", "os", ".", "path", ".", "expanduser", "(", "os", ".", "sep", ".", "join", "(", "input_file", ")", ")" ]
Gets the input file value from the command line and construct the path.
[ "Gets", "the", "input", "file", "value", "from", "the", "command", "line", "and", "construct", "the", "path", "." ]
[ "\"\"\"Gets the input file value from the command line and construct the path.\n\n Parameters:\n args: Object with command line arguments (input file path)\n logger: Function used for logging\n \n Returns:\n Input file path.\n \"\"\"", "# absolute path", "# relative path", "# Linux", "# Windows" ]
[ { "param": "args", "type": null }, { "param": "logger", "type": null } ]
{ "returns": [ { "docstring": "Input file path.", "docstring_tokens": [ "Input", "file", "path", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "args", "type": null, "docstring": "Object with command line arguments (input file path)", "docstring_tokens": [ "Object", "with", "command", "line", "arguments", "(", "input", "file", "path", ")" ], "default": null, "is_optional": null }, { "identifier": "logger", "type": null, "docstring": "Function used for logging", "docstring_tokens": [ "Function", "used", "for", "logging" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import sys import os def construct_input_file_path(args, logger): if args.input_file == None: raise Exception('Missing input file path.') if os.path.isabs(args.input_file): return args.input_file if sys.platform == 'linux' or sys.platform == 'linux2': input_file = ['~'] elif sys.platform == 'win32': input_file = ['~', 'Downloads'] input_file.append(args.input_file) return os.path.expanduser(os.sep.join(input_file))
148
493
c813809667d8d1c8a2280e6941692d300ff47a94
moorea1/DS595CS525-RL-Projects
Project1/mdp_dp.py
[ "MIT" ]
Python
render_single
<not_specific>
def render_single(env, policy, render = False, n_episodes=100): """ Given a game envrionemnt of gym package, play multiple episodes of the game. An episode is over when the returned value for "done" = True. At each step, pick an action and collect the reward and new state from the game. Parameters: ---------- env: gym.core.Environment Environment to play on. Must have nS, nA, and P as attributes. policy: np.array of shape [env.nS, env.nA] The action to take at a given state render: whether or not to render the game(it's slower to render the game) n_episodes: the number of episodes to play in the game. Returns: ------ total_rewards: the total number of rewards achieved in the game. """ total_rewards = 0 for _ in range(n_episodes): ob = env.reset() # initialize the episode done = False while not done: if render: env.render() # render the game ############################ # YOUR IMPLEMENTATION HERE # return total_rewards
Given a game envrionemnt of gym package, play multiple episodes of the game. An episode is over when the returned value for "done" = True. At each step, pick an action and collect the reward and new state from the game. Parameters: ---------- env: gym.core.Environment Environment to play on. Must have nS, nA, and P as attributes. policy: np.array of shape [env.nS, env.nA] The action to take at a given state render: whether or not to render the game(it's slower to render the game) n_episodes: the number of episodes to play in the game. Returns: ------ total_rewards: the total number of rewards achieved in the game.
Given a game envrionemnt of gym package, play multiple episodes of the game. An episode is over when the returned value for "done" = True. At each step, pick an action and collect the reward and new state from the game. gym.core.Environment Environment to play on. Must have nS, nA, and P as attributes. policy: np.array of shape [env.nS, env.nA] The action to take at a given state render: whether or not to render the game(it's slower to render the game) n_episodes: the number of episodes to play in the game. Returns. the total number of rewards achieved in the game.
[ "Given", "a", "game", "envrionemnt", "of", "gym", "package", "play", "multiple", "episodes", "of", "the", "game", ".", "An", "episode", "is", "over", "when", "the", "returned", "value", "for", "\"", "done", "\"", "=", "True", ".", "At", "each", "step", "pick", "an", "action", "and", "collect", "the", "reward", "and", "new", "state", "from", "the", "game", ".", "gym", ".", "core", ".", "Environment", "Environment", "to", "play", "on", ".", "Must", "have", "nS", "nA", "and", "P", "as", "attributes", ".", "policy", ":", "np", ".", "array", "of", "shape", "[", "env", ".", "nS", "env", ".", "nA", "]", "The", "action", "to", "take", "at", "a", "given", "state", "render", ":", "whether", "or", "not", "to", "render", "the", "game", "(", "it", "'", "s", "slower", "to", "render", "the", "game", ")", "n_episodes", ":", "the", "number", "of", "episodes", "to", "play", "in", "the", "game", ".", "Returns", ".", "the", "total", "number", "of", "rewards", "achieved", "in", "the", "game", "." ]
def render_single(env, policy, render = False, n_episodes=100): total_rewards = 0 for _ in range(n_episodes): ob = env.reset() done = False while not done: if render: env.render() return total_rewards
[ "def", "render_single", "(", "env", ",", "policy", ",", "render", "=", "False", ",", "n_episodes", "=", "100", ")", ":", "total_rewards", "=", "0", "for", "_", "in", "range", "(", "n_episodes", ")", ":", "ob", "=", "env", ".", "reset", "(", ")", "done", "=", "False", "while", "not", "done", ":", "if", "render", ":", "env", ".", "render", "(", ")", "return", "total_rewards" ]
Given a game envrionemnt of gym package, play multiple episodes of the game.
[ "Given", "a", "game", "envrionemnt", "of", "gym", "package", "play", "multiple", "episodes", "of", "the", "game", "." ]
[ "\"\"\"\n Given a game envrionemnt of gym package, play multiple episodes of the game.\n An episode is over when the returned value for \"done\" = True.\n At each step, pick an action and collect the reward and new state from the game.\n\n Parameters:\n ----------\n env: gym.core.Environment\n Environment to play on. Must have nS, nA, and P as attributes.\n policy: np.array of shape [env.nS, env.nA]\n The action to take at a given state\n render: whether or not to render the game(it's slower to render the game)\n n_episodes: the number of episodes to play in the game. \n Returns:\n ------\n total_rewards: the total number of rewards achieved in the game.\n \"\"\"", "# initialize the episode", "# render the game", "############################", "# YOUR IMPLEMENTATION HERE #" ]
[ { "param": "env", "type": null }, { "param": "policy", "type": null }, { "param": "render", "type": null }, { "param": "n_episodes", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "env", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "policy", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "render", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "n_episodes", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def render_single(env, policy, render = False, n_episodes=100): total_rewards = 0 for _ in range(n_episodes): ob = env.reset() done = False while not done: if render: env.render() return total_rewards
149
458
cf64bc9f88234a57121765ad15e762b30128bc19
meraki/meraki-python-sdk
meraki_sdk/models/update_network_cellular_gateway_settings_dhcp_model.py
[ "MIT" ]
Python
from_dictionary
<not_specific>
def from_dictionary(cls, dictionary): """Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class. """ if dictionary is None: return None # Extract variables from the dictionary dhcp_lease_time = dictionary.get('dhcpLeaseTime') dns_nameservers = dictionary.get('dnsNameservers') dns_custom_nameservers = dictionary.get('dnsCustomNameservers') # Return an object of this model return cls(dhcp_lease_time, dns_nameservers, dns_custom_nameservers)
Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.
Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. An instance of this structure class.
[ "Creates", "an", "instance", "of", "this", "model", "from", "a", "dictionary", "Args", ":", "dictionary", "(", "dictionary", ")", ":", "A", "dictionary", "representation", "of", "the", "object", "as", "obtained", "from", "the", "deserialization", "of", "the", "server", "'", "s", "response", ".", "The", "keys", "MUST", "match", "property", "names", "in", "the", "API", "description", ".", "An", "instance", "of", "this", "structure", "class", "." ]
def from_dictionary(cls, dictionary): if dictionary is None: return None dhcp_lease_time = dictionary.get('dhcpLeaseTime') dns_nameservers = dictionary.get('dnsNameservers') dns_custom_nameservers = dictionary.get('dnsCustomNameservers') return cls(dhcp_lease_time, dns_nameservers, dns_custom_nameservers)
[ "def", "from_dictionary", "(", "cls", ",", "dictionary", ")", ":", "if", "dictionary", "is", "None", ":", "return", "None", "dhcp_lease_time", "=", "dictionary", ".", "get", "(", "'dhcpLeaseTime'", ")", "dns_nameservers", "=", "dictionary", ".", "get", "(", "'dnsNameservers'", ")", "dns_custom_nameservers", "=", "dictionary", ".", "get", "(", "'dnsCustomNameservers'", ")", "return", "cls", "(", "dhcp_lease_time", ",", "dns_nameservers", ",", "dns_custom_nameservers", ")" ]
Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response.
[ "Creates", "an", "instance", "of", "this", "model", "from", "a", "dictionary", "Args", ":", "dictionary", "(", "dictionary", ")", ":", "A", "dictionary", "representation", "of", "the", "object", "as", "obtained", "from", "the", "deserialization", "of", "the", "server", "'", "s", "response", "." ]
[ "\"\"\"Creates an instance of this model from a dictionary\r\n\r\n Args:\r\n dictionary (dictionary): A dictionary representation of the object as\r\n obtained from the deserialization of the server's response. The keys\r\n MUST match property names in the API description.\r\n\r\n Returns:\r\n object: An instance of this structure class.\r\n\r\n \"\"\"", "# Extract variables from the dictionary\r", "# Return an object of this model\r" ]
[ { "param": "cls", "type": null }, { "param": "dictionary", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "dictionary", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def from_dictionary(cls, dictionary): if dictionary is None: return None dhcp_lease_time = dictionary.get('dhcpLeaseTime') dns_nameservers = dictionary.get('dnsNameservers') dns_custom_nameservers = dictionary.get('dnsCustomNameservers') return cls(dhcp_lease_time, dns_nameservers, dns_custom_nameservers)
150
171
8f4a1203b463b44bc7984646f24dc9de34450f34
anshulahuja98/python3-crdt
py3crdt/orset.py
[ "MIT" ]
Python
add
<not_specific>
def add(payload, elem, unique_tag): """ The function to add an element with it's unique tag to ORSet object's payload. Args: payload (list): Payload in which element has to be added. elem (any_type): The element to be added. unique_tag (any_type): Tag to identify element. Returns: payload (list): Payload in which element is added. """ # Bool value to check if element already in payload found = False for item in payload: # If element already in payload, add unique_tag to it's tag_list if elem == item["elem"]: item["tags"].append(unique_tag) found = True break # If element not in payload, add element with unique tag if not found: payload.append({"elem": elem, "tags": [unique_tag]}) # Sort the payload payload.sort(key=lambda i: i['elem']) return payload
The function to add an element with it's unique tag to ORSet object's payload. Args: payload (list): Payload in which element has to be added. elem (any_type): The element to be added. unique_tag (any_type): Tag to identify element. Returns: payload (list): Payload in which element is added.
The function to add an element with it's unique tag to ORSet object's payload.
[ "The", "function", "to", "add", "an", "element", "with", "it", "'", "s", "unique", "tag", "to", "ORSet", "object", "'", "s", "payload", "." ]
def add(payload, elem, unique_tag): found = False for item in payload: if elem == item["elem"]: item["tags"].append(unique_tag) found = True break if not found: payload.append({"elem": elem, "tags": [unique_tag]}) payload.sort(key=lambda i: i['elem']) return payload
[ "def", "add", "(", "payload", ",", "elem", ",", "unique_tag", ")", ":", "found", "=", "False", "for", "item", "in", "payload", ":", "if", "elem", "==", "item", "[", "\"elem\"", "]", ":", "item", "[", "\"tags\"", "]", ".", "append", "(", "unique_tag", ")", "found", "=", "True", "break", "if", "not", "found", ":", "payload", ".", "append", "(", "{", "\"elem\"", ":", "elem", ",", "\"tags\"", ":", "[", "unique_tag", "]", "}", ")", "payload", ".", "sort", "(", "key", "=", "lambda", "i", ":", "i", "[", "'elem'", "]", ")", "return", "payload" ]
The function to add an element with it's unique tag to ORSet object's payload.
[ "The", "function", "to", "add", "an", "element", "with", "it", "'", "s", "unique", "tag", "to", "ORSet", "object", "'", "s", "payload", "." ]
[ "\"\"\"\n The function to add an element with it's unique tag to ORSet object's payload.\n\n Args:\n payload (list): Payload in which element has to be added.\n elem (any_type): The element to be added.\n unique_tag (any_type): Tag to identify element.\n\n Returns:\n payload (list): Payload in which element is added.\n \"\"\"", "# Bool value to check if element already in payload", "# If element already in payload, add unique_tag to it's tag_list", "# If element not in payload, add element with unique tag", "# Sort the payload" ]
[ { "param": "payload", "type": null }, { "param": "elem", "type": null }, { "param": "unique_tag", "type": null } ]
{ "returns": [ { "docstring": "payload (list): Payload in which element is added.", "docstring_tokens": [ "payload", "(", "list", ")", ":", "Payload", "in", "which", "element", "is", "added", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "payload", "type": null, "docstring": "Payload in which element has to be added.", "docstring_tokens": [ "Payload", "in", "which", "element", "has", "to", "be", "added", "." ], "default": null, "is_optional": false }, { "identifier": "elem", "type": null, "docstring": "The element to be added.", "docstring_tokens": [ "The", "element", "to", "be", "added", "." ], "default": null, "is_optional": false }, { "identifier": "unique_tag", "type": null, "docstring": "Tag to identify element.", "docstring_tokens": [ "Tag", "to", "identify", "element", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def add(payload, elem, unique_tag): found = False for item in payload: if elem == item["elem"]: item["tags"].append(unique_tag) found = True break if not found: payload.append({"elem": elem, "tags": [unique_tag]}) payload.sort(key=lambda i: i['elem']) return payload
151
547
9e417a39aedeffec349c6214ef92ac204904d9ee
jankukacka/happy
src/happy/io.py
[ "MIT" ]
Python
save_pickle
<not_specific>
def save_pickle(filename, data): ''' Save pickle file by its filename. ''' ## Pickle files import pickle with open(filename, 'wb') as file: return pickle.dump(data, file, protocol=4)
Save pickle file by its filename.
Save pickle file by its filename.
[ "Save", "pickle", "file", "by", "its", "filename", "." ]
def save_pickle(filename, data): import pickle with open(filename, 'wb') as file: return pickle.dump(data, file, protocol=4)
[ "def", "save_pickle", "(", "filename", ",", "data", ")", ":", "import", "pickle", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "file", ":", "return", "pickle", ".", "dump", "(", "data", ",", "file", ",", "protocol", "=", "4", ")" ]
Save pickle file by its filename.
[ "Save", "pickle", "file", "by", "its", "filename", "." ]
[ "'''\n Save pickle file by its filename.\n '''", "## Pickle files" ]
[ { "param": "filename", "type": null }, { "param": "data", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "filename", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import pickle def save_pickle(filename, data): import pickle with open(filename, 'wb') as file: return pickle.dump(data, file, protocol=4)
152
515
f3ea671bf79aa3e0db549f06061fa9343b09ed70
JonasSinjan/hrt_pipeline
src/utils.py
[ "MIT" ]
Python
check_filenames
<not_specific>
def check_filenames(data_f): """ checks if the science scans have the same DID - this would cause an issue for naming the output demod files """ scan_name_list = [str(scan.split('.fits')[0][-10:]) for scan in data_f] seen = set() uniq_scan_DIDs = [x for x in scan_name_list if x in seen or seen.add(x)] #creates list of unique DIDs from the list #print(uniq_scan_DIDs) #print(scan_name_list)S if uniq_scan_DIDs == []: print("The scans' DIDs are all unique") else: for x in uniq_scan_DIDs: number = scan_name_list.count(x) if number > 1: #if more than one print(f"The DID: {x} is repeated {number} times") i = 1 for index, name in enumerate(scan_name_list): if name == x: scan_name_list[index] = name + f"_{i}" #add _1, _2, etc to the file name, so that when written to output file not overwriting i += 1 print("The New DID list is: ", scan_name_list) return scan_name_list
checks if the science scans have the same DID - this would cause an issue for naming the output demod files
checks if the science scans have the same DID - this would cause an issue for naming the output demod files
[ "checks", "if", "the", "science", "scans", "have", "the", "same", "DID", "-", "this", "would", "cause", "an", "issue", "for", "naming", "the", "output", "demod", "files" ]
def check_filenames(data_f): scan_name_list = [str(scan.split('.fits')[0][-10:]) for scan in data_f] seen = set() uniq_scan_DIDs = [x for x in scan_name_list if x in seen or seen.add(x)] if uniq_scan_DIDs == []: print("The scans' DIDs are all unique") else: for x in uniq_scan_DIDs: number = scan_name_list.count(x) if number > 1: print(f"The DID: {x} is repeated {number} times") i = 1 for index, name in enumerate(scan_name_list): if name == x: scan_name_list[index] = name + f"_{i}" i += 1 print("The New DID list is: ", scan_name_list) return scan_name_list
[ "def", "check_filenames", "(", "data_f", ")", ":", "scan_name_list", "=", "[", "str", "(", "scan", ".", "split", "(", "'.fits'", ")", "[", "0", "]", "[", "-", "10", ":", "]", ")", "for", "scan", "in", "data_f", "]", "seen", "=", "set", "(", ")", "uniq_scan_DIDs", "=", "[", "x", "for", "x", "in", "scan_name_list", "if", "x", "in", "seen", "or", "seen", ".", "add", "(", "x", ")", "]", "if", "uniq_scan_DIDs", "==", "[", "]", ":", "print", "(", "\"The scans' DIDs are all unique\"", ")", "else", ":", "for", "x", "in", "uniq_scan_DIDs", ":", "number", "=", "scan_name_list", ".", "count", "(", "x", ")", "if", "number", ">", "1", ":", "print", "(", "f\"The DID: {x} is repeated {number} times\"", ")", "i", "=", "1", "for", "index", ",", "name", "in", "enumerate", "(", "scan_name_list", ")", ":", "if", "name", "==", "x", ":", "scan_name_list", "[", "index", "]", "=", "name", "+", "f\"_{i}\"", "i", "+=", "1", "print", "(", "\"The New DID list is: \"", ",", "scan_name_list", ")", "return", "scan_name_list" ]
checks if the science scans have the same DID - this would cause an issue for naming the output demod files
[ "checks", "if", "the", "science", "scans", "have", "the", "same", "DID", "-", "this", "would", "cause", "an", "issue", "for", "naming", "the", "output", "demod", "files" ]
[ "\"\"\"\n checks if the science scans have the same DID - this would cause an issue for naming the output demod files\n \"\"\"", "#creates list of unique DIDs from the list", "#print(uniq_scan_DIDs)", "#print(scan_name_list)S", "#if more than one", "#add _1, _2, etc to the file name, so that when written to output file not overwriting" ]
[ { "param": "data_f", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data_f", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def check_filenames(data_f): scan_name_list = [str(scan.split('.fits')[0][-10:]) for scan in data_f] seen = set() uniq_scan_DIDs = [x for x in scan_name_list if x in seen or seen.add(x)] if uniq_scan_DIDs == []: print("The scans' DIDs are all unique") else: for x in uniq_scan_DIDs: number = scan_name_list.count(x) if number > 1: print(f"The DID: {x} is repeated {number} times") i = 1 for index, name in enumerate(scan_name_list): if name == x: scan_name_list[index] = name + f"_{i}" i += 1 print("The New DID list is: ", scan_name_list) return scan_name_list
153
272
6e56dfb612ec509d88e968bec66f7b9e38bcf149
withinnoitatpmet/mxnetsource
python/mxnet/visualization.py
[ "Apache-2.0" ]
Python
looks_like_weight
<not_specific>
def looks_like_weight(name): """Internal helper to figure out if node should be hidden with `hide_weights`. """ if name.endswith("_weight"): return True if name.endswith("_bias"): return True return False
Internal helper to figure out if node should be hidden with `hide_weights`.
Internal helper to figure out if node should be hidden with `hide_weights`.
[ "Internal", "helper", "to", "figure", "out", "if", "node", "should", "be", "hidden", "with", "`", "hide_weights", "`", "." ]
def looks_like_weight(name): if name.endswith("_weight"): return True if name.endswith("_bias"): return True return False
[ "def", "looks_like_weight", "(", "name", ")", ":", "if", "name", ".", "endswith", "(", "\"_weight\"", ")", ":", "return", "True", "if", "name", ".", "endswith", "(", "\"_bias\"", ")", ":", "return", "True", "return", "False" ]
Internal helper to figure out if node should be hidden with `hide_weights`.
[ "Internal", "helper", "to", "figure", "out", "if", "node", "should", "be", "hidden", "with", "`", "hide_weights", "`", "." ]
[ "\"\"\"Internal helper to figure out if node should be hidden with `hide_weights`.\n \"\"\"" ]
[ { "param": "name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def looks_like_weight(name): if name.endswith("_weight"): return True if name.endswith("_bias"): return True return False
154
510
aac221e5e318801560f96c29048ef19c9c31ed24
jerryhgoss/atmospy
atmospy/stats.py
[ "Apache-2.0" ]
Python
days_avg_gt_std
<not_specific>
def days_avg_gt_std(data, std): """ Find the number of days where the maximum hourly measurement is above the 1-hour standard used by the EPA Inputs ------ :param data: dataframe or Series with timestamp index and pollution data column. should already be resampled to appropriate timebase :type data: pd.DataFrame or pd.Series :param std: standard value used by EPA :type std: float Returns ------- count: the number of days where the average value surpassed the standard """ daily_avg = data.groupby(data.index.floor('d')).mean() count = daily_avg[daily_avg > std].count() return count
Find the number of days where the maximum hourly measurement is above the 1-hour standard used by the EPA Inputs ------ :param data: dataframe or Series with timestamp index and pollution data column. should already be resampled to appropriate timebase :type data: pd.DataFrame or pd.Series :param std: standard value used by EPA :type std: float Returns ------- count: the number of days where the average value surpassed the standard
Find the number of days where the maximum hourly measurement is above the 1-hour standard used by the EPA Inputs :param data: dataframe or Series with timestamp index and pollution data column. Returns the number of days where the average value surpassed the standard
[ "Find", "the", "number", "of", "days", "where", "the", "maximum", "hourly", "measurement", "is", "above", "the", "1", "-", "hour", "standard", "used", "by", "the", "EPA", "Inputs", ":", "param", "data", ":", "dataframe", "or", "Series", "with", "timestamp", "index", "and", "pollution", "data", "column", ".", "Returns", "the", "number", "of", "days", "where", "the", "average", "value", "surpassed", "the", "standard" ]
def days_avg_gt_std(data, std): daily_avg = data.groupby(data.index.floor('d')).mean() count = daily_avg[daily_avg > std].count() return count
[ "def", "days_avg_gt_std", "(", "data", ",", "std", ")", ":", "daily_avg", "=", "data", ".", "groupby", "(", "data", ".", "index", ".", "floor", "(", "'d'", ")", ")", ".", "mean", "(", ")", "count", "=", "daily_avg", "[", "daily_avg", ">", "std", "]", ".", "count", "(", ")", "return", "count" ]
Find the number of days where the maximum hourly measurement is above the 1-hour standard used by the EPA
[ "Find", "the", "number", "of", "days", "where", "the", "maximum", "hourly", "measurement", "is", "above", "the", "1", "-", "hour", "standard", "used", "by", "the", "EPA" ]
[ "\"\"\"\n Find the number of days where the maximum hourly measurement is\n above the 1-hour standard used by the EPA\n \n Inputs\n ------\n :param data: dataframe or Series with timestamp index and pollution data column.\n should already be resampled to appropriate timebase\n :type data: pd.DataFrame or pd.Series\n :param std: standard value used by EPA\n :type std: float\n \n Returns\n -------\n count: the number of days where the average value surpassed the standard\n \"\"\"" ]
[ { "param": "data", "type": null }, { "param": "std", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "std", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def days_avg_gt_std(data, std): daily_avg = data.groupby(data.index.floor('d')).mean() count = daily_avg[daily_avg > std].count() return count
156
635
c9b820b2531cc2cc587d88a8b4a7a50f67502d0a
zjsteyn/Eyra
Backend/scripts/processMIMTokens.py
[ "Apache-2.0" ]
Python
containsCapsAbbrev
<not_specific>
def containsCapsAbbrev(x): ''' Test if list x contains e.g. OECD, DNA, RNA which are abbreviations but not filtered out with punctuation. ''' for word in x: upperCase = 0 for c in word: if c.isupper(): upperCase += 1 if upperCase >= 2: return True return False
Test if list x contains e.g. OECD, DNA, RNA which are abbreviations but not filtered out with punctuation.
Test if list x contains e.g. OECD, DNA, RNA which are abbreviations but not filtered out with punctuation.
[ "Test", "if", "list", "x", "contains", "e", ".", "g", ".", "OECD", "DNA", "RNA", "which", "are", "abbreviations", "but", "not", "filtered", "out", "with", "punctuation", "." ]
def containsCapsAbbrev(x): for word in x: upperCase = 0 for c in word: if c.isupper(): upperCase += 1 if upperCase >= 2: return True return False
[ "def", "containsCapsAbbrev", "(", "x", ")", ":", "for", "word", "in", "x", ":", "upperCase", "=", "0", "for", "c", "in", "word", ":", "if", "c", ".", "isupper", "(", ")", ":", "upperCase", "+=", "1", "if", "upperCase", ">=", "2", ":", "return", "True", "return", "False" ]
Test if list x contains e.g.
[ "Test", "if", "list", "x", "contains", "e", ".", "g", "." ]
[ "'''\n Test if list x contains e.g. OECD, DNA, RNA which are abbreviations but\n not filtered out with punctuation.\n '''" ]
[ { "param": "x", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "x", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def containsCapsAbbrev(x): for word in x: upperCase = 0 for c in word: if c.isupper(): upperCase += 1 if upperCase >= 2: return True return False
157
105
18af1df768de33fc7185d8e4fe853759d1703842
moddent/NTHU-COM-526000-DL-HW
HW01/function.py
[ "MIT" ]
Python
lr_scheduler
<not_specific>
def lr_scheduler(epoch, lr): """ Decay the learning rate every 50 epoch. """ drop = 0.5 epochs_drop = 25 lr = lr * math.pow(drop, math.floor((1 + epoch) / epochs_drop)) return lr
Decay the learning rate every 50 epoch.
Decay the learning rate every 50 epoch.
[ "Decay", "the", "learning", "rate", "every", "50", "epoch", "." ]
def lr_scheduler(epoch, lr): drop = 0.5 epochs_drop = 25 lr = lr * math.pow(drop, math.floor((1 + epoch) / epochs_drop)) return lr
[ "def", "lr_scheduler", "(", "epoch", ",", "lr", ")", ":", "drop", "=", "0.5", "epochs_drop", "=", "25", "lr", "=", "lr", "*", "math", ".", "pow", "(", "drop", ",", "math", ".", "floor", "(", "(", "1", "+", "epoch", ")", "/", "epochs_drop", ")", ")", "return", "lr" ]
Decay the learning rate every 50 epoch.
[ "Decay", "the", "learning", "rate", "every", "50", "epoch", "." ]
[ "\"\"\"\n Decay the learning rate every 50 epoch.\n \"\"\"" ]
[ { "param": "epoch", "type": null }, { "param": "lr", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "epoch", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "lr", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import math def lr_scheduler(epoch, lr): drop = 0.5 epochs_drop = 25 lr = lr * math.pow(drop, math.floor((1 + epoch) / epochs_drop)) return lr
158
227
bc70c80ae5907a0d3c782634e8e936ff43468a55
davidwtbuxton/lotus-notes-export
notesexport.py
[ "MIT" ]
Python
notes_headers
<not_specific>
def notes_headers(msg): """Returns a dictionary of some interesting notes headers.""" d = { 'Subject': None, 'PostedDate': None, 'From': None, 'SentTo': None, } for line in msg.splitlines(): if None not in d.values(): return d for k in d: if d[k] is None: if line.startswith(k + ': '): d[k] = line[len(k + ': '):] else: return d
Returns a dictionary of some interesting notes headers.
Returns a dictionary of some interesting notes headers.
[ "Returns", "a", "dictionary", "of", "some", "interesting", "notes", "headers", "." ]
def notes_headers(msg): d = { 'Subject': None, 'PostedDate': None, 'From': None, 'SentTo': None, } for line in msg.splitlines(): if None not in d.values(): return d for k in d: if d[k] is None: if line.startswith(k + ': '): d[k] = line[len(k + ': '):] else: return d
[ "def", "notes_headers", "(", "msg", ")", ":", "d", "=", "{", "'Subject'", ":", "None", ",", "'PostedDate'", ":", "None", ",", "'From'", ":", "None", ",", "'SentTo'", ":", "None", ",", "}", "for", "line", "in", "msg", ".", "splitlines", "(", ")", ":", "if", "None", "not", "in", "d", ".", "values", "(", ")", ":", "return", "d", "for", "k", "in", "d", ":", "if", "d", "[", "k", "]", "is", "None", ":", "if", "line", ".", "startswith", "(", "k", "+", "': '", ")", ":", "d", "[", "k", "]", "=", "line", "[", "len", "(", "k", "+", "': '", ")", ":", "]", "else", ":", "return", "d" ]
Returns a dictionary of some interesting notes headers.
[ "Returns", "a", "dictionary", "of", "some", "interesting", "notes", "headers", "." ]
[ "\"\"\"Returns a dictionary of some interesting notes headers.\"\"\"" ]
[ { "param": "msg", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "msg", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def notes_headers(msg): d = { 'Subject': None, 'PostedDate': None, 'From': None, 'SentTo': None, } for line in msg.splitlines(): if None not in d.values(): return d for k in d: if d[k] is None: if line.startswith(k + ': '): d[k] = line[len(k + ': '):] else: return d
159
613
a58ed2f9ff977888b86bef17454ca86ab8aeeef8
dcsparkes/adventofcode
lifegame/lifegame.py
[ "Unlicense" ]
Python
sideBySide
<not_specific>
def sideBySide(seatA, seatB, separation=5): """ Turn two LifeGames into a single string separated by 'separation' spaces. Relatively easy to rewrite with *args for more than 2 :param seatA: :param seatB: :param separation: :return: """ delimiter = " " * separation rowsA = str(seatA).split('\n') rowsB = str(seatB).split('\n') rows = [delimiter.join(components) for components in zip(rowsA, rowsB)] return '\n'.join(rows)
Turn two LifeGames into a single string separated by 'separation' spaces. Relatively easy to rewrite with *args for more than 2 :param seatA: :param seatB: :param separation: :return:
Turn two LifeGames into a single string separated by 'separation' spaces. Relatively easy to rewrite with *args for more than 2
[ "Turn", "two", "LifeGames", "into", "a", "single", "string", "separated", "by", "'", "separation", "'", "spaces", ".", "Relatively", "easy", "to", "rewrite", "with", "*", "args", "for", "more", "than", "2" ]
def sideBySide(seatA, seatB, separation=5): delimiter = " " * separation rowsA = str(seatA).split('\n') rowsB = str(seatB).split('\n') rows = [delimiter.join(components) for components in zip(rowsA, rowsB)] return '\n'.join(rows)
[ "def", "sideBySide", "(", "seatA", ",", "seatB", ",", "separation", "=", "5", ")", ":", "delimiter", "=", "\" \"", "*", "separation", "rowsA", "=", "str", "(", "seatA", ")", ".", "split", "(", "'\\n'", ")", "rowsB", "=", "str", "(", "seatB", ")", ".", "split", "(", "'\\n'", ")", "rows", "=", "[", "delimiter", ".", "join", "(", "components", ")", "for", "components", "in", "zip", "(", "rowsA", ",", "rowsB", ")", "]", "return", "'\\n'", ".", "join", "(", "rows", ")" ]
Turn two LifeGames into a single string separated by 'separation' spaces.
[ "Turn", "two", "LifeGames", "into", "a", "single", "string", "separated", "by", "'", "separation", "'", "spaces", "." ]
[ "\"\"\"\n Turn two LifeGames into a single string separated by 'separation' spaces.\n Relatively easy to rewrite with *args for more than 2\n :param seatA:\n :param seatB:\n :param separation:\n :return:\n \"\"\"" ]
[ { "param": "seatA", "type": null }, { "param": "seatB", "type": null }, { "param": "separation", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "seatA", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "seatB", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "separation", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def sideBySide(seatA, seatB, separation=5): delimiter = " " * separation rowsA = str(seatA).split('\n') rowsB = str(seatB).split('\n') rows = [delimiter.join(components) for components in zip(rowsA, rowsB)] return '\n'.join(rows)
160
507
3be80331c354459f34c9fe0c26eaf4e3713cd032
meramsey/python-scripts-collection
scripts/human_filesize.py
[ "MIT" ]
Python
human_size
<not_specific>
def human_size(bytes, units=None): """ Returns a human readable string representation of bytes """ if units is None: units = [' bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB'] return str(bytes) + units[0] if bytes < 1024 else human_size(bytes >> 10, units[1:])
Returns a human readable string representation of bytes
Returns a human readable string representation of bytes
[ "Returns", "a", "human", "readable", "string", "representation", "of", "bytes" ]
def human_size(bytes, units=None): if units is None: units = [' bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB'] return str(bytes) + units[0] if bytes < 1024 else human_size(bytes >> 10, units[1:])
[ "def", "human_size", "(", "bytes", ",", "units", "=", "None", ")", ":", "if", "units", "is", "None", ":", "units", "=", "[", "' bytes'", ",", "'KB'", ",", "'MB'", ",", "'GB'", ",", "'TB'", ",", "'PB'", ",", "'EB'", "]", "return", "str", "(", "bytes", ")", "+", "units", "[", "0", "]", "if", "bytes", "<", "1024", "else", "human_size", "(", "bytes", ">>", "10", ",", "units", "[", "1", ":", "]", ")" ]
Returns a human readable string representation of bytes
[ "Returns", "a", "human", "readable", "string", "representation", "of", "bytes" ]
[ "\"\"\" Returns a human readable string representation of bytes \"\"\"" ]
[ { "param": "bytes", "type": null }, { "param": "units", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "bytes", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "units", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def human_size(bytes, units=None): if units is None: units = [' bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB'] return str(bytes) + units[0] if bytes < 1024 else human_size(bytes >> 10, units[1:])
163
813
b648699bb61ce3c7b8511e991d7508d05d72967b
pimoroni/plasma
library/tests/conftest.py
[ "MIT" ]
Python
cleanup_plasma
null
def cleanup_plasma(): """This fixture removes all plasma modules from sys.modules. This ensures that each module is fully re-imported, along with the fixtures for serial, etc, for each test function. """ yield None to_delete = [] for module in sys.modules: if module.startswith('plasma'): to_delete.append(module) for module in to_delete: del sys.modules[module]
This fixture removes all plasma modules from sys.modules. This ensures that each module is fully re-imported, along with the fixtures for serial, etc, for each test function.
This fixture removes all plasma modules from sys.modules. This ensures that each module is fully re-imported, along with the fixtures for serial, etc, for each test function.
[ "This", "fixture", "removes", "all", "plasma", "modules", "from", "sys", ".", "modules", ".", "This", "ensures", "that", "each", "module", "is", "fully", "re", "-", "imported", "along", "with", "the", "fixtures", "for", "serial", "etc", "for", "each", "test", "function", "." ]
def cleanup_plasma(): yield None to_delete = [] for module in sys.modules: if module.startswith('plasma'): to_delete.append(module) for module in to_delete: del sys.modules[module]
[ "def", "cleanup_plasma", "(", ")", ":", "yield", "None", "to_delete", "=", "[", "]", "for", "module", "in", "sys", ".", "modules", ":", "if", "module", ".", "startswith", "(", "'plasma'", ")", ":", "to_delete", ".", "append", "(", "module", ")", "for", "module", "in", "to_delete", ":", "del", "sys", ".", "modules", "[", "module", "]" ]
This fixture removes all plasma modules from sys.modules.
[ "This", "fixture", "removes", "all", "plasma", "modules", "from", "sys", ".", "modules", "." ]
[ "\"\"\"This fixture removes all plasma modules from sys.modules.\n\n This ensures that each module is fully re-imported, along with\n the fixtures for serial, etc, for each test function.\n\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import sys def cleanup_plasma(): yield None to_delete = [] for module in sys.modules: if module.startswith('plasma'): to_delete.append(module) for module in to_delete: del sys.modules[module]
164
690
6bcba49c66cf183836a392f7b1da6e731785571a
Anrufliste/voice-skill-sdk
skill_sdk/l10n.py
[ "MIT" ]
Python
nl_decapitalize
<not_specific>
def nl_decapitalize(string: str): """ Decapitalize first character (the rest is untouched) :param string: :return: """ return string[:1].lower() + string[1:]
Decapitalize first character (the rest is untouched) :param string: :return:
Decapitalize first character (the rest is untouched)
[ "Decapitalize", "first", "character", "(", "the", "rest", "is", "untouched", ")" ]
def nl_decapitalize(string: str): return string[:1].lower() + string[1:]
[ "def", "nl_decapitalize", "(", "string", ":", "str", ")", ":", "return", "string", "[", ":", "1", "]", ".", "lower", "(", ")", "+", "string", "[", "1", ":", "]" ]
Decapitalize first character (the rest is untouched)
[ "Decapitalize", "first", "character", "(", "the", "rest", "is", "untouched", ")" ]
[ "\"\"\"\n Decapitalize first character (the rest is untouched)\n\n :param string:\n :return:\n \"\"\"" ]
[ { "param": "string", "type": "str" } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "string", "type": "str", "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def nl_decapitalize(string: str): return string[:1].lower() + string[1:]
165
992
9f67126b284c449f9f058bc48c7945a75e089554
sxie22/uf3
uf3/regression/least_squares.py
[ "Apache-2.0" ]
Python
validate_regularizer
null
def validate_regularizer(regularizer: np.ndarray, n_feats: int): """ Check for consistency between regularizer matrix and number of features. Args: regularizer (np.ndarray): regularizer matrix. n_feats (int): number of features. """ n_row, n_col = regularizer.shape if n_col != n_feats: shape_comparison = "N x {0}. Provided: {1} x {2}".format(n_feats, n_row, n_col) raise ValueError( "Expected regularizer shape: " + shape_comparison)
Check for consistency between regularizer matrix and number of features. Args: regularizer (np.ndarray): regularizer matrix. n_feats (int): number of features.
Check for consistency between regularizer matrix and number of features.
[ "Check", "for", "consistency", "between", "regularizer", "matrix", "and", "number", "of", "features", "." ]
def validate_regularizer(regularizer: np.ndarray, n_feats: int): n_row, n_col = regularizer.shape if n_col != n_feats: shape_comparison = "N x {0}. Provided: {1} x {2}".format(n_feats, n_row, n_col) raise ValueError( "Expected regularizer shape: " + shape_comparison)
[ "def", "validate_regularizer", "(", "regularizer", ":", "np", ".", "ndarray", ",", "n_feats", ":", "int", ")", ":", "n_row", ",", "n_col", "=", "regularizer", ".", "shape", "if", "n_col", "!=", "n_feats", ":", "shape_comparison", "=", "\"N x {0}. Provided: {1} x {2}\"", ".", "format", "(", "n_feats", ",", "n_row", ",", "n_col", ")", "raise", "ValueError", "(", "\"Expected regularizer shape: \"", "+", "shape_comparison", ")" ]
Check for consistency between regularizer matrix and number of features.
[ "Check", "for", "consistency", "between", "regularizer", "matrix", "and", "number", "of", "features", "." ]
[ "\"\"\"\n Check for consistency between regularizer matrix and number of features.\n\n Args:\n regularizer (np.ndarray): regularizer matrix.\n n_feats (int): number of features.\n \"\"\"" ]
[ { "param": "regularizer", "type": "np.ndarray" }, { "param": "n_feats", "type": "int" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "regularizer", "type": "np.ndarray", "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "n_feats", "type": "int", "docstring": "number of features.", "docstring_tokens": [ "number", "of", "features", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def validate_regularizer(regularizer: np.ndarray, n_feats: int): n_row, n_col = regularizer.shape if n_col != n_feats: shape_comparison = "N x {0}. Provided: {1} x {2}".format(n_feats, n_row, n_col) raise ValueError( "Expected regularizer shape: " + shape_comparison)
167
241
09ded4cbd5c7ddb58c41a6ad303da10ce845bb04
maytrue/webrtc
modules/audio_processing/test/py_quality_assessment/quality_assessment/evaluation.py
[ "DOC", "BSD-3-Clause" ]
Python
Run
<not_specific>
def Run(cls, evaluation_score_workers, apm_input_metadata, apm_output_filepath, reference_input_filepath, output_path): """Runs the evaluation. Iterates over the given evaluation score workers. Args: evaluation_score_workers: list of EvaluationScore instances. apm_input_metadata: dictionary with metadata of the APM input. apm_output_filepath: path to the audio track file with the APM output. reference_input_filepath: path to the reference audio track file. output_path: output path. Returns: A dict of evaluation score name and score pairs. """ # Init. scores = {} for evaluation_score_worker in evaluation_score_workers: logging.info(' computing <%s> score', evaluation_score_worker.NAME) evaluation_score_worker.SetInputSignalMetadata(apm_input_metadata) evaluation_score_worker.SetReferenceSignalFilepath( reference_input_filepath) evaluation_score_worker.SetTestedSignalFilepath( apm_output_filepath) evaluation_score_worker.Run(output_path) scores[evaluation_score_worker.NAME] = evaluation_score_worker.score return scores
Runs the evaluation. Iterates over the given evaluation score workers. Args: evaluation_score_workers: list of EvaluationScore instances. apm_input_metadata: dictionary with metadata of the APM input. apm_output_filepath: path to the audio track file with the APM output. reference_input_filepath: path to the reference audio track file. output_path: output path. Returns: A dict of evaluation score name and score pairs.
Runs the evaluation. Iterates over the given evaluation score workers.
[ "Runs", "the", "evaluation", ".", "Iterates", "over", "the", "given", "evaluation", "score", "workers", "." ]
def Run(cls, evaluation_score_workers, apm_input_metadata, apm_output_filepath, reference_input_filepath, output_path): scores = {} for evaluation_score_worker in evaluation_score_workers: logging.info(' computing <%s> score', evaluation_score_worker.NAME) evaluation_score_worker.SetInputSignalMetadata(apm_input_metadata) evaluation_score_worker.SetReferenceSignalFilepath( reference_input_filepath) evaluation_score_worker.SetTestedSignalFilepath( apm_output_filepath) evaluation_score_worker.Run(output_path) scores[evaluation_score_worker.NAME] = evaluation_score_worker.score return scores
[ "def", "Run", "(", "cls", ",", "evaluation_score_workers", ",", "apm_input_metadata", ",", "apm_output_filepath", ",", "reference_input_filepath", ",", "output_path", ")", ":", "scores", "=", "{", "}", "for", "evaluation_score_worker", "in", "evaluation_score_workers", ":", "logging", ".", "info", "(", "' computing <%s> score'", ",", "evaluation_score_worker", ".", "NAME", ")", "evaluation_score_worker", ".", "SetInputSignalMetadata", "(", "apm_input_metadata", ")", "evaluation_score_worker", ".", "SetReferenceSignalFilepath", "(", "reference_input_filepath", ")", "evaluation_score_worker", ".", "SetTestedSignalFilepath", "(", "apm_output_filepath", ")", "evaluation_score_worker", ".", "Run", "(", "output_path", ")", "scores", "[", "evaluation_score_worker", ".", "NAME", "]", "=", "evaluation_score_worker", ".", "score", "return", "scores" ]
Runs the evaluation.
[ "Runs", "the", "evaluation", "." ]
[ "\"\"\"Runs the evaluation.\n\n Iterates over the given evaluation score workers.\n\n Args:\n evaluation_score_workers: list of EvaluationScore instances.\n apm_input_metadata: dictionary with metadata of the APM input.\n apm_output_filepath: path to the audio track file with the APM output.\n reference_input_filepath: path to the reference audio track file.\n output_path: output path.\n\n Returns:\n A dict of evaluation score name and score pairs.\n \"\"\"", "# Init." ]
[ { "param": "cls", "type": null }, { "param": "evaluation_score_workers", "type": null }, { "param": "apm_input_metadata", "type": null }, { "param": "apm_output_filepath", "type": null }, { "param": "reference_input_filepath", "type": null }, { "param": "output_path", "type": null } ]
{ "returns": [ { "docstring": "A dict of evaluation score name and score pairs.", "docstring_tokens": [ "A", "dict", "of", "evaluation", "score", "name", "and", "score", "pairs", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "evaluation_score_workers", "type": null, "docstring": "list of EvaluationScore instances.", "docstring_tokens": [ "list", "of", "EvaluationScore", "instances", "." ], "default": null, "is_optional": null }, { "identifier": "apm_input_metadata", "type": null, "docstring": "dictionary with metadata of the APM input.", "docstring_tokens": [ "dictionary", "with", "metadata", "of", "the", "APM", "input", "." ], "default": null, "is_optional": null }, { "identifier": "apm_output_filepath", "type": null, "docstring": "path to the audio track file with the APM output.", "docstring_tokens": [ "path", "to", "the", "audio", "track", "file", "with", "the", "APM", "output", "." ], "default": null, "is_optional": null }, { "identifier": "reference_input_filepath", "type": null, "docstring": "path to the reference audio track file.", "docstring_tokens": [ "path", "to", "the", "reference", "audio", "track", "file", "." ], "default": null, "is_optional": null }, { "identifier": "output_path", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import logging def Run(cls, evaluation_score_workers, apm_input_metadata, apm_output_filepath, reference_input_filepath, output_path): scores = {} for evaluation_score_worker in evaluation_score_workers: logging.info(' computing <%s> score', evaluation_score_worker.NAME) evaluation_score_worker.SetInputSignalMetadata(apm_input_metadata) evaluation_score_worker.SetReferenceSignalFilepath( reference_input_filepath) evaluation_score_worker.SetTestedSignalFilepath( apm_output_filepath) evaluation_score_worker.Run(output_path) scores[evaluation_score_worker.NAME] = evaluation_score_worker.score return scores
168
110
c6460d3b38eb380df5b9c790ad806ad14ec836a9
BIONF/taXaminer
prepare_and_check.py
[ "MIT" ]
Python
check_dir_slash
<not_specific>
def check_dir_slash(path): """Appends trailing slash to path if necessary. Args: path(str): path to be checked for slash Returns: (str) path with slash at the end """ if path.endswith('/'): return path else: return path+'/'
Appends trailing slash to path if necessary. Args: path(str): path to be checked for slash Returns: (str) path with slash at the end
Appends trailing slash to path if necessary.
[ "Appends", "trailing", "slash", "to", "path", "if", "necessary", "." ]
def check_dir_slash(path): if path.endswith('/'): return path else: return path+'/'
[ "def", "check_dir_slash", "(", "path", ")", ":", "if", "path", ".", "endswith", "(", "'/'", ")", ":", "return", "path", "else", ":", "return", "path", "+", "'/'" ]
Appends trailing slash to path if necessary.
[ "Appends", "trailing", "slash", "to", "path", "if", "necessary", "." ]
[ "\"\"\"Appends trailing slash to path if necessary.\n\n Args:\n path(str): path to be checked for slash\n\n Returns:\n (str) path with slash at the end\n\n \"\"\"" ]
[ { "param": "path", "type": null } ]
{ "returns": [ { "docstring": "(str) path with slash at the end", "docstring_tokens": [ "(", "str", ")", "path", "with", "slash", "at", "the", "end" ], "type": null } ], "raises": [], "params": [ { "identifier": "path", "type": null, "docstring": "path to be checked for slash", "docstring_tokens": [ "path", "to", "be", "checked", "for", "slash" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def check_dir_slash(path): if path.endswith('/'): return path else: return path+'/'
169
71
18d6baaf0359b10f5388a1c72403426339b872c7
ezramorris/smrt-importer
smrt_importer/processor.py
[ "MIT" ]
Python
move_file
null
def move_file(path: Path, dest: Path): """Move a file, adding a suffix if necessary. path: file path dest: destination directory """ newpath = dest / path.name i=1 while newpath.exists(): newpath = dest / f'{path.stem}_{i}{path.suffix}' i+=1 path.rename(newpath)
Move a file, adding a suffix if necessary. path: file path dest: destination directory
Move a file, adding a suffix if necessary. path: file path dest: destination directory
[ "Move", "a", "file", "adding", "a", "suffix", "if", "necessary", ".", "path", ":", "file", "path", "dest", ":", "destination", "directory" ]
def move_file(path: Path, dest: Path): newpath = dest / path.name i=1 while newpath.exists(): newpath = dest / f'{path.stem}_{i}{path.suffix}' i+=1 path.rename(newpath)
[ "def", "move_file", "(", "path", ":", "Path", ",", "dest", ":", "Path", ")", ":", "newpath", "=", "dest", "/", "path", ".", "name", "i", "=", "1", "while", "newpath", ".", "exists", "(", ")", ":", "newpath", "=", "dest", "/", "f'{path.stem}_{i}{path.suffix}'", "i", "+=", "1", "path", ".", "rename", "(", "newpath", ")" ]
Move a file, adding a suffix if necessary.
[ "Move", "a", "file", "adding", "a", "suffix", "if", "necessary", "." ]
[ "\"\"\"Move a file, adding a suffix if necessary.\n \n path: file path\n dest: destination directory\n \"\"\"" ]
[ { "param": "path", "type": "Path" }, { "param": "dest", "type": "Path" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "path", "type": "Path", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "dest", "type": "Path", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def move_file(path: Path, dest: Path): newpath = dest / path.name i=1 while newpath.exists(): newpath = dest / f'{path.stem}_{i}{path.suffix}' i+=1 path.rename(newpath)
170
606
0c17379a655101f895d7eba780521973b03a73ef
ypsun-umd/MiniDAQ-DCB-Panels
utils/dim_browser.py
[ "BSD-2-Clause" ]
Python
print_all_clients_connected_to_server
null
def print_all_clients_connected_to_server(dbr): """ dbr.getServerClients("TimeService-server") returns the number of clients connected to the server dbr.getNextServerClient() can be called after this call """ # getNextServerClient() is a generator that generates a tuple or None if no # Clients are found this is another way to use the getNext*** functions for client_tuple in dbr.getNextServerClient(): if client_tuple is not None: print( "Client name = {0}, node name = {1}".format( client_tuple[0], client_tuple[1] )) print("")
dbr.getServerClients("TimeService-server") returns the number of clients connected to the server dbr.getNextServerClient() can be called after this call
dbr.getServerClients("TimeService-server") returns the number of clients connected to the server dbr.getNextServerClient() can be called after this call
[ "dbr", ".", "getServerClients", "(", "\"", "TimeService", "-", "server", "\"", ")", "returns", "the", "number", "of", "clients", "connected", "to", "the", "server", "dbr", ".", "getNextServerClient", "()", "can", "be", "called", "after", "this", "call" ]
def print_all_clients_connected_to_server(dbr): for client_tuple in dbr.getNextServerClient(): if client_tuple is not None: print( "Client name = {0}, node name = {1}".format( client_tuple[0], client_tuple[1] )) print("")
[ "def", "print_all_clients_connected_to_server", "(", "dbr", ")", ":", "for", "client_tuple", "in", "dbr", ".", "getNextServerClient", "(", ")", ":", "if", "client_tuple", "is", "not", "None", ":", "print", "(", "\"Client name = {0}, node name = {1}\"", ".", "format", "(", "client_tuple", "[", "0", "]", ",", "client_tuple", "[", "1", "]", ")", ")", "print", "(", "\"\"", ")" ]
dbr.getServerClients("TimeService-server") returns the number of clients connected to the server dbr.getNextServerClient() can be called after this call
[ "dbr", ".", "getServerClients", "(", "\"", "TimeService", "-", "server", "\"", ")", "returns", "the", "number", "of", "clients", "connected", "to", "the", "server", "dbr", ".", "getNextServerClient", "()", "can", "be", "called", "after", "this", "call" ]
[ "\"\"\"\n dbr.getServerClients(\"TimeService-server\") returns the number of clients\n connected to the server\n dbr.getNextServerClient() can be called after this call\n \"\"\"", "# getNextServerClient() is a generator that generates a tuple or None if no", "# Clients are found this is another way to use the getNext*** functions" ]
[ { "param": "dbr", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dbr", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def print_all_clients_connected_to_server(dbr): for client_tuple in dbr.getNextServerClient(): if client_tuple is not None: print( "Client name = {0}, node name = {1}".format( client_tuple[0], client_tuple[1] )) print("")
171
19
3914697415b7fe9af8bd8f9a02dcfab6149d0435
ParkvilleData/MetaGenePipe
scripts/inputArgMaker.v.1.0.1.py
[ "Apache-2.0" ]
Python
printDictionary
null
def printDictionary(dictionary): """ print Dictionary Prints contents of dictionary for debug purposes Args: param1 (Dictionary): Dictionary of questionable content Returns: NA: print out dictionary contents """ print("\nDictionary contents below") pp = pprint.PrettyPrinter(indent=4) pp.pprint(dictionary) print(len(dictionary))
print Dictionary Prints contents of dictionary for debug purposes Args: param1 (Dictionary): Dictionary of questionable content Returns: NA: print out dictionary contents
print Dictionary Prints contents of dictionary for debug purposes
[ "print", "Dictionary", "Prints", "contents", "of", "dictionary", "for", "debug", "purposes" ]
def printDictionary(dictionary): print("\nDictionary contents below") pp = pprint.PrettyPrinter(indent=4) pp.pprint(dictionary) print(len(dictionary))
[ "def", "printDictionary", "(", "dictionary", ")", ":", "print", "(", "\"\\nDictionary contents below\"", ")", "pp", "=", "pprint", ".", "PrettyPrinter", "(", "indent", "=", "4", ")", "pp", ".", "pprint", "(", "dictionary", ")", "print", "(", "len", "(", "dictionary", ")", ")" ]
print Dictionary Prints contents of dictionary for debug purposes
[ "print", "Dictionary", "Prints", "contents", "of", "dictionary", "for", "debug", "purposes" ]
[ "\"\"\" print Dictionary\n\n Prints contents of dictionary for debug purposes\n\n Args:\n param1 (Dictionary): Dictionary of questionable content\n\n Returns:\n NA: print out dictionary contents\n\n \"\"\"" ]
[ { "param": "dictionary", "type": null } ]
{ "returns": [ { "docstring": "print out dictionary contents", "docstring_tokens": [ "print", "out", "dictionary", "contents" ], "type": "NA" } ], "raises": [], "params": [ { "identifier": "dictionary", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "param1", "type": null, "docstring": "Dictionary of questionable content", "docstring_tokens": [ "Dictionary", "of", "questionable", "content" ], "default": null, "is_optional": false } ], "others": [] }
import pprint def printDictionary(dictionary): print("\nDictionary contents below") pp = pprint.PrettyPrinter(indent=4) pp.pprint(dictionary) print(len(dictionary))
172
345
4151806476243e46058cd970a601534f21ba9447
adelolmo/snapraid-runner
deb/opt/snapraid-runner/snapraid-runner.py
[ "Apache-2.0" ]
Python
tee_log
<not_specific>
def tee_log(infile, out_lines, log_level): """ Create a thread thot saves all the output on infile to out_lines and logs every line with log_level """ def tee_thread(): for line in iter(infile.readline, ""): line = line.strip() # Do not log the progress display if "\r" in line: line = line.split("\r")[-1] logging.log(log_level, line.strip()) out_lines.append(line) infile.close() t = threading.Thread(target=tee_thread) t.daemon = True t.start() return t
Create a thread thot saves all the output on infile to out_lines and logs every line with log_level
Create a thread thot saves all the output on infile to out_lines and logs every line with log_level
[ "Create", "a", "thread", "thot", "saves", "all", "the", "output", "on", "infile", "to", "out_lines", "and", "logs", "every", "line", "with", "log_level" ]
def tee_log(infile, out_lines, log_level): def tee_thread(): for line in iter(infile.readline, ""): line = line.strip() if "\r" in line: line = line.split("\r")[-1] logging.log(log_level, line.strip()) out_lines.append(line) infile.close() t = threading.Thread(target=tee_thread) t.daemon = True t.start() return t
[ "def", "tee_log", "(", "infile", ",", "out_lines", ",", "log_level", ")", ":", "def", "tee_thread", "(", ")", ":", "for", "line", "in", "iter", "(", "infile", ".", "readline", ",", "\"\"", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "\"\\r\"", "in", "line", ":", "line", "=", "line", ".", "split", "(", "\"\\r\"", ")", "[", "-", "1", "]", "logging", ".", "log", "(", "log_level", ",", "line", ".", "strip", "(", ")", ")", "out_lines", ".", "append", "(", "line", ")", "infile", ".", "close", "(", ")", "t", "=", "threading", ".", "Thread", "(", "target", "=", "tee_thread", ")", "t", ".", "daemon", "=", "True", "t", ".", "start", "(", ")", "return", "t" ]
Create a thread thot saves all the output on infile to out_lines and logs every line with log_level
[ "Create", "a", "thread", "thot", "saves", "all", "the", "output", "on", "infile", "to", "out_lines", "and", "logs", "every", "line", "with", "log_level" ]
[ "\"\"\"\n Create a thread thot saves all the output on infile to out_lines and\n logs every line with log_level\n \"\"\"", "# Do not log the progress display" ]
[ { "param": "infile", "type": null }, { "param": "out_lines", "type": null }, { "param": "log_level", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "infile", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "out_lines", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "log_level", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import logging import threading def tee_log(infile, out_lines, log_level): def tee_thread(): for line in iter(infile.readline, ""): line = line.strip() if "\r" in line: line = line.split("\r")[-1] logging.log(log_level, line.strip()) out_lines.append(line) infile.close() t = threading.Thread(target=tee_thread) t.daemon = True t.start() return t
173
417
926daa19f7aa4411ffca1cb5deb12c9e73205581
PabRod/snakePi
scripts/process.py
[ "Apache-2.0" ]
Python
writer
null
def writer(val, file): """ Writes a value to a file """ val_str = f'{val:.8f}' # Keep decimals enough with open(file, 'w') as f: f.write(val_str)
Writes a value to a file
Writes a value to a file
[ "Writes", "a", "value", "to", "a", "file" ]
def writer(val, file): val_str = f'{val:.8f}' with open(file, 'w') as f: f.write(val_str)
[ "def", "writer", "(", "val", ",", "file", ")", ":", "val_str", "=", "f'{val:.8f}'", "with", "open", "(", "file", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "val_str", ")" ]
Writes a value to a file
[ "Writes", "a", "value", "to", "a", "file" ]
[ "\"\"\" Writes a value to a file \"\"\"", "# Keep decimals enough" ]
[ { "param": "val", "type": null }, { "param": "file", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "val", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "file", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def writer(val, file): val_str = f'{val:.8f}' with open(file, 'w') as f: f.write(val_str)
174
483
a997c809edca66b028d3be6bea5b6d08753edb22
corenel/flambeau
flambeau/network/lr_scheduler.py
[ "MIT" ]
Python
linear_anneal
<not_specific>
def linear_anneal(base_lr, global_step, warmup_steps, min_lr): """ Linearly annealed learning rate from 0 in the first warming up epochs. :param base_lr: base learning rate :type base_lr: float :param global_step: global training steps :type global_step: int :param warmup_steps: number of steps for warming up :type warmup_steps: int :param min_lr: minimum learning rate :type min_lr: float :return: scheduled learning rate :rtype: float """ lr = max(min_lr + (base_lr - min_lr) * (1.0 - global_step / warmup_steps), min_lr) return lr
Linearly annealed learning rate from 0 in the first warming up epochs. :param base_lr: base learning rate :type base_lr: float :param global_step: global training steps :type global_step: int :param warmup_steps: number of steps for warming up :type warmup_steps: int :param min_lr: minimum learning rate :type min_lr: float :return: scheduled learning rate :rtype: float
Linearly annealed learning rate from 0 in the first warming up epochs.
[ "Linearly", "annealed", "learning", "rate", "from", "0", "in", "the", "first", "warming", "up", "epochs", "." ]
def linear_anneal(base_lr, global_step, warmup_steps, min_lr): lr = max(min_lr + (base_lr - min_lr) * (1.0 - global_step / warmup_steps), min_lr) return lr
[ "def", "linear_anneal", "(", "base_lr", ",", "global_step", ",", "warmup_steps", ",", "min_lr", ")", ":", "lr", "=", "max", "(", "min_lr", "+", "(", "base_lr", "-", "min_lr", ")", "*", "(", "1.0", "-", "global_step", "/", "warmup_steps", ")", ",", "min_lr", ")", "return", "lr" ]
Linearly annealed learning rate from 0 in the first warming up epochs.
[ "Linearly", "annealed", "learning", "rate", "from", "0", "in", "the", "first", "warming", "up", "epochs", "." ]
[ "\"\"\"\n Linearly annealed learning rate from 0 in the first warming up epochs.\n\n :param base_lr: base learning rate\n :type base_lr: float\n :param global_step: global training steps\n :type global_step: int\n :param warmup_steps: number of steps for warming up\n :type warmup_steps: int\n :param min_lr: minimum learning rate\n :type min_lr: float\n :return: scheduled learning rate\n :rtype: float\n \"\"\"" ]
[ { "param": "base_lr", "type": null }, { "param": "global_step", "type": null }, { "param": "warmup_steps", "type": null }, { "param": "min_lr", "type": null } ]
{ "returns": [ { "docstring": "scheduled learning rate", "docstring_tokens": [ "scheduled", "learning", "rate" ], "type": "float" } ], "raises": [], "params": [ { "identifier": "base_lr", "type": null, "docstring": "base learning rate", "docstring_tokens": [ "base", "learning", "rate" ], "default": null, "is_optional": null }, { "identifier": "global_step", "type": null, "docstring": "global training steps", "docstring_tokens": [ "global", "training", "steps" ], "default": null, "is_optional": null }, { "identifier": "warmup_steps", "type": null, "docstring": "number of steps for warming up", "docstring_tokens": [ "number", "of", "steps", "for", "warming", "up" ], "default": null, "is_optional": null }, { "identifier": "min_lr", "type": null, "docstring": "minimum learning rate", "docstring_tokens": [ "minimum", "learning", "rate" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def linear_anneal(base_lr, global_step, warmup_steps, min_lr): lr = max(min_lr + (base_lr - min_lr) * (1.0 - global_step / warmup_steps), min_lr) return lr
175
554
39f1cc2e03f1e8d4a4cbcea5957c96172ced046a
moyamo/polygon2square
geometry.py
[ "MIT" ]
Python
line_intersects_segment
<not_specific>
def line_intersects_segment(line, line_segment): """Returns the intersection the Line and LineSegment or None if they do not intersect. This function is useful for splitting polygons by a straight line. """ linesegform = line_segment.to_line() if line.is_parallel_to(linesegform): return None else: p = line.intersection(linesegform) # Is the intersection on the line_segment? if line_segment.between(p): return p else: return None
Returns the intersection the Line and LineSegment or None if they do not intersect. This function is useful for splitting polygons by a straight line.
Returns the intersection the Line and LineSegment or None if they do not intersect. This function is useful for splitting polygons by a straight line.
[ "Returns", "the", "intersection", "the", "Line", "and", "LineSegment", "or", "None", "if", "they", "do", "not", "intersect", ".", "This", "function", "is", "useful", "for", "splitting", "polygons", "by", "a", "straight", "line", "." ]
def line_intersects_segment(line, line_segment): linesegform = line_segment.to_line() if line.is_parallel_to(linesegform): return None else: p = line.intersection(linesegform) if line_segment.between(p): return p else: return None
[ "def", "line_intersects_segment", "(", "line", ",", "line_segment", ")", ":", "linesegform", "=", "line_segment", ".", "to_line", "(", ")", "if", "line", ".", "is_parallel_to", "(", "linesegform", ")", ":", "return", "None", "else", ":", "p", "=", "line", ".", "intersection", "(", "linesegform", ")", "if", "line_segment", ".", "between", "(", "p", ")", ":", "return", "p", "else", ":", "return", "None" ]
Returns the intersection the Line and LineSegment or None if they do not intersect.
[ "Returns", "the", "intersection", "the", "Line", "and", "LineSegment", "or", "None", "if", "they", "do", "not", "intersect", "." ]
[ "\"\"\"Returns the intersection the Line and LineSegment or None if they do\n not intersect.\n\n This function is useful for splitting polygons by a straight line.\n \"\"\"", "# Is the intersection on the line_segment?" ]
[ { "param": "line", "type": null }, { "param": "line_segment", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "line", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "line_segment", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def line_intersects_segment(line, line_segment): linesegform = line_segment.to_line() if line.is_parallel_to(linesegform): return None else: p = line.intersection(linesegform) if line_segment.between(p): return p else: return None
176
208
f1427a0b1563fd72c10e3b46ea035ae51710b217
bmorledge-hampton19/benbiohelpers
python/benbiohelpers/CountThisInThat/OutputDataStratifiers.py
[ "MIT" ]
Python
sortPositionIDs
<not_specific>
def sortPositionIDs(positionIDs: Union[List[str], List[Tuple]]): """ Sorts the position IDs derived from the Encompassed Data and Encompassing Data ODS's. Can handle input as a list of strings or tuples, and with a single position or both a start and end position. However, it is assumed that all members of the list are formatted the same with respect to the above variations. """ # Sorting for list of strings: if isinstance(positionIDs[0], str): # If both start and end positions are given (Represented by a '-' between positions, before the strand designation), sort on end position first if '-' in positionIDs[0].split('(')[0]: positionIDs.sort(key = lambda positionID: float(positionID.split('(')[0].split('-')[1])) # Next, sort on the first (potentially only) given position. positionIDs.sort(key = lambda positionID: float(positionID.split('(')[0].split(':')[1].split('-')[0])) # Finally, sort on the chromosome identifier. positionIDs.sort(key = lambda positionID: positionID.split(':')[0]) return positionIDs # Do this as a formality, even though this sorts in place (I think). # Otherwise, assume we have some iterable. else: # If the iterable has 4 items, sort on item 3 first, which represents the end position if len(positionIDs[0]) == 4: positionIDs.sort(key = lambda positionID: positionID[2]) # Next, sort by the start position and then the chromosome positionIDs.sort(key = lambda positionID: positionID[1]) positionIDs.sort(key = lambda positionID: positionID[0])
Sorts the position IDs derived from the Encompassed Data and Encompassing Data ODS's. Can handle input as a list of strings or tuples, and with a single position or both a start and end position. However, it is assumed that all members of the list are formatted the same with respect to the above variations.
Sorts the position IDs derived from the Encompassed Data and Encompassing Data ODS's. Can handle input as a list of strings or tuples, and with a single position or both a start and end position. However, it is assumed that all members of the list are formatted the same with respect to the above variations.
[ "Sorts", "the", "position", "IDs", "derived", "from", "the", "Encompassed", "Data", "and", "Encompassing", "Data", "ODS", "'", "s", ".", "Can", "handle", "input", "as", "a", "list", "of", "strings", "or", "tuples", "and", "with", "a", "single", "position", "or", "both", "a", "start", "and", "end", "position", ".", "However", "it", "is", "assumed", "that", "all", "members", "of", "the", "list", "are", "formatted", "the", "same", "with", "respect", "to", "the", "above", "variations", "." ]
def sortPositionIDs(positionIDs: Union[List[str], List[Tuple]]): if isinstance(positionIDs[0], str): if '-' in positionIDs[0].split('(')[0]: positionIDs.sort(key = lambda positionID: float(positionID.split('(')[0].split('-')[1])) positionIDs.sort(key = lambda positionID: float(positionID.split('(')[0].split(':')[1].split('-')[0])) positionIDs.sort(key = lambda positionID: positionID.split(':')[0]) return positionIDs else: if len(positionIDs[0]) == 4: positionIDs.sort(key = lambda positionID: positionID[2]) positionIDs.sort(key = lambda positionID: positionID[1]) positionIDs.sort(key = lambda positionID: positionID[0])
[ "def", "sortPositionIDs", "(", "positionIDs", ":", "Union", "[", "List", "[", "str", "]", ",", "List", "[", "Tuple", "]", "]", ")", ":", "if", "isinstance", "(", "positionIDs", "[", "0", "]", ",", "str", ")", ":", "if", "'-'", "in", "positionIDs", "[", "0", "]", ".", "split", "(", "'('", ")", "[", "0", "]", ":", "positionIDs", ".", "sort", "(", "key", "=", "lambda", "positionID", ":", "float", "(", "positionID", ".", "split", "(", "'('", ")", "[", "0", "]", ".", "split", "(", "'-'", ")", "[", "1", "]", ")", ")", "positionIDs", ".", "sort", "(", "key", "=", "lambda", "positionID", ":", "float", "(", "positionID", ".", "split", "(", "'('", ")", "[", "0", "]", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "split", "(", "'-'", ")", "[", "0", "]", ")", ")", "positionIDs", ".", "sort", "(", "key", "=", "lambda", "positionID", ":", "positionID", ".", "split", "(", "':'", ")", "[", "0", "]", ")", "return", "positionIDs", "else", ":", "if", "len", "(", "positionIDs", "[", "0", "]", ")", "==", "4", ":", "positionIDs", ".", "sort", "(", "key", "=", "lambda", "positionID", ":", "positionID", "[", "2", "]", ")", "positionIDs", ".", "sort", "(", "key", "=", "lambda", "positionID", ":", "positionID", "[", "1", "]", ")", "positionIDs", ".", "sort", "(", "key", "=", "lambda", "positionID", ":", "positionID", "[", "0", "]", ")" ]
Sorts the position IDs derived from the Encompassed Data and Encompassing Data ODS's.
[ "Sorts", "the", "position", "IDs", "derived", "from", "the", "Encompassed", "Data", "and", "Encompassing", "Data", "ODS", "'", "s", "." ]
[ "\"\"\"\n Sorts the position IDs derived from the Encompassed Data and Encompassing Data ODS's.\n Can handle input as a list of strings or tuples, and with a single position or both a start and end position.\n However, it is assumed that all members of the list are formatted the same with respect to the above variations.\n \"\"\"", "# Sorting for list of strings:", "# If both start and end positions are given (Represented by a '-' between positions, before the strand designation), sort on end position first", "# Next, sort on the first (potentially only) given position.", "# Finally, sort on the chromosome identifier.", "# Do this as a formality, even though this sorts in place (I think).", "# Otherwise, assume we have some iterable.", "# If the iterable has 4 items, sort on item 3 first, which represents the end position", "# Next, sort by the start position and then the chromosome" ]
[ { "param": "positionIDs", "type": "Union[List[str], List[Tuple]]" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "positionIDs", "type": "Union[List[str], List[Tuple]]", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def sortPositionIDs(positionIDs: Union[List[str], List[Tuple]]): if isinstance(positionIDs[0], str): if '-' in positionIDs[0].split('(')[0]: positionIDs.sort(key = lambda positionID: float(positionID.split('(')[0].split('-')[1])) positionIDs.sort(key = lambda positionID: float(positionID.split('(')[0].split(':')[1].split('-')[0])) positionIDs.sort(key = lambda positionID: positionID.split(':')[0]) return positionIDs else: if len(positionIDs[0]) == 4: positionIDs.sort(key = lambda positionID: positionID[2]) positionIDs.sort(key = lambda positionID: positionID[1]) positionIDs.sort(key = lambda positionID: positionID[0])
177
340
6a5460420e8e6297daab6e39711c4336ce592144
2019-fall-csc-226/t03-boustrophedon-turtles-meadors-frank-t03
t03_stub.py
[ "MIT" ]
Python
squiggle_right
null
def squiggle_right(meadors): """ Uses turtle to fill a line left to right """ for cross_space in range(12): for first_arc in range(2): meadors.forward(20) meadors.right(90) for second_arc in range(2): meadors.forward(20) meadors.left(90)
Uses turtle to fill a line left to right
Uses turtle to fill a line left to right
[ "Uses", "turtle", "to", "fill", "a", "line", "left", "to", "right" ]
def squiggle_right(meadors): for cross_space in range(12): for first_arc in range(2): meadors.forward(20) meadors.right(90) for second_arc in range(2): meadors.forward(20) meadors.left(90)
[ "def", "squiggle_right", "(", "meadors", ")", ":", "for", "cross_space", "in", "range", "(", "12", ")", ":", "for", "first_arc", "in", "range", "(", "2", ")", ":", "meadors", ".", "forward", "(", "20", ")", "meadors", ".", "right", "(", "90", ")", "for", "second_arc", "in", "range", "(", "2", ")", ":", "meadors", ".", "forward", "(", "20", ")", "meadors", ".", "left", "(", "90", ")" ]
Uses turtle to fill a line left to right
[ "Uses", "turtle", "to", "fill", "a", "line", "left", "to", "right" ]
[ "\"\"\"\n Uses turtle to fill a line left to right\n \"\"\"" ]
[ { "param": "meadors", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "meadors", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def squiggle_right(meadors): for cross_space in range(12): for first_arc in range(2): meadors.forward(20) meadors.right(90) for second_arc in range(2): meadors.forward(20) meadors.left(90)
178
208
a4bad79a38087b87cf65767ccd679f09a571f15f
ssh0/growing-string
triangular_lattice/correlation.py
[ "MIT" ]
Python
choose_indexes
<not_specific>
def choose_indexes(_list, num, L): """Choose the index pairs whose width is fixed. """ N = len(_list) if N - L < num: raise StopIteration('list index is smaller than expected (%d), ' % (num + L) + 'given (%d).' % N ) return sorted(random.sample(_list[:N - L], num))
Choose the index pairs whose width is fixed.
Choose the index pairs whose width is fixed.
[ "Choose", "the", "index", "pairs", "whose", "width", "is", "fixed", "." ]
def choose_indexes(_list, num, L): N = len(_list) if N - L < num: raise StopIteration('list index is smaller than expected (%d), ' % (num + L) + 'given (%d).' % N ) return sorted(random.sample(_list[:N - L], num))
[ "def", "choose_indexes", "(", "_list", ",", "num", ",", "L", ")", ":", "N", "=", "len", "(", "_list", ")", "if", "N", "-", "L", "<", "num", ":", "raise", "StopIteration", "(", "'list index is smaller than expected (%d), '", "%", "(", "num", "+", "L", ")", "+", "'given (%d).'", "%", "N", ")", "return", "sorted", "(", "random", ".", "sample", "(", "_list", "[", ":", "N", "-", "L", "]", ",", "num", ")", ")" ]
Choose the index pairs whose width is fixed.
[ "Choose", "the", "index", "pairs", "whose", "width", "is", "fixed", "." ]
[ "\"\"\"Choose the index pairs whose width is fixed. \"\"\"" ]
[ { "param": "_list", "type": null }, { "param": "num", "type": null }, { "param": "L", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "_list", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "num", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "L", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import random def choose_indexes(_list, num, L): N = len(_list) if N - L < num: raise StopIteration('list index is smaller than expected (%d), ' % (num + L) + 'given (%d).' % N ) return sorted(random.sample(_list[:N - L], num))
179
291
e4fc25dd2f9033f2277ba4757222048350167d65
InbarRose/irtools
irtools/_libs/file_utils.py
[ "MIT" ]
Python
write_to_tmp_file
<not_specific>
def write_to_tmp_file(content): """ writes the content to a temporary file :param content: the content to write (string) :return: returns the file_path """ with tempfile.NamedTemporaryFile(delete=False) as f: f.write(content) return f.name
writes the content to a temporary file :param content: the content to write (string) :return: returns the file_path
writes the content to a temporary file
[ "writes", "the", "content", "to", "a", "temporary", "file" ]
def write_to_tmp_file(content): with tempfile.NamedTemporaryFile(delete=False) as f: f.write(content) return f.name
[ "def", "write_to_tmp_file", "(", "content", ")", ":", "with", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "as", "f", ":", "f", ".", "write", "(", "content", ")", "return", "f", ".", "name" ]
writes the content to a temporary file
[ "writes", "the", "content", "to", "a", "temporary", "file" ]
[ "\"\"\"\n writes the content to a temporary file\n :param content: the content to write (string)\n :return: returns the file_path\n \"\"\"" ]
[ { "param": "content", "type": null } ]
{ "returns": [ { "docstring": "returns the file_path", "docstring_tokens": [ "returns", "the", "file_path" ], "type": null } ], "raises": [], "params": [ { "identifier": "content", "type": null, "docstring": "the content to write (string)", "docstring_tokens": [ "the", "content", "to", "write", "(", "string", ")" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import tempfile def write_to_tmp_file(content): with tempfile.NamedTemporaryFile(delete=False) as f: f.write(content) return f.name
180
483
d642856c24c7129dee1af95b499bd6a18318e6d6
mathigatti/silence-removal
silenceRemoval.py
[ "MIT" ]
Python
detect_leading_silence
<not_specific>
def detect_leading_silence(sound, silence_threshold=-50.0, chunk_size=10): ''' sound is a pydub.AudioSegment silence_threshold in dB chunk_size in ms iterate over chunks until you find the first one with sound ''' trim_ms = 0 # ms while sound[trim_ms:trim_ms+chunk_size].dBFS < silence_threshold: trim_ms += chunk_size return trim_ms
sound is a pydub.AudioSegment silence_threshold in dB chunk_size in ms iterate over chunks until you find the first one with sound
sound is a pydub.AudioSegment silence_threshold in dB chunk_size in ms iterate over chunks until you find the first one with sound
[ "sound", "is", "a", "pydub", ".", "AudioSegment", "silence_threshold", "in", "dB", "chunk_size", "in", "ms", "iterate", "over", "chunks", "until", "you", "find", "the", "first", "one", "with", "sound" ]
def detect_leading_silence(sound, silence_threshold=-50.0, chunk_size=10): trim_ms = 0 while sound[trim_ms:trim_ms+chunk_size].dBFS < silence_threshold: trim_ms += chunk_size return trim_ms
[ "def", "detect_leading_silence", "(", "sound", ",", "silence_threshold", "=", "-", "50.0", ",", "chunk_size", "=", "10", ")", ":", "trim_ms", "=", "0", "while", "sound", "[", "trim_ms", ":", "trim_ms", "+", "chunk_size", "]", ".", "dBFS", "<", "silence_threshold", ":", "trim_ms", "+=", "chunk_size", "return", "trim_ms" ]
sound is a pydub.AudioSegment silence_threshold in dB chunk_size in ms iterate over chunks until you find the first one with sound
[ "sound", "is", "a", "pydub", ".", "AudioSegment", "silence_threshold", "in", "dB", "chunk_size", "in", "ms", "iterate", "over", "chunks", "until", "you", "find", "the", "first", "one", "with", "sound" ]
[ "'''\n sound is a pydub.AudioSegment\n silence_threshold in dB\n chunk_size in ms\n iterate over chunks until you find the first one with sound\n '''", "# ms" ]
[ { "param": "sound", "type": null }, { "param": "silence_threshold", "type": null }, { "param": "chunk_size", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "sound", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "silence_threshold", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "chunk_size", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def detect_leading_silence(sound, silence_threshold=-50.0, chunk_size=10): trim_ms = 0 while sound[trim_ms:trim_ms+chunk_size].dBFS < silence_threshold: trim_ms += chunk_size return trim_ms
181
554
6aaf7c2737a002697abb08c82d6222d1e68020c3
ohsu-comp-bio/neoepiscope
neoepiscope/transcript_expression.py
[ "MIT" ]
Python
feature_to_tpm_dict
<not_specific>
def feature_to_tpm_dict(feature_to_read_count, feature_to_feature_length): """Calculate TPM values for feature feature_to_read_count: dictionary linking features to read counts (float) feature_to_feature_length: dictionary linking features to feature lengths (float) Return value: dictionary linking feature ID to TPM value """ total_rpk = 0.0 feature_to_rpk = {} feature_to_tpm = {} # Get read per kilobase counts for each feature for feature in feature_to_read_count: try: rpk = feature_to_read_count[feature] / feature_to_feature_length[feature] except KeyError: continue feature_to_rpk[feature] = rpk total_rpk += rpk # Calculate scaling factor scaling = total_rpk / 1000000.0 # Calculate TPM values for feature in feature_to_rpk: tpm = feature_to_rpk[feature] / scaling feature_to_tpm[feature] = tpm return feature_to_tpm
Calculate TPM values for feature feature_to_read_count: dictionary linking features to read counts (float) feature_to_feature_length: dictionary linking features to feature lengths (float) Return value: dictionary linking feature ID to TPM value
Calculate TPM values for feature feature_to_read_count: dictionary linking features to read counts (float) feature_to_feature_length: dictionary linking features to feature lengths (float) Return value: dictionary linking feature ID to TPM value
[ "Calculate", "TPM", "values", "for", "feature", "feature_to_read_count", ":", "dictionary", "linking", "features", "to", "read", "counts", "(", "float", ")", "feature_to_feature_length", ":", "dictionary", "linking", "features", "to", "feature", "lengths", "(", "float", ")", "Return", "value", ":", "dictionary", "linking", "feature", "ID", "to", "TPM", "value" ]
def feature_to_tpm_dict(feature_to_read_count, feature_to_feature_length): total_rpk = 0.0 feature_to_rpk = {} feature_to_tpm = {} for feature in feature_to_read_count: try: rpk = feature_to_read_count[feature] / feature_to_feature_length[feature] except KeyError: continue feature_to_rpk[feature] = rpk total_rpk += rpk scaling = total_rpk / 1000000.0 for feature in feature_to_rpk: tpm = feature_to_rpk[feature] / scaling feature_to_tpm[feature] = tpm return feature_to_tpm
[ "def", "feature_to_tpm_dict", "(", "feature_to_read_count", ",", "feature_to_feature_length", ")", ":", "total_rpk", "=", "0.0", "feature_to_rpk", "=", "{", "}", "feature_to_tpm", "=", "{", "}", "for", "feature", "in", "feature_to_read_count", ":", "try", ":", "rpk", "=", "feature_to_read_count", "[", "feature", "]", "/", "feature_to_feature_length", "[", "feature", "]", "except", "KeyError", ":", "continue", "feature_to_rpk", "[", "feature", "]", "=", "rpk", "total_rpk", "+=", "rpk", "scaling", "=", "total_rpk", "/", "1000000.0", "for", "feature", "in", "feature_to_rpk", ":", "tpm", "=", "feature_to_rpk", "[", "feature", "]", "/", "scaling", "feature_to_tpm", "[", "feature", "]", "=", "tpm", "return", "feature_to_tpm" ]
Calculate TPM values for feature feature_to_read_count: dictionary linking features to read counts (float) feature_to_feature_length: dictionary linking features to feature lengths (float)
[ "Calculate", "TPM", "values", "for", "feature", "feature_to_read_count", ":", "dictionary", "linking", "features", "to", "read", "counts", "(", "float", ")", "feature_to_feature_length", ":", "dictionary", "linking", "features", "to", "feature", "lengths", "(", "float", ")" ]
[ "\"\"\"Calculate TPM values for feature\n\n feature_to_read_count: dictionary linking features to read counts (float)\n feature_to_feature_length: dictionary linking features to feature lengths (float)\n\n Return value: dictionary linking feature ID to TPM value\n \"\"\"", "# Get read per kilobase counts for each feature", "# Calculate scaling factor", "# Calculate TPM values" ]
[ { "param": "feature_to_read_count", "type": null }, { "param": "feature_to_feature_length", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "feature_to_read_count", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "feature_to_feature_length", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def feature_to_tpm_dict(feature_to_read_count, feature_to_feature_length): total_rpk = 0.0 feature_to_rpk = {} feature_to_tpm = {} for feature in feature_to_read_count: try: rpk = feature_to_read_count[feature] / feature_to_feature_length[feature] except KeyError: continue feature_to_rpk[feature] = rpk total_rpk += rpk scaling = total_rpk / 1000000.0 for feature in feature_to_rpk: tpm = feature_to_rpk[feature] / scaling feature_to_tpm[feature] = tpm return feature_to_tpm
182
96
807d3c6a43078e1a316a558c13e5e3d375e846db
tangzhiyi11/Paddle
python/paddle/fluid/contrib/quantize/quantize_transpiler.py
[ "Apache-2.0" ]
Python
_quantized_scale_name
<not_specific>
def _quantized_scale_name(var_name): """ Return quantized variable name for the input `var_name`. """ return "%s.scale" % (var_name)
Return quantized variable name for the input `var_name`.
Return quantized variable name for the input `var_name`.
[ "Return", "quantized", "variable", "name", "for", "the", "input", "`", "var_name", "`", "." ]
def _quantized_scale_name(var_name): return "%s.scale" % (var_name)
[ "def", "_quantized_scale_name", "(", "var_name", ")", ":", "return", "\"%s.scale\"", "%", "(", "var_name", ")" ]
Return quantized variable name for the input `var_name`.
[ "Return", "quantized", "variable", "name", "for", "the", "input", "`", "var_name", "`", "." ]
[ "\"\"\"\n Return quantized variable name for the input `var_name`.\n \"\"\"" ]
[ { "param": "var_name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "var_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _quantized_scale_name(var_name): return "%s.scale" % (var_name)
183
605
2e48be5cdfa6fd39291f2a3ae37f9fd92ba35a27
itongs/Historic_Work
FIT2004-All-Assns/assignment1.py
[ "MIT" ]
Python
convert_to_numbers
<not_specific>
def convert_to_numbers(string_list): """ Converts each string to a number as if it were a base 27 number @param string_list The list of strings to convert @return num_list The numbers corresponding to each string @complexity O(NM) where N is the length of the list and M the maximum number of letters in a string """ num_list = [0] * len(string_list) for i in range(len(string_list)): # Loop for each string word = string_list[i] num = 0 for j in range(len(word)): # For each character in each string mult = ord(word[-1-j]) % 32 num = (26**j)*mult + num num_list[i] = num return num_list
Converts each string to a number as if it were a base 27 number @param string_list The list of strings to convert @return num_list The numbers corresponding to each string @complexity O(NM) where N is the length of the list and M the maximum number of letters in a string
Converts each string to a number as if it were a base 27 number @param string_list The list of strings to convert @return num_list The numbers corresponding to each string @complexity O(NM) where N is the length of the list and M the maximum number of letters in a string
[ "Converts", "each", "string", "to", "a", "number", "as", "if", "it", "were", "a", "base", "27", "number", "@param", "string_list", "The", "list", "of", "strings", "to", "convert", "@return", "num_list", "The", "numbers", "corresponding", "to", "each", "string", "@complexity", "O", "(", "NM", ")", "where", "N", "is", "the", "length", "of", "the", "list", "and", "M", "the", "maximum", "number", "of", "letters", "in", "a", "string" ]
def convert_to_numbers(string_list): num_list = [0] * len(string_list) for i in range(len(string_list)): word = string_list[i] num = 0 for j in range(len(word)): mult = ord(word[-1-j]) % 32 num = (26**j)*mult + num num_list[i] = num return num_list
[ "def", "convert_to_numbers", "(", "string_list", ")", ":", "num_list", "=", "[", "0", "]", "*", "len", "(", "string_list", ")", "for", "i", "in", "range", "(", "len", "(", "string_list", ")", ")", ":", "word", "=", "string_list", "[", "i", "]", "num", "=", "0", "for", "j", "in", "range", "(", "len", "(", "word", ")", ")", ":", "mult", "=", "ord", "(", "word", "[", "-", "1", "-", "j", "]", ")", "%", "32", "num", "=", "(", "26", "**", "j", ")", "*", "mult", "+", "num", "num_list", "[", "i", "]", "=", "num", "return", "num_list" ]
Converts each string to a number as if it were a base 27 number @param string_list The list of strings to convert @return num_list The numbers corresponding to each string @complexity O(NM) where N is the length of the list and M the maximum number of letters in a string
[ "Converts", "each", "string", "to", "a", "number", "as", "if", "it", "were", "a", "base", "27", "number", "@param", "string_list", "The", "list", "of", "strings", "to", "convert", "@return", "num_list", "The", "numbers", "corresponding", "to", "each", "string", "@complexity", "O", "(", "NM", ")", "where", "N", "is", "the", "length", "of", "the", "list", "and", "M", "the", "maximum", "number", "of", "letters", "in", "a", "string" ]
[ "\"\"\"\r\n Converts each string to a number as if it were a base 27 number\r\n\r\n @param string_list The list of strings to convert\r\n @return num_list The numbers corresponding to each string\r\n @complexity O(NM) where N is the length of the list and M the maximum number of letters in a string\r\n \"\"\"", "# Loop for each string\r", "# For each character in each string\r" ]
[ { "param": "string_list", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "string_list", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def convert_to_numbers(string_list):
    """Map each lowercase word to an integer by treating its letters
    ('a' -> 1 ... 'z' -> 26) as digits of a base-26 expansion, with the
    last letter as the least significant digit.

    @param string_list The list of strings to convert
    @return The numbers corresponding to each string, in the same order
    @complexity O(NM) where N is the number of strings and M the longest length
    """
    numbers = []
    for word in string_list:
        value = 0
        # Horner's scheme left to right: each step shifts prior digits up
        # one base-26 place, so weights match the positional-sum formula.
        for char in word:
            value = value * 26 + (ord(char) % 32)
        numbers.append(value)
    return numbers
184
187
f14cde8e28d29ae671614c8a7783e5e7015c9e9f
eifinger/hass-here-weather
custom_components/here_weather/utils.py
[ "MIT" ]
Python
convert_asterisk_to_none
str | None
def convert_asterisk_to_none(state: str) -> str | None: """Convert HERE API representation of None.""" if state == "*": return None return state
Convert HERE API representation of None.
Convert HERE API representation of None.
[ "Convert", "HERE", "API", "representation", "of", "None", "." ]
def convert_asterisk_to_none(state: str) -> str | None: if state == "*": return None return state
[ "def", "convert_asterisk_to_none", "(", "state", ":", "str", ")", "->", "str", "|", "None", ":", "if", "state", "==", "\"*\"", ":", "return", "None", "return", "state" ]
Convert HERE API representation of None.
[ "Convert", "HERE", "API", "representation", "of", "None", "." ]
[ "\"\"\"Convert HERE API representation of None.\"\"\"" ]
[ { "param": "state", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "state", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def convert_asterisk_to_none(state: str) -> str | None: if state == "*": return None return state
185
71
61ab8f2051d78497acb3a700b5a4918a05a6819e
Grrmo/Domino-console-game
dominoes.py
[ "MIT" ]
Python
assign
<not_specific>
def assign(domino_set): """Assigns 7 domino pieces to each player and returns each players set of tiles and the list of remaining tiles""" player_hand = [] computer_hand = [] for _ in range(7): player_hand += [domino_set.pop()] computer_hand += [domino_set.pop()] return player_hand, computer_hand, domino_set
Assigns 7 domino pieces to each player and returns each players set of tiles and the list of remaining tiles
Assigns 7 domino pieces to each player and returns each players set of tiles and the list of remaining tiles
[ "Assigns", "7", "domino", "pieces", "to", "each", "player", "and", "returns", "each", "players", "set", "of", "tiles", "and", "the", "list", "of", "remaining", "tiles" ]
def assign(domino_set): player_hand = [] computer_hand = [] for _ in range(7): player_hand += [domino_set.pop()] computer_hand += [domino_set.pop()] return player_hand, computer_hand, domino_set
[ "def", "assign", "(", "domino_set", ")", ":", "player_hand", "=", "[", "]", "computer_hand", "=", "[", "]", "for", "_", "in", "range", "(", "7", ")", ":", "player_hand", "+=", "[", "domino_set", ".", "pop", "(", ")", "]", "computer_hand", "+=", "[", "domino_set", ".", "pop", "(", ")", "]", "return", "player_hand", ",", "computer_hand", ",", "domino_set" ]
Assigns 7 domino pieces to each player and returns each players set of tiles and the list of remaining tiles
[ "Assigns", "7", "domino", "pieces", "to", "each", "player", "and", "returns", "each", "players", "set", "of", "tiles", "and", "the", "list", "of", "remaining", "tiles" ]
[ "\"\"\"Assigns 7 domino pieces to each player and returns each players set of tiles and the list of remaining tiles\"\"\"" ]
[ { "param": "domino_set", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "domino_set", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def assign(domino_set):
    """Deal 7 tiles alternately to the player and the computer.

    Tiles are drawn from the end of `domino_set`, which is mutated in place.
    Returns (player_hand, computer_hand, remaining_tiles).
    """
    player_hand, computer_hand = [], []
    for _ in range(7):
        # Player draws first each round, then the computer.
        player_hand.append(domino_set.pop())
        computer_hand.append(domino_set.pop())
    return player_hand, computer_hand, domino_set
186
977
5ed113ded28667bad8971c9f226aa933fb254623
password520/macro_pack
src/common/utils.py
[ "Apache-2.0" ]
Python
checkModuleExist
<not_specific>
def checkModuleExist(name): r"""Returns if a top-level module with :attr:`name` exists *without** importing it. This is generally safer than try-catch block around a `import X`. It avoids third party libraries breaking assumptions of some of our tests, e.g., setting multiprocessing start method when imported (see librosa/#747, torchvision/#544). """ spec = importlib.util.find_spec(name) return spec is not None
r"""Returns if a top-level module with :attr:`name` exists *without** importing it. This is generally safer than try-catch block around a `import X`. It avoids third party libraries breaking assumptions of some of our tests, e.g., setting multiprocessing start method when imported (see librosa/#747, torchvision/#544).
r"""Returns if a top-level module with :attr:`name` exists *without importing it. This is generally safer than try-catch block around a `import X`. It avoids third party libraries breaking assumptions of some of our tests, e.g., setting multiprocessing start method when imported .
[ "r", "\"", "\"", "\"", "Returns", "if", "a", "top", "-", "level", "module", "with", ":", "attr", ":", "`", "name", "`", "exists", "*", "without", "importing", "it", ".", "This", "is", "generally", "safer", "than", "try", "-", "catch", "block", "around", "a", "`", "import", "X", "`", ".", "It", "avoids", "third", "party", "libraries", "breaking", "assumptions", "of", "some", "of", "our", "tests", "e", ".", "g", ".", "setting", "multiprocessing", "start", "method", "when", "imported", "." ]
def checkModuleExist(name): spec = importlib.util.find_spec(name) return spec is not None
[ "def", "checkModuleExist", "(", "name", ")", ":", "spec", "=", "importlib", ".", "util", ".", "find_spec", "(", "name", ")", "return", "spec", "is", "not", "None" ]
r"""Returns if a top-level module with :attr:`name` exists *without importing it.
[ "r", "\"", "\"", "\"", "Returns", "if", "a", "top", "-", "level", "module", "with", ":", "attr", ":", "`", "name", "`", "exists", "*", "without", "importing", "it", "." ]
[ "r\"\"\"Returns if a top-level module with :attr:`name` exists *without**\n importing it. This is generally safer than try-catch block around a\n `import X`. It avoids third party libraries breaking assumptions of some of\n our tests, e.g., setting multiprocessing start method when imported\n (see librosa/#747, torchvision/#544).\n \"\"\"" ]
[ { "param": "name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
# Bug fix: `import importlib` does not make the `importlib.util` submodule
# reliably accessible as an attribute; it must be imported explicitly.
import importlib.util

def checkModuleExist(name):
    r"""Return True if a top-level module named `name` exists, *without*
    importing it.

    Probing the finder system is generally safer than a try/except around
    `import name`, because it avoids running the module's import-time side
    effects.
    """
    # find_spec returns None when no finder can locate the module.
    spec = importlib.util.find_spec(name)
    return spec is not None
187
914
8331cdba8be31b794a2c219b4c04c3cf5553dd87
Sandia-OpenSHMEM/sandia-shmem
scripts/generate_manpages.py
[ "BSD-3-Clause-Open-MPI" ]
Python
funcReplacements
<not_specific>
def funcReplacements(tex): """ Convert every FUNC macro with the bolded argument string (.B <argument>). -- Special consideration to any periods after the macro, since a period at the beginning of a line denotes a comment and the entire line will not appear. (.BR <argument> .) """ keyString = "\FUNC{" while (tex.find(keyString) != -1): index = tex.find(keyString) startBrace = tex.find("{", index) endBrace = tex.find("}", startBrace) innerText = tex[startBrace+1:endBrace] if(endBrace + 1 < len(tex) and (tex[endBrace + 1] == "." or tex[endBrace + 1] == ",") or tex[endBrace + 1] == ";"): tex = tex[:index] + "\n" + \ ".BR \"" + innerText + \ "\" " + tex[endBrace + 1] + "\n" + tex[endBrace+2:] else: tex = tex[:index] + "\n" + \ ".B " + innerText + \ "\n" + tex[endBrace+1:] return tex
Convert every FUNC macro with the bolded argument string (.B <argument>). -- Special consideration to any periods after the macro, since a period at the beginning of a line denotes a comment and the entire line will not appear. (.BR <argument> .)
Convert every FUNC macro with the bolded argument string (.B ). Special consideration to any periods after the macro, since a period at the beginning of a line denotes a comment and the entire line will not appear.
[ "Convert", "every", "FUNC", "macro", "with", "the", "bolded", "argument", "string", "(", ".", "B", ")", ".", "Special", "consideration", "to", "any", "periods", "after", "the", "macro", "since", "a", "period", "at", "the", "beginning", "of", "a", "line", "denotes", "a", "comment", "and", "the", "entire", "line", "will", "not", "appear", "." ]
def funcReplacements(tex): keyString = "\FUNC{" while (tex.find(keyString) != -1): index = tex.find(keyString) startBrace = tex.find("{", index) endBrace = tex.find("}", startBrace) innerText = tex[startBrace+1:endBrace] if(endBrace + 1 < len(tex) and (tex[endBrace + 1] == "." or tex[endBrace + 1] == ",") or tex[endBrace + 1] == ";"): tex = tex[:index] + "\n" + \ ".BR \"" + innerText + \ "\" " + tex[endBrace + 1] + "\n" + tex[endBrace+2:] else: tex = tex[:index] + "\n" + \ ".B " + innerText + \ "\n" + tex[endBrace+1:] return tex
[ "def", "funcReplacements", "(", "tex", ")", ":", "keyString", "=", "\"\\FUNC{\"", "while", "(", "tex", ".", "find", "(", "keyString", ")", "!=", "-", "1", ")", ":", "index", "=", "tex", ".", "find", "(", "keyString", ")", "startBrace", "=", "tex", ".", "find", "(", "\"{\"", ",", "index", ")", "endBrace", "=", "tex", ".", "find", "(", "\"}\"", ",", "startBrace", ")", "innerText", "=", "tex", "[", "startBrace", "+", "1", ":", "endBrace", "]", "if", "(", "endBrace", "+", "1", "<", "len", "(", "tex", ")", "and", "(", "tex", "[", "endBrace", "+", "1", "]", "==", "\".\"", "or", "tex", "[", "endBrace", "+", "1", "]", "==", "\",\"", ")", "or", "tex", "[", "endBrace", "+", "1", "]", "==", "\";\"", ")", ":", "tex", "=", "tex", "[", ":", "index", "]", "+", "\"\\n\"", "+", "\".BR \\\"\"", "+", "innerText", "+", "\"\\\" \"", "+", "tex", "[", "endBrace", "+", "1", "]", "+", "\"\\n\"", "+", "tex", "[", "endBrace", "+", "2", ":", "]", "else", ":", "tex", "=", "tex", "[", ":", "index", "]", "+", "\"\\n\"", "+", "\".B \"", "+", "innerText", "+", "\"\\n\"", "+", "tex", "[", "endBrace", "+", "1", ":", "]", "return", "tex" ]
Convert every FUNC macro with the bolded argument string (.B <argument>).
[ "Convert", "every", "FUNC", "macro", "with", "the", "bolded", "argument", "string", "(", ".", "B", "<argument", ">", ")", "." ]
[ "\"\"\"\n Convert every FUNC macro with the bolded argument string \n (.B <argument>).\n -- Special consideration to any periods after the macro, since a \n period at the beginning of a line denotes a comment and the \n entire line will not appear.\n (.BR <argument> .)\n \"\"\"" ]
[ { "param": "tex", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "tex", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def funcReplacements(tex):
    """Replace every \\FUNC{...} macro with a roff bold directive.

    \\FUNC{name} becomes "\\n.B name\\n". When the macro is immediately
    followed by '.', ',' or ';', the punctuation is moved onto the .BR line
    (".BR \"name\" <punct>") so that a period at the start of the running
    text is not treated as a roff comment.
    """
    keyString = "\FUNC{"
    while (tex.find(keyString) != -1):
        index = tex.find(keyString)
        startBrace = tex.find("{", index)
        endBrace = tex.find("}", startBrace)
        innerText = tex[startBrace+1:endBrace]
        # Bug fix: the old condition parsed as `a and (b or c) or d`, so the
        # unguarded `d` term raised IndexError whenever the macro ended the
        # string. Guard the bounds check around all three punctuation tests.
        if (endBrace + 1 < len(tex)
                and tex[endBrace + 1] in (".", ",", ";")):
            tex = tex[:index] + "\n" + \
                ".BR \"" + innerText + \
                "\" " + tex[endBrace + 1] + "\n" + tex[endBrace+2:]
        else:
            tex = tex[:index] + "\n" + \
                ".B " + innerText + \
                "\n" + tex[endBrace+1:]
    return tex
188
1,005
8d011bf1ee203ac028187f1f23461d026dcbdeff
bpsinc-native/src_third_party_chromite
cbuildbot/lkgm_manager.py
[ "BSD-3-Clause" ]
Python
VersionCompare
<not_specific>
def VersionCompare(cls, version_string): """Useful method to return a comparable version of a LKGM string.""" lkgm = cls(version_string) return map(int, [lkgm.build_number, lkgm.branch_build_number, lkgm.patch_number, lkgm.revision_number])
Useful method to return a comparable version of a LKGM string.
Useful method to return a comparable version of a LKGM string.
[ "Useful", "method", "to", "return", "a", "comparable", "version", "of", "a", "LKGM", "string", "." ]
def VersionCompare(cls, version_string): lkgm = cls(version_string) return map(int, [lkgm.build_number, lkgm.branch_build_number, lkgm.patch_number, lkgm.revision_number])
[ "def", "VersionCompare", "(", "cls", ",", "version_string", ")", ":", "lkgm", "=", "cls", "(", "version_string", ")", "return", "map", "(", "int", ",", "[", "lkgm", ".", "build_number", ",", "lkgm", ".", "branch_build_number", ",", "lkgm", ".", "patch_number", ",", "lkgm", ".", "revision_number", "]", ")" ]
Useful method to return a comparable version of a LKGM string.
[ "Useful", "method", "to", "return", "a", "comparable", "version", "of", "a", "LKGM", "string", "." ]
[ "\"\"\"Useful method to return a comparable version of a LKGM string.\"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "version_string", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "version_string", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def VersionCompare(cls, version_string):
    """Return a comparable [build, branch_build, patch, revision] int list
    for an LKGM version string.

    Bug fix: under Python 3, `map` returns a lazy iterator, which is neither
    reusable nor orderable — materialize it to a list so the result can be
    compared lexicographically as intended.
    """
    lkgm = cls(version_string)
    return list(map(int, [lkgm.build_number, lkgm.branch_build_number,
                          lkgm.patch_number, lkgm.revision_number]))
190
274
401f0cfdd4d07937015de2f31a029649edea8015
Lovely-XPP/tkzgeom
src/ConnectSignal/Lambda.py
[ "MIT" ]
Python
connect_lineedit_abstract
null
def connect_lineedit_abstract(scene, properties_list, dict_key, lineedit): """Be an abstract lineEdit callback function.""" ids = scene.list_focus_ids if not lineedit.hasFocus(): for id_ in ids: final_property = scene.project_data.items[id_].item for prop in properties_list: final_property = final_property[prop] final_property[dict_key] = lineedit.text() scene.edit.add_undo_item(scene) else: scene.ui.listWidget.setFocus(True)
Be an abstract lineEdit callback function.
Be an abstract lineEdit callback function.
[ "Be", "an", "abstract", "lineEdit", "callback", "function", "." ]
def connect_lineedit_abstract(scene, properties_list, dict_key, lineedit): ids = scene.list_focus_ids if not lineedit.hasFocus(): for id_ in ids: final_property = scene.project_data.items[id_].item for prop in properties_list: final_property = final_property[prop] final_property[dict_key] = lineedit.text() scene.edit.add_undo_item(scene) else: scene.ui.listWidget.setFocus(True)
[ "def", "connect_lineedit_abstract", "(", "scene", ",", "properties_list", ",", "dict_key", ",", "lineedit", ")", ":", "ids", "=", "scene", ".", "list_focus_ids", "if", "not", "lineedit", ".", "hasFocus", "(", ")", ":", "for", "id_", "in", "ids", ":", "final_property", "=", "scene", ".", "project_data", ".", "items", "[", "id_", "]", ".", "item", "for", "prop", "in", "properties_list", ":", "final_property", "=", "final_property", "[", "prop", "]", "final_property", "[", "dict_key", "]", "=", "lineedit", ".", "text", "(", ")", "scene", ".", "edit", ".", "add_undo_item", "(", "scene", ")", "else", ":", "scene", ".", "ui", ".", "listWidget", ".", "setFocus", "(", "True", ")" ]
Be an abstract lineEdit callback function.
[ "Be", "an", "abstract", "lineEdit", "callback", "function", "." ]
[ "\"\"\"Be an abstract lineEdit callback function.\"\"\"" ]
[ { "param": "scene", "type": null }, { "param": "properties_list", "type": null }, { "param": "dict_key", "type": null }, { "param": "lineedit", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "scene", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "properties_list", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "dict_key", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "lineedit", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def connect_lineedit_abstract(scene, properties_list, dict_key, lineedit):
    """Generic lineEdit callback: write the widget's text into every focused
    item's nested property dict, then record a single undo step."""
    if lineedit.hasFocus():
        # Editing still in progress; hand focus back to the item list widget.
        scene.ui.listWidget.setFocus(True)
        return
    for item_id in scene.list_focus_ids:
        # Walk down the nested property dicts to the parent of `dict_key`.
        target = scene.project_data.items[item_id].item
        for key in properties_list:
            target = target[key]
        target[dict_key] = lineedit.text()
    scene.edit.add_undo_item(scene)
191
417
8dc9f4a1bcd36a48d92389f10a73fbb5256a500a
nhasbun/ds3231_test_tool
time_sync.py
[ "MIT" ]
Python
_sync_time
null
def _sync_time(_serial): """ Activates programming mode on arduino and sync time between machine and ds3231 rtc. """ current_date = datetime.now() year = current_date.year % 100 # extract 2 last digits month = current_date.month day = current_date.day hour = current_date.hour minute = current_date.minute second = current_date.second _serial.write(b'p') # Enabling programming mode # Formatting data data = f"{year},{month},{day},{hour},{minute},{second}" data_b = data.encode('ascii') _serial.write(data_b)
Activates programming mode on arduino and sync time between machine and ds3231 rtc.
Activates programming mode on arduino and sync time between machine and ds3231 rtc.
[ "Activates", "programming", "mode", "on", "arduino", "and", "sync", "time", "between", "machine", "and", "ds3231", "rtc", "." ]
def _sync_time(_serial): current_date = datetime.now() year = current_date.year % 100 month = current_date.month day = current_date.day hour = current_date.hour minute = current_date.minute second = current_date.second _serial.write(b'p') data = f"{year},{month},{day},{hour},{minute},{second}" data_b = data.encode('ascii') _serial.write(data_b)
[ "def", "_sync_time", "(", "_serial", ")", ":", "current_date", "=", "datetime", ".", "now", "(", ")", "year", "=", "current_date", ".", "year", "%", "100", "month", "=", "current_date", ".", "month", "day", "=", "current_date", ".", "day", "hour", "=", "current_date", ".", "hour", "minute", "=", "current_date", ".", "minute", "second", "=", "current_date", ".", "second", "_serial", ".", "write", "(", "b'p'", ")", "data", "=", "f\"{year},{month},{day},{hour},{minute},{second}\"", "data_b", "=", "data", ".", "encode", "(", "'ascii'", ")", "_serial", ".", "write", "(", "data_b", ")" ]
Activates programming mode on arduino and sync time between machine and ds3231 rtc.
[ "Activates", "programming", "mode", "on", "arduino", "and", "sync", "time", "between", "machine", "and", "ds3231", "rtc", "." ]
[ "\"\"\"\n Activates programming mode on arduino and sync time between machine and\n ds3231 rtc.\n \"\"\"", "# extract 2 last digits", "# Enabling programming mode", "# Formatting data" ]
[ { "param": "_serial", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "_serial", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import datetime def _sync_time(_serial): current_date = datetime.now() year = current_date.year % 100 month = current_date.month day = current_date.day hour = current_date.hour minute = current_date.minute second = current_date.second _serial.write(b'p') data = f"{year},{month},{day},{hour},{minute},{second}" data_b = data.encode('ascii') _serial.write(data_b)
192
809
9f20e621b3d55a1272e24858daf2018cf38bc79b
bendichter/brainrender
brainrender/atlases/custom_atlases/insects_brains_db.py
[ "MIT" ]
Python
add_descendants_to_tree
<not_specific>
def add_descendants_to_tree(tree, structure, parent_id=None): """ Recursively goes through all the the descendants of a region and adds them to the tree """ if parent_id is not None: tree.create_node( tag=structure["name"], identifier=structure["id"], parent=parent_id, ) else: tree.create_node( tag=structure["name"], identifier=structure["id"], ) if "children" not in structure.keys(): return if structure["children"]: for child in structure["children"]: add_descendants_to_tree(tree, child, structure["id"])
Recursively goes through all the the descendants of a region and adds them to the tree
Recursively goes through all the the descendants of a region and adds them to the tree
[ "Recursively", "goes", "through", "all", "the", "the", "descendants", "of", "a", "region", "and", "adds", "them", "to", "the", "tree" ]
def add_descendants_to_tree(tree, structure, parent_id=None): if parent_id is not None: tree.create_node( tag=structure["name"], identifier=structure["id"], parent=parent_id, ) else: tree.create_node( tag=structure["name"], identifier=structure["id"], ) if "children" not in structure.keys(): return if structure["children"]: for child in structure["children"]: add_descendants_to_tree(tree, child, structure["id"])
[ "def", "add_descendants_to_tree", "(", "tree", ",", "structure", ",", "parent_id", "=", "None", ")", ":", "if", "parent_id", "is", "not", "None", ":", "tree", ".", "create_node", "(", "tag", "=", "structure", "[", "\"name\"", "]", ",", "identifier", "=", "structure", "[", "\"id\"", "]", ",", "parent", "=", "parent_id", ",", ")", "else", ":", "tree", ".", "create_node", "(", "tag", "=", "structure", "[", "\"name\"", "]", ",", "identifier", "=", "structure", "[", "\"id\"", "]", ",", ")", "if", "\"children\"", "not", "in", "structure", ".", "keys", "(", ")", ":", "return", "if", "structure", "[", "\"children\"", "]", ":", "for", "child", "in", "structure", "[", "\"children\"", "]", ":", "add_descendants_to_tree", "(", "tree", ",", "child", ",", "structure", "[", "\"id\"", "]", ")" ]
Recursively goes through all the the descendants of a region and adds them to the tree
[ "Recursively", "goes", "through", "all", "the", "the", "descendants", "of", "a", "region", "and", "adds", "them", "to", "the", "tree" ]
[ "\"\"\"\n Recursively goes through all the the descendants of a region and adds them to the tree\n \"\"\"" ]
[ { "param": "tree", "type": null }, { "param": "structure", "type": null }, { "param": "parent_id", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "tree", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "structure", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "parent_id", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def add_descendants_to_tree(tree, structure, parent_id=None):
    """Recursively insert `structure` and all of its descendants into `tree`.

    `structure` is a dict with "name", "id" and an optional "children" list;
    `parent_id` is None only for the root node.
    """
    if parent_id is None:
        # Root node: created without a parent.
        tree.create_node(tag=structure["name"], identifier=structure["id"])
    else:
        tree.create_node(
            tag=structure["name"],
            identifier=structure["id"],
            parent=parent_id,
        )
    # Missing or empty "children" both mean this node is a leaf.
    for child in structure.get("children") or []:
        add_descendants_to_tree(tree, child, structure["id"])
194
517
a3532fa0fceeb8c1d1988dcc10ce38982fc43414
aiventures/phpbb_scraper
soup_converter.py
[ "MIT" ]
Python
convert_datetime
<not_specific>
def convert_datetime(date_string, date_format='%A %d. %B %Y, %H:%M'): """converts date string of format WEEKDAY DD. MONTH YYYY, HH:MM into date format""" try: # Convert Dates of sort Sonntag 9. Juni 2019, 09:00 as date format date_obj = datetime.strptime(date_string.strip(), date_format) except: date_obj = datetime(1900, 1, 1) print(traceback.format_exc()) return date_obj
converts date string of format WEEKDAY DD. MONTH YYYY, HH:MM into date format
converts date string of format WEEKDAY DD. MONTH YYYY, HH:MM into date format
[ "converts", "date", "string", "of", "format", "WEEKDAY", "DD", ".", "MONTH", "YYYY", "HH", ":", "MM", "into", "date", "format" ]
def convert_datetime(date_string, date_format='%A %d. %B %Y, %H:%M'): try: date_obj = datetime.strptime(date_string.strip(), date_format) except: date_obj = datetime(1900, 1, 1) print(traceback.format_exc()) return date_obj
[ "def", "convert_datetime", "(", "date_string", ",", "date_format", "=", "'%A %d. %B %Y, %H:%M'", ")", ":", "try", ":", "date_obj", "=", "datetime", ".", "strptime", "(", "date_string", ".", "strip", "(", ")", ",", "date_format", ")", "except", ":", "date_obj", "=", "datetime", "(", "1900", ",", "1", ",", "1", ")", "print", "(", "traceback", ".", "format_exc", "(", ")", ")", "return", "date_obj" ]
converts date string of format WEEKDAY DD.
[ "converts", "date", "string", "of", "format", "WEEKDAY", "DD", "." ]
[ "\"\"\"converts date string of format WEEKDAY DD. MONTH YYYY, HH:MM into date format\"\"\"", "# Convert Dates of sort Sonntag 9. Juni 2019, 09:00 as date format" ]
[ { "param": "date_string", "type": null }, { "param": "date_format", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "date_string", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "date_format", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import traceback
# Bug fix: `import datetime` binds the module, so `datetime.strptime` /
# `datetime(1900, 1, 1)` raised AttributeError/TypeError — import the class.
from datetime import datetime

def convert_datetime(date_string, date_format='%A %d. %B %Y, %H:%M'):
    """Parse a date string (default format 'WEEKDAY DD. MONTH YYYY, HH:MM')
    into a datetime.

    On parse failure the traceback is printed and the sentinel
    datetime(1900, 1, 1) is returned instead of raising.
    """
    try:
        date_obj = datetime.strptime(date_string.strip(), date_format)
    except (ValueError, TypeError, AttributeError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; callers treat 1900-01-01 as "unknown date".
        date_obj = datetime(1900, 1, 1)
        print(traceback.format_exc())
    return date_obj
195
205
16217ec741e174fa5a05bb07cc2d4a68998b183d
clamsproject/clams-sdk-python
clams/restify/__init__.py
[ "Apache-2.0" ]
Python
bool_param
<not_specific>
def bool_param(value): """ Helper function to convert string values to bool type. """ return False if value in (False, 0, 'False', 'false', '0') else True
Helper function to convert string values to bool type.
Helper function to convert string values to bool type.
[ "Helper", "function", "to", "convert", "string", "values", "to", "bool", "type", "." ]
def bool_param(value): return False if value in (False, 0, 'False', 'false', '0') else True
[ "def", "bool_param", "(", "value", ")", ":", "return", "False", "if", "value", "in", "(", "False", ",", "0", ",", "'False'", ",", "'false'", ",", "'0'", ")", "else", "True" ]
Helper function to convert string values to bool type.
[ "Helper", "function", "to", "convert", "string", "values", "to", "bool", "type", "." ]
[ "\"\"\"\n Helper function to convert string values to bool type.\n \"\"\"" ]
[ { "param": "value", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "value", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def bool_param(value):
    """Interpret `value` as a boolean: False, 0 and the strings 'False',
    'false' and '0' map to False; every other value maps to True."""
    falsy_values = (False, 0, 'False', 'false', '0')
    return value not in falsy_values
196
1,006
752df89308491fcefb2c471834f66aebfdb1aa75
Nithinraot/pythontox
stringz/__init__.py
[ "MIT" ]
Python
count_characters
<not_specific>
def count_characters(string): """Return the amount of characters in the string.""" if type(string) is not str: raise ValueError("This is not a string") return len(string)
Return the amount of characters in the string.
Return the amount of characters in the string.
[ "Return", "the", "amount", "of", "characters", "in", "the", "string", "." ]
def count_characters(string): if type(string) is not str: raise ValueError("This is not a string") return len(string)
[ "def", "count_characters", "(", "string", ")", ":", "if", "type", "(", "string", ")", "is", "not", "str", ":", "raise", "ValueError", "(", "\"This is not a string\"", ")", "return", "len", "(", "string", ")" ]
Return the amount of characters in the string.
[ "Return", "the", "amount", "of", "characters", "in", "the", "string", "." ]
[ "\"\"\"Return the amount of characters in the string.\"\"\"" ]
[ { "param": "string", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "string", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def count_characters(string):
    """Return the number of characters in `string`.

    Raises ValueError for any non-str argument (exact type check, so str
    subclasses are rejected too, matching the original behavior).
    """
    if type(string) is str:
        return len(string)
    raise ValueError("This is not a string")
197
137
0ca358a0d1aeb186b33f989fdcce523ec2a72ae3
damogranlabs/dlpt
dlpt/json.py
[ "MIT" ]
Python
write
null
def write(data: Dict[str, Any], fPath: str, indent: int = 2, sortKeys: bool = True, *args): """ Write given data to a file in a JSON format. Args: data: serializable object to store to a file in JSON format. fPath: destination file path. indent: number of spaces to use while building file line indentation. sortKeys: if True, data keys are sorted alphabetically, else left unchanged. *args: `json.dump()` additional arguments. """ with open(fPath, 'w+') as fHandler: json.dump(data, fHandler, sort_keys=sortKeys, indent=indent)
Write given data to a file in a JSON format. Args: data: serializable object to store to a file in JSON format. fPath: destination file path. indent: number of spaces to use while building file line indentation. sortKeys: if True, data keys are sorted alphabetically, else left unchanged. *args: `json.dump()` additional arguments.
Write given data to a file in a JSON format.
[ "Write", "given", "data", "to", "a", "file", "in", "a", "JSON", "format", "." ]
def write(data: Dict[str, Any], fPath: str, indent: int = 2, sortKeys: bool = True, *args): with open(fPath, 'w+') as fHandler: json.dump(data, fHandler, sort_keys=sortKeys, indent=indent)
[ "def", "write", "(", "data", ":", "Dict", "[", "str", ",", "Any", "]", ",", "fPath", ":", "str", ",", "indent", ":", "int", "=", "2", ",", "sortKeys", ":", "bool", "=", "True", ",", "*", "args", ")", ":", "with", "open", "(", "fPath", ",", "'w+'", ")", "as", "fHandler", ":", "json", ".", "dump", "(", "data", ",", "fHandler", ",", "sort_keys", "=", "sortKeys", ",", "indent", "=", "indent", ")" ]
Write given data to a file in a JSON format.
[ "Write", "given", "data", "to", "a", "file", "in", "a", "JSON", "format", "." ]
[ "\"\"\" Write given data to a file in a JSON format.\n\n Args:\n data: serializable object to store to a file in JSON format. \n fPath: destination file path.\n indent: number of spaces to use while building file line indentation.\n sortKeys: if True, data keys are sorted alphabetically, else \n left unchanged.\n *args: `json.dump()` additional arguments.\n \"\"\"" ]
[ { "param": "data", "type": "Dict[str, Any]" }, { "param": "fPath", "type": "str" }, { "param": "indent", "type": "int" }, { "param": "sortKeys", "type": "bool" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": "Dict[str, Any]", "docstring": "serializable object to store to a file in JSON format.", "docstring_tokens": [ "serializable", "object", "to", "store", "to", "a", "file", "in", "JSON", "format", "." ], "default": null, "is_optional": null }, { "identifier": "fPath", "type": "str", "docstring": "destination file path.", "docstring_tokens": [ "destination", "file", "path", "." ], "default": null, "is_optional": null }, { "identifier": "indent", "type": "int", "docstring": "number of spaces to use while building file line indentation.", "docstring_tokens": [ "number", "of", "spaces", "to", "use", "while", "building", "file", "line", "indentation", "." ], "default": null, "is_optional": null }, { "identifier": "sortKeys", "type": "bool", "docstring": "if True, data keys are sorted alphabetically, else\nleft unchanged.", "docstring_tokens": [ "if", "True", "data", "keys", "are", "sorted", "alphabetically", "else", "left", "unchanged", "." ], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "*args", "type": null, "docstring": "`json.dump()` additional arguments.", "docstring_tokens": [ "`", "json", ".", "dump", "()", "`", "additional", "arguments", "." ], "default": null, "is_optional": null } ], "others": [] }
import json def write(data: Dict[str, Any], fPath: str, indent: int = 2, sortKeys: bool = True, *args): with open(fPath, 'w+') as fHandler: json.dump(data, fHandler, sort_keys=sortKeys, indent=indent)
198
357
a3b9a1b0f3ac451151395b039d12173cc6c0bbd1
rootasjey/oc-py-p4
views/tournament/add_player.py
[ "MIT" ]
Python
show_single_player
null
def show_single_player(player): """Format & display a single player to the console.""" print("------") print( f"• First name: {player.first_name}\n• Last name: {player.last_name}\n• Elo: {player.elo}\n• Sex: {player.sex}\n• Birth date: {player.birth_date}" ) print("------") print("")
Format & display a single player to the console.
Format & display a single player to the console.
[ "Format", "&", "display", "a", "single", "player", "to", "the", "console", "." ]
def show_single_player(player): print("------") print( f"• First name: {player.first_name}\n• Last name: {player.last_name}\n• Elo: {player.elo}\n• Sex: {player.sex}\n• Birth date: {player.birth_date}" ) print("------") print("")
[ "def", "show_single_player", "(", "player", ")", ":", "print", "(", "\"------\"", ")", "print", "(", "f\"• First name: {player.first_name}\\n• Last name: {player.last_name}\\n• Elo: {player.elo}\\n• Sex: {player.sex}\\n• Birth date: {player.birth_date}\"", ")", "print", "(", "\"------\"", ")", "print", "(", "\"\"", ")" ]
Format & display a single player to the console.
[ "Format", "&", "display", "a", "single", "player", "to", "the", "console", "." ]
[ "\"\"\"Format & display a single player to the console.\"\"\"" ]
[ { "param": "player", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "player", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def show_single_player(player): print("------") print( f"• First name: {player.first_name}\n• Last name: {player.last_name}\n• Elo: {player.elo}\n• Sex: {player.sex}\n• Birth date: {player.birth_date}" ) print("------") print("")
199
307
728d8a694d4b21c4c1082be2cfaa19ea53ea9f5d
Redmar-van-den-Berg/HiFi-assembly
scripts/pyBlast.py
[ "MIT" ]
Python
_minmax
<not_specific>
def _minmax(*args): """ Return the min and max of the input arguments """ min_ = min(*args) max_ = max(*args) return(min_, max_)
Return the min and max of the input arguments
Return the min and max of the input arguments
[ "Return", "the", "min", "and", "max", "of", "the", "input", "arguments" ]
def _minmax(*args): min_ = min(*args) max_ = max(*args) return(min_, max_)
[ "def", "_minmax", "(", "*", "args", ")", ":", "min_", "=", "min", "(", "*", "args", ")", "max_", "=", "max", "(", "*", "args", ")", "return", "(", "min_", ",", "max_", ")" ]
Return the min and max of the input arguments
[ "Return", "the", "min", "and", "max", "of", "the", "input", "arguments" ]
[ "\"\"\" Return the min and max of the input arguments \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def _minmax(*args): min_ = min(*args) max_ = max(*args) return(min_, max_)
200
836
d97d793f7717c0cc65c00e8e10e88d008b072e96
tjmoran/Formation
hoverset/util/execution.py
[ "MIT" ]
Python
as_thread
<not_specific>
def as_thread(func): """ Run the function in a separate thread :param func: the function to be executed in a separate thread :return: wrapped function """ @functools.wraps(func) def wrap(*args, **kwargs): threading.Thread(target=func, args=args, kwargs=kwargs).start() return wrap
Run the function in a separate thread :param func: the function to be executed in a separate thread :return: wrapped function
Run the function in a separate thread
[ "Run", "the", "function", "in", "a", "separate", "thread" ]
def as_thread(func): @functools.wraps(func) def wrap(*args, **kwargs): threading.Thread(target=func, args=args, kwargs=kwargs).start() return wrap
[ "def", "as_thread", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrap", "(", "*", "args", ",", "**", "kwargs", ")", ":", "threading", ".", "Thread", "(", "target", "=", "func", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")", ".", "start", "(", ")", "return", "wrap" ]
Run the function in a separate thread
[ "Run", "the", "function", "in", "a", "separate", "thread" ]
[ "\"\"\"\n Run the function in a separate thread\n :param func: the function to be executed in a separate thread\n :return: wrapped function\n \"\"\"" ]
[ { "param": "func", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "func", "type": null, "docstring": "the function to be executed in a separate thread", "docstring_tokens": [ "the", "function", "to", "be", "executed", "in", "a", "separate", "thread" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import functools import threading def as_thread(func): @functools.wraps(func) def wrap(*args, **kwargs): threading.Thread(target=func, args=args, kwargs=kwargs).start() return wrap
201
183
b11278a9c7dfa20e279b3cf0b8f08bfee67dbab2
agdsn/sipa
sipa/mail.py
[ "MIT" ]
Python
compose_body
<not_specific>
def compose_body(message: str, header: Optional[Dict[str, Any]] = None): """Prepend additional information to a message. :param message: :param header: Dict of the additional "key: value" entries to be prepended to the mail. :returns: The composed body """ if not header: return message serialized_header = "\n".join(f"{k}: {v}" for k, v in header.items()) return f"{serialized_header}\n\n{message}"
Prepend additional information to a message. :param message: :param header: Dict of the additional "key: value" entries to be prepended to the mail. :returns: The composed body
Prepend additional information to a message.
[ "Prepend", "additional", "information", "to", "a", "message", "." ]
def compose_body(message: str, header: Optional[Dict[str, Any]] = None): if not header: return message serialized_header = "\n".join(f"{k}: {v}" for k, v in header.items()) return f"{serialized_header}\n\n{message}"
[ "def", "compose_body", "(", "message", ":", "str", ",", "header", ":", "Optional", "[", "Dict", "[", "str", ",", "Any", "]", "]", "=", "None", ")", ":", "if", "not", "header", ":", "return", "message", "serialized_header", "=", "\"\\n\"", ".", "join", "(", "f\"{k}: {v}\"", "for", "k", ",", "v", "in", "header", ".", "items", "(", ")", ")", "return", "f\"{serialized_header}\\n\\n{message}\"" ]
Prepend additional information to a message.
[ "Prepend", "additional", "information", "to", "a", "message", "." ]
[ "\"\"\"Prepend additional information to a message.\n\n :param message:\n :param header: Dict of the additional \"key: value\"\n entries to be prepended to the mail.\n\n :returns: The composed body\n \"\"\"" ]
[ { "param": "message", "type": "str" }, { "param": "header", "type": "Optional[Dict[str, Any]]" } ]
{ "returns": [ { "docstring": "The composed body", "docstring_tokens": [ "The", "composed", "body" ], "type": null } ], "raises": [], "params": [ { "identifier": "message", "type": "str", "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "header", "type": "Optional[Dict[str, Any]]", "docstring": "Dict of the additional \"key: value\"\nentries to be prepended to the mail.", "docstring_tokens": [ "Dict", "of", "the", "additional", "\"", "key", ":", "value", "\"", "entries", "to", "be", "prepended", "to", "the", "mail", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def compose_body(message: str, header: Optional[Dict[str, Any]] = None): if not header: return message serialized_header = "\n".join(f"{k}: {v}" for k, v in header.items()) return f"{serialized_header}\n\n{message}"
202
307
f98828ae4464d7a478c76136008a0318867c8888
jdswalker/Advent-of-Code-2015
advent_of_code/solvers/day_16.py
[ "MIT" ]
Python
_get_aunts_with_detail_gt
<not_specific>
def _get_aunts_with_detail_gt(aunts, detail, target): """Get aunts without the detail or with a value greater than the target Args: aunts (dict): Stores remembered details about each Aunt Sue detail (str): Check aunts based on this remembered detail target (int): Lower limit for the detail value of the correct Aunt Returns: dict: Aunts without the detail or with a detail value > target """ return { aunt: memory for aunt, memory in aunts.items() if detail not in memory or memory[detail] > target }
Get aunts without the detail or with a value greater than the target Args: aunts (dict): Stores remembered details about each Aunt Sue detail (str): Check aunts based on this remembered detail target (int): Lower limit for the detail value of the correct Aunt Returns: dict: Aunts without the detail or with a detail value > target
Get aunts without the detail or with a value greater than the target
[ "Get", "aunts", "without", "the", "detail", "or", "with", "a", "value", "greater", "than", "the", "target" ]
def _get_aunts_with_detail_gt(aunts, detail, target): return { aunt: memory for aunt, memory in aunts.items() if detail not in memory or memory[detail] > target }
[ "def", "_get_aunts_with_detail_gt", "(", "aunts", ",", "detail", ",", "target", ")", ":", "return", "{", "aunt", ":", "memory", "for", "aunt", ",", "memory", "in", "aunts", ".", "items", "(", ")", "if", "detail", "not", "in", "memory", "or", "memory", "[", "detail", "]", ">", "target", "}" ]
Get aunts without the detail or with a value greater than the target
[ "Get", "aunts", "without", "the", "detail", "or", "with", "a", "value", "greater", "than", "the", "target" ]
[ "\"\"\"Get aunts without the detail or with a value greater than the target\n\n Args:\n aunts (dict): Stores remembered details about each Aunt Sue\n detail (str): Check aunts based on this remembered detail\n target (int): Lower limit for the detail value of the correct Aunt\n Returns:\n dict: Aunts without the detail or with a detail value > target\n \"\"\"" ]
[ { "param": "aunts", "type": null }, { "param": "detail", "type": null }, { "param": "target", "type": null } ]
{ "returns": [ { "docstring": "Aunts without the detail or with a detail value > target", "docstring_tokens": [ "Aunts", "without", "the", "detail", "or", "with", "a", "detail", "value", ">", "target" ], "type": "dict" } ], "raises": [], "params": [ { "identifier": "aunts", "type": null, "docstring": "Stores remembered details about each Aunt Sue", "docstring_tokens": [ "Stores", "remembered", "details", "about", "each", "Aunt", "Sue" ], "default": null, "is_optional": false }, { "identifier": "detail", "type": null, "docstring": "Check aunts based on this remembered detail", "docstring_tokens": [ "Check", "aunts", "based", "on", "this", "remembered", "detail" ], "default": null, "is_optional": false }, { "identifier": "target", "type": null, "docstring": "Lower limit for the detail value of the correct Aunt", "docstring_tokens": [ "Lower", "limit", "for", "the", "detail", "value", "of", "the", "correct", "Aunt" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def _get_aunts_with_detail_gt(aunts, detail, target): return { aunt: memory for aunt, memory in aunts.items() if detail not in memory or memory[detail] > target }
203
793
b02e42e8ee7aecd0a6eb07e3eadb33d854800223
markkvdb/pyscihub
pyscihub/cli.py
[ "MIT" ]
Python
cli
null
def cli(ctx, output, verbose): """CLI to download PDFs from Sci-Hub.""" ctx.ensure_object(dict) ctx.obj["OUTPUT"] = output ctx.obj["VERBOSE"] = verbose
CLI to download PDFs from Sci-Hub.
CLI to download PDFs from Sci-Hub.
[ "CLI", "to", "download", "PDFs", "from", "Sci", "-", "Hub", "." ]
def cli(ctx, output, verbose): ctx.ensure_object(dict) ctx.obj["OUTPUT"] = output ctx.obj["VERBOSE"] = verbose
[ "def", "cli", "(", "ctx", ",", "output", ",", "verbose", ")", ":", "ctx", ".", "ensure_object", "(", "dict", ")", "ctx", ".", "obj", "[", "\"OUTPUT\"", "]", "=", "output", "ctx", ".", "obj", "[", "\"VERBOSE\"", "]", "=", "verbose" ]
CLI to download PDFs from Sci-Hub.
[ "CLI", "to", "download", "PDFs", "from", "Sci", "-", "Hub", "." ]
[ "\"\"\"CLI to download PDFs from Sci-Hub.\"\"\"" ]
[ { "param": "ctx", "type": null }, { "param": "output", "type": null }, { "param": "verbose", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ctx", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "output", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "verbose", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def cli(ctx, output, verbose): ctx.ensure_object(dict) ctx.obj["OUTPUT"] = output ctx.obj["VERBOSE"] = verbose
204
1,016
ad282837bde39ae6a8e7a4a227789f7d4566e59a
qkrguswn2401/dstc8-meta-dialog
mldc/preprocessing/featurizer.py
[ "MIT" ]
Python
no_progress
<not_specific>
def no_progress(): """ determines if we want to see progressbars in the output do not show progress bars if: - if we aren't on an interactive terminal or - the user wants verbose logging """ return False return not sys.stdout.isatty()
determines if we want to see progressbars in the output do not show progress bars if: - if we aren't on an interactive terminal or - the user wants verbose logging
determines if we want to see progressbars in the output do not show progress bars if: if we aren't on an interactive terminal or the user wants verbose logging
[ "determines", "if", "we", "want", "to", "see", "progressbars", "in", "the", "output", "do", "not", "show", "progress", "bars", "if", ":", "if", "we", "aren", "'", "t", "on", "an", "interactive", "terminal", "or", "the", "user", "wants", "verbose", "logging" ]
def no_progress(): return False return not sys.stdout.isatty()
[ "def", "no_progress", "(", ")", ":", "return", "False", "return", "not", "sys", ".", "stdout", ".", "isatty", "(", ")" ]
determines if we want to see progressbars in the output do not show progress bars if: if we aren't on an interactive terminal or the user wants verbose logging
[ "determines", "if", "we", "want", "to", "see", "progressbars", "in", "the", "output", "do", "not", "show", "progress", "bars", "if", ":", "if", "we", "aren", "'", "t", "on", "an", "interactive", "terminal", "or", "the", "user", "wants", "verbose", "logging" ]
[ "\"\"\"\n determines if we want to see progressbars in the output\n\n do not show progress bars if:\n - if we aren't on an interactive terminal or\n - the user wants verbose logging\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import sys def no_progress(): return False return not sys.stdout.isatty()
205
293
7f20e655ce92a0699469b8c404222188dbd6490c
Mati86/sonic-mgmt
tests/common/ixia/qos_fixtures.py
[ "Apache-2.0" ]
Python
prio_dscp_map
<not_specific>
def prio_dscp_map(duthosts, rand_one_dut_hostname): """ This fixture reads the QOS parameters from SONiC DUT, and creates priority Vs. DSCP priority port map Args: duthosts (pytest fixture) : list of DUTs rand_one_dut_hostname (pytest fixture): DUT hostname Returns: Priority vs. DSCP map (dictionary, key = priority). Example: {0: [0], 1: [1], 2: [2], 3: [3], 4: [4] ....} """ duthost = duthosts[rand_one_dut_hostname] config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] if "DSCP_TO_TC_MAP" not in config_facts.keys(): return None dscp_to_tc_map_lists = config_facts["DSCP_TO_TC_MAP"] if len(dscp_to_tc_map_lists) != 1: return None profile = dscp_to_tc_map_lists.keys()[0] dscp_to_tc_map = dscp_to_tc_map_lists[profile] result = {} for dscp in dscp_to_tc_map: tc = int(dscp_to_tc_map[dscp]) result.setdefault(tc, []).append(int(dscp)) return result
This fixture reads the QOS parameters from SONiC DUT, and creates priority Vs. DSCP priority port map Args: duthosts (pytest fixture) : list of DUTs rand_one_dut_hostname (pytest fixture): DUT hostname Returns: Priority vs. DSCP map (dictionary, key = priority). Example: {0: [0], 1: [1], 2: [2], 3: [3], 4: [4] ....}
This fixture reads the QOS parameters from SONiC DUT, and creates priority Vs. DSCP priority port map
[ "This", "fixture", "reads", "the", "QOS", "parameters", "from", "SONiC", "DUT", "and", "creates", "priority", "Vs", ".", "DSCP", "priority", "port", "map" ]
def prio_dscp_map(duthosts, rand_one_dut_hostname): duthost = duthosts[rand_one_dut_hostname] config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] if "DSCP_TO_TC_MAP" not in config_facts.keys(): return None dscp_to_tc_map_lists = config_facts["DSCP_TO_TC_MAP"] if len(dscp_to_tc_map_lists) != 1: return None profile = dscp_to_tc_map_lists.keys()[0] dscp_to_tc_map = dscp_to_tc_map_lists[profile] result = {} for dscp in dscp_to_tc_map: tc = int(dscp_to_tc_map[dscp]) result.setdefault(tc, []).append(int(dscp)) return result
[ "def", "prio_dscp_map", "(", "duthosts", ",", "rand_one_dut_hostname", ")", ":", "duthost", "=", "duthosts", "[", "rand_one_dut_hostname", "]", "config_facts", "=", "duthost", ".", "config_facts", "(", "host", "=", "duthost", ".", "hostname", ",", "source", "=", "\"running\"", ")", "[", "'ansible_facts'", "]", "if", "\"DSCP_TO_TC_MAP\"", "not", "in", "config_facts", ".", "keys", "(", ")", ":", "return", "None", "dscp_to_tc_map_lists", "=", "config_facts", "[", "\"DSCP_TO_TC_MAP\"", "]", "if", "len", "(", "dscp_to_tc_map_lists", ")", "!=", "1", ":", "return", "None", "profile", "=", "dscp_to_tc_map_lists", ".", "keys", "(", ")", "[", "0", "]", "dscp_to_tc_map", "=", "dscp_to_tc_map_lists", "[", "profile", "]", "result", "=", "{", "}", "for", "dscp", "in", "dscp_to_tc_map", ":", "tc", "=", "int", "(", "dscp_to_tc_map", "[", "dscp", "]", ")", "result", ".", "setdefault", "(", "tc", ",", "[", "]", ")", ".", "append", "(", "int", "(", "dscp", ")", ")", "return", "result" ]
This fixture reads the QOS parameters from SONiC DUT, and creates priority Vs. DSCP priority port map
[ "This", "fixture", "reads", "the", "QOS", "parameters", "from", "SONiC", "DUT", "and", "creates", "priority", "Vs", ".", "DSCP", "priority", "port", "map" ]
[ "\"\"\"\n This fixture reads the QOS parameters from SONiC DUT, and creates\n priority Vs. DSCP priority port map\n\n Args:\n duthosts (pytest fixture) : list of DUTs\n rand_one_dut_hostname (pytest fixture): DUT hostname\n\n Returns:\n Priority vs. DSCP map (dictionary, key = priority).\n Example: {0: [0], 1: [1], 2: [2], 3: [3], 4: [4] ....}\n \"\"\"" ]
[ { "param": "duthosts", "type": null }, { "param": "rand_one_dut_hostname", "type": null } ]
{ "returns": [ { "docstring": "Priority vs. DSCP map (dictionary, key = priority).", "docstring_tokens": [ "Priority", "vs", ".", "DSCP", "map", "(", "dictionary", "key", "=", "priority", ")", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "duthosts", "type": null, "docstring": "list of DUTs", "docstring_tokens": [ "list", "of", "DUTs" ], "default": null, "is_optional": false }, { "identifier": "rand_one_dut_hostname", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def prio_dscp_map(duthosts, rand_one_dut_hostname): duthost = duthosts[rand_one_dut_hostname] config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] if "DSCP_TO_TC_MAP" not in config_facts.keys(): return None dscp_to_tc_map_lists = config_facts["DSCP_TO_TC_MAP"] if len(dscp_to_tc_map_lists) != 1: return None profile = dscp_to_tc_map_lists.keys()[0] dscp_to_tc_map = dscp_to_tc_map_lists[profile] result = {} for dscp in dscp_to_tc_map: tc = int(dscp_to_tc_map[dscp]) result.setdefault(tc, []).append(int(dscp)) return result
206
146
79ba399c117d1cd7eb919d3bb5e3b5eb610b01c5
coldrye-collaboration/feincms3
feincms3/regions.py
[ "BSD-3-Clause" ]
Python
from_contents
<not_specific>
def from_contents(cls, contents, *, renderer, **kwargs): """ Create and return a regions instance using the bare minimum of a contents instance and a renderer. Additional keyword arguments are forwarded to the regions constructor. """ return cls(contents=contents, renderer=renderer, **kwargs)
Create and return a regions instance using the bare minimum of a contents instance and a renderer. Additional keyword arguments are forwarded to the regions constructor.
Create and return a regions instance using the bare minimum of a contents instance and a renderer. Additional keyword arguments are forwarded to the regions constructor.
[ "Create", "and", "return", "a", "regions", "instance", "using", "the", "bare", "minimum", "of", "a", "contents", "instance", "and", "a", "renderer", ".", "Additional", "keyword", "arguments", "are", "forwarded", "to", "the", "regions", "constructor", "." ]
def from_contents(cls, contents, *, renderer, **kwargs): return cls(contents=contents, renderer=renderer, **kwargs)
[ "def", "from_contents", "(", "cls", ",", "contents", ",", "*", ",", "renderer", ",", "**", "kwargs", ")", ":", "return", "cls", "(", "contents", "=", "contents", ",", "renderer", "=", "renderer", ",", "**", "kwargs", ")" ]
Create and return a regions instance using the bare minimum of a contents instance and a renderer.
[ "Create", "and", "return", "a", "regions", "instance", "using", "the", "bare", "minimum", "of", "a", "contents", "instance", "and", "a", "renderer", "." ]
[ "\"\"\"\n Create and return a regions instance using the bare minimum of a\n contents instance and a renderer. Additional keyword arguments are\n forwarded to the regions constructor.\n \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "contents", "type": null }, { "param": "renderer", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "contents", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "renderer", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def from_contents(cls, contents, *, renderer, **kwargs): return cls(contents=contents, renderer=renderer, **kwargs)
207
238
f0df8daa55461bc2328c0fd49de5f1b3f5ee909f
sbabashahi/backend
utilities/utilities.py
[ "MIT" ]
Python
jwt_get_secret_key
<not_specific>
def jwt_get_secret_key(user): """ Use this in generating and checking JWT token, and when logout jwt_secret will change so previous JWT token wil be invalidate :param user: :return: """ return user.profile.jwt_secret
Use this in generating and checking JWT token, and when logout jwt_secret will change so previous JWT token wil be invalidate :param user: :return:
Use this in generating and checking JWT token, and when logout jwt_secret will change so previous JWT token wil be invalidate
[ "Use", "this", "in", "generating", "and", "checking", "JWT", "token", "and", "when", "logout", "jwt_secret", "will", "change", "so", "previous", "JWT", "token", "wil", "be", "invalidate" ]
def jwt_get_secret_key(user): return user.profile.jwt_secret
[ "def", "jwt_get_secret_key", "(", "user", ")", ":", "return", "user", ".", "profile", ".", "jwt_secret" ]
Use this in generating and checking JWT token, and when logout jwt_secret will change so previous JWT token wil be invalidate
[ "Use", "this", "in", "generating", "and", "checking", "JWT", "token", "and", "when", "logout", "jwt_secret", "will", "change", "so", "previous", "JWT", "token", "wil", "be", "invalidate" ]
[ "\"\"\"\n Use this in generating and checking JWT token,\n and when logout jwt_secret will change so previous JWT token wil be invalidate\n :param user:\n :return:\n \"\"\"" ]
[ { "param": "user", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "user", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def jwt_get_secret_key(user): return user.profile.jwt_secret
208
1,010
4107dc73b7b61e25b8519db94cb6f686dec4fa50
JustNao/DofusHelper
modules/hdvMissingItems.py
[ "MIT" ]
Python
printProgressBar
null
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"): global requestCount """ Call in a loop to create terminal progress bar @params: iteration - Required : current iteration (Int) total - Required : total iterations (Int) prefix - Optional : prefix string (Str) suffix - Optional : suffix string (Str) decimals - Optional : positive number of decimals in percent complete (Int) length - Optional : character length of bar (Int) fill - Optional : bar fill character (Str) printEnd - Optional : end character (e.g. "\r", "\r\n") (Str) """ requestCount += 1 percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total))) filledLength = int(length * iteration // total) bar = fill * filledLength + '-' * (length - filledLength) print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd) # Print New Line on Complete if iteration == total: print()
Call in a loop to create terminal progress bar @params: iteration - Required : current iteration (Int) total - Required : total iterations (Int) prefix - Optional : prefix string (Str) suffix - Optional : suffix string (Str) decimals - Optional : positive number of decimals in percent complete (Int) length - Optional : character length of bar (Int) fill - Optional : bar fill character (Str) printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
Call in a loop to create terminal progress bar
[ "Call", "in", "a", "loop", "to", "create", "terminal", "progress", "bar" ]
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"): global requestCount requestCount += 1 percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total))) filledLength = int(length * iteration // total) bar = fill * filledLength + '-' * (length - filledLength) print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd) if iteration == total: print()
[ "def", "printProgressBar", "(", "iteration", ",", "total", ",", "prefix", "=", "''", ",", "suffix", "=", "''", ",", "decimals", "=", "1", ",", "length", "=", "100", ",", "fill", "=", "'█', ", "p", "intEnd =", "\"", "r\"):", "\r", "", "global", "requestCount", "requestCount", "+=", "1", "percent", "=", "(", "\"{0:.\"", "+", "str", "(", "decimals", ")", "+", "\"f}\"", ")", ".", "format", "(", "100", "*", "(", "iteration", "/", "float", "(", "total", ")", ")", ")", "filledLength", "=", "int", "(", "length", "*", "iteration", "//", "total", ")", "bar", "=", "fill", "*", "filledLength", "+", "'-'", "*", "(", "length", "-", "filledLength", ")", "print", "(", "f'\\r{prefix} |{bar}| {percent}% {suffix}'", ",", "end", "=", "printEnd", ")", "if", "iteration", "==", "total", ":", "print", "(", ")" ]
Call in a loop to create terminal progress bar
[ "Call", "in", "a", "loop", "to", "create", "terminal", "progress", "bar" ]
[ "\"\"\"\r\n Call in a loop to create terminal progress bar\r\n @params:\r\n iteration - Required : current iteration (Int)\r\n total - Required : total iterations (Int)\r\n prefix - Optional : prefix string (Str)\r\n suffix - Optional : suffix string (Str)\r\n decimals - Optional : positive number of decimals in percent complete (Int)\r\n length - Optional : character length of bar (Int)\r\n fill - Optional : bar fill character (Str)\r\n printEnd - Optional : end character (e.g. \"\\r\", \"\\r\\n\") (Str)\r\n \"\"\"", "# Print New Line on Complete\r" ]
[ { "param": "iteration", "type": null }, { "param": "total", "type": null }, { "param": "prefix", "type": null }, { "param": "suffix", "type": null }, { "param": "decimals", "type": null }, { "param": "length", "type": null }, { "param": "fill", "type": null }, { "param": "printEnd", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "iteration", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "total", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "prefix", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "suffix", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "decimals", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "length", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "fill", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "printEnd", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [ { "identifier": "params", "docstring": "iteration - Required : current iteration (Int)\ntotal - Required : total iterations (Int)\nprefix - Optional : prefix string (Str)\nsuffix - Optional : suffix string (Str)\ndecimals - Optional : positive number of decimals in percent complete (Int)\nlength - Optional : character length of bar (Int)\nfill - Optional : bar fill character (Str)\nprintEnd - Optional : end character (Str)", "docstring_tokens": [ "iteration", "-", "Required", ":", "current", "iteration", "(", "Int", ")", "total", "-", "Required", ":", "total", "iterations", "(", "Int", ")", "prefix", "-", "Optional", ":", "prefix", "string", "(", "Str", ")", "suffix", "-", "Optional", ":", "suffix", "string", "(", "Str", ")", "decimals", "-", "Optional", ":", "positive", "number", "of", "decimals", "in", "percent", "complete", "(", "Int", ")", "length", "-", "Optional", ":", "character", "length", "of", "bar", "(", 
"Int", ")", "fill", "-", "Optional", ":", "bar", "fill", "character", "(", "Str", ")", "printEnd", "-", "Optional", ":", "end", "character", "(", "Str", ")" ] } ] }
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"): global requestCount requestCount += 1 percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total))) filledLength = int(length * iteration // total) bar = fill * filledLength + '-' * (length - filledLength) print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd) if iteration == total: print()
209
634
afa3172084270b8bfaa80d5d1a82df4c00dc739d
datalogics-cgreen/server_core
classifier.py
[ "Apache-2.0" ]
Python
and_up
<not_specific>
def and_up(cls, young, keyword): """Encapsulates the logic of what "[x] and up" actually means. Given the lower end of an age range, tries to determine the upper end of the range. """ if young is None: return None if not any( [keyword.endswith(x) for x in ("and up", "and up.", "+", "+.") ] ): return None if young >= 18: old = young elif young >= 12: # "12 and up", "14 and up", etc. are # generally intended to cover the entire # YA span. old = 17 elif young >= 8: # "8 and up" means something like "8-12" old = young + 4 else: # Whereas "3 and up" really means more # like "3 to 5". old = young + 2 return old
Encapsulates the logic of what "[x] and up" actually means. Given the lower end of an age range, tries to determine the upper end of the range.
Encapsulates the logic of what "[x] and up" actually means. Given the lower end of an age range, tries to determine the upper end of the range.
[ "Encapsulates", "the", "logic", "of", "what", "\"", "[", "x", "]", "and", "up", "\"", "actually", "means", ".", "Given", "the", "lower", "end", "of", "an", "age", "range", "tries", "to", "determine", "the", "upper", "end", "of", "the", "range", "." ]
def and_up(cls, young, keyword): if young is None: return None if not any( [keyword.endswith(x) for x in ("and up", "and up.", "+", "+.") ] ): return None if young >= 18: old = young elif young >= 12: old = 17 elif young >= 8: old = young + 4 else: old = young + 2 return old
[ "def", "and_up", "(", "cls", ",", "young", ",", "keyword", ")", ":", "if", "young", "is", "None", ":", "return", "None", "if", "not", "any", "(", "[", "keyword", ".", "endswith", "(", "x", ")", "for", "x", "in", "(", "\"and up\"", ",", "\"and up.\"", ",", "\"+\"", ",", "\"+.\"", ")", "]", ")", ":", "return", "None", "if", "young", ">=", "18", ":", "old", "=", "young", "elif", "young", ">=", "12", ":", "old", "=", "17", "elif", "young", ">=", "8", ":", "old", "=", "young", "+", "4", "else", ":", "old", "=", "young", "+", "2", "return", "old" ]
Encapsulates the logic of what "[x] and up" actually means.
[ "Encapsulates", "the", "logic", "of", "what", "\"", "[", "x", "]", "and", "up", "\"", "actually", "means", "." ]
[ "\"\"\"Encapsulates the logic of what \"[x] and up\" actually means.\n\n Given the lower end of an age range, tries to determine the\n upper end of the range.\n \"\"\"", "# \"12 and up\", \"14 and up\", etc. are", "# generally intended to cover the entire", "# YA span.", "# \"8 and up\" means something like \"8-12\"", "# Whereas \"3 and up\" really means more", "# like \"3 to 5\"." ]
[ { "param": "cls", "type": null }, { "param": "young", "type": null }, { "param": "keyword", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "young", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "keyword", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def and_up(cls, young, keyword): if young is None: return None if not any( [keyword.endswith(x) for x in ("and up", "and up.", "+", "+.") ] ): return None if young >= 18: old = young elif young >= 12: old = 17 elif young >= 8: old = young + 4 else: old = young + 2 return old
210
613
762afb689c07f82cb9b864c2768c5ec49c037b18
neighthan/singleshotpose
singleshotpose/multi_obj_pose_estimation/train_multi.py
[ "MIT" ]
Python
adjust_learning_rate
<not_specific>
def adjust_learning_rate(optimizer, batch, learning_rate, steps, scales, batch_size): """ Modifies `optimizer`'s learning rate(s) directly and returns the new lr. """ lr = learning_rate for i in range(len(steps)): scale = scales[i] if i < len(scales) else 1 if batch >= steps[i]: lr = lr * scale if batch == steps[i]: break else: break for param_group in optimizer.param_groups: param_group["lr"] = lr / batch_size return lr
Modifies `optimizer`'s learning rate(s) directly and returns the new lr.
Modifies `optimizer`'s learning rate(s) directly and returns the new lr.
[ "Modifies", "`", "optimizer", "`", "'", "s", "learning", "rate", "(", "s", ")", "directly", "and", "returns", "the", "new", "lr", "." ]
def adjust_learning_rate(optimizer, batch, learning_rate, steps, scales, batch_size): lr = learning_rate for i in range(len(steps)): scale = scales[i] if i < len(scales) else 1 if batch >= steps[i]: lr = lr * scale if batch == steps[i]: break else: break for param_group in optimizer.param_groups: param_group["lr"] = lr / batch_size return lr
[ "def", "adjust_learning_rate", "(", "optimizer", ",", "batch", ",", "learning_rate", ",", "steps", ",", "scales", ",", "batch_size", ")", ":", "lr", "=", "learning_rate", "for", "i", "in", "range", "(", "len", "(", "steps", ")", ")", ":", "scale", "=", "scales", "[", "i", "]", "if", "i", "<", "len", "(", "scales", ")", "else", "1", "if", "batch", ">=", "steps", "[", "i", "]", ":", "lr", "=", "lr", "*", "scale", "if", "batch", "==", "steps", "[", "i", "]", ":", "break", "else", ":", "break", "for", "param_group", "in", "optimizer", ".", "param_groups", ":", "param_group", "[", "\"lr\"", "]", "=", "lr", "/", "batch_size", "return", "lr" ]
Modifies `optimizer`'s learning rate(s) directly and returns the new lr.
[ "Modifies", "`", "optimizer", "`", "'", "s", "learning", "rate", "(", "s", ")", "directly", "and", "returns", "the", "new", "lr", "." ]
[ "\"\"\"\n Modifies `optimizer`'s learning rate(s) directly and returns the new lr.\n \"\"\"" ]
[ { "param": "optimizer", "type": null }, { "param": "batch", "type": null }, { "param": "learning_rate", "type": null }, { "param": "steps", "type": null }, { "param": "scales", "type": null }, { "param": "batch_size", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "optimizer", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "batch", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "learning_rate", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "steps", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "scales", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "batch_size", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def adjust_learning_rate(optimizer, batch, learning_rate, steps, scales, batch_size): lr = learning_rate for i in range(len(steps)): scale = scales[i] if i < len(scales) else 1 if batch >= steps[i]: lr = lr * scale if batch == steps[i]: break else: break for param_group in optimizer.param_groups: param_group["lr"] = lr / batch_size return lr
211
975
54d0950e030699802baf5af83854b68fbe485b67
bunjiboys/lemur
lemur/plugins/lemur_digicert/plugin.py
[ "Apache-2.0" ]
Python
handle_cis_response
<not_specific>
def handle_cis_response(response): """ Handle the DigiCert CIS API response and any errors it might have experienced. :param response: :return: """ if response.status_code > 399: raise Exception(response.json()['errors'][0]['message']) return response.json()
Handle the DigiCert CIS API response and any errors it might have experienced. :param response: :return:
Handle the DigiCert CIS API response and any errors it might have experienced.
[ "Handle", "the", "DigiCert", "CIS", "API", "response", "and", "any", "errors", "it", "might", "have", "experienced", "." ]
def handle_cis_response(response): if response.status_code > 399: raise Exception(response.json()['errors'][0]['message']) return response.json()
[ "def", "handle_cis_response", "(", "response", ")", ":", "if", "response", ".", "status_code", ">", "399", ":", "raise", "Exception", "(", "response", ".", "json", "(", ")", "[", "'errors'", "]", "[", "0", "]", "[", "'message'", "]", ")", "return", "response", ".", "json", "(", ")" ]
Handle the DigiCert CIS API response and any errors it might have experienced.
[ "Handle", "the", "DigiCert", "CIS", "API", "response", "and", "any", "errors", "it", "might", "have", "experienced", "." ]
[ "\"\"\"\n Handle the DigiCert CIS API response and any errors it might have experienced.\n :param response:\n :return:\n \"\"\"" ]
[ { "param": "response", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "response", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def handle_cis_response(response): if response.status_code > 399: raise Exception(response.json()['errors'][0]['message']) return response.json()
212
819
dfb36503f6f0f1f93cb072de3b6a16d109ae3f95
idkidknow/repeater_analyzer
repeater_analyzer.py
[ "MIT" ]
Python
message_filter
<not_specific>
def message_filter(msg): """Return True if the message cannot be considered as a repeat.""" if msg == '' or '--' in msg or 'List:\n' in msg: return True return False
Return True if the message cannot be considered as a repeat.
Return True if the message cannot be considered as a repeat.
[ "Return", "True", "if", "the", "message", "cannot", "be", "considered", "as", "a", "repeat", "." ]
def message_filter(msg): if msg == '' or '--' in msg or 'List:\n' in msg: return True return False
[ "def", "message_filter", "(", "msg", ")", ":", "if", "msg", "==", "''", "or", "'--'", "in", "msg", "or", "'List:\\n'", "in", "msg", ":", "return", "True", "return", "False" ]
Return True if the message cannot be considered as a repeat.
[ "Return", "True", "if", "the", "message", "cannot", "be", "considered", "as", "a", "repeat", "." ]
[ "\"\"\"Return True if the message cannot be considered as a repeat.\"\"\"" ]
[ { "param": "msg", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "msg", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def message_filter(msg): if msg == '' or '--' in msg or 'List:\n' in msg: return True return False
213
137
ce30168d742cc09b3ea1ca5e1da4cba007236a5c
reiniervlinschoten/castoredc_api
castoredc_api/importer/helpers.py
[ "MIT" ]
Python
castorize_num_column
<not_specific>
def castorize_num_column(data: list, target_field: "CastorField"): """Castorizes a numeric column and replaces errors with 'Error'.""" new_list = [] for datapoint in data: if datapoint is None: new_list.append(None) else: try: # Test if data point is convertible to float numeric_datapoint = float(datapoint) # Test if between bounds if target_field.field_max > numeric_datapoint > target_field.field_min: new_list.append(datapoint) else: new_list.append("Error: number out of bounds") except ValueError: new_list.append("Error: not a number") return new_list
Castorizes a numeric column and replaces errors with 'Error'.
Castorizes a numeric column and replaces errors with 'Error'.
[ "Castorizes", "a", "numeric", "column", "and", "replaces", "errors", "with", "'", "Error", "'", "." ]
def castorize_num_column(data: list, target_field: "CastorField"): new_list = [] for datapoint in data: if datapoint is None: new_list.append(None) else: try: numeric_datapoint = float(datapoint) if target_field.field_max > numeric_datapoint > target_field.field_min: new_list.append(datapoint) else: new_list.append("Error: number out of bounds") except ValueError: new_list.append("Error: not a number") return new_list
[ "def", "castorize_num_column", "(", "data", ":", "list", ",", "target_field", ":", "\"CastorField\"", ")", ":", "new_list", "=", "[", "]", "for", "datapoint", "in", "data", ":", "if", "datapoint", "is", "None", ":", "new_list", ".", "append", "(", "None", ")", "else", ":", "try", ":", "numeric_datapoint", "=", "float", "(", "datapoint", ")", "if", "target_field", ".", "field_max", ">", "numeric_datapoint", ">", "target_field", ".", "field_min", ":", "new_list", ".", "append", "(", "datapoint", ")", "else", ":", "new_list", ".", "append", "(", "\"Error: number out of bounds\"", ")", "except", "ValueError", ":", "new_list", ".", "append", "(", "\"Error: not a number\"", ")", "return", "new_list" ]
Castorizes a numeric column and replaces errors with 'Error'.
[ "Castorizes", "a", "numeric", "column", "and", "replaces", "errors", "with", "'", "Error", "'", "." ]
[ "\"\"\"Castorizes a numeric column and replaces errors with 'Error'.\"\"\"", "# Test if data point is convertible to float", "# Test if between bounds" ]
[ { "param": "data", "type": "list" }, { "param": "target_field", "type": "\"CastorField\"" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": "list", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "target_field", "type": "\"CastorField\"", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def castorize_num_column(data: list, target_field: "CastorField"): new_list = [] for datapoint in data: if datapoint is None: new_list.append(None) else: try: numeric_datapoint = float(datapoint) if target_field.field_max > numeric_datapoint > target_field.field_min: new_list.append(datapoint) else: new_list.append("Error: number out of bounds") except ValueError: new_list.append("Error: not a number") return new_list
214
528
97a7547e2dd8fc08efea8b582df958660254c8ec
NAELIC/dribbler
mbedignore.py
[ "MIT" ]
Python
_is_ignored
<not_specific>
def _is_ignored(path): """ Checks whether the specified path is ignored within the Mbed compilation. In details, takes the .mbedignore file in the path and checks whether it contains a line with string '*'. """ mbedignore_path = os.path.join(path, '.mbedignore') if not os.path.isfile(mbedignore_path): return False with open(mbedignore_path) as f: lines = f.read().splitlines() return '*' in lines
Checks whether the specified path is ignored within the Mbed compilation. In details, takes the .mbedignore file in the path and checks whether it contains a line with string '*'.
Checks whether the specified path is ignored within the Mbed compilation. In details, takes the .mbedignore file in the path and checks whether it contains a line with string '*'.
[ "Checks", "whether", "the", "specified", "path", "is", "ignored", "within", "the", "Mbed", "compilation", ".", "In", "details", "takes", "the", ".", "mbedignore", "file", "in", "the", "path", "and", "checks", "whether", "it", "contains", "a", "line", "with", "string", "'", "*", "'", "." ]
def _is_ignored(path): mbedignore_path = os.path.join(path, '.mbedignore') if not os.path.isfile(mbedignore_path): return False with open(mbedignore_path) as f: lines = f.read().splitlines() return '*' in lines
[ "def", "_is_ignored", "(", "path", ")", ":", "mbedignore_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'.mbedignore'", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "mbedignore_path", ")", ":", "return", "False", "with", "open", "(", "mbedignore_path", ")", "as", "f", ":", "lines", "=", "f", ".", "read", "(", ")", ".", "splitlines", "(", ")", "return", "'*'", "in", "lines" ]
Checks whether the specified path is ignored within the Mbed compilation.
[ "Checks", "whether", "the", "specified", "path", "is", "ignored", "within", "the", "Mbed", "compilation", "." ]
[ "\"\"\"\n Checks whether the specified path is ignored within the Mbed compilation.\n In details, takes the .mbedignore file in the path and checks whether it\n contains a line with string '*'.\n \"\"\"" ]
[ { "param": "path", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "path", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def _is_ignored(path): mbedignore_path = os.path.join(path, '.mbedignore') if not os.path.isfile(mbedignore_path): return False with open(mbedignore_path) as f: lines = f.read().splitlines() return '*' in lines
215
822
c4631afab38dc24e12eb9597fd364eefa032343f
gpooja3/pyvcloud
pyvcloud/vcd/utils.py
[ "Apache-2.0" ]
Python
extract_id
<not_specific>
def extract_id(urn): """Extract id from an urn. 'urn:vcloud:catalog:39867ab4-04e0-4b13-b468-08abcc1de810' will produce '39867ab4-04e0-4b13-b468-08abcc1de810' :param str urn: a vcloud resource urn. :return: the extracted id :rtype: str """ if urn is None: return None if ':' in urn: return urn.split(':')[-1] else: return urn
Extract id from an urn. 'urn:vcloud:catalog:39867ab4-04e0-4b13-b468-08abcc1de810' will produce '39867ab4-04e0-4b13-b468-08abcc1de810' :param str urn: a vcloud resource urn. :return: the extracted id :rtype: str
Extract id from an urn.
[ "Extract", "id", "from", "an", "urn", "." ]
def extract_id(urn): if urn is None: return None if ':' in urn: return urn.split(':')[-1] else: return urn
[ "def", "extract_id", "(", "urn", ")", ":", "if", "urn", "is", "None", ":", "return", "None", "if", "':'", "in", "urn", ":", "return", "urn", ".", "split", "(", "':'", ")", "[", "-", "1", "]", "else", ":", "return", "urn" ]
Extract id from an urn.
[ "Extract", "id", "from", "an", "urn", "." ]
[ "\"\"\"Extract id from an urn.\n\n 'urn:vcloud:catalog:39867ab4-04e0-4b13-b468-08abcc1de810' will produce\n '39867ab4-04e0-4b13-b468-08abcc1de810'\n\n :param str urn: a vcloud resource urn.\n\n :return: the extracted id\n\n :rtype: str\n \"\"\"" ]
[ { "param": "urn", "type": null } ]
{ "returns": [ { "docstring": "the extracted id", "docstring_tokens": [ "the", "extracted", "id" ], "type": "str" } ], "raises": [], "params": [ { "identifier": "urn", "type": null, "docstring": "a vcloud resource urn.", "docstring_tokens": [ "a", "vcloud", "resource", "urn", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def extract_id(urn): if urn is None: return None if ':' in urn: return urn.split(':')[-1] else: return urn
216
395
c46c36f2ee4e1c5ba74d20d8000a24520a0967c8
ilinastoilkovska/syncTA
helper.py
[ "Apache-2.0" ]
Python
sum_counters_le
<not_specific>
def sum_counters_le(sum_cnt, num): """ Returns an SMT inequality assertion given a list of counters and a threshold """ return "(<= (+ " + sum_cnt + ") " + str(num) + ")\n"
Returns an SMT inequality assertion given a list of counters and a threshold
Returns an SMT inequality assertion given a list of counters and a threshold
[ "Returns", "an", "SMT", "inequality", "assertion", "given", "a", "list", "of", "counters", "and", "a", "threshold" ]
def sum_counters_le(sum_cnt, num): return "(<= (+ " + sum_cnt + ") " + str(num) + ")\n"
[ "def", "sum_counters_le", "(", "sum_cnt", ",", "num", ")", ":", "return", "\"(<= (+ \"", "+", "sum_cnt", "+", "\") \"", "+", "str", "(", "num", ")", "+", "\")\\n\"" ]
Returns an SMT inequality assertion given a list of counters and a threshold
[ "Returns", "an", "SMT", "inequality", "assertion", "given", "a", "list", "of", "counters", "and", "a", "threshold" ]
[ "\"\"\"\r\n Returns an SMT inequality assertion given a list of counters and a threshold\r\n \"\"\"" ]
[ { "param": "sum_cnt", "type": null }, { "param": "num", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "sum_cnt", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "num", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def sum_counters_le(sum_cnt, num): return "(<= (+ " + sum_cnt + ") " + str(num) + ")\n"
217
213
01923d51812dd3fc50794952473c11d121fbc45d
zzyyrr/fuzzbench
analysis/data_utils.py
[ "Apache-2.0" ]
Python
benchmark_rank_by_median
<not_specific>
def benchmark_rank_by_median(benchmark_snapshot_df): """Returns ranking of fuzzers based on median coverage.""" assert benchmark_snapshot_df.time.nunique() == 1, 'Not a snapshot!' medians = benchmark_snapshot_df.groupby('fuzzer')['edges_covered'].median() medians.rename('median cov', inplace=True) return medians.sort_values(ascending=False)
Returns ranking of fuzzers based on median coverage.
Returns ranking of fuzzers based on median coverage.
[ "Returns", "ranking", "of", "fuzzers", "based", "on", "median", "coverage", "." ]
def benchmark_rank_by_median(benchmark_snapshot_df): assert benchmark_snapshot_df.time.nunique() == 1, 'Not a snapshot!' medians = benchmark_snapshot_df.groupby('fuzzer')['edges_covered'].median() medians.rename('median cov', inplace=True) return medians.sort_values(ascending=False)
[ "def", "benchmark_rank_by_median", "(", "benchmark_snapshot_df", ")", ":", "assert", "benchmark_snapshot_df", ".", "time", ".", "nunique", "(", ")", "==", "1", ",", "'Not a snapshot!'", "medians", "=", "benchmark_snapshot_df", ".", "groupby", "(", "'fuzzer'", ")", "[", "'edges_covered'", "]", ".", "median", "(", ")", "medians", ".", "rename", "(", "'median cov'", ",", "inplace", "=", "True", ")", "return", "medians", ".", "sort_values", "(", "ascending", "=", "False", ")" ]
Returns ranking of fuzzers based on median coverage.
[ "Returns", "ranking", "of", "fuzzers", "based", "on", "median", "coverage", "." ]
[ "\"\"\"Returns ranking of fuzzers based on median coverage.\"\"\"" ]
[ { "param": "benchmark_snapshot_df", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "benchmark_snapshot_df", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def benchmark_rank_by_median(benchmark_snapshot_df): assert benchmark_snapshot_df.time.nunique() == 1, 'Not a snapshot!' medians = benchmark_snapshot_df.groupby('fuzzer')['edges_covered'].median() medians.rename('median cov', inplace=True) return medians.sort_values(ascending=False)
219
635