| field | type | range / values |
| --- | --- | --- |
| hexsha | stringlengths | 40 – 40 |
| repo | stringlengths | 5 – 121 |
| path | stringlengths | 4 – 227 |
| license | sequence | |
| language | stringclasses | 1 value |
| identifier | stringlengths | 1 – 107 |
| return_type | stringlengths | 2 – 237 |
| original_string | stringlengths | 75 – 13.4k |
| original_docstring | stringlengths | 13 – 12.9k |
| docstring | stringlengths | 13 – 2.57k |
| docstring_tokens | sequence | |
| code | stringlengths | 23 – 1.88k |
| code_tokens | sequence | |
| short_docstring | stringlengths | 1 – 1.32k |
| short_docstring_tokens | sequence | |
| comment | sequence | |
| parameters | list | |
| docstring_params | dict | |
| code_with_imports | stringlengths | 23 – 1.88k |
| idxs | int64 | 0 – 611k |
| cluster | int64 | 0 – 1.02k |
b3fd05c9e664bcedf8d86f451e19e16aa7662d54
CubieDev/nltk
nltk/internals.py
[ "Apache-2.0" ]
Python
_add_epytext_field
null
def _add_epytext_field(obj, field, message):
    """Add an epytext @field to a given object's docstring."""
    indent = ""
    # If we already have a docstring, then add a blank line to separate
    # it from the new field, and check its indentation.
    if obj.__doc__:
        obj.__doc__ = obj.__doc__.rstrip() + "\n\n"
        indents = re.findall(r"(?<=\n)[ ]+(?!\s)", obj.__doc__.expandtabs())
        if indents:
            indent = min(indents)
    # If we don't have a docstring, add an empty one.
    else:
        obj.__doc__ = ""
    obj.__doc__ += textwrap.fill(
        f"@{field}: {message}",
        initial_indent=indent,
        subsequent_indent=indent + "    ",
    )
Add an epytext @field to a given object's docstring.
Add an epytext @field to a given object's docstring.
[ "Add", "an", "epytext", "@field", "to", "a", "given", "object", "'", "s", "docstring", "." ]
def _add_epytext_field(obj, field, message):
    indent = ""
    if obj.__doc__:
        obj.__doc__ = obj.__doc__.rstrip() + "\n\n"
        indents = re.findall(r"(?<=\n)[ ]+(?!\s)", obj.__doc__.expandtabs())
        if indents:
            indent = min(indents)
    else:
        obj.__doc__ = ""
    obj.__doc__ += textwrap.fill(
        f"@{field}: {message}",
        initial_indent=indent,
        subsequent_indent=indent + "    ",
    )
[ "def", "_add_epytext_field", "(", "obj", ",", "field", ",", "message", ")", ":", "indent", "=", "\"\"", "if", "obj", ".", "__doc__", ":", "obj", ".", "__doc__", "=", "obj", ".", "__doc__", ".", "rstrip", "(", ")", "+", "\"\\n\\n\"", "indents", "=", "re", ".", "findall", "(", "r\"(?<=\\n)[ ]+(?!\\s)\"", ",", "obj", ".", "__doc__", ".", "expandtabs", "(", ")", ")", "if", "indents", ":", "indent", "=", "min", "(", "indents", ")", "else", ":", "obj", ".", "__doc__", "=", "\"\"", "obj", ".", "__doc__", "+=", "textwrap", ".", "fill", "(", "f\"@{field}: {message}\"", ",", "initial_indent", "=", "indent", ",", "subsequent_indent", "=", "indent", "+", "\" \"", ",", ")" ]
Add an epytext @field to a given object's docstring.
[ "Add", "an", "epytext", "@field", "to", "a", "given", "object", "'", "s", "docstring", "." ]
[ "\"\"\"Add an epytext @field to a given object's docstring.\"\"\"", "# If we already have a docstring, then add a blank line to separate", "# it from the new field, and check its indentation.", "# If we don't have a docstring, add an empty one." ]
[ { "param": "obj", "type": null }, { "param": "field", "type": null }, { "param": "message", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "obj", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "field", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "message", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import textwrap
import re

def _add_epytext_field(obj, field, message):
    indent = ""
    if obj.__doc__:
        obj.__doc__ = obj.__doc__.rstrip() + "\n\n"
        indents = re.findall(r"(?<=\n)[ ]+(?!\s)", obj.__doc__.expandtabs())
        if indents:
            indent = min(indents)
    else:
        obj.__doc__ = ""
    obj.__doc__ += textwrap.fill(
        f"@{field}: {message}",
        initial_indent=indent,
        subsequent_indent=indent + "    ",
    )
450
750
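A quick usage sketch for the `_add_epytext_field` record above, assuming the reconstructed function and its `re`/`textwrap` imports are in scope; the decorated function and the `@deprecated` field are illustrative, not part of the dataset:

def greet(name):
    """Return a greeting."""
    return f"Hello, {name}!"

# Append a hypothetical epytext field to greet's docstring.
_add_epytext_field(greet, "deprecated", "Use salute() instead.")
print(greet.__doc__)
# Return a greeting.
#
# @deprecated: Use salute() instead.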
5ee32ae48dd08e970e0615852b1176f7cf0afc2b
Liam3851/freezegun
tests/test_warnings.py
[ "Apache-2.0" ]
Python
assert_no_warnings
null
def assert_no_warnings():
    """A context manager that makes sure no warnings was emitted."""
    with warnings.catch_warnings(record=True) as caught_warnings:
        warnings.filterwarnings('always')
        yield
    assert not caught_warnings
A context manager that makes sure no warnings was emitted.
A context manager that makes sure no warnings was emitted.
[ "A", "context", "manager", "that", "makes", "sure", "no", "warnings", "was", "emitted", "." ]
def assert_no_warnings():
    with warnings.catch_warnings(record=True) as caught_warnings:
        warnings.filterwarnings('always')
        yield
    assert not caught_warnings
[ "def", "assert_no_warnings", "(", ")", ":", "with", "warnings", ".", "catch_warnings", "(", "record", "=", "True", ")", "as", "caught_warnings", ":", "warnings", ".", "filterwarnings", "(", "'always'", ")", "yield", "assert", "not", "caught_warnings" ]
A context manager that makes sure no warnings was emitted.
[ "A", "context", "manager", "that", "makes", "sure", "no", "warnings", "was", "emitted", "." ]
[ "\"\"\"A context manager that makes sure no warnings was emitted.\"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import warnings

def assert_no_warnings():
    with warnings.catch_warnings(record=True) as caught_warnings:
        warnings.filterwarnings('always')
        yield
    assert not caught_warnings
452
13
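Note that as captured, `assert_no_warnings` is a bare generator function; to behave as the context manager its docstring describes, it presumably carries a `@contextlib.contextmanager` decorator in the source file that this record does not preserve. A minimal runnable sketch under that assumption:

import contextlib
import warnings

@contextlib.contextmanager  # decorator assumed; not captured in this record
def assert_no_warnings():
    with warnings.catch_warnings(record=True) as caught_warnings:
        warnings.filterwarnings('always')
        yield
    assert not caught_warnings

# Passes silently: nothing in the block emits a warning.
with assert_no_warnings():
    x = 1 + 1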
5dea3f28c423551dbb2c833e6676f58849ae155a
dmulyalin/nornir-salt
nornir_salt/plugins/processors/DataProcessor.py
[ "MIT" ]
Python
to_str
<not_specific>
def to_str(data, **kwargs):
    """
    Reference Name ``to_str``

    Function to transform Python structure to string without applying any formatting

    :param data: (structure) Python structure to transform
    :param kwargs: (dict) ignored
    :return: string
    """
    return str(data)
Reference Name ``to_str`` Function to transform Python structure to string without applying any formatting :param data: (structure) Python structure to transform :param kwargs: (dict) ignored :return: string
Reference Name ``to_str`` Function to transform Python structure to string without applying any formatting
[ "Reference", "Name", "`", "`", "to_str", "`", "`", "Function", "to", "transform", "Python", "structure", "to", "string", "without", "applying", "any", "formatting" ]
def to_str(data, **kwargs):
    return str(data)
[ "def", "to_str", "(", "data", ",", "**", "kwargs", ")", ":", "return", "str", "(", "data", ")" ]
Reference Name ``to_str`` Function to transform Python structure to string without applying any formatting
[ "Reference", "Name", "`", "`", "to_str", "`", "`", "Function", "to", "transform", "Python", "structure", "to", "string", "without", "applying", "any", "formatting" ]
[ "\"\"\"\n Reference Name ``to_str``\n\n Function to transform Python structure to string without applying any formatting\n\n :param data: (structure) Python structure to transform\n :param kwargs: (dict) ignored\n :return: string\n \"\"\"" ]
[ { "param": "data", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": "(structure) Python structure to transform", "docstring_tokens": [ "(", "structure", ")", "Python", "structure", "to", "transform" ], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "kwargs", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "others": [] }
def to_str(data, **kwargs):
    return str(data)
453
957
9465eeae64be31193e76e8b4e7d4e25731b76a4e
pafoster/conformance_distance_experiments_cochrane_et_al_2020
empirical-outliers/shuffle.py
[ "MIT" ]
Python
concatenate
null
def concatenate(u, words, front=False):
    """Concatenates a letter with each word in an iterable."""
    for word in words:
        if front:
            yield tuple([u] + list(word))
        else:
            yield tuple(list(word) + [u])
Concatenates a letter with each word in an iterable.
Concatenates a letter with each word in an iterable.
[ "Concatenates", "a", "letter", "with", "each", "word", "in", "an", "iterable", "." ]
def concatenate(u, words, front=False):
    for word in words:
        if front:
            yield tuple([u] + list(word))
        else:
            yield tuple(list(word) + [u])
[ "def", "concatenate", "(", "u", ",", "words", ",", "front", "=", "False", ")", ":", "for", "word", "in", "words", ":", "if", "front", ":", "yield", "tuple", "(", "[", "u", "]", "+", "list", "(", "word", ")", ")", "else", ":", "yield", "tuple", "(", "list", "(", "word", ")", "+", "[", "u", "]", ")" ]
Concatenates a letter with each word in an iterable.
[ "Concatenates", "a", "letter", "with", "each", "word", "in", "an", "iterable", "." ]
[ "\"\"\"Concatenates a letter with each word in an iterable.\"\"\"" ]
[ { "param": "u", "type": null }, { "param": "words", "type": null }, { "param": "front", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "u", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "words", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "front", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def concatenate(u, words, front=False):
    for word in words:
        if front:
            yield tuple([u] + list(word))
        else:
            yield tuple(list(word) + [u])
454
946
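A small usage sketch for the `concatenate` record above (illustrative values, assuming the function is in scope):

words = [('a', 'b'), ('c',)]
print(list(concatenate('x', words)))              # [('a', 'b', 'x'), ('c', 'x')]
print(list(concatenate('x', words, front=True)))  # [('x', 'a', 'b'), ('x', 'c')]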
eb5d5bd3a7da29387055eb8d88d96f0ff6d2949d
Nizebulous/hites.org
hites/settings.py
[ "MIT" ]
Python
env_var
<not_specific>
def env_var(var, default=None):
    """
    Read the value from an environment variable. Provided default is
    used if no environment variable is present provided
    """
    if default is None:
        assert var in os.environ, 'Must set {} evironment variable'.format(var)
    return os.environ[var]
Read the value from an environment variable. Provided default is used if no environment variable is present provided
Read the value from an environment variable. Provided default is used if no environment variable is present provided
[ "Read", "the", "value", "from", "an", "environment", "variable", ".", "Provided", "default", "is", "used", "if", "no", "environment", "variable", "is", "present", "provided" ]
def env_var(var, default=None):
    if default is None:
        assert var in os.environ, 'Must set {} evironment variable'.format(var)
    return os.environ[var]
[ "def", "env_var", "(", "var", ",", "default", "=", "None", ")", ":", "if", "default", "is", "None", ":", "assert", "var", "in", "os", ".", "environ", ",", "'Must set {} evironment variable'", ".", "format", "(", "var", ")", "return", "os", ".", "environ", "[", "var", "]" ]
Read the value from an environment variable.
[ "Read", "the", "value", "from", "an", "environment", "variable", "." ]
[ "\"\"\"\n Read the value from an environment variable. Provided default is\n used if no environment variable is present provided\n \"\"\"" ]
[ { "param": "var", "type": null }, { "param": "default", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "var", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "default", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os

def env_var(var, default=None):
    if default is None:
        assert var in os.environ, 'Must set {} evironment variable'.format(var)
    return os.environ[var]
455
856
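One caveat on the `env_var` record above: as reconstructed, passing a `default` only skips the assertion; the function still indexes `os.environ[var]` unconditionally, so the default is never actually returned. A sketch of the observed behavior (the variable names are illustrative):

import os

os.environ['APP_MODE'] = 'dev'
print(env_var('APP_MODE'))  # 'dev'
# With a default, the assert is skipped, but a missing key still raises KeyError:
# env_var('MISSING_KEY', default='fallback')  # KeyError, not 'fallback'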
53d206d8fffb5b6f6c9f4bbebde500332cc5b1f8
opticspy/lightpipes-python
LightPipes/zernike.py
[ "BSD-3-Clause" ]
Python
noll_to_zern
<not_specific>
def noll_to_zern(j):
    """
    *Convert linear Noll index to tuple of Zernike indices.*

    :param j: the linear Noll coordinate, n is the radial Zernike index and m is the azimuthal Zernike index.
    :type j: int, float
    :return: name of Noll Zernike term
    :rtype: string (n, m) tuple of Zernike indices

    .. seealso::

    * :ref:`Manual: Zernike polynomials.<Zernike polynomials.>`
    * `https://oeis.org <https://oeis.org/A176988>`_
    * `Tim van Werkhoven, https://github.com/tvwerkhoven <https://github.com/tvwerkhoven>`_
    * :ref:`Examples: Zernike aberration.<Zernike aberration.>`
    """
    if (j == 0):
        raise ValueError("Noll indices start at 1, 0 is invalid.")
    n = 0
    j1 = j-1
    while (j1 > n):
        n += 1
        j1 -= n
    m = (-1)**j * ((n % 2) + 2 * int((j1+((n+1)%2)) / 2.0))
    return (n, m)
*Convert linear Noll index to tuple of Zernike indices.* :param j: the linear Noll coordinate, n is the radial Zernike index and m is the azimuthal Zernike index. :type j: int, float :return: name of Noll Zernike term :rtype: string (n, m) tuple of Zernike indices .. seealso:: * :ref:`Manual: Zernike polynomials.<Zernike polynomials.>` * `https://oeis.org <https://oeis.org/A176988>`_ * `Tim van Werkhoven, https://github.com/tvwerkhoven <https://github.com/tvwerkhoven>`_ * :ref:`Examples: Zernike aberration.<Zernike aberration.>`
Convert linear Noll index to tuple of Zernike indices.
[ "Convert", "linear", "Noll", "index", "to", "tuple", "of", "Zernike", "indices", "." ]
def noll_to_zern(j):
    if (j == 0):
        raise ValueError("Noll indices start at 1, 0 is invalid.")
    n = 0
    j1 = j-1
    while (j1 > n):
        n += 1
        j1 -= n
    m = (-1)**j * ((n % 2) + 2 * int((j1+((n+1)%2)) / 2.0))
    return (n, m)
[ "def", "noll_to_zern", "(", "j", ")", ":", "if", "(", "j", "==", "0", ")", ":", "raise", "ValueError", "(", "\"Noll indices start at 1, 0 is invalid.\"", ")", "n", "=", "0", "j1", "=", "j", "-", "1", "while", "(", "j1", ">", "n", ")", ":", "n", "+=", "1", "j1", "-=", "n", "m", "=", "(", "-", "1", ")", "**", "j", "*", "(", "(", "n", "%", "2", ")", "+", "2", "*", "int", "(", "(", "j1", "+", "(", "(", "n", "+", "1", ")", "%", "2", ")", ")", "/", "2.0", ")", ")", "return", "(", "n", ",", "m", ")" ]
Convert linear Noll index to tuple of Zernike indices.
[ "Convert", "linear", "Noll", "index", "to", "tuple", "of", "Zernike", "indices", "." ]
[ "\"\"\"\n *Convert linear Noll index to tuple of Zernike indices.*\n \n :param j: the linear Noll coordinate, n is the radial Zernike index and m is the azimuthal Zernike index.\n :type j: int, float\n :return: name of Noll Zernike term\n :rtype: string (n, m) tuple of Zernike indices\n \n .. seealso::\n \n * :ref:`Manual: Zernike polynomials.<Zernike polynomials.>`\n * `https://oeis.org <https://oeis.org/A176988>`_\n * `Tim van Werkhoven, https://github.com/tvwerkhoven <https://github.com/tvwerkhoven>`_\n * :ref:`Examples: Zernike aberration.<Zernike aberration.>`\n \"\"\"" ]
[ { "param": "j", "type": null } ]
{ "returns": [ { "docstring": "name of Noll Zernike term", "docstring_tokens": [ "name", "of", "Noll", "Zernike", "term" ], "type": "string (n, m) tuple of Zernike indices\n.. seealso::\n\n * :ref:`Manual: Zernike polynomials.<Zernike polynomials.>`\n * `https://oeis.org <https://oeis.org/A176988>`_\n * `Tim van Werkhoven, https://github.com/tvwerkhoven <https://github.com/tvwerkhoven>`_\n * :ref:`Examples: Zernike aberration.<Zernike aberration.>`" } ], "raises": [], "params": [ { "identifier": "j", "type": null, "docstring": "the linear Noll coordinate, n is the radial Zernike index and m is the azimuthal Zernike index.", "docstring_tokens": [ "the", "linear", "Noll", "coordinate", "n", "is", "the", "radial", "Zernike", "index", "and", "m", "is", "the", "azimuthal", "Zernike", "index", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def noll_to_zern(j):
    if (j == 0):
        raise ValueError("Noll indices start at 1, 0 is invalid.")
    n = 0
    j1 = j-1
    while (j1 > n):
        n += 1
        j1 -= n
    m = (-1)**j * ((n % 2) + 2 * int((j1+((n+1)%2)) / 2.0))
    return (n, m)
456
353
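Tracing the `noll_to_zern` loop by hand for the first few indices reproduces the standard Noll ordering (OEIS A176988); the aberration names in the comments are the conventional ones, not part of the record:

for j in range(1, 5):
    print(j, noll_to_zern(j))
# 1 (0, 0)   piston
# 2 (1, 1)   tip
# 3 (1, -1)  tilt
# 4 (2, 0)   defocus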
92aceea8a9a0986622e5c58333f55bea0518e335
AnthonyNg404/61A
projects/cats/typing.py
[ "Unlicense" ]
Python
swap_diff
<not_specific>
def swap_diff(start, goal, limit):
    """A diff function for autocorrect that determines how many letters
    in START need to be substituted to create GOAL, then adds the difference in
    their lengths.
    """
    # BEGIN PROBLEM 6
    list_start = list(start)
    list_goal = list(goal)
    def count_changes(lst_start, lst_goal, total):
        if lst_start == [] or lst_goal == []:
            return total + abs(len(lst_start) - len(lst_goal))
        else:
            if lst_start[0] != lst_goal[0]:
                total += 1
                if total > limit:
                    return total
                return count_changes(lst_start[1:], lst_goal[1:], total)
            else:
                return count_changes(lst_start[1:], lst_goal[1:], total)
    return count_changes(list_start, list_goal, 0)
    # END PROBLEM 6
A diff function for autocorrect that determines how many letters in START need to be substituted to create GOAL, then adds the difference in their lengths.
A diff function for autocorrect that determines how many letters in START need to be substituted to create GOAL, then adds the difference in their lengths.
[ "A", "diff", "function", "for", "autocorrect", "that", "determines", "how", "many", "letters", "in", "START", "need", "to", "be", "substituted", "to", "create", "GOAL", "then", "adds", "the", "difference", "in", "their", "lengths", "." ]
def swap_diff(start, goal, limit):
    list_start = list(start)
    list_goal = list(goal)
    def count_changes(lst_start, lst_goal, total):
        if lst_start == [] or lst_goal == []:
            return total + abs(len(lst_start) - len(lst_goal))
        else:
            if lst_start[0] != lst_goal[0]:
                total += 1
                if total > limit:
                    return total
                return count_changes(lst_start[1:], lst_goal[1:], total)
            else:
                return count_changes(lst_start[1:], lst_goal[1:], total)
    return count_changes(list_start, list_goal, 0)
[ "def", "swap_diff", "(", "start", ",", "goal", ",", "limit", ")", ":", "list_start", "=", "list", "(", "start", ")", "list_goal", "=", "list", "(", "goal", ")", "def", "count_changes", "(", "lst_start", ",", "lst_goal", ",", "total", ")", ":", "if", "lst_start", "==", "[", "]", "or", "lst_goal", "==", "[", "]", ":", "return", "total", "+", "abs", "(", "len", "(", "lst_start", ")", "-", "len", "(", "lst_goal", ")", ")", "else", ":", "if", "lst_start", "[", "0", "]", "!=", "lst_goal", "[", "0", "]", ":", "total", "+=", "1", "if", "total", ">", "limit", ":", "return", "total", "return", "count_changes", "(", "lst_start", "[", "1", ":", "]", ",", "lst_goal", "[", "1", ":", "]", ",", "total", ")", "else", ":", "return", "count_changes", "(", "lst_start", "[", "1", ":", "]", ",", "lst_goal", "[", "1", ":", "]", ",", "total", ")", "return", "count_changes", "(", "list_start", ",", "list_goal", ",", "0", ")" ]
A diff function for autocorrect that determines how many letters in START need to be substituted to create GOAL, then adds the difference in their lengths.
[ "A", "diff", "function", "for", "autocorrect", "that", "determines", "how", "many", "letters", "in", "START", "need", "to", "be", "substituted", "to", "create", "GOAL", "then", "adds", "the", "difference", "in", "their", "lengths", "." ]
[ "\"\"\"A diff function for autocorrect that determines how many letters\n in START need to be substituted to create GOAL, then adds the difference in\n their lengths.\n \"\"\"", "# BEGIN PROBLEM 6", "# END PROBLEM 6" ]
[ { "param": "start", "type": null }, { "param": "goal", "type": null }, { "param": "limit", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "start", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "goal", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "limit", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def swap_diff(start, goal, limit):
    list_start = list(start)
    list_goal = list(goal)
    def count_changes(lst_start, lst_goal, total):
        if lst_start == [] or lst_goal == []:
            return total + abs(len(lst_start) - len(lst_goal))
        else:
            if lst_start[0] != lst_goal[0]:
                total += 1
                if total > limit:
                    return total
                return count_changes(lst_start[1:], lst_goal[1:], total)
            else:
                return count_changes(lst_start[1:], lst_goal[1:], total)
    return count_changes(list_start, list_goal, 0)
457
256
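Worked examples for the `swap_diff` record above (hand-checked against the recursion; inputs are illustrative):

print(swap_diff("nice", "rice", 5))    # 1  (one substitution)
print(swap_diff("roses", "arose", 5))  # 5  (every position differs, equal lengths)
print(swap_diff("cat", "catalog", 5))  # 4  (0 substitutions + 4 extra letters)
print(swap_diff("roses", "arose", 2))  # 3  (stops early once total exceeds limit)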
b7222c2000fc6bb293bb5d85908971c78e299182
hershman/server
tests/utils.py
[ "Apache-2.0" ]
Python
arrayGenerator
null
def arrayGenerator(generator, length=10):
    """
    Creates a generator that returns an an array of values taken from the
    supplied generator.
    """
    while True:
        yield list(itertools.islice(generator, length))
Creates a generator that returns an an array of values taken from the supplied generator.
Creates a generator that returns an an array of values taken from the supplied generator.
[ "Creates", "a", "generator", "that", "returns", "an", "an", "array", "of", "values", "taken", "from", "the", "supplied", "generator", "." ]
def arrayGenerator(generator, length=10):
    while True:
        yield list(itertools.islice(generator, length))
[ "def", "arrayGenerator", "(", "generator", ",", "length", "=", "10", ")", ":", "while", "True", ":", "yield", "list", "(", "itertools", ".", "islice", "(", "generator", ",", "length", ")", ")" ]
Creates a generator that returns an an array of values taken from the supplied generator.
[ "Creates", "a", "generator", "that", "returns", "an", "an", "array", "of", "values", "taken", "from", "the", "supplied", "generator", "." ]
[ "\"\"\"\n Creates a generator that returns an an array of values taken from the\n supplied generator.\n \"\"\"" ]
[ { "param": "generator", "type": null }, { "param": "length", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "generator", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "length", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import itertools

def arrayGenerator(generator, length=10):
    while True:
        yield list(itertools.islice(generator, length))
458
1,020
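A usage sketch for the `arrayGenerator` record above; note the source must be a true iterator (hence `iter(...)`), and an exhausted source makes the generator yield empty lists forever:

import itertools

chunks = arrayGenerator(iter(range(7)), length=3)
print(next(chunks))  # [0, 1, 2]
print(next(chunks))  # [3, 4, 5]
print(next(chunks))  # [6]
print(next(chunks))  # []  (exhausted source: empty lists from here on)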
c6d148f1d4ef22ec42817b1575a3a10608427f79
ManuelLobo/IBEnt
src/other/features/numerical_feature_selection.py
[ "MIT" ]
Python
base_write
null
def base_write(good_features, prop_file):
    """ writes base.prop file with base features and gets the precision
    good_features is dictionary with the good features and the value"""
    #Base file based on IBEnt's file. Still need if its good enough
    base_features = """trainFile = models/hpo_train.bilou
    serializeTo = models/hpo_train.ser.gz
    map = word=0,answer=1

    useClassFeature=true
    useWord=true
    maxNGramLeng=14
    entitySubclassification = SBIEO
    wordShape=chris2useLC
    useNeighborNGrams=true
    useNext=true
    useWordPairs=true
    useNextSequences=true
    normalizeTimex=true
    useDisjShape=true
    useWordTag=true
    """
    base_prop = open(prop_file, "w")
    base_prop.write(base_features)
    for feature in good_features:
        base_prop.write(feature + "=" + str(good_features[feature]) + "\n")
    base_prop.close()
    os.system("bash src/other/features/features_selection.sh")
writes base.prop file with base features and gets the precision good_features is dictionary with the good features and the value
writes base.prop file with base features and gets the precision good_features is dictionary with the good features and the value
[ "writes", "base", ".", "prop", "file", "with", "base", "features", "and", "gets", "the", "precision", "good_features", "is", "dictionary", "with", "the", "good", "features", "and", "the", "value" ]
def base_write(good_features, prop_file):
    base_features = """trainFile = models/hpo_train.bilou
    serializeTo = models/hpo_train.ser.gz
    map = word=0,answer=1

    useClassFeature=true
    useWord=true
    maxNGramLeng=14
    entitySubclassification = SBIEO
    wordShape=chris2useLC
    useNeighborNGrams=true
    useNext=true
    useWordPairs=true
    useNextSequences=true
    normalizeTimex=true
    useDisjShape=true
    useWordTag=true
    """
    base_prop = open(prop_file, "w")
    base_prop.write(base_features)
    for feature in good_features:
        base_prop.write(feature + "=" + str(good_features[feature]) + "\n")
    base_prop.close()
    os.system("bash src/other/features/features_selection.sh")
[ "def", "base_write", "(", "good_features", ",", "prop_file", ")", ":", "base_features", "=", "\"\"\"trainFile = models/hpo_train.bilou\r\n\tserializeTo = models/hpo_train.ser.gz\r\n\tmap = word=0,answer=1\r\n\r\n\tuseClassFeature=true\r\n\tuseWord=true\r\n\tmaxNGramLeng=14\r\n\tentitySubclassification = SBIEO\r\n\twordShape=chris2useLC\r\n\tuseNeighborNGrams=true\r\n\tuseNext=true\r\n\tuseWordPairs=true\r\n\tuseNextSequences=true\r\n\tnormalizeTimex=true\r\n\tuseDisjShape=true\r\n\tuseWordTag=true\r\n\t\"\"\"", "base_prop", "=", "open", "(", "prop_file", ",", "\"w\"", ")", "base_prop", ".", "write", "(", "base_features", ")", "for", "feature", "in", "good_features", ":", "base_prop", ".", "write", "(", "feature", "+", "\"=\"", "+", "str", "(", "good_features", "[", "feature", "]", ")", "+", "\"\\n\"", ")", "base_prop", ".", "close", "(", ")", "os", ".", "system", "(", "\"bash src/other/features/features_selection.sh\"", ")" ]
writes base.prop file with base features and gets the precision good_features is dictionary with the good features and the value
[ "writes", "base", ".", "prop", "file", "with", "base", "features", "and", "gets", "the", "precision", "good_features", "is", "dictionary", "with", "the", "good", "features", "and", "the", "value" ]
[ "\"\"\" writes base.prop file with base features and gets the precision\r\n\t\tgood_features is dictionary with the good features and the value\"\"\"", "#Base file based on IBEnt's file. Still need if its good enough\r" ]
[ { "param": "good_features", "type": null }, { "param": "prop_file", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "good_features", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "prop_file", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os

def base_write(good_features, prop_file):
    base_features = """trainFile = models/hpo_train.bilou
    serializeTo = models/hpo_train.ser.gz
    map = word=0,answer=1

    useClassFeature=true
    useWord=true
    maxNGramLeng=14
    entitySubclassification = SBIEO
    wordShape=chris2useLC
    useNeighborNGrams=true
    useNext=true
    useWordPairs=true
    useNextSequences=true
    normalizeTimex=true
    useDisjShape=true
    useWordTag=true
    """
    base_prop = open(prop_file, "w")
    base_prop.write(base_features)
    for feature in good_features:
        base_prop.write(feature + "=" + str(good_features[feature]) + "\n")
    base_prop.close()
    os.system("bash src/other/features/features_selection.sh")
459
928
c5c5cf86edae0c90e7ef115d1bec2f664de5e6c1
komiamiko/doodads
transfinite/ordinal.py
[ "MIT" ]
Python
pow_sq
<not_specific>
def pow_sq(x, y):
    """
    Compute x^y, without calling its pow,
    using exponentiation by squaring.
    """
    r = 1
    while y:
        if y & 1:
            r = r * x
        x = x * x
        y >>= 1
    return r
Compute x^y, without calling its pow, using exponentiation by squaring.
Compute x^y, without calling its pow, using exponentiation by squaring.
[ "Compute", "x^y", "without", "calling", "its", "pow", "using", "exponentiation", "by", "squaring", "." ]
def pow_sq(x, y):
    r = 1
    while y:
        if y & 1:
            r = r * x
        x = x * x
        y >>= 1
    return r
[ "def", "pow_sq", "(", "x", ",", "y", ")", ":", "r", "=", "1", "while", "y", ":", "if", "y", "&", "1", ":", "r", "=", "r", "*", "x", "x", "=", "x", "*", "x", "y", ">>=", "1", "return", "r" ]
Compute x^y, without calling its pow, using exponentiation by squaring.
[ "Compute", "x^y", "without", "calling", "its", "pow", "using", "exponentiation", "by", "squaring", "." ]
[ "\"\"\"\n Compute x^y, without calling its pow,\n using exponentiation by squaring.\n \"\"\"" ]
[ { "param": "x", "type": null }, { "param": "y", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "x", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "y", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def pow_sq(x, y):
    r = 1
    while y:
        if y & 1:
            r = r * x
        x = x * x
        y >>= 1
    return r
460
669
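A worked trace for the `pow_sq` record above, showing why squaring needs only one multiply per bit of the exponent:

print(pow_sq(3, 13))  # 1594323, i.e. 3**13
# 13 = 0b1101, so the set bits multiply in x, x**4, and x**8:
# 3**1 * 3**4 * 3**8 = 3 * 81 * 6561 = 1594323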
8a3cefae8e8a15cdfba4d3a5fa11ab79030a1b82
spfanning/research-projects-RIT
MonteCarloMarginalizeCode/Code/RIFT/misc/modules.py
[ "MIT" ]
Python
transform_s1zs2z_chi_eff_chi_a
<not_specific>
def transform_s1zs2z_chi_eff_chi_a(mass1, mass2, spin1z, spin2z): #Copied from pycbc https://github.com/gwastro/pycbc/blob/master/pycbc/conversions.py
    """ Returns the aligned mass-weighted spin difference from mass1, mass2,
    spin1z, and spin2z.
    """
    chi_eff = (spin1z * mass1 + spin2z * mass2) / (mass1 + mass2)
    chi_a = (spin2z * mass2 - spin1z * mass1) / (mass2 + mass1)
    return chi_eff, chi_a
Returns the aligned mass-weighted spin difference from mass1, mass2, spin1z, and spin2z.
Returns the aligned mass-weighted spin difference from mass1, mass2, spin1z, and spin2z.
[ "Returns", "the", "aligned", "mass", "-", "weighted", "spin", "difference", "from", "mass1", "mass2", "spin1z", "and", "spin2z", "." ]
def transform_s1zs2z_chi_eff_chi_a(mass1, mass2, spin1z, spin2z):
    chi_eff = (spin1z * mass1 + spin2z * mass2) / (mass1 + mass2)
    chi_a = (spin2z * mass2 - spin1z * mass1) / (mass2 + mass1)
    return chi_eff, chi_a
[ "def", "transform_s1zs2z_chi_eff_chi_a", "(", "mass1", ",", "mass2", ",", "spin1z", ",", "spin2z", ")", ":", "chi_eff", "=", "(", "spin1z", "*", "mass1", "+", "spin2z", "*", "mass2", ")", "/", "(", "mass1", "+", "mass2", ")", "chi_a", "=", "(", "spin2z", "*", "mass2", "-", "spin1z", "*", "mass1", ")", "/", "(", "mass2", "+", "mass1", ")", "return", "chi_eff", ",", "chi_a" ]
Returns the aligned mass-weighted spin difference from mass1, mass2, spin1z, and spin2z.
[ "Returns", "the", "aligned", "mass", "-", "weighted", "spin", "difference", "from", "mass1", "mass2", "spin1z", "and", "spin2z", "." ]
[ "#Copied from pycbc https://github.com/gwastro/pycbc/blob/master/pycbc/conversions.py ", "\"\"\" Returns the aligned mass-weighted spin difference from mass1, mass2, \n spin1z, and spin2z. \n \"\"\"" ]
[ { "param": "mass1", "type": null }, { "param": "mass2", "type": null }, { "param": "spin1z", "type": null }, { "param": "spin2z", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "mass1", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "mass2", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "spin1z", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "spin2z", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def transform_s1zs2z_chi_eff_chi_a(mass1, mass2, spin1z, spin2z):
    chi_eff = (spin1z * mass1 + spin2z * mass2) / (mass1 + mass2)
    chi_a = (spin2z * mass2 - spin1z * mass1) / (mass2 + mass1)
    return chi_eff, chi_a
461
482
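Worked arithmetic for the `transform_s1zs2z_chi_eff_chi_a` record above (illustrative component masses and aligned spins, not physical data):

chi_eff, chi_a = transform_s1zs2z_chi_eff_chi_a(30.0, 20.0, 0.5, -0.2)
print(chi_eff)  # (0.5*30 + (-0.2)*20) / 50 = 11/50 = 0.22
print(chi_a)    # ((-0.2)*20 - 0.5*30) / 50 = -19/50 = -0.38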
362058bd5957656ef0587fc8bc530faa64d20c2a
indrajithbandara/neko
neko/strings.py
[ "MIT" ]
Python
pascal_to_space
<not_specific>
def pascal_to_space(text):
    """
    Takes a string in PascalCaseFormatting and attempts to detect
    any word boundaries. Spaces are inserted in detected word boundaries
    and the modified string is returned.
    """
    # We only match if we have a lowercase letter followed by an uppercase one.
    # We do not account for internal spaces existing.
    result = re.sub(r'(?<=[a-z])(?=[A-Z])', ' ', text, flags=re.U | re.M)
    return result.title()
Takes a string in PascalCaseFormatting and attempts to detect any word boundaries. Spaces are inserted in detected word boundaries and the modified string is returned.
Takes a string in PascalCaseFormatting and attempts to detect any word boundaries. Spaces are inserted in detected word boundaries and the modified string is returned.
[ "Takes", "a", "string", "in", "PascalCaseFormatting", "and", "attempts", "to", "detect", "any", "word", "boundaries", ".", "Spaces", "are", "inserted", "in", "detected", "word", "boundaries", "and", "the", "modified", "string", "is", "returned", "." ]
def pascal_to_space(text):
    result = re.sub(r'(?<=[a-z])(?=[A-Z])', ' ', text, flags=re.U | re.M)
    return result.title()
[ "def", "pascal_to_space", "(", "text", ")", ":", "result", "=", "re", ".", "sub", "(", "r'(?<=[a-z])(?=[A-Z])'", ",", "' '", ",", "text", ",", "flags", "=", "re", ".", "U", "|", "re", ".", "M", ")", "return", "result", ".", "title", "(", ")" ]
Takes a string in PascalCaseFormatting and attempts to detect any word boundaries.
[ "Takes", "a", "string", "in", "PascalCaseFormatting", "and", "attempts", "to", "detect", "any", "word", "boundaries", "." ]
[ "\"\"\"\n Takes a string in PascalCaseFormatting and attempts to detect\n any word boundaries. Spaces are inserted in detected word boundaries\n and the modified string is returned.\n \"\"\"", "# We only match if we have a lowercase letter followed by an uppercase one.", "# We do not account for internal spaces existing." ]
[ { "param": "text", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "text", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re

def pascal_to_space(text):
    result = re.sub(r'(?<=[a-z])(?=[A-Z])', ' ', text, flags=re.U | re.M)
    return result.title()
463
998
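A quick sketch for the `pascal_to_space` record above; the second call shows a side effect of `.title()`, which lowercases the interior of acronym runs:

print(pascal_to_space("PascalCaseFormatting"))  # 'Pascal Case Formatting'
print(pascal_to_space("parseHTTPResponse"))     # 'Parse Httpresponse'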
146d0d6b3d77d2b112534aca91c588bebee669cd
python-wells/wells
wells/utils.py
[ "Apache-2.0" ]
Python
timestamp_to_datetime
<not_specific>
def timestamp_to_datetime(ts):
    """convert a timestamp (seconds since Epoch) to datetime object.

    Args:
      ts: int, a timestamp in seconds

    Return:
      a naive datetime object in default timezone.

    """
    return datetime.datetime.fromtimestamp(ts)
convert a timestamp (seconds since Epoch) to datetime object. Args: ts: int, a timestamp in seconds Return: a naive datetime object in default timezone.
convert a timestamp (seconds since Epoch) to datetime object.
[ "convert", "a", "timestamp", "(", "seconds", "since", "Epoch", ")", "to", "datetime", "object", "." ]
def timestamp_to_datetime(ts):
    return datetime.datetime.fromtimestamp(ts)
[ "def", "timestamp_to_datetime", "(", "ts", ")", ":", "return", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "ts", ")" ]
convert a timestamp (seconds since Epoch) to datetime object.
[ "convert", "a", "timestamp", "(", "seconds", "since", "Epoch", ")", "to", "datetime", "object", "." ]
[ "\"\"\"convert a timestamp (seconds since Epoch) to datetime object.\n\n Args:\n ts: int, a timestamp in seconds\n\n Return:\n a naive datetime object in default timezone.\n\n \"\"\"" ]
[ { "param": "ts", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ts", "type": null, "docstring": "int, a timestamp in seconds", "docstring_tokens": [ "int", "a", "timestamp", "in", "seconds" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import datetime

def timestamp_to_datetime(ts):
    return datetime.datetime.fromtimestamp(ts)
464
925
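A usage sketch for the `timestamp_to_datetime` record above; the printed value depends on the machine's local timezone, which is the point of the "naive" caveat in its docstring:

import datetime

dt = timestamp_to_datetime(0)
print(dt)         # 1970-01-01 00:00:00 on a UTC machine; shifts with local timezone
print(dt.tzinfo)  # None (naive datetime)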
286720919662905098965d4e5915f77798952128
EyeOfPython/bitcoin-cash-node
test/functional/test_framework/util.py
[ "MIT" ]
Python
sync_mempools
<not_specific>
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    stop_time = time.time() + timeout
    while time.time() <= stop_time:
        pool = [set(r.getrawmempool()) for r in rpc_connections]
        if pool.count(pool[0]) == len(rpc_connections):
            if flush_scheduler:
                for r in rpc_connections:
                    r.syncwithvalidationinterfacequeue()
            return
        time.sleep(wait)
    raise AssertionError("Mempool sync timed out:{}".format(
        "".join("\n  {!r}".format(m) for m in pool)))
Wait until everybody has the same transactions in their memory pools
Wait until everybody has the same transactions in their memory pools
[ "Wait", "until", "everybody", "has", "the", "same", "transactions", "in", "their", "memory", "pools" ]
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
    stop_time = time.time() + timeout
    while time.time() <= stop_time:
        pool = [set(r.getrawmempool()) for r in rpc_connections]
        if pool.count(pool[0]) == len(rpc_connections):
            if flush_scheduler:
                for r in rpc_connections:
                    r.syncwithvalidationinterfacequeue()
            return
        time.sleep(wait)
    raise AssertionError("Mempool sync timed out:{}".format(
        "".join("\n  {!r}".format(m) for m in pool)))
[ "def", "sync_mempools", "(", "rpc_connections", ",", "*", ",", "wait", "=", "1", ",", "timeout", "=", "60", ",", "flush_scheduler", "=", "True", ")", ":", "stop_time", "=", "time", ".", "time", "(", ")", "+", "timeout", "while", "time", ".", "time", "(", ")", "<=", "stop_time", ":", "pool", "=", "[", "set", "(", "r", ".", "getrawmempool", "(", ")", ")", "for", "r", "in", "rpc_connections", "]", "if", "pool", ".", "count", "(", "pool", "[", "0", "]", ")", "==", "len", "(", "rpc_connections", ")", ":", "if", "flush_scheduler", ":", "for", "r", "in", "rpc_connections", ":", "r", ".", "syncwithvalidationinterfacequeue", "(", ")", "return", "time", ".", "sleep", "(", "wait", ")", "raise", "AssertionError", "(", "\"Mempool sync timed out:{}\"", ".", "format", "(", "\"\"", ".", "join", "(", "\"\\n {!r}\"", ".", "format", "(", "m", ")", "for", "m", "in", "pool", ")", ")", ")" ]
Wait until everybody has the same transactions in their memory pools
[ "Wait", "until", "everybody", "has", "the", "same", "transactions", "in", "their", "memory", "pools" ]
[ "\"\"\"\n Wait until everybody has the same transactions in their memory\n pools\n \"\"\"" ]
[ { "param": "rpc_connections", "type": null }, { "param": "wait", "type": null }, { "param": "timeout", "type": null }, { "param": "flush_scheduler", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "rpc_connections", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "wait", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "timeout", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "flush_scheduler", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import time

def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
    stop_time = time.time() + timeout
    while time.time() <= stop_time:
        pool = [set(r.getrawmempool()) for r in rpc_connections]
        if pool.count(pool[0]) == len(rpc_connections):
            if flush_scheduler:
                for r in rpc_connections:
                    r.syncwithvalidationinterfacequeue()
            return
        time.sleep(wait)
    raise AssertionError("Mempool sync timed out:{}".format(
        "".join("\n  {!r}".format(m) for m in pool)))
465
831
1048b2743c0ebb9cf22fee8bc04fdef020934c55
romnn/datemore
datemore/time.py
[ "MIT" ]
Python
from_native
"Time"
def from_native(cls, _time: datetime.time) -> "Time":
    """Creates a wrapped Time instance from a native datetime time"""
    return cls(
        _time.hour, _time.minute, _time.second, _time.microsecond, fold=_time.fold
    )
Creates a wrapped Time instance from a native datetime time
Creates a wrapped Time instance from a native datetime time
[ "Creates", "a", "wrapped", "Time", "instance", "from", "a", "native", "datetime", "time" ]
def from_native(cls, _time: datetime.time) -> "Time":
    return cls(
        _time.hour, _time.minute, _time.second, _time.microsecond, fold=_time.fold
    )
[ "def", "from_native", "(", "cls", ",", "_time", ":", "datetime", ".", "time", ")", "->", "\"Time\"", ":", "return", "cls", "(", "_time", ".", "hour", ",", "_time", ".", "minute", ",", "_time", ".", "second", ",", "_time", ".", "microsecond", ",", "fold", "=", "_time", ".", "fold", ")" ]
Creates a wrapped Time instance from a native datetime time
[ "Creates", "a", "wrapped", "Time", "instance", "from", "a", "native", "datetime", "time" ]
[ "\"\"\"Creates a wrapped Time instance from a native datetime time\"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "_time", "type": "datetime.time" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "_time", "type": "datetime.time", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def from_native(cls, _time: datetime.time) -> "Time":
    return cls(
        _time.hour, _time.minute, _time.second, _time.microsecond, fold=_time.fold
    )
466
274
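The `cls` parameter suggests this record is a classmethod whose `@classmethod` decorator and enclosing `Time` class were stripped by extraction. A minimal sketch under that assumption; the stand-in class below is hypothetical:

import datetime

class Time(datetime.time):
    """Stand-in for the wrapper class this method presumably belongs to."""
    from_native = classmethod(from_native)  # decorator assumed, not in the record

t = Time.from_native(datetime.time(12, 30, 15, 500))
print(t, type(t).__name__)  # 12:30:15.000500 Time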
9ab81cdae21a9b55405828abfb3e8b792dc92d4f
larson-group/clubb_release
utilities/check_for_missing_threadprivate.py
[ "Intel", "Unlicense", "NetCDF" ]
Python
findFiles
<not_specific>
def findFiles(dir):
    """ Find all files in a directory.
    """
    foundFiles = []                         # The returned full files locations and names
    subdirs = [x[0] for x in os.walk(dir)]  # Walk through each directory (including the given)
                                            # and find subdirectories recursively
    for subdir in subdirs:                  # Go through each subdirectory (including this one)
        currentFiles = next(os.walk(subdir))[2]  # Add the found files to a list
        if (len(currentFiles) > 0):         # If files were found
            for file in currentFiles:       # go through each file
                foundFiles.append(subdir + file)  # Add its location and the filename
                                                  # to the returned files list
    return foundFiles
Find all files in a directory.
Find all files in a directory.
[ "Find", "all", "files", "in", "a", "directory", "." ]
def findFiles(dir):
    foundFiles = []
    subdirs = [x[0] for x in os.walk(dir)]
    for subdir in subdirs:
        currentFiles = next(os.walk(subdir))[2]
        if (len(currentFiles) > 0):
            for file in currentFiles:
                foundFiles.append(subdir + file)
    return foundFiles
[ "def", "findFiles", "(", "dir", ")", ":", "foundFiles", "=", "[", "]", "subdirs", "=", "[", "x", "[", "0", "]", "for", "x", "in", "os", ".", "walk", "(", "dir", ")", "]", "for", "subdir", "in", "subdirs", ":", "currentFiles", "=", "next", "(", "os", ".", "walk", "(", "subdir", ")", ")", "[", "2", "]", "if", "(", "len", "(", "currentFiles", ")", ">", "0", ")", ":", "for", "file", "in", "currentFiles", ":", "foundFiles", ".", "append", "(", "subdir", "+", "file", ")", "return", "foundFiles" ]
Find all files in a directory.
[ "Find", "all", "files", "in", "a", "directory", "." ]
[ "\"\"\" Find all files in a directory.\r\n \"\"\"", "# The returned full files locations and names\r", "# Walk through each directory (including the given) \r", "# and find subdirectories recursively\r", "# Go through each subdirectory (including this one)\r", "# Add the found files to a list\r", "# If files were found\r", "# go through each file\r", "# Add its location and the filename \r", "# to the returned files list\r" ]
[ { "param": "dir", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dir", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os

def findFiles(dir):
    foundFiles = []
    subdirs = [x[0] for x in os.walk(dir)]
    for subdir in subdirs:
        currentFiles = next(os.walk(subdir))[2]
        if (len(currentFiles) > 0):
            for file in currentFiles:
                foundFiles.append(subdir + file)
    return foundFiles
467
385
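One caveat on the `findFiles` record above: `subdir + file` concatenates without a path separator, so unless the walked directory strings end in one, the results are malformed (e.g. 'sub/dirfile.txt'). A sketch of the safer join; this variant is my alternative, not the record's code:

import os

def find_files_safe(dir):
    """Variant using os.path.join so 'sub/dir' and 'file.txt' get a separator."""
    found = []
    for subdir, _dirnames, filenames in os.walk(dir):
        for name in filenames:
            found.append(os.path.join(subdir, name))
    return found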
f7a1f36a3023cb6a8aa55ca68b7ada79806c776f
ahmedlawi92/IzVerifier
IzVerifier/izspecs/containers/izclasses.py
[ "MIT" ]
Python
is_izpack_class
<not_specific>
def is_izpack_class(classname):
    """
    Determines if this references an izpack built-in class.
    """
    if type(classname) is list:
        classname = classname[0]
    return not '.' in classname
Determines if this references an izpack built-in class.
Determines if this references an izpack built-in class.
[ "Determines", "if", "this", "references", "an", "izpack", "built", "-", "in", "class", "." ]
def is_izpack_class(classname):
    if type(classname) is list:
        classname = classname[0]
    return not '.' in classname
[ "def", "is_izpack_class", "(", "classname", ")", ":", "if", "type", "(", "classname", ")", "is", "list", ":", "classname", "=", "classname", "[", "0", "]", "return", "not", "'.'", "in", "classname" ]
Determines if this references an izpack built-in class.
[ "Determines", "if", "this", "references", "an", "izpack", "built", "-", "in", "class", "." ]
[ "\"\"\"\n Determines if this references an izpack built-in class.\n \"\"\"" ]
[ { "param": "classname", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "classname", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def is_izpack_class(classname):
    if type(classname) is list:
        classname = classname[0]
    return not '.' in classname
468
510
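A small sketch for the `is_izpack_class` record above (class names are illustrative): an unqualified name counts as a built-in, and a list is checked by its first element.

print(is_izpack_class("SummaryPanel"))              # True  (no dot)
print(is_izpack_class("com.example.CustomAction"))  # False (fully qualified)
print(is_izpack_class(["SummaryPanel", "extra"]))   # True  (first element checked)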
7a5a02e514e6f0f9f9221ea58f1cff3392ce2b11
ncsu-las/great_expectations
great_expectations/dataset/util.py
[ "Apache-2.0" ]
Python
create_multiple_expectations
<not_specific>
def create_multiple_expectations(df, columns, expectation_type, *args, **kwargs):
    """Creates an identical expectation for each of the given columns with the specified arguments, if any.

    Args:
        df (great_expectations.dataset): A great expectations dataset object.
        columns (list): A list of column names represented as strings.
        expectation_type (string): The expectation type.

    Raises:
        KeyError if the provided column does not exist.
        AttributeError if the provided expectation type does not exist or df is not a valid great expectations dataset.

    Returns:
        A list of expectation results.


    """
    expectation = getattr(df, expectation_type)
    results = list()
    for column in columns:
        results.append(expectation(column, *args, **kwargs))
    return results
Creates an identical expectation for each of the given columns with the specified arguments, if any. Args: df (great_expectations.dataset): A great expectations dataset object. columns (list): A list of column names represented as strings. expectation_type (string): The expectation type. Raises: KeyError if the provided column does not exist. AttributeError if the provided expectation type does not exist or df is not a valid great expectations dataset. Returns: A list of expectation results.
Creates an identical expectation for each of the given columns with the specified arguments, if any. Args: df (great_expectations.dataset): A great expectations dataset object. columns (list): A list of column names represented as strings. expectation_type (string): The expectation type. KeyError if the provided column does not exist. AttributeError if the provided expectation type does not exist or df is not a valid great expectations dataset. A list of expectation results.
[ "Creates", "an", "identical", "expectation", "for", "each", "of", "the", "given", "columns", "with", "the", "specified", "arguments", "if", "any", ".", "Args", ":", "df", "(", "great_expectations", ".", "dataset", ")", ":", "A", "great", "expectations", "dataset", "object", ".", "columns", "(", "list", ")", ":", "A", "list", "of", "column", "names", "represented", "as", "strings", ".", "expectation_type", "(", "string", ")", ":", "The", "expectation", "type", ".", "KeyError", "if", "the", "provided", "column", "does", "not", "exist", ".", "AttributeError", "if", "the", "provided", "expectation", "type", "does", "not", "exist", "or", "df", "is", "not", "a", "valid", "great", "expectations", "dataset", ".", "A", "list", "of", "expectation", "results", "." ]
def create_multiple_expectations(df, columns, expectation_type, *args, **kwargs):
    expectation = getattr(df, expectation_type)
    results = list()
    for column in columns:
        results.append(expectation(column, *args, **kwargs))
    return results
[ "def", "create_multiple_expectations", "(", "df", ",", "columns", ",", "expectation_type", ",", "*", "args", ",", "**", "kwargs", ")", ":", "expectation", "=", "getattr", "(", "df", ",", "expectation_type", ")", "results", "=", "list", "(", ")", "for", "column", "in", "columns", ":", "results", ".", "append", "(", "expectation", "(", "column", ",", "*", "args", ",", "**", "kwargs", ")", ")", "return", "results" ]
Creates an identical expectation for each of the given columns with the specified arguments, if any.
[ "Creates", "an", "identical", "expectation", "for", "each", "of", "the", "given", "columns", "with", "the", "specified", "arguments", "if", "any", "." ]
[ "\"\"\"Creates an identical expectation for each of the given columns with the specified arguments, if any.\n\n Args:\n df (great_expectations.dataset): A great expectations dataset object.\n columns (list): A list of column names represented as strings.\n expectation_type (string): The expectation type.\n\n Raises:\n KeyError if the provided column does not exist.\n AttributeError if the provided expectation type does not exist or df is not a valid great expectations dataset.\n\n Returns:\n A list of expectation results.\n\n\n \"\"\"" ]
[ { "param": "df", "type": null }, { "param": "columns", "type": null }, { "param": "expectation_type", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "df", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "columns", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "expectation_type", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def create_multiple_expectations(df, columns, expectation_type, *args, **kwargs):
    expectation = getattr(df, expectation_type)
    results = list()
    for column in columns:
        results.append(expectation(column, *args, **kwargs))
    return results
469
920
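A hedged usage sketch for the `create_multiple_expectations` record above: `df` is assumed to already be a great_expectations dataset in scope, and the column names are illustrative; `expect_column_values_to_not_be_null` is a standard expectation method exposed on such datasets.

results = create_multiple_expectations(
    df,  # assumed great_expectations dataset
    columns=["user_id", "email"],
    expectation_type="expect_column_values_to_not_be_null",
)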
05a58786f1e4349fe16b415cad6244ae6fd1c7cc
seznam/flexp
flexp/browser/browser.py
[ "BSD-3-Clause" ]
Python
html_anchor_navigation
<not_specific>
def html_anchor_navigation(base_dir, experiment_dir, modules):
    """Build header of an experiment with links to all modules used for rendering.

    :param base_dir: parent folder in which to look for an experiment folders
    :param experiment_dir: experiment folder
    :param modules: list of all loaded modules
    :return: str
    """
    return "\n".join((
        """<header class="w3-container w3-dark-grey">
        <h5><a href='#'>{folder}</a></h5>
        </header>""".format(folder=experiment_dir),
        "\n".join("""
        <div style='white-space: nowrap;'>
        <div class=\"show toggle-cookie padding-right\" data-toggle='toggle-{id}-all' data-class-off='no-show'>&nbsp;</div>
        <a class='' href='#{module_title}'>{module_title}</a>
        </div>""".format(
            folder=experiment_dir,
            module_title=module.title,
            id=module.id)
            for module in modules),
        "<hr />"
    ))
Build header of an experiment with links to all modules used for rendering. :param base_dir: parent folder in which to look for an experiment folders :param experiment_dir: experiment folder :param modules: list of all loaded modules :return: str
Build header of an experiment with links to all modules used for rendering.
[ "Build", "header", "of", "an", "experiment", "with", "links", "to", "all", "modules", "used", "for", "rendering", "." ]
def html_anchor_navigation(base_dir, experiment_dir, modules):
    return "\n".join((
        """<header class="w3-container w3-dark-grey">
        <h5><a href='#'>{folder}</a></h5>
        </header>""".format(folder=experiment_dir),
        "\n".join("""
        <div style='white-space: nowrap;'>
        <div class=\"show toggle-cookie padding-right\" data-toggle='toggle-{id}-all' data-class-off='no-show'>&nbsp;</div>
        <a class='' href='#{module_title}'>{module_title}</a>
        </div>""".format(
            folder=experiment_dir,
            module_title=module.title,
            id=module.id)
            for module in modules),
        "<hr />"
    ))
[ "def", "html_anchor_navigation", "(", "base_dir", ",", "experiment_dir", ",", "modules", ")", ":", "return", "\"\\n\"", ".", "join", "(", "(", "\"\"\"<header class=\"w3-container w3-dark-grey\">\n <h5><a href='#'>{folder}</a></h5>\n </header>\"\"\"", ".", "format", "(", "folder", "=", "experiment_dir", ")", ",", "\"\\n\"", ".", "join", "(", "\"\"\"\n <div style='white-space: nowrap;'>\n <div class=\\\"show toggle-cookie padding-right\\\" data-toggle='toggle-{id}-all' data-class-off='no-show'>&nbsp;</div>\n <a class='' href='#{module_title}'>{module_title}</a>\n </div>\"\"\"", ".", "format", "(", "folder", "=", "experiment_dir", ",", "module_title", "=", "module", ".", "title", ",", "id", "=", "module", ".", "id", ")", "for", "module", "in", "modules", ")", ",", "\"<hr />\"", ")", ")" ]
Build header of an experiment with links to all modules used for rendering.
[ "Build", "header", "of", "an", "experiment", "with", "links", "to", "all", "modules", "used", "for", "rendering", "." ]
[ "\"\"\"Build header of an experiment with links to all modules used for rendering.\n\n :param base_dir: parent folder in which to look for an experiment folders\n :param experiment_dir: experiment folder\n :param modules: list of all loaded modules\n :return: str\n \"\"\"" ]
[ { "param": "base_dir", "type": null }, { "param": "experiment_dir", "type": null }, { "param": "modules", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "base_dir", "type": null, "docstring": "parent folder in which to look for an experiment folders", "docstring_tokens": [ "parent", "folder", "in", "which", "to", "look", "for", "an", "experiment", "folders" ], "default": null, "is_optional": null }, { "identifier": "experiment_dir", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "modules", "type": null, "docstring": "list of all loaded modules", "docstring_tokens": [ "list", "of", "all", "loaded", "modules" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def html_anchor_navigation(base_dir, experiment_dir, modules):
    return "\n".join((
        """<header class="w3-container w3-dark-grey">
        <h5><a href='#'>{folder}</a></h5>
        </header>""".format(folder=experiment_dir),
        "\n".join("""
        <div style='white-space: nowrap;'>
        <div class=\"show toggle-cookie padding-right\" data-toggle='toggle-{id}-all' data-class-off='no-show'>&nbsp;</div>
        <a class='' href='#{module_title}'>{module_title}</a>
        </div>""".format(
            folder=experiment_dir,
            module_title=module.title,
            id=module.id)
            for module in modules),
        "<hr />"
    ))
471
366
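A quick sketch for the `html_anchor_navigation` record above; the module objects only need `.title` and `.id` attributes, so a namedtuple stand-in suffices (note `base_dir` is accepted but unused by the function body):

from collections import namedtuple

# Minimal stand-in for the real module objects; illustrative values.
Module = namedtuple("Module", ["title", "id"])
print(html_anchor_navigation("experiments", "exp-001", [Module("Metrics", "m1")]))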
c536d395148f7f4a269d51d31faf866f5c6b11a2
macabuag/OasisPiWind
python_codes/windfield_TEST_SPYDER.py
[ "BSD-2-Clause" ]
Python
buffer_grid
<not_specific>
def buffer_grid(gdf, R):
    """ Function to buffered the windtrack points according to the Rmax"""
    buffer_length_in_meters = R * 1000
    cpr_gdf = gdf.to_crs(epsg=32621)  # projected coordinate system
    cpr_gdf['geometry'] = cpr_gdf.geometry.buffer(buffer_length_in_meters)
    return cpr_gdf
Function to buffered the windtrack points according to the Rmax
Function to buffered the windtrack points according to the Rmax
[ "Function", "to", "buffered", "the", "windtrack", "points", "according", "to", "the", "Rmax" ]
def buffer_grid(gdf, R):
    buffer_length_in_meters = R * 1000
    cpr_gdf = gdf.to_crs(epsg=32621)
    cpr_gdf['geometry'] = cpr_gdf.geometry.buffer(buffer_length_in_meters)
    return cpr_gdf
[ "def", "buffer_grid", "(", "gdf", ",", "R", ")", ":", "buffer_length_in_meters", "=", "R", "*", "1000", "cpr_gdf", "=", "gdf", ".", "to_crs", "(", "epsg", "=", "32621", ")", "cpr_gdf", "[", "'geometry'", "]", "=", "cpr_gdf", ".", "geometry", ".", "buffer", "(", "buffer_length_in_meters", ")", "return", "cpr_gdf" ]
Function to buffered the windtrack points according to the Rmax
[ "Function", "to", "buffered", "the", "windtrack", "points", "according", "to", "the", "Rmax" ]
[ "\"\"\" Function to buffered the windtrack points according to the Rmax\"\"\"", "# projected coordinate system" ]
[ { "param": "gdf", "type": null }, { "param": "R", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "gdf", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "R", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def buffer_grid(gdf, R):
    buffer_length_in_meters = R * 1000
    cpr_gdf = gdf.to_crs(epsg=32621)
    cpr_gdf['geometry'] = cpr_gdf.geometry.buffer(buffer_length_in_meters)
    return cpr_gdf
472
438
5117d5c94e0915e982a3a4ad85e09e44f1da9a7d
facebookresearch/Mephisto
mephisto/abstractions/providers/mturk/mturk_utils.py
[ "MIT" ]
Python
create_hit_config
None
def create_hit_config(
    opt: Dict[str, Any], task_description: str, unique_worker: bool, is_sandbox: bool
) -> None:
    """Writes a HIT config to file"""
    mturk_submit_url = "https://workersandbox.mturk.com/mturk/externalSubmit"
    if not is_sandbox:
        mturk_submit_url = "https://www.mturk.com/mturk/externalSubmit"
    hit_config = {
        "task_description": task_description,
        "is_sandbox": is_sandbox,
        "mturk_submit_url": mturk_submit_url,
        "unique_worker": unique_worker,
        "frame_height": opt.get("frame_height", 0),
        "allow_reviews": opt.get("allow_reviews", False),
        "block_mobile": opt.get("block_mobile", True),
        # Populate the chat pane title from chat_title, defaulting to the
        # hit_title if the task provides no chat_title
        "chat_title": opt.get("chat_title", opt.get("hit_title", "Live Chat")),
        "template_type": opt.get("frontend_template_type", "default"),
    }
    hit_config_file_path = os.path.join(opt["tmp_dir"], "hit_config.json")
    if os.path.exists(hit_config_file_path):
        os.remove(hit_config_file_path)
    with open(hit_config_file_path, "w") as hit_config_file:
        hit_config_file.write(json.dumps(hit_config))
Writes a HIT config to file
Writes a HIT config to file
[ "Writes", "a", "HIT", "config", "to", "file" ]
def create_hit_config( opt: Dict[str, Any], task_description: str, unique_worker: bool, is_sandbox: bool ) -> None: mturk_submit_url = "https://workersandbox.mturk.com/mturk/externalSubmit" if not is_sandbox: mturk_submit_url = "https://www.mturk.com/mturk/externalSubmit" hit_config = { "task_description": task_description, "is_sandbox": is_sandbox, "mturk_submit_url": mturk_submit_url, "unique_worker": unique_worker, "frame_height": opt.get("frame_height", 0), "allow_reviews": opt.get("allow_reviews", False), "block_mobile": opt.get("block_mobile", True), "chat_title": opt.get("chat_title", opt.get("hit_title", "Live Chat")), "template_type": opt.get("frontend_template_type", "default"), } hit_config_file_path = os.path.join(opt["tmp_dir"], "hit_config.json") if os.path.exists(hit_config_file_path): os.remove(hit_config_file_path) with open(hit_config_file_path, "w") as hit_config_file: hit_config_file.write(json.dumps(hit_config))
[ "def", "create_hit_config", "(", "opt", ":", "Dict", "[", "str", ",", "Any", "]", ",", "task_description", ":", "str", ",", "unique_worker", ":", "bool", ",", "is_sandbox", ":", "bool", ")", "->", "None", ":", "mturk_submit_url", "=", "\"https://workersandbox.mturk.com/mturk/externalSubmit\"", "if", "not", "is_sandbox", ":", "mturk_submit_url", "=", "\"https://www.mturk.com/mturk/externalSubmit\"", "hit_config", "=", "{", "\"task_description\"", ":", "task_description", ",", "\"is_sandbox\"", ":", "is_sandbox", ",", "\"mturk_submit_url\"", ":", "mturk_submit_url", ",", "\"unique_worker\"", ":", "unique_worker", ",", "\"frame_height\"", ":", "opt", ".", "get", "(", "\"frame_height\"", ",", "0", ")", ",", "\"allow_reviews\"", ":", "opt", ".", "get", "(", "\"allow_reviews\"", ",", "False", ")", ",", "\"block_mobile\"", ":", "opt", ".", "get", "(", "\"block_mobile\"", ",", "True", ")", ",", "\"chat_title\"", ":", "opt", ".", "get", "(", "\"chat_title\"", ",", "opt", ".", "get", "(", "\"hit_title\"", ",", "\"Live Chat\"", ")", ")", ",", "\"template_type\"", ":", "opt", ".", "get", "(", "\"frontend_template_type\"", ",", "\"default\"", ")", ",", "}", "hit_config_file_path", "=", "os", ".", "path", ".", "join", "(", "opt", "[", "\"tmp_dir\"", "]", ",", "\"hit_config.json\"", ")", "if", "os", ".", "path", ".", "exists", "(", "hit_config_file_path", ")", ":", "os", ".", "remove", "(", "hit_config_file_path", ")", "with", "open", "(", "hit_config_file_path", ",", "\"w\"", ")", "as", "hit_config_file", ":", "hit_config_file", ".", "write", "(", "json", ".", "dumps", "(", "hit_config", ")", ")" ]
Writes a HIT config to file
[ "Writes", "a", "HIT", "config", "to", "file" ]
[ "\"\"\"Writes a HIT config to file\"\"\"", "# Populate the chat pane title from chat_title, defaulting to the", "# hit_title if the task provides no chat_title" ]
[ { "param": "opt", "type": "Dict[str, Any]" }, { "param": "task_description", "type": "str" }, { "param": "unique_worker", "type": "bool" }, { "param": "is_sandbox", "type": "bool" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "opt", "type": "Dict[str, Any]", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "task_description", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "unique_worker", "type": "bool", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "is_sandbox", "type": "bool", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os import json from typing import Any, Dict def create_hit_config( opt: Dict[str, Any], task_description: str, unique_worker: bool, is_sandbox: bool ) -> None: mturk_submit_url = "https://workersandbox.mturk.com/mturk/externalSubmit" if not is_sandbox: mturk_submit_url = "https://www.mturk.com/mturk/externalSubmit" hit_config = { "task_description": task_description, "is_sandbox": is_sandbox, "mturk_submit_url": mturk_submit_url, "unique_worker": unique_worker, "frame_height": opt.get("frame_height", 0), "allow_reviews": opt.get("allow_reviews", False), "block_mobile": opt.get("block_mobile", True), "chat_title": opt.get("chat_title", opt.get("hit_title", "Live Chat")), "template_type": opt.get("frontend_template_type", "default"), } hit_config_file_path = os.path.join(opt["tmp_dir"], "hit_config.json") if os.path.exists(hit_config_file_path): os.remove(hit_config_file_path) with open(hit_config_file_path, "w") as hit_config_file: hit_config_file.write(json.dumps(hit_config))
473
16
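A small sketch of calling the writer above; the opt keys other than tmp_dir are optional and the values here are illustrative:

import json
import os
import tempfile

tmp_dir = tempfile.mkdtemp()
opt = {"tmp_dir": tmp_dir, "hit_title": "Label images"}
create_hit_config(opt, task_description="Pick the best caption",
                  unique_worker=False, is_sandbox=True)

with open(os.path.join(tmp_dir, "hit_config.json")) as f:
    print(json.load(f)["mturk_submit_url"])  # the sandbox submit URL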
8bc0fb46ef57590f8f829484cc331966e5bd8def
sunghunbae/mixedsets
mixedsets.py
[ "MIT" ]
Python
min_peak_distance
<not_specific>
def min_peak_distance (peaks,index,overlap) : # number of indices should be 2 or more if len(index) == 1 : return None # put a molecular index to each peak val=[] mol=[] for i in index : val.extend( peaks[i] ) mol.extend( [i]*len( peaks[i]) ) sortedIndex= sorted(range(len(val)), key=lambda x: val[x]) val= sorted(val) mol= [ mol[x] for x in sortedIndex ] """ remove overlapped peaks For a sorted peak list, val, measure distance, d, between neighboring peaks. If d is larger than overlap keep this distance otherwise replace peaks' molecular index with -1 """ prev= None for k, v in enumerate(val) : if prev is not None: d = v - prev if d < overlap : mol[k] = -1 mol[k-1] = -1 prev = v # At least one peak must be present in the final set # so that it represents the molecule in the spectrum for i in index: if i not in mol: return None # return minimum peak distance mpd= [] prev= None for k, v in enumerate(val) : if mol[k] != -1 : if prev is not None : d = v - prev mpd.append (d) prev = v return min(mpd)
remove overlapped peaks For a sorted peak list, val, measure distance, d, between neighboring peaks. If d is larger than overlap keep this distance otherwise replace peaks' molecular index with -1
remove overlapped peaks For a sorted peak list, val, measure distance, d, between neighboring peaks. If d is larger than overlap keep this distance otherwise replace peaks' molecular index with -1
[ "remove", "overlapped", "peaks", "For", "a", "sorted", "peak", "list", "val", "measure", "distance", "d", "between", "neighboring", "peaks", ".", "If", "d", "is", "larger", "than", "overlap", "keep", "this", "distance", "otherwise", "replace", "peaks", "'", "molecular", "index", "with", "-", "1" ]
def min_peak_distance (peaks,index,overlap) : if len(index) == 1 : return None val=[] mol=[] for i in index : val.extend( peaks[i] ) mol.extend( [i]*len( peaks[i]) ) sortedIndex= sorted(range(len(val)), key=lambda x: val[x]) val= sorted(val) mol= [ mol[x] for x in sortedIndex ] prev= None for k, v in enumerate(val) : if prev is not None: d = v - prev if d < overlap : mol[k] = -1 mol[k-1] = -1 prev = v for i in index: if i not in mol: return None mpd= [] prev= None for k, v in enumerate(val) : if mol[k] != -1 : if prev is not None : d = v - prev mpd.append (d) prev = v return min(mpd)
[ "def", "min_peak_distance", "(", "peaks", ",", "index", ",", "overlap", ")", ":", "if", "len", "(", "index", ")", "==", "1", ":", "return", "None", "val", "=", "[", "]", "mol", "=", "[", "]", "for", "i", "in", "index", ":", "val", ".", "extend", "(", "peaks", "[", "i", "]", ")", "mol", ".", "extend", "(", "[", "i", "]", "*", "len", "(", "peaks", "[", "i", "]", ")", ")", "sortedIndex", "=", "sorted", "(", "range", "(", "len", "(", "val", ")", ")", ",", "key", "=", "lambda", "x", ":", "val", "[", "x", "]", ")", "val", "=", "sorted", "(", "val", ")", "mol", "=", "[", "mol", "[", "x", "]", "for", "x", "in", "sortedIndex", "]", "prev", "=", "None", "for", "k", ",", "v", "in", "enumerate", "(", "val", ")", ":", "if", "prev", "is", "not", "None", ":", "d", "=", "v", "-", "prev", "if", "d", "<", "overlap", ":", "mol", "[", "k", "]", "=", "-", "1", "mol", "[", "k", "-", "1", "]", "=", "-", "1", "prev", "=", "v", "for", "i", "in", "index", ":", "if", "i", "not", "in", "mol", ":", "return", "None", "mpd", "=", "[", "]", "prev", "=", "None", "for", "k", ",", "v", "in", "enumerate", "(", "val", ")", ":", "if", "mol", "[", "k", "]", "!=", "-", "1", ":", "if", "prev", "is", "not", "None", ":", "d", "=", "v", "-", "prev", "mpd", ".", "append", "(", "d", ")", "prev", "=", "v", "return", "min", "(", "mpd", ")" ]
remove overlapped peaks For a sorted peak list, val, measure distance, d, between neighboring peaks.
[ "remove", "overlapped", "peaks", "For", "a", "sorted", "peak", "list", "val", "measure", "distance", "d", "between", "neighboring", "peaks", "." ]
[ "# number of indices should be 2 or more", "# put a molecular index to each peak", "\"\"\" \n\tremove overlapped peaks\n\tFor a sorted peak list, val, measure distance, d, \n\tbetween neighboring peaks. If d is larger than \n\toverlap keep this distance otherwise replace \n\tpeaks' molecular index with -1\n\t\"\"\"", "# At least one peak must be present in the final set", "# so that it represents the molecule in the spectrum", "# return minimum peak distance" ]
[ { "param": "peaks", "type": null }, { "param": "index", "type": null }, { "param": "overlap", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "peaks", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "index", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "overlap", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def min_peak_distance (peaks,index,overlap) : if len(index) == 1 : return None val=[] mol=[] for i in index : val.extend( peaks[i] ) mol.extend( [i]*len( peaks[i]) ) sortedIndex= sorted(range(len(val)), key=lambda x: val[x]) val= sorted(val) mol= [ mol[x] for x in sortedIndex ] prev= None for k, v in enumerate(val) : if prev is not None: d = v - prev if d < overlap : mol[k] = -1 mol[k-1] = -1 prev = v for i in index: if i not in mol: return None mpd= [] prev= None for k, v in enumerate(val) : if mol[k] != -1 : if prev is not None : d = v - prev mpd.append (d) prev = v return min(mpd)
474
249
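A quick sketch, assuming `peaks` maps each molecule index to its list of peak positions and `overlap` is the minimum allowed separation:

# Well-separated peaks from two molecules -> smallest surviving gap.
peaks = {0: [1.0, 3.0, 7.5], 1: [2.0, 5.0]}
print(min_peak_distance(peaks, index=[0, 1], overlap=0.5))   # 1.0

# Both peaks collide within `overlap`, so each molecule loses its only peak -> None.
print(min_peak_distance({1: [2.0], 2: [2.05]}, index=[1, 2], overlap=0.5))   # None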
40d907e8cd5619ba613f70d8860528738838938b
BjornChrisnach/Coursera_Python_concise_intro
Exercises4.py
[ "MIT" ]
Python
make_same_random
<not_specific>
def make_same_random(): """ Make a list of 10 random integers that are the same each time """ numlis = [] random.seed(17) # set the seed from which random numbers are made for i in range(0,10): numlis.append(random.randint(1,100)) return numlis
Make a list of 10 random integers that are the same each time
Make a list of 10 random integers that are the same each time
[ "Make", "a", "list", "of", "10", "random", "integers", "that", "are", "the", "same", "each", "time" ]
def make_same_random(): numlis = [] random.seed(17) for i in range(0,10): numlis.append(random.randint(1,100)) return numlis
[ "def", "make_same_random", "(", ")", ":", "numlis", "=", "[", "]", "random", ".", "seed", "(", "17", ")", "for", "i", "in", "range", "(", "0", ",", "10", ")", ":", "numlis", ".", "append", "(", "random", ".", "randint", "(", "1", ",", "100", ")", ")", "return", "numlis" ]
Make a list of 10 random integers that are the same each time
[ "Make", "a", "list", "of", "10", "random", "integers", "that", "are", "the", "same", "each", "time" ]
[ "\"\"\" Make a list of 10 random integers that are the same each time \"\"\"", "# set the seed from which random numbers are made" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import random def make_same_random(): numlis = [] random.seed(17) for i in range(0,10): numlis.append(random.randint(1,100)) return numlis
475
728
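Because the seed is reset to 17 on every call, the function is deterministic; a tiny check:

first = make_same_random()
second = make_same_random()
print(first == second)                                 # True
print(len(first), all(1 <= n <= 100 for n in first))   # 10 True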
670da5101ae8205e3ed9c8d5145d066e4c20673a
ntezak/plotille
plotille/_util.py
[ "MIT" ]
Python
roundeven
<not_specific>
def roundeven(x): """Round to next even integer number in case of `X.5` In Python3 this is the same as `round(x, 0)`, but since Python2 rounds up in that case and I want consistent behaviour, here is the roundeven function. Parameters: x: float The number to round. Returns: int: floor(x) if x - floor(x) < 0.5 ceil(x) if x - floor(x) > 0.5 next even of x if x - floor(x) == 0.5 """ if math.isinf(x) or math.isnan(x): return x # same behaviour as in python2 x_r = round(x) if abs(x_r - x) == 0.5: return int(2.0 * round(x / 2)) return x_r
Round to next even integer number in case of `X.5` In Python3 this is the same as `round(x, 0)`, but since Python2 rounds up in that case and I want consistent behaviour, here is the roundeven function. Parameters: x: float The number to round. Returns: int: floor(x) if x - floor(x) < 0.5 ceil(x) if x - floor(x) > 0.5 next even of x if x - floor(x) == 0.5
Round to next even integer number in case of `X.5` In Python3 this is the same as `round(x, 0)`, but since Python2 rounds up in that case and I want consistent behaviour, here is the roundeven function.
[ "Round", "to", "next", "even", "integer", "number", "in", "case", "of", "`", "X", ".", "5", "`", "In", "Python3", "this", "is", "the", "same", "as", "`", "round", "(", "x", "0", ")", "`", "but", "since", "Python2", "rounds", "up", "in", "that", "case", "and", "I", "want", "consistent", "behaviour", "here", "is", "the", "roundeven", "function", "." ]
def roundeven(x): if math.isinf(x) or math.isnan(x): return x x_r = round(x) if abs(x_r - x) == 0.5: return int(2.0 * round(x / 2)) return x_r
[ "def", "roundeven", "(", "x", ")", ":", "if", "math", ".", "isinf", "(", "x", ")", "or", "math", ".", "isnan", "(", "x", ")", ":", "return", "x", "x_r", "=", "round", "(", "x", ")", "if", "abs", "(", "x_r", "-", "x", ")", "==", "0.5", ":", "return", "int", "(", "2.0", "*", "round", "(", "x", "/", "2", ")", ")", "return", "x_r" ]
Round to next even integer number in case of `X.5` In Python3 this is the same as `round(x, 0)`, but since Python2 rounds up in that case and I want consistent behaviour, here is the roundeven function.
[ "Round", "to", "next", "even", "integer", "number", "in", "case", "of", "`", "X", ".", "5", "`", "In", "Python3", "this", "is", "the", "same", "as", "`", "round", "(", "x", "0", ")", "`", "but", "since", "Python2", "rounds", "up", "in", "that", "case", "and", "I", "want", "consistent", "behaviour", "here", "is", "the", "roundeven", "function", "." ]
[ "\"\"\"Round to next even integer number in case of `X.5`\n\n In Python3 this is the same as `round(x, 0)`, but since Python2 rounds up\n in that case and I want consistent behaviour, here is the roundeven function.\n\n Parameters:\n x: float The number to round.\n\n Returns:\n int: floor(x) if x - floor(x) < 0.5\n ceil(x) if x - floor(x) > 0.5\n next even of x if x - floor(x) == 0.5\n \"\"\"", "# same behaviour as in python2" ]
[ { "param": "x", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": "int" } ], "raises": [], "params": [ { "identifier": "x", "type": null, "docstring": "float The number to round.", "docstring_tokens": [ "float", "The", "number", "to", "round", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import math def roundeven(x): if math.isinf(x) or math.isnan(x): return x x_r = round(x) if abs(x_r - x) == 0.5: return int(2.0 * round(x / 2)) return x_r
476
868
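A few illustrative calls showing the half-to-even behaviour (and the NaN passthrough):

for x in (0.5, 1.5, 2.5, 3.5, 2.4, 2.6, float("nan")):
    print(x, "->", roundeven(x))
# 0.5 -> 0, 1.5 -> 2, 2.5 -> 2, 3.5 -> 4, 2.4 -> 2, 2.6 -> 3, nan -> nan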
98651fc248a52c06302df8944670d947917092b8
KejiZhaoLab/cLoops2
cLoops2/callPeaks.py
[ "BSD-3-Clause" ]
Python
sortPeaksByLength
<not_specific>
def sortPeaksByLength(peaks): """ Sort peaks from shorter to longer. """ #sort peaks according to size, from small to big peaks = sorted( peaks, key=lambda peak: peak.length ) return peaks
Sort peaks from shorter to longer.
Sort peaks from shorter to longer.
[ "Sort", "peaks", "from", "shorter", "to", "longer", "." ]
def sortPeaksByLength(peaks): peaks = sorted( peaks, key=lambda peak: peak.length ) return peaks
[ "def", "sortPeaksByLength", "(", "peaks", ")", ":", "peaks", "=", "sorted", "(", "peaks", ",", "key", "=", "lambda", "peak", ":", "peak", ".", "length", ")", "return", "peaks" ]
Sort peaks from shorter to longer.
[ "Sort", "peaks", "from", "shorter", "to", "longer", "." ]
[ "\"\"\"\n Sort peaks from shorter to longer.\n \"\"\"", "#sort peaks according to size, from small to big" ]
[ { "param": "peaks", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "peaks", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def sortPeaksByLength(peaks): peaks = sorted( peaks, key=lambda peak: peak.length ) return peaks
477
863
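A sketch with stand-in peak objects (anything with a `length` attribute); the `sorted` call orders ascending, in line with the inline comment:

from types import SimpleNamespace

peaks = [SimpleNamespace(length=300), SimpleNamespace(length=50), SimpleNamespace(length=120)]
print([p.length for p in sortPeaksByLength(peaks)])   # [50, 120, 300]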
78155a9c05cf01577406ed619f2e48891a2346c9
eea/volto-measurescatalog
discodata/importer.py
[ "MIT" ]
Python
nice_print
null
def nice_print(data): """ Nice print of json data, useful for debugging """ try: print(json.dumps(data, indent=4, sort_keys=True)) except TypeError: print(data)
Nice print of json data, useful for debugging
Nice print of json data, useful for debugging
[ "Nice", "print", "of", "json", "data", "useful", "for", "debugging" ]
def nice_print(data): try: print(json.dumps(data, indent=4, sort_keys=True)) except TypeError: print(data)
[ "def", "nice_print", "(", "data", ")", ":", "try", ":", "print", "(", "json", ".", "dumps", "(", "data", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")", ")", "except", "TypeError", ":", "print", "(", "data", ")" ]
Nice print of json data, useful for debugging
[ "Nice", "print", "of", "json", "data", "useful", "for", "debugging" ]
[ "\"\"\" Nice print of json data, useful for debugging\n \"\"\"" ]
[ { "param": "data", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import json def nice_print(data): try: print(json.dumps(data, indent=4, sort_keys=True)) except TypeError: print(data)
478
121
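A quick sketch of both paths: JSON-serializable data is pretty-printed with sorted keys, and anything json cannot encode falls back to a plain print:

nice_print({"b": 2, "a": 1})     # indented JSON with keys sorted
nice_print({"when": object()})   # not JSON-serializable -> printed as-is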
6c1baa1fe0382cc37b466f68a4aa43f89bdf2379
feichtip/selanneal
src/selanneal/optimise.py
[ "MIT" ]
Python
pd_selection
<not_specific>
def pd_selection(axes, best_state, precision=4, roundDownUp=False): """ precision [int]: floating point precision to which the bounds are rounded roundDownUp [bool]: round lower bound down and upper bound up """ selection = [] for axis, state in zip(axes, best_state): if axis.value(state[1]) is None: lb = axis.value(state[0]) ub = axis.value(state[1] - 1) if roundDownUp: lb = math.floor(lb * 10**precision) / 10**precision ub = math.ceil(ub * 10**precision) / 10**precision selection.append(f'{lb:.{precision}f} <= {axis.metadata} <= {ub:.{precision}f}') else: lb = axis.value(state[0]) ub = axis.value(state[1]) if roundDownUp: lb = math.floor(lb * 10**precision) / 10**precision ub = math.ceil(ub * 10**precision) / 10**precision selection.append(f'{lb:.{precision}f} <= {axis.metadata} < {ub:.{precision}f}') return selection
precision [int]: floating point precision to which the bounds are rounded roundDownUp [bool]: round lower bound down and upper bound up
precision [int]: floating point precision to which the bounds are rounded roundDownUp [bool]: round lower bound down and upper bound up
[ "precision", "[", "int", "]", ":", "floating", "point", "precision", "to", "which", "the", "bounds", "are", "rounded", "roundDownUp", "[", "bool", "]", ":", "round", "lower", "bound", "down", "and", "upper", "bound", "up" ]
def pd_selection(axes, best_state, precision=4, roundDownUp=False): selection = [] for axis, state in zip(axes, best_state): if axis.value(state[1]) is None: lb = axis.value(state[0]) ub = axis.value(state[1] - 1) if roundDownUp: lb = math.floor(lb * 10**precision) / 10**precision ub = math.ceil(ub * 10**precision) / 10**precision selection.append(f'{lb:.{precision}f} <= {axis.metadata} <= {ub:.{precision}f}') else: lb = axis.value(state[0]) ub = axis.value(state[1]) if roundDownUp: lb = math.floor(lb * 10**precision) / 10**precision ub = math.ceil(ub * 10**precision) / 10**precision selection.append(f'{lb:.{precision}f} <= {axis.metadata} < {ub:.{precision}f}') return selection
[ "def", "pd_selection", "(", "axes", ",", "best_state", ",", "precision", "=", "4", ",", "roundDownUp", "=", "False", ")", ":", "selection", "=", "[", "]", "for", "axis", ",", "state", "in", "zip", "(", "axes", ",", "best_state", ")", ":", "if", "axis", ".", "value", "(", "state", "[", "1", "]", ")", "is", "None", ":", "lb", "=", "axis", ".", "value", "(", "state", "[", "0", "]", ")", "ub", "=", "axis", ".", "value", "(", "state", "[", "1", "]", "-", "1", ")", "if", "roundDownUp", ":", "lb", "=", "math", ".", "floor", "(", "lb", "*", "10", "**", "precision", ")", "/", "10", "**", "precision", "ub", "=", "math", ".", "ceil", "(", "ub", "*", "10", "**", "precision", ")", "/", "10", "**", "precision", "selection", ".", "append", "(", "f'{lb:.{precision}f} <= {axis.metadata} <= {ub:.{precision}f}'", ")", "else", ":", "lb", "=", "axis", ".", "value", "(", "state", "[", "0", "]", ")", "ub", "=", "axis", ".", "value", "(", "state", "[", "1", "]", ")", "if", "roundDownUp", ":", "lb", "=", "math", ".", "floor", "(", "lb", "*", "10", "**", "precision", ")", "/", "10", "**", "precision", "ub", "=", "math", ".", "ceil", "(", "ub", "*", "10", "**", "precision", ")", "/", "10", "**", "precision", "selection", ".", "append", "(", "f'{lb:.{precision}f} <= {axis.metadata} < {ub:.{precision}f}'", ")", "return", "selection" ]
precision [int]: floating point precision to which the bounds are rounded roundDownUp [bool]: round lower bound down and upper bound up
[ "precision", "[", "int", "]", ":", "floating", "point", "precision", "to", "which", "the", "bounds", "are", "rounded", "roundDownUp", "[", "bool", "]", ":", "round", "lower", "bound", "down", "and", "upper", "bound", "up" ]
[ "\"\"\"\n precision [int]: floating point precision to which the bounds are rounded\n roundDownUp [bool]: round lower bound down and upper bound up\n \"\"\"" ]
[ { "param": "axes", "type": null }, { "param": "best_state", "type": null }, { "param": "precision", "type": null }, { "param": "roundDownUp", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "axes", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "best_state", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "precision", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "roundDownUp", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import math def pd_selection(axes, best_state, precision=4, roundDownUp=False): selection = [] for axis, state in zip(axes, best_state): if axis.value(state[1]) is None: lb = axis.value(state[0]) ub = axis.value(state[1] - 1) if roundDownUp: lb = math.floor(lb * 10**precision) / 10**precision ub = math.ceil(ub * 10**precision) / 10**precision selection.append(f'{lb:.{precision}f} <= {axis.metadata} <= {ub:.{precision}f}') else: lb = axis.value(state[0]) ub = axis.value(state[1]) if roundDownUp: lb = math.floor(lb * 10**precision) / 10**precision ub = math.ceil(ub * 10**precision) / 10**precision selection.append(f'{lb:.{precision}f} <= {axis.metadata} < {ub:.{precision}f}') return selection
479
468
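A hedged sketch with a hypothetical axis stand-in (the real axes come from the selanneal optimiser; only the `metadata` attribute and `value(i)` method are assumed here):

class FakeAxis:
    # Illustrative stand-in, not part of the package.
    def __init__(self, name, edges):
        self.metadata = name
        self._edges = edges
    def value(self, i):
        return self._edges[i] if i < len(self._edges) else None

axes = [FakeAxis("pt", [0.0, 0.5, 1.0, 2.0]), FakeAxis("eta", [-2.5, 0.0, 2.5])]
best_state = [(1, 3), (0, 2)]   # (lower-edge index, upper-edge index) per axis
for cut in pd_selection(axes, best_state, precision=2):
    print(cut)
# 0.50 <= pt < 2.00
# -2.50 <= eta < 2.50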
8049ef8e693a61bf53d749980cc52c0be475b468
bansallab/roundup
_210_past.py
[ "MIT" ]
Python
is_sale
<not_specific>
def is_sale(this_line): """Determine whether a given line describes a sale of cattle.""" is_not_succinct = len(re.split(r'\s{3,}', this_line)) > 3 has_price = re.search(r'[0-9]+\.[0-9]{2}', this_line) return bool(has_price and is_not_succinct)
Determine whether a given line describes a sale of cattle.
Determine whether a given line describes a sale of cattle.
[ "Determine", "whether", "a", "given", "line", "describes", "a", "sale", "of", "cattle", "." ]
def is_sale(this_line): is_not_succinct = len(re.split(r'\s{3,}', this_line)) > 3 has_price = re.search(r'[0-9]+\.[0-9]{2}', this_line) return bool(has_price and is_not_succinct)
[ "def", "is_sale", "(", "this_line", ")", ":", "is_not_succinct", "=", "len", "(", "re", ".", "split", "(", "r'\\s{3,}'", ",", "this_line", ")", ")", ">", "3", "has_price", "=", "re", ".", "search", "(", "r'[0-9]+\\.[0-9]{2}'", ",", "this_line", ")", "return", "bool", "(", "has_price", "and", "is_not_succinct", ")" ]
Determine whether a given line describes a sale of cattle.
[ "Determine", "whether", "a", "given", "line", "describes", "a", "sale", "of", "cattle", "." ]
[ "\"\"\"Determine whether a given line describes a sale of cattle.\"\"\"" ]
[ { "param": "this_line", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "this_line", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def is_sale(this_line): is_not_succinct = len(re.split(r'\s{3,}', this_line)) > 3 has_price = re.search(r'[0-9]+\.[0-9]{2}', this_line) return bool(has_price and is_not_succinct)
480
699
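Two illustrative lines — a wide, price-bearing sale row and a plain notice:

lines = [
    "SMITH RANCH      12 blk strs     645    162.50",
    "Next sale: Tuesday 10 AM",
]
for line in lines:
    print(is_sale(line))   # True, then False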
c68e63e93e1dae172ea1dc4d0eae3629812addf4
Mati86/sonic-mgmt
tests/common/dualtor/tunnel_traffic_utils.py
[ "Apache-2.0" ]
Python
_find_ipv4_lo_addr
<not_specific>
def _find_ipv4_lo_addr(config_facts): """Find the ipv4 Loopback0 address.""" for addr in config_facts["LOOPBACK_INTERFACE"]["Loopback0"]: if isinstance(ipaddress.ip_network(addr), ipaddress.IPv4Network): return addr.split("/")[0]
Find the ipv4 Loopback0 address.
Find the ipv4 Loopback0 address.
[ "Find", "the", "ipv4", "Loopback0", "address", "." ]
def _find_ipv4_lo_addr(config_facts): for addr in config_facts["LOOPBACK_INTERFACE"]["Loopback0"]: if isinstance(ipaddress.ip_network(addr), ipaddress.IPv4Network): return addr.split("/")[0]
[ "def", "_find_ipv4_lo_addr", "(", "config_facts", ")", ":", "for", "addr", "in", "config_facts", "[", "\"LOOPBACK_INTERFACE\"", "]", "[", "\"Loopback0\"", "]", ":", "if", "isinstance", "(", "ipaddress", ".", "ip_network", "(", "addr", ")", ",", "ipaddress", ".", "IPv4Network", ")", ":", "return", "addr", ".", "split", "(", "\"/\"", ")", "[", "0", "]" ]
Find the ipv4 Loopback0 address.
[ "Find", "the", "ipv4", "Loopback0", "address", "." ]
[ "\"\"\"Find the ipv4 Loopback0 address.\"\"\"" ]
[ { "param": "config_facts", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "config_facts", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import ipaddress def _find_ipv4_lo_addr(config_facts): for addr in config_facts["LOOPBACK_INTERFACE"]["Loopback0"]: if isinstance(ipaddress.ip_network(addr), ipaddress.IPv4Network): return addr.split("/")[0]
481
853
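A small sketch with a made-up config_facts shape (one IPv4 and one IPv6 loopback address):

config_facts = {
    "LOOPBACK_INTERFACE": {
        "Loopback0": ["10.1.0.32/32", "FC00:1::32/128"],   # illustrative addresses
    }
}
print(_find_ipv4_lo_addr(config_facts))   # 10.1.0.32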
b228a6e0ad93037ee605697cba50fd710057b2ec
samuelebortolotti/wikidump
utils/metrics_uploader/wikibreak_metrics.py
[ "MIT" ]
Python
upload_metrics
null
def upload_metrics(connection: connection, cursor: cursor, metrics: list[Metric]): """ Insert the metrics straight into the database Args: cursor ([type]): database cursor metrics (list[Metric]): list of metrics """ for metric in metrics: if not metric.wikibreak_category2: cursor.execute("INSERT INTO templates (name, year, month, category, wikibreak_category1, wikibreak_subcategory, amount, cumulative_amount) VALUES('{}', {}, {}, '{}', '{}', '{}', {}, {})".format(metric.name, metric.year, metric.month, metric.category, metric.wikibreak_category1, metric.wikibreak_subcategory, metric.amount, metric.cumulative_amount)) else: cursor.execute("INSERT INTO templates (name, year, month, category, wikibreak_category1, wikibreak_category2, wikibreak_subcategory, amount, cumulative_amount) VALUES('{}', {}, {}, '{}', '{}', '{}', '{}', {}, {})".format(metric.name, metric.year, metric.month, metric.category, metric.wikibreak_category1, metric.wikibreak_category2, metric.wikibreak_subcategory, metric.amount, metric.cumulative_amount)) connection.commit() print("Metrics updated successfully")
Insert the metrics straight into the database Args: cursor ([type]): database cursor metrics (list[Metric]): list of metrics
Insert the metrics straight into the database
[ "Insert", "the", "metrics", "straight", "into", "the", "database" ]
def upload_metrics(connection: connection, cursor: cursor, metrics: list[Metric]): for metric in metrics: if not metric.wikibreak_category2: cursor.execute("INSERT INTO templates (name, year, month, category, wikibreak_category1, wikibreak_subcategory, amount, cumulative_amount) VALUES('{}', {}, {}, '{}', '{}', '{}', {}, {})".format(metric.name, metric.year, metric.month, metric.category, metric.wikibreak_category1, metric.wikibreak_subcategory, metric.amount, metric.cumulative_amount)) else: cursor.execute("INSERT INTO templates (name, year, month, category, wikibreak_category1, wikibreak_category2, wikibreak_subcategory, amount, cumulative_amount) VALUES('{}', {}, {}, '{}', '{}', '{}', '{}', {}, {})".format(metric.name, metric.year, metric.month, metric.category, metric.wikibreak_category1, metric.wikibreak_category2, metric.wikibreak_subcategory, metric.amount, metric.cumulative_amount)) connection.commit() print("Metrics updated successfully")
[ "def", "upload_metrics", "(", "connection", ":", "connection", ",", "cursor", ":", "cursor", ",", "metrics", ":", "list", "[", "Metric", "]", ")", ":", "for", "metric", "in", "metrics", ":", "if", "not", "metric", ".", "wikibreak_category2", ":", "cursor", ".", "execute", "(", "\"INSERT INTO templates (name, year, month, category, wikibreak_category1, wikibreak_subcategory, amount, cumulative_amount) VALUES('{}', {}, {}, '{}', '{}', '{}', {}, {})\"", ".", "format", "(", "metric", ".", "name", ",", "metric", ".", "year", ",", "metric", ".", "month", ",", "metric", ".", "category", ",", "metric", ".", "wikibreak_category1", ",", "metric", ".", "wikibreak_subcategory", ",", "metric", ".", "amount", ",", "metric", ".", "cumulative_amount", ")", ")", "else", ":", "cursor", ".", "execute", "(", "\"INSERT INTO templates (name, year, month, category, wikibreak_category1, wikibreak_category2, wikibreak_subcategory, amount, cumulative_amount) VALUES('{}', {}, {}, '{}', '{}', '{}', '{}', {}, {})\"", ".", "format", "(", "metric", ".", "name", ",", "metric", ".", "year", ",", "metric", ".", "month", ",", "metric", ".", "category", ",", "metric", ".", "wikibreak_category1", ",", "metric", ".", "wikibreak_category2", ",", "metric", ".", "wikibreak_subcategory", ",", "metric", ".", "amount", ",", "metric", ".", "cumulative_amount", ")", ")", "connection", ".", "commit", "(", ")", "print", "(", "\"Metrics updated successfully\"", ")" ]
Insert the metrics straight into the database
[ "Insert", "the", "metrics", "straight", "into", "the", "database" ]
[ "\"\"\"\n Insert the metrics straight into the database\n\n Args:\n cursor ([type]): database cursor\n metrics (list[Metric]): list of metrics\n \"\"\"" ]
[ { "param": "connection", "type": "connection" }, { "param": "cursor", "type": "cursor" }, { "param": "metrics", "type": "list[Metric]" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "connection", "type": "connection", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "cursor", "type": "cursor", "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "metrics", "type": "list[Metric]", "docstring": "list of metrics", "docstring_tokens": [ "list", "of", "metrics" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def upload_metrics(connection: connection, cursor: cursor, metrics: list[Metric]): for metric in metrics: if not metric.wikibreak_category2: cursor.execute("INSERT INTO templates (name, year, month, category, wikibreak_category1, wikibreak_subcategory, amount, cumulative_amount) VALUES('{}', {}, {}, '{}', '{}', '{}', {}, {})".format(metric.name, metric.year, metric.month, metric.category, metric.wikibreak_category1, metric.wikibreak_subcategory, metric.amount, metric.cumulative_amount)) else: cursor.execute("INSERT INTO templates (name, year, month, category, wikibreak_category1, wikibreak_category2, wikibreak_subcategory, amount, cumulative_amount) VALUES('{}', {}, {}, '{}', '{}', '{}', '{}', {}, {})".format(metric.name, metric.year, metric.month, metric.category, metric.wikibreak_category1, metric.wikibreak_category2, metric.wikibreak_subcategory, metric.amount, metric.cumulative_amount)) connection.commit() print("Metrics updated successfully")
482
250
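The INSERTs above splice values in with str.format; as a hedged aside, a hypothetical variant of the same statement using DB-API parameter placeholders (psycopg2 style) sidesteps quoting and escaping problems. Column names and Metric fields are taken from the function above; everything else is assumed, and a missing wikibreak_category2 is simply written as NULL rather than omitting the column:

def upload_metrics_param(connection, cursor, metrics):
    # Same table and columns as above, but values are bound by the driver.
    sql = ("INSERT INTO templates (name, year, month, category, wikibreak_category1, "
           "wikibreak_category2, wikibreak_subcategory, amount, cumulative_amount) "
           "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)")
    for m in metrics:
        cursor.execute(sql, (m.name, m.year, m.month, m.category,
                             m.wikibreak_category1, m.wikibreak_category2,
                             m.wikibreak_subcategory, m.amount, m.cumulative_amount))
    connection.commit()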
0bba2e1db0e0b23acaddf6766de21544fbc06b94
karttur/geoimagine03-grass
grid/grid.py
[ "BSD-3-Clause" ]
Python
select
null
def select(parms, ptype): """Select only a certain type of parameters. :param parms: a DictType parameter with inputs or outputs of a Module class :type parms: DictType parameters :param ptype: String define the type of parameter that we want to select, valid ptype are: 'raster', 'vector', 'group' :type ptype: str :returns: An iterator with the value of the parameter. >>> slp = Module('r.slope.aspect', ... elevation='ele', slope='slp', aspect='asp', ... run_=False) >>> for rast in select(slp.outputs, 'raster'): ... print(rast) ... slp asp """ for k in parms: par = parms[k] if par.type == ptype or par.typedesc == ptype and par.value: if par.multiple: for val in par.value: yield val else: yield par.value
Select only a certain type of parameters. :param parms: a DictType parameter with inputs or outputs of a Module class :type parms: DictType parameters :param ptype: String define the type of parameter that we want to select, valid ptype are: 'raster', 'vector', 'group' :type ptype: str :returns: An iterator with the value of the parameter. >>> slp = Module('r.slope.aspect', ... elevation='ele', slope='slp', aspect='asp', ... run_=False) >>> for rast in select(slp.outputs, 'raster'): ... print(rast) ... slp asp
Select only a certain type of parameters.
[ "Select", "only", "a", "certain", "type", "of", "parameters", "." ]
def select(parms, ptype): for k in parms: par = parms[k] if par.type == ptype or par.typedesc == ptype and par.value: if par.multiple: for val in par.value: yield val else: yield par.value
[ "def", "select", "(", "parms", ",", "ptype", ")", ":", "for", "k", "in", "parms", ":", "par", "=", "parms", "[", "k", "]", "if", "par", ".", "type", "==", "ptype", "or", "par", ".", "typedesc", "==", "ptype", "and", "par", ".", "value", ":", "if", "par", ".", "multiple", ":", "for", "val", "in", "par", ".", "value", ":", "yield", "val", "else", ":", "yield", "par", ".", "value" ]
Select only a certain type of parameters.
[ "Select", "only", "a", "certain", "type", "of", "parameters", "." ]
[ "\"\"\"Select only a certain type of parameters.\n\n :param parms: a DictType parameter with inputs or outputs of a Module class\n :type parms: DictType parameters\n :param ptype: String define the type of parameter that we want to select,\n valid ptype are: 'raster', 'vector', 'group'\n :type ptype: str\n :returns: An iterator with the value of the parameter.\n\n >>> slp = Module('r.slope.aspect',\n ... elevation='ele', slope='slp', aspect='asp',\n ... run_=False)\n >>> for rast in select(slp.outputs, 'raster'):\n ... print(rast)\n ...\n slp\n asp\n \"\"\"" ]
[ { "param": "parms", "type": null }, { "param": "ptype", "type": null } ]
{ "returns": [ { "docstring": "An iterator with the value of the parameter.\n>>> slp = Module('r.slope.aspect',\nelevation='ele', slope='slp', aspect='asp',\nrun_=False)\n>>> for rast in select(slp.outputs, 'raster'):\nprint(rast)\n\nslp\nasp", "docstring_tokens": [ "An", "iterator", "with", "the", "value", "of", "the", "parameter", ".", ">>>", "slp", "=", "Module", "(", "'", "r", ".", "slope", ".", "aspect", "'", "elevation", "=", "'", "ele", "'", "slope", "=", "'", "slp", "'", "aspect", "=", "'", "asp", "'", "run_", "=", "False", ")", ">>>", "for", "rast", "in", "select", "(", "slp", ".", "outputs", "'", "raster", "'", ")", ":", "print", "(", "rast", ")", "slp", "asp" ], "type": null } ], "raises": [], "params": [ { "identifier": "parms", "type": null, "docstring": "a DictType parameter with inputs or outputs of a Module class", "docstring_tokens": [ "a", "DictType", "parameter", "with", "inputs", "or", "outputs", "of", "a", "Module", "class" ], "default": null, "is_optional": null }, { "identifier": "ptype", "type": null, "docstring": "String define the type of parameter that we want to select,\nvalid ptype are: 'raster', 'vector', 'group'", "docstring_tokens": [ "String", "define", "the", "type", "of", "parameter", "that", "we", "want", "to", "select", "valid", "ptype", "are", ":", "'", "raster", "'", "'", "vector", "'", "'", "group", "'" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def select(parms, ptype): for k in parms: par = parms[k] if par.type == ptype or par.typedesc == ptype and par.value: if par.multiple: for val in par.value: yield val else: yield par.value
483
296
f2f7806718902c137acf1a84580c4a62b18d64ba
pradyunsg/warehouse
warehouse/oidc/models.py
[ "Apache-2.0" ]
Python
all_known_claims
<not_specific>
def all_known_claims(cls): """ Returns all claims "known" to this provider. """ return ( cls.__verifiable_claims__.keys() | cls.__preverified_claims__ | cls.__unchecked_claims__ )
Returns all claims "known" to this provider.
Returns all claims "known" to this provider.
[ "Returns", "all", "claims", "\"", "known", "\"", "to", "this", "provider", "." ]
def all_known_claims(cls): return ( cls.__verifiable_claims__.keys() | cls.__preverified_claims__ | cls.__unchecked_claims__ )
[ "def", "all_known_claims", "(", "cls", ")", ":", "return", "(", "cls", ".", "__verifiable_claims__", ".", "keys", "(", ")", "|", "cls", ".", "__preverified_claims__", "|", "cls", ".", "__unchecked_claims__", ")" ]
Returns all claims "known" to this provider.
[ "Returns", "all", "claims", "\"", "known", "\"", "to", "this", "provider", "." ]
[ "\"\"\"\n Returns all claims \"known\" to this provider.\n \"\"\"" ]
[ { "param": "cls", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def all_known_claims(cls): return ( cls.__verifiable_claims__.keys() | cls.__preverified_claims__ | cls.__unchecked_claims__ )
484
793
039204be1a697d925c243c47fa78983d7b76e5bf
matiasfreitasguimaraes/PyLets
PyLets/Others.py
[ "MIT" ]
Python
find_gcd
<not_specific>
def find_gcd(NumpyArray): """Returns the Greatest Common Divisor of a Numpy Array""" def gcd(x, y): while y > 0: x, y = y, x % y return x Ngcd = NumpyArray[0] for i in range(1, len(NumpyArray)): Ngcd = gcd(Ngcd, NumpyArray[i]) return Ngcd
Returns the Greatest Common Divisor of a Numpy Array
Returns the Greatest Common Divisor of a Numpy Array
[ "Returns", "the", "Greatest", "Common", "Divisor", "of", "a", "Numpy", "Array" ]
def find_gcd(NumpyArray): def gcd(x, y): while y > 0: x, y = y, x % y return x Ngcd = NumpyArray[0] for i in range(1, len(NumpyArray)): Ngcd = gcd(Ngcd, NumpyArray[i]) return Ngcd
[ "def", "find_gcd", "(", "NumpyArray", ")", ":", "def", "gcd", "(", "x", ",", "y", ")", ":", "while", "y", ">", "0", ":", "x", ",", "y", "=", "y", ",", "x", "%", "y", "return", "x", "Ngcd", "=", "NumpyArray", "[", "0", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "NumpyArray", ")", ")", ":", "Ngcd", "=", "gcd", "(", "Ngcd", ",", "NumpyArray", "[", "i", "]", ")", "return", "Ngcd" ]
Returns the Greatest Common Divisor of a Numpy Array
[ "Returns", "the", "Greatest", "Common", "Divisor", "of", "a", "Numpy", "Array" ]
[ "\"\"\"Returns the Greatest Common Divisor of a Numpy Array\"\"\"" ]
[ { "param": "NumpyArray", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "NumpyArray", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def find_gcd(NumpyArray): def gcd(x, y): while y > 0: x, y = y, x % y return x Ngcd = NumpyArray[0] for i in range(1, len(NumpyArray)): Ngcd = gcd(Ngcd, NumpyArray[i]) return Ngcd
485
153
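Two quick calls; NumPy is assumed for the first, but any indexable sequence of integers works:

import numpy as np

print(find_gcd(np.array([12, 18, 30])))   # 6
print(find_gcd([21, 14, 49]))             # 7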
7f17e2387e38b2e3c1bdd95e86fd031c74486481
hefen1/chromium
testing/legion/rpc_methods.py
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
Python
ReadStderr
<not_specific>
def ReadStderr(cls, key): """Returns all stderr read since the last call to ReadStderr. See ReadStdout for additional details. """ proc = cls._processes[key] with proc.data_lock: # Perform a "read" on the stderr data stderr = proc.stderr proc.stderr = '' return stderr
Returns all stderr read since the last call to ReadStderr. See ReadStdout for additional details.
Returns all stderr read since the last call to ReadStderr. See ReadStdout for additional details.
[ "Returns", "all", "stderr", "read", "since", "the", "last", "call", "to", "ReadStderr", ".", "See", "ReadStdout", "for", "additional", "details", "." ]
def ReadStderr(cls, key): proc = cls._processes[key] with proc.data_lock: stderr = proc.stderr proc.stderr = '' return stderr
[ "def", "ReadStderr", "(", "cls", ",", "key", ")", ":", "proc", "=", "cls", ".", "_processes", "[", "key", "]", "with", "proc", ".", "data_lock", ":", "stderr", "=", "proc", ".", "stderr", "proc", ".", "stderr", "=", "''", "return", "stderr" ]
Returns all stderr read since the last call to ReadStderr.
[ "Returns", "all", "stderr", "read", "since", "the", "last", "call", "to", "ReadStderr", "." ]
[ "\"\"\"Returns all stderr read since the last call to ReadStderr.\n\n See ReadStdout for additional details.\n \"\"\"", "# Perform a \"read\" on the stderr data" ]
[ { "param": "cls", "type": null }, { "param": "key", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "key", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def ReadStderr(cls, key): proc = cls._processes[key] with proc.data_lock: stderr = proc.stderr proc.stderr = '' return stderr
486
501
629f6616824849f45690e06418a1c021ad59f79f
pkthein/sparts_all_fam
proto/tp_organization_1.0/build/lib/sparts_organization/organization_cli.py
[ "Apache-2.0" ]
Python
add_retrieve_parser
null
def add_retrieve_parser(subparsers, parent_parser): """ Bash "retrieve" subcommand handler for the Transaction Family : Organization Args: subparsers (ArgumentParser): Subcommand parser parent_parser (ArgumentParser): ArgumentParser object containing all the parameters """ parser = subparsers.add_parser("retrieve", parents=[parent_parser]) parser.add_argument( "org_id", type=str, help="an identifier for the organization") parser.add_argument( "-a", "--all", action="store_true", default=False, help="show history of uuid") parser.add_argument( "--range", nargs=2, metavar=("START", "END"), default=None, help="show history of uuid within the range; FORMAT : yyyymmdd")
Bash "retrieve" subcommand handler for the Transaction Family : Organization Args: subparsers (ArgumentParser): Subcommand parser parent_parser (ArgumentParser): ArgumentParser object containing all the parameters
Bash "retrieve" subcommand handler for the Transaction Family : Organization
[ "Bash", "\"", "retrieve", "\"", "subcommand", "handler", "for", "the", "Transaction", "Family", ":", "Organization" ]
def add_retrieve_parser(subparsers, parent_parser): parser = subparsers.add_parser("retrieve", parents=[parent_parser]) parser.add_argument( "org_id", type=str, help="an identifier for the organization") parser.add_argument( "-a", "--all", action="store_true", default=False, help="show history of uuid") parser.add_argument( "--range", nargs=2, metavar=("START", "END"), default=None, help="show history of uuid within the range; FORMAT : yyyymmdd")
[ "def", "add_retrieve_parser", "(", "subparsers", ",", "parent_parser", ")", ":", "parser", "=", "subparsers", ".", "add_parser", "(", "\"retrieve\"", ",", "parents", "=", "[", "parent_parser", "]", ")", "parser", ".", "add_argument", "(", "\"org_id\"", ",", "type", "=", "str", ",", "help", "=", "\"an identifier for the organization\"", ")", "parser", ".", "add_argument", "(", "\"-a\"", ",", "\"--all\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ",", "help", "=", "\"show history of uuid\"", ")", "parser", ".", "add_argument", "(", "\"--range\"", ",", "nargs", "=", "2", ",", "metavar", "=", "(", "\"START\"", ",", "\"END\"", ")", ",", "default", "=", "None", ",", "help", "=", "\"show history of uuid within the range; FORMAT : yyyymmdd\"", ")" ]
Bash "retrieve" subcommand handler for the Transaction Family : Organization
[ "Bash", "\"", "retrieve", "\"", "subcommand", "handler", "for", "the", "Transaction", "Family", ":", "Organization" ]
[ "\"\"\"\n Bash \"retrieve\" subcommand handler for the Transaction Family : Organization\n \n Args:\n subparsers (ArgumentParser): Subcommand parser\n parent_parser (ArgumentParser):\n ArgumentParser object containing all the parameters\n \n \"\"\"" ]
[ { "param": "subparsers", "type": null }, { "param": "parent_parser", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "subparsers", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "parent_parser", "type": null, "docstring": "ArgumentParser object containing all the parameters", "docstring_tokens": [ "ArgumentParser", "object", "containing", "all", "the", "parameters" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def add_retrieve_parser(subparsers, parent_parser): parser = subparsers.add_parser("retrieve", parents=[parent_parser]) parser.add_argument( "org_id", type=str, help="an identifier for the organization") parser.add_argument( "-a", "--all", action="store_true", default=False, help="show history of uuid") parser.add_argument( "--range", nargs=2, metavar=("START", "END"), default=None, help="show history of uuid within the range; FORMAT : yyyymmdd")
487
827
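A sketch wiring the subcommand into an argparse tree; the parent parser must be built with add_help=False so the subparser can add its own -h:

import argparse

parent = argparse.ArgumentParser(add_help=False)   # shared flags would go here
top = argparse.ArgumentParser(prog="organization")
subparsers = top.add_subparsers(dest="command")
add_retrieve_parser(subparsers, parent)

args = top.parse_args(["retrieve", "org-123", "--range", "20190101", "20191231"])
print(args.org_id, args.all, args.range)   # org-123 False ['20190101', '20191231']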
b9f53d9f43f86939b2445b283768bc02d74c9933
giserh/book-python
machine-learning/solution/ml-linear-regression.py
[ "MIT" ]
Python
cal_mean
<not_specific>
def cal_mean(readings): """Function to calculate the mean value of the input readings""" readings_total = sum(readings) number_of_readings = len(readings) mean = readings_total / float(number_of_readings) return mean
Function to calculate the mean value of the input readings
Function to calculate the mean value of the input readings
[ "Function", "to", "calculate", "the", "mean", "value", "of", "the", "input", "readings" ]
def cal_mean(readings): readings_total = sum(readings) number_of_readings = len(readings) mean = readings_total / float(number_of_readings) return mean
[ "def", "cal_mean", "(", "readings", ")", ":", "readings_total", "=", "sum", "(", "readings", ")", "number_of_readings", "=", "len", "(", "readings", ")", "mean", "=", "readings_total", "/", "float", "(", "number_of_readings", ")", "return", "mean" ]
Function to calculate the mean value of the input readings
[ "Function", "to", "calculate", "the", "mean", "value", "of", "the", "input", "readings" ]
[ "\"\"\"Function to calculate the mean value of the input readings\"\"\"" ]
[ { "param": "readings", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "readings", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def cal_mean(readings): readings_total = sum(readings) number_of_readings = len(readings) mean = readings_total / float(number_of_readings) return mean
488
260
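Two illustrative calls; an empty sequence would raise ZeroDivisionError:

print(cal_mean([3, 4, 5, 6]))   # 4.5
print(cal_mean([1.5, 2.5]))     # 2.0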
793f41ce42c72b98acbc170a04df212c8b9a1f03
databand-ai/dbnd
modules/dbnd/src/dbnd/_core/tracking/script_tracking_manager.py
[ "Apache-2.0" ]
Python
dbnd_tracking_stop
null
def dbnd_tracking_stop(): """ Stops and clears the script tracking if exists """ global _dbnd_script_manager if _dbnd_script_manager: _dbnd_script_manager.stop() _dbnd_script_manager = None
Stops and clears the script tracking if exists
Stops and clears the script tracking if exists
[ "Stops", "and", "clears", "the", "script", "tracking", "if", "exists" ]
def dbnd_tracking_stop(): global _dbnd_script_manager if _dbnd_script_manager: _dbnd_script_manager.stop() _dbnd_script_manager = None
[ "def", "dbnd_tracking_stop", "(", ")", ":", "global", "_dbnd_script_manager", "if", "_dbnd_script_manager", ":", "_dbnd_script_manager", ".", "stop", "(", ")", "_dbnd_script_manager", "=", "None" ]
Stops and clears the script tracking if exists
[ "Stops", "and", "clears", "the", "script", "tracking", "if", "exists" ]
[ "\"\"\"\n Stops and clears the script tracking if exists\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def dbnd_tracking_stop(): global _dbnd_script_manager if _dbnd_script_manager: _dbnd_script_manager.stop() _dbnd_script_manager = None
489
718
0126bc7771521ffa1d77d2f5bda7cb6bafff3736
fochoao/cpython
Lib/site-packages/hackedit/vendor/humanfriendly/__init__.py
[ "bzip2-1.0.6", "0BSD" ]
Python
round_number
<not_specific>
def round_number(count, keep_width=False): """ Round a floating point number to two decimal places in a human friendly format. :param count: The number to format. :param keep_width: ``True`` if trailing zeros should not be stripped, ``False`` if they can be stripped. :returns: The formatted number as a string. If no decimal places are required to represent the number, they will be omitted. The main purpose of this function is to be used by functions like :func:`format_length()`, :func:`format_size()` and :func:`format_timespan()`. Here are some examples: >>> from humanfriendly import round_number >>> round_number(1) '1' >>> round_number(math.pi) '3.14' >>> round_number(5.001) '5' """ text = '%.2f' % float(count) if not keep_width: text = re.sub('0+$', '', text) text = re.sub(r'\.$', '', text) return text
Round a floating point number to two decimal places in a human friendly format. :param count: The number to format. :param keep_width: ``True`` if trailing zeros should not be stripped, ``False`` if they can be stripped. :returns: The formatted number as a string. If no decimal places are required to represent the number, they will be omitted. The main purpose of this function is to be used by functions like :func:`format_length()`, :func:`format_size()` and :func:`format_timespan()`. Here are some examples: >>> from humanfriendly import round_number >>> round_number(1) '1' >>> round_number(math.pi) '3.14' >>> round_number(5.001) '5'
Round a floating point number to two decimal places in a human friendly format.
[ "Round", "a", "floating", "point", "number", "to", "two", "decimal", "places", "in", "a", "human", "friendly", "format", "." ]
def round_number(count, keep_width=False): text = '%.2f' % float(count) if not keep_width: text = re.sub('0+$', '', text) text = re.sub(r'\.$', '', text) return text
[ "def", "round_number", "(", "count", ",", "keep_width", "=", "False", ")", ":", "text", "=", "'%.2f'", "%", "float", "(", "count", ")", "if", "not", "keep_width", ":", "text", "=", "re", ".", "sub", "(", "'0+$'", ",", "''", ",", "text", ")", "text", "=", "re", ".", "sub", "(", "r'\\.$'", ",", "''", ",", "text", ")", "return", "text" ]
Round a floating point number to two decimal places in a human friendly format.
[ "Round", "a", "floating", "point", "number", "to", "two", "decimal", "places", "in", "a", "human", "friendly", "format", "." ]
[ "\"\"\"\n Round a floating point number to two decimal places in a human friendly format.\n\n :param count: The number to format.\n :param keep_width: ``True`` if trailing zeros should not be stripped,\n ``False`` if they can be stripped.\n :returns: The formatted number as a string. If no decimal places are\n required to represent the number, they will be omitted.\n\n The main purpose of this function is to be used by functions like\n :func:`format_length()`, :func:`format_size()` and\n :func:`format_timespan()`.\n\n Here are some examples:\n\n >>> from humanfriendly import round_number\n >>> round_number(1)\n '1'\n >>> round_number(math.pi)\n '3.14'\n >>> round_number(5.001)\n '5'\n \"\"\"" ]
[ { "param": "count", "type": null }, { "param": "keep_width", "type": null } ]
{ "returns": [ { "docstring": "The formatted number as a string. If no decimal places are\nrequired to represent the number, they will be omitted.\n\nThe main purpose of this function is to be used by functions like", "docstring_tokens": [ "The", "formatted", "number", "as", "a", "string", ".", "If", "no", "decimal", "places", "are", "required", "to", "represent", "the", "number", "they", "will", "be", "omitted", ".", "The", "main", "purpose", "of", "this", "function", "is", "to", "be", "used", "by", "functions", "like" ], "type": null } ], "raises": [], "params": [ { "identifier": "count", "type": null, "docstring": "The number to format.", "docstring_tokens": [ "The", "number", "to", "format", "." ], "default": null, "is_optional": null }, { "identifier": "keep_width", "type": null, "docstring": "``True`` if trailing zeros should not be stripped,\n``False`` if they can be stripped.", "docstring_tokens": [ "`", "`", "True", "`", "`", "if", "trailing", "zeros", "should", "not", "be", "stripped", "`", "`", "False", "`", "`", "if", "they", "can", "be", "stripped", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [ { "identifier": "func", "docstring": "`format_length()`, :func:`format_size()` and", "docstring_tokens": [ "`", "format_length", "()", "`", ":", "func", ":", "`", "format_size", "()", "`", "and" ] }, { "identifier": "func", "docstring": null, "docstring_tokens": [ "None" ] } ] }
import re def round_number(count, keep_width=False): text = '%.2f' % float(count) if not keep_width: text = re.sub('0+$', '', text) text = re.sub(r'\.$', '', text) return text
491
278
e6233871f7acc5c3114625fb6057708c34018ba6
ET-NCMP/MarineQC
qc.py
[ "BSD-3-Clause" ]
Python
pentad_to_month_day
<not_specific>
def pentad_to_month_day(p): """ Given a pentad number, return the month and day of the first day in the pentad :param p: pentad number from 1 to 73 :type p: integer :return: month and day of the first day of the pentad :rtype: integer """ assert 0 < p < 74, 'p outside allowed range 1-73 ' + str(p) m = [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12] d = [1, 6, 11, 16, 21, 26, 31, 5, 10, 15, 20, 25, 2, 7, 12, 17, 22, 27, 1, 6, 11, 16, 21, 26, 1, 6, 11, 16, 21, 26, 31, 5, 10, 15, 20, 25, 30, 5, 10, 15, 20, 25, 30, 4, 9, 14, 19, 24, 29, 3, 8, 13, 18, 23, 28, 3, 8, 13, 18, 23, 28, 2, 7, 12, 17, 22, 27, 2, 7, 12, 17, 22, 27] return m[p - 1], d[p - 1]
Given a pentad number, return the month and day of the first day in the pentad :param p: pentad number from 1 to 73 :type p: integer :return: month and day of the first day of the pentad :rtype: integer
Given a pentad number, return the month and day of the first day in the pentad
[ "Given", "a", "pentad", "number", "return", "the", "month", "and", "day", "of", "the", "first", "day", "in", "the", "pentad" ]
def pentad_to_month_day(p): assert 0 < p < 74, 'p outside allowed range 1-73 ' + str(p) m = [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12] d = [1, 6, 11, 16, 21, 26, 31, 5, 10, 15, 20, 25, 2, 7, 12, 17, 22, 27, 1, 6, 11, 16, 21, 26, 1, 6, 11, 16, 21, 26, 31, 5, 10, 15, 20, 25, 30, 5, 10, 15, 20, 25, 30, 4, 9, 14, 19, 24, 29, 3, 8, 13, 18, 23, 28, 3, 8, 13, 18, 23, 28, 2, 7, 12, 17, 22, 27, 2, 7, 12, 17, 22, 27] return m[p - 1], d[p - 1]
[ "def", "pentad_to_month_day", "(", "p", ")", ":", "assert", "0", "<", "p", "<", "74", ",", "'p outside allowed range 1-73 '", "+", "str", "(", "p", ")", "m", "=", "[", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "2", ",", "2", ",", "2", ",", "2", ",", "2", ",", "3", ",", "3", ",", "3", ",", "3", ",", "3", ",", "3", ",", "4", ",", "4", ",", "4", ",", "4", ",", "4", ",", "4", ",", "5", ",", "5", ",", "5", ",", "5", ",", "5", ",", "5", ",", "5", ",", "6", ",", "6", ",", "6", ",", "6", ",", "6", ",", "6", ",", "7", ",", "7", ",", "7", ",", "7", ",", "7", ",", "7", ",", "8", ",", "8", ",", "8", ",", "8", ",", "8", ",", "8", ",", "9", ",", "9", ",", "9", ",", "9", ",", "9", ",", "9", ",", "10", ",", "10", ",", "10", ",", "10", ",", "10", ",", "10", ",", "11", ",", "11", ",", "11", ",", "11", ",", "11", ",", "11", ",", "12", ",", "12", ",", "12", ",", "12", ",", "12", ",", "12", "]", "d", "=", "[", "1", ",", "6", ",", "11", ",", "16", ",", "21", ",", "26", ",", "31", ",", "5", ",", "10", ",", "15", ",", "20", ",", "25", ",", "2", ",", "7", ",", "12", ",", "17", ",", "22", ",", "27", ",", "1", ",", "6", ",", "11", ",", "16", ",", "21", ",", "26", ",", "1", ",", "6", ",", "11", ",", "16", ",", "21", ",", "26", ",", "31", ",", "5", ",", "10", ",", "15", ",", "20", ",", "25", ",", "30", ",", "5", ",", "10", ",", "15", ",", "20", ",", "25", ",", "30", ",", "4", ",", "9", ",", "14", ",", "19", ",", "24", ",", "29", ",", "3", ",", "8", ",", "13", ",", "18", ",", "23", ",", "28", ",", "3", ",", "8", ",", "13", ",", "18", ",", "23", ",", "28", ",", "2", ",", "7", ",", "12", ",", "17", ",", "22", ",", "27", ",", "2", ",", "7", ",", "12", ",", "17", ",", "22", ",", "27", "]", "return", "m", "[", "p", "-", "1", "]", ",", "d", "[", "p", "-", "1", "]" ]
Given a pentad number, return the month and day of the first day in the pentad
[ "Given", "a", "pentad", "number", "return", "the", "month", "and", "day", "of", "the", "first", "day", "in", "the", "pentad" ]
[ "\"\"\"\n Given a pentad number, return the month and day of the first day in the pentad\n \n :param p: pentad number from 1 to 73\n :type p: integer\n \n :return: month and day of the first day of the pentad\n :rtype: integer \n \"\"\"" ]
[ { "param": "p", "type": null } ]
{ "returns": [ { "docstring": "month and day of the first day of the pentad", "docstring_tokens": [ "month", "and", "day", "of", "the", "first", "day", "of", "the", "pentad" ], "type": "integer" } ], "raises": [], "params": [ { "identifier": "p", "type": null, "docstring": "pentad number from 1 to 73", "docstring_tokens": [ "pentad", "number", "from", "1", "to", "73" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def pentad_to_month_day(p): assert 0 < p < 74, 'p outside allowed range 1-73 ' + str(p) m = [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12] d = [1, 6, 11, 16, 21, 26, 31, 5, 10, 15, 20, 25, 2, 7, 12, 17, 22, 27, 1, 6, 11, 16, 21, 26, 1, 6, 11, 16, 21, 26, 31, 5, 10, 15, 20, 25, 30, 5, 10, 15, 20, 25, 30, 4, 9, 14, 19, 24, 29, 3, 8, 13, 18, 23, 28, 3, 8, 13, 18, 23, 28, 2, 7, 12, 17, 22, 27, 2, 7, 12, 17, 22, 27] return m[p - 1], d[p - 1]
492
336
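A quick doctest-style check of pentad_to_month_day, assuming the function from the record above is in scope; the expected tuples read straight off the two lookup tables:

>>> pentad_to_month_day(1)
(1, 1)
>>> pentad_to_month_day(8)
(2, 5)
>>> pentad_to_month_day(73)
(12, 27)

Passing p outside 1-73 trips the assertion, e.g. pentad_to_month_day(74) raises AssertionError.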
905488382db20ea79628640781d1516fde01b024
rrozema12/Data-Mining-Classifiers
src/classifier_util.py
[ "MIT" ]
Python
_findFalses
<not_specific>
def _findFalses(splitLabels, forLabel): """ Takes an array of labels, counts the number of FP and FN """ falses = 0 for row in splitLabels: for actual, predicted in row: # If either the predicted or actual is the label we care about if actual == forLabel: if actual != predicted: #if label is false falses += 1 elif predicted == forLabel: if actual != predicted: falses += 1 return falses
Takes an array of labels, counts the number of FP and FN
Takes an array of labels, counts the number of FP and FN
[ "Takes", "an", "array", "of", "labels", "counts", "the", "number", "of", "FP", "and", "FN" ]
def _findFalses(splitLabels, forLabel): falses = 0 for row in splitLabels: for actual, predicted in row: if actual == forLabel: if actual != predicted: falses += 1 elif predicted == forLabel: if actual != predicted: falses += 1 return falses
[ "def", "_findFalses", "(", "splitLabels", ",", "forLabel", ")", ":", "falses", "=", "0", "for", "row", "in", "splitLabels", ":", "for", "actual", ",", "predicted", "in", "row", ":", "if", "actual", "==", "forLabel", ":", "if", "actual", "!=", "predicted", ":", "falses", "+=", "1", "elif", "predicted", "==", "forLabel", ":", "if", "actual", "!=", "predicted", ":", "falses", "+=", "1", "return", "falses" ]
Takes an array of labels, counts the number of FP and FN
[ "Takes", "an", "array", "of", "labels", "counts", "the", "number", "of", "FP", "and", "FN" ]
[ "\"\"\" Takes an array of labels, counts the number of FP and FN \"\"\"", "# If either the predicted or actual is the label we care about", "#if label is false" ]
[ { "param": "splitLabels", "type": null }, { "param": "forLabel", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "splitLabels", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "forLabel", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _findFalses(splitLabels, forLabel): falses = 0 for row in splitLabels: for actual, predicted in row: if actual == forLabel: if actual != predicted: falses += 1 elif predicted == forLabel: if actual != predicted: falses += 1 return falses
493
670
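A small illustration of _findFalses, assuming the function above is in scope; each row holds (actual, predicted) label pairs, and the call below counts one false negative plus one false positive for the label 'pos':

>>> _findFalses([[('pos', 'pos'), ('pos', 'neg'), ('neg', 'pos')]], 'pos')
2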
d9d6e4fae28141fb442be1afbb47acc1ea10fd02
jkafader-esnet/pypond
pypond/util.py
[ "BSD-3-Clause-LBNL" ]
Python
generate_paths
<not_specific>
def generate_paths(dic): # pragma: no cover """ Generate a list of all possible field paths in a dict. This is for determining all paths in a dict when none is given. Currently unused, but keeping since we will probably need it. Parameters ---------- dic : dict A dict, generally the payload from an Event class. Returns ------- list A list of strings of all the paths in the dict. """ paths = list() def recurse(data, keys=()): """ Do the actual recursion and yield the keys to generate_paths() """ if isinstance(data, dict): for key in list(data.keys()): for path in recurse(data[key], keys + (key,)): yield path else: yield keys for key in recurse(dic): paths.append(key) return paths
Generate a list of all possible field paths in a dict. This is for determining all paths in a dict when none is given. Currently unused, but keeping since we will probably need it. Parameters ---------- dic : dict A dict, generally the payload from an Event class. Returns ------- list A list of strings of all the paths in the dict.
Generate a list of all possible field paths in a dict. This is for determining all paths in a dict when none is given. Currently unused, but keeping since we will probably need it. Parameters dic : dict A dict, generally the payload from an Event class. Returns list A list of strings of all the paths in the dict.
[ "Generate", "a", "list", "of", "all", "possible", "field", "paths", "in", "a", "dict", ".", "This", "is", "for", "determining", "all", "paths", "in", "a", "dict", "when", "none", "is", "given", ".", "Currently", "unused", "but", "keeping", "since", "we", "will", "probably", "need", "it", ".", "Parameters", "dic", ":", "dict", "A", "dict", "generally", "the", "payload", "from", "an", "Event", "class", ".", "Returns", "list", "A", "list", "of", "strings", "of", "all", "the", "paths", "in", "the", "dict", "." ]
def generate_paths(dic): paths = list() def recurse(data, keys=()): if isinstance(data, dict): for key in list(data.keys()): for path in recurse(data[key], keys + (key,)): yield path else: yield keys for key in recurse(dic): paths.append(key) return paths
[ "def", "generate_paths", "(", "dic", ")", ":", "paths", "=", "list", "(", ")", "def", "recurse", "(", "data", ",", "keys", "=", "(", ")", ")", ":", "\"\"\"\n Do the actual recursion and yield the keys to generate_paths()\n \"\"\"", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "for", "key", "in", "list", "(", "data", ".", "keys", "(", ")", ")", ":", "for", "path", "in", "recurse", "(", "data", "[", "key", "]", ",", "keys", "+", "(", "key", ",", ")", ")", ":", "yield", "path", "else", ":", "yield", "keys", "for", "key", "in", "recurse", "(", "dic", ")", ":", "paths", ".", "append", "(", "key", ")", "return", "paths" ]
Generate a list of all possible field paths in a dict.
[ "Generate", "a", "list", "of", "all", "possible", "field", "paths", "in", "a", "dict", "." ]
[ "# pragma: no cover", "\"\"\"\n Generate a list of all possible field paths in a dict. This is\n for determining all paths in a dict when none is given.\n\n Currently unused, but keeping since we will probably need it.\n\n Parameters\n ----------\n dic : dict\n A dict, generally the payload from an Event class.\n\n Returns\n -------\n list\n A list of strings of all the paths in the dict.\n \"\"\"", "\"\"\"\n Do the actual recursion and yield the keys to generate_paths()\n \"\"\"" ]
[ { "param": "dic", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dic", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def generate_paths(dic): paths = list() def recurse(data, keys=()): if isinstance(data, dict): for key in list(data.keys()): for path in recurse(data[key], keys + (key,)): yield path else: yield keys for key in recurse(dic): paths.append(key) return paths
495
279
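A doctest-style sketch of generate_paths, assuming the function above is in scope; on CPython 3.7+ the output order follows dict insertion order:

>>> generate_paths({'a': {'b': 1, 'c': 2}, 'd': 3})
[('a', 'b'), ('a', 'c'), ('d',)]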
562e55941442dc5d9efe6d3abcfdd2d7026cebe0
Mind-the-Pineapple/tpot-age
BayOptPy/tpot/tpot_diagnostics.py
[ "MIT" ]
Python
print_number_of_sucessful_models_per_generation
null
def print_number_of_sucessful_models_per_generation(results, start_gen, stop_gen): ''' This funciton prints then umber of sucessfully evaluated models per generation inputs: ------- results: Dictionary with the model definition per generation start_gen: first generation to take into account (default 2) stop_gen: last generation to take into account (default100)_ ''' for generation in range(stop_gen, start_gen, -1): n_evaluated_models = len(results['evaluated_individuals'][generation]) print('Generation %d: %d were evaluated' %(generation, n_evaluated_models))
This funciton prints then umber of sucessfully evaluated models per generation inputs: ------- results: Dictionary with the model definition per generation start_gen: first generation to take into account (default 2) stop_gen: last generation to take into account (default100)_
This funciton prints then umber of sucessfully evaluated models per generation inputs. Dictionary with the model definition per generation start_gen: first generation to take into account (default 2) stop_gen: last generation to take into account (default100)_
[ "This", "funciton", "prints", "then", "umber", "of", "sucessfully", "evaluated", "models", "per", "generation", "inputs", ".", "Dictionary", "with", "the", "model", "definition", "per", "generation", "start_gen", ":", "first", "generation", "to", "take", "into", "account", "(", "default", "2", ")", "stop_gen", ":", "last", "generation", "to", "take", "into", "account", "(", "default100", ")", "_" ]
def print_number_of_sucessful_models_per_generation(results, start_gen, stop_gen): for generation in range(stop_gen, start_gen, -1): n_evaluated_models = len(results['evaluated_individuals'][generation]) print('Generation %d: %d were evaluated' %(generation, n_evaluated_models))
[ "def", "print_number_of_sucessful_models_per_generation", "(", "results", ",", "start_gen", ",", "stop_gen", ")", ":", "for", "generation", "in", "range", "(", "stop_gen", ",", "start_gen", ",", "-", "1", ")", ":", "n_evaluated_models", "=", "len", "(", "results", "[", "'evaluated_individuals'", "]", "[", "generation", "]", ")", "print", "(", "'Generation %d: %d were evaluated'", "%", "(", "generation", ",", "n_evaluated_models", ")", ")" ]
This funciton prints then umber of sucessfully evaluated models per generation inputs:
[ "This", "funciton", "prints", "then", "umber", "of", "sucessfully", "evaluated", "models", "per", "generation", "inputs", ":" ]
[ "'''\n This funciton prints then umber of sucessfully evaluated models per generation\n inputs:\n -------\n results: Dictionary with the model definition per generation\n start_gen: first generation to take into account (default 2)\n stop_gen: last generation to take into account (default100)_\n '''" ]
[ { "param": "results", "type": null }, { "param": "start_gen", "type": null }, { "param": "stop_gen", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "results", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "start_gen", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "stop_gen", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def print_number_of_sucessful_models_per_generation(results, start_gen, stop_gen): for generation in range(stop_gen, start_gen, -1): n_evaluated_models = len(results['evaluated_individuals'][generation]) print('Generation %d: %d were evaluated' %(generation, n_evaluated_models))
497
419
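A hedged usage sketch for print_number_of_sucessful_models_per_generation, assuming the function above is in scope; the results dict is invented to match the shape the code indexes. Note that generations are reported in descending order and start_gen itself is excluded by range(stop_gen, start_gen, -1):

>>> results = {'evaluated_individuals': {2: ['m1'], 3: ['m1', 'm2']}}
>>> print_number_of_sucessful_models_per_generation(results, 1, 3)
Generation 3: 2 were evaluated
Generation 2: 1 were evaluated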
7fb6d81eb0abb84b5595552b49043f45d922b1bb
bamarni/universe
scripts/gen_universe.py
[ "Apache-2.0" ]
Python
format_universe_repo_content_type
<not_specific>
def format_universe_repo_content_type(universe_version): """ Formats a universe repo content-type of version `universe-version` :param universe_version: Universe content type version: "v3" or "v4" :type universe_version: str :return: content-type of the universe repo version `universe_version` :rtype: str """ content_type = "application/" \ "vnd.dcos.universe.repo+json;" \ "charset=utf-8;version=" \ + universe_version return content_type
Formats a universe repo content-type of version `universe-version` :param universe_version: Universe content type version: "v3" or "v4" :type universe_version: str :return: content-type of the universe repo version `universe_version` :rtype: str
Formats a universe repo content-type of version `universe-version`
[ "Formats", "a", "universe", "repo", "content", "-", "type", "of", "version", "`", "universe", "-", "version", "`" ]
def format_universe_repo_content_type(universe_version): content_type = "application/" \ "vnd.dcos.universe.repo+json;" \ "charset=utf-8;version=" \ + universe_version return content_type
[ "def", "format_universe_repo_content_type", "(", "universe_version", ")", ":", "content_type", "=", "\"application/\"", "\"vnd.dcos.universe.repo+json;\"", "\"charset=utf-8;version=\"", "+", "universe_version", "return", "content_type" ]
Formats a universe repo content-type of version `universe-version`
[ "Formats", "a", "universe", "repo", "content", "-", "type", "of", "version", "`", "universe", "-", "version", "`" ]
[ "\"\"\" Formats a universe repo content-type of version `universe-version`\n\n :param universe_version: Universe content type version: \"v3\" or \"v4\"\n :type universe_version: str\n :return: content-type of the universe repo version `universe_version`\n :rtype: str\n \"\"\"" ]
[ { "param": "universe_version", "type": null } ]
{ "returns": [ { "docstring": "content-type of the universe repo version `universe_version`", "docstring_tokens": [ "content", "-", "type", "of", "the", "universe", "repo", "version", "`", "universe_version", "`" ], "type": "str" } ], "raises": [], "params": [ { "identifier": "universe_version", "type": null, "docstring": "Universe content type version: \"v3\" or \"v4\"", "docstring_tokens": [ "Universe", "content", "type", "version", ":", "\"", "v3", "\"", "or", "\"", "v4", "\"" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def format_universe_repo_content_type(universe_version): content_type = "application/" \ "vnd.dcos.universe.repo+json;" \ "charset=utf-8;version=" \ + universe_version return content_type
498
730
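A doctest-style check of format_universe_repo_content_type, assuming the function above is in scope; the expected string is just the four concatenated literals:

>>> format_universe_repo_content_type("v4")
'application/vnd.dcos.universe.repo+json;charset=utf-8;version=v4'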
b7c6b0ed1a36b4a63cdec680dd3082fc65528323
marcdiazsan/reana
reana/reana_dev/run.py
[ "MIT" ]
Python
construct_workflow_name
<not_specific>
def construct_workflow_name(example, workflow_engine, compute_backend): """Construct suitable workflow name for given REANA example. :param example: REANA example (e.g. reana-demo-root6-roofit) :param workflow_engine: workflow engine to use (cwl, serial, yadage, snakemake) :param compute_backend: compute backend to use (kubernetes, htcondorcern, slurmcern) :type example: str :type workflow_engine: str :type compute_backend: str """ output = "{0}-{1}-{2}".format( example.replace("reana-demo-", ""), workflow_engine, compute_backend ) return output
Construct suitable workflow name for given REANA example. :param example: REANA example (e.g. reana-demo-root6-roofit) :param workflow_engine: workflow engine to use (cwl, serial, yadage, snakemake) :param compute_backend: compute backend to use (kubernetes, htcondorcern, slurmcern) :type example: str :type workflow_engine: str :type compute_backend: str
Construct suitable workflow name for given REANA example.
[ "Construct", "suitable", "workflow", "name", "for", "given", "REANA", "example", "." ]
def construct_workflow_name(example, workflow_engine, compute_backend): output = "{0}-{1}-{2}".format( example.replace("reana-demo-", ""), workflow_engine, compute_backend ) return output
[ "def", "construct_workflow_name", "(", "example", ",", "workflow_engine", ",", "compute_backend", ")", ":", "output", "=", "\"{0}-{1}-{2}\"", ".", "format", "(", "example", ".", "replace", "(", "\"reana-demo-\"", ",", "\"\"", ")", ",", "workflow_engine", ",", "compute_backend", ")", "return", "output" ]
Construct suitable workflow name for given REANA example.
[ "Construct", "suitable", "workflow", "name", "for", "given", "REANA", "example", "." ]
[ "\"\"\"Construct suitable workflow name for given REANA example.\n\n :param example: REANA example (e.g. reana-demo-root6-roofit)\n :param workflow_engine: workflow engine to use (cwl, serial, yadage, snakemake)\n :param compute_backend: compute backend to use (kubernetes, htcondorcern, slurmcern)\n :type example: str\n :type workflow_engine: str\n :type compute_backend: str\n \"\"\"" ]
[ { "param": "example", "type": null }, { "param": "workflow_engine", "type": null }, { "param": "compute_backend", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "example", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "workflow_engine", "type": null, "docstring": "workflow engine to use (cwl, serial, yadage, snakemake)", "docstring_tokens": [ "workflow", "engine", "to", "use", "(", "cwl", "serial", "yadage", "snakemake", ")" ], "default": null, "is_optional": null }, { "identifier": "compute_backend", "type": null, "docstring": "compute backend to use (kubernetes, htcondorcern, slurmcern)", "docstring_tokens": [ "compute", "backend", "to", "use", "(", "kubernetes", "htcondorcern", "slurmcern", ")" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def construct_workflow_name(example, workflow_engine, compute_backend): output = "{0}-{1}-{2}".format( example.replace("reana-demo-", ""), workflow_engine, compute_backend ) return output
499
307
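A doctest-style check of construct_workflow_name, assuming the function above is in scope; the reana-demo- prefix is stripped before the three parts are joined:

>>> construct_workflow_name("reana-demo-root6-roofit", "serial", "kubernetes")
'root6-roofit-serial-kubernetes'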
fbfd429a9a4bacd5d3a05aaf446d3276aa1132ef
boppreh/bayesian
bayesian/__init__.py
[ "MIT" ]
Python
gaussian_distribution
<not_specific>
def gaussian_distribution(values): """ Given a list of values, returns the (mean, variance) tuple for a Gaussian (normal) distribution. """ n = float(len(values)) mean = sum(values) / n if n > 1: variance = sum((value - mean) ** 2 for value in values) / (n - 1) else: variance = 0 return (mean, variance)
Given a list of values, returns the (mean, variance) tuple for a Gaussian (normal) distribution.
Given a list of values, returns the (mean, variance) tuple for a Gaussian (normal) distribution.
[ "Given", "a", "list", "of", "values", "returns", "the", "(", "mean", "variance", ")", "tuple", "for", "a", "Gaussian", "(", "normal", ")", "distribution", "." ]
def gaussian_distribution(values): n = float(len(values)) mean = sum(values) / n if n > 1: variance = sum((value - mean) ** 2 for value in values) / (n - 1) else: variance = 0 return (mean, variance)
[ "def", "gaussian_distribution", "(", "values", ")", ":", "n", "=", "float", "(", "len", "(", "values", ")", ")", "mean", "=", "sum", "(", "values", ")", "/", "n", "if", "n", ">", "1", ":", "variance", "=", "sum", "(", "(", "value", "-", "mean", ")", "**", "2", "for", "value", "in", "values", ")", "/", "(", "n", "-", "1", ")", "else", ":", "variance", "=", "0", "return", "(", "mean", ",", "variance", ")" ]
Given a list of values, returns the (mean, variance) tuple for a Gaussian (normal) distribution.
[ "Given", "a", "list", "of", "values", "returns", "the", "(", "mean", "variance", ")", "tuple", "for", "a", "Gaussian", "(", "normal", ")", "distribution", "." ]
[ "\"\"\"\n Given a list of values, returns the (mean, variance) tuple for a\n Gaussian (normal) distribution.\n \"\"\"" ]
[ { "param": "values", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "values", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def gaussian_distribution(values): n = float(len(values)) mean = sum(values) / n if n > 1: variance = sum((value - mean) ** 2 for value in values) / (n - 1) else: variance = 0 return (mean, variance)
500
929
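A doctest-style check of gaussian_distribution, assuming the function above is in scope; the second value is the sample variance (n - 1 denominator), and a single observation falls back to variance 0:

>>> gaussian_distribution([1, 2, 3, 4])
(2.5, 1.6666666666666667)
>>> gaussian_distribution([7])
(7.0, 0)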
e03a2d2a118f6d19a0339927959b4f5dfda23d2f
GrandMoff100/homeassistant-core
tests/pylint/__init__.py
[ "Apache-2.0" ]
Python
assert_adds_messages
null
def assert_adds_messages(linter: UnittestLinter, *messages): """Assert that exactly the given method adds the given messages. The list of messages must exactly match *all* the messages added by the method. Additionally, we check to see whether the args in each message can actually be substituted into the message string. """ yield got = linter.release_messages() no_msg = "No message." expected = "\n".join(repr(m) for m in messages) or no_msg got_str = "\n".join(repr(m) for m in got) or no_msg msg = ( "Expected messages did not match actual.\n" f"\nExpected:\n{expected}\n\nGot:\n{got_str}\n" ) assert got == list(messages), msg
Assert that exactly the given method adds the given messages. The list of messages must exactly match *all* the messages added by the method. Additionally, we check to see whether the args in each message can actually be substituted into the message string.
Assert that exactly the given method adds the given messages. The list of messages must exactly match *all* the messages added by the method. Additionally, we check to see whether the args in each message can actually be substituted into the message string.
[ "Assert", "that", "exactly", "the", "given", "method", "adds", "the", "given", "messages", ".", "The", "list", "of", "messages", "must", "exactly", "match", "*", "all", "*", "the", "messages", "added", "by", "the", "method", ".", "Additionally", "we", "check", "to", "see", "whether", "the", "args", "in", "each", "message", "can", "actually", "be", "substituted", "into", "the", "message", "string", "." ]
def assert_adds_messages(linter: UnittestLinter, *messages): yield got = linter.release_messages() no_msg = "No message." expected = "\n".join(repr(m) for m in messages) or no_msg got_str = "\n".join(repr(m) for m in got) or no_msg msg = ( "Expected messages did not match actual.\n" f"\nExpected:\n{expected}\n\nGot:\n{got_str}\n" ) assert got == list(messages), msg
[ "def", "assert_adds_messages", "(", "linter", ":", "UnittestLinter", ",", "*", "messages", ")", ":", "yield", "got", "=", "linter", ".", "release_messages", "(", ")", "no_msg", "=", "\"No message.\"", "expected", "=", "\"\\n\"", ".", "join", "(", "repr", "(", "m", ")", "for", "m", "in", "messages", ")", "or", "no_msg", "got_str", "=", "\"\\n\"", ".", "join", "(", "repr", "(", "m", ")", "for", "m", "in", "got", ")", "or", "no_msg", "msg", "=", "(", "\"Expected messages did not match actual.\\n\"", "f\"\\nExpected:\\n{expected}\\n\\nGot:\\n{got_str}\\n\"", ")", "assert", "got", "==", "list", "(", "messages", ")", ",", "msg" ]
Assert that exactly the given method adds the given messages.
[ "Assert", "that", "exactly", "the", "given", "method", "adds", "the", "given", "messages", "." ]
[ "\"\"\"Assert that exactly the given method adds the given messages.\n\n The list of messages must exactly match *all* the messages added by the\n method. Additionally, we check to see whether the args in each message can\n actually be substituted into the message string.\n \"\"\"" ]
[ { "param": "linter", "type": "UnittestLinter" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "linter", "type": "UnittestLinter", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
from pylint.testutils import UnittestLinter


def assert_adds_messages(linter: UnittestLinter, *messages):
    yield
    got = linter.release_messages()
    no_msg = "No message."
    expected = "\n".join(repr(m) for m in messages) or no_msg
    got_str = "\n".join(repr(m) for m in got) or no_msg
    msg = (
        "Expected messages did not match actual.\n"
        f"\nExpected:\n{expected}\n\nGot:\n{got_str}\n"
    )
    assert got == list(messages), msg
501
462
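assert_adds_messages is a bare generator here, so on its own it is not usable as a context manager; a hedged sketch of how it is presumably consumed upstream (the contextmanager wrapper and the no-arg UnittestLinter construction are assumptions, not part of the record):

from contextlib import contextmanager
from pylint.testutils import UnittestLinter

checked = contextmanager(assert_adds_messages)
with checked(UnittestLinter()):  # expecting no messages here
    pass  # run a checker visit inside the block; released messages are compared on exit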
61a16521014d24fab69dfb3d9ea6937cd4e3d4e5
RicardoParizotto/P4Visor
p4_hlir/graphs/hlir_info.py
[ "Apache-2.0" ]
Python
num_action_type_bits
<not_specific>
def num_action_type_bits(num_actions): """For a table with `num_actions` different actions that can be performed, return the number of bits to do a straightforward encoding of these as separate unique ids.""" assert(num_actions >= 0) if num_actions == 0: return 0 return int(math.ceil(math.log(num_actions, 2)))
For a table with `num_actions` different actions that can be performed, return the number of bits to do a straightforward encoding of these as separate unique ids.
For a table with `num_actions` different actions that can be performed, return the number of bits to do a straightforward encoding of these as separate unique ids.
[ "For", "a", "table", "with", "`", "num_actions", "`", "different", "actions", "that", "can", "be", "performed", "return", "the", "number", "of", "bits", "to", "do", "a", "straightforward", "encoding", "of", "these", "as", "separate", "unique", "ids", "." ]
def num_action_type_bits(num_actions): assert(num_actions >= 0) if num_actions == 0: return 0 return int(math.ceil(math.log(num_actions, 2)))
[ "def", "num_action_type_bits", "(", "num_actions", ")", ":", "assert", "(", "num_actions", ">=", "0", ")", "if", "num_actions", "==", "0", ":", "return", "0", "return", "int", "(", "math", ".", "ceil", "(", "math", ".", "log", "(", "num_actions", ",", "2", ")", ")", ")" ]
For a table with `num_actions` different actions that can be performed, return the number of bits to do a straightforward encoding of these as separate unique ids.
[ "For", "a", "table", "with", "`", "num_actions", "`", "different", "actions", "that", "can", "be", "performed", "return", "the", "number", "of", "bits", "to", "do", "a", "straightforward", "encoding", "of", "these", "as", "separate", "unique", "ids", "." ]
[ "\"\"\"For a table with `num_actions` different actions that can be\n performed, return the number of bits to do a straightforward\n encoding of these as separate unique ids.\"\"\"" ]
[ { "param": "num_actions", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "num_actions", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import math def num_action_type_bits(num_actions): assert(num_actions >= 0) if num_actions == 0: return 0 return int(math.ceil(math.log(num_actions, 2)))
502
209
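A doctest-style check of num_action_type_bits, assuming the function above is in scope; note the edge cases where zero or one action needs zero bits:

>>> num_action_type_bits(0)
0
>>> num_action_type_bits(1)
0
>>> num_action_type_bits(5)
3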
b1a3d9f66f9c82df50b0c104f2d471b27e6570a3
Neil-Symington/aem_interp_dash
garjmcmctdem_utils/netcdf_utils.py
[ "Apache-2.0" ]
Python
object2array
<not_specific>
def object2array(variable, dtype): """Helper function for converting single variables to a list Parameters ---------- variable : object Python object dtype : python datatype Returns ------- list If variable is a single variables return the variable within a list """ single_var = (type(variable) == dtype) if single_var: return [variable] else: return variable
Helper function for converting single variables to a list Parameters ---------- variable : object Python object dtype : python datatype Returns ------- list If variable is a single variables return the variable within a list
Helper function for converting single variables to a list Parameters variable : object Python object dtype : python datatype Returns list If variable is a single variables return the variable within a list
[ "Helper", "function", "for", "converting", "single", "variables", "to", "a", "list", "Parameters", "variable", ":", "object", "Python", "object", "dtype", ":", "python", "datatype", "Returns", "list", "If", "variable", "is", "a", "single", "variables", "return", "the", "variable", "within", "a", "list" ]
def object2array(variable, dtype): single_var = (type(variable) == dtype) if single_var: return [variable] else: return variable
[ "def", "object2array", "(", "variable", ",", "dtype", ")", ":", "single_var", "=", "(", "type", "(", "variable", ")", "==", "dtype", ")", "if", "single_var", ":", "return", "[", "variable", "]", "else", ":", "return", "variable" ]
Helper function for converting single variables to a list Parameters
[ "Helper", "function", "for", "converting", "single", "variables", "to", "a", "list", "Parameters" ]
[ "\"\"\"Helper function for converting single variables to a list\n\n Parameters\n ----------\n variable : object\n Python object\n dtype : python datatype\n\n Returns\n -------\n list\n If variable is a single variables return the variable within a list\n\n \"\"\"" ]
[ { "param": "variable", "type": null }, { "param": "dtype", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "variable", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "dtype", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def object2array(variable, dtype): single_var = (type(variable) == dtype) if single_var: return [variable] else: return variable
503
561
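A doctest-style check of object2array, assuming the function above is in scope; a bare value of the given dtype is wrapped, anything else passes through unchanged:

>>> object2array('books', str)
['books']
>>> object2array(['books', 'films'], str)
['books', 'films']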
3364a85cd10d47437d81812307497a0d547246f9
wangj19/99-CapstoneProject-201920
src/shared_gui.py
[ "MIT" ]
Python
handle_left
null
def handle_left(left_entry_box, right_entry_box, mqtt_sender): print('Go Left', -int(left_entry_box.get()), int(right_entry_box.get())) mqtt_sender.send_message('right',[left_entry_box.get(), right_entry_box.get()]) """ Tells the robot to move using the speeds in the given entry boxes, but using the negative of the speed in the left entry box. :type left_entry_box: ttk.Entry :type right_entry_box: ttk.Entry :type mqtt_sender: com.MqttClient """
Tells the robot to move using the speeds in the given entry boxes, but using the negative of the speed in the left entry box. :type left_entry_box: ttk.Entry :type right_entry_box: ttk.Entry :type mqtt_sender: com.MqttClient
Tells the robot to move using the speeds in the given entry boxes, but using the negative of the speed in the left entry box.
[ "Tells", "the", "robot", "to", "move", "using", "the", "speeds", "in", "the", "given", "entry", "boxes", "but", "using", "the", "negative", "of", "the", "speed", "in", "the", "left", "entry", "box", "." ]
def handle_left(left_entry_box, right_entry_box, mqtt_sender): print('Go Left', -int(left_entry_box.get()), int(right_entry_box.get())) mqtt_sender.send_message('right',[left_entry_box.get(), right_entry_box.get()])
[ "def", "handle_left", "(", "left_entry_box", ",", "right_entry_box", ",", "mqtt_sender", ")", ":", "print", "(", "'Go Left'", ",", "-", "int", "(", "left_entry_box", ".", "get", "(", ")", ")", ",", "int", "(", "right_entry_box", ".", "get", "(", ")", ")", ")", "mqtt_sender", ".", "send_message", "(", "'right'", ",", "[", "left_entry_box", ".", "get", "(", ")", ",", "right_entry_box", ".", "get", "(", ")", "]", ")" ]
Tells the robot to move using the speeds in the given entry boxes, but using the negative of the speed in the left entry box.
[ "Tells", "the", "robot", "to", "move", "using", "the", "speeds", "in", "the", "given", "entry", "boxes", "but", "using", "the", "negative", "of", "the", "speed", "in", "the", "left", "entry", "box", "." ]
[ "\"\"\"\n Tells the robot to move using the speeds in the given entry boxes,\n but using the negative of the speed in the left entry box.\n :type left_entry_box: ttk.Entry\n :type right_entry_box: ttk.Entry\n :type mqtt_sender: com.MqttClient\n \"\"\"" ]
[ { "param": "left_entry_box", "type": null }, { "param": "right_entry_box", "type": null }, { "param": "mqtt_sender", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "left_entry_box", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "right_entry_box", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "mqtt_sender", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def handle_left(left_entry_box, right_entry_box, mqtt_sender): print('Go Left', -int(left_entry_box.get()), int(right_entry_box.get())) mqtt_sender.send_message('right',[left_entry_box.get(), right_entry_box.get()])
505
684
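A hedged sketch of handle_left with stand-in objects (the stubs below only imitate the get and send_message methods of ttk.Entry and com.MqttClient); as recorded, the body prints the negated left speed but publishes the raw entry strings under the topic 'right':

class StubEntry:
    def __init__(self, text):
        self._text = text
    def get(self):
        return self._text

class StubSender:
    def send_message(self, topic, payload):
        print(topic, payload)

handle_left(StubEntry('100'), StubEntry('100'), StubSender())
# prints: Go Left -100 100
# then:   right ['100', '100']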
d01a490e4778acfec2033d6426232049f85e2dd7
asabyr/LensTools
lenstools/statistics/database.py
[ "MIT" ]
Python
read_table_all
<not_specific>
def read_table_all(cls,db_names,table_name): """ Read the same SQL table from a list of databases and combine the results :param db_names: list of names of the databases to query :type db_names: list. :param table: table to read :type table: str. :returns: :py:class:`Ensemble` """ all_results = list() #Query each database for db_name in db_names: with cls(db_name) as db: all_results.append(db.read_table(table_name)) #Combine and return return cls._constructor_ensemble.concat(all_results,axis=0,ignore_index=True)
Read the same SQL table from a list of databases and combine the results :param db_names: list of names of the databases to query :type db_names: list. :param table: table to read :type table: str. :returns: :py:class:`Ensemble`
Read the same SQL table from a list of databases and combine the results
[ "Read", "the", "same", "SQL", "table", "from", "a", "list", "of", "databases", "and", "combine", "the", "results" ]
def read_table_all(cls,db_names,table_name): all_results = list() for db_name in db_names: with cls(db_name) as db: all_results.append(db.read_table(table_name)) return cls._constructor_ensemble.concat(all_results,axis=0,ignore_index=True)
[ "def", "read_table_all", "(", "cls", ",", "db_names", ",", "table_name", ")", ":", "all_results", "=", "list", "(", ")", "for", "db_name", "in", "db_names", ":", "with", "cls", "(", "db_name", ")", "as", "db", ":", "all_results", ".", "append", "(", "db", ".", "read_table", "(", "table_name", ")", ")", "return", "cls", ".", "_constructor_ensemble", ".", "concat", "(", "all_results", ",", "axis", "=", "0", ",", "ignore_index", "=", "True", ")" ]
Read the same SQL table from a list of databases and combine the results
[ "Read", "the", "same", "SQL", "table", "from", "a", "list", "of", "databases", "and", "combine", "the", "results" ]
[ "\"\"\"\n\t\tRead the same SQL table from a list of databases and combine the results\n\n\t\t:param db_names: list of names of the databases to query\n\t\t:type db_names: list.\n\n\t\t:param table: table to read\n\t\t:type table: str.\n\n\t\t:returns: :py:class:`Ensemble`\n\n\t\t\"\"\"", "#Query each database", "#Combine and return" ]
[ { "param": "cls", "type": null }, { "param": "db_names", "type": null }, { "param": "table_name", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "db_names", "type": null, "docstring": "list of names of the databases to query", "docstring_tokens": [ "list", "of", "names", "of", "the", "databases", "to", "query" ], "default": null, "is_optional": null }, { "identifier": "table_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "table", "type": null, "docstring": "table to read", "docstring_tokens": [ "table", "to", "read" ], "default": null, "is_optional": null } ], "others": [] }
def read_table_all(cls,db_names,table_name): all_results = list() for db_name in db_names: with cls(db_name) as db: all_results.append(db.read_table(table_name)) return cls._constructor_ensemble.concat(all_results,axis=0,ignore_index=True)
506
106
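read_table_all takes cls as its first argument, so in the source it presumably sits on a Database-like class as a classmethod; a hedged usage sketch under that assumption (the class name and file names below are invented):

combined = Database.read_table_all(["run1.sqlite", "run2.sqlite"], "scores")
# one Ensemble holding the 'scores' table rows from both files, re-indexed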
302d73cd7a6bce3c4a2d36554e234f7e81926bc0
parharn/BLR
src/blr/cli/clusterrmdup.py
[ "MIT" ]
Python
meet_requirements
<not_specific>
def meet_requirements(read, summary): """ Checks so read pair meets requirements before being used in analysis. :param read: pysam read :param summary: dict :return: bool """ if read.is_unmapped: summary["Unmapped reads"] += 1 return False if read.mate_is_unmapped: summary["Unmapped reads"] += 1 return False if not read.is_proper_pair: summary["Reads not proper pair"] += 1 return False return True
Checks so read pair meets requirements before being used in analysis. :param read: pysam read :param summary: dict :return: bool
Checks so read pair meets requirements before being used in analysis.
[ "Checks", "so", "read", "pair", "meets", "requirements", "before", "being", "used", "in", "analysis", "." ]
def meet_requirements(read, summary): if read.is_unmapped: summary["Unmapped reads"] += 1 return False if read.mate_is_unmapped: summary["Unmapped reads"] += 1 return False if not read.is_proper_pair: summary["Reads not proper pair"] += 1 return False return True
[ "def", "meet_requirements", "(", "read", ",", "summary", ")", ":", "if", "read", ".", "is_unmapped", ":", "summary", "[", "\"Unmapped reads\"", "]", "+=", "1", "return", "False", "if", "read", ".", "mate_is_unmapped", ":", "summary", "[", "\"Unmapped reads\"", "]", "+=", "1", "return", "False", "if", "not", "read", ".", "is_proper_pair", ":", "summary", "[", "\"Reads not proper pair\"", "]", "+=", "1", "return", "False", "return", "True" ]
Checks so read pair meets requirements before being used in analysis.
[ "Checks", "so", "read", "pair", "meets", "requirements", "before", "being", "used", "in", "analysis", "." ]
[ "\"\"\"\n Checks so read pair meets requirements before being used in analysis.\n :param read: pysam read\n :param summary: dict\n :return: bool\n \"\"\"" ]
[ { "param": "read", "type": null }, { "param": "summary", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "read", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "summary", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def meet_requirements(read, summary): if read.is_unmapped: summary["Unmapped reads"] += 1 return False if read.mate_is_unmapped: summary["Unmapped reads"] += 1 return False if not read.is_proper_pair: summary["Reads not proper pair"] += 1 return False return True
507
1017
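A runnable sketch of meet_requirements with a stand-in for a pysam read (SimpleNamespace only mimics the three flags the function inspects) and a Counter as the summary dict:

import types
from collections import Counter

summary = Counter()
good = types.SimpleNamespace(is_unmapped=False, mate_is_unmapped=False, is_proper_pair=True)
meet_requirements(good, summary)   # True; summary stays empty
bad = types.SimpleNamespace(is_unmapped=True, mate_is_unmapped=False, is_proper_pair=True)
meet_requirements(bad, summary)    # False; summary['Unmapped reads'] == 1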
6d0bd849521cd83fd297c28f9262b7995548bd17
Krastanov/cutiepy
cutiepy/symbolic.py
[ "BSD-3-Clause" ]
Python
anon
<not_specific>
def anon(cls, *args, **kwargs): '''Create an object with unique name.''' res = cls('%s'%uuid.uuid4(), *args, **kwargs) res._anon = True return res
Create an object with unique name.
Create an object with unique name.
[ "Create", "an", "object", "with", "unique", "name", "." ]
def anon(cls, *args, **kwargs): res = cls('%s'%uuid.uuid4(), *args, **kwargs) res._anon = True return res
[ "def", "anon", "(", "cls", ",", "*", "args", ",", "**", "kwargs", ")", ":", "res", "=", "cls", "(", "'%s'", "%", "uuid", ".", "uuid4", "(", ")", ",", "*", "args", ",", "**", "kwargs", ")", "res", ".", "_anon", "=", "True", "return", "res" ]
Create an object with unique name.
[ "Create", "an", "object", "with", "unique", "name", "." ]
[ "'''Create an object with unique name.'''" ]
[ { "param": "cls", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import uuid def anon(cls, *args, **kwargs): res = cls('%s'%uuid.uuid4(), *args, **kwargs) res._anon = True return res
509
716
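A hedged sketch of anon; although written in classmethod style, it can be exercised directly by passing any class whose first constructor argument is a name (the Named class below is invented for illustration):

class Named:
    def __init__(self, name):
        self.name = name

obj = anon(Named)
# obj.name is a fresh uuid4 string, and obj._anon is True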
1832d4a0a59526ac02c30e5c421313a3f58f9684
AnshikaY/awesomeScripts
weather_cli/weather_cli.py
[ "MIT" ]
Python
print_weather_data
<not_specific>
def print_weather_data(data, verbose): """Displays the weather data in human readable format. Args: data (dict): dictionary that contains the weather data. verbose (boolean): if `True`, more detail will be printed about the weather and the city. """ info = f""" ------------------------------ City : {data['name']} Country : {data['sys']['country']}""" if verbose: info += f""" Longitude : {data['coord']['lon']} Latitude : {data['coord']['lat']} Sunrise : {datetime.fromtimestamp(data['sys']['sunrise']).time()} Sunset : {datetime.fromtimestamp(data['sys']['sunset']).time()}""" info += f""" ------------------------------ Temperature : {data['main']['temp']}""" if verbose: info += f""" Temp min : {data['main']['temp_min']} Temp max : {data['main']['temp_max']}""" info += f""" Pressure : {data['main']['pressure']} Humidity : {data['main']['humidity']} Wind speed : {data['wind']['speed']} Wind deg : {data['wind']['deg']} ------------------------------""" return info
Displays the weather data in human readable format. Args: data (dict): dictionary that contains the weather data. verbose (boolean): if `True`, more detail will be printed about the weather and the city.
Displays the weather data in human readable format. Args: data (dict): dictionary that contains the weather data. verbose (boolean): if `True`, more detail will be printed about the weather and the city.
[ "Displays", "the", "weather", "data", "in", "human", "readable", "format", ".", "Args", ":", "data", "(", "dict", ")", ":", "dictionary", "that", "contains", "the", "weather", "data", ".", "verbose", "(", "boolean", ")", ":", "if", "`", "True", "`", "more", "detail", "will", "be", "printed", "about", "the", "weather", "and", "the", "city", "." ]
def print_weather_data(data, verbose): info = f""" ------------------------------ City : {data['name']} Country : {data['sys']['country']}""" if verbose: info += f""" Longitude : {data['coord']['lon']} Latitude : {data['coord']['lat']} Sunrise : {datetime.fromtimestamp(data['sys']['sunrise']).time()} Sunset : {datetime.fromtimestamp(data['sys']['sunset']).time()}""" info += f""" ------------------------------ Temperature : {data['main']['temp']}""" if verbose: info += f""" Temp min : {data['main']['temp_min']} Temp max : {data['main']['temp_max']}""" info += f""" Pressure : {data['main']['pressure']} Humidity : {data['main']['humidity']} Wind speed : {data['wind']['speed']} Wind deg : {data['wind']['deg']} ------------------------------""" return info
[ "def", "print_weather_data", "(", "data", ",", "verbose", ")", ":", "info", "=", "f\"\"\"\n ------------------------------\n City : {data['name']}\n Country : {data['sys']['country']}\"\"\"", "if", "verbose", ":", "info", "+=", "f\"\"\"\n Longitude : {data['coord']['lon']}\n Latitude : {data['coord']['lat']}\n Sunrise : {datetime.fromtimestamp(data['sys']['sunrise']).time()}\n Sunset : {datetime.fromtimestamp(data['sys']['sunset']).time()}\"\"\"", "info", "+=", "f\"\"\"\n ------------------------------\n Temperature : {data['main']['temp']}\"\"\"", "if", "verbose", ":", "info", "+=", "f\"\"\"\n Temp min : {data['main']['temp_min']}\n Temp max : {data['main']['temp_max']}\"\"\"", "info", "+=", "f\"\"\"\n Pressure : {data['main']['pressure']}\n Humidity : {data['main']['humidity']}\n Wind speed : {data['wind']['speed']}\n Wind deg : {data['wind']['deg']}\n ------------------------------\"\"\"", "return", "info" ]
Displays the weather data in human readable format.
[ "Displays", "the", "weather", "data", "in", "human", "readable", "format", "." ]
[ "\"\"\"Displays the weather data in human readable format.\n\n Args:\n data (dict): dictionary that contains the weather data.\n verbose (boolean): if `True`, more detail will be printed about\n the weather and the city.\n \"\"\"" ]
[ { "param": "data", "type": null }, { "param": "verbose", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "verbose", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
from datetime import datetime

def print_weather_data(data, verbose):
    info = f"""
    ------------------------------
    City : {data['name']}
    Country : {data['sys']['country']}"""
    if verbose:
        info += f"""
    Longitude : {data['coord']['lon']}
    Latitude : {data['coord']['lat']}
    Sunrise : {datetime.fromtimestamp(data['sys']['sunrise']).time()}
    Sunset : {datetime.fromtimestamp(data['sys']['sunset']).time()}"""
    info += f"""
    ------------------------------
    Temperature : {data['main']['temp']}"""
    if verbose:
        info += f"""
    Temp min : {data['main']['temp_min']}
    Temp max : {data['main']['temp_max']}"""
    info += f"""
    Pressure : {data['main']['pressure']}
    Humidity : {data['main']['humidity']}
    Wind speed : {data['wind']['speed']}
    Wind deg : {data['wind']['deg']}
    ------------------------------"""
    return info
510
404
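A hedged usage sketch of print_weather_data; the dict below is invented but mirrors the OpenWeatherMap-style key layout the function indexes, and verbose=False keeps the coord and sunrise keys optional:

data = {
    'name': 'Oslo',
    'sys': {'country': 'NO'},
    'main': {'temp': 3.2, 'pressure': 1013, 'humidity': 80},
    'wind': {'speed': 4.1, 'deg': 270},
}
print(print_weather_data(data, verbose=False))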
4d420190daafd093fa1c70e53c5d1f2231be38f3
PapenfussLab/Mungo
mungo/blast.py
[ "Artistic-2.0" ]
Python
genomeToBlock
<not_specific>
def genomeToBlock(chrom, start, end, L=1000000000, blockSize=5000000, delimiter='.'): """Transform genomic to block coordinates. @param chrom: Chromosome @param start: Start coordinate @param end: End coorinate @param L: Chromosome length @param blockSize: Block size (Default: 5000000) @returns: (block, relStart, relEnd) """ strand = '+' if start>end: strand = '-' start,end = end,start blockStart = 1 + blockSize*int(start/blockSize) blockEnd = min(L, blockStart+blockSize-1) block = '%s%s%i-%i' % (chrom, delimiter, blockStart, blockEnd) relStart = start % blockSize relEnd = end % blockSize if strand=='-': relStart,relEnd = relEnd,relStart return block,relStart,relEnd
Transform genomic to block coordinates. @param chrom: Chromosome @param start: Start coordinate @param end: End coorinate @param L: Chromosome length @param blockSize: Block size (Default: 5000000) @returns: (block, relStart, relEnd)
Transform genomic to block coordinates.
[ "Transform", "genomic", "to", "block", "coordinates", "." ]
def genomeToBlock(chrom, start, end, L=1000000000, blockSize=5000000, delimiter='.'): strand = '+' if start>end: strand = '-' start,end = end,start blockStart = 1 + blockSize*int(start/blockSize) blockEnd = min(L, blockStart+blockSize-1) block = '%s%s%i-%i' % (chrom, delimiter, blockStart, blockEnd) relStart = start % blockSize relEnd = end % blockSize if strand=='-': relStart,relEnd = relEnd,relStart return block,relStart,relEnd
[ "def", "genomeToBlock", "(", "chrom", ",", "start", ",", "end", ",", "L", "=", "1000000000", ",", "blockSize", "=", "5000000", ",", "delimiter", "=", "'.'", ")", ":", "strand", "=", "'+'", "if", "start", ">", "end", ":", "strand", "=", "'-'", "start", ",", "end", "=", "end", ",", "start", "blockStart", "=", "1", "+", "blockSize", "*", "int", "(", "start", "/", "blockSize", ")", "blockEnd", "=", "min", "(", "L", ",", "blockStart", "+", "blockSize", "-", "1", ")", "block", "=", "'%s%s%i-%i'", "%", "(", "chrom", ",", "delimiter", ",", "blockStart", ",", "blockEnd", ")", "relStart", "=", "start", "%", "blockSize", "relEnd", "=", "end", "%", "blockSize", "if", "strand", "==", "'-'", ":", "relStart", ",", "relEnd", "=", "relEnd", ",", "relStart", "return", "block", ",", "relStart", ",", "relEnd" ]
Transform genomic to block coordinates.
[ "Transform", "genomic", "to", "block", "coordinates", "." ]
[ "\"\"\"Transform genomic to block coordinates.\n \n @param chrom: Chromosome\n @param start: Start coordinate\n @param end: End coorinate\n @param L: Chromosome length\n @param blockSize: Block size (Default: 5000000)\n @returns: (block, relStart, relEnd)\n \"\"\"" ]
[ { "param": "chrom", "type": null }, { "param": "start", "type": null }, { "param": "end", "type": null }, { "param": "L", "type": null }, { "param": "blockSize", "type": null }, { "param": "delimiter", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "chrom", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "start", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "end", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "L", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "blockSize", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "delimiter", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [ { "identifier": "returns", "docstring": "(block, relStart, relEnd)", "docstring_tokens": [ "(", "block", "relStart", "relEnd", ")" ] } ] }
def genomeToBlock(chrom, start, end, L=1000000000, blockSize=5000000, delimiter='.'): strand = '+' if start>end: strand = '-' start,end = end,start blockStart = 1 + blockSize*int(start/blockSize) blockEnd = min(L, blockStart+blockSize-1) block = '%s%s%i-%i' % (chrom, delimiter, blockStart, blockEnd) relStart = start % blockSize relEnd = end % blockSize if strand=='-': relStart,relEnd = relEnd,relStart return block,relStart,relEnd
511
962
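A doctest-style check of genomeToBlock, assuming the function above is in scope with its default L and blockSize; swapping start and end flips the relative coordinates, reflecting the reverse strand:

>>> genomeToBlock('chr1', 5000001, 5000100)
('chr1.5000001-10000000', 1, 100)
>>> genomeToBlock('chr1', 5000100, 5000001)
('chr1.5000001-10000000', 100, 1)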
f65fb12dc6670add0f4e879fdc9932de77ab64d3
coretl/bluesky
bluesky/log.py
[ "BSD-3-Clause" ]
Python
validate_level
int
def validate_level(level) -> int: ''' Return a int for level comparison ''' if isinstance(level, int): levelno = level elif isinstance(level, str): levelno = logging.getLevelName(level) if isinstance(levelno, int): return levelno else: raise ValueError("Your level is illegal, please use one of python logging string")
Return a int for level comparison
Return a int for level comparison
[ "Return", "a", "int", "for", "level", "comparison" ]
def validate_level(level) -> int: if isinstance(level, int): levelno = level elif isinstance(level, str): levelno = logging.getLevelName(level) if isinstance(levelno, int): return levelno else: raise ValueError("Your level is illegal, please use one of python logging string")
[ "def", "validate_level", "(", "level", ")", "->", "int", ":", "if", "isinstance", "(", "level", ",", "int", ")", ":", "levelno", "=", "level", "elif", "isinstance", "(", "level", ",", "str", ")", ":", "levelno", "=", "logging", ".", "getLevelName", "(", "level", ")", "if", "isinstance", "(", "levelno", ",", "int", ")", ":", "return", "levelno", "else", ":", "raise", "ValueError", "(", "\"Your level is illegal, please use one of python logging string\"", ")" ]
Return a int for level comparison
[ "Return", "a", "int", "for", "level", "comparison" ]
[ "'''\n Return a int for level comparison\n\n '''" ]
[ { "param": "level", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "level", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import logging def validate_level(level) -> int: if isinstance(level, int): levelno = level elif isinstance(level, str): levelno = logging.getLevelName(level) if isinstance(levelno, int): return levelno else: raise ValueError("Your level is illegal, please use one of python logging string")
512
41
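A doctest-style check of validate_level, assuming the function above is in scope; strings must be standard upper-case level names, since logging.getLevelName returns a non-int for anything else and the function then raises ValueError:

>>> validate_level(30)
30
>>> validate_level('DEBUG')
10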
f897d308f3a34dd34b94e84ebe45166e0472df72
sorice/preprocess
preprocess/basic/normalize.py
[ "BSD-3-Clause" ]
Python
replace_dot_sequence
str
def replace_dot_sequence(text: str) -> str: """ Replace a contiguous dot sequence by the same amount of whitespace. Please read carefully the documentation to see all the conventions adopted to replace this sequences, and how to maintain dot sentence delimiters for sentence tokenizers. Note ---- It can't be implemented without the finditer function. This expression r'(\w+)[.]\s*[.]+[\s|[.]]*' changes the sequences of points but it is impossible to handle the number of white spaces. This functions it is used also for the alignment process after normalization, where maintaining the length of the original text is important. """ for i in re.finditer('[.]\s*?[.]+?[\s|[.]]*',text): for j in range(i.start(),i.end()): if text[j] == '.' or text[j]==' ': text = text[:j]+' '+text[j+1:] return text
Replace a contiguous dot sequence by the same amount of whitespace. Please read carefully the documentation to see all the conventions adopted to replace this sequences, and how to maintain dot sentence delimiters for sentence tokenizers. Note ---- It can't be implemented without the finditer function. This expression r'(\w+)[.]\s*[.]+[\s|[.]]*' changes the sequences of points but it is impossible to handle the number of white spaces. This functions it is used also for the alignment process after normalization, where maintaining the length of the original text is important.
Replace a contiguous dot sequence by the same amount of whitespace. Please read carefully the documentation to see all the conventions adopted to replace this sequences, and how to maintain dot sentence delimiters for sentence tokenizers. Note It can't be implemented without the finditer function. This expression r'(\w+)[.]\s*[.]+[\s|[.]]*' changes the sequences of points but it is impossible to handle the number of white spaces. This functions it is used also for the alignment process after normalization, where maintaining the length of the original text is important.
[ "Replace", "a", "contiguous", "dot", "sequence", "by", "the", "same", "amount", "of", "whitespace", ".", "Please", "read", "carefully", "the", "documentation", "to", "see", "all", "the", "conventions", "adopted", "to", "replace", "this", "sequences", "and", "how", "to", "maintain", "dot", "sentence", "delimiters", "for", "sentence", "tokenizers", ".", "Note", "It", "can", "'", "t", "be", "implemented", "without", "the", "finditer", "function", ".", "This", "expression", "r", "'", "(", "\\", "w", "+", ")", "[", ".", "]", "\\", "s", "*", "[", ".", "]", "+", "[", "\\", "s|", "[", ".", "]]", "*", "'", "changes", "the", "sequences", "of", "points", "but", "it", "is", "impossible", "to", "handle", "the", "number", "of", "white", "spaces", ".", "This", "functions", "it", "is", "used", "also", "for", "the", "alignment", "process", "after", "normalization", "where", "maintaining", "the", "length", "of", "the", "original", "text", "is", "important", "." ]
def replace_dot_sequence(text: str) -> str: for i in re.finditer('[.]\s*?[.]+?[\s|[.]]*',text): for j in range(i.start(),i.end()): if text[j] == '.' or text[j]==' ': text = text[:j]+' '+text[j+1:] return text
[ "def", "replace_dot_sequence", "(", "text", ":", "str", ")", "->", "str", ":", "for", "i", "in", "re", ".", "finditer", "(", "'[.]\\s*?[.]+?[\\s|[.]]*'", ",", "text", ")", ":", "for", "j", "in", "range", "(", "i", ".", "start", "(", ")", ",", "i", ".", "end", "(", ")", ")", ":", "if", "text", "[", "j", "]", "==", "'.'", "or", "text", "[", "j", "]", "==", "' '", ":", "text", "=", "text", "[", ":", "j", "]", "+", "' '", "+", "text", "[", "j", "+", "1", ":", "]", "return", "text" ]
Replace a contiguous dot sequence by the same amount of whitespace.
[ "Replace", "a", "contiguous", "dot", "sequence", "by", "the", "same", "amount", "of", "whitespace", "." ]
[ "\"\"\"\n Replace a contiguous dot sequence by the same amount of \n whitespace.\n\n Please read carefully the documentation to see all the \n conventions adopted to replace this sequences, and how to \n maintain dot sentence delimiters for sentence tokenizers.\n\n Note\n ----\n\n It can't be implemented without the finditer function.\n This expression r'(\\w+)[.]\\s*[.]+[\\s|[.]]*' changes the sequences \n of points but it is impossible to handle the number of white spaces.\n This functions it is used also for the alignment process after \n normalization, where maintaining the length of the original text is\n important.\n\n \"\"\"" ]
[ { "param": "text", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "text", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def replace_dot_sequence(text: str) -> str: for i in re.finditer('[.]\s*?[.]+?[\s|[.]]*',text): for j in range(i.start(),i.end()): if text[j] == '.' or text[j]==' ': text = text[:j]+' '+text[j+1:] return text
514
165
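A doctest-style check of replace_dot_sequence, assuming the function above is in scope; the three-dot run plus its trailing space becomes four spaces, so the string length is preserved, while an ordinary sentence-final period is left alone:

>>> replace_dot_sequence('wait... what')
'wait    what'
>>> replace_dot_sequence('one. two')
'one. two'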
5ad16a43a1707833f4dfd4d01aaa3a2f6361a3f4
inimah/protoinfomax
src/extract_sentiment.py
[ "MIT" ]
Python
extract_topn_from_vector
<not_specific>
def extract_topn_from_vector(feature_names, sorted_items, topn=10): """get the feature names and tf-idf score of top n items""" #use only topn items from vector sorted_items = sorted_items[:topn] score_vals = [] feature_vals = [] for idx, score in sorted_items: fname = feature_names[idx] #keep track of feature name and its corresponding score score_vals.append(round(score, 3)) feature_vals.append(feature_names[idx]) #create a tuples of feature,score #results = zip(feature_vals,score_vals) results= {} for idx in range(len(feature_vals)): results[feature_vals[idx]]=score_vals[idx] return results
get the feature names and tf-idf score of top n items
get the feature names and tf-idf score of top n items
[ "get", "the", "feature", "names", "and", "tf", "-", "idf", "score", "of", "top", "n", "items" ]
def extract_topn_from_vector(feature_names, sorted_items, topn=10): sorted_items = sorted_items[:topn] score_vals = [] feature_vals = [] for idx, score in sorted_items: fname = feature_names[idx] score_vals.append(round(score, 3)) feature_vals.append(feature_names[idx]) results= {} for idx in range(len(feature_vals)): results[feature_vals[idx]]=score_vals[idx] return results
[ "def", "extract_topn_from_vector", "(", "feature_names", ",", "sorted_items", ",", "topn", "=", "10", ")", ":", "sorted_items", "=", "sorted_items", "[", ":", "topn", "]", "score_vals", "=", "[", "]", "feature_vals", "=", "[", "]", "for", "idx", ",", "score", "in", "sorted_items", ":", "fname", "=", "feature_names", "[", "idx", "]", "score_vals", ".", "append", "(", "round", "(", "score", ",", "3", ")", ")", "feature_vals", ".", "append", "(", "feature_names", "[", "idx", "]", ")", "results", "=", "{", "}", "for", "idx", "in", "range", "(", "len", "(", "feature_vals", ")", ")", ":", "results", "[", "feature_vals", "[", "idx", "]", "]", "=", "score_vals", "[", "idx", "]", "return", "results" ]
get the feature names and tf-idf score of top n items
[ "get", "the", "feature", "names", "and", "tf", "-", "idf", "score", "of", "top", "n", "items" ]
[ "\"\"\"get the feature names and tf-idf score of top n items\"\"\"", "#use only topn items from vector\r", "#keep track of feature name and its corresponding score\r", "#create a tuples of feature,score\r", "#results = zip(feature_vals,score_vals)\r" ]
[ { "param": "feature_names", "type": null }, { "param": "sorted_items", "type": null }, { "param": "topn", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "feature_names", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "sorted_items", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "topn", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def extract_topn_from_vector(feature_names, sorted_items, topn=10): sorted_items = sorted_items[:topn] score_vals = [] feature_vals = [] for idx, score in sorted_items: fname = feature_names[idx] score_vals.append(round(score, 3)) feature_vals.append(feature_names[idx]) results= {} for idx in range(len(feature_vals)): results[feature_vals[idx]]=score_vals[idx] return results
515
645
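A quick check of extract_topn_from_vector with a toy vocabulary (values invented; sorted_items is assumed to be (index, score) pairs already sorted by score, as the name implies):

feature_names = ["apple", "banana", "cherry"]
sorted_items = [(2, 0.912), (0, 0.547), (1, 0.120)]
extract_topn_from_vector(feature_names, sorted_items, topn=2)
# -> {'cherry': 0.912, 'apple': 0.547}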
7c4d7b02e48855aa6572d8db6957b7024cbd47ee
inversepixel/flip
python/flip.py
[ "MIT", "BSD-3-Clause" ]
Python
check_larger_than_one
<not_specific>
def check_larger_than_one(value): """ Checks that value is larger than one. If so, return integer version of it, otherwise raises error :param value: float :raise: argparseArgumentTypeError if value <= 1 :return: integer """ ivalue = int(value) if ivalue <= 1: raise argparse.ArgumentTypeError("Number of exposures must be greater than or equal to two. You entered %s." % value) return ivalue
Checks that value is larger than one. If so, return integer version of it, otherwise raises error :param value: float :raise: argparseArgumentTypeError if value <= 1 :return: integer
Checks that value is larger than one. If so, return integer version of it, otherwise raises error
[ "Checks", "that", "value", "is", "larger", "than", "one", ".", "If", "so", "return", "integer", "version", "of", "it", "otherwise", "raises", "error" ]
def check_larger_than_one(value): ivalue = int(value) if ivalue <= 1: raise argparse.ArgumentTypeError("Number of exposures must be greater than or equal to two. You entered %s." % value) return ivalue
[ "def", "check_larger_than_one", "(", "value", ")", ":", "ivalue", "=", "int", "(", "value", ")", "if", "ivalue", "<=", "1", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "\"Number of exposures must be greater than or equal to two. You entered %s.\"", "%", "value", ")", "return", "ivalue" ]
Checks that value is larger than one.
[ "Checks", "that", "value", "is", "larger", "than", "one", "." ]
[ "\"\"\"\n\tChecks that value is larger than one. If so, return integer version of it, otherwise raises error\n\n\t:param value: float\n\t:raise: argparseArgumentTypeError if value <= 1\n\t:return: integer\n\t\"\"\"" ]
[ { "param": "value", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [ { "docstring": "argparseArgumentTypeError if value <= 1", "docstring_tokens": [ "argparseArgumentTypeError", "if", "value", "<", "=", "1" ], "type": null } ], "params": [ { "identifier": "value", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import argparse def check_larger_than_one(value): ivalue = int(value) if ivalue <= 1: raise argparse.ArgumentTypeError("Number of exposures must be greater than or equal to two. You entered %s." % value) return ivalue
516
354
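check_larger_than_one is shaped like an argparse type= callable; a plausible wiring (the flag name below is hypothetical, not taken from the flip repo):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--exposures", type=check_larger_than_one)  # hypothetical flag name
parser.parse_args(["--exposures", "4"]).exposures   # -> 4
# "--exposures 1" would make argparse exit with the ArgumentTypeError message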
d0c862c8874f1df87fbfcc1479b6cd11cf6e97ea
markphip/subversion
tools/client-side/svn-vendor.py
[ "Apache-2.0" ]
Python
filename_sort_key
<not_specific>
def filename_sort_key(s): ''' Function to sort filenames so that parent directory is always followed by its children. Without it, [ "/a", "/a-b", "/a/b", "/a-b/c" ] would not be sorted correctly. ''' return s.replace('/', '\001')
Function to sort filenames so that parent directory is always followed by its children. Without it, [ "/a", "/a-b", "/a/b", "/a-b/c" ] would not be sorted correctly.
Function to sort filenames so that parent directory is always followed by its children.
[ "Function", "to", "sort", "filenames", "so", "that", "parent", "directory", "is", "always", "followed", "by", "its", "children", "." ]
def filename_sort_key(s): return s.replace('/', '\001')
[ "def", "filename_sort_key", "(", "s", ")", ":", "return", "s", ".", "replace", "(", "'/'", ",", "'\\001'", ")" ]
Function to sort filenames so that parent directory is always followed by its children.
[ "Function", "to", "sort", "filenames", "so", "that", "parent", "directory", "is", "always", "followed", "by", "its", "children", "." ]
[ "'''\n Function to sort filenames so that parent directory is always followed\n by its children. Without it, [ \"/a\", \"/a-b\", \"/a/b\", \"/a-b/c\" ] would\n not be sorted correctly.\n '''" ]
[ { "param": "s", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "s", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def filename_sort_key(s): return s.replace('/', '\001')
517
1013
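Running the docstring's own example through sorted() (verified by hand: '\001' sorts before every printable character, so children land directly after their parent):

sorted(["/a", "/a-b", "/a/b", "/a-b/c"], key=filename_sort_key)
# -> ['/a', '/a/b', '/a-b', '/a-b/c']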
d8d5c7c6d4bbac3004ab4d6d1af9fea0330900e2
olokshyn/CAR
tweets_analyzer/tweets_analyzer.py
[ "MIT" ]
Python
_clean_tweet
<not_specific>
def _clean_tweet(tweet_text): """ Clean tweet text by removing links, special characters. """ return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t]) | (\w+:\ / \ / \S+) ", " ", tweet_text).split())
Clean tweet text by removing links, special characters.
Clean tweet text by removing links, special characters.
[ "Clean", "tweet", "text", "by", "removing", "links", "special", "characters", "." ]
def _clean_tweet(tweet_text): return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t]) | (\w+:\ / \ / \S+) ", " ", tweet_text).split())
[ "def", "_clean_tweet", "(", "tweet_text", ")", ":", "return", "' '", ".", "join", "(", "re", ".", "sub", "(", "\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t]) | (\\w+:\\ / \\ / \\S+) \"", ",", "\" \"", ",", "tweet_text", ")", ".", "split", "(", ")", ")" ]
Clean tweet text by removing links, special characters.
[ "Clean", "tweet", "text", "by", "removing", "links", "special", "characters", "." ]
[ "\"\"\"\n Clean tweet text by removing links, special characters.\n \"\"\"" ]
[ { "param": "tweet_text", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "tweet_text", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def _clean_tweet(tweet_text): return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t]) | (\w+:\ / \ / \S+) ", " ", tweet_text).split())
518
349
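An illustrative _clean_tweet call (tweet text invented). Only the mention branch and the "non-alphanumeric character followed by a space" branch fire here; the final split/join collapses the leftover run of spaces:

_clean_tweet("Great game @NBA! So fun")
# -> 'Great game So fun'   ('@NBA' and the '! ' pair are blanked, then whitespace is collapsed)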
fd0cc8d772af05383da134c12fe18f96f3a78b9c
voussoir/timesearch
timesearch_modules/livestream.py
[ "BSD-3-Clause" ]
Python
cycle_generators
null
def cycle_generators(generators, only_once, sleepy): ''' Given multiple generators, yield an item from each one, cycling through them in a round-robin fashion. This is useful if you want to convert multiple livestream generators into a single generator that take turns updating each of them and yields all of their items. ''' while True: for generator in generators: yield next(generator) if only_once: break time.sleep(sleepy)
Given multiple generators, yield an item from each one, cycling through them in a round-robin fashion. This is useful if you want to convert multiple livestream generators into a single generator that take turns updating each of them and yields all of their items.
Given multiple generators, yield an item from each one, cycling through them in a round-robin fashion. This is useful if you want to convert multiple livestream generators into a single generator that take turns updating each of them and yields all of their items.
[ "Given", "multiple", "generators", "yield", "an", "item", "from", "each", "one", "cycling", "through", "them", "in", "a", "round", "-", "robin", "fashion", ".", "This", "is", "useful", "if", "you", "want", "to", "convert", "multiple", "livestream", "generators", "into", "a", "single", "generator", "that", "take", "turns", "updating", "each", "of", "them", "and", "yields", "all", "of", "their", "items", "." ]
def cycle_generators(generators, only_once, sleepy): while True: for generator in generators: yield next(generator) if only_once: break time.sleep(sleepy)
[ "def", "cycle_generators", "(", "generators", ",", "only_once", ",", "sleepy", ")", ":", "while", "True", ":", "for", "generator", "in", "generators", ":", "yield", "next", "(", "generator", ")", "if", "only_once", ":", "break", "time", ".", "sleep", "(", "sleepy", ")" ]
Given multiple generators, yield an item from each one, cycling through them in a round-robin fashion.
[ "Given", "multiple", "generators", "yield", "an", "item", "from", "each", "one", "cycling", "through", "them", "in", "a", "round", "-", "robin", "fashion", "." ]
[ "'''\n Given multiple generators, yield an item from each one, cycling through\n them in a round-robin fashion.\n\n This is useful if you want to convert multiple livestream generators into a\n single generator that take turns updating each of them and yields all of\n their items.\n '''" ]
[ { "param": "generators", "type": null }, { "param": "only_once", "type": null }, { "param": "sleepy", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "generators", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "only_once", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "sleepy", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import time def cycle_generators(generators, only_once, sleepy): while True: for generator in generators: yield next(generator) if only_once: break time.sleep(sleepy)
519
49
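A round-robin check of cycle_generators with plain iterators and no sleeping (inputs invented); only_once=True stops after one pass over the generators:

a = iter([1, 2, 3])
b = iter(["x", "y"])
list(cycle_generators([a, b], only_once=True, sleepy=0))
# -> [1, 'x']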
d9174781514b5973be8988abd8fedd163c8adf55
AhmadManzoor/jazzpos
vendor/smartmin/templatetags/smartmin.py
[ "MIT" ]
Python
field_help
<not_specific>
def field_help(view, field): """ Returns the field help for the passed in field """ return view.lookup_field_help(field)
Returns the field help for the passed in field
Returns the field help for the passed in field
[ "Returns", "the", "field", "help", "for", "the", "passed", "in", "field" ]
def field_help(view, field): return view.lookup_field_help(field)
[ "def", "field_help", "(", "view", ",", "field", ")", ":", "return", "view", ".", "lookup_field_help", "(", "field", ")" ]
Returns the field help for the passed in field
[ "Returns", "the", "field", "help", "for", "the", "passed", "in", "field" ]
[ "\"\"\"\n Returns the field help for the passed in field\n \"\"\"" ]
[ { "param": "view", "type": null }, { "param": "field", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "view", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "field", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def field_help(view, field): return view.lookup_field_help(field)
520
1022
75e358a2d6eb1fde93377b19549b99d16bcea44c
mammothb/q-bot
qbot/utility.py
[ "MIT" ]
Python
replace_multiple
<not_specific>
def replace_multiple(replacement, string): """Replace multiple substrings in one pass https://stackoverflow.com/questions/6116978 Keyword arguments: replacement -- various substrings and their replacements string -- the original string which contains the substrings that need replacing """ replacement = dict((re.escape(key), val) for key, val in replacement.items()) pattern = re.compile("|".join(replacement.keys())) return pattern.sub(lambda m: replacement[re.escape(m.group(0))], string)
Replace multiple substrings in one pass https://stackoverflow.com/questions/6116978 Keyword arguments: replacement -- various substrings and their replacements string -- the original string which contains the substrings that need replacing
Keyword arguments: replacement -- various substrings and their replacements string -- the original string which contains the substrings that need replacing
[ "Keyword", "arguments", ":", "replacement", "--", "various", "substrings", "and", "their", "replacements", "string", "--", "the", "original", "string", "which", "contains", "the", "substrings", "that", "need", "replacing" ]
def replace_multiple(replacement, string): replacement = dict((re.escape(key), val) for key, val in replacement.items()) pattern = re.compile("|".join(replacement.keys())) return pattern.sub(lambda m: replacement[re.escape(m.group(0))], string)
[ "def", "replace_multiple", "(", "replacement", ",", "string", ")", ":", "replacement", "=", "dict", "(", "(", "re", ".", "escape", "(", "key", ")", ",", "val", ")", "for", "key", ",", "val", "in", "replacement", ".", "items", "(", ")", ")", "pattern", "=", "re", ".", "compile", "(", "\"|\"", ".", "join", "(", "replacement", ".", "keys", "(", ")", ")", ")", "return", "pattern", ".", "sub", "(", "lambda", "m", ":", "replacement", "[", "re", ".", "escape", "(", "m", ".", "group", "(", "0", ")", ")", "]", ",", "string", ")" ]
Replace multiple substrings in one pass https://stackoverflow.com/questions/6116978
[ "Replace", "multiple", "substrings", "in", "one", "pass", "https", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "6116978" ]
[ "\"\"\"Replace multiple substrings in one pass\n https://stackoverflow.com/questions/6116978\n\n Keyword arguments:\n replacement -- various substrings and their replacements\n string -- the original string which contains the substrings that need\n replacing\n \"\"\"" ]
[ { "param": "replacement", "type": null }, { "param": "string", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "replacement", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "string", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def replace_multiple(replacement, string): replacement = dict((re.escape(key), val) for key, val in replacement.items()) pattern = re.compile("|".join(replacement.keys())) return pattern.sub(lambda m: replacement[re.escape(m.group(0))], string)
521
703
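A single-pass check of replace_multiple (inputs invented). Because the keys are regex-escaped before the pattern is built, 'a.b' matches literally rather than as 'a<any>b':

replace_multiple({"cat": "dog", "a.b": "c"}, "the cat sat on a.b")
# -> 'the dog sat on c'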
c9c1792b543fc03b2b8f9f914b14a3bcfc064244
Debith/pygim
src/pygim/system/utils/performance.py
[ "MIT" ]
Python
quick_timer
null
def quick_timer(title="Code block"): """ Use this function to quickly measure time on a code block. Usage: >>> with quick_timer(): ... slow_code() ... Arguments: title: Print the time used to run code block. """ start = time.time() yield end = time.time() print(f"{title} executed in {end-start:.2f} seconds!")
Use this function to quickly measure time on a code block. Usage: >>> with quick_timer(): ... slow_code() ... Arguments: title: Print the time used to run code block.
Use this function to quickly measure time on a code block.
[ "Use", "this", "function", "to", "quickly", "measure", "time", "on", "a", "code", "block", "." ]
def quick_timer(title="Code block"): start = time.time() yield end = time.time() print(f"{title} executed in {end-start:.2f} seconds!")
[ "def", "quick_timer", "(", "title", "=", "\"Code block\"", ")", ":", "start", "=", "time", ".", "time", "(", ")", "yield", "end", "=", "time", ".", "time", "(", ")", "print", "(", "f\"{title} executed in {end-start:.2f} seconds!\"", ")" ]
Use this function to quickly measure time on a code block.
[ "Use", "this", "function", "to", "quickly", "measure", "time", "on", "a", "code", "block", "." ]
[ "\"\"\"\n Use this function to quickly measure time on a code block.\n\n Usage:\n >>> with quick_timer():\n ... slow_code()\n ...\n\n Arguments:\n title: Print the time used to run code block.\n \"\"\"" ]
[ { "param": "title", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "title", "type": null, "docstring": "Print the time used to run code block.", "docstring_tokens": [ "Print", "the", "time", "used", "to", "run", "code", "block", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import time def quick_timer(title="Code block"): start = time.time() yield end = time.time() print(f"{title} executed in {end-start:.2f} seconds!")
522
1018
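As captured, quick_timer is a bare generator, yet its own docstring shows `with quick_timer(): ...`; the source presumably carries a @contextlib.contextmanager decorator that the extraction dropped. A hedged reconstruction of the intended use:

import contextlib
import time

timer = contextlib.contextmanager(quick_timer)   # assumed decorator, re-applied here
with timer("Sleeping"):
    time.sleep(0.1)
# prints something like: Sleeping executed in 0.10 seconds!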
7a96b4eab6e8ec4bd1c7e790b11e77d0afee7bbe
cinek810/ansible-deploy
source/modules/runners/run.py
[ "MIT" ]
Python
reassign_commit_and_workdir
<not_specific>
def reassign_commit_and_workdir(commit: str, workdir: str): """Change workdir if commit is a path (option --self-setup enabled in main)""" if not commit: commit = "" elif os.path.exists(os.path.abspath(commit)): workdir = commit commit = "" return workdir, commit
Change workdir if commit is a path (option --self-setup enabled in main)
Change workdir if commit is a path (option --self-setup enabled in main)
[ "Change", "workdir", "if", "commit", "is", "a", "path", "(", "option", "--", "self", "-", "setup", "enabled", "in", "main", ")" ]
def reassign_commit_and_workdir(commit: str, workdir: str): if not commit: commit = "" elif os.path.exists(os.path.abspath(commit)): workdir = commit commit = "" return workdir, commit
[ "def", "reassign_commit_and_workdir", "(", "commit", ":", "str", ",", "workdir", ":", "str", ")", ":", "if", "not", "commit", ":", "commit", "=", "\"\"", "elif", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "abspath", "(", "commit", ")", ")", ":", "workdir", "=", "commit", "commit", "=", "\"\"", "return", "workdir", ",", "commit" ]
Change workdir if commit is a path (option --self-setup enabled in main)
[ "Change", "workdir", "if", "commit", "is", "a", "path", "(", "option", "--", "self", "-", "setup", "enabled", "in", "main", ")" ]
[ "\"\"\"Change workdir if commit is a path (option --self-setup enabled in main)\"\"\"" ]
[ { "param": "commit", "type": "str" }, { "param": "workdir", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "commit", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "workdir", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def reassign_commit_and_workdir(commit: str, workdir: str): if not commit: commit = "" elif os.path.exists(os.path.abspath(commit)): workdir = commit commit = "" return workdir, commit
524
828
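Behaviour sketch for reassign_commit_and_workdir (paths invented; the middle case depends on the local filesystem):

reassign_commit_and_workdir(None, "/srv/deploy")      # -> ('/srv/deploy', '')
reassign_commit_and_workdir("/tmp", "/srv/deploy")    # -> ('/tmp', '') wherever /tmp exists
reassign_commit_and_workdir("abc123", "/srv/deploy")  # -> ('/srv/deploy', 'abc123') if ./abc123 is not a path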
97868b2c92c411e1b314ac146901075827a8d9fe
sonuGlobality/dd-trace-py
ddtrace/contrib/django/utils.py
[ "Apache-2.0", "BSD-3-Clause" ]
Python
resource_from_cache_prefix
<not_specific>
def resource_from_cache_prefix(resource, cache): """ Combine the resource name with the cache prefix (if any) """ if getattr(cache, "key_prefix", None): name = " ".join((resource, cache.key_prefix)) else: name = resource # enforce lowercase to make the output nicer to read return name.lower()
Combine the resource name with the cache prefix (if any)
Combine the resource name with the cache prefix (if any)
[ "Combine", "the", "resource", "name", "with", "the", "cache", "prefix", "(", "if", "any", ")" ]
def resource_from_cache_prefix(resource, cache): if getattr(cache, "key_prefix", None): name = " ".join((resource, cache.key_prefix)) else: name = resource return name.lower()
[ "def", "resource_from_cache_prefix", "(", "resource", ",", "cache", ")", ":", "if", "getattr", "(", "cache", ",", "\"key_prefix\"", ",", "None", ")", ":", "name", "=", "\" \"", ".", "join", "(", "(", "resource", ",", "cache", ".", "key_prefix", ")", ")", "else", ":", "name", "=", "resource", "return", "name", ".", "lower", "(", ")" ]
Combine the resource name with the cache prefix (if any)
[ "Combine", "the", "resource", "name", "with", "the", "cache", "prefix", "(", "if", "any", ")" ]
[ "\"\"\"\n Combine the resource name with the cache prefix (if any)\n \"\"\"", "# enforce lowercase to make the output nicer to read" ]
[ { "param": "resource", "type": null }, { "param": "cache", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "resource", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "cache", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def resource_from_cache_prefix(resource, cache): if getattr(cache, "key_prefix", None): name = " ".join((resource, cache.key_prefix)) else: name = resource return name.lower()
525
632
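A minimal check of resource_from_cache_prefix with a stand-in cache object (the real argument would be a Django cache backend):

class FakeCache:
    key_prefix = "site1"   # invented prefix

resource_from_cache_prefix("GET", FakeCache())   # -> 'get site1'
resource_from_cache_prefix("GET", object())      # -> 'get' (no key_prefix attribute)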
734095d094334e24063ff8d955f4ce6ac7977c11
clagis/droplet
clean.py
[ "BSD-3-Clause" ]
Python
BS_tag
<not_specific>
def BS_tag(data): """ Remove the non-bidding beads and fuse the bidding ones in the same site. Parameters: ----------- data: pandas dataframe must contain at least three columns named polyIndex, beadPosition and beadType. Returns: -------- data3: pandas dataframe The cdataframe with only the bidding sites for each polymer. """ data2 = data.copy(deep=True) L = [] """ For each polyIndex we create a small dataframe and label the beads if they are not in the backbone by the index of the first one in the chain and by -1 otherwise. So the beads in the first bidding site will all be labeled 0, etc. """ for elem in data2.groupby(['polyIndex']).mean().index.to_list(): beadP = 0 working_df = data2.loc[data2['polyIndex'] == elem, 'beadType'] idx = working_df.index.to_list() for k in idx: if working_df.at[k] != 2: L += [beadP] else: L += [-1] beadP += 1 # Drop the irrelevant beads (backbone) data2['beadPosition'] = L index_drops = data2[data2['beadType'] == 2].index data2.drop(index_drops,inplace=True) # Aggregate each bidding site data3 = data2.groupby(['polyIndex','beadPosition']).mean().reset_index() # Relabel by their position on the polymer (needed for linear links) positions = list(data3['beadPosition'].unique()) data3['beadPosition'] = data3['beadPosition'].apply(lambda x: positions.index(x)) data3 = data3.astype({'beadPosition': 'int64'}) return data3
Remove the non-bidding beads and fuse the bidding ones in the same site. Parameters: ----------- data: pandas dataframe must contain at least three columns named polyIndex, beadPosition and beadType. Returns: -------- data3: pandas dataframe The cdataframe with only the bidding sites for each polymer.
Remove the non-bidding beads and fuse the bidding ones in the same site. Parameters. pandas dataframe must contain at least three columns named polyIndex, beadPosition and beadType. pandas dataframe The cdataframe with only the bidding sites for each polymer.
[ "Remove", "the", "non", "-", "bidding", "beads", "and", "fuse", "the", "bidding", "ones", "in", "the", "same", "site", ".", "Parameters", ".", "pandas", "dataframe", "must", "contain", "at", "least", "three", "columns", "named", "polyIndex", "beadPosition", "and", "beadType", ".", "pandas", "dataframe", "The", "cdataframe", "with", "only", "the", "bidding", "sites", "for", "each", "polymer", "." ]
def BS_tag(data): data2 = data.copy(deep=True) L = [] for elem in data2.groupby(['polyIndex']).mean().index.to_list(): beadP = 0 working_df = data2.loc[data2['polyIndex'] == elem, 'beadType'] idx = working_df.index.to_list() for k in idx: if working_df.at[k] != 2: L += [beadP] else: L += [-1] beadP += 1 data2['beadPosition'] = L index_drops = data2[data2['beadType'] == 2].index data2.drop(index_drops,inplace=True) data3 = data2.groupby(['polyIndex','beadPosition']).mean().reset_index() positions = list(data3['beadPosition'].unique()) data3['beadPosition'] = data3['beadPosition'].apply(lambda x: positions.index(x)) data3 = data3.astype({'beadPosition': 'int64'}) return data3
[ "def", "BS_tag", "(", "data", ")", ":", "data2", "=", "data", ".", "copy", "(", "deep", "=", "True", ")", "L", "=", "[", "]", "\"\"\"\n For each polyIndex we create a small dataframe and label the beads if they\n are not in the backbone by the index of the first one in the chain and by\n -1 otherwise. So the beads in the first bidding site will all be labeled 0, etc.\n \"\"\"", "for", "elem", "in", "data2", ".", "groupby", "(", "[", "'polyIndex'", "]", ")", ".", "mean", "(", ")", ".", "index", ".", "to_list", "(", ")", ":", "beadP", "=", "0", "working_df", "=", "data2", ".", "loc", "[", "data2", "[", "'polyIndex'", "]", "==", "elem", ",", "'beadType'", "]", "idx", "=", "working_df", ".", "index", ".", "to_list", "(", ")", "for", "k", "in", "idx", ":", "if", "working_df", ".", "at", "[", "k", "]", "!=", "2", ":", "L", "+=", "[", "beadP", "]", "else", ":", "L", "+=", "[", "-", "1", "]", "beadP", "+=", "1", "data2", "[", "'beadPosition'", "]", "=", "L", "index_drops", "=", "data2", "[", "data2", "[", "'beadType'", "]", "==", "2", "]", ".", "index", "data2", ".", "drop", "(", "index_drops", ",", "inplace", "=", "True", ")", "data3", "=", "data2", ".", "groupby", "(", "[", "'polyIndex'", ",", "'beadPosition'", "]", ")", ".", "mean", "(", ")", ".", "reset_index", "(", ")", "positions", "=", "list", "(", "data3", "[", "'beadPosition'", "]", ".", "unique", "(", ")", ")", "data3", "[", "'beadPosition'", "]", "=", "data3", "[", "'beadPosition'", "]", ".", "apply", "(", "lambda", "x", ":", "positions", ".", "index", "(", "x", ")", ")", "data3", "=", "data3", ".", "astype", "(", "{", "'beadPosition'", ":", "'int64'", "}", ")", "return", "data3" ]
Remove the non-bidding beads and fuse the bidding ones in the same site.
[ "Remove", "the", "non", "-", "bidding", "beads", "and", "fuse", "the", "bidding", "ones", "in", "the", "same", "site", "." ]
[ "\"\"\"\n Remove the non-bidding beads and fuse the bidding ones in the same site.\n\n Parameters:\n -----------\n data: pandas dataframe\n must contain at least three columns named polyIndex, beadPosition and \n beadType.\n\n Returns:\n --------\n data3: pandas dataframe\n The cdataframe with only the bidding sites for each polymer.\n \"\"\"", "\"\"\"\n For each polyIndex we create a small dataframe and label the beads if they\n are not in the backbone by the index of the first one in the chain and by\n -1 otherwise. So the beads in the first bidding site will all be labeled 0, etc.\n \"\"\"", "# Drop the irrelevant beads (backbone)", "# Aggregate each bidding site", "# Relabel by their position on the polymer (needed for linear links)" ]
[ { "param": "data", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def BS_tag(data): data2 = data.copy(deep=True) L = [] for elem in data2.groupby(['polyIndex']).mean().index.to_list(): beadP = 0 working_df = data2.loc[data2['polyIndex'] == elem, 'beadType'] idx = working_df.index.to_list() for k in idx: if working_df.at[k] != 2: L += [beadP] else: L += [-1] beadP += 1 data2['beadPosition'] = L index_drops = data2[data2['beadType'] == 2].index data2.drop(index_drops,inplace=True) data3 = data2.groupby(['polyIndex','beadPosition']).mean().reset_index() positions = list(data3['beadPosition'].unique()) data3['beadPosition'] = data3['beadPosition'].apply(lambda x: positions.index(x)) data3 = data3.astype({'beadPosition': 'int64'}) return data3
526
404
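A construction-only sketch for BS_tag (toy frame invented; 'x' stands in for any numeric per-bead column). Note that, as written, each non-backbone bead gets its own label, so the groupby mainly drops the beadType == 2 rows and renumbers what remains:

import pandas as pd

df = pd.DataFrame({
    "polyIndex":    [0, 0, 0, 0],
    "beadPosition": [0, 1, 2, 3],
    "beadType":     [2, 1, 1, 2],   # 2 = backbone bead, per the code's test
    "x":            [0.0, 1.0, 2.0, 3.0],
})
BS_tag(df)
# -> two rows: the backbone beads are gone and beadPosition is relabelled 0 and 1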
41ec530569f08b12981fd51fcc2ee5c12f08e127
dsirianni/QCEngine
qcengine/testing.py
[ "BSD-3-Clause" ]
Python
environ_context
null
def environ_context(env): """Temporarily set environment variables inside the context manager and fully restore previous environment afterwards """ original_env = {key: os.getenv(key) for key in env} os.environ.update(env) try: yield finally: for key, value in original_env.items(): if value is None: del os.environ[key] else: os.environ[key] = value
Temporarily set environment variables inside the context manager and fully restore previous environment afterwards
Temporarily set environment variables inside the context manager and fully restore previous environment afterwards
[ "Temporarily", "set", "environment", "variables", "inside", "the", "context", "manager", "and", "fully", "restore", "previous", "environment", "afterwards" ]
def environ_context(env): original_env = {key: os.getenv(key) for key in env} os.environ.update(env) try: yield finally: for key, value in original_env.items(): if value is None: del os.environ[key] else: os.environ[key] = value
[ "def", "environ_context", "(", "env", ")", ":", "original_env", "=", "{", "key", ":", "os", ".", "getenv", "(", "key", ")", "for", "key", "in", "env", "}", "os", ".", "environ", ".", "update", "(", "env", ")", "try", ":", "yield", "finally", ":", "for", "key", ",", "value", "in", "original_env", ".", "items", "(", ")", ":", "if", "value", "is", "None", ":", "del", "os", ".", "environ", "[", "key", "]", "else", ":", "os", ".", "environ", "[", "key", "]", "=", "value" ]
Temporarily set environment variables inside the context manager and fully restore previous environment afterwards
[ "Temporarily", "set", "environment", "variables", "inside", "the", "context", "manager", "and", "fully", "restore", "previous", "environment", "afterwards" ]
[ "\"\"\"Temporarily set environment variables inside the context manager and\n fully restore previous environment afterwards\n \"\"\"" ]
[ { "param": "env", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "env", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def environ_context(env): original_env = {key: os.getenv(key) for key in env} os.environ.update(env) try: yield finally: for key, value in original_env.items(): if value is None: del os.environ[key] else: os.environ[key] = value
527
931
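environ_context has the same quirk as quick_timer above: it is a bare generator, so a @contextlib.contextmanager decorator was presumably stripped upstream. Re-applying it by hand:

import contextlib
import os

env_ctx = contextlib.contextmanager(environ_context)

os.environ.pop("DEMO_VAR", None)                 # DEMO_VAR is an invented name
with env_ctx({"DEMO_VAR": "1"}):
    assert os.environ["DEMO_VAR"] == "1"
assert "DEMO_VAR" not in os.environ              # previous environment fully restored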
971559a27f6fab2538a36e8fdefb425b3dc81c49
hugovk/pytest-localftpserver
pytest_localftpserver/helper_functions.py
[ "Apache-2.0" ]
Python
pretty_logger
null
def pretty_logger(heading, msg): """ Helper function to pretty log function output for debug purposes Parameters ---------- heading: str Heading of the section which should be logged msg: str Message to be logged """ heading_width = 50 decoration_str = "\n" + "#"*heading_width + "\n" heading = "#" + heading.center(heading_width-2) + "#" heading = "{decoration_str}{heading}{decoration_str}\n\n".format(decoration_str=decoration_str, heading=heading) logging.info(heading+msg+"\n"*2)
Helper function to pretty log function output for debug purposes Parameters ---------- heading: str Heading of the section which should be logged msg: str Message to be logged
Helper function to pretty log function output for debug purposes Parameters str Heading of the section which should be logged msg: str Message to be logged
[ "Helper", "function", "to", "pretty", "log", "function", "output", "for", "debug", "purposes", "Parameters", "str", "Heading", "of", "the", "section", "which", "should", "be", "logged", "msg", ":", "str", "Message", "to", "be", "logged" ]
def pretty_logger(heading, msg): heading_width = 50 decoration_str = "\n" + "#"*heading_width + "\n" heading = "#" + heading.center(heading_width-2) + "#" heading = "{decoration_str}{heading}{decoration_str}\n\n".format(decoration_str=decoration_str, heading=heading) logging.info(heading+msg+"\n"*2)
[ "def", "pretty_logger", "(", "heading", ",", "msg", ")", ":", "heading_width", "=", "50", "decoration_str", "=", "\"\\n\"", "+", "\"#\"", "*", "heading_width", "+", "\"\\n\"", "heading", "=", "\"#\"", "+", "heading", ".", "center", "(", "heading_width", "-", "2", ")", "+", "\"#\"", "heading", "=", "\"{decoration_str}{heading}{decoration_str}\\n\\n\"", ".", "format", "(", "decoration_str", "=", "decoration_str", ",", "heading", "=", "heading", ")", "logging", ".", "info", "(", "heading", "+", "msg", "+", "\"\\n\"", "*", "2", ")" ]
Helper function to pretty log function output for debug purposes Parameters
[ "Helper", "function", "to", "pretty", "log", "function", "output", "for", "debug", "purposes", "Parameters" ]
[ "\"\"\"\n Helper function to pretty log function output for debug purposes\n\n Parameters\n ----------\n heading: str\n Heading of the section which should be logged\n msg: str\n Message to be logged\n \"\"\"" ]
[ { "param": "heading", "type": null }, { "param": "msg", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "heading", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "msg", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import logging def pretty_logger(heading, msg): heading_width = 50 decoration_str = "\n" + "#"*heading_width + "\n" heading = "#" + heading.center(heading_width-2) + "#" heading = "{decoration_str}{heading}{decoration_str}\n\n".format(decoration_str=decoration_str, heading=heading) logging.info(heading+msg+"\n"*2)
528
291
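A minimal pretty_logger call (message text invented); INFO-level logging must be enabled for the output to appear:

import logging

logging.basicConfig(level=logging.INFO)
pretty_logger("Server state", "ready")
# logs a 50-character '#' banner, the centred heading, another banner, then the message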
a3ef0b88cefc0bb3df8b482f244b3d747d097850
tbttfox/MeshCrawler
meshcrawlerGen.py
[ "MIT" ]
Python
_growByFace
<not_specific>
def _growByFace(mesh, growSet, exclude): """ Grow a set of verts along edges without any fanciness Args: mesh: The mesh object for the growth growSet: A set of Vertex objects to grow. exclude: A set of Vertex objects to exclude from the growth Returns: newGrowSet: the grown verts newExclude: the next set of verts to exclude """ grown = set() for vert in growSet: grown.update(mesh.adjacentVertsByFace(vert)) newgrown = grown - exclude newexclude = exclude | growSet return newgrown, newexclude
Grow a set of verts along edges without any fanciness Args: mesh: The mesh object for the growth growSet: A set of Vertex objects to grow. exclude: A set of Vertex objects to exclude from the growth Returns: newGrowSet: the grown verts newExclude: the next set of verts to exclude
Grow a set of verts along edges without any fanciness
[ "Grow", "a", "set", "of", "verts", "along", "edges", "without", "any", "fanciness" ]
def _growByFace(mesh, growSet, exclude): grown = set() for vert in growSet: grown.update(mesh.adjacentVertsByFace(vert)) newgrown = grown - exclude newexclude = exclude | growSet return newgrown, newexclude
[ "def", "_growByFace", "(", "mesh", ",", "growSet", ",", "exclude", ")", ":", "grown", "=", "set", "(", ")", "for", "vert", "in", "growSet", ":", "grown", ".", "update", "(", "mesh", ".", "adjacentVertsByFace", "(", "vert", ")", ")", "newgrown", "=", "grown", "-", "exclude", "newexclude", "=", "exclude", "|", "growSet", "return", "newgrown", ",", "newexclude" ]
Grow a set of verts along edges without any fanciness
[ "Grow", "a", "set", "of", "verts", "along", "edges", "without", "any", "fanciness" ]
[ "\"\"\" Grow a set of verts along edges without any fanciness\r\n\tArgs:\r\n\t\tmesh: The mesh object for the growth\r\n\t\tgrowSet: A set of Vertex objects to grow.\r\n\t\texclude: A set of Vertex objects to exclude from\r\n\t\t\tthe growth\r\n\r\n\tReturns:\r\n\t\tnewGrowSet: the grown verts\r\n\t\tnewExclude: the next set of verts to exclude\r\n\t\"\"\"" ]
[ { "param": "mesh", "type": null }, { "param": "growSet", "type": null }, { "param": "exclude", "type": null } ]
{ "returns": [ { "docstring": "the grown verts\nnewExclude: the next set of verts to exclude", "docstring_tokens": [ "the", "grown", "verts", "newExclude", ":", "the", "next", "set", "of", "verts", "to", "exclude" ], "type": "newGrowSet" } ], "raises": [], "params": [ { "identifier": "mesh", "type": null, "docstring": "The mesh object for the growth", "docstring_tokens": [ "The", "mesh", "object", "for", "the", "growth" ], "default": null, "is_optional": null }, { "identifier": "growSet", "type": null, "docstring": "A set of Vertex objects to grow.", "docstring_tokens": [ "A", "set", "of", "Vertex", "objects", "to", "grow", "." ], "default": null, "is_optional": null }, { "identifier": "exclude", "type": null, "docstring": "A set of Vertex objects to exclude from\nthe growth", "docstring_tokens": [ "A", "set", "of", "Vertex", "objects", "to", "exclude", "from", "the", "growth" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _growByFace(mesh, growSet, exclude): grown = set() for vert in growSet: grown.update(mesh.adjacentVertsByFace(vert)) newgrown = grown - exclude newexclude = exclude | growSet return newgrown, newexclude
529
901
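_growByFace needs a mesh exposing adjacentVertsByFace; a hypothetical stand-in is enough to see one growth step:

class ToyMesh:                          # invented stand-in for the real mesh type
    def __init__(self, adjacency):
        self.adjacency = adjacency
    def adjacentVertsByFace(self, vert):
        return self.adjacency[vert]

mesh = ToyMesh({0: {1, 2}, 1: {0, 2}, 2: {0, 1, 3}, 3: {2}})
_growByFace(mesh, {0}, set())
# -> ({1, 2}, {0})   one ring grown; vertex 0 joins the exclude set for the next pass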
362a29768718447e693bc91c9705990e15456c9f
piccolo-orm/targ
tests/test_command.py
[ "MIT" ]
Python
print_
null
def print_(value: t.Any, *args, **kwargs): """ When patching the builtin print statement, this is used as a side effect, so we can still use debug statements. """ sys.stdout.write(str(value) + "\n") sys.stdout.flush()
When patching the builtin print statement, this is used as a side effect, so we can still use debug statements.
When patching the builtin print statement, this is used as a side effect, so we can still use debug statements.
[ "When", "patching", "the", "builtin", "print", "statement", "this", "is", "used", "as", "a", "side", "effect", "so", "we", "can", "still", "use", "debug", "statements", "." ]
def print_(value: t.Any, *args, **kwargs): sys.stdout.write(str(value) + "\n") sys.stdout.flush()
[ "def", "print_", "(", "value", ":", "t", ".", "Any", ",", "*", "args", ",", "**", "kwargs", ")", ":", "sys", ".", "stdout", ".", "write", "(", "str", "(", "value", ")", "+", "\"\\n\"", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
When patching the builtin print statement, this is used as a side effect, so we can still use debug statements.
[ "When", "patching", "the", "builtin", "print", "statement", "this", "is", "used", "as", "a", "side", "effect", "so", "we", "can", "still", "use", "debug", "statements", "." ]
[ "\"\"\"\n When patching the builtin print statement, this is used as a side effect,\n so we can still use debug statements.\n \"\"\"" ]
[ { "param": "value", "type": "t.Any" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "value", "type": "t.Any", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import typing as t import sys def print_(value: t.Any, *args, **kwargs): sys.stdout.write(str(value) + "\n") sys.stdout.flush()
530
326
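The intended wiring for print_, per its docstring, is as a side effect when print is patched in tests (sketch, using unittest.mock):

from unittest.mock import patch

with patch("builtins.print", side_effect=print_):
    print("still visible")   # the mock forwards to print_, which writes to sys.stdout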
56c3a45c439a6c0abc0c917dc72a18fd27570e90
Eric-Kobayashi/bankofbrain
pgclasses.py
[ "MIT" ]
Python
_fix_column_name
<not_specific>
def _fix_column_name(s): ''' Transform special characters to underscore for easy python reading ''' sfixed = s to_fix = [': ',':',' ', '/', '^', '.'] for f in to_fix: if f in s: sfixed = sfixed.replace(f, '_') sfixed = ''.join([i if ord(i) < 128 else 'u' for i in sfixed]) sfixed = sfixed.replace('uum', 'um') return sfixed
Transform special characters to underscore for easy python reading
Transform special characters to underscore for easy python reading
[ "Transform", "special", "characters", "to", "underscore", "for", "easy", "python", "reading" ]
def _fix_column_name(s): sfixed = s to_fix = [': ',':',' ', '/', '^', '.'] for f in to_fix: if f in s: sfixed = sfixed.replace(f, '_') sfixed = ''.join([i if ord(i) < 128 else 'u' for i in sfixed]) sfixed = sfixed.replace('uum', 'um') return sfixed
[ "def", "_fix_column_name", "(", "s", ")", ":", "sfixed", "=", "s", "to_fix", "=", "[", "': '", ",", "':'", ",", "' '", ",", "'/'", ",", "'^'", ",", "'.'", "]", "for", "f", "in", "to_fix", ":", "if", "f", "in", "s", ":", "sfixed", "=", "sfixed", ".", "replace", "(", "f", ",", "'_'", ")", "sfixed", "=", "''", ".", "join", "(", "[", "i", "if", "ord", "(", "i", ")", "<", "128", "else", "'u'", "for", "i", "in", "sfixed", "]", ")", "sfixed", "=", "sfixed", ".", "replace", "(", "'uum'", ",", "'um'", ")", "return", "sfixed" ]
Transform special characters to underscore for easy python reading
[ "Transform", "special", "characters", "to", "underscore", "for", "easy", "python", "reading" ]
[ "'''\n Transform special characters to underscore for easy python reading\n '''" ]
[ { "param": "s", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "s", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _fix_column_name(s): sfixed = s to_fix = [': ',':',' ', '/', '^', '.'] for f in to_fix: if f in s: sfixed = sfixed.replace(f, '_') sfixed = ''.join([i if ord(i) < 128 else 'u' for i in sfixed]) sfixed = sfixed.replace('uum', 'um') return sfixed
531
187
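Two hand-verified _fix_column_name calls (column names invented). The replacements run in the to_fix order, then non-ASCII characters collapse to 'u':

_fix_column_name("Flow rate: µl/min")   # -> 'Flow_rate_ul_min'
_fix_column_name("F0^2 obs.")           # -> 'F0_2_obs_'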
c429a6891cc7cccc7dbe336e38a0f67c548862e2
venkateshvsn/patterns
package/alphabets/small_alphabets/o.py
[ "MIT" ]
Python
for_o
null
def for_o(): """printing small 'o' using for loop""" for row in range(4): for col in range(4): if col==0 and row in(1,2) or col==3 and row in(1,2) or row==0 and col in(1,2) or row==3 and col in(1,2): print("*",end=" ") else: print(" ",end=" ") print()
printing small 'o' using for loop
printing small 'o' using for loop
[ "printing", "small", "'", "o", "'", "using", "for", "loop" ]
def for_o(): for row in range(4): for col in range(4): if col==0 and row in(1,2) or col==3 and row in(1,2) or row==0 and col in(1,2) or row==3 and col in(1,2): print("*",end=" ") else: print(" ",end=" ") print()
[ "def", "for_o", "(", ")", ":", "for", "row", "in", "range", "(", "4", ")", ":", "for", "col", "in", "range", "(", "4", ")", ":", "if", "col", "==", "0", "and", "row", "in", "(", "1", ",", "2", ")", "or", "col", "==", "3", "and", "row", "in", "(", "1", ",", "2", ")", "or", "row", "==", "0", "and", "col", "in", "(", "1", ",", "2", ")", "or", "row", "==", "3", "and", "col", "in", "(", "1", ",", "2", ")", ":", "print", "(", "\"*\"", ",", "end", "=", "\" \"", ")", "else", ":", "print", "(", "\" \"", ",", "end", "=", "\" \"", ")", "print", "(", ")" ]
printing small 'o' using for loop
[ "printing", "small", "'", "o", "'", "using", "for", "loop" ]
[ "\"\"\"printing small 'o' using for loop\"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def for_o(): for row in range(4): for col in range(4): if col==0 and row in(1,2) or col==3 and row in(1,2) or row==0 and col in(1,2) or row==3 and col in(1,2): print("*",end=" ") else: print(" ",end=" ") print()
532
560
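for_o takes no input; its output (each cell is one character plus a trailing space) looks like:

for_o()
#   * *
# *     *
# *     *
#   * *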
d6084ba006fca3dbf02fce40a48a6bd8128d4046
Tribler/isort
isort/isort.py
[ "MIT" ]
Python
_strip_top_comments
str
def _strip_top_comments(lines: Sequence[str], line_separator: str) -> str: """Strips # comments that exist at the top of the given lines""" lines = copy.copy(lines) while lines and lines[0].startswith("#"): lines = lines[1:] return line_separator.join(lines)
Strips # comments that exist at the top of the given lines
Strips # comments that exist at the top of the given lines
[ "Strips", "#", "comments", "that", "exist", "at", "the", "top", "of", "the", "given", "lines" ]
def _strip_top_comments(lines: Sequence[str], line_separator: str) -> str: lines = copy.copy(lines) while lines and lines[0].startswith("#"): lines = lines[1:] return line_separator.join(lines)
[ "def", "_strip_top_comments", "(", "lines", ":", "Sequence", "[", "str", "]", ",", "line_separator", ":", "str", ")", "->", "str", ":", "lines", "=", "copy", ".", "copy", "(", "lines", ")", "while", "lines", "and", "lines", "[", "0", "]", ".", "startswith", "(", "\"#\"", ")", ":", "lines", "=", "lines", "[", "1", ":", "]", "return", "line_separator", ".", "join", "(", "lines", ")" ]
Strips # comments that exist at the top of the given lines
[ "Strips", "#", "comments", "that", "exist", "at", "the", "top", "of", "the", "given", "lines" ]
[ "\"\"\"Strips # comments that exist at the top of the given lines\"\"\"" ]
[ { "param": "lines", "type": "Sequence[str]" }, { "param": "line_separator", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "lines", "type": "Sequence[str]", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "line_separator", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
from typing import Sequence import copy def _strip_top_comments(lines: Sequence[str], line_separator: str) -> str: lines = copy.copy(lines) while lines and lines[0].startswith("#"): lines = lines[1:] return line_separator.join(lines)
533
781
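A quick _strip_top_comments check (lines invented); only the leading run of '#' lines is removed:

_strip_top_comments(["# a comment", "# another", "import os", "x = 1"], "\n")
# -> 'import os\nx = 1'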
b48a3fac94ccf175c4e06e11727efcfc01d0e71e
eddo888/swapsies
Swapsies/xmi.py
[ "MIT" ]
Python
generateID
<not_specific>
def generateID(): """ todo: cache uuid based on package path Namespac """ return 'EDDO_%s' % uuid.uuid4()
todo: cache uuid based on package path Namespac
cache uuid based on package path Namespac
[ "cache", "uuid", "based", "on", "package", "path", "Namespac" ]
def generateID(): return 'EDDO_%s' % uuid.uuid4()
[ "def", "generateID", "(", ")", ":", "return", "'EDDO_%s'", "%", "uuid", ".", "uuid4", "(", ")" ]
todo: cache uuid based on package path Namespac
[ "todo", ":", "cache", "uuid", "based", "on", "package", "path", "Namespac" ]
[ "\"\"\"\n\ttodo: cache uuid based on package path Namespac\n\t\"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import uuid def generateID(): return 'EDDO_%s' % uuid.uuid4()
534
134
4297231d77d1dd0fc298fe508723210a3c32aa7b
E10-10/Product-Clustering
CSScoring.py
[ "MIT" ]
Python
precision_i
<not_specific>
def precision_i(cl_real_i, cl_pred_i): ''' Description: Calculate precision for a single posting_id Parameters: argument1 (list): list of posting_id's belonging to the real cluster argument2 (list): list of posting_id's belonging to the predicted cluster Returns: float value of precision ''' s_pred = set(cl_pred_i) s_real = set(cl_real_i) s_diff_p_r = s_pred.difference(s_real) return (len(s_pred) - len(s_diff_p_r)) / len(s_pred)
Description: Calculate precision for a single posting_id Parameters: argument1 (list): list of posting_id's belonging to the real cluster argument2 (list): list of posting_id's belonging to the predicted cluster Returns: float value of precision
Calculate precision for a single posting_id Parameters: argument1 (list): list of posting_id's belonging to the real cluster argument2 (list): list of posting_id's belonging to the predicted cluster Returns: float value of precision
[ "Calculate", "precision", "for", "a", "single", "posting_id", "Parameters", ":", "argument1", "(", "list", ")", ":", "list", "of", "posting_id", "'", "s", "belonging", "to", "the", "real", "cluster", "argument2", "(", "list", ")", ":", "list", "of", "posting_id", "'", "s", "belonging", "to", "the", "predicted", "cluster", "Returns", ":", "float", "value", "of", "precision" ]
def precision_i(cl_real_i, cl_pred_i): s_pred = set(cl_pred_i) s_real = set(cl_real_i) s_diff_p_r = s_pred.difference(s_real) return (len(s_pred) - len(s_diff_p_r)) / len(s_pred)
[ "def", "precision_i", "(", "cl_real_i", ",", "cl_pred_i", ")", ":", "s_pred", "=", "set", "(", "cl_pred_i", ")", "s_real", "=", "set", "(", "cl_real_i", ")", "s_diff_p_r", "=", "s_pred", ".", "difference", "(", "s_real", ")", "return", "(", "len", "(", "s_pred", ")", "-", "len", "(", "s_diff_p_r", ")", ")", "/", "len", "(", "s_pred", ")" ]
Description: Calculate precision for a single posting_id Parameters: argument1 (list): list of posting_id's belonging to the real cluster argument2 (list): list of posting_id's belonging to the predicted cluster Returns: float value of precision
[ "Description", ":", "Calculate", "precision", "for", "a", "single", "posting_id", "Parameters", ":", "argument1", "(", "list", ")", ":", "list", "of", "posting_id", "'", "s", "belonging", "to", "the", "real", "cluster", "argument2", "(", "list", ")", ":", "list", "of", "posting_id", "'", "s", "belonging", "to", "the", "predicted", "cluster", "Returns", ":", "float", "value", "of", "precision" ]
[ "'''\r\n Description:\r\n Calculate precision for a single posting_id\r\n Parameters: \r\n argument1 (list): list of posting_id's belonging to the real cluster\r\n argument2 (list): list of posting_id's belonging to the predicted cluster\r\n Returns: \r\n float value of precision \r\n '''" ]
[ { "param": "cl_real_i", "type": null }, { "param": "cl_pred_i", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cl_real_i", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "cl_pred_i", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def precision_i(cl_real_i, cl_pred_i): s_pred = set(cl_pred_i) s_real = set(cl_real_i) s_diff_p_r = s_pred.difference(s_real) return (len(s_pred) - len(s_diff_p_r)) / len(s_pred)
535
961
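A hand-checked precision_i call (posting ids invented): two of the four predicted ids are in the real cluster, so precision is 0.5:

real = ["p1", "p2", "p3"]
pred = ["p1", "p2", "p4", "p5"]
precision_i(real, pred)   # -> 0.5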
6056bd744740a6183b0066f334718d075970eaaa
lopegeor1/project-7-stock-performance-lopegeor1
portfolio/portfolio_report.py
[ "MIT" ]
Python
save_portfolio
<not_specific>
def save_portfolio(csv_source, filename): """Saves data to a CSV file""" csv_target = csv_source with open(filename, 'w', newline='') as file: writer = csv.DictWriter(file, ['symbol', 'units', 'cost']) writer.writeheader() writer.writerows(csv_source) return csv_target
Saves data to a CSV file
Saves data to a CSV file
[ "Saves", "data", "to", "a", "CSV", "file" ]
def save_portfolio(csv_source, filename): csv_target = csv_source with open(filename, 'w', newline='') as file: writer = csv.DictWriter(file, ['symbol', 'units', 'cost']) writer.writeheader() writer.writerows(csv_source) return csv_target
[ "def", "save_portfolio", "(", "csv_source", ",", "filename", ")", ":", "csv_target", "=", "csv_source", "with", "open", "(", "filename", ",", "'w'", ",", "newline", "=", "''", ")", "as", "file", ":", "writer", "=", "csv", ".", "DictWriter", "(", "file", ",", "[", "'symbol'", ",", "'units'", ",", "'cost'", "]", ")", "writer", ".", "writeheader", "(", ")", "writer", ".", "writerows", "(", "csv_source", ")", "return", "csv_target" ]
Saves data to a CSV file
[ "Saves", "data", "to", "a", "CSV", "file" ]
[ "\"\"\"Saves data to a CSV file\"\"\"" ]
[ { "param": "csv_source", "type": null }, { "param": "filename", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "csv_source", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "filename", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import csv def save_portfolio(csv_source, filename): csv_target = csv_source with open(filename, 'w', newline='') as file: writer = csv.DictWriter(file, ['symbol', 'units', 'cost']) writer.writeheader() writer.writerows(csv_source) return csv_target
536
578
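A save_portfolio sketch (rows and filename invented); it writes a fixed symbol/units/cost header and returns the input unchanged:

rows = [{"symbol": "AAPL", "units": 10, "cost": 154.2},
        {"symbol": "MSFT", "units": 5, "cost": 243.1}]
save_portfolio(rows, "out.csv")
# out.csv:
# symbol,units,cost
# AAPL,10,154.2
# MSFT,5,243.1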
0ba470664339dd70a786e040d154c2f6fd2bcea7
RNAcentral/rnacentral-import-pipeline
rnacentral_pipeline/databases/generic/v1.py
[ "Apache-2.0" ]
Python
external_id
<not_specific>
def external_id(record): """ Extract the external id for the record. """ _, eid = record["primaryId"].split(":", 1) return eid
Extract the external id for the record.
Extract the external id for the record.
[ "Extract", "the", "external", "id", "for", "the", "record", "." ]
def external_id(record): _, eid = record["primaryId"].split(":", 1) return eid
[ "def", "external_id", "(", "record", ")", ":", "_", ",", "eid", "=", "record", "[", "\"primaryId\"", "]", ".", "split", "(", "\":\"", ",", "1", ")", "return", "eid" ]
Extract the external id for the record.
[ "Extract", "the", "external", "id", "for", "the", "record", "." ]
[ "\"\"\"\n Extract the external id for the record.\n \"\"\"" ]
[ { "param": "record", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "record", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def external_id(record): _, eid = record["primaryId"].split(":", 1) return eid
537
212
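external_id just splits on the first colon; the id below is invented but follows the 'DB:accession' shape the code expects:

external_id({"primaryId": "RNACENTRAL:URS0000000001"})   # -> 'URS0000000001'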
d3492817f3759e575feb12d1ab12700de1cf1b23
liquidinvestigations/hoover-snoop2
snoop/data/analyzers/email.py
[ "MIT" ]
Python
iter_parts
null
def iter_parts(message, numbers=[]): """Yields multipart messages into identifiable parts. The numbers are the positions in each part of the tree. """ if message.is_multipart(): for n, part in enumerate(message.get_payload(), 1): yield from iter_parts(part, numbers + [str(n)]) else: yield '.'.join(numbers), message
Yields multipart messages into identifiable parts. The numbers are the positions in each part of the tree.
Yields multipart messages into identifiable parts. The numbers are the positions in each part of the tree.
[ "Yields", "multipart", "messages", "into", "identifiable", "parts", ".", "The", "numbers", "are", "the", "positions", "in", "each", "part", "of", "the", "tree", "." ]
def iter_parts(message, numbers=[]): if message.is_multipart(): for n, part in enumerate(message.get_payload(), 1): yield from iter_parts(part, numbers + [str(n)]) else: yield '.'.join(numbers), message
[ "def", "iter_parts", "(", "message", ",", "numbers", "=", "[", "]", ")", ":", "if", "message", ".", "is_multipart", "(", ")", ":", "for", "n", ",", "part", "in", "enumerate", "(", "message", ".", "get_payload", "(", ")", ",", "1", ")", ":", "yield", "from", "iter_parts", "(", "part", ",", "numbers", "+", "[", "str", "(", "n", ")", "]", ")", "else", ":", "yield", "'.'", ".", "join", "(", "numbers", ")", ",", "message" ]
Yields multipart messages into identifiable parts.
[ "Yields", "multipart", "messages", "into", "identifiable", "parts", "." ]
[ "\"\"\"Yields multipart messages into identifiable parts.\n\n The numbers are the positions in each part of the tree.\n \"\"\"" ]
[ { "param": "message", "type": null }, { "param": "numbers", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "message", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "numbers", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def iter_parts(message, numbers=[]): if message.is_multipart(): for n, part in enumerate(message.get_payload(), 1): yield from iter_parts(part, numbers + [str(n)]) else: yield '.'.join(numbers), message
538
854
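iter_parts against a small two-part message built with the standard email package:

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart()
msg.attach(MIMEText("plain body"))
msg.attach(MIMEText("<b>html body</b>", "html"))
[(num, part.get_content_type()) for num, part in iter_parts(msg)]
# -> [('1', 'text/plain'), ('2', 'text/html')]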
ba919973bbe902138e4c7cf69565886868e22472
yjzhenglamarmota/isce2
contrib/stack/alosStack/StackPulic.py
[ "ECL-2.0", "Apache-2.0" ]
Python
hasGPU
<not_specific>
def hasGPU(): ''' Determine if GPU modules are available. ''' flag = False try: from zerodop.GPUtopozero.GPUtopozero import PyTopozero from zerodop.GPUgeo2rdr.GPUgeo2rdr import PyGeo2rdr flag = True except: pass return flag
Determine if GPU modules are available.
Determine if GPU modules are available.
[ "Determine", "if", "GPU", "modules", "are", "available", "." ]
def hasGPU(): flag = False try: from zerodop.GPUtopozero.GPUtopozero import PyTopozero from zerodop.GPUgeo2rdr.GPUgeo2rdr import PyGeo2rdr flag = True except: pass return flag
[ "def", "hasGPU", "(", ")", ":", "flag", "=", "False", "try", ":", "from", "zerodop", ".", "GPUtopozero", ".", "GPUtopozero", "import", "PyTopozero", "from", "zerodop", ".", "GPUgeo2rdr", ".", "GPUgeo2rdr", "import", "PyGeo2rdr", "flag", "=", "True", "except", ":", "pass", "return", "flag" ]
Determine if GPU modules are available.
[ "Determine", "if", "GPU", "modules", "are", "available", "." ]
[ "'''\n Determine if GPU modules are available.\n '''" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def hasGPU(): flag = False try: from zerodop.GPUtopozero.GPUtopozero import PyTopozero from zerodop.GPUgeo2rdr.GPUgeo2rdr import PyGeo2rdr flag = True except: pass return flag
539
545
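A usage sketch for hasGPU; note that the bare except also swallows non-import errors, so any failure inside the GPU modules is reported as "no GPU":

if hasGPU():
    print("using GPU topozero/geo2rdr modules")
else:
    print("GPU modules unavailable, falling back to CPU")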
d9cdb13609f6d98600ff48882b0d2062d6feda8e
infosmith/batteries
helpers/oop/reflection/helpers.py
[ "MIT" ]
Python
is_magic
<not_specific>
def is_magic(attribute): """Check if attribute name matches magic attribute convention.""" return all([ attribute.startswith('__'), attribute.endswith('__')])
Check if attribute name matches magic attribute convention.
Check if attribute name matches magic attribute convention.
[ "Check", "if", "attribute", "name", "matches", "magic", "attribute", "convention", "." ]
def is_magic(attribute): return all([ attribute.startswith('__'), attribute.endswith('__')])
[ "def", "is_magic", "(", "attribute", ")", ":", "return", "all", "(", "[", "attribute", ".", "startswith", "(", "'__'", ")", ",", "attribute", ".", "endswith", "(", "'__'", ")", "]", ")" ]
Check if attribute name matches magic attribute convention.
[ "Check", "if", "attribute", "name", "matches", "magic", "attribute", "convention", "." ]
[ "\"\"\"Check if attribute name matches magic attribute convention.\"\"\"" ]
[ { "param": "attribute", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "attribute", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def is_magic(attribute): return all([ attribute.startswith('__'), attribute.endswith('__')])
540
817
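A few illustrative calls for is_magic:

print(is_magic('__init__'))  # True
print(is_magic('_private'))  # False
print(is_magic('__'))        # True -- both checks match the same two underscores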
9a9d5deac39acb40d45765a0d7d843dad51fa11c
lambdalisue/vim-quern
rplugin/python3/lista/prompt/keymap.py
[ "MIT" ]
Python
from_rules
<not_specific>
def from_rules(cls, nvim, rules): """Create a keymap instance from a rule tuple. Args: nvim (neovim.Nvim): A ``neovim.Nvim`` instance. rules (tuple): A tuple of rules. Example: >>> from .keystroke import Keystroke >>> from unittest.mock import MagicMock >>> nvim = MagicMock() >>> nvim.options = {'encoding': 'utf-8'} >>> lhs1 = Keystroke.parse(nvim, '<C-H>') >>> lhs2 = Keystroke.parse(nvim, '<C-D>') >>> lhs3 = Keystroke.parse(nvim, '<C-M>') >>> rhs1 = Keystroke.parse(nvim, '<BS>') >>> rhs2 = Keystroke.parse(nvim, '<DEL>') >>> rhs3 = Keystroke.parse(nvim, '<CR>') >>> keymap = Keymap.from_rules(nvim, [ ... (lhs1, rhs1), ... (lhs2, rhs2, 'noremap'), ... (lhs3, rhs3, 'nowait'), ... ]) Returns: Keymap: A keymap instance """ keymap = cls() keymap.register_from_rules(nvim, rules) return keymap
Create a keymap instance from a rule tuple. Args: nvim (neovim.Nvim): A ``neovim.Nvim`` instance. rules (tuple): A tuple of rules. Example: >>> from .keystroke import Keystroke >>> from unittest.mock import MagicMock >>> nvim = MagicMock() >>> nvim.options = {'encoding': 'utf-8'} >>> lhs1 = Keystroke.parse(nvim, '<C-H>') >>> lhs2 = Keystroke.parse(nvim, '<C-D>') >>> lhs3 = Keystroke.parse(nvim, '<C-M>') >>> rhs1 = Keystroke.parse(nvim, '<BS>') >>> rhs2 = Keystroke.parse(nvim, '<DEL>') >>> rhs3 = Keystroke.parse(nvim, '<CR>') >>> keymap = Keymap.from_rules(nvim, [ ... (lhs1, rhs1), ... (lhs2, rhs2, 'noremap'), ... (lhs3, rhs3, 'nowait'), ... ]) Returns: Keymap: A keymap instance
Create a keymap instance from a rule tuple.
[ "Create", "a", "keymap", "instance", "from", "a", "rule", "tuple", "." ]
def from_rules(cls, nvim, rules): keymap = cls() keymap.register_from_rules(nvim, rules) return keymap
[ "def", "from_rules", "(", "cls", ",", "nvim", ",", "rules", ")", ":", "keymap", "=", "cls", "(", ")", "keymap", ".", "register_from_rules", "(", "nvim", ",", "rules", ")", "return", "keymap" ]
Create a keymap instance from a rule tuple.
[ "Create", "a", "keymap", "instance", "from", "a", "rule", "tuple", "." ]
[ "\"\"\"Create a keymap instance from a rule tuple.\n\n Args:\n nvim (neovim.Nvim): A ``neovim.Nvim`` instance.\n rules (tuple): A tuple of rules.\n\n Example:\n >>> from .keystroke import Keystroke\n >>> from unittest.mock import MagicMock\n >>> nvim = MagicMock()\n >>> nvim.options = {'encoding': 'utf-8'}\n >>> lhs1 = Keystroke.parse(nvim, '<C-H>')\n >>> lhs2 = Keystroke.parse(nvim, '<C-D>')\n >>> lhs3 = Keystroke.parse(nvim, '<C-M>')\n >>> rhs1 = Keystroke.parse(nvim, '<BS>')\n >>> rhs2 = Keystroke.parse(nvim, '<DEL>')\n >>> rhs3 = Keystroke.parse(nvim, '<CR>')\n >>> keymap = Keymap.from_rules(nvim, [\n ... (lhs1, rhs1),\n ... (lhs2, rhs2, 'noremap'),\n ... (lhs3, rhs3, 'nowait'),\n ... ])\n\n Returns:\n Keymap: A keymap instance\n \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "nvim", "type": null }, { "param": "rules", "type": null } ]
{ "returns": [ { "docstring": "A keymap instance", "docstring_tokens": [ "A", "keymap", "instance" ], "type": "Keymap" } ], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "nvim", "type": null, "docstring": "A ``neovim.Nvim`` instance.", "docstring_tokens": [ "A", "`", "`", "neovim", ".", "Nvim", "`", "`", "instance", "." ], "default": null, "is_optional": false }, { "identifier": "rules", "type": null, "docstring": "A tuple of rules.", "docstring_tokens": [ "A", "tuple", "of", "rules", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [ { "identifier": "examples", "docstring": ">>> from .keystroke import Keystroke\n>>> from unittest.mock import MagicMock\n>>> nvim = MagicMock()\n>>> nvim.options = {'encoding': 'utf-8'}\n>>> lhs1 = Keystroke.parse(nvim, '')\n>>> lhs2 = Keystroke.parse(nvim, '')\n>>> lhs3 = Keystroke.parse(nvim, '')\n>>> rhs1 = Keystroke.parse(nvim, '')\n>>> rhs2 = Keystroke.parse(nvim, '')\n>>> rhs3 = Keystroke.parse(nvim, '')\n>>> keymap = Keymap.from_rules(nvim, [\n(lhs1, rhs1),\n(lhs2, rhs2, 'noremap'),\n(lhs3, rhs3, 'nowait'),\n])", "docstring_tokens": [ ">>>", "from", ".", "keystroke", "import", "Keystroke", ">>>", "from", "unittest", ".", "mock", "import", "MagicMock", ">>>", "nvim", "=", "MagicMock", "()", ">>>", "nvim", ".", "options", "=", "{", "'", "encoding", "'", ":", "'", "utf", "-", "8", "'", "}", ">>>", "lhs1", "=", "Keystroke", ".", "parse", "(", "nvim", "'", "'", ")", ">>>", "lhs2", "=", "Keystroke", ".", "parse", "(", "nvim", "'", "'", ")", ">>>", "lhs3", "=", "Keystroke", ".", "parse", "(", "nvim", "'", "'", ")", ">>>", "rhs1", "=", "Keystroke", ".", "parse", "(", "nvim", "'", "'", ")", ">>>", "rhs2", "=", "Keystroke", ".", "parse", "(", "nvim", "'", "'", ")", ">>>", "rhs3", "=", "Keystroke", ".", "parse", "(", "nvim", "'", "'", ")", ">>>", "keymap", "=", "Keymap", ".", "from_rules", "(", "nvim", "[", "(", "lhs1", "rhs1", ")", "(", "lhs2", "rhs2", "'", "noremap", "'", ")", "(", "lhs3", "rhs3", "'", "nowait", "'", ")", "]", ")" ] } ] }
def from_rules(cls, nvim, rules): keymap = cls() keymap.register_from_rules(nvim, rules) return keymap
541
655
a0f0c06e26b9ddc2643f30bc688e91910f5d532a
egpbos/kwandl
kwandl.py
[ "Apache-2.0" ]
Python
parse_snippet
<not_specific>
def parse_snippet(source, filename, mode, flags, firstlineno): """Like ast.parse, but accepts indented code snippet with a line number offset.""" args = filename, mode, flags | ast.PyCF_ONLY_AST, True prefix = "\n" try: a = compile(prefix + source, *args) except IndentationError: # Already indented? Wrap with dummy compound statement prefix = "with 0:\n" a = compile(prefix + source, *args) # Peel wrapper a.body = a.body[0].body ast.increment_lineno(a, firstlineno - 2) return a
Like ast.parse, but accepts indented code snippet with a line number offset.
Like ast.parse, but accepts indented code snippet with a line number offset.
[ "Like", "ast", ".", "parse", "but", "accepts", "indented", "code", "snippet", "with", "a", "line", "number", "offset", "." ]
def parse_snippet(source, filename, mode, flags, firstlineno): args = filename, mode, flags | ast.PyCF_ONLY_AST, True prefix = "\n" try: a = compile(prefix + source, *args) except IndentationError: prefix = "with 0:\n" a = compile(prefix + source, *args) a.body = a.body[0].body ast.increment_lineno(a, firstlineno - 2) return a
[ "def", "parse_snippet", "(", "source", ",", "filename", ",", "mode", ",", "flags", ",", "firstlineno", ")", ":", "args", "=", "filename", ",", "mode", ",", "flags", "|", "ast", ".", "PyCF_ONLY_AST", ",", "True", "prefix", "=", "\"\\n\"", "try", ":", "a", "=", "compile", "(", "prefix", "+", "source", ",", "*", "args", ")", "except", "IndentationError", ":", "prefix", "=", "\"with 0:\\n\"", "a", "=", "compile", "(", "prefix", "+", "source", ",", "*", "args", ")", "a", ".", "body", "=", "a", ".", "body", "[", "0", "]", ".", "body", "ast", ".", "increment_lineno", "(", "a", ",", "firstlineno", "-", "2", ")", "return", "a" ]
Like ast.parse, but accepts indented code snippet with a line number offset.
[ "Like", "ast", ".", "parse", "but", "accepts", "indented", "code", "snippet", "with", "a", "line", "number", "offset", "." ]
[ "\"\"\"Like ast.parse, but accepts indented code snippet with a line number offset.\"\"\"", "# Already indented? Wrap with dummy compound statement", "# Peel wrapper" ]
[ { "param": "source", "type": null }, { "param": "filename", "type": null }, { "param": "mode", "type": null }, { "param": "flags", "type": null }, { "param": "firstlineno", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "source", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "filename", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "mode", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "flags", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "firstlineno", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import ast def parse_snippet(source, filename, mode, flags, firstlineno): args = filename, mode, flags | ast.PyCF_ONLY_AST, True prefix = "\n" try: a = compile(prefix + source, *args) except IndentationError: prefix = "with 0:\n" a = compile(prefix + source, *args) a.body = a.body[0].body ast.increment_lineno(a, firstlineno - 2) return a
542
374
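A usage sketch for parse_snippet (the snippet and line offset are hypothetical): an indented fragment trips IndentationError on the first compile, gets wrapped in the dummy `with 0:` block, unwrapped, and renumbered to firstlineno.

import ast

snippet = "    x = 1\n    y = x + 1\n"  # indented, as if cut from a function body
tree = parse_snippet(snippet, "<snippet>", "exec", 0, 10)
print(type(tree).__name__)              # -> Module
print(tree.body[0].lineno)              # -> 10 (renumbered to firstlineno)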
309383637f400a32f422490cd89ae39bd0ef45cf
ereidelbach/cfbAnalysis
src/data/aggregate_CFBStats_by_team.py
[ "MIT" ]
Python
identifyRawData
<not_specific>
def identifyRawData(path_project): ''' Purpose: In their raw format, statistics are broken out into individual files for each category, for every available year. This function loops over every stat folder, for each team, in `data/raw/CFBStats` and creates a dictionary to store a record of folders that exist for each team Input: (1) path_project (pathlib Path): Project directory file path containing the project's files Output: (1) dict_paths_all (dictionary): Contains a listing of all subfolders and paths for each team on file ''' # Set the path to the raw data folder path_data_raw = path_project.joinpath(pathlib.Path('data/raw/CFBStats')) # Create a dictionary for storing all folder paths for each team dict_paths_all = {} # Identify the team folders list_path_team = [f for f in path_data_raw.iterdir() if f.is_dir()] list_path_team.sort() # Loop over every team's folders for path_team in list_path_team: # Create a dictionary for storing each team's folder paths dict_paths_team = {} # Identify every category subfolder the team has list_path_subfolder = [x for x in path_team.iterdir() if x.is_dir()] list_path_subfolder.sort() # Loop over every subfolder and add it to the team's dictionary for path_subfolder in list_path_subfolder: dict_paths_team[str(path_subfolder).split('\\')[-1]] = path_subfolder # Add the team's subfolder to the master dictionary dict_paths_all[str(path_team).split('\\')[-1]] = dict_paths_team return dict_paths_all
Purpose: In their raw format, statistics are broken out into individual files for each category, for every available year. This function loops over every stat folder, for each team, in `data/raw/CFBStats` and creates a dictionary to store a record of folders that exist for each team Input: (1) path_project (pathlib Path): Project directory file path containing the project's files Output: (1) dict_paths_all (dictionary): Contains a listing of all subfolders and paths for each team on file
In their raw format, statistics are broken out into individual files for each category, for every available year. This function loops over every stat folder, for each team, in `data/raw/CFBStats` and creates a dictionary to store a record of folders that exist for each team (1) path_project (pathlib Path): Project directory file path containing the project's files (1) dict_paths_all (dictionary): Contains a listing of all subfolders and paths for each team on file
[ "In", "their", "raw", "format", "statistics", "are", "broken", "out", "into", "individual", "files", "for", "each", "category", "for", "every", "available", "year", ".", "This", "function", "loops", "over", "every", "stat", "folder", "for", "each", "team", "in", "`", "data", "/", "raw", "/", "CFBStats", "`", "and", "creates", "a", "dictionary", "to", "store", "a", "record", "of", "folders", "that", "exist", "for", "each", "team", "(", "1", ")", "path_project", "(", "pathlib", "Path", ")", ":", "Project", "directory", "file", "path", "containing", "the", "project", "'", "s", "files", "(", "1", ")", "dict_paths_all", "(", "dictionary", ")", ":", "Contains", "a", "listing", "of", "all", "subfolders", "and", "paths", "for", "each", "team", "on", "file" ]
def identifyRawData(path_project): path_data_raw = path_project.joinpath(pathlib.Path('data/raw/CFBStats')) dict_paths_all = {} list_path_team = [f for f in path_data_raw.iterdir() if f.is_dir()] list_path_team.sort() for path_team in list_path_team: dict_paths_team = {} list_path_subfolder = [x for x in path_team.iterdir() if x.is_dir()] list_path_subfolder.sort() for path_subfolder in list_path_subfolder: dict_paths_team[str(path_subfolder).split('\\')[-1]] = path_subfolder dict_paths_all[str(path_team).split('\\')[-1]] = dict_paths_team return dict_paths_all
[ "def", "identifyRawData", "(", "path_project", ")", ":", "path_data_raw", "=", "path_project", ".", "joinpath", "(", "pathlib", ".", "Path", "(", "'data/raw/CFBStats'", ")", ")", "dict_paths_all", "=", "{", "}", "list_path_team", "=", "[", "f", "for", "f", "in", "path_data_raw", ".", "iterdir", "(", ")", "if", "f", ".", "is_dir", "(", ")", "]", "list_path_team", ".", "sort", "(", ")", "for", "path_team", "in", "list_path_team", ":", "dict_paths_team", "=", "{", "}", "list_path_subfolder", "=", "[", "x", "for", "x", "in", "path_team", ".", "iterdir", "(", ")", "if", "x", ".", "is_dir", "(", ")", "]", "list_path_subfolder", ".", "sort", "(", ")", "for", "path_subfolder", "in", "list_path_subfolder", ":", "dict_paths_team", "[", "str", "(", "path_subfolder", ")", ".", "split", "(", "'\\\\'", ")", "[", "-", "1", "]", "]", "=", "path_subfolder", "dict_paths_all", "[", "str", "(", "path_team", ")", ".", "split", "(", "'\\\\'", ")", "[", "-", "1", "]", "]", "=", "dict_paths_team", "return", "dict_paths_all" ]
Purpose: In their raw format, statistics are broken out into individual files for each category, for every available year.
[ "Purpose", ":", "In", "their", "raw", "format", "statistics", "are", "broken", "out", "into", "individual", "files", "for", "each", "category", "for", "every", "available", "year", "." ]
[ "'''\n Purpose: In their raw format, statistics are broken out into individual \n files for each category, for every available year. This function\n loops over every stat folder, for each team, in `data/raw/CFBStats`\n and creates a dictionary to store a record of folders that exist \n for each team \n \n Input:\n (1) path_project (pathlib Path): Project directory file path containing\n the project's files\n \n Output:\n (1) dict_paths_all (dictionary): Contains a listing of all subfolders\n and paths for each team on file\n '''", "# Set the path to the raw data folder", "# Create a dictionary for storing all folder paths for each team", "# Identify the team folders", "# Loop over every team's folders", "# Create a dictionary for storing each team's folder paths", "# Identify every category subfolder the team has", "# Loop over every subfolder and add it to the team's dictionary", "# Add the team's subfolder to the master dictionary" ]
[ { "param": "path_project", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "path_project", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import pathlib def identifyRawData(path_project): path_data_raw = path_project.joinpath(pathlib.Path('data/raw/CFBStats')) dict_paths_all = {} list_path_team = [f for f in path_data_raw.iterdir() if f.is_dir()] list_path_team.sort() for path_team in list_path_team: dict_paths_team = {} list_path_subfolder = [x for x in path_team.iterdir() if x.is_dir()] list_path_subfolder.sort() for path_subfolder in list_path_subfolder: dict_paths_team[str(path_subfolder).split('\\')[-1]] = path_subfolder dict_paths_all[str(path_team).split('\\')[-1]] = dict_paths_team return dict_paths_all
543
146
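A usage sketch for identifyRawData, assuming the data/raw/CFBStats/<team>/<category>/ layout exists under the given project root (otherwise iterdir raises FileNotFoundError). Note that the str(path).split('\\') calls assume Windows separators; pathlib's .name attribute would be the portable alternative.

import pathlib

paths = identifyRawData(pathlib.Path('.'))  # '.' stands in for the project root
for team, categories in paths.items():
    print(team, sorted(categories))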
d1dc1088215b31ff0b6ce0fafe8fc7cf4ba23b6f
DecoPath/DecoPath
viewer/src/form_processing_utils.py
[ "Apache-2.0" ]
Python
add_forms_to_formset
<not_specific>
def add_forms_to_formset(request, formset, extra: int): """Add additional forms to formset.""" # Generate copy of request.POST dictionary formset_dictionary_copy = request.POST.copy() # Update formset management form with additional forms formset_dictionary_copy['form-TOTAL_FORMS'] = int(formset_dictionary_copy['form-TOTAL_FORMS']) + extra # Update formset sent to user with additional empty forms return formset(formset_dictionary_copy)
Add additional forms to formset.
Add additional forms to formset.
[ "Add", "additional", "forms", "to", "formset", "." ]
def add_forms_to_formset(request, formset, extra: int): formset_dictionary_copy = request.POST.copy() formset_dictionary_copy['form-TOTAL_FORMS'] = int(formset_dictionary_copy['form-TOTAL_FORMS']) + extra return formset(formset_dictionary_copy)
[ "def", "add_forms_to_formset", "(", "request", ",", "formset", ",", "extra", ":", "int", ")", ":", "formset_dictionary_copy", "=", "request", ".", "POST", ".", "copy", "(", ")", "formset_dictionary_copy", "[", "'form-TOTAL_FORMS'", "]", "=", "int", "(", "formset_dictionary_copy", "[", "'form-TOTAL_FORMS'", "]", ")", "+", "extra", "return", "formset", "(", "formset_dictionary_copy", ")" ]
Add additional forms to formset.
[ "Add", "additional", "forms", "to", "formset", "." ]
[ "\"\"\"Add additional forms to formset.\"\"\"", "# Generate copy of request.POST dictionary", "# Update formset management form with additional forms", "# Update formset sent to user with additional empty forms" ]
[ { "param": "request", "type": null }, { "param": "formset", "type": null }, { "param": "extra", "type": "int" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "request", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "formset", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "extra", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def add_forms_to_formset(request, formset, extra: int): formset_dictionary_copy = request.POST.copy() formset_dictionary_copy['form-TOTAL_FORMS'] = int(formset_dictionary_copy['form-TOTAL_FORMS']) + extra return formset(formset_dictionary_copy)
544
626
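A sketch of how add_forms_to_formset might be called from a Django view (MyForm and the view are hypothetical; request is the usual HttpRequest):

from django.forms import Form, formset_factory

class MyForm(Form):  # hypothetical form class
    pass

MyFormSet = formset_factory(MyForm)

def my_view(request):  # hypothetical Django view
    # returns the formset re-bound with two extra empty forms
    formset = add_forms_to_formset(request, MyFormSet, extra=2)
    ...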
c4d66ad4f4cb6601a0b5fff95e18d68fdc5f0838
ch1huizong/scode
stdlib2-src/dist-packages/cupshelpers/cupshelpers.py
[ "MIT" ]
Python
activateNewPrinter
null
def activateNewPrinter(connection, name): """ Set a new printer enabled, accepting jobs, and (if necessary) the default printer. @param connection: CUPS connection @type connection: cups.Connection object @param name: printer name @type name: string @raise cups.IPPError: IPP error """ connection.enablePrinter (name) connection.acceptJobs (name) # Set as the default if there is not already a default printer. if connection.getDefault () == None: connection.setDefault (name)
Set a new printer enabled, accepting jobs, and (if necessary) the default printer. @param connection: CUPS connection @type connection: cups.Connection object @param name: printer name @type name: string @raise cups.IPPError: IPP error
Set a new printer enabled, accepting jobs, and (if necessary) the default printer.
[ "Set", "a", "new", "printer", "enabled", "accepting", "jobs", "and", "(", "if", "necessary", ")", "the", "default", "printer", "." ]
def activateNewPrinter(connection, name): connection.enablePrinter (name) connection.acceptJobs (name) if connection.getDefault () == None: connection.setDefault (name)
[ "def", "activateNewPrinter", "(", "connection", ",", "name", ")", ":", "connection", ".", "enablePrinter", "(", "name", ")", "connection", ".", "acceptJobs", "(", "name", ")", "if", "connection", ".", "getDefault", "(", ")", "==", "None", ":", "connection", ".", "setDefault", "(", "name", ")" ]
Set a new printer enabled, accepting jobs, and (if necessary) the default printer.
[ "Set", "a", "new", "printer", "enabled", "accepting", "jobs", "and", "(", "if", "necessary", ")", "the", "default", "printer", "." ]
[ "\"\"\"\n Set a new printer enabled, accepting jobs, and (if necessary) the\n default printer.\n\n @param connection: CUPS connection\n @type connection: cups.Connection object\n @param name: printer name\n @type name: string\n @raise cups.IPPError: IPP error\n \"\"\"", "# Set as the default if there is not already a default printer." ]
[ { "param": "connection", "type": null }, { "param": "name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "connection", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "name", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [ { "identifier": "IPPError", "docstring": null, "docstring_tokens": [ "None" ] } ] }
def activateNewPrinter(connection, name): connection.enablePrinter (name) connection.acceptJobs (name) if connection.getDefault () == None: connection.setDefault (name)
545
440
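A usage sketch for activateNewPrinter, assuming pycups is installed and a CUPS server is reachable (the queue name is hypothetical):

import cups

conn = cups.Connection()
activateNewPrinter(conn, 'office-laser')  # hypothetical queue name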
639f456516f567e5ddd5767d23734379d44b64e3
maximenoel/ceph-open-terrarium
salt/create_din_roster_pillar.py
[ "Apache-2.0" ]
Python
write_pillar
null
def write_pillar(pillar_file, master_ip): """ Writes salt-master ip to pillar so that minion can write their minion.conf file with the salt-master ip """ pillar_file.write("deepsea-master-ip: {}\n".format(master_ip))
Writes salt-master ip to pillar so that minion can write their minion.conf file with the salt-master ip
Writes salt-master ip to pillar so that minion can write their minion.conf file with the salt-master ip
[ "Writes", "salt", "-", "master", "ip", "to", "pillar", "so", "that", "minion", "can", "write", "their", "minion", ".", "conf", "file", "with", "the", "salt", "-", "master", "ip" ]
def write_pillar(pillar_file, master_ip): pillar_file.write("deepsea-master-ip: {}\n".format(master_ip))
[ "def", "write_pillar", "(", "pillar_file", ",", "master_ip", ")", ":", "pillar_file", ".", "write", "(", "\"deepsea-master-ip: {}\\n\"", ".", "format", "(", "master_ip", ")", ")" ]
Writes salt-master ip to pillar so that minion can write their minion.conf file with the salt-master ip
[ "Writes", "salt", "-", "master", "ip", "to", "pillar", "so", "that", "minion", "can", "write", "their", "minion", ".", "conf", "file", "with", "the", "salt", "-", "master", "ip" ]
[ "\"\"\"\n Writes salt-master ip to pillar so that minion can write their minion.conf\n file with the salt-master ip\n \"\"\"" ]
[ { "param": "pillar_file", "type": null }, { "param": "master_ip", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "pillar_file", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "master_ip", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def write_pillar(pillar_file, master_ip): pillar_file.write("deepsea-master-ip: {}\n".format(master_ip))
546
113
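A usage sketch for write_pillar (the pillar path is hypothetical, and 192.0.2.10 is from the TEST-NET documentation range):

with open('/srv/pillar/master_ip.sls', 'w') as pillar_file:  # hypothetical pillar file
    write_pillar(pillar_file, '192.0.2.10')
# the file now contains: deepsea-master-ip: 192.0.2.10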
ba6ede3d18b8a552f12aae37fbad7f16a646132f
GProtoZeroW/skidl
skidl/erc.py
[ "MIT" ]
Python
add_to_exec_or_eval_list
null
def add_to_exec_or_eval_list(class_or_inst, list_name, func): """Append a function to a function list of a class or class instance.""" if list_name not in class_or_inst.__dict__: setattr(class_or_inst, list_name, []) getattr(class_or_inst, list_name).append(func)
Append a function to a function list of a class or class instance.
Append a function to a function list of a class or class instance.
[ "Append", "a", "function", "to", "a", "function", "list", "of", "a", "class", "or", "class", "instance", "." ]
def add_to_exec_or_eval_list(class_or_inst, list_name, func): if list_name not in class_or_inst.__dict__: setattr(class_or_inst, list_name, []) getattr(class_or_inst, list_name).append(func)
[ "def", "add_to_exec_or_eval_list", "(", "class_or_inst", ",", "list_name", ",", "func", ")", ":", "if", "list_name", "not", "in", "class_or_inst", ".", "__dict__", ":", "setattr", "(", "class_or_inst", ",", "list_name", ",", "[", "]", ")", "getattr", "(", "class_or_inst", ",", "list_name", ")", ".", "append", "(", "func", ")" ]
Append a function to a function list of a class or class instance.
[ "Append", "a", "function", "to", "a", "function", "list", "of", "a", "class", "or", "class", "instance", "." ]
[ "\"\"\"Append a function to a function list of a class or class instance.\"\"\"" ]
[ { "param": "class_or_inst", "type": null }, { "param": "list_name", "type": null }, { "param": "func", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "class_or_inst", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "list_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "func", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def add_to_exec_or_eval_list(class_or_inst, list_name, func): if list_name not in class_or_inst.__dict__: setattr(class_or_inst, list_name, []) getattr(class_or_inst, list_name).append(func)
548
351
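A minimal sketch for add_to_exec_or_eval_list; the Net class and list name are hypothetical stand-ins for skidl's ERC hook lists:

class Net:
    pass

add_to_exec_or_eval_list(Net, 'erc_list', lambda net: print('checking', net))
for check in Net.erc_list:
    check('net1')  # -> checking net1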
93819aefc4a0c6918cc7cff29cfb796c092b6523
dharif23/ncprep
ncprep/_operations.py
[ "MIT" ]
Python
initial_message
null
def initial_message(script=None, custom_message=None): """ This function creates initial message and prints it """ marker = '-' # Must be single character # Print a general help message date_time = datetime.datetime.now() _print_string = "Column based text filtering and processing" _print_string += " [ " + date_time.strftime("%d-%B-%Y %H:%M:%S") + " ]" # Help message display _help_string = "Need help?: python {} -h/--help".format(script) # Create prefix and suffix prefix = marker * 2 + ' ' suffix = ' ' + marker * 2 print_string = prefix + _print_string custom_message = prefix + custom_message help_string = prefix + _help_string # Take max str_length = max([len(print_string), len(custom_message), len(help_string)]) + 3 # Create padding print_string = print_string + " " * (str_length - len(print_string) - len(suffix)) + suffix custom_message = custom_message + " " * (str_length - len(custom_message) - len(suffix)) + suffix help_string = help_string + " " * (str_length - len(help_string) - len(suffix)) + suffix # Print line_block = marker * str_length print(line_block, print_string, custom_message, help_string, line_block, sep='\n')
This function creates initial message and prints it
This function creates initial message and prints it
[ "This", "function", "creates", "initial", "message", "and", "prints", "it" ]
def initial_message(script=None, custom_message=None): marker = '-' date_time = datetime.datetime.now() _print_string = "Column based text filtering and processing" _print_string += " [ " + date_time.strftime("%d-%B-%Y %H:%M:%S") + " ]" _help_string = "Need help?: python {} -h/--help".format(script) prefix = marker * 2 + ' ' suffix = ' ' + marker * 2 print_string = prefix + _print_string custom_message = prefix + custom_message help_string = prefix + _help_string str_length = max([len(print_string), len(custom_message), len(help_string)]) + 3 print_string = print_string + " " * (str_length - len(print_string) - len(suffix)) + suffix custom_message = custom_message + " " * (str_length - len(custom_message) - len(suffix)) + suffix help_string = help_string + " " * (str_length - len(help_string) - len(suffix)) + suffix line_block = marker * str_length print(line_block, print_string, custom_message, help_string, line_block, sep='\n')
[ "def", "initial_message", "(", "script", "=", "None", ",", "custom_message", "=", "None", ")", ":", "marker", "=", "'-'", "date_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "_print_string", "=", "\"Column based text filtering and processing\"", "_print_string", "+=", "\" [ \"", "+", "date_time", ".", "strftime", "(", "\"%d-%B-%Y %H:%M:%S\"", ")", "+", "\" ]\"", "_help_string", "=", "\"Need help?: python {} -h/--help\"", ".", "format", "(", "script", ")", "prefix", "=", "marker", "*", "2", "+", "' '", "suffix", "=", "' '", "+", "marker", "*", "2", "print_string", "=", "prefix", "+", "_print_string", "custom_message", "=", "prefix", "+", "custom_message", "help_string", "=", "prefix", "+", "_help_string", "str_length", "=", "max", "(", "[", "len", "(", "print_string", ")", ",", "len", "(", "custom_message", ")", ",", "len", "(", "help_string", ")", "]", ")", "+", "3", "print_string", "=", "print_string", "+", "\" \"", "*", "(", "str_length", "-", "len", "(", "print_string", ")", "-", "len", "(", "suffix", ")", ")", "+", "suffix", "custom_message", "=", "custom_message", "+", "\" \"", "*", "(", "str_length", "-", "len", "(", "custom_message", ")", "-", "len", "(", "suffix", ")", ")", "+", "suffix", "help_string", "=", "help_string", "+", "\" \"", "*", "(", "str_length", "-", "len", "(", "help_string", ")", "-", "len", "(", "suffix", ")", ")", "+", "suffix", "line_block", "=", "marker", "*", "str_length", "print", "(", "line_block", ",", "print_string", ",", "custom_message", ",", "help_string", ",", "line_block", ",", "sep", "=", "'\\n'", ")" ]
This function creates initial message and prints it
[ "This", "function", "creates", "initial", "message", "and", "prints", "it" ]
[ "\"\"\"\n This function creates initial message and prints it\n \"\"\"", "# Must be single character", "# Print a general help message", "# Help message display", "# Create prefix and suffix", "# Take max", "# Create padding", "# Print" ]
[ { "param": "script", "type": null }, { "param": "custom_message", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "script", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "custom_message", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import datetime def initial_message(script=None, custom_message=None): marker = '-' date_time = datetime.datetime.now() _print_string = "Column based text filtering and processing" _print_string += " [ " + date_time.strftime("%d-%B-%Y %H:%M:%S") + " ]" _help_string = "Need help?: python {} -h/--help".format(script) prefix = marker * 2 + ' ' suffix = ' ' + marker * 2 print_string = prefix + _print_string custom_message = prefix + custom_message help_string = prefix + _help_string str_length = max([len(print_string), len(custom_message), len(help_string)]) + 3 print_string = print_string + " " * (str_length - len(print_string) - len(suffix)) + suffix custom_message = custom_message + " " * (str_length - len(custom_message) - len(suffix)) + suffix help_string = help_string + " " * (str_length - len(help_string) - len(suffix)) + suffix line_block = marker * str_length print(line_block, print_string, custom_message, help_string, line_block, sep='\n')
550
447
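A usage sketch for initial_message; it prints a boxed banner sized to the longest of the three lines. Passing custom_message=None would raise TypeError at the prefix concatenation, so a string is required in practice:

initial_message(script='ncprep.py', custom_message='Filtering columns 1-3')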
d7f84eaa8e9cc6715f20bad257c4f635c1d05d1e
jstzwjr/FaceX-Zoo
training_mode/semi-siamese_training/train.py
[ "Apache-2.0" ]
Python
moving_average
null
def moving_average(probe, gallery, alpha): """Update the gallery-set network in the momentum way.(MoCo) """ for param_probe, param_gallery in zip(probe.parameters(), gallery.parameters()): param_gallery.data = \ alpha* param_gallery.data + (1 - alpha) * param_probe.detach().data
Update the gallery-set network in the momentum way.(MoCo)
Update the gallery-set network in the momentum way.(MoCo)
[ "Update", "the", "gallery", "-", "set", "network", "in", "the", "momentum", "way", ".", "(", "MoCo", ")" ]
def moving_average(probe, gallery, alpha): for param_probe, param_gallery in zip(probe.parameters(), gallery.parameters()): param_gallery.data = \ alpha* param_gallery.data + (1 - alpha) * param_probe.detach().data
[ "def", "moving_average", "(", "probe", ",", "gallery", ",", "alpha", ")", ":", "for", "param_probe", ",", "param_gallery", "in", "zip", "(", "probe", ".", "parameters", "(", ")", ",", "gallery", ".", "parameters", "(", ")", ")", ":", "param_gallery", ".", "data", "=", "alpha", "*", "param_gallery", ".", "data", "+", "(", "1", "-", "alpha", ")", "*", "param_probe", ".", "detach", "(", ")", ".", "data" ]
Update the gallery-set network in the momentum way.
[ "Update", "the", "gallery", "-", "set", "network", "in", "the", "momentum", "way", "." ]
[ "\"\"\"Update the gallery-set network in the momentum way.(MoCo)\r\n \"\"\"" ]
[ { "param": "probe", "type": null }, { "param": "gallery", "type": null }, { "param": "alpha", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "probe", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "gallery", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "alpha", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def moving_average(probe, gallery, alpha): for param_probe, param_gallery in zip(probe.parameters(), gallery.parameters()): param_gallery.data = \ alpha* param_gallery.data + (1 - alpha) * param_probe.detach().data
552
632
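A sketch for moving_average using two toy PyTorch modules (the layer shapes are hypothetical):

import torch.nn as nn

probe = nn.Linear(4, 2)
gallery = nn.Linear(4, 2)
moving_average(probe, gallery, alpha=0.999)  # gallery <- 0.999*gallery + 0.001*probe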
750d4cfdb4f1c15caad3c2a6a796807ecaeccc00
mesbahamin/test
tests/conftest.py
[ "MIT" ]
Python
invalid_file
<not_specific>
def invalid_file(tmpdir, request): """Return a path to an invalid config file. Remove the file when a test is finished with it. """ data_dir = pathlib.Path(str(tmpdir)) invalid_file = data_dir.joinpath('invalid') with invalid_file.open('w') as f: f.write('this is invalid') def tearDown(): if invalid_file.exists(): invalid_file.unlink() request.addfinalizer(tearDown) return invalid_file
Return a path to an invalid config file. Remove the file when a test is finished with it.
Return a path to an invalid config file. Remove the file when a test is finished with it.
[ "Return", "a", "path", "to", "an", "invalid", "config", "file", ".", "Remove", "the", "file", "when", "a", "test", "is", "finished", "with", "it", "." ]
def invalid_file(tmpdir, request): data_dir = pathlib.Path(str(tmpdir)) invalid_file = data_dir.joinpath('invalid') with invalid_file.open('w') as f: f.write('this is invalid') def tearDown(): if invalid_file.exists(): invalid_file.unlink() request.addfinalizer(tearDown) return invalid_file
[ "def", "invalid_file", "(", "tmpdir", ",", "request", ")", ":", "data_dir", "=", "pathlib", ".", "Path", "(", "str", "(", "tmpdir", ")", ")", "invalid_file", "=", "data_dir", ".", "joinpath", "(", "'invalid'", ")", "with", "invalid_file", ".", "open", "(", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'this is invalid'", ")", "def", "tearDown", "(", ")", ":", "if", "invalid_file", ".", "exists", "(", ")", ":", "invalid_file", ".", "unlink", "(", ")", "request", ".", "addfinalizer", "(", "tearDown", ")", "return", "invalid_file" ]
Return a path to an invalid config file.
[ "Return", "a", "path", "to", "an", "invalid", "config", "file", "." ]
[ "\"\"\"Return a path to an invalid config file.\n Remove the file when a test is finished with it.\n \"\"\"" ]
[ { "param": "tmpdir", "type": null }, { "param": "request", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "tmpdir", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "request", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import pathlib def invalid_file(tmpdir, request): data_dir = pathlib.Path(str(tmpdir)) invalid_file = data_dir.joinpath('invalid') with invalid_file.open('w') as f: f.write('this is invalid') def tearDown(): if invalid_file.exists(): invalid_file.unlink() request.addfinalizer(tearDown) return invalid_file
553
423
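invalid_file is a pytest fixture (the snippet omits the @pytest.fixture decorator; tmpdir and request are pytest's built-in fixtures). A test consumes it by parameter name:

def test_rejects_invalid_config(invalid_file):  # hypothetical test
    assert invalid_file.read_text() == 'this is invalid'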
3ecb8aebc6db34b0071190449249fe040eca4db6
LaudateCorpus1/python-dataproc
samples/snippets/list_clusters.py
[ "Apache-2.0" ]
Python
list_clusters
null
def list_clusters(dataproc, project, region): """List the details of clusters in the region.""" for cluster in dataproc.list_clusters( request={"project_id": project, "region": region} ): print( ( "{} - {}".format( cluster.cluster_name, cluster.status.state.name ) ) )
List the details of clusters in the region.
List the details of clusters in the region.
[ "List", "the", "details", "of", "clusters", "in", "the", "region", "." ]
def list_clusters(dataproc, project, region): for cluster in dataproc.list_clusters( request={"project_id": project, "region": region} ): print( ( "{} - {}".format( cluster.cluster_name, cluster.status.state.name ) ) )
[ "def", "list_clusters", "(", "dataproc", ",", "project", ",", "region", ")", ":", "for", "cluster", "in", "dataproc", ".", "list_clusters", "(", "request", "=", "{", "\"project_id\"", ":", "project", ",", "\"region\"", ":", "region", "}", ")", ":", "print", "(", "(", "\"{} - {}\"", ".", "format", "(", "cluster", ".", "cluster_name", ",", "cluster", ".", "status", ".", "state", ".", "name", ")", ")", ")" ]
List the details of clusters in the region.
[ "List", "the", "details", "of", "clusters", "in", "the", "region", "." ]
[ "\"\"\"List the details of clusters in the region.\"\"\"" ]
[ { "param": "dataproc", "type": null }, { "param": "project", "type": null }, { "param": "region", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dataproc", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "project", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "region", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def list_clusters(dataproc, project, region): for cluster in dataproc.list_clusters( request={"project_id": project, "region": region} ): print( ( "{} - {}".format( cluster.cluster_name, cluster.status.state.name ) ) )
554
294
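A usage sketch for list_clusters with the google-cloud-dataproc client; the project id is hypothetical, and the regional endpoint must match the region you query:

from google.cloud import dataproc_v1

client = dataproc_v1.ClusterControllerClient(
    client_options={'api_endpoint': 'us-central1-dataproc.googleapis.com:443'}
)
list_clusters(client, 'my-project', 'us-central1')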
9d18fff8200c18d30fff0cda3d35748322b4dff3
ostar0816/mc-crypto
venv/lib/python3.6/site-packages/mypy/checker.py
[ "MIT" ]
Python
gen_unique_name
str
def gen_unique_name(base: str, table: SymbolTable) -> str: """Generate a name that does not appear in table by appending numbers to base.""" if base not in table: return base i = 1 while base + str(i) in table: i += 1 return base + str(i)
Generate a name that does not appear in table by appending numbers to base.
Generate a name that does not appear in table by appending numbers to base.
[ "Generate", "a", "name", "that", "does", "not", "appear", "in", "table", "by", "appending", "numbers", "to", "base", "." ]
def gen_unique_name(base: str, table: SymbolTable) -> str: if base not in table: return base i = 1 while base + str(i) in table: i += 1 return base + str(i)
[ "def", "gen_unique_name", "(", "base", ":", "str", ",", "table", ":", "SymbolTable", ")", "->", "str", ":", "if", "base", "not", "in", "table", ":", "return", "base", "i", "=", "1", "while", "base", "+", "str", "(", "i", ")", "in", "table", ":", "i", "+=", "1", "return", "base", "+", "str", "(", "i", ")" ]
Generate a name that does not appear in table by appending numbers to base.
[ "Generate", "a", "name", "that", "does", "not", "appear", "in", "table", "by", "appending", "numbers", "to", "base", "." ]
[ "\"\"\"Generate a name that does not appear in table by appending numbers to base.\"\"\"" ]
[ { "param": "base", "type": "str" }, { "param": "table", "type": "SymbolTable" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "base", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "table", "type": "SymbolTable", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
from mypy.nodes import SymbolTable def gen_unique_name(base: str, table: SymbolTable) -> str: if base not in table: return base i = 1 while base + str(i) in table: i += 1 return base + str(i)
555
408
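gen_unique_name only needs `in` membership on the table, so a plain dict works for illustration (mypy's SymbolTable is dict-like):

table = {'x': None, 'x1': None}
print(gen_unique_name('x', table))  # -> 'x2' (x and x1 are taken)
print(gen_unique_name('y', table))  # -> 'y'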
6ef36c7e46d92046abaa699d4b79c4e9575e51a2
t3kt/raytk
src/lib/_stubs/TDJSON.py
[ "CC-BY-4.0" ]
Python
destroyOtherPagesAndParameters
null
def destroyOtherPagesAndParameters(comp, pageNames, parNames): """ Destroys all custom pages and parameters on comp that are not found in pageNames or parNames """ for p in comp.customPars: try: if p.name not in parNames: p.destroy() except Exception as e: # already destroyed # debug(e) continue for page in comp.customPages: if page.name not in pageNames: try: page.destroy() except: # already destroyed continue
Destroys all custom pages and parameters on comp that are not found in pageNames or parNames
Destroys all custom pages and parameters on comp that are not found in pageNames or parNames
[ "Destroys", "all", "custom", "pages", "and", "parameters", "on", "comp", "that", "are", "not", "found", "in", "pageNames", "or", "parNames" ]
def destroyOtherPagesAndParameters(comp, pageNames, parNames): for p in comp.customPars: try: if p.name not in parNames: p.destroy() except Exception as e: continue for page in comp.customPages: if page.name not in pageNames: try: page.destroy() except: continue
[ "def", "destroyOtherPagesAndParameters", "(", "comp", ",", "pageNames", ",", "parNames", ")", ":", "for", "p", "in", "comp", ".", "customPars", ":", "try", ":", "if", "p", ".", "name", "not", "in", "parNames", ":", "p", ".", "destroy", "(", ")", "except", "Exception", "as", "e", ":", "continue", "for", "page", "in", "comp", ".", "customPages", ":", "if", "page", ".", "name", "not", "in", "pageNames", ":", "try", ":", "page", ".", "destroy", "(", ")", "except", ":", "continue" ]
Destroys all custom pages and parameters on comp that are not found in pageNames or parNames
[ "Destroys", "all", "custom", "pages", "and", "parameters", "on", "comp", "that", "are", "not", "found", "in", "pageNames", "or", "parNames" ]
[ "\"\"\"\n\tDestroys all custom pages and parameters on comp that are not found in\n\tpageNames or parNames\n\t\"\"\"", "# already destroyed", "# debug(e)", "# already destroyed" ]
[ { "param": "comp", "type": null }, { "param": "pageNames", "type": null }, { "param": "parNames", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "comp", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "pageNames", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "parNames", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def destroyOtherPagesAndParameters(comp, pageNames, parNames): for p in comp.customPars: try: if p.name not in parNames: p.destroy() except Exception as e: continue for page in comp.customPages: if page.name not in pageNames: try: page.destroy() except: continue
556
436
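A sketch of calling destroyOtherPagesAndParameters inside TouchDesigner, where op() is TD's built-in operator lookup (the component and names below are hypothetical):

comp = op('base1')  # hypothetical component
destroyOtherPagesAndParameters(comp, ['Settings'], ['Speed', 'Reset'])
# keeps only the 'Settings' page and the 'Speed'/'Reset' custom parameters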
b7bf2f03b8f69d7507ebfa9c740674ea395a592b
ray8828/occupancy_flow
im2mesh/config.py
[ "MIT" ]
Python
update_recursive
null
def update_recursive(dict1, dict2): ''' Update two config dictionaries recursively. Args: dict1 (dict): first dictionary to be updated dict2 (dict): second dictionary which entries should be used ''' for k, v in dict2.items(): if k not in dict1: dict1[k] = dict() if isinstance(v, dict): update_recursive(dict1[k], v) else: dict1[k] = v
Update two config dictionaries recursively. Args: dict1 (dict): first dictionary to be updated dict2 (dict): second dictionary which entries should be used
Update two config dictionaries recursively.
[ "Update", "two", "config", "dictionaries", "recursively", "." ]
def update_recursive(dict1, dict2): for k, v in dict2.items(): if k not in dict1: dict1[k] = dict() if isinstance(v, dict): update_recursive(dict1[k], v) else: dict1[k] = v
[ "def", "update_recursive", "(", "dict1", ",", "dict2", ")", ":", "for", "k", ",", "v", "in", "dict2", ".", "items", "(", ")", ":", "if", "k", "not", "in", "dict1", ":", "dict1", "[", "k", "]", "=", "dict", "(", ")", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "update_recursive", "(", "dict1", "[", "k", "]", ",", "v", ")", "else", ":", "dict1", "[", "k", "]", "=", "v" ]
Update two config dictionaries recursively.
[ "Update", "two", "config", "dictionaries", "recursively", "." ]
[ "''' Update two config dictionaries recursively.\n\n Args:\n dict1 (dict): first dictionary to be updated\n dict2 (dict): second dictionary which entries should be used\n\n '''" ]
[ { "param": "dict1", "type": null }, { "param": "dict2", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dict1", "type": null, "docstring": "first dictionary to be updated", "docstring_tokens": [ "first", "dictionary", "to", "be", "updated" ], "default": null, "is_optional": false }, { "identifier": "dict2", "type": null, "docstring": "second dictionary which entries should be used", "docstring_tokens": [ "second", "dictionary", "which", "entries", "should", "be", "used" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def update_recursive(dict1, dict2): for k, v in dict2.items(): if k not in dict1: dict1[k] = dict() if isinstance(v, dict): update_recursive(dict1[k], v) else: dict1[k] = v
557
757
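A worked example for update_recursive; nested keys are merged rather than replaced wholesale:

defaults = {'model': {'decoder': 'simple', 'dim': 128}}
override = {'model': {'dim': 256}}
update_recursive(defaults, override)
print(defaults)  # -> {'model': {'decoder': 'simple', 'dim': 256}}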
35fc5a0a1f0f0f6b55e9b681c2ac984e2ece3d4e
maitelab/maitenotas_v4
storage.py
[ "Python-2.0" ]
Python
createTable
None
def createTable(conn: sqlite3.Connection, create_tablesql: str) -> None: """ create a table from the create_tablesql statement""" try: cursor = conn.cursor() cursor.execute(create_tablesql) except: traceback.print_exc()
create a table from the create_tablesql statement
create a table from the create_tablesql statement
[ "create", "a", "table", "from", "the", "create_tablesql", "statement" ]
def createTable(conn: sqlite3.Connection, create_tablesql: str) -> None: try: cursor = conn.cursor() cursor.execute(create_tablesql) except: traceback.print_exc()
[ "def", "createTable", "(", "conn", ":", "sqlite3", ".", "Connection", ",", "create_tablesql", ":", "str", ")", "->", "None", ":", "try", ":", "cursor", "=", "conn", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "create_tablesql", ")", "except", ":", "traceback", ".", "print_exc", "(", ")" ]
create a table from the create_tablesql statement
[ "create", "a", "table", "from", "the", "create_tablesql", "statement" ]
[ "\"\"\" create a table from the create_tablesql statement\"\"\"" ]
[ { "param": "conn", "type": "sqlite3.Connection" }, { "param": "create_tablesql", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "conn", "type": "sqlite3.Connection", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "create_tablesql", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import sqlite3 import traceback def createTable(conn: sqlite3.Connection, create_tablesql: str) -> None: try: cursor = conn.cursor() cursor.execute(create_tablesql) except: traceback.print_exc()
558
781
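A usage sketch for createTable against an in-memory database (the table schema is hypothetical):

import sqlite3

conn = sqlite3.connect(':memory:')
createTable(conn, 'CREATE TABLE IF NOT EXISTS notes (id INTEGER PRIMARY KEY, body TEXT)')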
2f11717e0a1e60a46aec35db4600127ea552e1b1
liuzengzhen1/models
PaddleNLP/similarity_net/utils.py
[ "Apache-2.0" ]
Python
pattern_match
<not_specific>
def pattern_match(pattern, line): """ Check whether a string is matched Args: pattern: matching pattern line : input string Returns: True/False """ if re.match(pattern, line): return True else: return False
Check whether a string is matched Args: pattern: matching pattern line : input string Returns: True/False
Check whether a string is matched
[ "Check", "whether", "a", "string", "is", "matched" ]
def pattern_match(pattern, line): if re.match(pattern, line): return True else: return False
[ "def", "pattern_match", "(", "pattern", ",", "line", ")", ":", "if", "re", ".", "match", "(", "pattern", ",", "line", ")", ":", "return", "True", "else", ":", "return", "False" ]
Check whether a string is matched
[ "Check", "whether", "a", "string", "is", "matched" ]
[ "\"\"\"\n Check whether a string is matched\n Args:\n pattern: mathing pattern\n line : input string\n Returns:\n True/False\n \"\"\"" ]
[ { "param": "pattern", "type": null }, { "param": "line", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "pattern", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "line", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "line ", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "others": [] }
import re def pattern_match(pattern, line): if re.match(pattern, line): return True else: return False
559
561
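Two illustrative calls for pattern_match; re.match anchors at the start of the string:

print(pattern_match(r'\d+', '42 items'))   # True
print(pattern_match(r'\d+', 'items: 42'))  # False -- no match at position 0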
966336109335b17c64cd21afee012bc477702793
StanwieCB/stanford-cs221-proj
utils/util.py
[ "MIT" ]
Python
add_imagenet_mean_batch
<not_specific>
def add_imagenet_mean_batch(batch): """Add ImageNet mean pixel-wise from a BGR image.""" tensortype = type(batch.data) mean = tensortype(batch.data.size()) mean[:, 0, :, :] = 103.939 mean[:, 1, :, :] = 116.779 mean[:, 2, :, :] = 123.680 return batch + mean
Add ImageNet mean pixel-wise from a BGR image.
Add ImageNet mean pixel-wise from a BGR image.
[ "Add", "ImageNet", "mean", "pixel", "-", "wise", "from", "a", "BGR", "image", "." ]
def add_imagenet_mean_batch(batch): tensortype = type(batch.data) mean = tensortype(batch.data.size()) mean[:, 0, :, :] = 103.939 mean[:, 1, :, :] = 116.779 mean[:, 2, :, :] = 123.680 return batch + mean
[ "def", "add_imagenet_mean_batch", "(", "batch", ")", ":", "tensortype", "=", "type", "(", "batch", ".", "data", ")", "mean", "=", "tensortype", "(", "batch", ".", "data", ".", "size", "(", ")", ")", "mean", "[", ":", ",", "0", ",", ":", ",", ":", "]", "=", "103.939", "mean", "[", ":", ",", "1", ",", ":", ",", ":", "]", "=", "116.779", "mean", "[", ":", ",", "2", ",", ":", ",", ":", "]", "=", "123.680", "return", "batch", "+", "mean" ]
Add ImageNet mean pixel-wise from a BGR image.
[ "Add", "ImageNet", "mean", "pixel", "-", "wise", "from", "a", "BGR", "image", "." ]
[ "\"\"\"Add ImageNet mean pixel-wise from a BGR image.\"\"\"" ]
[ { "param": "batch", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "batch", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def add_imagenet_mean_batch(batch): tensortype = type(batch.data) mean = tensortype(batch.data.size()) mean[:, 0, :, :] = 103.939 mean[:, 1, :, :] = 116.779 mean[:, 2, :, :] = 123.680 return batch + mean
560
191
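A sketch for add_imagenet_mean_batch with a zero batch in NCHW layout (BGR channel order is assumed, matching the constants):

import torch

batch = torch.zeros(2, 3, 8, 8)
restored = add_imagenet_mean_batch(batch)
print(restored[0, :, 0, 0])  # -> tensor([103.9390, 116.7790, 123.6800])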
aaeb36e839ad2e537d037c43cdc1705112b57b09
fmariv/udt-qgis-plugin
actions/generador_mmc.py
[ "MIT" ]
Python
convert_date
<not_specific>
def convert_date(date): """ Transform a date from the date format to a string format """ date_converted = f'{date[0:4]}-{date[4:6]}-{date[6:8]}' return date_converted
Transform a date from the date format to a string format
Transform a date from the date format to a string format
[ "Transform", "a", "date", "from", "the", "date", "format", "to", "a", "string", "format" ]
def convert_date(date): date_converted = f'{date[0:4]}-{date[4:6]}-{date[6:8]}' return date_converted
[ "def", "convert_date", "(", "date", ")", ":", "date_converted", "=", "f'{date[0:4]}-{date[4:6]}-{date[6:8]}'", "return", "date_converted" ]
Transform a date from the date format to a string format
[ "Transform", "a", "date", "from", "the", "date", "format", "to", "a", "string", "format" ]
[ "\"\"\" Transform a date from the date format to a string format \"\"\"" ]
[ { "param": "date", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "date", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def convert_date(date): date_converted = f'{date[0:4]}-{date[4:6]}-{date[6:8]}' return date_converted
561
386
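convert_date expects a compact YYYYMMDD string and re-slices it with hyphens:

print(convert_date('20240131'))  # -> '2024-01-31'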
7ebc623e7da55b3787f1f66fa8e83ce57eac9c10
RobertoPrevato/aiohttp-three-template
core/__init__.py
[ "MIT" ]
Python
has_params
<not_specific>
def has_params(data, *args): """ Validates required parameters against an object. :param data: :param args: required parameters :return: """ if data is None: return False for a in args: if not a in data: return False v = data[a] if v is None or v == "": return False return True
Validates required parameters against an object. :param data: :param args: required parameters :return:
Validates required parameters against an object.
[ "Validates", "required", "parameters", "against", "an", "object", "." ]
def has_params(data, *args): if data is None: return False for a in args: if not a in data: return False v = data[a] if v is None or v == "": return False return True
[ "def", "has_params", "(", "data", ",", "*", "args", ")", ":", "if", "data", "is", "None", ":", "return", "False", "for", "a", "in", "args", ":", "if", "not", "a", "in", "data", ":", "return", "False", "v", "=", "data", "[", "a", "]", "if", "v", "is", "None", "or", "v", "==", "\"\"", ":", "return", "False", "return", "True" ]
Validates required parameters against an object.
[ "Validates", "required", "parameters", "against", "an", "object", "." ]
[ "\"\"\"\n Validates required parameters against an object.\n :param data:\n :param args: required parameters\n :return:\n \"\"\"" ]
[ { "param": "data", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "args", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "others": [] }
def has_params(data, *args): if data is None: return False for a in args: if not a in data: return False v = data[a] if v is None or v == "": return False return True
562
105
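A few illustrative calls for has_params; None values and empty strings both count as missing:

payload = {'name': 'anna', 'email': ''}
print(has_params(payload, 'name'))           # True
print(has_params(payload, 'name', 'email'))  # False -- empty string fails
print(has_params(None, 'name'))              # False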