repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
calmjs/calmjs
src/calmjs/dist.py
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L162-L173
def find_packages_requirements_dists(pkg_names, working_set=None):
    """Return the entire list of dependency requirements, reversed from
    the bottom.
    """
    working_set = working_set or default_working_set
    # Keep only the requirements that the working set can actually locate.
    parsed = (Requirement.parse(name) for name in pkg_names)
    wanted = [req for req in parsed if working_set.find(req)]
    resolved = working_set.resolve(wanted)
    return list(reversed(resolved))
[ "def", "find_packages_requirements_dists", "(", "pkg_names", ",", "working_set", "=", "None", ")", ":", "working_set", "=", "working_set", "or", "default_working_set", "requirements", "=", "[", "r", "for", "r", "in", "(", "Requirement", ".", "parse", "(", "req",...
Return the entire list of dependency requirements, reversed from the bottom.
[ "Return", "the", "entire", "list", "of", "dependency", "requirements", "reversed", "from", "the", "bottom", "." ]
python
train
MacHu-GWU/angora-project
angora/filesystem/filesystem.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/filesystem/filesystem.py#L777-L821
def from_path_except(dir_path, ignore=None, ignore_ext=None, ignore_pattern=None):
    """Create a new FileCollection, and select all files except file
    matching ignore-rule::

        dir_path = "your/path"
        fc = FileCollection.from_path_except(
            dir_path,
            ignore=["test"],
            ignore_ext=[".log", ".tmp"],
            ignore_pattern=["some_pattern"])

    :param dir_path: the root directory you want to start with
    :param ignore: file or directory defined in this list will be ignored.
    :param ignore_ext: file with extensions defined in this list will be
        ignored.
    :param ignore_pattern: any file or directory that contains this pattern
        will be ignored.

    Select all files under ``dir_path``; files matched by ``ignore``,
    ``ignore_ext`` or ``ignore_pattern`` are excluded.
    """
    # None-sentinel defaults avoid the shared-mutable-default pitfall;
    # ``or []`` keeps the original behavior when callers pass None/empty.
    ignore = [i.lower() for i in (ignore or [])]
    ignore_ext = [i.lower() for i in (ignore_ext or [])]
    ignore_pattern = [i.lower() for i in (ignore_pattern or [])]

    def criterion(winfile):
        # Compare case-insensitively on the path relative to the root.
        relpath = os.path.relpath(winfile.abspath, dir_path).lower()

        # exclude ignored file/directory prefixes
        for prefix in ignore:
            if relpath.startswith(prefix):
                return False

        # exclude ignored extensions
        # (winfile.ext is presumably already lower-case — TODO confirm)
        if winfile.ext in ignore_ext:
            return False

        # exclude paths containing an ignored pattern
        for pattern in ignore_pattern:
            if pattern in relpath:
                return False

        return True

    return FileCollection.from_path_by_criterion(
        dir_path, criterion, keepboth=False)
[ "def", "from_path_except", "(", "dir_path", ",", "ignore", "=", "list", "(", ")", ",", "ignore_ext", "=", "list", "(", ")", ",", "ignore_pattern", "=", "list", "(", ")", ")", ":", "ignore", "=", "[", "i", ".", "lower", "(", ")", "for", "i", "in", ...
Create a new FileCollection, and select all files except file matching ignore-rule:: dir_path = "your/path" fc = FileCollection.from_path_except( dir_path, ignore=["test"], ignore_ext=[".log", ".tmp"] ignore_pattern=["some_pattern"]) :param dir_path: the root directory you want to start with :param ignore: file or directory defined in this list will be ignored. :param ignore_ext: file with extensions defined in this list will be ignored. :param ignore_pattern: any file or directory that contains this pattern will be ignored. **中文文档** 选择dir_path下的所有文件, 在ignore, ignore_ext, ignore_pattern中所定义 的文件将被排除在外。
[ "Create", "a", "new", "FileCollection", "and", "select", "all", "files", "except", "file", "matching", "ignore", "-", "rule", "::", "dir_path", "=", "your", "/", "path", "fc", "=", "FileCollection", ".", "from_path_except", "(", "dir_path", "ignore", "=", "[...
python
train
biocommons/biocommons.seqrepo
biocommons/seqrepo/py2compat/_commonpath.py
https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/py2compat/_commonpath.py#L5-L58
def commonpath(paths):
    """py2 compatible version of py3's os.path.commonpath

    >>> commonpath([""])
    ''
    >>> commonpath(["/"])
    '/'
    >>> commonpath(["/a"])
    '/a'
    >>> commonpath(["/a//"])
    '/a'
    >>> commonpath(["/a", "/a"])
    '/a'
    >>> commonpath(["/a/b", "/a"])
    '/a'
    >>> commonpath(["/a/b", "/a/b"])
    '/a/b'
    >>> commonpath(["/a/b/c", "/a/b/d"])
    '/a/b'
    >>> commonpath(["/a/x/c", "/a/y/c"])
    '/a'
    >>> commonpath(["/a/b/c", "/a/b/d", "//a//b//e//"])
    '/a/b'
    >>> commonpath([])
    Traceback (most recent call last):
    ...
    ValueError: commonpath() arg is an empty sequence
    >>> commonpath(["/absolute/path", "relative/path"])
    Traceback (most recent call last):
    ...
    ValueError: Can't mix absolute and relative paths
    """
    assert os.sep == "/", "tested only on slash-delimited paths"
    split_re = re.compile(os.sep + "+")
    if len(paths) == 0:
        raise ValueError("commonpath() arg is an empty sequence")
    # Strip trailing separators and split on runs of separators so that
    # "//a//b//" and "/a/b" compare equal component-wise.
    spaths = [p.rstrip(os.sep) for p in paths]
    splitpaths = [split_re.split(p) for p in spaths]
    if all(p.startswith(os.sep) for p in paths):
        abs_paths = True
        # Drop the empty leading component produced by the leading "/".
        splitpaths = [p[1:] for p in splitpaths]
    elif all(not p.startswith(os.sep) for p in paths):
        abs_paths = False
    else:
        raise ValueError("Can't mix absolute and relative paths")
    splitpaths0 = splitpaths[0]
    splitpaths1n = splitpaths[1:]
    min_length = min(len(p) for p in splitpaths)
    # BUGFIX: the previous implementation collected *every* index where all
    # paths agree and kept the maximum, so a later coincidental match (e.g.
    # "/a/x/c" vs "/a/y/c" both ending in "c") wrongly extended the result
    # past the first mismatch. The common path is the longest common
    # *prefix*, so stop at the first disagreement.
    prefix_len = 0
    for i in range(min_length):
        if all(splitpaths0[i] == sp[i] for sp in splitpaths1n):
            prefix_len += 1
        else:
            break
    common = os.sep.join(splitpaths0[:prefix_len])
    return (os.sep if abs_paths else '') + common
[ "def", "commonpath", "(", "paths", ")", ":", "assert", "os", ".", "sep", "==", "\"/\"", ",", "\"tested only on slash-delimited paths\"", "split_re", "=", "re", ".", "compile", "(", "os", ".", "sep", "+", "\"+\"", ")", "if", "len", "(", "paths", ")", "=="...
py2 compatible version of py3's os.path.commonpath >>> commonpath([""]) '' >>> commonpath(["/"]) '/' >>> commonpath(["/a"]) '/a' >>> commonpath(["/a//"]) '/a' >>> commonpath(["/a", "/a"]) '/a' >>> commonpath(["/a/b", "/a"]) '/a' >>> commonpath(["/a/b", "/a/b"]) '/a/b' >>> commonpath(["/a/b/c", "/a/b/d"]) '/a/b' >>> commonpath(["/a/b/c", "/a/b/d", "//a//b//e//"]) '/a/b' >>> commonpath([]) Traceback (most recent call last): ... ValueError: commonpath() arg is an empty sequence >>> commonpath(["/absolute/path", "relative/path"]) Traceback (most recent call last): ... ValueError: (Can't mix absolute and relative paths")
[ "py2", "compatible", "version", "of", "py3", "s", "os", ".", "path", ".", "commonpath" ]
python
train
oasiswork/zimsoap
zimsoap/client.py
https://github.com/oasiswork/zimsoap/blob/d1ea2eb4d50f263c9a16e5549af03f1eff3e295e/zimsoap/client.py#L764-L774
def modify_calendar_resource(self, calres, attrs):
    """
    :param calres: a zobjects.CalendarResource
    :param attrs: a dictionary of attributes to set ({key:value,...})
    """
    # Convert the {key: value} mapping into the attribute-record form the
    # SOAP API expects.
    attr_records = []
    for key, value in attrs.items():
        attr_records.append({'n': key, '_content': value})
    resource_id = self._get_or_fetch_id(calres, self.get_calendar_resource)
    self.request('ModifyCalendarResource', {
        'id': resource_id,
        'a': attr_records,
    })
[ "def", "modify_calendar_resource", "(", "self", ",", "calres", ",", "attrs", ")", ":", "attrs", "=", "[", "{", "'n'", ":", "k", ",", "'_content'", ":", "v", "}", "for", "k", ",", "v", "in", "attrs", ".", "items", "(", ")", "]", "self", ".", "requ...
:param calres: a zobjects.CalendarResource :param attrs: a dictionary of attributes to set ({key:value,...})
[ ":", "param", "calres", ":", "a", "zobjects", ".", "CalendarResource", ":", "param", "attrs", ":", "a", "dictionary", "of", "attributes", "to", "set", "(", "{", "key", ":", "value", "...", "}", ")" ]
python
train
bhmm/bhmm
bhmm/hmm/generic_hmm.py
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/hmm/generic_hmm.py#L167-L172
def is_stationary(self):
    r""" Whether the MSM is stationary, i.e. whether the initial
    distribution is the stationary distribution of the hidden transition
    matrix.
    """
    # For disconnected matrices the stationary distribution depends on the
    # estimator and cannot be computed directly, so instead verify the
    # fixed-point property pi @ P == pi for the initial distribution.
    propagated = np.dot(self._Pi, self._Tij)
    return np.allclose(propagated, self._Pi)
[ "def", "is_stationary", "(", "self", ")", ":", "# for disconnected matrices, the stationary distribution depends on the estimator, so we can't compute", "# it directly. Therefore we test whether the initial distribution is stationary.", "return", "np", ".", "allclose", "(", "np", ".", ...
r""" Whether the MSM is stationary, i.e. whether the initial distribution is the stationary distribution of the hidden transition matrix.
[ "r", "Whether", "the", "MSM", "is", "stationary", "i", ".", "e", ".", "whether", "the", "initial", "distribution", "is", "the", "stationary", "distribution", "of", "the", "hidden", "transition", "matrix", "." ]
python
train
michael-lazar/rtv
rtv/page.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/page.py#L481-L512
def prompt_and_select_link(self):
    """
    Prompt the user to select a link from a list to open. Return the
    link that was selected, or ``None`` if no link was selected.
    """
    data = self.get_selected_item()
    url_full = data.get('url_full')
    permalink = data.get('permalink')

    if url_full and url_full != permalink:
        # Link-only submission: there is no body text to scan for links.
        return url_full

    html = data.get('html')
    if not html:
        # Some items, like hidden comments, carry no HTML to parse.
        return permalink

    extracted_links = self.content.extract_links(html)
    if not extracted_links:
        # Nothing extracted; the permalink is the only candidate.
        return permalink

    # Offer the permalink (when present) ahead of the extracted links and
    # let the user pick one.
    choices = []
    if permalink:
        choices.append({'text': 'Permalink', 'href': permalink})
    choices.extend(extracted_links)
    return self.term.prompt_user_to_select_link(choices)
[ "def", "prompt_and_select_link", "(", "self", ")", ":", "data", "=", "self", ".", "get_selected_item", "(", ")", "url_full", "=", "data", ".", "get", "(", "'url_full'", ")", "permalink", "=", "data", ".", "get", "(", "'permalink'", ")", "if", "url_full", ...
Prompt the user to select a link from a list to open. Return the link that was selected, or ``None`` if no link was selected.
[ "Prompt", "the", "user", "to", "select", "a", "link", "from", "a", "list", "to", "open", "." ]
python
train
cni/MRS
MRS/analysis.py
https://github.com/cni/MRS/blob/16098b3cf4830780efd787fee9efa46513850283/MRS/analysis.py#L64-L96
def separate_signals(data, w_idx=[1, 2, 3]):
    """
    Separate the water and non-water data from each other

    Parameters
    ----------
    data : nd array
        FID signal with shape (transients, echos, coils, time-points)
    w_idx : list (optional)
        Indices into the 'transients' (0th) dimension of the data for the
        signal that is not water-suppressed

    Returns
    -------
    water_data, w_supp_data : tuple
        The first element is an array with the transients in the data in
        which no water suppression was applied. The second element is an
        array with the transients in which water suppression was applied
    """
    n_transients = data.shape[0]

    # Boolean mask over the transient (0th) axis marking the water
    # (unsuppressed) scans.
    water_mask = np.zeros(n_transients, dtype=bool)
    water_mask[w_idx] = True
    w_data = data[np.where(water_mask)]

    # Everything else is water-suppressed — except transient 0, which is
    # always discarded (commonly unusable).
    suppressed_mask = ~water_mask
    suppressed_mask[0] = False
    w_supp_data = data[np.where(suppressed_mask)]

    return w_data, w_supp_data
[ "def", "separate_signals", "(", "data", ",", "w_idx", "=", "[", "1", ",", "2", ",", "3", "]", ")", ":", "# The transients are the first dimension in the data", "idxes_w", "=", "np", ".", "zeros", "(", "data", ".", "shape", "[", "0", "]", ",", "dtype", "=...
Separate the water and non-water data from each other Parameters ---------- data : nd array FID signal with shape (transients, echos, coils, time-points) w_idx : list (optional) Indices into the 'transients' (0th) dimension of the data for the signal that is not water-suppressed Returns ------- water_data, w_supp_data : tuple The first element is an array with the transients in the data in which no water suppression was applied. The second element is an array with the transients in which water suppression was applied
[ "Separate", "the", "water", "and", "non", "-", "water", "data", "from", "each", "other" ]
python
train
chrisrink10/basilisp
src/basilisp/lang/runtime.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/runtime.py#L1261-L1267
def init_ns_var(which_ns: str = CORE_NS, ns_var_name: str = NS_VAR_NAME) -> Var:
    """Initialize the dynamic `*ns*` variable in the Namespace `which_ns`."""
    ns_sym = sym.Symbol(which_ns)
    # Intern the dynamic var into the (possibly freshly created) namespace.
    namespace = Namespace.get_or_create(ns_sym)
    var = Var.intern(ns_sym, sym.Symbol(ns_var_name), namespace, dynamic=True)
    logger.debug(f"Created namespace variable {sym.symbol(ns_var_name, ns=which_ns)}")
    return var
[ "def", "init_ns_var", "(", "which_ns", ":", "str", "=", "CORE_NS", ",", "ns_var_name", ":", "str", "=", "NS_VAR_NAME", ")", "->", "Var", ":", "core_sym", "=", "sym", ".", "Symbol", "(", "which_ns", ")", "core_ns", "=", "Namespace", ".", "get_or_create", ...
Initialize the dynamic `*ns*` variable in the Namespace `which_ns`.
[ "Initialize", "the", "dynamic", "*", "ns", "*", "variable", "in", "the", "Namespace", "which_ns", "." ]
python
test
iskandr/knnimpute
knnimpute/normalized_distance.py
https://github.com/iskandr/knnimpute/blob/9a1b8abed9ce6c07a606f3f28cf45333e84d62f4/knnimpute/normalized_distance.py#L18-L84
def all_pairs_normalized_distances(X):
    """
    We can't really compute distances over incomplete data since rows are
    missing different numbers of entries.

    The next best thing is the mean squared difference between two vectors
    (a normalized distance), which gets computed only over the columns that
    two vectors have in common. If two vectors have no features in common
    then their distance is infinity.

    Parameters
    ----------
    X : np.ndarray
        Data matrix of shape (n_samples, n_features) with missing entries
        marked using np.nan

    Returns a (n_samples, n_samples) matrix of pairwise normalized distances.
    """
    n_rows, n_cols = X.shape

    # matrix of mean squared difference between between samples;
    # initialized to +inf so that incomparable pairs keep infinite distance
    D = np.ones((n_rows, n_rows), dtype="float32", order="C") * np.inf

    # we can cheaply determine the number of columns that two rows share
    # by taking the dot product between their finite masks
    observed_elements = np.isfinite(X).astype(int)
    n_shared_features_for_pairs_of_rows = np.dot(
        observed_elements,
        observed_elements.T)
    no_overlapping_features_rows = n_shared_features_for_pairs_of_rows == 0
    number_incomparable_rows = no_overlapping_features_rows.sum(axis=1)
    # rows with full overlap take the fast branch in the loop below
    row_overlaps_every_other_row = (number_incomparable_rows == 0)
    # rows sharing no features with any row are skipped entirely
    row_overlaps_no_other_rows = number_incomparable_rows == n_rows
    valid_rows_mask = ~row_overlaps_no_other_rows
    valid_row_indices = np.where(valid_rows_mask)[0]

    # preallocate all the arrays that we would otherwise create in the
    # following loop and pass them as "out" parameters to NumPy ufuncs
    diffs = np.zeros_like(X)
    missing_differences = np.zeros_like(diffs, dtype=bool)
    valid_rows = np.zeros(n_rows, dtype=bool)
    ssd = np.zeros(n_rows, dtype=X.dtype)

    for i in valid_row_indices:
        x = X[i, :]
        np.subtract(X, x.reshape((1, n_cols)), out=diffs)
        np.isnan(diffs, out=missing_differences)

        # zero out all NaN's
        diffs[missing_differences] = 0

        # square each difference
        diffs **= 2

        observed_counts_per_row = n_shared_features_for_pairs_of_rows[i]

        if row_overlaps_every_other_row[i]:
            # add up all the non-missing squared differences
            diffs.sum(axis=1, out=D[i, :])
            D[i, :] /= observed_counts_per_row
        else:
            np.logical_not(no_overlapping_features_rows[i], out=valid_rows)

            # add up all the non-missing squared differences
            diffs.sum(axis=1, out=ssd)
            ssd[valid_rows] /= observed_counts_per_row[valid_rows]
            D[i, valid_rows] = ssd[valid_rows]

    return D
[ "def", "all_pairs_normalized_distances", "(", "X", ")", ":", "n_rows", ",", "n_cols", "=", "X", ".", "shape", "# matrix of mean squared difference between between samples", "D", "=", "np", ".", "ones", "(", "(", "n_rows", ",", "n_rows", ")", ",", "dtype", "=", ...
We can't really compute distances over incomplete data since rows are missing different numbers of entries. The next best thing is the mean squared difference between two vectors (a normalized distance), which gets computed only over the columns that two vectors have in common. If two vectors have no features in common then their distance is infinity. Parameters ---------- X : np.ndarray Data matrix of shape (n_samples, n_features) with missing entries marked using np.nan Returns a (n_samples, n_samples) matrix of pairwise normalized distances.
[ "We", "can", "t", "really", "compute", "distances", "over", "incomplete", "data", "since", "rows", "are", "missing", "different", "numbers", "of", "entries", ".", "The", "next", "best", "thing", "is", "the", "mean", "squared", "difference", "between", "two", ...
python
train
Azure/azure-cli-extensions
src/storage-preview/azext_storage_preview/_validators.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/storage-preview/azext_storage_preview/_validators.py#L355-L375
def get_datetime_type(to_string):
    """ Validates UTC datetime. Examples of accepted forms:
    2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 """
    from datetime import datetime

    def datetime_type(string):
        """ Validates UTC datetime. Examples of accepted forms:
        2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 """
        # Try the accepted formats from most to least specific.
        accepted_date_formats = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ',
                                 '%Y-%m-%dT%HZ', '%Y-%m-%d']
        for form in accepted_date_formats:
            try:
                parsed = datetime.strptime(string, form)
            except ValueError:
                continue
            # Return a normalized string or the datetime object itself,
            # depending on the factory flag.
            return parsed.strftime(form) if to_string else parsed
        raise ValueError("Input '{}' not valid. Valid example: 2000-12-31T12:59:59Z".format(string))

    return datetime_type
[ "def", "get_datetime_type", "(", "to_string", ")", ":", "from", "datetime", "import", "datetime", "def", "datetime_type", "(", "string", ")", ":", "\"\"\" Validates UTC datetime. Examples of accepted forms:\n 2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-...
Validates UTC datetime. Examples of accepted forms: 2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31
[ "Validates", "UTC", "datetime", ".", "Examples", "of", "accepted", "forms", ":", "2017", "-", "12", "-", "31T01", ":", "11", ":", "59Z", "2017", "-", "12", "-", "31T01", ":", "11Z", "or", "2017", "-", "12", "-", "31T01Z", "or", "2017", "-", "12", ...
python
train
AlecAivazis/graphql-over-kafka
nautilus/management/scripts/create.py
https://github.com/AlecAivazis/graphql-over-kafka/blob/70e2acef27a2f87355590be1a6ca60ce3ab4d09c/nautilus/management/scripts/create.py#L36-L47
def api():
    """ Create the folder/directories for an ApiGateway service. """
    # Shared template context: the service name plus a fresh secret key.
    context = dict(name='api', secret_key=random_string(32))
    # Lay down the common scaffolding first, then the api-specific files.
    for template in ('common', 'api'):
        render_template(template=template, context=context)
[ "def", "api", "(", ")", ":", "# the template context", "context", "=", "{", "'name'", ":", "'api'", ",", "'secret_key'", ":", "random_string", "(", "32", ")", "}", "render_template", "(", "template", "=", "'common'", ",", "context", "=", "context", ")", "r...
Create the folder/directories for an ApiGateway service.
[ "Create", "the", "folder", "/", "directories", "for", "an", "ApiGateway", "service", "." ]
python
train
Rikanishu/static-bundle
static_bundle/builders.py
https://github.com/Rikanishu/static-bundle/blob/2f6458cb9d9d9049b4fd829f7d6951a45d547c68/static_bundle/builders.py#L157-L170
def render_asset(self, name):
    """
    Render all includes in asset by names

    :type name: str|unicode
    :rtype: str|unicode
    """
    if not self.has_asset(name):
        return ""
    asset = self.get_asset(name)
    if not asset.files:
        return ""
    # One include per line, CRLF-terminated.
    return "".join(f.render_include() + "\r\n" for f in asset.files)
[ "def", "render_asset", "(", "self", ",", "name", ")", ":", "result", "=", "\"\"", "if", "self", ".", "has_asset", "(", "name", ")", ":", "asset", "=", "self", ".", "get_asset", "(", "name", ")", "if", "asset", ".", "files", ":", "for", "f", "in", ...
Render all includes in asset by names :type name: str|unicode :rtype: str|unicode
[ "Render", "all", "includes", "in", "asset", "by", "names" ]
python
valid
xapple/plumbing
plumbing/trees/__init__.py
https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/trees/__init__.py#L96-L102
def mend(self, length):
    """Cut all branches from this node to its children and adopt all
    nodes at certain level."""
    if length == 0:
        raise Exception("Can't mend the root !")
    if length == 1:
        # Depth one is already the direct-children level; nothing to do.
        return
    # Re-attach every node found at depth `length` directly under self.
    adopted = self.get_level(length)
    self.children = OrderedDict((node.name, node) for node in adopted)
    for adoptee in self.children.values():
        adoptee.parent = self
[ "def", "mend", "(", "self", ",", "length", ")", ":", "if", "length", "==", "0", ":", "raise", "Exception", "(", "\"Can't mend the root !\"", ")", "if", "length", "==", "1", ":", "return", "self", ".", "children", "=", "OrderedDict", "(", "(", "node", "...
Cut all branches from this node to its children and adopt all nodes at certain level.
[ "Cut", "all", "branches", "from", "this", "node", "to", "its", "children", "and", "adopt", "all", "nodes", "at", "certain", "level", "." ]
python
train
arcturial/clickatell-python
clickatell/http/__init__.py
https://github.com/arcturial/clickatell-python/blob/4a554c28edaf2e5d0d9e81b4c9415241bfd61d00/clickatell/http/__init__.py#L78-L88
def stopMessage(self, apiMsgId):
    """ See parent method for documentation """
    raw = self.request('http/delmsg', {'apimsgid': apiMsgId})
    content = self.parseLegacy(raw)
    status = content['Status']
    # Normalize the legacy response into the common result shape.
    return {
        'id': content['ID'],
        'status': status,
        'description': self.getStatus(status),
    }
[ "def", "stopMessage", "(", "self", ",", "apiMsgId", ")", ":", "content", "=", "self", ".", "parseLegacy", "(", "self", ".", "request", "(", "'http/delmsg'", ",", "{", "'apimsgid'", ":", "apiMsgId", "}", ")", ")", "return", "{", "'id'", ":", "content", ...
See parent method for documentation
[ "See", "parent", "method", "for", "documentation" ]
python
train
apache/airflow
airflow/utils/log/wasb_task_handler.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/wasb_task_handler.py#L123-L133
def wasb_log_exists(self, remote_log_location):
    """
    Check if remote_log_location exists in remote storage

    :param remote_log_location: log's location in remote storage
    :return: True if location exists else False
    """
    try:
        found = self.hook.check_for_blob(self.wasb_container,
                                         remote_log_location)
    except Exception:
        # Best-effort check: any hook failure is reported as "not found".
        return False
    return found
[ "def", "wasb_log_exists", "(", "self", ",", "remote_log_location", ")", ":", "try", ":", "return", "self", ".", "hook", ".", "check_for_blob", "(", "self", ".", "wasb_container", ",", "remote_log_location", ")", "except", "Exception", ":", "pass", "return", "F...
Check if remote_log_location exists in remote storage :param remote_log_location: log's location in remote storage :return: True if location exists else False
[ "Check", "if", "remote_log_location", "exists", "in", "remote", "storage", ":", "param", "remote_log_location", ":", "log", "s", "location", "in", "remote", "storage", ":", "return", ":", "True", "if", "location", "exists", "else", "False" ]
python
test
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L3451-L3470
def dtpool(name):
    """
    Return the data about a kernel pool variable.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dtpool_c.html

    :param name: Name of the variable whose value is to be returned.
    :type name: str
    :return: Number of values returned for name,
            Type of the variable "C", "N", or "X".
    :rtype: tuple
    """
    name_p = stypes.stringToCharP(name)
    # Output parameters filled in by the C routine.
    found = ctypes.c_int()
    n = ctypes.c_int()
    vartype = ctypes.c_char()
    libspice.dtpool_c(name_p, ctypes.byref(found), ctypes.byref(n),
                      ctypes.byref(vartype))
    return n.value, stypes.toPythonString(vartype.value), bool(found.value)
[ "def", "dtpool", "(", "name", ")", ":", "name", "=", "stypes", ".", "stringToCharP", "(", "name", ")", "found", "=", "ctypes", ".", "c_int", "(", ")", "n", "=", "ctypes", ".", "c_int", "(", ")", "typeout", "=", "ctypes", ".", "c_char", "(", ")", ...
Return the data about a kernel pool variable. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dtpool_c.html :param name: Name of the variable whose value is to be returned. :type name: str :return: Number of values returned for name, Type of the variable "C", "N", or "X". :rtype: tuple
[ "Return", "the", "data", "about", "a", "kernel", "pool", "variable", "." ]
python
train
vpelletier/python-libusb1
usb1/__init__.py
https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L2394-L2421
def getPollFDList(self):
    """
    Return file descriptors to be used to poll USB events.
    You should not have to call this method, unless you are integrating
    this class with a polling mechanism.
    """
    pollfd_p_p = libusb1.libusb_get_pollfds(self.__context_p)
    if not pollfd_p_p:
        errno = get_errno()
        if errno:
            raise OSError(errno)
        # NULL with no errno: assume the backend lacks pollable FDs.
        raise NotImplementedError(
            'Your libusb does not seem to implement pollable FDs')
    try:
        result = []
        index = 0
        # The returned array is NULL-terminated; walk it until the
        # sentinel entry.
        while pollfd_p_p[index]:
            entry = pollfd_p_p[index].contents
            result.append((entry.fd, entry.events))
            index += 1
    finally:
        # The array itself must be freed by the caller per libusb docs.
        _free(pollfd_p_p)
    return result
[ "def", "getPollFDList", "(", "self", ")", ":", "pollfd_p_p", "=", "libusb1", ".", "libusb_get_pollfds", "(", "self", ".", "__context_p", ")", "if", "not", "pollfd_p_p", ":", "errno", "=", "get_errno", "(", ")", "if", "errno", ":", "raise", "OSError", "(", ...
Return file descriptors to be used to poll USB events. You should not have to call this method, unless you are integrating this class with a polling mechanism.
[ "Return", "file", "descriptors", "to", "be", "used", "to", "poll", "USB", "events", ".", "You", "should", "not", "have", "to", "call", "this", "method", "unless", "you", "are", "integrating", "this", "class", "with", "a", "polling", "mechanism", "." ]
python
train
arogozhnikov/einops
einops/einops.py
https://github.com/arogozhnikov/einops/blob/9698f0f5efa6c5a79daa75253137ba5d79a95615/einops/einops.py#L117-L177
def reconstruct_from_shape(self, shape, optimize=False):
    """
    Shape is a tuple that may contain integers, shape symbols (tf, keras, theano) and UnknownSize (keras, mxnet)
    known axes can be integers or symbols, but not Nones
    """
    axes_lengths = list(self.elementary_axes_lengths)
    # With an ellipsis present, the pattern matches "at least" that many
    # dimensions; without one, the rank must match exactly.
    if self.ellipsis_positions != (math.inf, math.inf):
        if len(shape) < len(self.input_composite_axes) - 1:
            raise EinopsError('Expected at least {} dimensions, got {}'.format(
                len(self.input_composite_axes) - 1, len(shape)))
    else:
        if len(shape) != len(self.input_composite_axes):
            raise EinopsError('Expected {} dimensions, got {}'.format(len(self.input_composite_axes), len(shape)))
    for input_axis, (known_axes, unknown_axes) in enumerate(self.input_composite_axes):
        before_ellipsis = input_axis
        after_ellipsis = input_axis + len(shape) - len(self.input_composite_axes)
        if input_axis == self.ellipsis_positions[0]:
            # The ellipsis slot absorbs all the "extra" dimensions.
            assert len(known_axes) == 0 and len(unknown_axes) == 1
            unknown_axis, = unknown_axes
            ellipsis_shape = shape[before_ellipsis:after_ellipsis + 1]
            if any(d is None for d in ellipsis_shape):
                raise EinopsError("Couldn't infer shape for one or more axes represented by ellipsis")
            axes_lengths[unknown_axis] = _product(ellipsis_shape)
        else:
            # Pick the dimension before or after the ellipsis offset.
            if input_axis < self.ellipsis_positions[0]:
                length = shape[before_ellipsis]
            else:
                length = shape[after_ellipsis]
            known_product = 1
            for axis in known_axes:
                known_product *= axes_lengths[axis]
            if len(unknown_axes) == 0:
                # Fully known composite axis: lengths must agree (symbolic
                # lengths are not checked — only int vs int comparisons).
                if isinstance(length, int) and isinstance(known_product, int) and length != known_product:
                    raise EinopsError('Shape mismatch, {} != {}'.format(length, known_product))
            else:
                # Exactly one unknown axis: infer its length by division.
                if isinstance(length, int) and isinstance(known_product, int) and length % known_product != 0:
                    raise EinopsError("Shape mismatch, can't divide axis of length {} in chunks of {}".format(
                        length, known_product))
                unknown_axis, = unknown_axes
                axes_lengths[unknown_axis] = length // known_product
    init_shapes = axes_lengths
    # Drop the reduced elementary axes before composing the output shape.
    reduced_axes_lengths = [dim for i, dim in enumerate(axes_lengths) if i not in self.reduced_elementary_axes]
    final_shapes = []
    for output_axis, grouping in enumerate(self.output_composite_axes):
        if output_axis == self.ellipsis_positions[1]:
            final_shapes.extend(ellipsis_shape)
        else:
            lengths = [reduced_axes_lengths[elementary_axis] for elementary_axis in grouping]
            # A None (unknown symbolic size) anywhere makes the composite
            # output dimension unknown as well.
            if any(l is None for l in lengths):
                final_shapes.append(None)
            else:
                final_shapes.append(_product(lengths))
    reduced_axes = self.reduced_elementary_axes
    axes_reordering = self.final_axes_grouping_flat
    if optimize:
        return _optimize_transformation(init_shapes, reduced_axes, axes_reordering, final_shapes)
    else:
        return init_shapes, reduced_axes, axes_reordering, final_shapes
[ "def", "reconstruct_from_shape", "(", "self", ",", "shape", ",", "optimize", "=", "False", ")", ":", "axes_lengths", "=", "list", "(", "self", ".", "elementary_axes_lengths", ")", "if", "self", ".", "ellipsis_positions", "!=", "(", "math", ".", "inf", ",", ...
Shape is a tuple that may contain integers, shape symbols (tf, keras, theano) and UnknownSize (keras, mxnet) known axes can be integers or symbols, but not Nones
[ "Shape", "is", "a", "tuple", "that", "may", "contain", "integers", "shape", "symbols", "(", "tf", "keras", "theano", ")", "and", "UnknownSize", "(", "keras", "mxnet", ")", "known", "axes", "can", "be", "integers", "or", "symbols", "but", "not", "Nones" ]
python
train
chemlab/chemlab
chemlab/io/datafile.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/io/datafile.py#L85-L99
def get_handler_class(ext):
    """Get the IOHandler that can handle the extension *ext*."""
    if ext not in _extensions_map:
        raise ValueError("Unknown format for %s extension." % ext)
    fmt = _extensions_map[ext]
    if fmt in _handler_map:
        return _handler_map[fmt]
    # Unknown format: suggest near-miss format names to the caller.
    matches = difflib.get_close_matches(fmt, _handler_map.keys())
    raise ValueError("Unknown Handler for format %s, close matches: %s"
                     % (fmt, str(matches)))
[ "def", "get_handler_class", "(", "ext", ")", ":", "if", "ext", "in", "_extensions_map", ":", "format", "=", "_extensions_map", "[", "ext", "]", "else", ":", "raise", "ValueError", "(", "\"Unknown format for %s extension.\"", "%", "ext", ")", "if", "format", "i...
Get the IOHandler that can handle the extension *ext*.
[ "Get", "the", "IOHandler", "that", "can", "handle", "the", "extension", "*", "ext", "*", "." ]
python
train
MacHu-GWU/pyknackhq-project
pyknackhq/zzz_manual_install.py
https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/zzz_manual_install.py#L100-L141
def install(): """Install your package to all Python version you have installed on Windows. """ import os, shutil _ROOT = os.getcwd() _PACKAGE_NAME = os.path.basename(_ROOT) print("Installing [%s] to all python version..." % _PACKAGE_NAME) # find all Python release installed on this windows computer installed_python_version = list() for root, folder_list, _ in os.walk(r"C:\\"): for folder in folder_list: if folder.startswith("Python"): if os.path.exists(os.path.join(root, folder, "pythonw.exe")): installed_python_version.append(folder) break print("\tYou have installed: {0}".format(", ".join(installed_python_version))) # remove __pycache__ folder and *.pyc file print("\tRemoving *.pyc file ...") pyc_folder_list = list() for root, folder_list, _ in os.walk(_ROOT): if os.path.basename(root) == "__pycache__": pyc_folder_list.append(root) for folder in pyc_folder_list: shutil.rmtree(folder) print("\t\tall *.pyc file has been removed.") # install this package to all python version for py_root in installed_python_version: dst = os.path.join(r"C:\\", py_root, r"Lib\site-packages", _PACKAGE_NAME) try: shutil.rmtree(dst) except: pass print("\tRemoved %s." % dst) shutil.copytree(_ROOT, dst) print("\tInstalled %s." % dst) print("Complete!")
[ "def", "install", "(", ")", ":", "import", "os", ",", "shutil", "_ROOT", "=", "os", ".", "getcwd", "(", ")", "_PACKAGE_NAME", "=", "os", ".", "path", ".", "basename", "(", "_ROOT", ")", "print", "(", "\"Installing [%s] to all python version...\"", "%", "_P...
Install your package to all Python version you have installed on Windows.
[ "Install", "your", "package", "to", "all", "Python", "version", "you", "have", "installed", "on", "Windows", "." ]
python
train
tipsi/tipsi_tools
tipsi_tools/drf/__init__.py
https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/drf/__init__.py#L5-L49
def use_form(form_class, request=None, **top_kwargs): """ Validate request (query_params or request body with args from url) with serializer and pass validated data dict to the view function instead of request object. """ def validated_form(request, **kwargs): # import ipdb; ipdb.set_trace() data = request.query_params.dict() if request.method in ['GET'] else request.data if isinstance(data, QueryDict): form = form_class(data={**data.dict(), **kwargs}) elif isinstance(data, dict): form = form_class(data={**data, **kwargs}) else: form = form_class(data=data, **kwargs) form.is_valid(raise_exception=True) return form if request: kwargs = {} if request.resolver_match: kwargs = {**request.resolver_match.kwargs} if top_kwargs: kwargs = {**kwargs, **top_kwargs} return validated_form(request, **kwargs).validated_data def wrap(func): def method_wrap(view, request, *args, **kwargs): form = validated_form(request, **kwargs) if hasattr(view, 'log'): form.log = view.log return func(view, form.validated_data, *args, **kwargs) def function_wrap(request, *args, **kwargs): form = validated_form(request, **kwargs) return func(form.validated_data, *args, **kwargs) def inner(*args, **kwargs): is_method = isinstance(args[0], APIView) return (method_wrap if is_method else function_wrap)(*args, **kwargs) return inner return wrap
[ "def", "use_form", "(", "form_class", ",", "request", "=", "None", ",", "*", "*", "top_kwargs", ")", ":", "def", "validated_form", "(", "request", ",", "*", "*", "kwargs", ")", ":", "# import ipdb; ipdb.set_trace()", "data", "=", "request", ".", "query_param...
Validate request (query_params or request body with args from url) with serializer and pass validated data dict to the view function instead of request object.
[ "Validate", "request", "(", "query_params", "or", "request", "body", "with", "args", "from", "url", ")", "with", "serializer", "and", "pass", "validated", "data", "dict", "to", "the", "view", "function", "instead", "of", "request", "object", "." ]
python
train
MartinThoma/hwrt
hwrt/utils.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/utils.py#L343-L376
def get_readable_time(t): """ Format the time to a readable format. Parameters ---------- t : int Time in ms Returns ------- string The time splitted to highest used time (minutes, hours, ...) """ ms = t % 1000 t -= ms t /= 1000 s = t % 60 t -= s t /= 60 minutes = t % 60 t -= minutes t /= 60 if t != 0: return "%ih, %i minutes %is %ims" % (t, minutes, s, ms) elif minutes != 0: return "%i minutes %is %ims" % (minutes, s, ms) elif s != 0: return "%is %ims" % (s, ms) else: return "%ims" % ms
[ "def", "get_readable_time", "(", "t", ")", ":", "ms", "=", "t", "%", "1000", "t", "-=", "ms", "t", "/=", "1000", "s", "=", "t", "%", "60", "t", "-=", "s", "t", "/=", "60", "minutes", "=", "t", "%", "60", "t", "-=", "minutes", "t", "/=", "60...
Format the time to a readable format. Parameters ---------- t : int Time in ms Returns ------- string The time splitted to highest used time (minutes, hours, ...)
[ "Format", "the", "time", "to", "a", "readable", "format", "." ]
python
train
openthread/openthread
tools/harness-thci/OpenThread.py
https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread.py#L2060-L2094
def joinCommissioned(self, strPSKd='threadjpaketest', waitTime=20): """start joiner Args: strPSKd: Joiner's PSKd Returns: True: successful to start joiner False: fail to start joiner """ print '%s call joinCommissioned' % self.port self.__sendCommand('ifconfig up') cmd = 'joiner start %s %s' %(strPSKd, self.provisioningUrl) print cmd if self.__sendCommand(cmd)[0] == "Done": maxDuration = 150 # seconds self.joinCommissionedStatus = self.joinStatus['ongoing'] if self.logThreadStatus == self.logStatus['stop']: self.logThread = ThreadRunner.run(target=self.__readCommissioningLogs, args=(maxDuration,)) t_end = time.time() + maxDuration while time.time() < t_end: if self.joinCommissionedStatus == self.joinStatus['succeed']: break elif self.joinCommissionedStatus == self.joinStatus['failed']: return False time.sleep(1) self.__sendCommand('thread start') time.sleep(30) return True else: return False
[ "def", "joinCommissioned", "(", "self", ",", "strPSKd", "=", "'threadjpaketest'", ",", "waitTime", "=", "20", ")", ":", "print", "'%s call joinCommissioned'", "%", "self", ".", "port", "self", ".", "__sendCommand", "(", "'ifconfig up'", ")", "cmd", "=", "'join...
start joiner Args: strPSKd: Joiner's PSKd Returns: True: successful to start joiner False: fail to start joiner
[ "start", "joiner" ]
python
train
QInfer/python-qinfer
src/qinfer/distributions.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/distributions.py#L320-L333
def sample(self, n=1): """ Returns random samples from the current particle distribution according to particle weights. :param int n: The number of samples to draw. :return: The sampled model parameter vectors. :rtype: `~numpy.ndarray` of shape ``(n, updater.n_rvs)``. """ cumsum_weights = np.cumsum(self.particle_weights) return self.particle_locations[np.minimum(cumsum_weights.searchsorted( np.random.random((n,)), side='right' ), len(cumsum_weights) - 1)]
[ "def", "sample", "(", "self", ",", "n", "=", "1", ")", ":", "cumsum_weights", "=", "np", ".", "cumsum", "(", "self", ".", "particle_weights", ")", "return", "self", ".", "particle_locations", "[", "np", ".", "minimum", "(", "cumsum_weights", ".", "search...
Returns random samples from the current particle distribution according to particle weights. :param int n: The number of samples to draw. :return: The sampled model parameter vectors. :rtype: `~numpy.ndarray` of shape ``(n, updater.n_rvs)``.
[ "Returns", "random", "samples", "from", "the", "current", "particle", "distribution", "according", "to", "particle", "weights", "." ]
python
train
barryp/py-amqplib
amqplib/client_0_8/transport.py
https://github.com/barryp/py-amqplib/blob/2b3a47de34b4712c111d0a55d7ff109dffc2a7b2/amqplib/client_0_8/transport.py#L144-L155
def read_frame(self): """ Read an AMQP frame. """ frame_type, channel, size = unpack('>BHI', self._read(7)) payload = self._read(size) ch = ord(self._read(1)) if ch == 206: # '\xce' return frame_type, channel, payload else: raise Exception('Framing Error, received 0x%02x while expecting 0xce' % ch)
[ "def", "read_frame", "(", "self", ")", ":", "frame_type", ",", "channel", ",", "size", "=", "unpack", "(", "'>BHI'", ",", "self", ".", "_read", "(", "7", ")", ")", "payload", "=", "self", ".", "_read", "(", "size", ")", "ch", "=", "ord", "(", "se...
Read an AMQP frame.
[ "Read", "an", "AMQP", "frame", "." ]
python
train
Clarify/clarify_python
clarify_python/clarify.py
https://github.com/Clarify/clarify_python/blob/1a00a5e39f77af9ad7f2e08480a3ab14e7d72aeb/clarify_python/clarify.py#L218-L265
def create_bundle(self, name=None, media_url=None, audio_channel=None, metadata=None, notify_url=None, external_id=None): """Create a new bundle. 'metadata' may be None, or an object that can be converted to a JSON string. See API documentation for restrictions. The conversion will take place before the API call. All other parameters are also optional. For information about these see https://api.clarify.io/docs#!/audio/v1audio_post_1. Returns a data structure equivalent to the JSON returned by the API. If the response status is not 2xx, throws an APIException. If the JSON to python data struct conversion fails, throws an APIDataException.""" # Prepare the data we're going to include in our bundle creation. path = '/' + __api_version__ + '/' + BUNDLES_PATH data = None fields = {} if name is not None: fields['name'] = name if media_url is not None: fields['media_url'] = media_url if audio_channel is not None: fields['audio_channel'] = audio_channel if metadata is not None: fields['metadata'] = json.dumps(metadata) if notify_url is not None: fields['notify_url'] = notify_url if external_id is not None: fields['external_id'] = external_id if len(fields) > 0: data = fields raw_result = self.post(path, data) if raw_result.status < 200 or raw_result.status > 202: raise APIException(raw_result.status, raw_result.json) # Convert the JSON to a python data struct. return self._parse_json(raw_result.json)
[ "def", "create_bundle", "(", "self", ",", "name", "=", "None", ",", "media_url", "=", "None", ",", "audio_channel", "=", "None", ",", "metadata", "=", "None", ",", "notify_url", "=", "None", ",", "external_id", "=", "None", ")", ":", "# Prepare the data we...
Create a new bundle. 'metadata' may be None, or an object that can be converted to a JSON string. See API documentation for restrictions. The conversion will take place before the API call. All other parameters are also optional. For information about these see https://api.clarify.io/docs#!/audio/v1audio_post_1. Returns a data structure equivalent to the JSON returned by the API. If the response status is not 2xx, throws an APIException. If the JSON to python data struct conversion fails, throws an APIDataException.
[ "Create", "a", "new", "bundle", "." ]
python
train
assamite/creamas
creamas/examples/spiro/spiro_agent_mp.py
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro_agent_mp.py#L195-L221
def invent(self, n): '''Invent new spirograph by taking n random steps from current position (spirograph generation parameters) and selecting the best one based on the agent's evaluation (hedonic function). :param int n: how many spirographs are created for evaluation :returns: Best created artifact. :rtype: :py:class:`~creamas.core.agent.Artifact` ''' args = self.randomize_args() img = self.create(args[0], args[1]) best_artifact = SpiroArtifact(self, img, domain='image') ev, _ = self.evaluate(best_artifact) best_artifact.add_eval(self, ev, fr={'args': args}) for i in range(n-1): args = self.randomize_args() img = self.create(args[0], args[1]) artifact = SpiroArtifact(self, img, domain='image') ev, _ = self.evaluate(artifact) artifact.add_eval(self, ev, fr={'args': args}) if ev > best_artifact.evals[self.name]: best_artifact = artifact self.spiro_args = best_artifact.framings[self.name]['args'] best_artifact.in_domain = False best_artifact.self_criticism = 'reject' best_artifact.creation_time = self.age return best_artifact
[ "def", "invent", "(", "self", ",", "n", ")", ":", "args", "=", "self", ".", "randomize_args", "(", ")", "img", "=", "self", ".", "create", "(", "args", "[", "0", "]", ",", "args", "[", "1", "]", ")", "best_artifact", "=", "SpiroArtifact", "(", "s...
Invent new spirograph by taking n random steps from current position (spirograph generation parameters) and selecting the best one based on the agent's evaluation (hedonic function). :param int n: how many spirographs are created for evaluation :returns: Best created artifact. :rtype: :py:class:`~creamas.core.agent.Artifact`
[ "Invent", "new", "spirograph", "by", "taking", "n", "random", "steps", "from", "current", "position", "(", "spirograph", "generation", "parameters", ")", "and", "selecting", "the", "best", "one", "based", "on", "the", "agent", "s", "evaluation", "(", "hedonic"...
python
train
Robpol86/sphinxcontrib-imgur
sphinxcontrib/imgur/nodes.py
https://github.com/Robpol86/sphinxcontrib-imgur/blob/5c178481d645147d10acb096793eda41c12c57af/sphinxcontrib/imgur/nodes.py#L111-L158
def finalize(self, album_cache, image_cache, warn_node): """Update attributes after Sphinx cache is updated. :param dict album_cache: Cache of Imgur albums to read. Keys are Imgur IDs, values are Album instances. :param dict image_cache: Cache of Imgur images to read. Keys are Imgur IDs, values are Image instances. :param function warn_node: sphinx.environment.BuildEnvironment.warn_node without needing node argument. """ album = album_cache[self.imgur_id] if self.album else None image = image_cache[album.cover_id] if self.album else image_cache[self.imgur_id] options = self.options # Determine target. Code in directives.py handles defaults and unsets target_* if :target: is set. if options['target_gallery'] and (album.in_gallery if album else image.in_gallery): options['target'] = '//imgur.com/gallery/{}'.format(album.imgur_id if album else image.imgur_id) elif options['target_page']: options['target'] = '//imgur.com/{}'.format(album.imgur_id if album else image.imgur_id) elif options['target_largest'] and not album: options['target'] = '//i.imgur.com/' + image.filename(full_size=True) elif not options['target'] and (options['width'] or options['height'] or options['scale']): options['target'] = '//i.imgur.com/' + image.filename(full_size=True) # Handle scale with no API data. if options['scale']: if not image.width and not options['width'] and not image.height and not options['height']: options['scale'] = '' warn_node('Could not obtain image size. :scale: option is ignored.') elif not image.width and not options['width']: warn_node('Could not obtain image width. :scale: option is partially ignored.') elif not image.width or not image.height: warn_node('Could not obtain image height. :scale: option is partially ignored.') # Handle scale, width, and height. 
if options['scale'] and (options['width'] or image.width): match = RE_WIDTH_HEIGHT.match(options['width'] or '%dpx' % image.width) options['width'] = '{}{}'.format(int(float(match.group(1)) * (options['scale'] / 100.0)), match.group(2)) if options['scale'] and (options['height'] or image.height): match = RE_WIDTH_HEIGHT.match(options['height'] or '%dpx' % image.height) options['height'] = '{}{}'.format(int(float(match.group(1)) * (options['scale'] / 100.0)), match.group(2)) # Set src and style. self.src = '//i.imgur.com/' + image.filename(options['width'], options['height']) style = [p for p in ((k, options[k]) for k in ('width', 'height')) if p[1]] if style: self.style = '; '.join('{}: {}'.format(k, v) for k, v in style) # Determine alt text. if not options['alt']: options['alt'] = image.title or self.src[2:]
[ "def", "finalize", "(", "self", ",", "album_cache", ",", "image_cache", ",", "warn_node", ")", ":", "album", "=", "album_cache", "[", "self", ".", "imgur_id", "]", "if", "self", ".", "album", "else", "None", "image", "=", "image_cache", "[", "album", "."...
Update attributes after Sphinx cache is updated. :param dict album_cache: Cache of Imgur albums to read. Keys are Imgur IDs, values are Album instances. :param dict image_cache: Cache of Imgur images to read. Keys are Imgur IDs, values are Image instances. :param function warn_node: sphinx.environment.BuildEnvironment.warn_node without needing node argument.
[ "Update", "attributes", "after", "Sphinx", "cache", "is", "updated", "." ]
python
train
googleapis/google-cloud-python
api_core/google/api_core/bidi.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/bidi.py#L202-L222
def open(self): """Opens the stream.""" if self.is_active: raise ValueError("Can not open an already open stream.") request_generator = _RequestQueueGenerator( self._request_queue, initial_request=self._initial_request ) call = self._start_rpc(iter(request_generator), metadata=self._rpc_metadata) request_generator.call = call # TODO: api_core should expose the future interface for wrapped # callables as well. if hasattr(call, "_wrapped"): # pragma: NO COVER call._wrapped.add_done_callback(self._on_call_done) else: call.add_done_callback(self._on_call_done) self._request_generator = request_generator self.call = call
[ "def", "open", "(", "self", ")", ":", "if", "self", ".", "is_active", ":", "raise", "ValueError", "(", "\"Can not open an already open stream.\"", ")", "request_generator", "=", "_RequestQueueGenerator", "(", "self", ".", "_request_queue", ",", "initial_request", "=...
Opens the stream.
[ "Opens", "the", "stream", "." ]
python
train
ThreshingFloor/libtf
libtf/logparsers/tf_auth_log.py
https://github.com/ThreshingFloor/libtf/blob/f1a8710f750639c9b9e2a468ece0d2923bf8c3df/libtf/logparsers/tf_auth_log.py#L64-L85
def _extract_features(self): """ Extracts and sets the feature data from the log file necessary for a reduction """ for parsed_line in self.parsed_lines: # If it's ssh, we can handle it if parsed_line.get('program') == 'sshd': result = self._parse_auth_message(parsed_line['message']) # Add the ip if we have it if 'ip' in result: self.features['ips'].append(result['ip']) # If we haven't seen the ip, add it if result['ip'] not in self.ips_to_pids: # Make the value a list of pids self.ips_to_pids[result['ip']] = [parsed_line['processid']] else: # If we have seen the ip before, add the pid if it's a new one if parsed_line['processid'] not in self.ips_to_pids[result['ip']]: self.ips_to_pids[result['ip']].append(parsed_line['processid'])
[ "def", "_extract_features", "(", "self", ")", ":", "for", "parsed_line", "in", "self", ".", "parsed_lines", ":", "# If it's ssh, we can handle it", "if", "parsed_line", ".", "get", "(", "'program'", ")", "==", "'sshd'", ":", "result", "=", "self", ".", "_parse...
Extracts and sets the feature data from the log file necessary for a reduction
[ "Extracts", "and", "sets", "the", "feature", "data", "from", "the", "log", "file", "necessary", "for", "a", "reduction" ]
python
train
PmagPy/PmagPy
dialogs/pmag_er_magic_dialogs.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L299-L322
def on_saveButton(self, event, grid): """saves any editing of the grid but does not continue to the next window""" wait = wx.BusyInfo("Please wait, working...") wx.SafeYield() if self.grid_frame.drop_down_menu: # unhighlight selected columns, etc. self.grid_frame.drop_down_menu.clean_up() # remove '**' and '^^' from col labels starred_cols, hatted_cols = grid.remove_starred_labels() grid.SaveEditControlValue() # locks in value in cell currently edited grid.HideCellEditControl() # removes focus from cell that was being edited if grid.changes: self.onSave(grid) for col in starred_cols: label = grid.GetColLabelValue(col) grid.SetColLabelValue(col, label + '**') for col in hatted_cols: label = grid.GetColLabelValue(col) grid.SetColLabelValue(col, label + '^^') del wait
[ "def", "on_saveButton", "(", "self", ",", "event", ",", "grid", ")", ":", "wait", "=", "wx", ".", "BusyInfo", "(", "\"Please wait, working...\"", ")", "wx", ".", "SafeYield", "(", ")", "if", "self", ".", "grid_frame", ".", "drop_down_menu", ":", "# unhighl...
saves any editing of the grid but does not continue to the next window
[ "saves", "any", "editing", "of", "the", "grid", "but", "does", "not", "continue", "to", "the", "next", "window" ]
python
train
hydraplatform/hydra-base
hydra_base/lib/scenario.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/scenario.py#L1139-L1198
def update_value_from_mapping(source_resource_attr_id, target_resource_attr_id, source_scenario_id, target_scenario_id, **kwargs): """ Using a resource attribute mapping, take the value from the source and apply it to the target. Both source and target scenarios must be specified (and therefor must exist). """ user_id = int(kwargs.get('user_id')) rm = aliased(ResourceAttrMap, name='rm') #Check the mapping exists. mapping = db.DBSession.query(rm).filter( or_( and_( rm.resource_attr_id_a == source_resource_attr_id, rm.resource_attr_id_b == target_resource_attr_id ), and_( rm.resource_attr_id_a == target_resource_attr_id, rm.resource_attr_id_b == source_resource_attr_id ) ) ).first() if mapping is None: raise ResourceNotFoundError("Mapping between %s and %s not found"% (source_resource_attr_id, target_resource_attr_id)) #check scenarios exist s1 = _get_scenario(source_scenario_id, user_id) s2 = _get_scenario(target_scenario_id, user_id) rs = aliased(ResourceScenario, name='rs') rs1 = db.DBSession.query(rs).filter(rs.resource_attr_id == source_resource_attr_id, rs.scenario_id == source_scenario_id).first() rs2 = db.DBSession.query(rs).filter(rs.resource_attr_id == target_resource_attr_id, rs.scenario_id == target_scenario_id).first() #3 possibilities worth considering: #1: Both RS exist, so update the target RS #2: Target RS does not exist, so create it with the dastaset from RS1 #3: Source RS does not exist, so it must be removed from the target scenario if it exists return_value = None#Either return null or return a new or updated resource scenario if rs1 is not None: if rs2 is not None: log.info("Destination Resource Scenario exists. 
Updating dastaset ID") rs2.dataset_id = rs1.dataset_id else: log.info("Destination has no data, so making a new Resource Scenario") rs2 = ResourceScenario(resource_attr_id=target_resource_attr_id, scenario_id=target_scenario_id, dataset_id=rs1.dataset_id) db.DBSession.add(rs2) db.DBSession.flush() return_value = rs2 else: log.info("Source Resource Scenario does not exist. Deleting destination Resource Scenario") if rs2 is not None: db.DBSession.delete(rs2) db.DBSession.flush() return return_value
[ "def", "update_value_from_mapping", "(", "source_resource_attr_id", ",", "target_resource_attr_id", ",", "source_scenario_id", ",", "target_scenario_id", ",", "*", "*", "kwargs", ")", ":", "user_id", "=", "int", "(", "kwargs", ".", "get", "(", "'user_id'", ")", ")...
Using a resource attribute mapping, take the value from the source and apply it to the target. Both source and target scenarios must be specified (and therefor must exist).
[ "Using", "a", "resource", "attribute", "mapping", "take", "the", "value", "from", "the", "source", "and", "apply", "it", "to", "the", "target", ".", "Both", "source", "and", "target", "scenarios", "must", "be", "specified", "(", "and", "therefor", "must", ...
python
train
bitesofcode/projexui
projexui/dialogs/xwizardbrowserdialog/xwizardplugin.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/dialogs/xwizardbrowserdialog/xwizardplugin.py#L194-L204
def setGroupIcon( cls, groupName, icon ): """ Sets the group icon for the wizard plugin to the inputed icon. :param groupName | <str> icon | <str> """ if ( cls._groupIcons is None ): cls._groupIcons = {} cls._groupIcons[nativestring(groupName)] = icon
[ "def", "setGroupIcon", "(", "cls", ",", "groupName", ",", "icon", ")", ":", "if", "(", "cls", ".", "_groupIcons", "is", "None", ")", ":", "cls", ".", "_groupIcons", "=", "{", "}", "cls", ".", "_groupIcons", "[", "nativestring", "(", "groupName", ")", ...
Sets the group icon for the wizard plugin to the inputed icon. :param groupName | <str> icon | <str>
[ "Sets", "the", "group", "icon", "for", "the", "wizard", "plugin", "to", "the", "inputed", "icon", ".", ":", "param", "groupName", "|", "<str", ">", "icon", "|", "<str", ">" ]
python
train
Gandi/gandi.cli
gandi/cli/modules/paas.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/paas.py#L80-L126
def deploy(cls, remote_name, branch): """Deploy a PaaS instance.""" def get_remote_url(remote): return 'git config --local --get remote.%s.url' % (remote) remote_url = cls.exec_output(get_remote_url(remote_name)) \ .replace('\n', '') if not remote_url or not re.search('gpaas.net|gandi.net', remote_url): remote_name = ('$(git config --local --get branch.%s.remote)' % branch) remote_url = cls.exec_output(get_remote_url(remote_name)) \ .replace('\n', '') error = None if not remote_url: error = True cls.echo('Error: Could not find git remote ' 'to extract deploy url from.') elif not re.search('gpaas.net|gandi.net', remote_url): error = True cls.echo('Error: %s is not a valid Simple Hosting git remote.' % (remote_url)) if error: cls.echo("""This usually happens when: - the current directory has no Simple Hosting git remote attached, in this case, please see $ gandi paas attach --help - the local branch being deployed hasn't been pushed to the \ remote repository yet, in this case, please try $ git push <remote> %s """ % (branch)) cls.echo('Otherwise, it\'s recommended to use' ' the --remote and/or --branch options:\n' '$ gandi deploy --remote <remote> [--branch <branch>]') sys.exit(2) remote_url_no_protocol = remote_url.split('://')[1] splitted_url = remote_url_no_protocol.split('/') paas_access = splitted_url[0] deploy_git_host = splitted_url[1] command = "ssh %s 'deploy %s %s'" \ % (paas_access, deploy_git_host, branch) cls.execute(command)
[ "def", "deploy", "(", "cls", ",", "remote_name", ",", "branch", ")", ":", "def", "get_remote_url", "(", "remote", ")", ":", "return", "'git config --local --get remote.%s.url'", "%", "(", "remote", ")", "remote_url", "=", "cls", ".", "exec_output", "(", "get_r...
Deploy a PaaS instance.
[ "Deploy", "a", "PaaS", "instance", "." ]
python
train
ubyssey/dispatch
dispatch/api/mixins.py
https://github.com/ubyssey/dispatch/blob/8da6084fe61726f20e9cf675190480cfc45ee764/dispatch/api/mixins.py#L107-L116
def exclude_fields(self): """Excludes fields that are included in the queryparameters""" request = self.context.get('request') if request: exclude = request.query_params.get('exclude', None) if exclude is None: return excluded_fields = exclude.split(',') for field in excluded_fields: self.fields.pop(field)
[ "def", "exclude_fields", "(", "self", ")", ":", "request", "=", "self", ".", "context", ".", "get", "(", "'request'", ")", "if", "request", ":", "exclude", "=", "request", ".", "query_params", ".", "get", "(", "'exclude'", ",", "None", ")", "if", "excl...
Excludes fields that are included in the queryparameters
[ "Excludes", "fields", "that", "are", "included", "in", "the", "queryparameters" ]
python
test
juju/python-libjuju
juju/client/_client2.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/_client2.py#L5128-L5141
async def Delete(self, image_ids): ''' image_ids : typing.Sequence[str] Returns -> typing.Sequence[~ErrorResult] ''' # map input types to rpc msg _params = dict() msg = dict(type='ImageMetadata', request='Delete', version=2, params=_params) _params['image-ids'] = image_ids reply = await self.rpc(msg) return reply
[ "async", "def", "Delete", "(", "self", ",", "image_ids", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'ImageMetadata'", ",", "request", "=", "'Delete'", ",", "version", "=", "2", ",", ...
image_ids : typing.Sequence[str] Returns -> typing.Sequence[~ErrorResult]
[ "image_ids", ":", "typing", ".", "Sequence", "[", "str", "]", "Returns", "-", ">", "typing", ".", "Sequence", "[", "~ErrorResult", "]" ]
python
train
andialbrecht/sqlparse
sqlparse/lexer.py
https://github.com/andialbrecht/sqlparse/blob/913b56e34edc7e3025feea4744dbd762774805c3/sqlparse/lexer.py#L28-L73
def get_tokens(text, encoding=None): """ Return an iterable of (tokentype, value) pairs generated from `text`. If `unfiltered` is set to `True`, the filtering mechanism is bypassed even if filters are defined. Also preprocess the text, i.e. expand tabs and strip it if wanted and applies registered filters. Split ``text`` into (tokentype, text) pairs. ``stack`` is the initial stack (default: ``['root']``) """ if isinstance(text, file_types): text = text.read() if isinstance(text, text_type): pass elif isinstance(text, bytes): if encoding: text = text.decode(encoding) else: try: text = text.decode('utf-8') except UnicodeDecodeError: text = text.decode('unicode-escape') else: raise TypeError(u"Expected text or file-like object, got {!r}". format(type(text))) iterable = enumerate(text) for pos, char in iterable: for rexmatch, action in SQL_REGEX: m = rexmatch(text, pos) if not m: continue elif isinstance(action, tokens._TokenType): yield action, m.group() elif callable(action): yield action(m.group()) consume(iterable, m.end() - pos - 1) break else: yield tokens.Error, char
[ "def", "get_tokens", "(", "text", ",", "encoding", "=", "None", ")", ":", "if", "isinstance", "(", "text", ",", "file_types", ")", ":", "text", "=", "text", ".", "read", "(", ")", "if", "isinstance", "(", "text", ",", "text_type", ")", ":", "pass", ...
Return an iterable of (tokentype, value) pairs generated from `text`. If `unfiltered` is set to `True`, the filtering mechanism is bypassed even if filters are defined. Also preprocess the text, i.e. expand tabs and strip it if wanted and applies registered filters. Split ``text`` into (tokentype, text) pairs. ``stack`` is the initial stack (default: ``['root']``)
[ "Return", "an", "iterable", "of", "(", "tokentype", "value", ")", "pairs", "generated", "from", "text", ".", "If", "unfiltered", "is", "set", "to", "True", "the", "filtering", "mechanism", "is", "bypassed", "even", "if", "filters", "are", "defined", "." ]
python
train
jciskey/pygraph
pygraph/functions/spanning_tree.py
https://github.com/jciskey/pygraph/blob/037bb2f32503fecb60d62921f9766d54109f15e2/pygraph/functions/spanning_tree.py#L57-L62
def find_minimum_spanning_forest_as_subgraphs(graph): """Calculates the minimum spanning forest and returns a list of trees as subgraphs.""" forest = find_minimum_spanning_forest(graph) list_of_subgraphs = [get_subgraph_from_edge_list(graph, edge_list) for edge_list in forest] return list_of_subgraphs
[ "def", "find_minimum_spanning_forest_as_subgraphs", "(", "graph", ")", ":", "forest", "=", "find_minimum_spanning_forest", "(", "graph", ")", "list_of_subgraphs", "=", "[", "get_subgraph_from_edge_list", "(", "graph", ",", "edge_list", ")", "for", "edge_list", "in", "...
Calculates the minimum spanning forest and returns a list of trees as subgraphs.
[ "Calculates", "the", "minimum", "spanning", "forest", "and", "returns", "a", "list", "of", "trees", "as", "subgraphs", "." ]
python
train
Prodicode/ann-visualizer
ann_visualizer/visualize.py
https://github.com/Prodicode/ann-visualizer/blob/031f4977aed3b4c1b5c009bcfd70638268b1800f/ann_visualizer/visualize.py#L18-L206
def ann_viz(model, view=True, filename="network.gv", title="My Neural Network"): """Vizualizez a Sequential model. # Arguments model: A Keras model instance. view: whether to display the model after generation. filename: where to save the vizualization. (a .gv file) title: A title for the graph """ from graphviz import Digraph; import keras; from keras.models import Sequential; from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten; import json; input_layer = 0; hidden_layers_nr = 0; layer_types = []; hidden_layers = []; output_layer = 0; for layer in model.layers: if(layer == model.layers[0]): input_layer = int(str(layer.input_shape).split(",")[1][1:-1]); hidden_layers_nr += 1; if (type(layer) == keras.layers.core.Dense): hidden_layers.append(int(str(layer.output_shape).split(",")[1][1:-1])); layer_types.append("Dense"); else: hidden_layers.append(1); if (type(layer) == keras.layers.convolutional.Conv2D): layer_types.append("Conv2D"); elif (type(layer) == keras.layers.pooling.MaxPooling2D): layer_types.append("MaxPooling2D"); elif (type(layer) == keras.layers.core.Dropout): layer_types.append("Dropout"); elif (type(layer) == keras.layers.core.Flatten): layer_types.append("Flatten"); elif (type(layer) == keras.layers.core.Activation): layer_types.append("Activation"); else: if(layer == model.layers[-1]): output_layer = int(str(layer.output_shape).split(",")[1][1:-1]); else: hidden_layers_nr += 1; if (type(layer) == keras.layers.core.Dense): hidden_layers.append(int(str(layer.output_shape).split(",")[1][1:-1])); layer_types.append("Dense"); else: hidden_layers.append(1); if (type(layer) == keras.layers.convolutional.Conv2D): layer_types.append("Conv2D"); elif (type(layer) == keras.layers.pooling.MaxPooling2D): layer_types.append("MaxPooling2D"); elif (type(layer) == keras.layers.core.Dropout): layer_types.append("Dropout"); elif (type(layer) == keras.layers.core.Flatten): layer_types.append("Flatten"); elif (type(layer) == 
keras.layers.core.Activation): layer_types.append("Activation"); last_layer_nodes = input_layer; nodes_up = input_layer; if(type(model.layers[0]) != keras.layers.core.Dense): last_layer_nodes = 1; nodes_up = 1; input_layer = 1; g = Digraph('g', filename=filename); n = 0; g.graph_attr.update(splines="false", nodesep='1', ranksep='2'); #Input Layer with g.subgraph(name='cluster_input') as c: if(type(model.layers[0]) == keras.layers.core.Dense): the_label = title+'\n\n\n\nInput Layer'; if (int(str(model.layers[0].input_shape).split(",")[1][1:-1]) > 10): the_label += " (+"+str(int(str(model.layers[0].input_shape).split(",")[1][1:-1]) - 10)+")"; input_layer = 10; c.attr(color='white') for i in range(0, input_layer): n += 1; c.node(str(n)); c.attr(label=the_label) c.attr(rank='same'); c.node_attr.update(color="#2ecc71", style="filled", fontcolor="#2ecc71", shape="circle"); elif(type(model.layers[0]) == keras.layers.convolutional.Conv2D): #Conv2D Input visualizing the_label = title+'\n\n\n\nInput Layer'; c.attr(color="white", label=the_label); c.node_attr.update(shape="square"); pxls = str(model.layers[0].input_shape).split(','); clr = int(pxls[3][1:-1]); if (clr == 1): clrmap = "Grayscale"; the_color = "black:white"; elif (clr == 3): clrmap = "RGB"; the_color = "#e74c3c:#3498db"; else: clrmap = ""; c.node_attr.update(fontcolor="white", fillcolor=the_color, style="filled"); n += 1; c.node(str(n), label="Image\n"+pxls[1]+" x"+pxls[2]+" pixels\n"+clrmap, fontcolor="white"); else: raise ValueError("ANN Visualizer: Layer not supported for visualizing"); for i in range(0, hidden_layers_nr): with g.subgraph(name="cluster_"+str(i+1)) as c: if (layer_types[i] == "Dense"): c.attr(color='white'); c.attr(rank='same'); #If hidden_layers[i] > 10, dont include all the_label = ""; if (int(str(model.layers[i].output_shape).split(",")[1][1:-1]) > 10): the_label += " (+"+str(int(str(model.layers[i].output_shape).split(",")[1][1:-1]) - 10)+")"; hidden_layers[i] = 10; 
c.attr(labeljust="right", labelloc="b", label=the_label); for j in range(0, hidden_layers[i]): n += 1; c.node(str(n), shape="circle", style="filled", color="#3498db", fontcolor="#3498db"); for h in range(nodes_up - last_layer_nodes + 1 , nodes_up + 1): g.edge(str(h), str(n)); last_layer_nodes = hidden_layers[i]; nodes_up += hidden_layers[i]; elif (layer_types[i] == "Conv2D"): c.attr(style='filled', color='#5faad0'); n += 1; kernel_size = str(model.layers[i].get_config()['kernel_size']).split(',')[0][1] + "x" + str(model.layers[i].get_config()['kernel_size']).split(',')[1][1 : -1]; filters = str(model.layers[i].get_config()['filters']); c.node("conv_"+str(n), label="Convolutional Layer\nKernel Size: "+kernel_size+"\nFilters: "+filters, shape="square"); c.node(str(n), label=filters+"\nFeature Maps", shape="square"); g.edge("conv_"+str(n), str(n)); for h in range(nodes_up - last_layer_nodes + 1 , nodes_up + 1): g.edge(str(h), "conv_"+str(n)); last_layer_nodes = 1; nodes_up += 1; elif (layer_types[i] == "MaxPooling2D"): c.attr(color="white"); n += 1; pool_size = str(model.layers[i].get_config()['pool_size']).split(',')[0][1] + "x" + str(model.layers[i].get_config()['pool_size']).split(',')[1][1 : -1]; c.node(str(n), label="Max Pooling\nPool Size: "+pool_size, style="filled", fillcolor="#8e44ad", fontcolor="white"); for h in range(nodes_up - last_layer_nodes + 1 , nodes_up + 1): g.edge(str(h), str(n)); last_layer_nodes = 1; nodes_up += 1; elif (layer_types[i] == "Flatten"): n += 1; c.attr(color="white"); c.node(str(n), label="Flattening", shape="invtriangle", style="filled", fillcolor="#2c3e50", fontcolor="white"); for h in range(nodes_up - last_layer_nodes + 1 , nodes_up + 1): g.edge(str(h), str(n)); last_layer_nodes = 1; nodes_up += 1; elif (layer_types[i] == "Dropout"): n += 1; c.attr(color="white"); c.node(str(n), label="Dropout Layer", style="filled", fontcolor="white", fillcolor="#f39c12"); for h in range(nodes_up - last_layer_nodes + 1 , nodes_up + 1): 
g.edge(str(h), str(n)); last_layer_nodes = 1; nodes_up += 1; elif (layer_types[i] == "Activation"): n += 1; c.attr(color="white"); fnc = model.layers[i].get_config()['activation']; c.node(str(n), shape="octagon", label="Activation Layer\nFunction: "+fnc, style="filled", fontcolor="white", fillcolor="#00b894"); for h in range(nodes_up - last_layer_nodes + 1 , nodes_up + 1): g.edge(str(h), str(n)); last_layer_nodes = 1; nodes_up += 1; with g.subgraph(name='cluster_output') as c: if (type(model.layers[-1]) == keras.layers.core.Dense): c.attr(color='white') c.attr(rank='same'); c.attr(labeljust="1"); for i in range(1, output_layer+1): n += 1; c.node(str(n), shape="circle", style="filled", color="#e74c3c", fontcolor="#e74c3c"); for h in range(nodes_up - last_layer_nodes + 1 , nodes_up + 1): g.edge(str(h), str(n)); c.attr(label='Output Layer', labelloc="bottom") c.node_attr.update(color="#2ecc71", style="filled", fontcolor="#2ecc71", shape="circle"); g.attr(arrowShape="none"); g.edge_attr.update(arrowhead="none", color="#707070"); if view == True: g.view();
[ "def", "ann_viz", "(", "model", ",", "view", "=", "True", ",", "filename", "=", "\"network.gv\"", ",", "title", "=", "\"My Neural Network\"", ")", ":", "from", "graphviz", "import", "Digraph", "import", "keras", "from", "keras", ".", "models", "import", "Seq...
Vizualizez a Sequential model. # Arguments model: A Keras model instance. view: whether to display the model after generation. filename: where to save the vizualization. (a .gv file) title: A title for the graph
[ "Vizualizez", "a", "Sequential", "model", "." ]
python
train
The-Politico/politico-civic-ap-loader
aploader/management/commands/reup_to_db.py
https://github.com/The-Politico/politico-civic-ap-loader/blob/4afeebb62da4b8f22da63711e7176bf4527bccfb/aploader/management/commands/reup_to_db.py#L189-L395
def process_result(self, result, tabulated, no_bots, election_slug): """ Processes top-level (state) results for candidate races, loads data into the database and sends alerts for winning results. """ # Deconstruct result in variables ( ID, RACE_ID, IS_BALLOT_MEASURE, ELEX_ELECTION_DATE, LEVEL, STATE_POSTAL, REPORTING_UNIT, LAST_NAME, OFFICE_NAME, RACE_TYPE, WINNER, UNCONTESTED, RUNOFF, VOTE_COUNT, VOTE_PERCENT, PRECINCTS_REPORTING, PRECINCTS_REPORTING_PERCENT, PRECINCTS_TOTAL, PARTY, ) = self.deconstruct_result(result) # Skip ballot measures on non-state-level results if IS_BALLOT_MEASURE or LEVEL != DivisionLevel.STATE: return try: ap_meta = APElectionMeta.objects.get( ap_election_id=RACE_ID, election__election_day__slug=election_slug, ) except ObjectDoesNotExist: print( "No AP Meta found for {0} {1} {2}".format( LAST_NAME, OFFICE_NAME, REPORTING_UNIT ) ) return id_components = ID.split("-") CANDIDATE_ID = "{0}-{1}".format(id_components[1], id_components[2]) if LAST_NAME == "None of these candidates": CANDIDATE_ID = "{0}-{1}".format(id_components[0], CANDIDATE_ID) try: candidate_election = CandidateElection.objects.get( election=ap_meta.election, candidate__ap_candidate_id=CANDIDATE_ID, ) except ObjectDoesNotExist: print( "No Candidate found for {0} {1} {2}".format( LAST_NAME, OFFICE_NAME, PARTY ) ) return candidate = candidate_election.candidate division = Division.objects.get( level__name=DivisionLevel.STATE, code_components__postal=STATE_POSTAL, ) filter_kwargs = { "candidate_election": candidate_election, "division": division, } vote_update = {} if not ap_meta.override_ap_votes: vote_update["count"] = VOTE_COUNT vote_update["pct"] = VOTE_PERCENT if not ap_meta.override_ap_call: vote_update["winning"] = WINNER vote_update["runoff"] = RUNOFF if WINNER: ap_meta.called = True if ap_meta.precincts_reporting != PRECINCTS_REPORTING: ap_meta.precincts_reporting = PRECINCTS_REPORTING ap_meta.precincts_total = PRECINCTS_TOTAL ap_meta.precincts_reporting_pct = 
PRECINCTS_REPORTING_PERCENT if PRECINCTS_REPORTING_PERCENT == 1 or UNCONTESTED or tabulated: ap_meta.tabulated = True else: ap_meta.tabulated = False ap_meta.save() votes = Votes.objects.filter(**filter_kwargs) if (WINNER or RUNOFF) and not candidate_election.uncontested: # If new call on contested race, send alerts first = votes.first() if not (first.winning or first.runoff) and not no_bots: if ap_meta.election.party: PRIMARY_PARTY = ap_meta.election.party.label else: PRIMARY_PARTY = None # construct page URL for payload if app_settings.AWS_S3_BUCKET == "interactives.politico.com": base_url = "https://www.politico.com/election-results/2018" end_path = "" else: base_url = "https://s3.amazonaws.com/staging.interactives.politico.com/election-results/2018" # noqa end_path = "index.html" if RACE_TYPE == "Runoff": state_path = "{}/runoff".format(division.slug) elif "Special" in RACE_TYPE: # first check to see if this special is on a state page events = ElectionEvent.objects.filter( division=division, election_day__slug=ELEX_ELECTION_DATE, ) print(events, division, ELEX_ELECTION_DATE) if len(events) > 0: state_path = division.slug else: parsed = datetime.strptime( ELEX_ELECTION_DATE, "%Y-%m-%d" ) month = parsed.strftime("%b").lower() day = parsed.strftime("%d") state_path = "{}/special-election/{}-{}".format( division.slug, month, day ) else: state_path = division.slug url = "{}/{}/{}".format(base_url, state_path, end_path) payload = { "race_id": RACE_ID, "division": division.label, "division_slug": division.slug, "office": format_office_label( candidate.race.office, division.label ), "office_short": short_format_office_label( candidate.race.office, division.label ), "candidate": "{} {}".format( candidate.person.first_name, candidate.person.last_name ), "election_date": ELEX_ELECTION_DATE, "candidate_party": candidate.party.ap_code, "primary_party": PRIMARY_PARTY, "vote_percent": VOTE_PERCENT, "vote_count": VOTE_COUNT, "runoff": RUNOFF, "precincts_reporting_percent": 
PRECINCTS_REPORTING_PERCENT, "jungle": RACE_TYPE == "Open Primary", "runoff_election": RACE_TYPE == "Runoff", "special_election": "Special" in RACE_TYPE, "page_url": url, } call_race_in_slack.delay(payload) call_race_in_slackchat.delay(payload) call_race_on_twitter.delay(payload) votes.update(**vote_update) if OFFICE_NAME == "U.S. House": bop_body = self.bop["house"] elif OFFICE_NAME == "U.S. Senate": bop_body = self.bop["senate"] else: return if not PARTY: return if (WINNER and not ap_meta.override_ap_call) or votes.first().winning: party_slug = PARTY.lower() incumbent = self.get_current_party(ap_meta.election.race) if PARTY not in ["Dem", "GOP"]: if ( STATE_POSTAL in ["VT", "ME"] and OFFICE_NAME == "U.S. Senate" ): bop_body["dem"]["total"] += 1 else: bop_body["other"]["total"] += 1 else: bop_body[party_slug]["total"] += 1 if party_slug != incumbent: print(result, votes.first().winning) print(LAST_NAME, candidate.race.office) bop_body[party_slug]["flips"] += 1
[ "def", "process_result", "(", "self", ",", "result", ",", "tabulated", ",", "no_bots", ",", "election_slug", ")", ":", "# Deconstruct result in variables", "(", "ID", ",", "RACE_ID", ",", "IS_BALLOT_MEASURE", ",", "ELEX_ELECTION_DATE", ",", "LEVEL", ",", "STATE_PO...
Processes top-level (state) results for candidate races, loads data into the database and sends alerts for winning results.
[ "Processes", "top", "-", "level", "(", "state", ")", "results", "for", "candidate", "races", "loads", "data", "into", "the", "database", "and", "sends", "alerts", "for", "winning", "results", "." ]
python
train
hobson/aima
aima/search.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/search.py#L564-L569
def result(self, state, row): "Place the next queen at the given row." col = state.index(None) new = state[:] new[col] = row return new
[ "def", "result", "(", "self", ",", "state", ",", "row", ")", ":", "col", "=", "state", ".", "index", "(", "None", ")", "new", "=", "state", "[", ":", "]", "new", "[", "col", "]", "=", "row", "return", "new" ]
Place the next queen at the given row.
[ "Place", "the", "next", "queen", "at", "the", "given", "row", "." ]
python
valid
DataDog/integrations-core
openstack_controller/datadog_checks/openstack_controller/api.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/openstack_controller/datadog_checks/openstack_controller/api.py#L598-L623
def _get_valid_endpoint(resp, name, entry_type): """ Parse the service catalog returned by the Identity API for an endpoint matching the Nova service with the requested version Sends a CRITICAL service check when no viable candidates are found in the Catalog """ catalog = resp.get('token', {}).get('catalog', []) for entry in catalog: if ( entry.get('name') and entry.get('type') and entry.get('name') == name and entry.get('type') == entry_type ): # Collect any endpoints on the public or internal interface valid_endpoints = {} for ep in entry.get('endpoints'): interface = ep.get('interface', '') if interface in ['public', 'internal']: valid_endpoints[interface] = ep.get('url') if valid_endpoints: # Favor public endpoints over internal return valid_endpoints.get('public', valid_endpoints.get('internal')) return None
[ "def", "_get_valid_endpoint", "(", "resp", ",", "name", ",", "entry_type", ")", ":", "catalog", "=", "resp", ".", "get", "(", "'token'", ",", "{", "}", ")", ".", "get", "(", "'catalog'", ",", "[", "]", ")", "for", "entry", "in", "catalog", ":", "if...
Parse the service catalog returned by the Identity API for an endpoint matching the Nova service with the requested version Sends a CRITICAL service check when no viable candidates are found in the Catalog
[ "Parse", "the", "service", "catalog", "returned", "by", "the", "Identity", "API", "for", "an", "endpoint", "matching", "the", "Nova", "service", "with", "the", "requested", "version", "Sends", "a", "CRITICAL", "service", "check", "when", "no", "viable", "candi...
python
train
titusjan/argos
argos/config/qtctis.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/qtctis.py#L340-L347
def _nodeGetNonDefaultsDict(self): """ Retrieves this nodes` values as a dictionary to be used for persistence. Non-recursive auxiliary function for getNonDefaultsDict """ dct = {} if self.data != self.defaultData: dct['data'] = self.data.toString() # calls QFont.toString() return dct
[ "def", "_nodeGetNonDefaultsDict", "(", "self", ")", ":", "dct", "=", "{", "}", "if", "self", ".", "data", "!=", "self", ".", "defaultData", ":", "dct", "[", "'data'", "]", "=", "self", ".", "data", ".", "toString", "(", ")", "# calls QFont.toString()", ...
Retrieves this nodes` values as a dictionary to be used for persistence. Non-recursive auxiliary function for getNonDefaultsDict
[ "Retrieves", "this", "nodes", "values", "as", "a", "dictionary", "to", "be", "used", "for", "persistence", ".", "Non", "-", "recursive", "auxiliary", "function", "for", "getNonDefaultsDict" ]
python
train
KrzyHonk/bpmn-python
bpmn_python/bpmn_diagram_import.py
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_import.py#L452-L468
def import_activity_to_graph(diagram_graph, process_id, process_attributes, element): """ Method that adds the new element that represents BPMN activity. Should not be used directly, only as a part of method, that imports an element which extends Activity element (task, subprocess etc.) :param diagram_graph: NetworkX graph representing a BPMN process diagram, :param process_id: string object, representing an ID of process element, :param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of imported flow node, :param element: object representing a BPMN XML element which extends 'activity'. """ BpmnDiagramGraphImport.import_flow_node_to_graph(diagram_graph, process_id, process_attributes, element) element_id = element.getAttribute(consts.Consts.id) diagram_graph.node[element_id][consts.Consts.default] = element.getAttribute(consts.Consts.default) \ if element.hasAttribute(consts.Consts.default) else None
[ "def", "import_activity_to_graph", "(", "diagram_graph", ",", "process_id", ",", "process_attributes", ",", "element", ")", ":", "BpmnDiagramGraphImport", ".", "import_flow_node_to_graph", "(", "diagram_graph", ",", "process_id", ",", "process_attributes", ",", "element",...
Method that adds the new element that represents BPMN activity. Should not be used directly, only as a part of method, that imports an element which extends Activity element (task, subprocess etc.) :param diagram_graph: NetworkX graph representing a BPMN process diagram, :param process_id: string object, representing an ID of process element, :param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of imported flow node, :param element: object representing a BPMN XML element which extends 'activity'.
[ "Method", "that", "adds", "the", "new", "element", "that", "represents", "BPMN", "activity", ".", "Should", "not", "be", "used", "directly", "only", "as", "a", "part", "of", "method", "that", "imports", "an", "element", "which", "extends", "Activity", "eleme...
python
train
StyXman/ayrton
ayrton/parser/pyparser/pytokenizer.py
https://github.com/StyXman/ayrton/blob/e1eed5c7ef230e3c2340a1f0bf44c72bbdc0debb/ayrton/parser/pyparser/pytokenizer.py#L15-L51
def match_encoding_declaration(comment): """returns the declared encoding or None This function is a replacement for : >>> py_encoding = re.compile(r"coding[:=]\s*([-\w.]+)") >>> py_encoding.search(comment) """ # the coding line must be ascii try: comment = comment.decode('ascii') except UnicodeDecodeError: return None index = comment.find('coding') if index < 0: return None next_char = comment[index + 6] if next_char not in ':=': return None end_of_decl = comment[index + 7:] index = 0 for char in end_of_decl: if char not in WHITESPACES: break index += 1 else: return None encoding = '' for char in end_of_decl[index:]: if char in EXTENDED_ALNUMCHARS: encoding += char else: break if encoding != '': return encoding return None
[ "def", "match_encoding_declaration", "(", "comment", ")", ":", "# the coding line must be ascii", "try", ":", "comment", "=", "comment", ".", "decode", "(", "'ascii'", ")", "except", "UnicodeDecodeError", ":", "return", "None", "index", "=", "comment", ".", "find"...
returns the declared encoding or None This function is a replacement for : >>> py_encoding = re.compile(r"coding[:=]\s*([-\w.]+)") >>> py_encoding.search(comment)
[ "returns", "the", "declared", "encoding", "or", "None" ]
python
train
log2timeline/plaso
plaso/engine/profilers.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/profilers.py#L286-L296
def Sample(self, task, status): """Takes a sample of the status of a task for profiling. Args: task (Task): a task. status (str): status. """ sample_time = time.time() sample = '{0:f}\t{1:s}\t{2:s}\n'.format( sample_time, task.identifier, status) self._WritesString(sample)
[ "def", "Sample", "(", "self", ",", "task", ",", "status", ")", ":", "sample_time", "=", "time", ".", "time", "(", ")", "sample", "=", "'{0:f}\\t{1:s}\\t{2:s}\\n'", ".", "format", "(", "sample_time", ",", "task", ".", "identifier", ",", "status", ")", "se...
Takes a sample of the status of a task for profiling. Args: task (Task): a task. status (str): status.
[ "Takes", "a", "sample", "of", "the", "status", "of", "a", "task", "for", "profiling", "." ]
python
train
Azure/azure-cli-extensions
src/express-route/azext_express_route/vendored_sdks/network_management_client.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/express-route/azext_express_route/vendored_sdks/network_management_client.py#L2042-L2055
def service_endpoint_policies(self): """Instance depends on the API version: * 2018-07-01: :class:`ServiceEndpointPoliciesOperations<azure.mgmt.network.v2018_07_01.operations.ServiceEndpointPoliciesOperations>` * 2018-08-01: :class:`ServiceEndpointPoliciesOperations<azure.mgmt.network.v2018_08_01.operations.ServiceEndpointPoliciesOperations>` """ api_version = self._get_api_version('service_endpoint_policies') if api_version == '2018-07-01': from .v2018_07_01.operations import ServiceEndpointPoliciesOperations as OperationClass elif api_version == '2018-08-01': from .v2018_08_01.operations import ServiceEndpointPoliciesOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
[ "def", "service_endpoint_policies", "(", "self", ")", ":", "api_version", "=", "self", ".", "_get_api_version", "(", "'service_endpoint_policies'", ")", "if", "api_version", "==", "'2018-07-01'", ":", "from", ".", "v2018_07_01", ".", "operations", "import", "Service...
Instance depends on the API version: * 2018-07-01: :class:`ServiceEndpointPoliciesOperations<azure.mgmt.network.v2018_07_01.operations.ServiceEndpointPoliciesOperations>` * 2018-08-01: :class:`ServiceEndpointPoliciesOperations<azure.mgmt.network.v2018_08_01.operations.ServiceEndpointPoliciesOperations>`
[ "Instance", "depends", "on", "the", "API", "version", ":" ]
python
train
Garee/pytodoist
pytodoist/todoist.py
https://github.com/Garee/pytodoist/blob/3359cbff485ebdbbb4ffbd58d71e21a817874dd7/pytodoist/todoist.py#L1138-L1151
def get_notes(self): """Return a list of all of the project's notes. :return: A list of notes. :rtype: list of :class:`pytodoist.todoist.Note` >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> project = user.get_project('PyTodoist') >>> notes = project.get_notes() """ self.owner.sync() notes = self.owner.notes.values() return [n for n in notes if n.project_id == self.id]
[ "def", "get_notes", "(", "self", ")", ":", "self", ".", "owner", ".", "sync", "(", ")", "notes", "=", "self", ".", "owner", ".", "notes", ".", "values", "(", ")", "return", "[", "n", "for", "n", "in", "notes", "if", "n", ".", "project_id", "==", ...
Return a list of all of the project's notes. :return: A list of notes. :rtype: list of :class:`pytodoist.todoist.Note` >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> project = user.get_project('PyTodoist') >>> notes = project.get_notes()
[ "Return", "a", "list", "of", "all", "of", "the", "project", "s", "notes", "." ]
python
train
wdbm/megaparsex
megaparsex.py
https://github.com/wdbm/megaparsex/blob/59da05410aa1cf8682dcee2bf0bd0572fa42bd29/megaparsex.py#L93-L230
def parse( text = None, humour = 75 ): """ Parse input text using various triggers, some returning text and some for engaging functions. If triggered, a trigger returns text or True if and if not triggered, returns False. If no triggers are triggered, return False, if one trigger is triggered, return the value returned by that trigger, and if multiple triggers are triggered, return a list of the values returned by those triggers. Options such as humour engage or disengage various triggers. """ triggers = [] # general if humour >= 75: triggers.extend([ trigger_keyphrases( text = text, keyphrases = [ "image" ], response = "http://i.imgur.com/MiqrlTh.jpg" ), trigger_keyphrases( text = text, keyphrases = [ "sup", "hi" ], response = "sup home bean" ), trigger_keyphrases( text = text, keyphrases = [ "thanks", "thank you" ], response = "you're welcome, boo ;)" ) ]) # information triggers.extend([ trigger_keyphrases( text = text, keyphrases = [ "where are you", "IP", "I.P.", "IP address", "I.P. address", "ip address" ], function = report_IP ), trigger_keyphrases( text = text, keyphrases = [ "how are you", "are you well", "status" ], function = report_system_status, kwargs = {"humour": humour} ), trigger_keyphrases( text = text, keyphrases = [ "heartbeat" ], function = heartbeat_message ), trigger_keyphrases( text = text, keyphrases = [ "METAR" ], function = report_METAR, kwargs = {"text": text} ), trigger_keyphrases( text = text, keyphrases = [ "TAF" ], response = report_TAF, kwargs = {"text": text} ), trigger_keyphrases( text = text, keyphrases = [ "rain" ], response = report_rain_times, kwargs = {"text": text} ) ]) # actions triggers.extend([ trigger_keyphrases( text = text, keyphrases = [ "command", "run command", "engage command", "execute command" ], response = command() ), trigger_keyphrases( text = text, keyphrases = [ "restart" ], function = restart, confirm = True, confirmation_prompt = "Do you want to restart this " "program? 
(y/n)", confirmation_feedback_confirm = "confirm restart", confirmation_feedback_deny = "deny restart" ) ]) if any(triggers): responses = [response for response in triggers if response] if len(responses) > 1: return responses else: return responses[0] else: return False
[ "def", "parse", "(", "text", "=", "None", ",", "humour", "=", "75", ")", ":", "triggers", "=", "[", "]", "# general", "if", "humour", ">=", "75", ":", "triggers", ".", "extend", "(", "[", "trigger_keyphrases", "(", "text", "=", "text", ",", "keyphras...
Parse input text using various triggers, some returning text and some for engaging functions. If triggered, a trigger returns text or True if and if not triggered, returns False. If no triggers are triggered, return False, if one trigger is triggered, return the value returned by that trigger, and if multiple triggers are triggered, return a list of the values returned by those triggers. Options such as humour engage or disengage various triggers.
[ "Parse", "input", "text", "using", "various", "triggers", "some", "returning", "text", "and", "some", "for", "engaging", "functions", ".", "If", "triggered", "a", "trigger", "returns", "text", "or", "True", "if", "and", "if", "not", "triggered", "returns", "...
python
train
kallimachos/sphinxmark
sphinxmark/__init__.py
https://github.com/kallimachos/sphinxmark/blob/f7b17d9dabf1fff448bb38d90474498f0d203990/sphinxmark/__init__.py#L152-L159
def watermark(app, env): """Add watermark.""" if app.config.sphinxmark_enable is True: LOG.info('adding watermark...', nonl=True) buildpath, imagefile = getimage(app) cssname = buildcss(app, buildpath, imagefile) app.add_css_file(cssname) LOG.info(' done')
[ "def", "watermark", "(", "app", ",", "env", ")", ":", "if", "app", ".", "config", ".", "sphinxmark_enable", "is", "True", ":", "LOG", ".", "info", "(", "'adding watermark...'", ",", "nonl", "=", "True", ")", "buildpath", ",", "imagefile", "=", "getimage"...
Add watermark.
[ "Add", "watermark", "." ]
python
train
cloud-custodian/cloud-custodian
c7n/filters/offhours.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/filters/offhours.py#L398-L438
def process_resource_schedule(self, i, value, time_type): """Does the resource tag schedule and policy match the current time.""" rid = i[self.id_key] # this is to normalize trailing semicolons which when done allows # dateutil.parser.parse to process: value='off=(m-f,1);' properly. # before this normalization, some cases would silently fail. value = ';'.join(filter(None, value.split(';'))) if self.parser.has_resource_schedule(value, time_type): schedule = self.parser.parse(value) elif self.parser.keys_are_valid(value): # respect timezone from tag raw_data = self.parser.raw_data(value) if 'tz' in raw_data: schedule = dict(self.default_schedule) schedule['tz'] = raw_data['tz'] else: schedule = self.default_schedule else: schedule = None if schedule is None: log.warning( "Invalid schedule on resource:%s value:%s", rid, value) self.parse_errors.append((rid, value)) return False tz = self.get_tz(schedule['tz']) if not tz: log.warning( "Could not resolve tz on resource:%s value:%s", rid, value) self.parse_errors.append((rid, value)) return False now = datetime.datetime.now(tz).replace( minute=0, second=0, microsecond=0) now_str = now.strftime("%Y-%m-%d") if 'skip-days-from' in self.data: values = ValuesFrom(self.data['skip-days-from'], self.manager) self.skip_days = values.get_values() else: self.skip_days = self.data.get('skip-days', []) if now_str in self.skip_days: return False return self.match(now, schedule)
[ "def", "process_resource_schedule", "(", "self", ",", "i", ",", "value", ",", "time_type", ")", ":", "rid", "=", "i", "[", "self", ".", "id_key", "]", "# this is to normalize trailing semicolons which when done allows", "# dateutil.parser.parse to process: value='off=(m-f,1...
Does the resource tag schedule and policy match the current time.
[ "Does", "the", "resource", "tag", "schedule", "and", "policy", "match", "the", "current", "time", "." ]
python
train
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/tree.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/tree.py#L26-L40
def get_position(self, rst_tree, node_id=None): """Get the linear position of an element of this DGParentedTree in an RSTTree. If ``node_id`` is given, this will return the position of the subtree with that node ID. Otherwise, the position of the root of this DGParentedTree in the given RSTTree is returned. """ if node_id is None: node_id = self.root_id if node_id in rst_tree.edu_set: return rst_tree.edus.index(node_id) return min(self.get_position(rst_tree, child_node_id) for child_node_id in rst_tree.child_dict[node_id])
[ "def", "get_position", "(", "self", ",", "rst_tree", ",", "node_id", "=", "None", ")", ":", "if", "node_id", "is", "None", ":", "node_id", "=", "self", ".", "root_id", "if", "node_id", "in", "rst_tree", ".", "edu_set", ":", "return", "rst_tree", ".", "...
Get the linear position of an element of this DGParentedTree in an RSTTree. If ``node_id`` is given, this will return the position of the subtree with that node ID. Otherwise, the position of the root of this DGParentedTree in the given RSTTree is returned.
[ "Get", "the", "linear", "position", "of", "an", "element", "of", "this", "DGParentedTree", "in", "an", "RSTTree", "." ]
python
train
hellosign/hellosign-python-sdk
hellosign_sdk/hsclient.py
https://github.com/hellosign/hellosign-python-sdk/blob/4325a29ad5766380a214eac3914511f62f7ecba4/hellosign_sdk/hsclient.py#L173-L206
def create_account(self, email_address, password=None, client_id=None, client_secret=None): ''' Create a new account. If the account is created via an app, then Account.oauth will contain the OAuth data that can be used to execute actions on behalf of the newly created account. Args: email_address (str): Email address of the new account to create password (str): [DEPRECATED] This parameter will be ignored client_id (str, optional): Client id of the app to use to create this account client_secret (str, optional): Secret of the app to use to create this account Returns: The new Account object ''' request = self._get_request() params = { 'email_address': email_address } if client_id: params['client_id'] = client_id params['client_secret'] = client_secret response = request.post(self.ACCOUNT_CREATE_URL, params) if 'oauth_data' in response: response["account"]["oauth"] = response['oauth_data'] return response
[ "def", "create_account", "(", "self", ",", "email_address", ",", "password", "=", "None", ",", "client_id", "=", "None", ",", "client_secret", "=", "None", ")", ":", "request", "=", "self", ".", "_get_request", "(", ")", "params", "=", "{", "'email_address...
Create a new account. If the account is created via an app, then Account.oauth will contain the OAuth data that can be used to execute actions on behalf of the newly created account. Args: email_address (str): Email address of the new account to create password (str): [DEPRECATED] This parameter will be ignored client_id (str, optional): Client id of the app to use to create this account client_secret (str, optional): Secret of the app to use to create this account Returns: The new Account object
[ "Create", "a", "new", "account", "." ]
python
train
jayferg/faderport
faderport.py
https://github.com/jayferg/faderport/blob/53152797f3dedd0fa56d66068313f5484e469a68/faderport.py#L354-L367
def find_faderport_input_name(number=0): """ Find the MIDI input name for a connected FaderPort. NOTE! Untested for more than one FaderPort attached. :param number: 0 unless you've got more than one FaderPort attached. In which case 0 is the first, 1 is the second etc :return: Port name or None """ ins = [i for i in mido.get_input_names() if i.lower().startswith('faderport')] if 0 <= number < len(ins): return ins[number] else: return None
[ "def", "find_faderport_input_name", "(", "number", "=", "0", ")", ":", "ins", "=", "[", "i", "for", "i", "in", "mido", ".", "get_input_names", "(", ")", "if", "i", ".", "lower", "(", ")", ".", "startswith", "(", "'faderport'", ")", "]", "if", "0", ...
Find the MIDI input name for a connected FaderPort. NOTE! Untested for more than one FaderPort attached. :param number: 0 unless you've got more than one FaderPort attached. In which case 0 is the first, 1 is the second etc :return: Port name or None
[ "Find", "the", "MIDI", "input", "name", "for", "a", "connected", "FaderPort", "." ]
python
train
spulec/moto
moto/packages/httpretty/http.py
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/packages/httpretty/http.py#L144-L154
def last_requestline(sent_data): """ Find the last line in sent_data that can be parsed with parse_requestline """ for line in reversed(sent_data): try: parse_requestline(decode_utf8(line)) except ValueError: pass else: return line
[ "def", "last_requestline", "(", "sent_data", ")", ":", "for", "line", "in", "reversed", "(", "sent_data", ")", ":", "try", ":", "parse_requestline", "(", "decode_utf8", "(", "line", ")", ")", "except", "ValueError", ":", "pass", "else", ":", "return", "lin...
Find the last line in sent_data that can be parsed with parse_requestline
[ "Find", "the", "last", "line", "in", "sent_data", "that", "can", "be", "parsed", "with", "parse_requestline" ]
python
train
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L637-L651
def get_tags_of_supplier_per_page(self, supplier_id, per_page=1000, page=1): """ Get tags of suppliers per page :param supplier_id: the supplier id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list """ return self._get_resource_per_page( resource=SUPPLIER_TAGS, per_page=per_page, page=page, params={'supplier_id': supplier_id}, )
[ "def", "get_tags_of_supplier_per_page", "(", "self", ",", "supplier_id", ",", "per_page", "=", "1000", ",", "page", "=", "1", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "SUPPLIER_TAGS", ",", "per_page", "=", "per_page", ",...
Get tags of suppliers per page :param supplier_id: the supplier id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
[ "Get", "tags", "of", "suppliers", "per", "page" ]
python
train
crankycoder/hydra
src/hydra.py
https://github.com/crankycoder/hydra/blob/3be536bd0c6716d4efcfde3e132582e6066bae43/src/hydra.py#L4-L18
def ReadingBloomFilter(filename, want_lock=False): """ Create a read-only bloom filter with an upperbound of (num_elements, max_fp_prob) as a specification and using filename as the backing datastore. """ with open('{}.desc'.format(filename), 'r') as descriptor: num_elements = int(descriptor.readline()) max_fp_prob = float(descriptor.readline()) ignore_case = int(descriptor.readline()) return _hydra.BloomFilter.getFilter( num_elements, max_fp_prob, filename=filename, ignore_case=ignore_case, read_only=True, want_lock=want_lock)
[ "def", "ReadingBloomFilter", "(", "filename", ",", "want_lock", "=", "False", ")", ":", "with", "open", "(", "'{}.desc'", ".", "format", "(", "filename", ")", ",", "'r'", ")", "as", "descriptor", ":", "num_elements", "=", "int", "(", "descriptor", ".", "...
Create a read-only bloom filter with an upperbound of (num_elements, max_fp_prob) as a specification and using filename as the backing datastore.
[ "Create", "a", "read", "-", "only", "bloom", "filter", "with", "an", "upperbound", "of", "(", "num_elements", "max_fp_prob", ")", "as", "a", "specification", "and", "using", "filename", "as", "the", "backing", "datastore", "." ]
python
test
fracpete/python-weka-wrapper3
python/weka/core/capabilities.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/capabilities.py#L113-L124
def owner(self): """ Returns the owner of these capabilities, if any. :return: the owner, can be None :rtype: JavaObject """ obj = javabridge.call(self.jobject, "getOwner", "()Lweka/core/CapabilitiesHandler;") if obj is None: return None else: return JavaObject(jobject=obj)
[ "def", "owner", "(", "self", ")", ":", "obj", "=", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"getOwner\"", ",", "\"()Lweka/core/CapabilitiesHandler;\"", ")", "if", "obj", "is", "None", ":", "return", "None", "else", ":", "return", "Jav...
Returns the owner of these capabilities, if any. :return: the owner, can be None :rtype: JavaObject
[ "Returns", "the", "owner", "of", "these", "capabilities", "if", "any", "." ]
python
train
digi604/django-smart-selects
smart_selects/widgets.py
https://github.com/digi604/django-smart-selects/blob/05dcc4a3de2874499ff3b9a3dfac5c623206e3e5/smart_selects/widgets.py#L157-L183
def _get_available_choices(self, queryset, value): """ get possible choices for selection """ item = queryset.filter(pk=value).first() if item: try: pk = getattr(item, self.chained_model_field + "_id") filter = {self.chained_model_field: pk} except AttributeError: try: # maybe m2m? pks = getattr(item, self.chained_model_field).all().values_list('pk', flat=True) filter = {self.chained_model_field + "__in": pks} except AttributeError: try: # maybe a set? pks = getattr(item, self.chained_model_field + "_set").all().values_list('pk', flat=True) filter = {self.chained_model_field + "__in": pks} except AttributeError: # give up filter = {} filtered = list(get_model(self.to_app_name, self.to_model_name).objects.filter(**filter).distinct()) if self.sort: sort_results(filtered) else: # invalid value for queryset filtered = [] return filtered
[ "def", "_get_available_choices", "(", "self", ",", "queryset", ",", "value", ")", ":", "item", "=", "queryset", ".", "filter", "(", "pk", "=", "value", ")", ".", "first", "(", ")", "if", "item", ":", "try", ":", "pk", "=", "getattr", "(", "item", "...
get possible choices for selection
[ "get", "possible", "choices", "for", "selection" ]
python
valid
mwgielen/jackal
jackal/core.py
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L495-L519
def find_object(self, username, secret, domain=None, host_ip=None, service_id=None): """ Searches elasticsearch for objects with the same username, password, optional domain, host_ip and service_id. """ # Not sure yet if this is advisable... Older passwords can be overwritten... search = Credential.search() search = search.filter("term", username=username) search = search.filter("term", secret=secret) if domain: search = search.filter("term", domain=domain) else: search = search.exclude("exists", field="domain") if host_ip: search = search.filter("term", host_ip=host_ip) else: search = search.exclude("exists", field="host_ip") if service_id: search = search.filter("term", service_id=service_id) else: search = search.exclude("exists", field="service_id") if search.count(): result = search[0].execute()[0] return result else: return None
[ "def", "find_object", "(", "self", ",", "username", ",", "secret", ",", "domain", "=", "None", ",", "host_ip", "=", "None", ",", "service_id", "=", "None", ")", ":", "# Not sure yet if this is advisable... Older passwords can be overwritten...", "search", "=", "Cred...
Searches elasticsearch for objects with the same username, password, optional domain, host_ip and service_id.
[ "Searches", "elasticsearch", "for", "objects", "with", "the", "same", "username", "password", "optional", "domain", "host_ip", "and", "service_id", "." ]
python
valid
agoragames/haigha
haigha/connection.py
https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L534-L560
def dispatch(self, frame): ''' Override the default dispatch since we don't need the rest of the stack. ''' if frame.type() == HeartbeatFrame.type(): self.send_heartbeat() elif frame.type() == MethodFrame.type(): if frame.class_id == 10: cb = self._method_map.get(frame.method_id) if cb: method = self.clear_synchronous_cb(cb) method(frame) else: raise Channel.InvalidMethod( "unsupported method %d on channel %d", frame.method_id, self.channel_id) else: raise Channel.InvalidClass( "class %d is not supported on channel %d", frame.class_id, self.channel_id) else: raise Frame.InvalidFrameType( "frame type %d is not supported on channel %d", frame.type(), self.channel_id)
[ "def", "dispatch", "(", "self", ",", "frame", ")", ":", "if", "frame", ".", "type", "(", ")", "==", "HeartbeatFrame", ".", "type", "(", ")", ":", "self", ".", "send_heartbeat", "(", ")", "elif", "frame", ".", "type", "(", ")", "==", "MethodFrame", ...
Override the default dispatch since we don't need the rest of the stack.
[ "Override", "the", "default", "dispatch", "since", "we", "don", "t", "need", "the", "rest", "of", "the", "stack", "." ]
python
train
bmcfee/pumpp
pumpp/task/regression.py
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/regression.py#L64-L92
def transform_annotation(self, ann, duration): '''Apply the vector transformation. Parameters ---------- ann : jams.Annotation The input annotation duration : number > 0 The duration of the track Returns ------- data : dict data['vector'] : np.ndarray, shape=(dimension,) Raises ------ DataError If the input dimension does not match ''' _, values = ann.to_interval_values() vector = np.asarray(values[0], dtype=self.dtype) if len(vector) != self.dimension: raise DataError('vector dimension({:0}) ' '!= self.dimension({:1})' .format(len(vector), self.dimension)) return {'vector': vector}
[ "def", "transform_annotation", "(", "self", ",", "ann", ",", "duration", ")", ":", "_", ",", "values", "=", "ann", ".", "to_interval_values", "(", ")", "vector", "=", "np", ".", "asarray", "(", "values", "[", "0", "]", ",", "dtype", "=", "self", ".",...
Apply the vector transformation. Parameters ---------- ann : jams.Annotation The input annotation duration : number > 0 The duration of the track Returns ------- data : dict data['vector'] : np.ndarray, shape=(dimension,) Raises ------ DataError If the input dimension does not match
[ "Apply", "the", "vector", "transformation", "." ]
python
train
Esri/ArcREST
src/arcrest/manageags/_system.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageags/_system.py#L604-L629
def recover(self,runAsync=False): """ If the shared configuration store for a site is unavailable, a site in read-only mode will operate in a degraded capacity that allows access to the ArcGIS Server Administrator Directory. You can recover a site if the shared configuration store is permanently lost. The site must be in read-only mode, and the site configuration files must have been copied to the local repository when switching site modes. The recover operation will copy the configuration store from the local repository into the shared configuration store location. The copied local repository will be from the machine in the site where the recover operation is performed. Inputs: runAsync - default False - Decides if this operation must run asynchronously. """ url = self._url + "/recover" params = { "f" : "json", "runAsync" : runAsync } return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "recover", "(", "self", ",", "runAsync", "=", "False", ")", ":", "url", "=", "self", ".", "_url", "+", "\"/recover\"", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"runAsync\"", ":", "runAsync", "}", "return", "self", ".", "_post", "(", ...
If the shared configuration store for a site is unavailable, a site in read-only mode will operate in a degraded capacity that allows access to the ArcGIS Server Administrator Directory. You can recover a site if the shared configuration store is permanently lost. The site must be in read-only mode, and the site configuration files must have been copied to the local repository when switching site modes. The recover operation will copy the configuration store from the local repository into the shared configuration store location. The copied local repository will be from the machine in the site where the recover operation is performed. Inputs: runAsync - default False - Decides if this operation must run asynchronously.
[ "If", "the", "shared", "configuration", "store", "for", "a", "site", "is", "unavailable", "a", "site", "in", "read", "-", "only", "mode", "will", "operate", "in", "a", "degraded", "capacity", "that", "allows", "access", "to", "the", "ArcGIS", "Server", "Ad...
python
train
brocade/pynos
pynos/versions/base/interface.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/interface.py#L3321-L3367
def ve_interfaces(self, **kwargs): """list[dict]: A list of dictionary items describing the operational state of ve interfaces along with the ip address associations. Args: rbridge_id (str): rbridge-id for device. callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: None Examples: >>> import pynos.device >>> conn = ('10.24.39.211', '22') >>> auth = ('admin', 'password') >>> with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.ve_interfaces() ... output = dev.interface.ve_interfaces(rbridge_id='1') """ urn = "{urn:brocade.com:mgmt:brocade-interface-ext}" rbridge_id = kwargs.pop('rbridge_id', None) ip_result = [] request_interface = self._get_intf_rb_id(rbridge_id=rbridge_id) interface_result = self._callback(request_interface, 'get') for interface in interface_result.findall('%sinterface' % urn): int_type = interface.find('%sinterface-type' % urn).text int_name = interface.find('%sinterface-name' % urn).text int_state = interface.find('%sif-state' % urn).text int_proto_state = interface.find('%sline-protocol-state' % urn).text ip_address = interface.find('.//%sipv4' % urn).text if_name = interface.find('%sif-name' % urn).text results = {'interface-type': int_type, 'interface-name': int_name, 'if-name': if_name, 'interface-state': int_state, 'interface-proto-state': int_proto_state, 'ip-address': ip_address} ip_result.append(results) return ip_result
[ "def", "ve_interfaces", "(", "self", ",", "*", "*", "kwargs", ")", ":", "urn", "=", "\"{urn:brocade.com:mgmt:brocade-interface-ext}\"", "rbridge_id", "=", "kwargs", ".", "pop", "(", "'rbridge_id'", ",", "None", ")", "ip_result", "=", "[", "]", "request_interface...
list[dict]: A list of dictionary items describing the operational state of ve interfaces along with the ip address associations. Args: rbridge_id (str): rbridge-id for device. callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: None Examples: >>> import pynos.device >>> conn = ('10.24.39.211', '22') >>> auth = ('admin', 'password') >>> with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.ve_interfaces() ... output = dev.interface.ve_interfaces(rbridge_id='1')
[ "list", "[", "dict", "]", ":", "A", "list", "of", "dictionary", "items", "describing", "the", "operational", "state", "of", "ve", "interfaces", "along", "with", "the", "ip", "address", "associations", "." ]
python
train
pylast/pylast
src/pylast/__init__.py
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L2423-L2428
def is_subscriber(self): """Returns whether the user is a subscriber or not. True or False.""" doc = self._request(self.ws_prefix + ".getInfo", True) return _extract(doc, "subscriber") == "1"
[ "def", "is_subscriber", "(", "self", ")", ":", "doc", "=", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getInfo\"", ",", "True", ")", "return", "_extract", "(", "doc", ",", "\"subscriber\"", ")", "==", "\"1\"" ]
Returns whether the user is a subscriber or not. True or False.
[ "Returns", "whether", "the", "user", "is", "a", "subscriber", "or", "not", ".", "True", "or", "False", "." ]
python
train
specialunderwear/django-easymode
easymode/management/commands/easy_copy_language.py
https://github.com/specialunderwear/django-easymode/blob/92f674b91fb8c54d6e379e2664e2000872d9c95e/easymode/management/commands/easy_copy_language.py#L24-L63
def handle(self, source, target, app=None, **options): """ command execution """ translation.activate(settings.LANGUAGE_CODE) if app: unpack = app.split('.') if len(unpack) == 2: models = [get_model(unpack[0], unpack[1])] elif len(unpack) == 1: models = get_models(get_app(unpack[0])) else: models = get_models() for model in models: if hasattr(model, 'localized_fields'): model_full_name = '%s.%s' % (model._meta.app_label, model._meta.module_name) update_instances = set() messages = [] for instance in model.objects.all(): for field in model.localized_fields: source_field = get_real_fieldname(field, source) target_field = get_real_fieldname(field, target) if hasattr(instance, source_field) and hasattr(instance, target_field): source_field_value = getattr(instance, source_field) target_field_value = getattr(instance, target_field) if target_field_value in (None, u'')\ and source_field_value not in (None, u''): setattr(instance, target_field, force_unicode(source_field_value)) update_instances.add(instance) messages.append(u"%s %s %s will become %s" % (model_full_name, instance, target_field, force_unicode(source_field_value))) if len(update_instances): if self.ask_for_confirmation(messages, u'%s.%s' % (model._meta.app_label, model._meta.module_name)): for update_instance in update_instances: print u"saving %s" % update_instance update_instance.save()
[ "def", "handle", "(", "self", ",", "source", ",", "target", ",", "app", "=", "None", ",", "*", "*", "options", ")", ":", "translation", ".", "activate", "(", "settings", ".", "LANGUAGE_CODE", ")", "if", "app", ":", "unpack", "=", "app", ".", "split",...
command execution
[ "command", "execution" ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/visuals/transforms/_util.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/transforms/_util.py#L47-L82
def as_vec4(obj, default=(0, 0, 0, 1)): """ Convert `obj` to 4-element vector (numpy array with shape[-1] == 4) Parameters ---------- obj : array-like Original object. default : array-like The defaults to use if the object does not have 4 entries. Returns ------- obj : array-like The object promoted to have 4 elements. Notes ----- `obj` will have at least two dimensions. If `obj` has < 4 elements, then new elements are added from `default`. For inputs intended as a position or translation, use default=(0,0,0,1). For inputs intended as scale factors, use default=(1,1,1,1). """ obj = np.atleast_2d(obj) # For multiple vectors, reshape to (..., 4) if obj.shape[-1] < 4: new = np.empty(obj.shape[:-1] + (4,), dtype=obj.dtype) new[:] = default new[..., :obj.shape[-1]] = obj obj = new elif obj.shape[-1] > 4: raise TypeError("Array shape %s cannot be converted to vec4" % obj.shape) return obj
[ "def", "as_vec4", "(", "obj", ",", "default", "=", "(", "0", ",", "0", ",", "0", ",", "1", ")", ")", ":", "obj", "=", "np", ".", "atleast_2d", "(", "obj", ")", "# For multiple vectors, reshape to (..., 4)", "if", "obj", ".", "shape", "[", "-", "1", ...
Convert `obj` to 4-element vector (numpy array with shape[-1] == 4) Parameters ---------- obj : array-like Original object. default : array-like The defaults to use if the object does not have 4 entries. Returns ------- obj : array-like The object promoted to have 4 elements. Notes ----- `obj` will have at least two dimensions. If `obj` has < 4 elements, then new elements are added from `default`. For inputs intended as a position or translation, use default=(0,0,0,1). For inputs intended as scale factors, use default=(1,1,1,1).
[ "Convert", "obj", "to", "4", "-", "element", "vector", "(", "numpy", "array", "with", "shape", "[", "-", "1", "]", "==", "4", ")" ]
python
train
bpython/curtsies
curtsies/formatstring.py
https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/curtsies/formatstring.py#L395-L408
def ljust(self, width, fillchar=None): """S.ljust(width[, fillchar]) -> string If a fillchar is provided, less formatting information will be preserved """ if fillchar is not None: return fmtstr(self.s.ljust(width, fillchar), **self.shared_atts) to_add = ' ' * (width - len(self.s)) shared = self.shared_atts if 'bg' in shared: return self + fmtstr(to_add, bg=shared[str('bg')]) if to_add else self else: uniform = self.new_with_atts_removed('bg') return uniform + fmtstr(to_add, **self.shared_atts) if to_add else uniform
[ "def", "ljust", "(", "self", ",", "width", ",", "fillchar", "=", "None", ")", ":", "if", "fillchar", "is", "not", "None", ":", "return", "fmtstr", "(", "self", ".", "s", ".", "ljust", "(", "width", ",", "fillchar", ")", ",", "*", "*", "self", "."...
S.ljust(width[, fillchar]) -> string If a fillchar is provided, less formatting information will be preserved
[ "S", ".", "ljust", "(", "width", "[", "fillchar", "]", ")", "-", ">", "string" ]
python
train
ClimateImpactLab/DataFS
datafs/core/data_api.py
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_api.py#L304-L361
def listdir(self, location, authority_name=None): ''' List archive path components at a given location .. Note :: When using listdir on versioned archives, listdir will provide the version numbers when a full archive path is supplied as the location argument. This is because DataFS stores the archive path as a directory and the versions as the actual files when versioning is on. Parameters ---------- location: str Path of the "directory" to search `location` can be a path relative to the authority root (e.g `/MyFiles/Data`) or can include authority as a protocol (e.g. `my_auth://MyFiles/Data`). If the authority is specified as a protocol, the `authority_name` argument is ignored. authority_name: str Name of the authority to search (optional) If no authority is specified, the default authority is used (if only one authority is attached or if :py:attr:`DefaultAuthorityName` is assigned). Returns ------- list Archive path components that exist at the given "directory" location on the specified authority Raises ------ ValueError A ValueError is raised if the authority is ambiguous or invalid ''' authority_name, location = self._normalize_archive_name( location, authority_name=authority_name) if authority_name is None: authority_name = self.default_authority_name return self._authorities[authority_name].fs.listdir(location)
[ "def", "listdir", "(", "self", ",", "location", ",", "authority_name", "=", "None", ")", ":", "authority_name", ",", "location", "=", "self", ".", "_normalize_archive_name", "(", "location", ",", "authority_name", "=", "authority_name", ")", "if", "authority_nam...
List archive path components at a given location .. Note :: When using listdir on versioned archives, listdir will provide the version numbers when a full archive path is supplied as the location argument. This is because DataFS stores the archive path as a directory and the versions as the actual files when versioning is on. Parameters ---------- location: str Path of the "directory" to search `location` can be a path relative to the authority root (e.g `/MyFiles/Data`) or can include authority as a protocol (e.g. `my_auth://MyFiles/Data`). If the authority is specified as a protocol, the `authority_name` argument is ignored. authority_name: str Name of the authority to search (optional) If no authority is specified, the default authority is used (if only one authority is attached or if :py:attr:`DefaultAuthorityName` is assigned). Returns ------- list Archive path components that exist at the given "directory" location on the specified authority Raises ------ ValueError A ValueError is raised if the authority is ambiguous or invalid
[ "List", "archive", "path", "components", "at", "a", "given", "location" ]
python
train
networks-lab/tidyextractors
tidyextractors/base_extractor.py
https://github.com/networks-lab/tidyextractors/blob/658448ed533beecf32adcc188fc64d1068d15ca6/tidyextractors/base_extractor.py#L136-L317
def expand_on(self, col1, col2, rename1 = None, rename2 = None, drop = [], drop_collections = False): """ Returns a reshaped version of extractor's data, where unique combinations of values from col1 and col2 are given individual rows. Example function call from ``tidymbox``: .. code-block:: python self.expand_on('From', 'To', ['MessageID', 'Recipient'], rename1='From', rename2='Recipient') Columns to be expanded upon should be either atomic values or dictionaries of dictionaries. For example: Input Data: +-----------------+-------------------------------------------------------------------+ | col1 (Atomic) | col2 (Dict of Dict) | +=================+===================================================================+ | value1 | {valueA : {attr1: X1, attr2: Y1}, valueB: {attr1: X2, attr2: Y2} | +-----------------+-------------------------------------------------------------------+ | value2 | {valueC : {attr1: X3, attr2: Y3}, valueD: {attr1: X4, attr2: Y4} | +-----------------+-------------------------------------------------------------------+ Output Data: +---------------+---------------+-------+-------+ | col1_extended | col2_extended | attr1 | attr2 | +===============+===============+=======+=======+ | value1 | valueA | X1 | Y1 | +---------------+---------------+-------+-------+ | value1 | valueB | X2 | Y2 | +---------------+---------------+-------+-------+ | value2 | valueA | X3 | Y3 | +---------------+---------------+-------+-------+ | value2 | valueB | X4 | Y4 | +---------------+---------------+-------+-------+ :param str col1: The first column to expand on. May be an atomic value, or a dict of dict. :param str col2: The second column to expand on. May be an atomic value, or a dict of dict. :param str rename1: The name for col1 after expansion. Defaults to col1_extended. :param str rename2: The name for col2 after expansion. Defaults to col2_extended. :param list drop: Column names to be dropped from output. 
:param bool drop_collections: Should columns with compound values be dropped? :return: pandas.DataFrame """ # Assumption 1: Expanded columns are either atomic are built in collections # Assumption 2: New test_data columns added to rows from dicts in columns of collections. # How many rows expected in the output? count = len(self._data) # How often should the progress bar be updated? update_interval = max(min(count//100, 100), 5) # What are the column names? column_list = list(self._data.columns) # Determine column index (for itertuples) try: col1_index = column_list.index(col1) except ValueError: warnings.warn('Could not find "{}" in columns.'.format(col1)) raise try: col2_index = column_list.index(col2) except ValueError: warnings.warn('Could not find "{}" in columns.'.format(col2)) raise # Standardize the order of the specified columns first_index = min(col1_index, col2_index) second_index = max(col1_index, col2_index) first_name = column_list[first_index] second_name = column_list[second_index] first_rename = rename1 if first_index == col1_index else rename2 second_rename = rename2 if first_index == col1_index else rename1 # New column names: new_column_list = column_list[:first_index] + \ [first_name+'_extended' if first_rename is None else first_rename] + \ column_list[first_index+1:second_index] + \ [second_name+'_extended' if second_rename is None else second_rename] + \ column_list[second_index+1:] # Assert that there are no duplicates! if len(set(new_column_list)) != len(new_column_list): raise Exception('Duplicate columns names found. Note that you cannot rename a column with a name ' 'that is already taken by another column.') # List of tuples. Rows in new test_data frame. old_attr_df_tuples = [] new_attr_df_dicts = [] # MultiIndex tuples index_tuples = [] def iter_product(item1,item2): """ Enumerates possible combinations of items from item1 and item 2. Allows atomic values. :param item1: Any :param item2: Any :return: A list of tuples. 
""" if hasattr(item1, '__iter__') and type(item1) != str: iter1 = item1 else: iter1 = [item1] if hasattr(item2, '__iter__') and type(item2) != str: iter2 = item2 else: iter2 = [item2] return it.product(iter1,iter2) # Create test_data for output. with tqdm.tqdm(total=count) as pbar: for row in self._data.itertuples(index=False): # Enumerate commit/file pairs for index in iter_product(row[first_index],row[second_index]): new_row = row[:first_index] + \ (index[0],) + \ row[first_index+1:second_index] + \ (index[1],) + \ row[second_index+1:] # Add new row to list of row tuples old_attr_df_tuples.append(new_row) # Add key tuple to list of indices index_tuples.append((index[0],index[1])) # If there's test_data in either of the columns add the test_data to the new attr test_data frame. temp_attrs = {} # Get a copy of the first cell value for this index. # If it's a dict, get the appropriate entry. temp_first = row[first_index] if type(temp_first) == dict: temp_first = temp_first[index[0]] temp_second = row[second_index] if type(temp_second) == dict: temp_second = temp_second[index[1]] # Get nested test_data for this index. if type(temp_first) == dict: for k in temp_first: temp_attrs[first_name + '/' + k] = temp_first[k] if type(temp_second) == dict: for k in temp_second: temp_attrs[second_name + '/' + k] = temp_second[k] # Add to the "new test_data" records. 
new_attr_df_dicts.append(temp_attrs) # Update progress bar pbar.update(update_interval) # An expanded test_data frame with only the columns of the original test_data frame df_1 = pd.DataFrame.from_records(old_attr_df_tuples, columns=new_column_list) # An expanded test_data frame containing any test_data held in value:key collections in the expanded cols df_2 = pd.DataFrame.from_records(new_attr_df_dicts) # The final expanded test_data set df_out = pd.concat([df_1, df_2], axis=1) # Set new index # index_cols has been depracated # df_out = df_out.set_index(index_cols) # Drop unwanted columns for col in drop: if col in df_out.columns: df_out = df_out.drop(col,1) if drop_collections is True: df_out = self._drop_collections(df_out) return df_out
[ "def", "expand_on", "(", "self", ",", "col1", ",", "col2", ",", "rename1", "=", "None", ",", "rename2", "=", "None", ",", "drop", "=", "[", "]", ",", "drop_collections", "=", "False", ")", ":", "# Assumption 1: Expanded columns are either atomic are built in col...
Returns a reshaped version of extractor's data, where unique combinations of values from col1 and col2 are given individual rows. Example function call from ``tidymbox``: .. code-block:: python self.expand_on('From', 'To', ['MessageID', 'Recipient'], rename1='From', rename2='Recipient') Columns to be expanded upon should be either atomic values or dictionaries of dictionaries. For example: Input Data: +-----------------+-------------------------------------------------------------------+ | col1 (Atomic) | col2 (Dict of Dict) | +=================+===================================================================+ | value1 | {valueA : {attr1: X1, attr2: Y1}, valueB: {attr1: X2, attr2: Y2} | +-----------------+-------------------------------------------------------------------+ | value2 | {valueC : {attr1: X3, attr2: Y3}, valueD: {attr1: X4, attr2: Y4} | +-----------------+-------------------------------------------------------------------+ Output Data: +---------------+---------------+-------+-------+ | col1_extended | col2_extended | attr1 | attr2 | +===============+===============+=======+=======+ | value1 | valueA | X1 | Y1 | +---------------+---------------+-------+-------+ | value1 | valueB | X2 | Y2 | +---------------+---------------+-------+-------+ | value2 | valueA | X3 | Y3 | +---------------+---------------+-------+-------+ | value2 | valueB | X4 | Y4 | +---------------+---------------+-------+-------+ :param str col1: The first column to expand on. May be an atomic value, or a dict of dict. :param str col2: The second column to expand on. May be an atomic value, or a dict of dict. :param str rename1: The name for col1 after expansion. Defaults to col1_extended. :param str rename2: The name for col2 after expansion. Defaults to col2_extended. :param list drop: Column names to be dropped from output. :param bool drop_collections: Should columns with compound values be dropped? :return: pandas.DataFrame
[ "Returns", "a", "reshaped", "version", "of", "extractor", "s", "data", "where", "unique", "combinations", "of", "values", "from", "col1", "and", "col2", "are", "given", "individual", "rows", "." ]
python
train
toumorokoshi/sprinter
sprinter/core/manifest.py
https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/core/manifest.py#L196-L206
def get_context_dict(self): """ return a context dict of the desired state """ context_dict = {} for s in self.sections(): for k, v in self.manifest.items(s): context_dict["%s:%s" % (s, k)] = v for k, v in self.inputs.values().items(): context_dict["config:{0}".format(k)] = v context_dict.update(self.additional_context_variables.items()) context_dict.update(dict([("%s|escaped" % k, re.escape(str(v) or "")) for k, v in context_dict.items()])) return context_dict
[ "def", "get_context_dict", "(", "self", ")", ":", "context_dict", "=", "{", "}", "for", "s", "in", "self", ".", "sections", "(", ")", ":", "for", "k", ",", "v", "in", "self", ".", "manifest", ".", "items", "(", "s", ")", ":", "context_dict", "[", ...
return a context dict of the desired state
[ "return", "a", "context", "dict", "of", "the", "desired", "state" ]
python
train
deschler/django-modeltranslation
modeltranslation/translator.py
https://github.com/deschler/django-modeltranslation/blob/18fec04a5105cbd83fc3759f4fda20135b3a848c/modeltranslation/translator.py#L196-L220
def add_manager(model): """ Monkey patches the original model to use MultilingualManager instead of default managers (not only ``objects``, but also every manager defined and inherited). Custom managers are merged with MultilingualManager. """ if model._meta.abstract: return # Make all managers local for this model to fix patching parent model managers model._meta.local_managers = model._meta.managers for current_manager in model._meta.local_managers: prev_class = current_manager.__class__ patch_manager_class(current_manager) if model._default_manager.__class__ is prev_class: # Normally model._default_manager is a reference to one of model's managers # (and would be patched by the way). # However, in some rare situations (mostly proxy models) # model._default_manager is not the same instance as one of managers, but it # share the same class. model._default_manager.__class__ = current_manager.__class__ patch_manager_class(model._base_manager) model._meta.base_manager_name = 'objects' model._meta._expire_cache()
[ "def", "add_manager", "(", "model", ")", ":", "if", "model", ".", "_meta", ".", "abstract", ":", "return", "# Make all managers local for this model to fix patching parent model managers", "model", ".", "_meta", ".", "local_managers", "=", "model", ".", "_meta", ".", ...
Monkey patches the original model to use MultilingualManager instead of default managers (not only ``objects``, but also every manager defined and inherited). Custom managers are merged with MultilingualManager.
[ "Monkey", "patches", "the", "original", "model", "to", "use", "MultilingualManager", "instead", "of", "default", "managers", "(", "not", "only", "objects", "but", "also", "every", "manager", "defined", "and", "inherited", ")", "." ]
python
train
apache/incubator-mxnet
python/mxnet/ndarray/ndarray.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L946-L1067
def reshape(self, *shape, **kwargs): """Returns a **view** of this array with a new shape without altering any data. Parameters ---------- shape : tuple of int, or n ints The new shape should not change the array size, namely ``np.prod(new_shape)`` should be equal to ``np.prod(self.shape)``. Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: - ``0`` copy this dimension from the input to the output shape. Example:: - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions keeping the size of the new array same as that of the input array. At most one dimension of shape can be -1. Example:: - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) - input shape = (2,3,4), shape=(-1,), output shape = (24,) - ``-2`` copy all/remainder of the input dimensions to the output shape. Example:: - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. Example:: - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1). 
Example:: - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4) - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) - If the argument `reverse` is set to 1, then the special values are inferred from right to left. Example:: - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be \ (40,5). - with reverse=1, output shape will be (50,4). reverse : bool, default False If true then the special values are inferred from right to left. Only supported as keyword argument. Returns ------- NDArray An array with desired shape that shares data with this array. Examples -------- >>> x = mx.nd.arange(0,6).reshape(2,3) >>> x.asnumpy() array([[ 0., 1., 2.], [ 3., 4., 5.]], dtype=float32) >>> y = x.reshape(3,2) >>> y.asnumpy() array([[ 0., 1.], [ 2., 3.], [ 4., 5.]], dtype=float32) >>> y = x.reshape(3,-1) >>> y.asnumpy() array([[ 0., 1.], [ 2., 3.], [ 4., 5.]], dtype=float32) >>> y = x.reshape(3,2) >>> y.asnumpy() array([[ 0., 1.], [ 2., 3.], [ 4., 5.]], dtype=float32) >>> y = x.reshape(-3) >>> y.asnumpy() array([ 0. 1. 2. 3. 4. 5.], dtype=float32) >>> y[:] = -1 >>> x.asnumpy() array([[-1., -1., -1.], [-1., -1., -1.]], dtype=float32) """ if len(shape) == 1 and isinstance(shape[0], (list, tuple)): shape = shape[0] elif not shape: shape = kwargs.get('shape') assert shape, "Shape must be provided." if not all(k in ['shape', 'reverse'] for k in kwargs): raise TypeError( "Got unknown keywords in reshape: {}. " \ "Accepted keyword arguments are 'shape' and 'reverse'.".format( ', '.join([k for k in kwargs if k not in ['shape', 'reverse']]))) reverse = kwargs.get('reverse', False) handle = NDArrayHandle() # Actual reshape check_call(_LIB.MXNDArrayReshape64(self.handle, len(shape), c_array(ctypes.c_int64, shape), reverse, ctypes.byref(handle))) return NDArray(handle=handle, writable=self.writable)
[ "def", "reshape", "(", "self", ",", "*", "shape", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "shape", ")", "==", "1", "and", "isinstance", "(", "shape", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "shape", "=", "sh...
Returns a **view** of this array with a new shape without altering any data. Parameters ---------- shape : tuple of int, or n ints The new shape should not change the array size, namely ``np.prod(new_shape)`` should be equal to ``np.prod(self.shape)``. Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: - ``0`` copy this dimension from the input to the output shape. Example:: - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions keeping the size of the new array same as that of the input array. At most one dimension of shape can be -1. Example:: - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) - input shape = (2,3,4), shape=(-1,), output shape = (24,) - ``-2`` copy all/remainder of the input dimensions to the output shape. Example:: - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. Example:: - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1). 
Example:: - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4) - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) - If the argument `reverse` is set to 1, then the special values are inferred from right to left. Example:: - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be \ (40,5). - with reverse=1, output shape will be (50,4). reverse : bool, default False If true then the special values are inferred from right to left. Only supported as keyword argument. Returns ------- NDArray An array with desired shape that shares data with this array. Examples -------- >>> x = mx.nd.arange(0,6).reshape(2,3) >>> x.asnumpy() array([[ 0., 1., 2.], [ 3., 4., 5.]], dtype=float32) >>> y = x.reshape(3,2) >>> y.asnumpy() array([[ 0., 1.], [ 2., 3.], [ 4., 5.]], dtype=float32) >>> y = x.reshape(3,-1) >>> y.asnumpy() array([[ 0., 1.], [ 2., 3.], [ 4., 5.]], dtype=float32) >>> y = x.reshape(3,2) >>> y.asnumpy() array([[ 0., 1.], [ 2., 3.], [ 4., 5.]], dtype=float32) >>> y = x.reshape(-3) >>> y.asnumpy() array([ 0. 1. 2. 3. 4. 5.], dtype=float32) >>> y[:] = -1 >>> x.asnumpy() array([[-1., -1., -1.], [-1., -1., -1.]], dtype=float32)
[ "Returns", "a", "**", "view", "**", "of", "this", "array", "with", "a", "new", "shape", "without", "altering", "any", "data", "." ]
python
train
kashifrazzaqui/vyked
vyked/bus.py
https://github.com/kashifrazzaqui/vyked/blob/94d858e939cb9a723c3bfb5176a93b46ad3422e2/vyked/bus.py#L118-L137
def _request_sender(self, packet: dict): """ Sends a request to a server from a ServiceClient auto dispatch method called from self.send() """ node_id = self._get_node_id_for_packet(packet) client_protocol = self._client_protocols.get(node_id) if node_id and client_protocol: if client_protocol.is_connected(): packet['to'] = node_id client_protocol.send(packet) return True else: self._logger.error('Client protocol is not connected for packet %s', packet) raise ClientDisconnected() else: # No node found to send request self._logger.error('Out of %s, Client Not found for packet %s', self._client_protocols.keys(), packet) raise ClientNotFoundError()
[ "def", "_request_sender", "(", "self", ",", "packet", ":", "dict", ")", ":", "node_id", "=", "self", ".", "_get_node_id_for_packet", "(", "packet", ")", "client_protocol", "=", "self", ".", "_client_protocols", ".", "get", "(", "node_id", ")", "if", "node_id...
Sends a request to a server from a ServiceClient auto dispatch method called from self.send()
[ "Sends", "a", "request", "to", "a", "server", "from", "a", "ServiceClient", "auto", "dispatch", "method", "called", "from", "self", ".", "send", "()" ]
python
train
vimalloc/flask-jwt-extended
flask_jwt_extended/jwt_manager.py
https://github.com/vimalloc/flask-jwt-extended/blob/569d3b89eb5d2586d0cff4581a346229c623cefc/flask_jwt_extended/jwt_manager.py#L150-L218
def _set_default_configuration_options(app): """ Sets the default configuration options used by this extension """ # Where to look for the JWT. Available options are cookies or headers app.config.setdefault('JWT_TOKEN_LOCATION', ('headers',)) # Options for JWTs when the TOKEN_LOCATION is headers app.config.setdefault('JWT_HEADER_NAME', 'Authorization') app.config.setdefault('JWT_HEADER_TYPE', 'Bearer') # Options for JWTs then the TOKEN_LOCATION is query_string app.config.setdefault('JWT_QUERY_STRING_NAME', 'jwt') # Option for JWTs when the TOKEN_LOCATION is cookies app.config.setdefault('JWT_ACCESS_COOKIE_NAME', 'access_token_cookie') app.config.setdefault('JWT_REFRESH_COOKIE_NAME', 'refresh_token_cookie') app.config.setdefault('JWT_ACCESS_COOKIE_PATH', '/') app.config.setdefault('JWT_REFRESH_COOKIE_PATH', '/') app.config.setdefault('JWT_COOKIE_SECURE', False) app.config.setdefault('JWT_COOKIE_DOMAIN', None) app.config.setdefault('JWT_SESSION_COOKIE', True) app.config.setdefault('JWT_COOKIE_SAMESITE', None) # Option for JWTs when the TOKEN_LOCATION is json app.config.setdefault('JWT_JSON_KEY', 'access_token') app.config.setdefault('JWT_REFRESH_JSON_KEY', 'refresh_token') # Options for using double submit csrf protection app.config.setdefault('JWT_COOKIE_CSRF_PROTECT', True) app.config.setdefault('JWT_CSRF_METHODS', ['POST', 'PUT', 'PATCH', 'DELETE']) app.config.setdefault('JWT_ACCESS_CSRF_HEADER_NAME', 'X-CSRF-TOKEN') app.config.setdefault('JWT_REFRESH_CSRF_HEADER_NAME', 'X-CSRF-TOKEN') app.config.setdefault('JWT_CSRF_IN_COOKIES', True) app.config.setdefault('JWT_ACCESS_CSRF_COOKIE_NAME', 'csrf_access_token') app.config.setdefault('JWT_REFRESH_CSRF_COOKIE_NAME', 'csrf_refresh_token') app.config.setdefault('JWT_ACCESS_CSRF_COOKIE_PATH', '/') app.config.setdefault('JWT_REFRESH_CSRF_COOKIE_PATH', '/') # How long an a token will live before they expire. 
app.config.setdefault('JWT_ACCESS_TOKEN_EXPIRES', datetime.timedelta(minutes=15)) app.config.setdefault('JWT_REFRESH_TOKEN_EXPIRES', datetime.timedelta(days=30)) # What algorithm to use to sign the token. See here for a list of options: # https://github.com/jpadilla/pyjwt/blob/master/jwt/api_jwt.py app.config.setdefault('JWT_ALGORITHM', 'HS256') # Secret key to sign JWTs with. Only used if a symmetric algorithm is # used (such as the HS* algorithms). We will use the app secret key # if this is not set. app.config.setdefault('JWT_SECRET_KEY', None) # Keys to sign JWTs with when use when using an asymmetric # (public/private key) algorithm, such as RS* or EC* app.config.setdefault('JWT_PRIVATE_KEY', None) app.config.setdefault('JWT_PUBLIC_KEY', None) # Options for blacklisting/revoking tokens app.config.setdefault('JWT_BLACKLIST_ENABLED', False) app.config.setdefault('JWT_BLACKLIST_TOKEN_CHECKS', ('access', 'refresh')) app.config.setdefault('JWT_IDENTITY_CLAIM', 'identity') app.config.setdefault('JWT_USER_CLAIMS', 'user_claims') app.config.setdefault('JWT_DECODE_AUDIENCE', None) app.config.setdefault('JWT_DECODE_LEEWAY', 0) app.config.setdefault('JWT_CLAIMS_IN_REFRESH_TOKEN', False) app.config.setdefault('JWT_ERROR_MESSAGE_KEY', 'msg')
[ "def", "_set_default_configuration_options", "(", "app", ")", ":", "# Where to look for the JWT. Available options are cookies or headers", "app", ".", "config", ".", "setdefault", "(", "'JWT_TOKEN_LOCATION'", ",", "(", "'headers'", ",", ")", ")", "# Options for JWTs when the...
Sets the default configuration options used by this extension
[ "Sets", "the", "default", "configuration", "options", "used", "by", "this", "extension" ]
python
train
jobovy/galpy
galpy/actionAngle/actionAngleStaeckel.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/actionAngle/actionAngleStaeckel.py#L94-L192
def _evaluate(self,*args,**kwargs): """ NAME: __call__ (_evaluate) PURPOSE: evaluate the actions (jr,lz,jz) INPUT: Either: a) R,vR,vT,z,vz[,phi]: 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity) 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity) b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument delta= (object-wide default) can be used to override the object-wide focal length; can also be an array with length N to allow different delta for different phase-space points u0= (None) if object-wide option useu0 is set, u0 to use (if useu0 and useu0 is None, a good value will be computed) c= (object-wide default, bool) True/False to override the object-wide setting for whether or not to use the C implementation order= (object-wide default, int) number of points to use in the Gauss-Legendre numerical integration of the relevant action integrals When not using C: fixed_quad= (False) if True, use Gaussian quadrature (scipy.integrate.fixed_quad instead of scipy.integrate.quad) scipy.integrate.fixed_quad or .quad keywords OUTPUT: (jr,lz,jz) HISTORY: 2012-11-27 - Written - Bovy (IAS) 2017-12-27 - Allowed individual delta for each point - Bovy (UofT) """ delta= kwargs.pop('delta',self._delta) order= kwargs.get('order',self._order) if ((self._c and not ('c' in kwargs and not kwargs['c']))\ or (ext_loaded and (('c' in kwargs and kwargs['c'])))) \ and _check_c(self._pot): if len(args) == 5: #R,vR.vT, z, vz R,vR,vT, z, vz= args elif len(args) == 6: #R,vR.vT, z, vz, phi R,vR,vT, z, vz, phi= args else: self._parse_eval_args(*args) R= self._eval_R vR= self._eval_vR vT= self._eval_vT z= self._eval_z vz= self._eval_vz if isinstance(R,float): R= nu.array([R]) vR= nu.array([vR]) vT= nu.array([vT]) z= nu.array([z]) vz= nu.array([vz]) Lz= R*vT if self._useu0: #First calculate u0 if 'u0' in kwargs: u0= nu.asarray(kwargs['u0']) else: E= 
nu.array([_evaluatePotentials(self._pot,R[ii],z[ii]) +vR[ii]**2./2.+vz[ii]**2./2.+vT[ii]**2./2. for ii in range(len(R))]) u0= actionAngleStaeckel_c.actionAngleStaeckel_calcu0(\ E,Lz,self._pot,delta)[0] kwargs.pop('u0',None) else: u0= None jr, jz, err= actionAngleStaeckel_c.actionAngleStaeckel_c(\ self._pot,delta,R,vR,vT,z,vz,u0=u0,order=order) if err == 0: return (jr,Lz,jz) else: #pragma: no cover raise RuntimeError("C-code for calculation actions failed; try with c=False") else: if 'c' in kwargs and kwargs['c'] and not self._c: #pragma: no cover warnings.warn("C module not used because potential does not have a C implementation",galpyWarning) kwargs.pop('c',None) if (len(args) == 5 or len(args) == 6) \ and isinstance(args[0],nu.ndarray): ojr= nu.zeros((len(args[0]))) olz= nu.zeros((len(args[0]))) ojz= nu.zeros((len(args[0]))) for ii in range(len(args[0])): if len(args) == 5: targs= (args[0][ii],args[1][ii],args[2][ii], args[3][ii],args[4][ii]) elif len(args) == 6: targs= (args[0][ii],args[1][ii],args[2][ii], args[3][ii],args[4][ii],args[5][ii]) tkwargs= copy.copy(kwargs) try: tkwargs['delta']= delta[ii] except TypeError: tkwargs['delta']= delta tjr,tlz,tjz= self(*targs,**tkwargs) ojr[ii]= tjr ojz[ii]= tjz olz[ii]= tlz return (ojr,olz,ojz) else: #Set up the actionAngleStaeckelSingle object aASingle= actionAngleStaeckelSingle(*args,pot=self._pot, delta=delta) return (aASingle.JR(**copy.copy(kwargs)), aASingle._R*aASingle._vT, aASingle.Jz(**copy.copy(kwargs)))
[ "def", "_evaluate", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "delta", "=", "kwargs", ".", "pop", "(", "'delta'", ",", "self", ".", "_delta", ")", "order", "=", "kwargs", ".", "get", "(", "'order'", ",", "self", ".", "_ord...
NAME: __call__ (_evaluate) PURPOSE: evaluate the actions (jr,lz,jz) INPUT: Either: a) R,vR,vT,z,vz[,phi]: 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity) 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity) b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument delta= (object-wide default) can be used to override the object-wide focal length; can also be an array with length N to allow different delta for different phase-space points u0= (None) if object-wide option useu0 is set, u0 to use (if useu0 and useu0 is None, a good value will be computed) c= (object-wide default, bool) True/False to override the object-wide setting for whether or not to use the C implementation order= (object-wide default, int) number of points to use in the Gauss-Legendre numerical integration of the relevant action integrals When not using C: fixed_quad= (False) if True, use Gaussian quadrature (scipy.integrate.fixed_quad instead of scipy.integrate.quad) scipy.integrate.fixed_quad or .quad keywords OUTPUT: (jr,lz,jz) HISTORY: 2012-11-27 - Written - Bovy (IAS) 2017-12-27 - Allowed individual delta for each point - Bovy (UofT)
[ "NAME", ":", "__call__", "(", "_evaluate", ")", "PURPOSE", ":", "evaluate", "the", "actions", "(", "jr", "lz", "jz", ")", "INPUT", ":", "Either", ":", "a", ")", "R", "vR", "vT", "z", "vz", "[", "phi", "]", ":", "1", ")", "floats", ":", "phase", ...
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/DireitoGrupoEquipamento.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/DireitoGrupoEquipamento.py#L170-L209
def inserir( self, id_grupo_usuario, id_grupo_equipamento, leitura, escrita, alterar_config, exclusao): """Cria um novo direito de um grupo de usuário em um grupo de equipamento e retorna o seu identificador. :param id_grupo_usuario: Identificador do grupo de usuário. :param id_grupo_equipamento: Identificador do grupo de equipamento. :param leitura: Indicação de permissão de leitura ('0' ou '1'). :param escrita: Indicação de permissão de escrita ('0' ou '1'). :param alterar_config: Indicação de permissão de alterar_config ('0' ou '1'). :param exclusao: Indicação de permissão de exclusão ('0' ou '1'). :return: Dicionário com a seguinte estrutura: {'direito_grupo_equipamento': {'id': < id>}} :raise InvalidParameterError: Pelo menos um dos parâmetros é nulo ou inválido. :raise GrupoEquipamentoNaoExisteError: Grupo de Equipamento não cadastrado. :raise GrupoUsuarioNaoExisteError: Grupo de Usuário não cadastrado. :raise ValorIndicacaoDireitoInvalidoError: Valor de leitura, escrita, alterar_config e/ou exclusão inválido. :raise DireitoGrupoEquipamentoDuplicadoError: Já existe direitos cadastrados para o grupo de usuário e grupo de equipamento informados. :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao ler o XML de requisição ou gerar o XML de resposta. """ direito_map = dict() direito_map['id_grupo_usuario'] = id_grupo_usuario direito_map['id_grupo_equipamento'] = id_grupo_equipamento direito_map['leitura'] = leitura direito_map['escrita'] = escrita direito_map['alterar_config'] = alterar_config direito_map['exclusao'] = exclusao code, xml = self.submit( {'direito_grupo_equipamento': direito_map}, 'POST', 'direitosgrupoequipamento/') return self.response(code, xml)
[ "def", "inserir", "(", "self", ",", "id_grupo_usuario", ",", "id_grupo_equipamento", ",", "leitura", ",", "escrita", ",", "alterar_config", ",", "exclusao", ")", ":", "direito_map", "=", "dict", "(", ")", "direito_map", "[", "'id_grupo_usuario'", "]", "=", "id...
Cria um novo direito de um grupo de usuário em um grupo de equipamento e retorna o seu identificador. :param id_grupo_usuario: Identificador do grupo de usuário. :param id_grupo_equipamento: Identificador do grupo de equipamento. :param leitura: Indicação de permissão de leitura ('0' ou '1'). :param escrita: Indicação de permissão de escrita ('0' ou '1'). :param alterar_config: Indicação de permissão de alterar_config ('0' ou '1'). :param exclusao: Indicação de permissão de exclusão ('0' ou '1'). :return: Dicionário com a seguinte estrutura: {'direito_grupo_equipamento': {'id': < id>}} :raise InvalidParameterError: Pelo menos um dos parâmetros é nulo ou inválido. :raise GrupoEquipamentoNaoExisteError: Grupo de Equipamento não cadastrado. :raise GrupoUsuarioNaoExisteError: Grupo de Usuário não cadastrado. :raise ValorIndicacaoDireitoInvalidoError: Valor de leitura, escrita, alterar_config e/ou exclusão inválido. :raise DireitoGrupoEquipamentoDuplicadoError: Já existe direitos cadastrados para o grupo de usuário e grupo de equipamento informados. :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao ler o XML de requisição ou gerar o XML de resposta.
[ "Cria", "um", "novo", "direito", "de", "um", "grupo", "de", "usuário", "em", "um", "grupo", "de", "equipamento", "e", "retorna", "o", "seu", "identificador", "." ]
python
train
mk-fg/graphite-metrics
graphite_metrics/collectors/__init__.py
https://github.com/mk-fg/graphite-metrics/blob/f0ba28d1ed000b2316d3c403206eba78dd7b4c50/graphite_metrics/collectors/__init__.py#L24-L38
def rate_limit(max_interval=20, sampling=3, f=lambda x: x): '''x rises by 1 from 0 on each iteraton, back to 0 on triggering. f(x) should rise up to f(max_interval) in some way (with default "f(x)=x" probability rises lineary with 100% chance on "x=max_interval"). "sampling" affect probablility in an "c=1-(1-c0)*(1-c1)*...*(1-cx)" exponential way.''' from random import random val = 0 val_max = float(f(max_interval)) while True: if val % sampling == 0: trigger = random() > (val_max - f(val)) / val_max if trigger: val = 0 yield trigger else: yield False val += 1
[ "def", "rate_limit", "(", "max_interval", "=", "20", ",", "sampling", "=", "3", ",", "f", "=", "lambda", "x", ":", "x", ")", ":", "from", "random", "import", "random", "val", "=", "0", "val_max", "=", "float", "(", "f", "(", "max_interval", ")", ")...
x rises by 1 from 0 on each iteraton, back to 0 on triggering. f(x) should rise up to f(max_interval) in some way (with default "f(x)=x" probability rises lineary with 100% chance on "x=max_interval"). "sampling" affect probablility in an "c=1-(1-c0)*(1-c1)*...*(1-cx)" exponential way.
[ "x", "rises", "by", "1", "from", "0", "on", "each", "iteraton", "back", "to", "0", "on", "triggering", ".", "f", "(", "x", ")", "should", "rise", "up", "to", "f", "(", "max_interval", ")", "in", "some", "way", "(", "with", "default", "f", "(", "x...
python
train
foremast/foremast
src/foremast/elb/format_listeners.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/elb/format_listeners.py#L131-L161
def format_cert_name(env='', account='', region='', certificate=None): """Format the SSL certificate name into ARN for ELB. Args: env (str): Account environment name account (str): Account number for ARN region (str): AWS Region. certificate (str): Name of SSL certificate Returns: str: Fully qualified ARN for SSL certificate None: Certificate is not desired """ cert_name = None if certificate: if certificate.startswith('arn'): LOG.info("Full ARN provided...skipping lookup.") cert_name = certificate else: generated_cert_name = generate_custom_cert_name(env, region, account, certificate) if generated_cert_name: LOG.info("Found generated certificate %s from template", generated_cert_name) cert_name = generated_cert_name else: LOG.info("Using default certificate name logic") cert_name = ('arn:aws:iam::{account}:server-certificate/{name}'.format( account=account, name=certificate)) LOG.debug('Certificate name: %s', cert_name) return cert_name
[ "def", "format_cert_name", "(", "env", "=", "''", ",", "account", "=", "''", ",", "region", "=", "''", ",", "certificate", "=", "None", ")", ":", "cert_name", "=", "None", "if", "certificate", ":", "if", "certificate", ".", "startswith", "(", "'arn'", ...
Format the SSL certificate name into ARN for ELB. Args: env (str): Account environment name account (str): Account number for ARN region (str): AWS Region. certificate (str): Name of SSL certificate Returns: str: Fully qualified ARN for SSL certificate None: Certificate is not desired
[ "Format", "the", "SSL", "certificate", "name", "into", "ARN", "for", "ELB", "." ]
python
train
googlefonts/glyphsLib
Lib/glyphsLib/builder/guidelines.py
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/guidelines.py#L28-L63
def to_ufo_guidelines(self, ufo_obj, glyphs_obj): """Set guidelines.""" guidelines = glyphs_obj.guides if not guidelines: return new_guidelines = [] for guideline in guidelines: new_guideline = {} x, y = guideline.position angle = guideline.angle % 360 if _is_vertical(x, y, angle): new_guideline["x"] = x elif _is_horizontal(x, y, angle): new_guideline["y"] = y else: new_guideline["x"] = x new_guideline["y"] = y new_guideline["angle"] = angle name = guideline.name if name is not None: # Identifier m = IDENTIFIER_NAME_RE.match(name) if m: new_guideline["identifier"] = m.group(2) name = name[: -len(m.group(1))] # Color m = COLOR_NAME_RE.match(name) if m: new_guideline["color"] = m.group(2) name = name[: -len(m.group(1))] if guideline.locked: name = (name or "") + LOCKED_NAME_SUFFIX if name: new_guideline["name"] = name new_guidelines.append(new_guideline) ufo_obj.guidelines = new_guidelines
[ "def", "to_ufo_guidelines", "(", "self", ",", "ufo_obj", ",", "glyphs_obj", ")", ":", "guidelines", "=", "glyphs_obj", ".", "guides", "if", "not", "guidelines", ":", "return", "new_guidelines", "=", "[", "]", "for", "guideline", "in", "guidelines", ":", "new...
Set guidelines.
[ "Set", "guidelines", "." ]
python
train
PyPSA/PyPSA
pypsa/pf.py
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L495-L511
def apply_transformer_t_model(network): """Convert given T-model parameters to PI-model parameters using wye-delta transformation""" z_series = network.transformers.r_pu + 1j*network.transformers.x_pu y_shunt = network.transformers.g_pu + 1j*network.transformers.b_pu ts_b = (network.transformers.model == "t") & (y_shunt != 0.) if ts_b.zsum() == 0: return za,zb,zc = wye_to_delta(z_series.loc[ts_b]/2,z_series.loc[ts_b]/2,1/y_shunt.loc[ts_b]) network.transformers.loc[ts_b,"r_pu"] = zc.real network.transformers.loc[ts_b,"x_pu"] = zc.imag network.transformers.loc[ts_b,"g_pu"] = (2/za).real network.transformers.loc[ts_b,"b_pu"] = (2/za).imag
[ "def", "apply_transformer_t_model", "(", "network", ")", ":", "z_series", "=", "network", ".", "transformers", ".", "r_pu", "+", "1j", "*", "network", ".", "transformers", ".", "x_pu", "y_shunt", "=", "network", ".", "transformers", ".", "g_pu", "+", "1j", ...
Convert given T-model parameters to PI-model parameters using wye-delta transformation
[ "Convert", "given", "T", "-", "model", "parameters", "to", "PI", "-", "model", "parameters", "using", "wye", "-", "delta", "transformation" ]
python
train
tanghaibao/jcvi
jcvi/assembly/hic.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/hic.py#L634-L659
def mergemat(args): """ %prog mergemat *.npy Combine counts from multiple .npy data files. """ p = OptionParser(mergemat.__doc__) p.set_outfile(outfile="out") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) npyfiles = args A = np.load(npyfiles[0]) logging.debug("Load `{}`: matrix of shape {}:; sum={}" .format(npyfiles[0], A.shape, A.sum())) for npyfile in npyfiles[1:]: B = np.load(npyfile) A += B logging.debug("Load `{}`: sum={}" .format(npyfiles[0], A.sum())) pf = opts.outfile np.save(pf, A) logging.debug("Combined {} files into `{}.npy`".format(len(npyfiles), pf))
[ "def", "mergemat", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "mergemat", ".", "__doc__", ")", "p", ".", "set_outfile", "(", "outfile", "=", "\"out\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", ...
%prog mergemat *.npy Combine counts from multiple .npy data files.
[ "%prog", "mergemat", "*", ".", "npy" ]
python
train
zyga/python-glibc
pyglibc/selectors.py
https://github.com/zyga/python-glibc/blob/d6fdb306b123a995471584a5201155c60a34448a/pyglibc/selectors.py#L213-L227
def from_epoll_events(cls, epoll_events): """ Create a :class:`_EpollSelectorEvents` instance out of a bit mask using ``EPOLL*`` family of constants. """ self = cls() if epoll_events & select.EPOLLIN: self |= EVENT_READ if epoll_events & select.EPOLLOUT: self |= EVENT_WRITE # Treat EPOLLHUP specially, as both 'read and write ready' so that on # the outside this can be interpreted as EOF if epoll_events & select.EPOLLHUP: self |= EVENT_READ | EVENT_WRITE return self
[ "def", "from_epoll_events", "(", "cls", ",", "epoll_events", ")", ":", "self", "=", "cls", "(", ")", "if", "epoll_events", "&", "select", ".", "EPOLLIN", ":", "self", "|=", "EVENT_READ", "if", "epoll_events", "&", "select", ".", "EPOLLOUT", ":", "self", ...
Create a :class:`_EpollSelectorEvents` instance out of a bit mask using ``EPOLL*`` family of constants.
[ "Create", "a", ":", "class", ":", "_EpollSelectorEvents", "instance", "out", "of", "a", "bit", "mask", "using", "EPOLL", "*", "family", "of", "constants", "." ]
python
train
sangoma/pysensu
pysensu/api.py
https://github.com/sangoma/pysensu/blob/dc6799edbf2635247aec61fcf45b04ddec1beb49/pysensu/api.py#L133-L138
def delete_event(self, client, check): """ Resolves an event for a given check on a given client. (delayed action) """ self._request('DELETE', '/events/{}/{}'.format(client, check)) return True
[ "def", "delete_event", "(", "self", ",", "client", ",", "check", ")", ":", "self", ".", "_request", "(", "'DELETE'", ",", "'/events/{}/{}'", ".", "format", "(", "client", ",", "check", ")", ")", "return", "True" ]
Resolves an event for a given check on a given client. (delayed action)
[ "Resolves", "an", "event", "for", "a", "given", "check", "on", "a", "given", "client", ".", "(", "delayed", "action", ")" ]
python
train
asweigart/pyautogui
pyautogui/_pyautogui_osx.py
https://github.com/asweigart/pyautogui/blob/77524bd47334a89024013fd48e05151c3ac9289a/pyautogui/_pyautogui_osx.py#L264-L285
def _specialKeyEvent(key, upDown): """ Helper method for special keys. Source: http://stackoverflow.com/questions/11045814/emulate-media-key-press-on-mac """ assert upDown in ('up', 'down'), "upDown argument must be 'up' or 'down'" key_code = special_key_translate_table[key] ev = AppKit.NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_( Quartz.NSSystemDefined, # type (0,0), # location 0xa00 if upDown == 'down' else 0xb00, # flags 0, # timestamp 0, # window 0, # ctx 8, # subtype (key_code << 16) | ((0xa if upDown == 'down' else 0xb) << 8), # data1 -1 # data2 ) Quartz.CGEventPost(0, ev.CGEvent())
[ "def", "_specialKeyEvent", "(", "key", ",", "upDown", ")", ":", "assert", "upDown", "in", "(", "'up'", ",", "'down'", ")", ",", "\"upDown argument must be 'up' or 'down'\"", "key_code", "=", "special_key_translate_table", "[", "key", "]", "ev", "=", "AppKit", "....
Helper method for special keys. Source: http://stackoverflow.com/questions/11045814/emulate-media-key-press-on-mac
[ "Helper", "method", "for", "special", "keys", "." ]
python
train
jkitzes/macroeco
macroeco/compare/_compare.py
https://github.com/jkitzes/macroeco/blob/ee5fac5560a2d64de3a64738b5bc6833e2d7ff2e/macroeco/compare/_compare.py#L281-L303
def sum_of_squares(obs, pred): """ Sum of squares between observed and predicted data Parameters ---------- obs : iterable Observed data pred : iterable Predicted data Returns ------- float Sum of squares Notes ----- The length of observed and predicted data must match. """ return np.sum((np.array(obs) - np.array(pred)) ** 2)
[ "def", "sum_of_squares", "(", "obs", ",", "pred", ")", ":", "return", "np", ".", "sum", "(", "(", "np", ".", "array", "(", "obs", ")", "-", "np", ".", "array", "(", "pred", ")", ")", "**", "2", ")" ]
Sum of squares between observed and predicted data Parameters ---------- obs : iterable Observed data pred : iterable Predicted data Returns ------- float Sum of squares Notes ----- The length of observed and predicted data must match.
[ "Sum", "of", "squares", "between", "observed", "and", "predicted", "data" ]
python
train
googleads/googleads-python-lib
googleads/adwords.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/googleads/adwords.py#L2416-L2451
def NextPage(self, page=None): """Sets the LIMIT clause of the AWQL to the next page. This method is meant to be used with HasNext(). When using DataService, page is needed, as its paging mechanism is different from other services. For details, see https://developers.google.com/adwords/api/docs/guides/bid-landscapes#paging_through_results. Args: page: An optional dict-like page returned in an API response, where the type depends on the configured SOAP client. The page contains the 'totalNumEntries' key whose value represents the total number of results from making the query to the AdWords API services. This page is required when using this method with DataService. Returns: This service query object. Raises: ValueError: If the start index of this object is None, meaning that the LIMIT clause hasn't been set before. """ if self._start_index is None: raise ValueError('Cannot page through query with no LIMIT clause.') # DataService has a different paging mechanism, resulting in different # method of determining if there is still a page left. page_size = None if (page and self._PAGE_TYPE in page and page[self._PAGE_TYPE] in self._BID_LANDSCAPE_PAGES): page_size = sum([len(bid_landscape[self._LANDSCAPE_POINTS]) for bid_landscape in page[self._ENTRIES]]) increment = page_size or self._page_size self._start_index += increment return self
[ "def", "NextPage", "(", "self", ",", "page", "=", "None", ")", ":", "if", "self", ".", "_start_index", "is", "None", ":", "raise", "ValueError", "(", "'Cannot page through query with no LIMIT clause.'", ")", "# DataService has a different paging mechanism, resulting in di...
Sets the LIMIT clause of the AWQL to the next page. This method is meant to be used with HasNext(). When using DataService, page is needed, as its paging mechanism is different from other services. For details, see https://developers.google.com/adwords/api/docs/guides/bid-landscapes#paging_through_results. Args: page: An optional dict-like page returned in an API response, where the type depends on the configured SOAP client. The page contains the 'totalNumEntries' key whose value represents the total number of results from making the query to the AdWords API services. This page is required when using this method with DataService. Returns: This service query object. Raises: ValueError: If the start index of this object is None, meaning that the LIMIT clause hasn't been set before.
[ "Sets", "the", "LIMIT", "clause", "of", "the", "AWQL", "to", "the", "next", "page", "." ]
python
train
mangalam-research/selenic
selenic/util.py
https://github.com/mangalam-research/selenic/blob/2284c68e15fa3d34b88aa2eec1a2e8ecd37f44ad/selenic/util.py#L401-L409
def wait_until_not(self, condition): """ Waits for a condition to be false. :param condition: Should be a callable that operates in the same way ``WebDriverWait.until_not`` expects. :returns: Whatever ``WebDriverWait.until_not`` returns. """ return WebDriverWait(self.driver, self.timeout).until_not(condition)
[ "def", "wait_until_not", "(", "self", ",", "condition", ")", ":", "return", "WebDriverWait", "(", "self", ".", "driver", ",", "self", ".", "timeout", ")", ".", "until_not", "(", "condition", ")" ]
Waits for a condition to be false. :param condition: Should be a callable that operates in the same way ``WebDriverWait.until_not`` expects. :returns: Whatever ``WebDriverWait.until_not`` returns.
[ "Waits", "for", "a", "condition", "to", "be", "false", "." ]
python
train
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/req.py
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/req.py#L647-L659
def check_if_exists(self): """Find an installed distribution that satisfies or conflicts with this requirement, and set self.satisfied_by or self.conflicts_with appropriately.""" if self.req is None: return False try: self.satisfied_by = pkg_resources.get_distribution(self.req) except pkg_resources.DistributionNotFound: return False except pkg_resources.VersionConflict: self.conflicts_with = pkg_resources.get_distribution(self.req.project_name) return True
[ "def", "check_if_exists", "(", "self", ")", ":", "if", "self", ".", "req", "is", "None", ":", "return", "False", "try", ":", "self", ".", "satisfied_by", "=", "pkg_resources", ".", "get_distribution", "(", "self", ".", "req", ")", "except", "pkg_resources"...
Find an installed distribution that satisfies or conflicts with this requirement, and set self.satisfied_by or self.conflicts_with appropriately.
[ "Find", "an", "installed", "distribution", "that", "satisfies", "or", "conflicts", "with", "this", "requirement", "and", "set", "self", ".", "satisfied_by", "or", "self", ".", "conflicts_with", "appropriately", "." ]
python
train
h2oai/h2o-3
h2o-py/h2o/frame.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2409-L2426
def lstrip(self, set=" "): """ Return a copy of the column with leading characters removed. The set argument is a string specifying the set of characters to be removed. If omitted, the set argument defaults to removing whitespace. :param character set: The set of characters to lstrip from strings in column. :returns: a new H2OFrame with the same shape as the original frame and having all its values trimmed from the left (equivalent of Python's ``str.lstrip()``). """ # work w/ None; parity with python lstrip if set is None: set = " " fr = H2OFrame._expr(expr=ExprNode("lstrip", self, set)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncol = self.ncol return fr
[ "def", "lstrip", "(", "self", ",", "set", "=", "\" \"", ")", ":", "# work w/ None; parity with python lstrip", "if", "set", "is", "None", ":", "set", "=", "\" \"", "fr", "=", "H2OFrame", ".", "_expr", "(", "expr", "=", "ExprNode", "(", "\"lstrip\"", ",", ...
Return a copy of the column with leading characters removed. The set argument is a string specifying the set of characters to be removed. If omitted, the set argument defaults to removing whitespace. :param character set: The set of characters to lstrip from strings in column. :returns: a new H2OFrame with the same shape as the original frame and having all its values trimmed from the left (equivalent of Python's ``str.lstrip()``).
[ "Return", "a", "copy", "of", "the", "column", "with", "leading", "characters", "removed", "." ]
python
test
mathandy/svgpathtools
svgpathtools/path.py
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/path.py#L2443-L2490
def area(self, chord_length=1e-4): """Find area enclosed by path. Approximates any Arc segments in the Path with lines approximately `chord_length` long, and returns the area enclosed by the approximated Path. Default chord length is 0.01. If Arc segments are included in path, to ensure accurate results, make sure this `chord_length` is set to a reasonable value (e.g. by checking curvature). Notes ----- * Negative area results from clockwise (as opposed to counter-clockwise) parameterization of the input Path. To Contributors --------------- This is one of many parts of `svgpathtools` that could be improved by a noble soul implementing a piecewise-linear approximation scheme for paths (one with controls to guarantee a desired accuracy). """ def area_without_arcs(path): area_enclosed = 0 for seg in path: x = real(seg.poly()) dy = imag(seg.poly()).deriv() integrand = x*dy integral = integrand.integ() area_enclosed += integral(1) - integral(0) return area_enclosed def seg2lines(seg): """Find piecewise-linear approximation of `seg`.""" num_lines = int(ceil(seg.length() / chord_length)) pts = [seg.point(t) for t in np.linspace(0, 1, num_lines+1)] return [Line(pts[i], pts[i+1]) for i in range(num_lines)] assert self.isclosed() bezier_path_approximation = [] for seg in self: if isinstance(seg, Arc): bezier_path_approximation += seg2lines(seg) else: bezier_path_approximation.append(seg) return area_without_arcs(Path(*bezier_path_approximation))
[ "def", "area", "(", "self", ",", "chord_length", "=", "1e-4", ")", ":", "def", "area_without_arcs", "(", "path", ")", ":", "area_enclosed", "=", "0", "for", "seg", "in", "path", ":", "x", "=", "real", "(", "seg", ".", "poly", "(", ")", ")", "dy", ...
Find area enclosed by path. Approximates any Arc segments in the Path with lines approximately `chord_length` long, and returns the area enclosed by the approximated Path. Default chord length is 0.01. If Arc segments are included in path, to ensure accurate results, make sure this `chord_length` is set to a reasonable value (e.g. by checking curvature). Notes ----- * Negative area results from clockwise (as opposed to counter-clockwise) parameterization of the input Path. To Contributors --------------- This is one of many parts of `svgpathtools` that could be improved by a noble soul implementing a piecewise-linear approximation scheme for paths (one with controls to guarantee a desired accuracy).
[ "Find", "area", "enclosed", "by", "path", ".", "Approximates", "any", "Arc", "segments", "in", "the", "Path", "with", "lines", "approximately", "chord_length", "long", "and", "returns", "the", "area", "enclosed", "by", "the", "approximated", "Path", ".", "Defa...
python
train
billyoverton/tweetqueue
tweetqueue/TweetList.py
https://github.com/billyoverton/tweetqueue/blob/e54972a0137ea2a21b2357b81408d9d4c92fdd61/tweetqueue/TweetList.py#L110-L121
def peek(self): """Peeks at the first of the list without removing it.""" c = self.connection.cursor() first_tweet_id = c.execute("SELECT tweet from tweetlist where label='first_tweet'").next()[0] if first_tweet_id is None: # No tweets are in the list, so return None return None tweet = c.execute("SELECT message from tweets WHERE id=?", (first_tweet_id,)).next()[0] c.close() return tweet
[ "def", "peek", "(", "self", ")", ":", "c", "=", "self", ".", "connection", ".", "cursor", "(", ")", "first_tweet_id", "=", "c", ".", "execute", "(", "\"SELECT tweet from tweetlist where label='first_tweet'\"", ")", ".", "next", "(", ")", "[", "0", "]", "if...
Peeks at the first of the list without removing it.
[ "Peeks", "at", "the", "first", "of", "the", "list", "without", "removing", "it", "." ]
python
train
AtteqCom/zsl
src/zsl/utils/xml_to_json.py
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/utils/xml_to_json.py#L145-L171
def _parse_list(element, definition): """Parse xml element by definition given by list. Find all elements matched by the string given as the first value in the list (as XPath or @attribute). If there is a second argument it will be handled as a definitions for the elements matched or the text when not. :param element: ElementTree element :param definition: definition schema :type definition: list :return: parsed xml :rtype: list """ if len(definition) == 0: raise XmlToJsonException('List definition needs some definition') tag = definition[0] tag_def = definition[1] if len(definition) > 1 else None sub_list = [] for el in element.findall(tag): sub_list.append(xml_to_json(el, tag_def)) return sub_list
[ "def", "_parse_list", "(", "element", ",", "definition", ")", ":", "if", "len", "(", "definition", ")", "==", "0", ":", "raise", "XmlToJsonException", "(", "'List definition needs some definition'", ")", "tag", "=", "definition", "[", "0", "]", "tag_def", "=",...
Parse xml element by definition given by list. Find all elements matched by the string given as the first value in the list (as XPath or @attribute). If there is a second argument it will be handled as a definitions for the elements matched or the text when not. :param element: ElementTree element :param definition: definition schema :type definition: list :return: parsed xml :rtype: list
[ "Parse", "xml", "element", "by", "definition", "given", "by", "list", "." ]
python
train
ewels/MultiQC
multiqc/modules/bismark/bismark.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/bismark/bismark.py#L186-L232
def parse_bismark_mbias(self, f): """ Parse the Bismark M-Bias plot data """ s = f['s_name'] self.bismark_mbias_data['meth']['CpG_R1'][s] = {} self.bismark_mbias_data['meth']['CHG_R1'][s] = {} self.bismark_mbias_data['meth']['CHH_R1'][s] = {} self.bismark_mbias_data['cov']['CpG_R1'][s] = {} self.bismark_mbias_data['cov']['CHG_R1'][s] = {} self.bismark_mbias_data['cov']['CHH_R1'][s] = {} self.bismark_mbias_data['meth']['CpG_R2'][s] = {} self.bismark_mbias_data['meth']['CHG_R2'][s] = {} self.bismark_mbias_data['meth']['CHH_R2'][s] = {} self.bismark_mbias_data['cov']['CpG_R2'][s] = {} self.bismark_mbias_data['cov']['CHG_R2'][s] = {} self.bismark_mbias_data['cov']['CHH_R2'][s] = {} key = None for l in f['f']: if 'context' in l: if 'CpG' in l: key = 'CpG' elif 'CHG' in l: key = 'CHG' elif 'CHH' in l: key = 'CHH' if '(R1)' in l: key += '_R1' elif '(R2)' in l: key += '_R2' else: key += '_R1' if key is not None: sections = l.split() try: pos = int(sections[0]) self.bismark_mbias_data['meth'][key][s][pos] = float(sections[3]) self.bismark_mbias_data['cov'][key][s][pos] = int(sections[4]) except (IndexError, ValueError): continue # Remove empty dicts (eg. R2 for SE data) for t in self.bismark_mbias_data: for k in self.bismark_mbias_data[t]: self.bismark_mbias_data[t][k] = { s_name: self.bismark_mbias_data[t][k][s_name] for s_name in self.bismark_mbias_data[t][k] if len(self.bismark_mbias_data[t][k][s_name]) > 0 }
[ "def", "parse_bismark_mbias", "(", "self", ",", "f", ")", ":", "s", "=", "f", "[", "'s_name'", "]", "self", ".", "bismark_mbias_data", "[", "'meth'", "]", "[", "'CpG_R1'", "]", "[", "s", "]", "=", "{", "}", "self", ".", "bismark_mbias_data", "[", "'m...
Parse the Bismark M-Bias plot data
[ "Parse", "the", "Bismark", "M", "-", "Bias", "plot", "data" ]
python
train
matthewdeanmartin/jiggle_version
jiggle_version/commands.py
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/jiggle_version/commands.py#L50-L71
def find_version(project, source, force_init): # type: (str, str, bool) ->None """ Entry point to just find a version and print next :return: """ # quiet! no noise file_opener = FileOpener() finder = FindVersion(project, source, file_opener, force_init=force_init) if finder.PROJECT is None: raise TypeError("Next step will fail without project name") if not finder.validate_current_versions(): # This is a failure. logger.debug(unicode(finder.all_current_versions())) logger.error("Versions not in sync, won't continue") die(-1, "Versions not in sync, won't continue") version = finder.find_any_valid_version() if version: print(finder.version_to_write(unicode(version))) else: logger.error("Failed to find version") die(-1, "Failed to find version")
[ "def", "find_version", "(", "project", ",", "source", ",", "force_init", ")", ":", "# type: (str, str, bool) ->None", "# quiet! no noise", "file_opener", "=", "FileOpener", "(", ")", "finder", "=", "FindVersion", "(", "project", ",", "source", ",", "file_opener", ...
Entry point to just find a version and print next :return:
[ "Entry", "point", "to", "just", "find", "a", "version", "and", "print", "next", ":", "return", ":" ]
python
train
n1analytics/python-paillier
phe/command_line.py
https://github.com/n1analytics/python-paillier/blob/955f8c0bfa9623be15b75462b121d28acf70f04b/phe/command_line.py#L72-L87
def extract(input, output): """Extract public key from private key. Given INPUT a private paillier key file as generated by generate, extract the public key portion to OUTPUT. Use "-" to output to stdout. """ log("Loading paillier keypair") priv = json.load(input) error_msg = "Invalid private key" assert 'pub' in priv, error_msg assert priv['kty'] == 'DAJ', error_msg json.dump(priv['pub'], output) output.write('\n') log("Public key written to {}".format(output.name))
[ "def", "extract", "(", "input", ",", "output", ")", ":", "log", "(", "\"Loading paillier keypair\"", ")", "priv", "=", "json", ".", "load", "(", "input", ")", "error_msg", "=", "\"Invalid private key\"", "assert", "'pub'", "in", "priv", ",", "error_msg", "as...
Extract public key from private key. Given INPUT a private paillier key file as generated by generate, extract the public key portion to OUTPUT. Use "-" to output to stdout.
[ "Extract", "public", "key", "from", "private", "key", "." ]
python
train
albert12132/templar
templar/api/rules/table_of_contents.py
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/api/rules/table_of_contents.py#L57-L85
def _build_list_items(self, matches): """Returns the HTML list items for the next matches that have a larger (or equal) header compared to the first header's level. This method mutatively removes elements from the front of matches as it processes each element. This method assumes matches contains at least one match. PARAMETERS: matches -- list of tuples; each tuple corresponds to the groups matched by the header_regex. RETURNS: list of str; the table of contents as a list of lines. """ assert len(matches) > 0, "Should be at least one match, by assumption" lines = [] current_level = matches[0][0] while matches and current_level <= matches[0][0]: level, _, tag_id, title = matches[0] if current_level < level: lines.extend(self._build_list(matches, level)) continue if tag_id: lines.append('<li><a href="#{0}">{1}</a></li>'.format(tag_id, title)) else: lines.append('<li>{0}</li>'.format(title)) matches.pop(0) return lines
[ "def", "_build_list_items", "(", "self", ",", "matches", ")", ":", "assert", "len", "(", "matches", ")", ">", "0", ",", "\"Should be at least one match, by assumption\"", "lines", "=", "[", "]", "current_level", "=", "matches", "[", "0", "]", "[", "0", "]", ...
Returns the HTML list items for the next matches that have a larger (or equal) header compared to the first header's level. This method mutatively removes elements from the front of matches as it processes each element. This method assumes matches contains at least one match. PARAMETERS: matches -- list of tuples; each tuple corresponds to the groups matched by the header_regex. RETURNS: list of str; the table of contents as a list of lines.
[ "Returns", "the", "HTML", "list", "items", "for", "the", "next", "matches", "that", "have", "a", "larger", "(", "or", "equal", ")", "header", "compared", "to", "the", "first", "header", "s", "level", "." ]
python
train
coin-or/GiMPy
src/gimpy/graph.py
https://github.com/coin-or/GiMPy/blob/51853122a50eb6019d06bbdedbfc396a833b5a22/src/gimpy/graph.py#L2046-L2067
def floyd_warshall_get_path(self, distance, nextn, i, j): ''' API: floyd_warshall_get_path(self, distance, nextn, i, j): Description: Finds shortest path between i and j using distance and nextn dictionaries. Pre: (1) distance and nextn are outputs of floyd_warshall method. (2) The graph does not have a negative cycle, , ie. distance[(i,i)] >=0 for all node i. Return: Returns the list of nodes on the path from i to j, ie. [i,...,j] ''' if distance[(i,j)]=='infinity': return None k = nextn[(i,j)] path = self.floyd_warshall_get_path if i==k: return [i, j] else: return path(distance, nextn, i,k) + [k] + path(distance, nextn, k,j)
[ "def", "floyd_warshall_get_path", "(", "self", ",", "distance", ",", "nextn", ",", "i", ",", "j", ")", ":", "if", "distance", "[", "(", "i", ",", "j", ")", "]", "==", "'infinity'", ":", "return", "None", "k", "=", "nextn", "[", "(", "i", ",", "j"...
API: floyd_warshall_get_path(self, distance, nextn, i, j): Description: Finds shortest path between i and j using distance and nextn dictionaries. Pre: (1) distance and nextn are outputs of floyd_warshall method. (2) The graph does not have a negative cycle, , ie. distance[(i,i)] >=0 for all node i. Return: Returns the list of nodes on the path from i to j, ie. [i,...,j]
[ "API", ":", "floyd_warshall_get_path", "(", "self", "distance", "nextn", "i", "j", ")", ":", "Description", ":", "Finds", "shortest", "path", "between", "i", "and", "j", "using", "distance", "and", "nextn", "dictionaries", ".", "Pre", ":", "(", "1", ")", ...
python
train
marshmallow-code/marshmallow
src/marshmallow/utils.py
https://github.com/marshmallow-code/marshmallow/blob/a6b6c4151f1fbf16f3774d4052ca2bddf6903750/src/marshmallow/utils.py#L167-L178
def rfcformat(dt, localtime=False): """Return the RFC822-formatted representation of a datetime object. :param datetime dt: The datetime. :param bool localtime: If ``True``, return the date relative to the local timezone instead of UTC, displaying the proper offset, e.g. "Sun, 10 Nov 2013 08:23:45 -0600" """ if not localtime: return formatdate(timegm(dt.utctimetuple())) else: return local_rfcformat(dt)
[ "def", "rfcformat", "(", "dt", ",", "localtime", "=", "False", ")", ":", "if", "not", "localtime", ":", "return", "formatdate", "(", "timegm", "(", "dt", ".", "utctimetuple", "(", ")", ")", ")", "else", ":", "return", "local_rfcformat", "(", "dt", ")" ...
Return the RFC822-formatted representation of a datetime object. :param datetime dt: The datetime. :param bool localtime: If ``True``, return the date relative to the local timezone instead of UTC, displaying the proper offset, e.g. "Sun, 10 Nov 2013 08:23:45 -0600"
[ "Return", "the", "RFC822", "-", "formatted", "representation", "of", "a", "datetime", "object", "." ]
python
train
Maplecroft/Winston
winston/utils.py
https://github.com/Maplecroft/Winston/blob/d70394c60d5b56d8b374b4db2240394dfd45cfa8/winston/utils.py#L33-L47
def raster_to_shape(raster): """Take a raster and return a polygon representing the outer edge.""" left = raster.bounds.left right = raster.bounds.right top = raster.bounds.top bottom = raster.bounds.bottom top_left = (left, top) top_right = (right, top) bottom_left = (left, bottom) bottom_right = (right, bottom) return Polygon(( top_left, top_right, bottom_right, bottom_left, top_left, ))
[ "def", "raster_to_shape", "(", "raster", ")", ":", "left", "=", "raster", ".", "bounds", ".", "left", "right", "=", "raster", ".", "bounds", ".", "right", "top", "=", "raster", ".", "bounds", ".", "top", "bottom", "=", "raster", ".", "bounds", ".", "...
Take a raster and return a polygon representing the outer edge.
[ "Take", "a", "raster", "and", "return", "a", "polygon", "representing", "the", "outer", "edge", "." ]
python
train