Schema of this dataset slice (column name, type, and observed length range or class count):

    nwo                 stringlengths   5 .. 91
    sha                 stringlengths   40 .. 40
    path                stringlengths   5 .. 174
    language            stringclasses   1 value
    identifier          stringlengths   1 .. 120
    parameters          stringlengths   0 .. 3.15k
    argument_list       stringclasses   1 value
    return_statement    stringlengths   0 .. 24.1k
    docstring           stringlengths   0 .. 27.3k
    docstring_summary   stringlengths   0 .. 13.8k
    docstring_tokens    sequence
    function            stringlengths   22 .. 139k
    function_tokens     sequence
    url                 stringlengths   87 .. 283
keiffster/program-y
8c99b56f8c32f01a7b9887b5daae9465619d0385
src/utils/botflow/src/botflow.py
python
Step.__str__
(self)
return "[%s] - %s -> %s" % (self._step, self._prompt, next_steps)
[]
def __str__(self):
    if self._conditions:
        next_steps = ", ".join("%s %s" % (x._next_step, x.condition) for x in self._conditions)
    else:
        next_steps = "EXECUTE"
    return "[%s] - %s -> %s" % (self._step, self._prompt, next_steps)
[ "def", "__str__", "(", "self", ")", ":", "if", "self", ".", "_conditions", ":", "next_steps", "=", "\", \"", ".", "join", "(", "\"%s %s\"", "(", "x", ".", "_next_step", ",", "x", ".", "condition", ")", "for", "x", "in", "self", ".", "_conditions", ")", "else", ":", "next_steps", "=", "\"EXECUTE\"", "return", "\"[%s] - %s -> %s\"", "%", "(", "self", ".", "_step", ",", "self", ".", "_prompt", ",", "next_steps", ")" ]
https://github.com/keiffster/program-y/blob/8c99b56f8c32f01a7b9887b5daae9465619d0385/src/utils/botflow/src/botflow.py#L181-L186
exodrifter/unity-python
bef6e4e9ddfbbf1eaf7acbbb973e9aa3dd64a20d
Lib/distutils/sysconfig.py
python
get_config_var
(name)
return get_config_vars().get(name)
Return the value of a single variable using the dictionary returned by 'get_config_vars()'. Equivalent to get_config_vars().get(name)
Return the value of a single variable using the dictionary returned by 'get_config_vars()'. Equivalent to get_config_vars().get(name)
[ "Return", "the", "value", "of", "a", "single", "variable", "using", "the", "dictionary", "returned", "by", "get_config_vars", "()", ".", "Equivalent", "to", "get_config_vars", "()", ".", "get", "(", "name", ")" ]
def get_config_var(name):
    """Return the value of a single variable using the dictionary
    returned by 'get_config_vars()'.  Equivalent to
    get_config_vars().get(name)
    """
    return get_config_vars().get(name)
[ "def", "get_config_var", "(", "name", ")", ":", "return", "get_config_vars", "(", ")", ".", "get", "(", "name", ")" ]
https://github.com/exodrifter/unity-python/blob/bef6e4e9ddfbbf1eaf7acbbb973e9aa3dd64a20d/Lib/distutils/sysconfig.py#L478-L483
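For context, a minimal usage sketch of the function in the record above. It assumes the standard-library distutils (present through Python 3.11; removed in 3.12), and 'LIBDIR' is just an illustrative key:

from distutils import sysconfig

# Look up a single build-time configuration value; returns None if unknown
print(sysconfig.get_config_var('LIBDIR'))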
Komodo/KomodoEdit
61edab75dce2bdb03943b387b0608ea36f548e8e
src/lint/koLintService.py
python
RequestQueue.remove_uid
(self, uid)
Remove all current requests with the given uid. Does not return anything.
Remove all current requests with the given uid.
[ "Remove", "all", "current", "requests", "with", "the", "given", "uid", "." ]
def remove_uid(self, uid):
    """Remove all current requests with the given uid.

    Does not return anything.
    """
    log.debug("in RequestQueue.remove_uid, acquiring esema")
    if not self.esema.acquire(0):  # do not block to acquire lock
        # return if could not acquire: means queue is empty and
        # therefore do not have any items to remove
        log.debug("in RequestQueue.remove_uid, did not acquire esema")
        return
    log.debug("in RequestQueue.remove_uid, acquired mutex")
    log.debug("in RequestQueue.remove_uid, acquiring mutex")
    self.mutex.acquire()
    release_esema = 1
    try:
        self._remove_uid(uid)
        # Failure means empty state also unchanged - release_esema
        # remains true.
        release_esema = not self._empty()
    finally:
        if release_esema:
            log.debug("in RequestQueue.remove_uid, releasing esema")
            self.esema.release()
        log.debug("in RequestQueue.remove_uid, releasing mutex")
        self.mutex.release()
[ "def", "remove_uid", "(", "self", ",", "uid", ")", ":", "log", ".", "debug", "(", "\"in RequestQueue.remove_uid, acquiring esema\"", ")", "if", "not", "self", ".", "esema", ".", "acquire", "(", "0", ")", ":", "# do not block to acquire lock", "# return if could not acquire: means queue is empty and", "# therefore do not have any items to remove", "log", ".", "debug", "(", "\"in RequestQueue.remove_uid, did not acquire esema\"", ")", "return", "log", ".", "debug", "(", "\"in RequestQueue.remove_uid, acquired mutex\"", ")", "log", ".", "debug", "(", "\"in RequestQueue.remove_uid, acquiring mutex\"", ")", "self", ".", "mutex", ".", "acquire", "(", ")", "release_esema", "=", "1", "try", ":", "self", ".", "_remove_uid", "(", "uid", ")", "# Failure means empty state also unchanged - release_esema", "# remains true.", "release_esema", "=", "not", "self", ".", "_empty", "(", ")", "finally", ":", "if", "release_esema", ":", "log", ".", "debug", "(", "\"in RequestQueue.remove_uid, releasing esema\"", ")", "self", ".", "esema", ".", "release", "(", ")", "log", ".", "debug", "(", "\"in RequestQueue.remove_uid, releasing mutex\"", ")", "self", ".", "mutex", ".", "release", "(", ")" ]
https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/src/lint/koLintService.py#L130-L155
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/min/bisect.py
python
insort_right
(a, x, lo=0, hi=None, *, key=None)
Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the right of the rightmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched.
Insert item x in list a, and keep it sorted assuming a is sorted.
[ "Insert", "item", "x", "in", "list", "a", "and", "keep", "it", "sorted", "assuming", "a", "is", "sorted", "." ]
def insort_right(a, x, lo=0, hi=None, *, key=None):
    """Insert item x in list a, and keep it sorted assuming a is sorted.

    If x is already in a, insert it to the right of the rightmost x.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """
    if key is None:
        lo = bisect_right(a, x, lo, hi)
    else:
        lo = bisect_right(a, key(x), lo, hi, key=key)
    a.insert(lo, x)
[ "def", "insort_right", "(", "a", ",", "x", ",", "lo", "=", "0", ",", "hi", "=", "None", ",", "*", ",", "key", "=", "None", ")", ":", "if", "key", "is", "None", ":", "lo", "=", "bisect_right", "(", "a", ",", "x", ",", "lo", ",", "hi", ")", "else", ":", "lo", "=", "bisect_right", "(", "a", ",", "key", "(", "x", ")", ",", "lo", ",", "hi", ",", "key", "=", "key", ")", "a", ".", "insert", "(", "lo", ",", "x", ")" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/min/bisect.py#L4-L16
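A small usage sketch for the record above (the key= parameter requires Python 3.10+; values are illustrative):

import bisect

a = [1, 2, 2, 4]
bisect.insort_right(a, 2)      # equal items land to the right of existing ones
assert a == [1, 2, 2, 2, 4]

pairs = [(1, 'a'), (3, 'b')]
bisect.insort_right(pairs, (2, 'c'), key=lambda p: p[0])  # Python 3.10+
assert pairs == [(1, 'a'), (2, 'c'), (3, 'b')]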
mitmproxy/mitmproxy
1abb8f69217910c8623bd1339da2502aed98ff0d
mitmproxy/tools/console/consoleaddons.py
python
ConsoleAddon.console_command
(self, *command_str: str)
Prompt the user to edit a command with a (possibly empty) starting value.
Prompt the user to edit a command with a (possibly empty) starting value.
[ "Prompt", "the", "user", "to", "edit", "a", "command", "with", "a", "(", "possibly", "empty", ")", "starting", "value", "." ]
def console_command(self, *command_str: str) -> None:
    """
    Prompt the user to edit a command with a (possibly empty) starting value.
    """
    quoted = " ".join(command_lexer.quote(x) for x in command_str)
    if quoted:
        quoted += " "
    signals.status_prompt_command.send(partial=quoted)
[ "def", "console_command", "(", "self", ",", "*", "command_str", ":", "str", ")", "->", "None", ":", "quoted", "=", "\" \"", ".", "join", "(", "command_lexer", ".", "quote", "(", "x", ")", "for", "x", "in", "command_str", ")", "if", "quoted", ":", "quoted", "+=", "\" \"", "signals", ".", "status_prompt_command", ".", "send", "(", "partial", "=", "quoted", ")" ]
https://github.com/mitmproxy/mitmproxy/blob/1abb8f69217910c8623bd1339da2502aed98ff0d/mitmproxy/tools/console/consoleaddons.py#L262-L269
Nuitka/Nuitka
39262276993757fa4e299f497654065600453fc9
nuitka/plugins/standard/MatplotlibPlugin.py
python
NuitkaPluginMatplotlib.isRelevant
(cls)
return isStandaloneMode()
Check whether plugin might be required.

Returns:
    True if this is a standalone compilation.
Check whether plugin might be required.
[ "Check", "whether", "plugin", "might", "be", "required", "." ]
def isRelevant(cls):
    """Check whether plugin might be required.

    Returns:
        True if this is a standalone compilation.
    """
    return isStandaloneMode()
[ "def", "isRelevant", "(", "cls", ")", ":", "return", "isStandaloneMode", "(", ")" ]
https://github.com/Nuitka/Nuitka/blob/39262276993757fa4e299f497654065600453fc9/nuitka/plugins/standard/MatplotlibPlugin.py#L53-L59
chribsen/simple-machine-learning-examples
dc94e52a4cebdc8bb959ff88b81ff8cfeca25022
venv/lib/python2.7/site-packages/scipy/stats/stats.py
python
hmean
(a, axis=0, dtype=None)
Calculates the harmonic mean along the specified axis.

That is:  n / (1/x1 + 1/x2 + ... + 1/xn)

Parameters
----------
a : array_like
    Input array, masked array or object that can be converted to an array.
axis : int or None, optional
    Axis along which the harmonic mean is computed. Default is 0.
    If None, compute over the whole array `a`.
dtype : dtype, optional
    Type of the returned array and of the accumulator in which the
    elements are summed. If `dtype` is not specified, it defaults to the
    dtype of `a`, unless `a` has an integer `dtype` with a precision less
    than that of the default platform integer. In that case, the default
    platform integer is used.

Returns
-------
hmean : ndarray
    see `dtype` parameter above

See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean

Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.

Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
Calculates the harmonic mean along the specified axis.
[ "Calculates", "the", "harmonic", "mean", "along", "the", "specified", "axis", "." ]
def hmean(a, axis=0, dtype=None):
    """
    Calculates the harmonic mean along the specified axis.

    That is:  n / (1/x1 + 1/x2 + ... + 1/xn)

    Parameters
    ----------
    a : array_like
        Input array, masked array or object that can be converted to an array.
    axis : int or None, optional
        Axis along which the harmonic mean is computed. Default is 0.
        If None, compute over the whole array `a`.
    dtype : dtype, optional
        Type of the returned array and of the accumulator in which the
        elements are summed. If `dtype` is not specified, it defaults to the
        dtype of `a`, unless `a` has an integer `dtype` with a precision less
        than that of the default platform integer. In that case, the default
        platform integer is used.

    Returns
    -------
    hmean : ndarray
        see `dtype` parameter above

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    gmean : Geometric mean

    Notes
    -----
    The harmonic mean is computed over a single dimension of the input
    array, axis=0 by default, or all values in the array if axis=None.
    float64 intermediate and return values are used for integer inputs.

    Use masked arrays to ignore any non-finite values in the input or that
    arise in the calculations such as Not a Number and infinity.
    """
    if not isinstance(a, np.ndarray):
        a = np.array(a, dtype=dtype)
    if np.all(a > 0):  # Harmonic mean only defined if greater than zero
        if isinstance(a, np.ma.MaskedArray):
            size = a.count(axis)
        else:
            if axis is None:
                a = a.ravel()
                size = a.shape[0]
            else:
                size = a.shape[axis]
        return size / np.sum(1.0/a, axis=axis, dtype=dtype)
    else:
        raise ValueError("Harmonic mean only defined if all elements greater than zero")
[ "def", "hmean", "(", "a", ",", "axis", "=", "0", ",", "dtype", "=", "None", ")", ":", "if", "not", "isinstance", "(", "a", ",", "np", ".", "ndarray", ")", ":", "a", "=", "np", ".", "array", "(", "a", ",", "dtype", "=", "dtype", ")", "if", "np", ".", "all", "(", "a", ">", "0", ")", ":", "# Harmonic mean only defined if greater than zero", "if", "isinstance", "(", "a", ",", "np", ".", "ma", ".", "MaskedArray", ")", ":", "size", "=", "a", ".", "count", "(", "axis", ")", "else", ":", "if", "axis", "is", "None", ":", "a", "=", "a", ".", "ravel", "(", ")", "size", "=", "a", ".", "shape", "[", "0", "]", "else", ":", "size", "=", "a", ".", "shape", "[", "axis", "]", "return", "size", "/", "np", ".", "sum", "(", "1.0", "/", "a", ",", "axis", "=", "axis", ",", "dtype", "=", "dtype", ")", "else", ":", "raise", "ValueError", "(", "\"Harmonic mean only defined if all elements greater than zero\"", ")" ]
https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/scipy/stats/stats.py#L318-L372
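A quick usage sketch for the record above (assumes scipy is installed; numbers are illustrative):

from scipy.stats import hmean

# 3 / (1/1 + 1/2 + 1/4) = 3 / 1.75 ≈ 1.714
print(hmean([1, 2, 4]))

# axis=None pools every element instead of working column-wise: 4 / 2.0 = 2.0
print(hmean([[1, 2], [4, 4]], axis=None))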
freqtrade/freqtrade
13651fd3be8d5ce8dcd7c94b920bda4e00b75aca
scripts/rest_client.py
python
FtRestClient.locks
(self)
return self._get("locks")
Return current locks

:return: json object
Return current locks
[ "Return", "current", "locks" ]
def locks(self):
    """Return current locks

    :return: json object
    """
    return self._get("locks")
[ "def", "locks", "(", "self", ")", ":", "return", "self", ".", "_get", "(", "\"locks\"", ")" ]
https://github.com/freqtrade/freqtrade/blob/13651fd3be8d5ce8dcd7c94b920bda4e00b75aca/scripts/rest_client.py#L114-L119
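A hedged usage sketch for the record above (assumes a running freqtrade API server; URL and credentials are placeholders):

from rest_client import FtRestClient  # scripts/rest_client.py in the freqtrade repo

client = FtRestClient('http://127.0.0.1:8080', 'user', 'pass')  # placeholder endpoint/credentials
print(client.locks())  # JSON object describing the currently active pair locks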
localstack/localstack
ec8b72d5c926ae8495ca50ce168494247aef54be
localstack/services/cloudformation/service_models.py
python
GenericBaseModel.props
(self)
return result
Return a copy of (1) the resource properties (from the template), combined with (2) the current deployment state properties of the resource.
Return a copy of (1) the resource properties (from the template), combined with (2) the current deployment state properties of the resource.
[ "Return", "a", "copy", "of", "(", "1", ")", "the", "resource", "properties", "(", "from", "the", "template", ")", "combined", "with", "(", "2", ")", "the", "current", "deployment", "state", "properties", "of", "the", "resource", "." ]
def props(self):
    """Return a copy of (1) the resource properties (from the template), combined with
    (2) the current deployment state properties of the resource."""
    result = dict(self.properties)
    result.update(self.state or {})
    return result
[ "def", "props", "(", "self", ")", ":", "result", "=", "dict", "(", "self", ".", "properties", ")", "result", ".", "update", "(", "self", ".", "state", "or", "{", "}", ")", "return", "result" ]
https://github.com/localstack/localstack/blob/ec8b72d5c926ae8495ca50ce168494247aef54be/localstack/services/cloudformation/service_models.py#L154-L159
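The record above is a plain two-dict merge in which deployment state overrides template properties; a standalone sketch of the same pattern with hypothetical values:

template_props = {'BucketName': 'demo-bucket', 'Versioning': 'Enabled'}  # hypothetical template
deployment_state = {'Arn': 'arn:aws:s3:::demo-bucket'}                   # hypothetical state

merged = dict(template_props)          # copy, so neither source dict is mutated
merged.update(deployment_state or {})  # state keys win on collision
print(merged)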
ddbourgin/numpy-ml
b0359af5285fbf9699d64fd5ec059493228af03e
numpy_ml/neural_nets/models/w2v.py
python
Word2Vec.backward
(self)
Compute the gradient of the loss wrt the current network parameters.
Compute the gradient of the loss wrt the current network parameters.
[ "Compute", "the", "gradient", "of", "the", "loss", "wrt", "the", "current", "network", "parameters", "." ]
def backward(self):
    """
    Compute the gradient of the loss wrt the current network parameters.
    """
    dX_emb = self.loss.grad(retain_grads=True, update_params=False)
    self.embeddings.backward(dX_emb)
[ "def", "backward", "(", "self", ")", ":", "dX_emb", "=", "self", ".", "loss", ".", "grad", "(", "retain_grads", "=", "True", ",", "update_params", "=", "False", ")", "self", ".", "embeddings", ".", "backward", "(", "dX_emb", ")" ]
https://github.com/ddbourgin/numpy-ml/blob/b0359af5285fbf9699d64fd5ec059493228af03e/numpy_ml/neural_nets/models/w2v.py#L235-L240
barseghyanartur/django-fobi
a998feae007d7fe3637429a80e42952ec7cda79f
src/fobi/wizard/views/dynamic.py
python
StepsHelper.is_last_step
(self)
return self.index1 == self.count
Check if last step.
Check if last step.
[ "Check", "if", "last", "step", "." ]
def is_last_step(self):
    """Check if last step."""
    return self.index1 == self.count
[ "def", "is_last_step", "(", "self", ")", ":", "return", "self", ".", "index1", "==", "self", ".", "count" ]
https://github.com/barseghyanartur/django-fobi/blob/a998feae007d7fe3637429a80e42952ec7cda79f/src/fobi/wizard/views/dynamic.py#L123-L125
timkpaine/paperboy
6c0854b2c0dad139c25153e520ca79ffed820fa4
paperboy/resources/autocomplete.py
python
AutocompleteResource.__init__
(self, *args, **kwargs)
[]
def __init__(self, *args, **kwargs):
    super(AutocompleteResource, self).__init__(*args, **kwargs)
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "AutocompleteResource", ",", "self", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/timkpaine/paperboy/blob/6c0854b2c0dad139c25153e520ca79ffed820fa4/paperboy/resources/autocomplete.py#L8-L9
Neoteroi/BlackSheep
2936cdd3ba6fceacd230a02c99241bde1d06b265
blacksheep/server/cors.py
python
CORSStrategy.__call__
(self, policy: str)
return decorator
Decorates a request handler to bind it to a specific policy by name.
Decorates a request handler to bind it to a specific policy by name.
[ "Decorates", "a", "request", "handler", "to", "bind", "it", "to", "a", "specific", "policy", "by", "name", "." ]
def __call__(self, policy: str):
    """Decorates a request handler to bind it to a specific policy by name."""

    def decorator(fn):
        is_match = False
        policy_object = self.policies.get(policy)

        if not policy_object:
            raise CORSPolicyNotConfiguredError(policy)

        for route in self.router:
            if route.handler is fn:
                self._policies_by_route[route] = policy_object
                is_match = True

        if not is_match:
            raise NotRequestHandlerError()
        return fn

    return decorator
[ "def", "__call__", "(", "self", ",", "policy", ":", "str", ")", ":", "def", "decorator", "(", "fn", ")", ":", "is_match", "=", "False", "policy_object", "=", "self", ".", "policies", ".", "get", "(", "policy", ")", "if", "not", "policy_object", ":", "raise", "CORSPolicyNotConfiguredError", "(", "policy", ")", "for", "route", "in", "self", ".", "router", ":", "if", "route", ".", "handler", "is", "fn", ":", "self", ".", "_policies_by_route", "[", "route", "]", "=", "policy_object", "is_match", "=", "True", "if", "not", "is_match", ":", "raise", "NotRequestHandlerError", "(", ")", "return", "fn", "return", "decorator" ]
https://github.com/Neoteroi/BlackSheep/blob/2936cdd3ba6fceacd230a02c99241bde1d06b265/blacksheep/server/cors.py#L183-L202
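The record above follows the register-and-return-unwrapped decorator pattern: the decorator records an association in a registry and hands the handler back unchanged. A minimal standalone sketch of that pattern, with hypothetical names:

REGISTRY = {}  # hypothetical policy-name -> handlers mapping

def policy(name):
    def decorator(fn):
        REGISTRY.setdefault(name, []).append(fn)  # bookkeeping only
        return fn  # the handler itself is returned unwrapped
    return decorator

@policy('allow-partners')
def get_data():
    return 'ok'

assert get_data() == 'ok' and get_data in REGISTRY['allow-partners']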
dropbox/changes
37e23c3141b75e4785cf398d015e3dbca41bdd56
changes/backends/jenkins/builder.py
python
JenkinsBuilder._pick_master
(self, job_name, is_diff=False)
return best
Identify a master to run the given job on. The master with the lowest queue for the given job is chosen. By random sorting the first empty queue will be prioritized.
Identify a master to run the given job on.
[ "Identify", "a", "master", "to", "run", "the", "given", "job", "on", "." ]
def _pick_master(self, job_name, is_diff=False):
    """
    Identify a master to run the given job on.

    The master with the lowest queue for the given job is chosen. By random
    sorting the first empty queue will be prioritized.
    """
    candidate_urls = self.master_urls
    if is_diff and self.diff_urls:
        candidate_urls = self.diff_urls

    blacklist = redis.smembers(MASTER_BLACKLIST_KEY)
    master_urls = [c for c in candidate_urls if c not in blacklist]

    if len(master_urls) == 0:
        raise ValueError("No masters to pick from.")

    if len(master_urls) == 1:
        return master_urls[0]

    random.shuffle(master_urls)

    best_match = (sys.maxint, None)
    for url in master_urls:
        try:
            queued_jobs = self._count_queued_jobs(url, job_name)
        except:
            self.logger.exception("Couldn't count queued jobs on master %s", url)
            continue

        if queued_jobs == 0:
            return url

        if best_match[0] > queued_jobs:
            best_match = (queued_jobs, url)

    best = best_match[1]
    if not best:
        raise Exception("Unable to successfully pick a master from {}.".format(master_urls))

    return best
[ "def", "_pick_master", "(", "self", ",", "job_name", ",", "is_diff", "=", "False", ")", ":", "candidate_urls", "=", "self", ".", "master_urls", "if", "is_diff", "and", "self", ".", "diff_urls", ":", "candidate_urls", "=", "self", ".", "diff_urls", "blacklist", "=", "redis", ".", "smembers", "(", "MASTER_BLACKLIST_KEY", ")", "master_urls", "=", "[", "c", "for", "c", "in", "candidate_urls", "if", "c", "not", "in", "blacklist", "]", "if", "len", "(", "master_urls", ")", "==", "0", ":", "raise", "ValueError", "(", "\"No masters to pick from.\"", ")", "if", "len", "(", "master_urls", ")", "==", "1", ":", "return", "master_urls", "[", "0", "]", "random", ".", "shuffle", "(", "master_urls", ")", "best_match", "=", "(", "sys", ".", "maxint", ",", "None", ")", "for", "url", "in", "master_urls", ":", "try", ":", "queued_jobs", "=", "self", ".", "_count_queued_jobs", "(", "url", ",", "job_name", ")", "except", ":", "self", ".", "logger", ".", "exception", "(", "\"Couldn't count queued jobs on master %s\"", ",", "url", ")", "continue", "if", "queued_jobs", "==", "0", ":", "return", "url", "if", "best_match", "[", "0", "]", ">", "queued_jobs", ":", "best_match", "=", "(", "queued_jobs", ",", "url", ")", "best", "=", "best_match", "[", "1", "]", "if", "not", "best", ":", "raise", "Exception", "(", "\"Unable to successfully pick a master from {}.\"", ".", "format", "(", "master_urls", ")", ")", "return", "best" ]
https://github.com/dropbox/changes/blob/37e23c3141b75e4785cf398d015e3dbca41bdd56/changes/backends/jenkins/builder.py#L344-L383
nopernik/mpDNS
b17dc39e7068406df82cb3431b3042e74e520cf9
dnslib/fixedresolver.py
python
FixedResolver.__init__
(self,zone)
[]
def __init__(self, zone):
    # Parse RRs
    self.rrs = RR.fromZone(zone)
[ "def", "__init__", "(", "self", ",", "zone", ")", ":", "# Parse RRs", "self", ".", "rrs", "=", "RR", ".", "fromZone", "(", "zone", ")" ]
https://github.com/nopernik/mpDNS/blob/b17dc39e7068406df82cb3431b3042e74e520cf9/dnslib/fixedresolver.py#L19-L21
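A brief usage sketch for the record above (assumes the dnslib package is installed; the zone line is illustrative):

from dnslib import RR

# Parse zone-file text into a list of resource-record objects
rrs = RR.fromZone('example.com. 60 IN A 192.0.2.1')
print(rrs[0].rname, rrs[0].rdata)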
polakowo/vectorbt
6638735c131655760474d72b9f045d1dbdbd8fe9
vectorbt/signals/nb.py
python
generate_rand_enex_by_prob_nb
(shape: tp.Shape, entry_prob: tp.MaybeArray[float], exit_prob: tp.MaybeArray[float], entry_wait: int, exit_wait: int, entry_pick_first: bool, exit_pick_first: bool, flex_2d: bool, seed: tp.Optional[int] = None)
return generate_enex_nb( shape, entry_wait, exit_wait, entry_pick_first, exit_pick_first, rand_by_prob_choice_nb, (entry_prob, entry_pick_first, temp_idx_arr, flex_2d), rand_by_prob_choice_nb, (exit_prob, exit_pick_first, temp_idx_arr, flex_2d) )
Pick entries by probability `entry_prob` and exits by probability `exit_prob` one after another. `entry_prob` and `exit_prob` should be 2-dim arrays of shape `shape`. Specify `seed` to make output deterministic.
Pick entries by probability `entry_prob` and exits by probability `exit_prob` one after another.
[ "Pick", "entries", "by", "probability", "entry_prob", "and", "exits", "by", "probability", "exit_prob", "one", "after", "another", "." ]
def generate_rand_enex_by_prob_nb(shape: tp.Shape,
                                  entry_prob: tp.MaybeArray[float],
                                  exit_prob: tp.MaybeArray[float],
                                  entry_wait: int,
                                  exit_wait: int,
                                  entry_pick_first: bool,
                                  exit_pick_first: bool,
                                  flex_2d: bool,
                                  seed: tp.Optional[int] = None) -> tp.Tuple[tp.Array2d, tp.Array2d]:
    """Pick entries by probability `entry_prob` and exits by probability `exit_prob` one after another.

    `entry_prob` and `exit_prob` should be 2-dim arrays of shape `shape`.

    Specify `seed` to make output deterministic."""
    if seed is not None:
        np.random.seed(seed)
    temp_idx_arr = np.empty((shape[0],), dtype=np.int_)
    return generate_enex_nb(
        shape,
        entry_wait, exit_wait,
        entry_pick_first, exit_pick_first,
        rand_by_prob_choice_nb, (entry_prob, entry_pick_first, temp_idx_arr, flex_2d),
        rand_by_prob_choice_nb, (exit_prob, exit_pick_first, temp_idx_arr, flex_2d)
    )
[ "def", "generate_rand_enex_by_prob_nb", "(", "shape", ":", "tp", ".", "Shape", ",", "entry_prob", ":", "tp", ".", "MaybeArray", "[", "float", "]", ",", "exit_prob", ":", "tp", ".", "MaybeArray", "[", "float", "]", ",", "entry_wait", ":", "int", ",", "exit_wait", ":", "int", ",", "entry_pick_first", ":", "bool", ",", "exit_pick_first", ":", "bool", ",", "flex_2d", ":", "bool", ",", "seed", ":", "tp", ".", "Optional", "[", "int", "]", "=", "None", ")", "->", "tp", ".", "Tuple", "[", "tp", ".", "Array2d", ",", "tp", ".", "Array2d", "]", ":", "if", "seed", "is", "not", "None", ":", "np", ".", "random", ".", "seed", "(", "seed", ")", "temp_idx_arr", "=", "np", ".", "empty", "(", "(", "shape", "[", "0", "]", ",", ")", ",", "dtype", "=", "np", ".", "int_", ")", "return", "generate_enex_nb", "(", "shape", ",", "entry_wait", ",", "exit_wait", ",", "entry_pick_first", ",", "exit_pick_first", ",", "rand_by_prob_choice_nb", ",", "(", "entry_prob", ",", "entry_pick_first", ",", "temp_idx_arr", ",", "flex_2d", ")", ",", "rand_by_prob_choice_nb", ",", "(", "exit_prob", ",", "exit_pick_first", ",", "temp_idx_arr", ",", "flex_2d", ")", ")" ]
https://github.com/polakowo/vectorbt/blob/6638735c131655760474d72b9f045d1dbdbd8fe9/vectorbt/signals/nb.py#L524-L548
kamalgill/flask-appengine-template
11760f83faccbb0d0afe416fc58e67ecfb4643c2
src/lib/flask/app.py
python
Flask.create_jinja_environment
(self)
return rv
Creates the Jinja2 environment based on :attr:`jinja_options`
and :meth:`select_jinja_autoescape`.  Since 0.7 this also adds
the Jinja2 globals and filters after initialization.  Override
this function to customize the behavior.

.. versionadded:: 0.5
.. versionchanged:: 0.11
   ``Environment.auto_reload`` set in accordance with
   ``TEMPLATES_AUTO_RELOAD`` configuration option.
Creates the Jinja2 environment based on :attr:`jinja_options` and :meth:`select_jinja_autoescape`. Since 0.7 this also adds the Jinja2 globals and filters after initialization. Override this function to customize the behavior.
[ "Creates", "the", "Jinja2", "environment", "based", "on", ":", "attr", ":", "jinja_options", "and", ":", "meth", ":", "select_jinja_autoescape", ".", "Since", "0", ".", "7", "this", "also", "adds", "the", "Jinja2", "globals", "and", "filters", "after", "initialization", ".", "Override", "this", "function", "to", "customize", "the", "behavior", "." ]
def create_jinja_environment(self):
    """Creates the Jinja2 environment based on :attr:`jinja_options`
    and :meth:`select_jinja_autoescape`.  Since 0.7 this also adds
    the Jinja2 globals and filters after initialization.  Override
    this function to customize the behavior.

    .. versionadded:: 0.5
    .. versionchanged:: 0.11
       ``Environment.auto_reload`` set in accordance with
       ``TEMPLATES_AUTO_RELOAD`` configuration option.
    """
    options = dict(self.jinja_options)
    if 'autoescape' not in options:
        options['autoescape'] = self.select_jinja_autoescape
    if 'auto_reload' not in options:
        if self.config['TEMPLATES_AUTO_RELOAD'] is not None:
            options['auto_reload'] = self.config['TEMPLATES_AUTO_RELOAD']
        else:
            options['auto_reload'] = self.debug
    rv = self.jinja_environment(self, **options)
    rv.globals.update(
        url_for=url_for,
        get_flashed_messages=get_flashed_messages,
        config=self.config,
        # request, session and g are normally added with the
        # context processor for efficiency reasons but for imported
        # templates we also want the proxies in there.
        request=request,
        session=session,
        g=g
    )
    rv.filters['tojson'] = json.tojson_filter
    return rv
[ "def", "create_jinja_environment", "(", "self", ")", ":", "options", "=", "dict", "(", "self", ".", "jinja_options", ")", "if", "'autoescape'", "not", "in", "options", ":", "options", "[", "'autoescape'", "]", "=", "self", ".", "select_jinja_autoescape", "if", "'auto_reload'", "not", "in", "options", ":", "if", "self", ".", "config", "[", "'TEMPLATES_AUTO_RELOAD'", "]", "is", "not", "None", ":", "options", "[", "'auto_reload'", "]", "=", "self", ".", "config", "[", "'TEMPLATES_AUTO_RELOAD'", "]", "else", ":", "options", "[", "'auto_reload'", "]", "=", "self", ".", "debug", "rv", "=", "self", ".", "jinja_environment", "(", "self", ",", "*", "*", "options", ")", "rv", ".", "globals", ".", "update", "(", "url_for", "=", "url_for", ",", "get_flashed_messages", "=", "get_flashed_messages", ",", "config", "=", "self", ".", "config", ",", "# request, session and g are normally added with the", "# context processor for efficiency reasons but for imported", "# templates we also want the proxies in there.", "request", "=", "request", ",", "session", "=", "session", ",", "g", "=", "g", ")", "rv", ".", "filters", "[", "'tojson'", "]", "=", "json", ".", "tojson_filter", "return", "rv" ]
https://github.com/kamalgill/flask-appengine-template/blob/11760f83faccbb0d0afe416fc58e67ecfb4643c2/src/lib/flask/app.py#L679-L711
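For context, a minimal sketch of exercising the method in the record above (normally it is invoked lazily through app.jinja_env rather than called directly):

from flask import Flask

app = Flask(__name__)
env = app.create_jinja_environment()
print(env.globals['config'] is app.config)  # the app config is injected as a template global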
schematics/schematics
3a144be0aa50f68a4da917e8d957b924dedf9a52
schematics/types/base.py
python
MultilingualStringType.to_primitive
(self, value, context=None)
return localized
Use a combination of ``default_locale`` and ``context.app_data['locale']`` to return the best localized string.
Use a combination of ``default_locale`` and ``context.app_data['locale']`` to return the best localized string.
[ "Use", "a", "combination", "of", "default_locale", "and", "context", ".", "app_data", "[", "locale", "]", "to", "return", "the", "best", "localized", "string", "." ]
def to_primitive(self, value, context=None):
    """
    Use a combination of ``default_locale`` and ``context.app_data['locale']``
    to return the best localized string.
    """
    if value is None:
        return None

    context_locale = None
    if context and 'locale' in context.app_data:
        context_locale = context.app_data['locale']

    # Build a list of all possible locales to try
    possible_locales = []
    for locale in (context_locale, self.default_locale):
        if not locale:
            continue

        if isinstance(locale, string_type):
            possible_locales.append(locale)
        else:
            possible_locales.extend(locale)

    if not possible_locales:
        raise ConversionError(self.messages['no_locale'])

    for locale in possible_locales:
        if locale in value:
            localized = value[locale]
            break
    else:
        raise ConversionError(self.messages['locale_not_found'])

    if not isinstance(localized, str):
        if isinstance(localized, self.allow_casts):
            if isinstance(localized, bytes):
                localized = str(localized, 'utf-8')
            else:
                localized = str(localized)
        else:
            raise ConversionError(self.messages['convert'])

    return localized
[ "def", "to_primitive", "(", "self", ",", "value", ",", "context", "=", "None", ")", ":", "if", "value", "is", "None", ":", "return", "None", "context_locale", "=", "None", "if", "context", "and", "'locale'", "in", "context", ".", "app_data", ":", "context_locale", "=", "context", ".", "app_data", "[", "'locale'", "]", "# Build a list of all possible locales to try", "possible_locales", "=", "[", "]", "for", "locale", "in", "(", "context_locale", ",", "self", ".", "default_locale", ")", ":", "if", "not", "locale", ":", "continue", "if", "isinstance", "(", "locale", ",", "string_type", ")", ":", "possible_locales", ".", "append", "(", "locale", ")", "else", ":", "possible_locales", ".", "extend", "(", "locale", ")", "if", "not", "possible_locales", ":", "raise", "ConversionError", "(", "self", ".", "messages", "[", "'no_locale'", "]", ")", "for", "locale", "in", "possible_locales", ":", "if", "locale", "in", "value", ":", "localized", "=", "value", "[", "locale", "]", "break", "else", ":", "raise", "ConversionError", "(", "self", ".", "messages", "[", "'locale_not_found'", "]", ")", "if", "not", "isinstance", "(", "localized", ",", "str", ")", ":", "if", "isinstance", "(", "localized", ",", "self", ".", "allow_casts", ")", ":", "if", "isinstance", "(", "localized", ",", "bytes", ")", ":", "localized", "=", "str", "(", "localized", ",", "'utf-8'", ")", "else", ":", "localized", "=", "str", "(", "localized", ")", "else", ":", "raise", "ConversionError", "(", "self", ".", "messages", "[", "'convert'", "]", ")", "return", "localized" ]
https://github.com/schematics/schematics/blob/3a144be0aa50f68a4da917e8d957b924dedf9a52/schematics/types/base.py#L1132-L1175
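The locale lookup in the record above hinges on Python's for/else: the else branch runs only when the loop finishes without hitting break. A standalone illustration with made-up locales:

value = {'en_US': 'hello'}

for locale in ('fr_FR', 'en_US'):
    if locale in value:
        localized = value[locale]
        break  # found a match; the else clause is skipped
else:
    raise KeyError('no matching locale')  # reached only if nothing matched

print(localized)  # 'hello'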
IronLanguages/ironpython2
51fdedeeda15727717fb8268a805f71b06c0b9f1
Src/StdLib/Lib/site-packages/win32/Demos/security/security_enums.py
python
Enum.lookup_flags
(self, flags)
return flag_names, unknown_flags
Returns the names of all recognized flags in input, and any flags not found in the enum.
Returns the names of all recognized flags in input, and any flags not found in the enum.
[ "Returns", "the", "names", "of", "all", "recognized", "flags", "in", "input", "and", "any", "flags", "not", "found", "in", "the", "enum", "." ]
def lookup_flags(self, flags):
    """Returns the names of all recognized flags in input, and any flags not found in the enum."""
    flag_names = []
    unknown_flags = flags
    for k, v in self.__dict__.iteritems():
        if flags & v == v:
            flag_names.append(k)
            unknown_flags = unknown_flags & ~v
    return flag_names, unknown_flags
[ "def", "lookup_flags", "(", "self", ",", "flags", ")", ":", "flag_names", "=", "[", "]", "unknown_flags", "=", "flags", "for", "k", ",", "v", "in", "self", ".", "__dict__", ".", "iteritems", "(", ")", ":", "if", "flags", "&", "v", "==", "v", ":", "flag_names", ".", "append", "(", "k", ")", "unknown_flags", "=", "unknown_flags", "&", "~", "v", "return", "flag_names", ",", "unknown_flags" ]
https://github.com/IronLanguages/ironpython2/blob/51fdedeeda15727717fb8268a805f71b06c0b9f1/Src/StdLib/Lib/site-packages/win32/Demos/security/security_enums.py#L27-L35
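The record above is Python 2 (iteritems); a Python 3 sketch of the same flag decomposition, with hypothetical flag values:

class Enum:
    def __init__(self, **flags):
        self.__dict__.update(flags)

    def lookup_flags(self, flags):
        flag_names, unknown_flags = [], flags
        for k, v in self.__dict__.items():  # .items() replaces py2 .iteritems()
            if flags & v == v:
                flag_names.append(k)
                unknown_flags &= ~v          # clear the bits we recognized
        return flag_names, unknown_flags

e = Enum(READ=1, WRITE=2, EXEC=4)            # hypothetical flag set
print(e.lookup_flags(0b0111 | 8))            # (['READ', 'WRITE', 'EXEC'], 8)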
complexdb/zincbase
0c8ce46bc392dfa8ee99414877adb3b41648451e
zincbase/web/__init__.py
python
GraphCaster.render
(self, node_color=0x11bb88, node_size=10, node_opacity=0.9, node_label='id', node_visibility=True, edge_label='pred', edge_opacity=1, edge_color=0x333333, edge_size=0, edge_visibility=True, arrow_size=0, arrow_color=0x000001, label_node=False, label_node_color='black', label_node_height=3, label_node_offset=1, label_edge=False, label_edge_color='black', label_edge_height=3, label_edge_offset=1, bg_color=0xffffff, engine='d3')
Perform the initial setup/rendering of the current graph.

:param node_color: Either a 24bit RGB int (such as 0xFF001A) or a string containing
    a Javascript function which takes `node` as an argument, for example `node => node.color`
:param node_size: Either a number >= 0 or a string containing a Javascript function,
    for example `node => Math.log(node.enormity)`
:param node_label: Either a string representing a property of the node to display
    (on hover) as its label, or a Javascript function returning a string. All nodes
    have a property called `id` which is their name/string repr.
:param node_visibility: Either a string representing a property of the node which
    evaluates truthy/falsy (in Javascript) to determine whether to display the node,
    or a JS function that returns true or false, or True/False.
:param label_node: If True, nodes will be labeled with `node_label`. Unlike `node_label`,
    which only displays on hover, this is a permanent text. Note that the value updates
    when the value of `node[node_label]` changes (in Python).
:param label_node_color: RGB value for the color of a node's permanent label
:param label_node_height: Text height for the node's permanent label
:param label_node_offset: Integer specifying how far out from the node the label
    should appear. Default is 1 unit on the z-axis.
:param edge_visibility: Either a string representing a property of the edge which
    evaluates truthy/falsy (in Javascript) to determine whether to display the edge,
    or a JS function that returns true or false, or True/False.
:param edge_label: Either a string representing a property of an edge to display
    (on hover) as its label, or a Javascript function returning a string.
    Defaults to the predicate.
:param float edge_opacity: Opacity of the edges, from 0-1
:param edge_color: Either a 24bit RGB int or a string containing a Javascript function
    which takes `edge` as an argument, for example `edge => edge.color`.
:param edge_size: The width of an edge. Either a number >= 0 (where 0 means 1px)
    or a string containing a Javascript function.
:param label_edge: If True, nodes will be labeled with `edge_label`. Unlike `edge_label`,
    which only displays on hover, this is a permanent text. Note that the value updates
    when the value of `edge[edge_label]` changes (in Python).
:param label_edge_color: RGB value for the color of a edge's permanent label
:param label_edge_height: Text height for the edge's permanent label
:param label_edge_offset: Integer specifying how far out from the edge the label
    should appear. Default is 1 unit on the z-axis.
:param int arrow_size: If >0, display directional arrows on edges of that size.
:param int arrow_color: Color of arrows (if arrow_size > 0)
:param int bg_color: Hex background color for the graph, e.g. 0xFF0000 is red.
:param str engine: Specify d3 or ngraph. ngraph is faster but can be buggy, and is
    only really suitable for static graphs. The layouts can look different also.
Perform the initial setup/rendering of the current graph.
[ "Perform", "the", "initial", "setup", "/", "rendering", "of", "the", "current", "graph", "." ]
def render(self, node_color=0x11bb88, node_size=10, node_opacity=0.9,
           node_label='id', node_visibility=True, edge_label='pred',
           edge_opacity=1, edge_color=0x333333, edge_size=0, edge_visibility=True,
           arrow_size=0, arrow_color=0x000001,
           label_node=False, label_node_color='black', label_node_height=3, label_node_offset=1,
           label_edge=False, label_edge_color='black', label_edge_height=3, label_edge_offset=1,
           bg_color=0xffffff, engine='d3'):
    """Perform the initial setup/rendering of the current graph.

    :param node_color: Either a 24bit RGB int (such as 0xFF001A) or a string containing
        a Javascript function which takes `node` as an argument, for example `node => node.color`
    :param node_size: Either a number >= 0 or a string containing a Javascript function,
        for example `node => Math.log(node.enormity)`
    :param node_label: Either a string representing a property of the node to display
        (on hover) as its label, or a Javascript function returning a string. All nodes
        have a property called `id` which is their name/string repr.
    :param node_visibility: Either a string representing a property of the node which
        evaluates truthy/falsy (in Javascript) to determine whether to display the node,
        or a JS function that returns true or false, or True/False.
    :param label_node: If True, nodes will be labeled with `node_label`. Unlike `node_label`,
        which only displays on hover, this is a permanent text. Note that the value updates
        when the value of `node[node_label]` changes (in Python).
    :param label_node_color: RGB value for the color of a node's permanent label
    :param label_node_height: Text height for the node's permanent label
    :param label_node_offset: Integer specifying how far out from the node the label
        should appear. Default is 1 unit on the z-axis.
    :param edge_visibility: Either a string representing a property of the edge which
        evaluates truthy/falsy (in Javascript) to determine whether to display the edge,
        or a JS function that returns true or false, or True/False.
    :param edge_label: Either a string representing a property of an edge to display
        (on hover) as its label, or a Javascript function returning a string.
        Defaults to the predicate.
    :param float edge_opacity: Opacity of the edges, from 0-1
    :param edge_color: Either a 24bit RGB int or a string containing a Javascript function
        which takes `edge` as an argument, for example `edge => edge.color`.
    :param edge_size: The width of an edge. Either a number >= 0 (where 0 means 1px)
        or a string containing a Javascript function.
    :param label_edge: If True, nodes will be labeled with `edge_label`. Unlike `edge_label`,
        which only displays on hover, this is a permanent text. Note that the value updates
        when the value of `edge[edge_label]` changes (in Python).
    :param label_edge_color: RGB value for the color of a edge's permanent label
    :param label_edge_height: Text height for the edge's permanent label
    :param label_edge_offset: Integer specifying how far out from the edge the label
        should appear. Default is 1 unit on the z-axis.
    :param int arrow_size: If >0, display directional arrows on edges of that size.
    :param int arrow_color: Color of arrows (if arrow_size > 0)
    :param int bg_color: Hex background color for the graph, e.g. 0xFF0000 is red.
    :param str engine: Specify d3 or ngraph. ngraph is faster but can be buggy, and is
        only really suitable for static graphs. The layouts can look different also.
    """
    if label_node:
        label_node = {
            'color': 'black',
            'height': 3,
            'offset': node_size + label_node_offset
        }
    if label_edge:
        label_edge = {
            'color': 'black',
            'height': 3,
            'offset': edge_size + label_edge_offset
        }
    attributes = {
        'node_color': node_color, 'node_size': node_size,
        'node_opacity': node_opacity, 'node_label': node_label,
        'node_visibility': node_visibility, 'edge_visibility': edge_visibility,
        'edge_opacity': edge_opacity, 'edge_color': edge_color,
        'edge_size': edge_size, 'edge_label': edge_label,
        'arrow_size': arrow_size, 'arrow_color': arrow_color,
        'label_node': label_node, 'label_edge': label_edge,
        'engine': engine, 'bg_color': bg_color
    }
    self.socketio.emit('render', attributes, json=True)
[ "def", "render", "(", "self", ",", "node_color", "=", "0x11bb88", ",", "node_size", "=", "10", ",", "node_opacity", "=", "0.9", ",", "node_label", "=", "'id'", ",", "node_visibility", "=", "True", ",", "edge_label", "=", "'pred'", ",", "edge_opacity", "=", "1", ",", "edge_color", "=", "0x333333", ",", "edge_size", "=", "0", ",", "edge_visibility", "=", "True", ",", "arrow_size", "=", "0", ",", "arrow_color", "=", "0x000001", ",", "label_node", "=", "False", ",", "label_node_color", "=", "'black'", ",", "label_node_height", "=", "3", ",", "label_node_offset", "=", "1", ",", "label_edge", "=", "False", ",", "label_edge_color", "=", "'black'", ",", "label_edge_height", "=", "3", ",", "label_edge_offset", "=", "1", ",", "bg_color", "=", "0xffffff", ",", "engine", "=", "'d3'", ")", ":", "if", "label_node", ":", "label_node", "=", "{", "'color'", ":", "'black'", ",", "'height'", ":", "3", ",", "'offset'", ":", "node_size", "+", "label_node_offset", "}", "if", "label_edge", ":", "label_edge", "=", "{", "'color'", ":", "'black'", ",", "'height'", ":", "3", ",", "'offset'", ":", "edge_size", "+", "label_edge_offset", "}", "attributes", "=", "{", "'node_color'", ":", "node_color", ",", "'node_size'", ":", "node_size", ",", "'node_opacity'", ":", "node_opacity", ",", "'node_label'", ":", "node_label", ",", "'node_visibility'", ":", "node_visibility", ",", "'edge_visibility'", ":", "edge_visibility", ",", "'edge_opacity'", ":", "edge_opacity", ",", "'edge_color'", ":", "edge_color", ",", "'edge_size'", ":", "edge_size", ",", "'edge_label'", ":", "edge_label", ",", "'arrow_size'", ":", "arrow_size", ",", "'arrow_color'", ":", "arrow_color", ",", "'label_node'", ":", "label_node", ",", "'label_edge'", ":", "label_edge", ",", "'engine'", ":", "engine", ",", "'bg_color'", ":", "bg_color", "}", "self", ".", "socketio", ".", "emit", "(", "'render'", ",", "attributes", ",", "json", "=", "True", ")" ]
https://github.com/complexdb/zincbase/blob/0c8ce46bc392dfa8ee99414877adb3b41648451e/zincbase/web/__init__.py#L81-L153
toxygen-project/toxygen
0a54012cf5ee72434b923bcde7d8f1a4e575ce2f
toxygen/tox.py
python
Tox.callback_friend_request
(self, callback, user_data)
Set the callback for the `friend_request` event. Pass None to unset.

This event is triggered when a friend request is received.

:param callback: Python function. Should take
    pointer (c_void_p) to Tox object,
    The Public Key (c_uint8 array) of the user who sent the friend request,
    The message (c_char_p) they sent along with the request,
    The size (c_size_t) of the message byte array,
    pointer (c_void_p) to user_data
:param user_data: pointer (c_void_p) to user data
Set the callback for the `friend_request` event. Pass None to unset.
[ "Set", "the", "callback", "for", "the", "friend_request", "event", ".", "Pass", "None", "to", "unset", "." ]
def callback_friend_request(self, callback, user_data):
    """
    Set the callback for the `friend_request` event. Pass None to unset.

    This event is triggered when a friend request is received.

    :param callback: Python function. Should take
        pointer (c_void_p) to Tox object,
        The Public Key (c_uint8 array) of the user who sent the friend request,
        The message (c_char_p) they sent along with the request,
        The size (c_size_t) of the message byte array,
        pointer (c_void_p) to user_data
    :param user_data: pointer (c_void_p) to user data
    """
    c_callback = CFUNCTYPE(None, c_void_p, POINTER(c_uint8), c_char_p, c_size_t, c_void_p)
    self.friend_request_cb = c_callback(callback)
    Tox.libtoxcore.tox_callback_friend_request(self._tox_pointer, self.friend_request_cb,
                                               c_void_p(user_data))
[ "def", "callback_friend_request", "(", "self", ",", "callback", ",", "user_data", ")", ":", "c_callback", "=", "CFUNCTYPE", "(", "None", ",", "c_void_p", ",", "POINTER", "(", "c_uint8", ")", ",", "c_char_p", ",", "c_size_t", ",", "c_void_p", ")", "self", ".", "friend_request_cb", "=", "c_callback", "(", "callback", ")", "Tox", ".", "libtoxcore", ".", "tox_callback_friend_request", "(", "self", ".", "_tox_pointer", ",", "self", ".", "friend_request_cb", ",", "c_void_p", "(", "user_data", ")", ")" ]
https://github.com/toxygen-project/toxygen/blob/0a54012cf5ee72434b923bcde7d8f1a4e575ce2f/toxygen/tox.py#L987-L1002
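Note that the record above stores the CFUNCTYPE object on self: ctypes callbacks must stay referenced for as long as C code might invoke them, or they can be garbage-collected out from under the C side. A self-contained illustration:

from ctypes import CFUNCTYPE, c_int

CALLBACK = CFUNCTYPE(c_int, c_int)  # returns c_int, takes one c_int argument

def double(x):
    return x * 2

cb = CALLBACK(double)  # keep this reference alive while C may still call it
print(cb(21))          # 42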
EasyIME/PIME
0f1eee10169c1cb2eaa0b59a77fa6f931ecb33b3
python/python3/tornado/websocket.py
python
WebSocketHandler.check_origin
(self, origin: str)
return origin == host
Override to enable support for allowing alternate origins.

The ``origin`` argument is the value of the ``Origin`` HTTP
header, the url responsible for initiating this request.
This method is not called for clients that do not send this header;
such requests are always allowed (because all browsers that implement
WebSockets support this header, and non-browser clients do not have
the same cross-site security concerns).

Should return ``True`` to accept the request or ``False`` to reject it.
By default, rejects all requests with an origin on a host other than
this one.

This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.

.. warning::

   This is an important security measure; don't disable it
   without understanding the security implications. In
   particular, if your authentication is cookie-based, you
   must either restrict the origins allowed by
   ``check_origin()`` or implement your own XSRF-like
   protection for websocket connections. See `these
   <https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
   `articles
   <https://devcenter.heroku.com/articles/websocket-security>`_
   for more.

To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return ``True``::

    def check_origin(self, origin):
        return True

To allow connections from any subdomain of your site, you might
do something like::

    def check_origin(self, origin):
        parsed_origin = urllib.parse.urlparse(origin)
        return parsed_origin.netloc.endswith(".mydomain.com")

.. versionadded:: 4.0
Override to enable support for allowing alternate origins.
[ "Override", "to", "enable", "support", "for", "allowing", "alternate", "origins", "." ]
def check_origin(self, origin: str) -> bool:
    """Override to enable support for allowing alternate origins.

    The ``origin`` argument is the value of the ``Origin`` HTTP
    header, the url responsible for initiating this request.
    This method is not called for clients that do not send this header;
    such requests are always allowed (because all browsers that implement
    WebSockets support this header, and non-browser clients do not have
    the same cross-site security concerns).

    Should return ``True`` to accept the request or ``False`` to reject it.
    By default, rejects all requests with an origin on a host other than
    this one.

    This is a security protection against cross site scripting attacks on
    browsers, since WebSockets are allowed to bypass the usual same-origin
    policies and don't use CORS headers.

    .. warning::

       This is an important security measure; don't disable it
       without understanding the security implications. In
       particular, if your authentication is cookie-based, you
       must either restrict the origins allowed by
       ``check_origin()`` or implement your own XSRF-like
       protection for websocket connections. See `these
       <https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
       `articles
       <https://devcenter.heroku.com/articles/websocket-security>`_
       for more.

    To accept all cross-origin traffic (which was the default prior to
    Tornado 4.0), simply override this method to always return ``True``::

        def check_origin(self, origin):
            return True

    To allow connections from any subdomain of your site, you might
    do something like::

        def check_origin(self, origin):
            parsed_origin = urllib.parse.urlparse(origin)
            return parsed_origin.netloc.endswith(".mydomain.com")

    .. versionadded:: 4.0
    """
    parsed_origin = urlparse(origin)
    origin = parsed_origin.netloc
    origin = origin.lower()

    host = self.request.headers.get("Host")

    # Check to see that origin matches host directly, including ports
    return origin == host
[ "def", "check_origin", "(", "self", ",", "origin", ":", "str", ")", "->", "bool", ":", "parsed_origin", "=", "urlparse", "(", "origin", ")", "origin", "=", "parsed_origin", ".", "netloc", "origin", "=", "origin", ".", "lower", "(", ")", "host", "=", "self", ".", "request", ".", "headers", ".", "get", "(", "\"Host\"", ")", "# Check to see that origin matches host directly, including ports", "return", "origin", "==", "host" ]
https://github.com/EasyIME/PIME/blob/0f1eee10169c1cb2eaa0b59a77fa6f931ecb33b3/python/python3/tornado/websocket.py#L489-L543
kuri65536/python-for-android
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
python-build/python-libs/xmpppy/xmpp/protocol.py
python
DataReported.setField
(self,name,typ=None,label=None)
return self.addChild(node=DataField(name,None,typ,0,label))
Create if necessary or get the existing datafield object with name 'name' and return it. If created, attributes 'type' and 'label' are applied to new datafield.
Create if necessary or get the existing datafield object with name 'name' and return it. If created, attributes 'type' and 'label' are applied to new datafield.
[ "Create", "if", "nessessary", "or", "get", "the", "existing", "datafield", "object", "with", "name", "name", "and", "return", "it", ".", "If", "created", "attributes", "type", "and", "label", "are", "applied", "to", "new", "datafield", "." ]
def setField(self, name, typ=None, label=None):
    """ Create if necessary or get the existing datafield object with name 'name' and return it.
        If created, attributes 'type' and 'label' are applied to new datafield."""
    f = self.getField(name)
    if f:
        return f
    return self.addChild(node=DataField(name, None, typ, 0, label))
[ "def", "setField", "(", "self", ",", "name", ",", "typ", "=", "None", ",", "label", "=", "None", ")", ":", "f", "=", "self", ".", "getField", "(", "name", ")", "if", "f", ":", "return", "f", "return", "self", ".", "addChild", "(", "node", "=", "DataField", "(", "name", ",", "None", ",", "typ", ",", "0", ",", "label", ")", ")" ]
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-build/python-libs/xmpppy/xmpp/protocol.py#L695-L700
cgre-aachen/gempy
6ad16c46fc6616c9f452fba85d31ce32decd8b10
gempy/core/theano_modules/theano_graph_pro.py
python
TheanoGraphPro.covariance_matrix
(self)
return C_matrix
Set all the previous covariances together in the universal cokriging matrix

Returns:
    theano.tensor.matrix: Multivariate covariance
Set all the previous covariances together in the universal cokriging matrix
[ "Set", "all", "the", "previous", "covariances", "together", "in", "the", "universal", "cokriging", "matrix" ]
def covariance_matrix(self):
    """
    Set all the previous covariances together in the universal cokriging matrix

    Returns:
        theano.tensor.matrix: Multivariate covariance
    """
    # Lengths
    length_of_CG, length_of_CGI, length_of_U_I, length_of_faults, length_of_C = self.matrices_shapes()

    # Individual matrices
    C_G = self.cov_gradients()
    C_I = self.cov_surface_points()
    C_GI = self.cov_interface_gradients()
    U_I, U_G = self.universal_matrix()
    F_I, F_G = self.faults_matrix()

    # =================================
    # Creation of the Covariance Matrix
    # =================================
    C_matrix = T.zeros((length_of_C, length_of_C))

    # First row of matrices
    # Set C_G
    C_matrix = T.set_subtensor(C_matrix[0:length_of_CG, 0:length_of_CG], C_G)
    # Set CGI
    C_matrix = T.set_subtensor(
        C_matrix[0:length_of_CG, length_of_CG:length_of_CG + length_of_CGI], C_GI.T)
    # Set UG
    C_matrix = T.set_subtensor(
        C_matrix[0:length_of_CG,
                 length_of_CG + length_of_CGI:length_of_CG + length_of_CGI + length_of_U_I], U_G)
    # Set FG. I cannot use -index because when is -0 is equivalent to 0
    C_matrix = T.set_subtensor(
        C_matrix[0:length_of_CG, length_of_CG + length_of_CGI + length_of_U_I:], F_G.T)

    # Second row of matrices
    # Set C_IG
    C_matrix = T.set_subtensor(
        C_matrix[length_of_CG:length_of_CG + length_of_CGI, 0:length_of_CG], C_GI)
    # Set C_I
    C_matrix = T.set_subtensor(
        C_matrix[length_of_CG:length_of_CG + length_of_CGI,
                 length_of_CG:length_of_CG + length_of_CGI], C_I)
    # Set U_I
    # if not self.u_grade_T.get_value() == 0:
    C_matrix = T.set_subtensor(
        C_matrix[length_of_CG:length_of_CG + length_of_CGI,
                 length_of_CG + length_of_CGI:length_of_CG + length_of_CGI + length_of_U_I], U_I)
    # Set F_I
    C_matrix = T.set_subtensor(
        C_matrix[length_of_CG:length_of_CG + length_of_CGI,
                 length_of_CG + length_of_CGI + length_of_U_I:], F_I.T)

    # Third row of matrices
    # Set U_G
    C_matrix = T.set_subtensor(
        C_matrix[length_of_CG + length_of_CGI:length_of_CG + length_of_CGI + length_of_U_I,
                 0:length_of_CG], U_G.T)
    # Set U_I
    C_matrix = T.set_subtensor(
        C_matrix[length_of_CG + length_of_CGI:length_of_CG + length_of_CGI + length_of_U_I,
                 length_of_CG:length_of_CG + length_of_CGI], U_I.T)

    # Fourth row of matrices
    # Set F_G
    C_matrix = T.set_subtensor(
        C_matrix[length_of_CG + length_of_CGI + length_of_U_I:, 0:length_of_CG], F_G)
    # Set F_I
    C_matrix = T.set_subtensor(
        C_matrix[length_of_CG + length_of_CGI + length_of_U_I:,
                 length_of_CG:length_of_CG + length_of_CGI], F_I)

    # Add name to the theano node
    C_matrix.name = 'Block Covariance Matrix'

    if str(sys._getframe().f_code.co_name) in self.verbose:
        C_matrix = theano.printing.Print('cov_function')(C_matrix)

    return C_matrix
[ "def", "covariance_matrix", "(", "self", ")", ":", "# Lengths", "length_of_CG", ",", "length_of_CGI", ",", "length_of_U_I", ",", "length_of_faults", ",", "length_of_C", "=", "self", ".", "matrices_shapes", "(", ")", "# Individual matrices", "C_G", "=", "self", ".", "cov_gradients", "(", ")", "C_I", "=", "self", ".", "cov_surface_points", "(", ")", "C_GI", "=", "self", ".", "cov_interface_gradients", "(", ")", "U_I", ",", "U_G", "=", "self", ".", "universal_matrix", "(", ")", "F_I", ",", "F_G", "=", "self", ".", "faults_matrix", "(", ")", "# =================================", "# Creation of the Covariance Matrix", "# =================================", "C_matrix", "=", "T", ".", "zeros", "(", "(", "length_of_C", ",", "length_of_C", ")", ")", "# First row of matrices", "# Set C_G", "C_matrix", "=", "T", ".", "set_subtensor", "(", "C_matrix", "[", "0", ":", "length_of_CG", ",", "0", ":", "length_of_CG", "]", ",", "C_G", ")", "# Set CGI", "C_matrix", "=", "T", ".", "set_subtensor", "(", "C_matrix", "[", "0", ":", "length_of_CG", ",", "length_of_CG", ":", "length_of_CG", "+", "length_of_CGI", "]", ",", "C_GI", ".", "T", ")", "# Set UG", "C_matrix", "=", "T", ".", "set_subtensor", "(", "C_matrix", "[", "0", ":", "length_of_CG", ",", "length_of_CG", "+", "length_of_CGI", ":", "length_of_CG", "+", "length_of_CGI", "+", "length_of_U_I", "]", ",", "U_G", ")", "# Set FG. I cannot use -index because when is -0 is equivalent to 0", "C_matrix", "=", "T", ".", "set_subtensor", "(", "C_matrix", "[", "0", ":", "length_of_CG", ",", "length_of_CG", "+", "length_of_CGI", "+", "length_of_U_I", ":", "]", ",", "F_G", ".", "T", ")", "# Second row of matrices", "# Set C_IG", "C_matrix", "=", "T", ".", "set_subtensor", "(", "C_matrix", "[", "length_of_CG", ":", "length_of_CG", "+", "length_of_CGI", ",", "0", ":", "length_of_CG", "]", ",", "C_GI", ")", "# Set C_I", "C_matrix", "=", "T", ".", "set_subtensor", "(", "C_matrix", "[", "length_of_CG", ":", "length_of_CG", "+", "length_of_CGI", ",", "length_of_CG", ":", "length_of_CG", "+", "length_of_CGI", "]", ",", "C_I", ")", "# Set U_I", "# if not self.u_grade_T.get_value() == 0:", "C_matrix", "=", "T", ".", "set_subtensor", "(", "C_matrix", "[", "length_of_CG", ":", "length_of_CG", "+", "length_of_CGI", ",", "length_of_CG", "+", "length_of_CGI", ":", "length_of_CG", "+", "length_of_CGI", "+", "length_of_U_I", "]", ",", "U_I", ")", "# Set F_I", "C_matrix", "=", "T", ".", "set_subtensor", "(", "C_matrix", "[", "length_of_CG", ":", "length_of_CG", "+", "length_of_CGI", ",", "length_of_CG", "+", "length_of_CGI", "+", "length_of_U_I", ":", "]", ",", "F_I", ".", "T", ")", "# Third row of matrices", "# Set U_G", "C_matrix", "=", "T", ".", "set_subtensor", "(", "C_matrix", "[", "length_of_CG", "+", "length_of_CGI", ":", "length_of_CG", "+", "length_of_CGI", "+", "length_of_U_I", ",", "0", ":", "length_of_CG", "]", ",", "U_G", ".", "T", ")", "# Set U_I", "C_matrix", "=", "T", ".", "set_subtensor", "(", "C_matrix", "[", "length_of_CG", "+", "length_of_CGI", ":", "length_of_CG", "+", "length_of_CGI", "+", "length_of_U_I", ",", "length_of_CG", ":", "length_of_CG", "+", "length_of_CGI", "]", ",", "U_I", ".", "T", ")", "# Fourth row of matrices", "# Set F_G", "C_matrix", "=", "T", ".", "set_subtensor", "(", "C_matrix", "[", "length_of_CG", "+", "length_of_CGI", "+", "length_of_U_I", ":", ",", "0", ":", "length_of_CG", "]", ",", "F_G", ")", "# Set F_I", "C_matrix", "=", "T", ".", "set_subtensor", "(", "C_matrix", "[", "length_of_CG", "+", "length_of_CGI", "+", "length_of_U_I", 
":", ",", "length_of_CG", ":", "length_of_CG", "+", "length_of_CGI", "]", ",", "F_I", ")", "# Add name to the theano node", "C_matrix", ".", "name", "=", "'Block Covariance Matrix'", "if", "str", "(", "sys", ".", "_getframe", "(", ")", ".", "f_code", ".", "co_name", ")", "in", "self", ".", "verbose", ":", "C_matrix", "=", "theano", ".", "printing", ".", "Print", "(", "'cov_function'", ")", "(", "C_matrix", ")", "return", "C_matrix" ]
https://github.com/cgre-aachen/gempy/blob/6ad16c46fc6616c9f452fba85d31ce32decd8b10/gempy/core/theano_modules/theano_graph_pro.py#L1244-L1325
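A minimal NumPy sketch of the same block layout with toy sizes; plain slice assignment stands in for theano's set_subtensor, and the dummy matrices stand in for the real covariance blocks:

import numpy as np

# toy sizes: gradients, interface points, universal drift terms, fault terms
n_g, n_i, n_u, n_f = 4, 3, 2, 1
n = n_g + n_i + n_u + n_f

C_G = np.eye(n_g)                  # gradient covariance
C_GI = np.ones((n_i, n_g))         # interface-gradient cross covariance
C_I = np.eye(n_i)                  # surface-point covariance
U_G = np.ones((n_g, n_u)); U_I = np.ones((n_i, n_u))
F_G = np.ones((n_f, n_g)); F_I = np.ones((n_f, n_i))

C = np.zeros((n, n))
C[:n_g, :n_g] = C_G
C[:n_g, n_g:n_g + n_i] = C_GI.T
C[:n_g, n_g + n_i:n_g + n_i + n_u] = U_G
C[:n_g, n_g + n_i + n_u:] = F_G.T
C[n_g:n_g + n_i, :n_g] = C_GI
C[n_g:n_g + n_i, n_g:n_g + n_i] = C_I
C[n_g:n_g + n_i, n_g + n_i:n_g + n_i + n_u] = U_I
C[n_g:n_g + n_i, n_g + n_i + n_u:] = F_I.T
C[n_g + n_i:n_g + n_i + n_u, :n_g] = U_G.T
C[n_g + n_i:n_g + n_i + n_u, n_g:n_g + n_i] = U_I.T
C[n_g + n_i + n_u:, :n_g] = F_G
C[n_g + n_i + n_u:, n_g:n_g + n_i] = F_I

The third and fourth block rows mirror the first two by transposition, which is what makes the assembled cokriging matrix symmetric.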
pantsbuild/pex
473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
pex/vendor/_vendored/pip/pip/_vendor/requests/adapters.py
python
HTTPAdapter.cert_verify
(self, conn, url, verify, cert)
Verify a SSL certificate. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param conn: The urllib3 connection object associated with the cert. :param url: The requested URL. :param verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: The SSL certificate to verify.
Verify a SSL certificate. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
[ "Verify", "a", "SSL", "certificate", ".", "This", "method", "should", "not", "be", "called", "from", "user", "code", "and", "is", "only", "exposed", "for", "use", "when", "subclassing", "the", ":", "class", ":", "HTTPAdapter", "<requests", ".", "adapters", ".", "HTTPAdapter", ">", "." ]
def cert_verify(self, conn, url, verify, cert): """Verify a SSL certificate. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param conn: The urllib3 connection object associated with the cert. :param url: The requested URL. :param verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: The SSL certificate to verify. """ if url.lower().startswith('https') and verify: cert_loc = None # Allow self-specified cert location. if verify is not True: cert_loc = verify if not cert_loc: cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) if not cert_loc or not os.path.exists(cert_loc): raise IOError("Could not find a suitable TLS CA certificate bundle, " "invalid path: {}".format(cert_loc)) conn.cert_reqs = 'CERT_REQUIRED' if not os.path.isdir(cert_loc): conn.ca_certs = cert_loc else: conn.ca_cert_dir = cert_loc else: conn.cert_reqs = 'CERT_NONE' conn.ca_certs = None conn.ca_cert_dir = None if cert: if not isinstance(cert, basestring): conn.cert_file = cert[0] conn.key_file = cert[1] else: conn.cert_file = cert conn.key_file = None if conn.cert_file and not os.path.exists(conn.cert_file): raise IOError("Could not find the TLS certificate file, " "invalid path: {}".format(conn.cert_file)) if conn.key_file and not os.path.exists(conn.key_file): raise IOError("Could not find the TLS key file, " "invalid path: {}".format(conn.key_file))
[ "def", "cert_verify", "(", "self", ",", "conn", ",", "url", ",", "verify", ",", "cert", ")", ":", "if", "url", ".", "lower", "(", ")", ".", "startswith", "(", "'https'", ")", "and", "verify", ":", "cert_loc", "=", "None", "# Allow self-specified cert location.", "if", "verify", "is", "not", "True", ":", "cert_loc", "=", "verify", "if", "not", "cert_loc", ":", "cert_loc", "=", "extract_zipped_paths", "(", "DEFAULT_CA_BUNDLE_PATH", ")", "if", "not", "cert_loc", "or", "not", "os", ".", "path", ".", "exists", "(", "cert_loc", ")", ":", "raise", "IOError", "(", "\"Could not find a suitable TLS CA certificate bundle, \"", "\"invalid path: {}\"", ".", "format", "(", "cert_loc", ")", ")", "conn", ".", "cert_reqs", "=", "'CERT_REQUIRED'", "if", "not", "os", ".", "path", ".", "isdir", "(", "cert_loc", ")", ":", "conn", ".", "ca_certs", "=", "cert_loc", "else", ":", "conn", ".", "ca_cert_dir", "=", "cert_loc", "else", ":", "conn", ".", "cert_reqs", "=", "'CERT_NONE'", "conn", ".", "ca_certs", "=", "None", "conn", ".", "ca_cert_dir", "=", "None", "if", "cert", ":", "if", "not", "isinstance", "(", "cert", ",", "basestring", ")", ":", "conn", ".", "cert_file", "=", "cert", "[", "0", "]", "conn", ".", "key_file", "=", "cert", "[", "1", "]", "else", ":", "conn", ".", "cert_file", "=", "cert", "conn", ".", "key_file", "=", "None", "if", "conn", ".", "cert_file", "and", "not", "os", ".", "path", ".", "exists", "(", "conn", ".", "cert_file", ")", ":", "raise", "IOError", "(", "\"Could not find the TLS certificate file, \"", "\"invalid path: {}\"", ".", "format", "(", "conn", ".", "cert_file", ")", ")", "if", "conn", ".", "key_file", "and", "not", "os", ".", "path", ".", "exists", "(", "conn", ".", "key_file", ")", ":", "raise", "IOError", "(", "\"Could not find the TLS key file, \"", "\"invalid path: {}\"", ".", "format", "(", "conn", ".", "key_file", ")", ")" ]
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/pip/pip/_vendor/requests/adapters.py#L203-L253
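In user code, the verify and cert values that reach cert_verify come straight from the request call; a short sketch with placeholder file paths:

import requests

# verify: True (default CA bundle), a CA bundle path, or False (no verification)
requests.get('https://example.com', verify='/etc/ssl/certs/ca-bundle.crt')

# cert: a single PEM path, or a (certificate, key) tuple -- these paths are hypothetical
requests.get('https://example.com', cert=('/path/client.crt', '/path/client.key'))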
SciTools/cartopy
591fb5450e11b42b6de1cebe4f240112f915bd52
lib/cartopy/io/shapereader.py
python
BasicReader.records
(self)
Return an iterator of :class:`~Record` instances.
Return an iterator of :class:`~Record` instances.
[ "Return", "an", "iterator", "of", ":", "class", ":", "~Record", "instances", "." ]
def records(self): """ Return an iterator of :class:`~Record` instances. """ # Ignore the "DeletionFlag" field which always comes first fields = self._reader.fields[1:] for shape_record in self._reader.iterShapeRecords(): attributes = shape_record.record.as_dict() yield Record(shape_record.shape, attributes, fields)
[ "def", "records", "(", "self", ")", ":", "# Ignore the \"DeletionFlag\" field which always comes first", "fields", "=", "self", ".", "_reader", ".", "fields", "[", "1", ":", "]", "for", "shape_record", "in", "self", ".", "_reader", ".", "iterShapeRecords", "(", ")", ":", "attributes", "=", "shape_record", ".", "record", ".", "as_dict", "(", ")", "yield", "Record", "(", "shape_record", ".", "shape", ",", "attributes", ",", "fields", ")" ]
https://github.com/SciTools/cartopy/blob/591fb5450e11b42b6de1cebe4f240112f915bd52/lib/cartopy/io/shapereader.py#L157-L166
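A usage sketch with the public Reader from the same module, assuming the Natural Earth shapefile can be downloaded or is already cached locally:

import cartopy.io.shapereader as shpreader

path = shpreader.natural_earth(resolution='110m', category='cultural',
                               name='admin_0_countries')
for record in shpreader.Reader(path).records():
    print(record.attributes['NAME'])  # each Record pairs a geometry with its attribute dict
    break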
bitcoin-core/HWI
6871946c2176f2f9777b6ac8f0614d96d99bfa0e
hwilib/_gui.py
python
DisplayAddressDialog.__init__
(self, client)
[]
def __init__(self, client): super(DisplayAddressDialog, self).__init__() self.ui = Ui_DisplayAddressDialog() self.ui.setupUi(self) self.setWindowTitle('Display Address') self.client = client self.ui.path_lineedit.setValidator(QRegExpValidator(QRegExp("m(/[0-9]+['Hh]?)+"), None)) self.ui.path_lineedit.setFocus() self.ui.go_button.clicked.connect(self.go_button_clicked) self.ui.buttonBox.clicked.connect(self.accept)
[ "def", "__init__", "(", "self", ",", "client", ")", ":", "super", "(", "DisplayAddressDialog", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "ui", "=", "Ui_DisplayAddressDialog", "(", ")", "self", ".", "ui", ".", "setupUi", "(", "self", ")", "self", ".", "setWindowTitle", "(", "'Display Address'", ")", "self", ".", "client", "=", "client", "self", ".", "ui", ".", "path_lineedit", ".", "setValidator", "(", "QRegExpValidator", "(", "QRegExp", "(", "\"m(/[0-9]+['Hh]?)+\"", ")", ",", "None", ")", ")", "self", ".", "ui", ".", "path_lineedit", ".", "setFocus", "(", ")", "self", ".", "ui", ".", "go_button", ".", "clicked", ".", "connect", "(", "self", ".", "go_button_clicked", ")", "self", ".", "ui", ".", "buttonBox", ".", "clicked", ".", "connect", "(", "self", ".", "accept", ")" ]
https://github.com/bitcoin-core/HWI/blob/6871946c2176f2f9777b6ac8f0614d96d99bfa0e/hwilib/_gui.py#L158-L169
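The validator pattern accepts BIP32-style derivation paths; a plain-re approximation of what the dialog treats as complete input (QRegExpValidator additionally tolerates partial input while typing):

import re

path_re = re.compile(r"m(/[0-9]+['Hh]?)+")  # same pattern as the QRegExp above

print(bool(path_re.fullmatch("m/44'/0'/0'")))   # True
print(bool(path_re.fullmatch("m/44h/0H/0/1")))  # True
print(bool(path_re.fullmatch("m/")))            # False -- an index is required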
fooying/3102
0faee38c30b2e24154f41e68457cfd8f7a61c040
thirdparty/requests/cookies.py
python
get_cookie_header
(jar, request)
return r.get_new_headers().get('Cookie')
Produce an appropriate Cookie header string to be sent with `request`, or None.
Produce an appropriate Cookie header string to be sent with `request`, or None.
[ "Produce", "an", "appropriate", "Cookie", "header", "string", "to", "be", "sent", "with", "request", "or", "None", "." ]
def get_cookie_header(jar, request): """Produce an appropriate Cookie header string to be sent with `request`, or None.""" r = MockRequest(request) jar.add_cookie_header(r) return r.get_new_headers().get('Cookie')
[ "def", "get_cookie_header", "(", "jar", ",", "request", ")", ":", "r", "=", "MockRequest", "(", "request", ")", "jar", ".", "add_cookie_header", "(", "r", ")", "return", "r", ".", "get_new_headers", "(", ")", ".", "get", "(", "'Cookie'", ")" ]
https://github.com/fooying/3102/blob/0faee38c30b2e24154f41e68457cfd8f7a61c040/thirdparty/requests/cookies.py#L131-L135
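With stock requests the same helper can be exercised directly; a small sketch:

import requests
from requests.cookies import cookiejar_from_dict, get_cookie_header

jar = cookiejar_from_dict({'session': 'abc123'})
prepared = requests.Request('GET', 'http://example.com/').prepare()
print(get_cookie_header(jar, prepared))  # 'session=abc123'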
bukun/TorCMS
f7b44e8650aa54774f6b57e7b178edebbbf57e8e
torcms/handlers/log_handler.py
python
LogHandler.user_log_list
(self, userid, cur_p='')
View the list of the Log.
View the list of the Log.
[ "View", "the", "list", "of", "the", "Log", "." ]
def user_log_list(self, userid, cur_p=''): ''' View the list of the Log. ''' if cur_p == '': current_page_number = 1 else: current_page_number = int(cur_p) current_page_number = 1 if current_page_number < 1 else current_page_number pager_num = int(MLog.total_number() / CMS_CFG['list_num']) kwd = { 'pager': '', 'title': '', 'current_page': current_page_number, 'user_id': userid, } if self.is_p: self.render('admin/log_ajax/user_log_list.html', kwd=kwd, infos=MLog.query_pager_by_user( userid, current_page_num=current_page_number), format_date=tools.format_date, userinfo=self.userinfo) else: self.render('misc/log/user_log_list.html', kwd=kwd, infos=MLog.query_pager_by_user( userid, current_page_num=current_page_number), format_date=tools.format_date, userinfo=self.userinfo)
[ "def", "user_log_list", "(", "self", ",", "userid", ",", "cur_p", "=", "''", ")", ":", "if", "cur_p", "==", "''", ":", "current_page_number", "=", "1", "else", ":", "current_page_number", "=", "int", "(", "cur_p", ")", "current_page_number", "=", "1", "if", "current_page_number", "<", "1", "else", "current_page_number", "pager_num", "=", "int", "(", "MLog", ".", "total_number", "(", ")", "/", "CMS_CFG", "[", "'list_num'", "]", ")", "kwd", "=", "{", "'pager'", ":", "''", ",", "'title'", ":", "''", ",", "'current_page'", ":", "current_page_number", ",", "'user_id'", ":", "userid", ",", "}", "if", "self", ".", "is_p", ":", "self", ".", "render", "(", "'admin/log_ajax/user_log_list.html'", ",", "kwd", "=", "kwd", ",", "infos", "=", "MLog", ".", "query_pager_by_user", "(", "userid", ",", "current_page_num", "=", "current_page_number", ")", ",", "format_date", "=", "tools", ".", "format_date", ",", "userinfo", "=", "self", ".", "userinfo", ")", "else", ":", "self", ".", "render", "(", "'misc/log/user_log_list.html'", ",", "kwd", "=", "kwd", ",", "infos", "=", "MLog", ".", "query_pager_by_user", "(", "userid", ",", "current_page_num", "=", "current_page_number", ")", ",", "format_date", "=", "tools", ".", "format_date", ",", "userinfo", "=", "self", ".", "userinfo", ")" ]
https://github.com/bukun/TorCMS/blob/f7b44e8650aa54774f6b57e7b178edebbbf57e8e/torcms/handlers/log_handler.py#L112-L145
MobSF/Mobile-Security-Framework-MobSF
33abc69b54689fb535c72c720b593dc7ed21a4cf
mobsf/StaticAnalyzer/views/android/network_security.py
python
read_netsec_config
(app_dir, config, src_type)
return None
Read the manifest file.
Read the manifest file.
[ "Read", "the", "manifest", "file", "." ]
def read_netsec_config(app_dir, config, src_type): """Read the manifest file.""" msg = 'Reading Network Security Config' try: config_file = None config = config.replace('@xml/', '', 1) base = Path(app_dir) if src_type: # Support only android studio source files xml_dir = base / 'app' / 'src' / 'main' / 'res' / 'xml' else: # APK xml_dir = base / 'apktool_out' / 'res' / 'xml' xmls = Path(xml_dir).glob('*.xml') for xml in xmls: if xml.stem in [config, 'network_security_config']: config_file = xml break if not config_file: return None logger.info(msg) return config_file.read_text('utf8', 'ignore') except Exception: logger.exception(msg) return None
[ "def", "read_netsec_config", "(", "app_dir", ",", "config", ",", "src_type", ")", ":", "msg", "=", "'Reading Network Security Config'", "try", ":", "config_file", "=", "None", "config", "=", "config", ".", "replace", "(", "'@xml/'", ",", "''", ",", "1", ")", "base", "=", "Path", "(", "app_dir", ")", "if", "src_type", ":", "# Support only android studio source files", "xml_dir", "=", "base", "/", "'app'", "/", "'src'", "/", "'main'", "/", "'res'", "/", "'xml'", "else", ":", "# APK", "xml_dir", "=", "base", "/", "'apktool_out'", "/", "'res'", "/", "'xml'", "xmls", "=", "Path", "(", "xml_dir", ")", ".", "glob", "(", "'*.xml'", ")", "for", "xml", "in", "xmls", ":", "if", "xml", ".", "stem", "in", "[", "config", ",", "'network_security_config'", "]", ":", "config_file", "=", "xml", "break", "if", "not", "config_file", ":", "return", "None", "logger", ".", "info", "(", "msg", ")", "return", "config_file", ".", "read_text", "(", "'utf8'", ",", "'ignore'", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "msg", ")", "return", "None" ]
https://github.com/MobSF/Mobile-Security-Framework-MobSF/blob/33abc69b54689fb535c72c720b593dc7ed21a4cf/mobsf/StaticAnalyzer/views/android/network_security.py#L10-L34
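A usage sketch with a hypothetical decompiled-APK directory; the '@xml/' prefix is how the android:networkSecurityConfig manifest attribute references the file:

# assumes e.g. /tmp/app/apktool_out/res/xml/net_config.xml exists
xml_text = read_netsec_config('/tmp/app', '@xml/net_config', src_type=False)
if xml_text is not None:
    print(xml_text[:80])  # raw XML of the network security config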
neuropsychology/NeuroKit
d01111b9b82364d28da01c002e6cbfc45d9493d9
neurokit2/signal/signal_recompose.py
python
_signal_recompose_meanfreq
(components, sampling_rate=1000)
Get the mean frequency of components.
Get the mean frequency of components.
[ "Get", "the", "mean", "frequency", "of", "components", "." ]
def _signal_recompose_meanfreq(components, sampling_rate=1000): """Get the mean frequency of components.""" duration = components.shape[1] / sampling_rate n = len(components) freqs = np.zeros(n) for i in range(n): c = components[i, :] - np.mean(components[i, :]) freqs[i] = len(signal_zerocrossings(c)) / duration return freqs
[ "def", "_signal_recompose_meanfreq", "(", "components", ",", "sampling_rate", "=", "1000", ")", ":", "duration", "=", "components", ".", "shape", "[", "1", "]", "/", "sampling_rate", "n", "=", "len", "(", "components", ")", "freqs", "=", "np", ".", "zeros", "(", "n", ")", "for", "i", "in", "range", "(", "n", ")", ":", "c", "=", "components", "[", "i", ",", ":", "]", "-", "np", ".", "mean", "(", "components", "[", "i", ",", ":", "]", ")", "freqs", "[", "i", "]", "=", "len", "(", "signal_zerocrossings", "(", "c", ")", ")", "/", "duration" ]
https://github.com/neuropsychology/NeuroKit/blob/d01111b9b82364d28da01c002e6cbfc45d9493d9/neurokit2/signal/signal_recompose.py#L165-L173
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
src/transformers/models/electra/modeling_electra.py
python
ElectraSelfAttention.forward
( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, )
return outputs
[]
def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in ElectraModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs
[ "def", "forward", "(", "self", ",", "hidden_states", ",", "attention_mask", "=", "None", ",", "head_mask", "=", "None", ",", "encoder_hidden_states", "=", "None", ",", "encoder_attention_mask", "=", "None", ",", "past_key_value", "=", "None", ",", "output_attentions", "=", "False", ",", ")", ":", "mixed_query_layer", "=", "self", ".", "query", "(", "hidden_states", ")", "# If this is instantiated as a cross-attention module, the keys", "# and values come from an encoder; the attention mask needs to be", "# such that the encoder's padding tokens are not attended to.", "is_cross_attention", "=", "encoder_hidden_states", "is", "not", "None", "if", "is_cross_attention", "and", "past_key_value", "is", "not", "None", ":", "# reuse k,v, cross_attentions", "key_layer", "=", "past_key_value", "[", "0", "]", "value_layer", "=", "past_key_value", "[", "1", "]", "attention_mask", "=", "encoder_attention_mask", "elif", "is_cross_attention", ":", "key_layer", "=", "self", ".", "transpose_for_scores", "(", "self", ".", "key", "(", "encoder_hidden_states", ")", ")", "value_layer", "=", "self", ".", "transpose_for_scores", "(", "self", ".", "value", "(", "encoder_hidden_states", ")", ")", "attention_mask", "=", "encoder_attention_mask", "elif", "past_key_value", "is", "not", "None", ":", "key_layer", "=", "self", ".", "transpose_for_scores", "(", "self", ".", "key", "(", "hidden_states", ")", ")", "value_layer", "=", "self", ".", "transpose_for_scores", "(", "self", ".", "value", "(", "hidden_states", ")", ")", "key_layer", "=", "torch", ".", "cat", "(", "[", "past_key_value", "[", "0", "]", ",", "key_layer", "]", ",", "dim", "=", "2", ")", "value_layer", "=", "torch", ".", "cat", "(", "[", "past_key_value", "[", "1", "]", ",", "value_layer", "]", ",", "dim", "=", "2", ")", "else", ":", "key_layer", "=", "self", ".", "transpose_for_scores", "(", "self", ".", "key", "(", "hidden_states", ")", ")", "value_layer", "=", "self", ".", "transpose_for_scores", "(", "self", ".", "value", "(", "hidden_states", ")", ")", "query_layer", "=", "self", ".", "transpose_for_scores", "(", "mixed_query_layer", ")", "if", "self", ".", "is_decoder", ":", "# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.", "# Further calls to cross_attention layer can then reuse all cross-attention", "# key/value_states (first \"if\" case)", "# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of", "# all previous decoder key/value_states. 
Further calls to uni-directional self-attention", "# can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)", "# if encoder bi-directional self-attention `past_key_value` is always `None`", "past_key_value", "=", "(", "key_layer", ",", "value_layer", ")", "# Take the dot product between \"query\" and \"key\" to get the raw attention scores.", "attention_scores", "=", "torch", ".", "matmul", "(", "query_layer", ",", "key_layer", ".", "transpose", "(", "-", "1", ",", "-", "2", ")", ")", "if", "self", ".", "position_embedding_type", "==", "\"relative_key\"", "or", "self", ".", "position_embedding_type", "==", "\"relative_key_query\"", ":", "seq_length", "=", "hidden_states", ".", "size", "(", ")", "[", "1", "]", "position_ids_l", "=", "torch", ".", "arange", "(", "seq_length", ",", "dtype", "=", "torch", ".", "long", ",", "device", "=", "hidden_states", ".", "device", ")", ".", "view", "(", "-", "1", ",", "1", ")", "position_ids_r", "=", "torch", ".", "arange", "(", "seq_length", ",", "dtype", "=", "torch", ".", "long", ",", "device", "=", "hidden_states", ".", "device", ")", ".", "view", "(", "1", ",", "-", "1", ")", "distance", "=", "position_ids_l", "-", "position_ids_r", "positional_embedding", "=", "self", ".", "distance_embedding", "(", "distance", "+", "self", ".", "max_position_embeddings", "-", "1", ")", "positional_embedding", "=", "positional_embedding", ".", "to", "(", "dtype", "=", "query_layer", ".", "dtype", ")", "# fp16 compatibility", "if", "self", ".", "position_embedding_type", "==", "\"relative_key\"", ":", "relative_position_scores", "=", "torch", ".", "einsum", "(", "\"bhld,lrd->bhlr\"", ",", "query_layer", ",", "positional_embedding", ")", "attention_scores", "=", "attention_scores", "+", "relative_position_scores", "elif", "self", ".", "position_embedding_type", "==", "\"relative_key_query\"", ":", "relative_position_scores_query", "=", "torch", ".", "einsum", "(", "\"bhld,lrd->bhlr\"", ",", "query_layer", ",", "positional_embedding", ")", "relative_position_scores_key", "=", "torch", ".", "einsum", "(", "\"bhrd,lrd->bhlr\"", ",", "key_layer", ",", "positional_embedding", ")", "attention_scores", "=", "attention_scores", "+", "relative_position_scores_query", "+", "relative_position_scores_key", "attention_scores", "=", "attention_scores", "/", "math", ".", "sqrt", "(", "self", ".", "attention_head_size", ")", "if", "attention_mask", "is", "not", "None", ":", "# Apply the attention mask is (precomputed for all layers in ElectraModel forward() function)", "attention_scores", "=", "attention_scores", "+", "attention_mask", "# Normalize the attention scores to probabilities.", "attention_probs", "=", "nn", ".", "functional", ".", "softmax", "(", "attention_scores", ",", "dim", "=", "-", "1", ")", "# This is actually dropping out entire tokens to attend to, which might", "# seem a bit unusual, but is taken from the original Transformer paper.", "attention_probs", "=", "self", ".", "dropout", "(", "attention_probs", ")", "# Mask heads if we want to", "if", "head_mask", "is", "not", "None", ":", "attention_probs", "=", "attention_probs", "*", "head_mask", "context_layer", "=", "torch", ".", "matmul", "(", "attention_probs", ",", "value_layer", ")", "context_layer", "=", "context_layer", ".", "permute", "(", "0", ",", "2", ",", "1", ",", "3", ")", ".", "contiguous", "(", ")", "new_context_layer_shape", "=", "context_layer", ".", "size", "(", ")", "[", ":", "-", "2", "]", "+", "(", "self", ".", "all_head_size", ",", ")", 
"context_layer", "=", "context_layer", ".", "view", "(", "*", "new_context_layer_shape", ")", "outputs", "=", "(", "context_layer", ",", "attention_probs", ")", "if", "output_attentions", "else", "(", "context_layer", ",", ")", "if", "self", ".", "is_decoder", ":", "outputs", "=", "outputs", "+", "(", "past_key_value", ",", ")", "return", "outputs" ]
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/models/electra/modeling_electra.py#L251-L343
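Stripped of caching, cross-attention, and relative position embeddings, the core computation is standard scaled dot-product attention; a minimal PyTorch sketch in the (batch, heads, length, head_dim) layout produced by transpose_for_scores:

import math
import torch

B, H, L, D = 2, 8, 5, 64
q, k, v = (torch.randn(B, H, L, D) for _ in range(3))

scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(D)  # (B, H, L, L)
mask = torch.zeros(B, 1, 1, L)      # additive mask: 0 keeps, a large negative blocks
probs = torch.softmax(scores + mask, dim=-1)
context = torch.matmul(probs, v)    # (B, H, L, D)

In the record the division by sqrt(d) happens after the relative-position terms are added, so those terms are scaled too; when they are absent, the order does not matter.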
sopel-irc/sopel
787baa6e39f9dad57d94600c92e10761c41b21ef
sopel/modules/url.py
python
title_command
(bot, trigger)
Show the title or URL information for the given URL, or the last URL seen in this channel.
Show the title or URL information for the given URL, or the last URL seen in this channel.
[ "Show", "the", "title", "or", "URL", "information", "for", "the", "given", "URL", "or", "the", "last", "URL", "seen", "in", "this", "channel", "." ]
def title_command(bot, trigger): """ Show the title or URL information for the given URL, or the last URL seen in this channel. """ if not trigger.group(2): if trigger.sender not in bot.memory['last_seen_url']: return matched = check_callbacks( bot, bot.memory['last_seen_url'][trigger.sender]) if matched: return else: urls = [bot.memory['last_seen_url'][trigger.sender]] else: urls = list( # needs to be a list so len() can be checked later web.search_urls( trigger, exclusion_char=bot.config.url.exclusion_char ) ) result_count = 0 for url, title, domain, tinyurl in process_urls(bot, trigger, urls): message = '%s | %s' % (title, domain) if tinyurl: message += ' ( %s )' % tinyurl bot.reply(message) bot.memory['last_seen_url'][trigger.sender] = url result_count += 1 expected_count = len(urls) if result_count < expected_count: if expected_count == 1: bot.reply("Sorry, fetching that title failed. Make sure the site is working.") elif result_count == 0: bot.reply("Sorry, I couldn't fetch titles for any of those.") else: bot.reply("I couldn't get all of the titles, but I fetched what I could!")
[ "def", "title_command", "(", "bot", ",", "trigger", ")", ":", "if", "not", "trigger", ".", "group", "(", "2", ")", ":", "if", "trigger", ".", "sender", "not", "in", "bot", ".", "memory", "[", "'last_seen_url'", "]", ":", "return", "matched", "=", "check_callbacks", "(", "bot", ",", "bot", ".", "memory", "[", "'last_seen_url'", "]", "[", "trigger", ".", "sender", "]", ")", "if", "matched", ":", "return", "else", ":", "urls", "=", "[", "bot", ".", "memory", "[", "'last_seen_url'", "]", "[", "trigger", ".", "sender", "]", "]", "else", ":", "urls", "=", "list", "(", "# needs to be a list so len() can be checked later", "web", ".", "search_urls", "(", "trigger", ",", "exclusion_char", "=", "bot", ".", "config", ".", "url", ".", "exclusion_char", ")", ")", "result_count", "=", "0", "for", "url", ",", "title", ",", "domain", ",", "tinyurl", "in", "process_urls", "(", "bot", ",", "trigger", ",", "urls", ")", ":", "message", "=", "'%s | %s'", "%", "(", "title", ",", "domain", ")", "if", "tinyurl", ":", "message", "+=", "' ( %s )'", "%", "tinyurl", "bot", ".", "reply", "(", "message", ")", "bot", ".", "memory", "[", "'last_seen_url'", "]", "[", "trigger", ".", "sender", "]", "=", "url", "result_count", "+=", "1", "expected_count", "=", "len", "(", "urls", ")", "if", "result_count", "<", "expected_count", ":", "if", "expected_count", "==", "1", ":", "bot", ".", "reply", "(", "\"Sorry, fetching that title failed. Make sure the site is working.\"", ")", "elif", "result_count", "==", "0", ":", "bot", ".", "reply", "(", "\"Sorry, I couldn't fetch titles for any of those.\"", ")", "else", ":", "bot", ".", "reply", "(", "\"I couldn't get all of the titles, but I fetched what I could!\"", ")" ]
https://github.com/sopel-irc/sopel/blob/787baa6e39f9dad57d94600c92e10761c41b21ef/sopel/modules/url.py#L247-L285
lensacom/sparkit-learn
0498502107c1f7dcf33cda0cdb6f5ba4b42524b7
splearn/preprocessing/label.py
python
SparkLabelEncoder.transform
(self, y)
return y.transform(mapper)
Transform labels to normalized encoding. Parameters ---------- y : ArrayRDD [n_samples] Target values. Returns ------- y : ArrayRDD [n_samples]
Transform labels to normalized encoding. Parameters ---------- y : ArrayRDD [n_samples] Target values. Returns ------- y : ArrayRDD [n_samples]
[ "Transform", "labels", "to", "normalized", "encoding", ".", "Parameters", "----------", "y", ":", "ArrayRDD", "[", "n_samples", "]", "Target", "values", ".", "Returns", "-------", "y", ":", "ArrayRDD", "[", "n_samples", "]" ]
def transform(self, y): """Transform labels to normalized encoding. Parameters ---------- y : ArrayRDD [n_samples] Target values. Returns ------- y : ArrayRDD [n_samples] """ mapper = super(SparkLabelEncoder, self).transform mapper = self.broadcast(mapper, y.context) return y.transform(mapper)
[ "def", "transform", "(", "self", ",", "y", ")", ":", "mapper", "=", "super", "(", "SparkLabelEncoder", ",", "self", ")", ".", "transform", "mapper", "=", "self", ".", "broadcast", "(", "mapper", ",", "y", ".", "context", ")", "return", "y", ".", "transform", "(", "mapper", ")" ]
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/preprocessing/label.py#L84-L96
david-cortes/contextualbandits
293b47ce80d8330a238cca0614147256ffea5529
contextualbandits/online.py
python
_ActivePolicy.reset_active_choice
(self, active_choice='weighted')
return self
Set the active gradient criteria to a custom form Parameters ---------- active_choice : str in {'min', 'max', 'weighted'} How to calculate the gradient that an observation would have on the loss function for each classifier, given that it could be either class (positive or negative) for the classifier that predicts each arm. If weighted, they are weighted by the same probability estimates from the base algorithm. Returns ------- self : obj This object
Set the active gradient criteria to a custom form
[ "Set", "the", "active", "gradient", "criteria", "to", "a", "custom", "form" ]
def reset_active_choice(self, active_choice='weighted'): """ Set the active gradient criteria to a custom form Parameters ---------- active_choice : str in {'min', 'max', 'weighted'} How to calculate the gradient that an observation would have on the loss function for each classifier, given that it could be either class (positive or negative) for the classifier that predicts each arm. If weighted, they are weighted by the same probability estimates from the base algorithm. Returns ------- self : obj This object """ if self.active_choice is None: ### AdaptiveGreedy raise ValueError("Cannot change active choice for non-active policy.") assert active_choice in ['min', 'max', 'weighted'] self.active_choice = active_choice return self
[ "def", "reset_active_choice", "(", "self", ",", "active_choice", "=", "'weighted'", ")", ":", "if", "self", ".", "active_choice", "is", "None", ":", "### AdaptiveGreedy", "raise", "ValueError", "(", "\"Cannot change active choice for non-active policy.\"", ")", "assert", "active_choice", "in", "[", "'min'", ",", "'max'", ",", "'weighted'", "]", "self", ".", "active_choice", "=", "active_choice", "return", "self" ]
https://github.com/david-cortes/contextualbandits/blob/293b47ce80d8330a238cca0614147256ffea5529/contextualbandits/online.py#L1675-L1696
realpython/book2-exercises
cde325eac8e6d8cff2316601c2e5b36bb46af7d0
web2py/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py
python
_is_unpacked_egg
(path)
return ( path.lower().endswith('.egg') )
Determine if given path appears to be an unpacked egg.
Determine if given path appears to be an unpacked egg.
[ "Determine", "if", "given", "path", "appears", "to", "be", "an", "unpacked", "egg", "." ]
def _is_unpacked_egg(path): """ Determine if given path appears to be an unpacked egg. """ return ( path.lower().endswith('.egg') )
[ "def", "_is_unpacked_egg", "(", "path", ")", ":", "return", "(", "path", ".", "lower", "(", ")", ".", "endswith", "(", "'.egg'", ")", ")" ]
https://github.com/realpython/book2-exercises/blob/cde325eac8e6d8cff2316601c2e5b36bb46af7d0/web2py/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py#L2210-L2216
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
examples/research_projects/rag/lightning_base.py
python
BaseTransformer.test_epoch_end
(self, outputs)
return self.validation_end(outputs)
[]
def test_epoch_end(self, outputs): return self.validation_end(outputs)
[ "def", "test_epoch_end", "(", "self", ",", "outputs", ")", ":", "return", "self", ".", "validation_end", "(", "outputs", ")" ]
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/examples/research_projects/rag/lightning_base.py#L161-L162
citronneur/rdpy
cef16a9f64d836a3221a344ca7d571644280d829
rdpy/protocol/rdp/t125/ber.py
python
writeOctetstring
(value)
return (writeUniversalTag(Tag.BER_TAG_OCTET_STRING, False), writeLength(len(value)), String(value))
@summary: Write string in BER representation @param value: string @return: BER octet string block
[]
def writeOctetstring(value): """ @summary: Write string in BER representation @param value: string @return: BER octet string block """ return (writeUniversalTag(Tag.BER_TAG_OCTET_STRING, False), writeLength(len(value)), String(value))
[ "def", "writeOctetstring", "(", "value", ")", ":", "return", "(", "writeUniversalTag", "(", "Tag", ".", "BER_TAG_OCTET_STRING", ",", "False", ")", ",", "writeLength", "(", "len", "(", "value", ")", ")", ",", "String", "(", "value", ")", ")" ]
https://github.com/citronneur/rdpy/blob/cef16a9f64d836a3221a344ca7d571644280d829/rdpy/protocol/rdp/t125/ber.py#L232-L238
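Concretely, a BER octet string is a tag-length-value triple; a dependency-free sketch covering the short-form length case only:

def ber_octet_string(value: bytes) -> bytes:
    # 0x04 = universal class, primitive, OCTET STRING tag
    if len(value) > 127:
        raise ValueError('long-form lengths are not handled in this sketch')
    return bytes([0x04, len(value)]) + value

assert ber_octet_string(b'hi') == b'\x04\x02hi'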
phantomcyber/playbooks
9e850ecc44cb98c5dde53784744213a1ed5799bd
risk_notable_investigate.py
python
risk_notable_enrich
(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs)
return
[]
def risk_notable_enrich(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("risk_notable_enrich() called") ################################################################################ ## Custom Code Start ################################################################################ # Write your custom code here... ################################################################################ ## Custom Code End ################################################################################ # call playbook "community/risk_notable_enrich", returns the playbook_run_id playbook_run_id = phantom.playbook("community/risk_notable_enrich", container=container, name="risk_notable_enrich", callback=note_decision_2) return
[ "def", "risk_notable_enrich", "(", "action", "=", "None", ",", "success", "=", "None", ",", "container", "=", "None", ",", "results", "=", "None", ",", "handle", "=", "None", ",", "filtered_artifacts", "=", "None", ",", "filtered_results", "=", "None", ",", "custom_function", "=", "None", ",", "*", "*", "kwargs", ")", ":", "phantom", ".", "debug", "(", "\"risk_notable_enrich() called\"", ")", "################################################################################", "## Custom Code Start", "################################################################################", "# Write your custom code here...", "################################################################################", "## Custom Code End", "################################################################################", "# call playbook \"community/risk_notable_enrich\", returns the playbook_run_id", "playbook_run_id", "=", "phantom", ".", "playbook", "(", "\"community/risk_notable_enrich\"", ",", "container", "=", "container", ",", "name", "=", "\"risk_notable_enrich\"", ",", "callback", "=", "note_decision_2", ")", "return" ]
https://github.com/phantomcyber/playbooks/blob/9e850ecc44cb98c5dde53784744213a1ed5799bd/risk_notable_investigate.py#L80-L96
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
src/transformers/trainer_pt_utils.py
python
numpy_pad_and_concatenate
(array1, array2, padding_index=-100)
return result
Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary.
Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary.
[ "Concatenates", "array1", "and", "array2", "on", "first", "axis", "applying", "padding", "on", "the", "second", "if", "necessary", "." ]
def numpy_pad_and_concatenate(array1, array2, padding_index=-100): """Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary.""" if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]: return np.concatenate((array1, array2), axis=0) # Let's figure out the new shape new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:] # Now let's fill the result tensor result = np.full_like(array1, padding_index, shape=new_shape) result[: array1.shape[0], : array1.shape[1]] = array1 result[array1.shape[0] :, : array2.shape[1]] = array2 return result
[ "def", "numpy_pad_and_concatenate", "(", "array1", ",", "array2", ",", "padding_index", "=", "-", "100", ")", ":", "if", "len", "(", "array1", ".", "shape", ")", "==", "1", "or", "array1", ".", "shape", "[", "1", "]", "==", "array2", ".", "shape", "[", "1", "]", ":", "return", "np", ".", "concatenate", "(", "(", "array1", ",", "array2", ")", ",", "axis", "=", "0", ")", "# Let's figure out the new shape", "new_shape", "=", "(", "array1", ".", "shape", "[", "0", "]", "+", "array2", ".", "shape", "[", "0", "]", ",", "max", "(", "array1", ".", "shape", "[", "1", "]", ",", "array2", ".", "shape", "[", "1", "]", ")", ")", "+", "array1", ".", "shape", "[", "2", ":", "]", "# Now let's fill the result tensor", "result", "=", "np", ".", "full_like", "(", "array1", ",", "padding_index", ",", "shape", "=", "new_shape", ")", "result", "[", ":", "array1", ".", "shape", "[", "0", "]", ",", ":", "array1", ".", "shape", "[", "1", "]", "]", "=", "array1", "result", "[", "array1", ".", "shape", "[", "0", "]", ":", ",", ":", "array2", ".", "shape", "[", "1", "]", "]", "=", "array2", "return", "result" ]
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/trainer_pt_utils.py#L82-L94
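Behaviour on mismatched second dimensions, given numpy_pad_and_concatenate above:

import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])   # shape (2, 3)
b = np.array([[7, 8, 9, 10]])          # shape (1, 4)
out = numpy_pad_and_concatenate(a, b)  # shape (3, 4)
# [[  1    2    3 -100]
#  [  4    5    6 -100]
#  [  7    8    9   10]]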
sympy/sympy
d822fcba181155b85ff2b29fe525adbafb22b448
sympy/series/sequences.py
python
SeqBase._add
(self, other)
return None
Should only be used internally. Explanation =========== self._add(other) returns a new, term-wise added sequence if self knows how to add with other, otherwise it returns ``None``. ``other`` should only be a sequence object. Used within :class:`SeqAdd` class.
Should only be used internally.
[ "Should", "only", "be", "used", "internally", "." ]
def _add(self, other): """ Should only be used internally. Explanation =========== self._add(other) returns a new, term-wise added sequence if self knows how to add with other, otherwise it returns ``None``. ``other`` should only be a sequence object. Used within :class:`SeqAdd` class. """ return None
[ "def", "_add", "(", "self", ",", "other", ")", ":", "return", "None" ]
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/series/sequences.py#L158-L172
4shadoww/hakkuframework
409a11fc3819d251f86faa3473439f8c19066a21
lib/scapy/contrib/automotive/uds.py
python
UDS_TesterPresentSender.__init__
(self, sock, pkt=UDS() / UDS_TP(), interval=2)
Thread to send TesterPresent packets periodically Args: sock: socket where packet is sent periodically pkt: packet to send interval: interval between two packets
Thread to send TesterPresent packets periodically
[ "Thread", "to", "send", "TesterPresent", "packets", "periodically" ]
def __init__(self, sock, pkt=UDS() / UDS_TP(), interval=2): """ Thread to send TesterPresent packets periodically Args: sock: socket where packet is sent periodically pkt: packet to send interval: interval between two packets """ PeriodicSenderThread.__init__(self, sock, pkt, interval)
[ "def", "__init__", "(", "self", ",", "sock", ",", "pkt", "=", "UDS", "(", ")", "/", "UDS_TP", "(", ")", ",", "interval", "=", "2", ")", ":", "PeriodicSenderThread", ".", "__init__", "(", "self", ",", "sock", ",", "pkt", ",", "interval", ")" ]
https://github.com/4shadoww/hakkuframework/blob/409a11fc3819d251f86faa3473439f8c19066a21/lib/scapy/contrib/automotive/uds.py#L1201-L1209
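A usage sketch, assuming an established ISO-TP socket `sock` to the ECU:

tp = UDS_TesterPresentSender(sock, interval=2)
tp.start()   # sends UDS()/UDS_TP() every 2 seconds in the background
# ... perform diagnostic work while the session is kept alive ...
tp.stop()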
wrye-bash/wrye-bash
d495c47cfdb44475befa523438a40c4419cb386f
Mopy/bash/balt.py
python
Link._askDirectory
(self, message=_('Choose a directory.'), defaultPath='')
return DirOpen.display_dialog(self.window, message, defaultPath, create_dir=True)
Show a modal directory dialog and return the resulting path, or None if canceled.
Show a modal directory dialog and return the resulting path, or None if canceled.
[ "Show", "a", "modal", "directory", "dialog", "and", "return", "the", "resulting", "path", "or", "None", "if", "canceled", "." ]
def _askDirectory(self, message=_('Choose a directory.'), defaultPath=''): """Show a modal directory dialog and return the resulting path, or None if canceled.""" return DirOpen.display_dialog(self.window, message, defaultPath, create_dir=True)
[ "def", "_askDirectory", "(", "self", ",", "message", "=", "_", "(", "'Choose a directory.'", ")", ",", "defaultPath", "=", "''", ")", ":", "return", "DirOpen", ".", "display_dialog", "(", "self", ".", "window", ",", "message", ",", "defaultPath", ",", "create_dir", "=", "True", ")" ]
https://github.com/wrye-bash/wrye-bash/blob/d495c47cfdb44475befa523438a40c4419cb386f/Mopy/bash/balt.py#L1705-L1709
Instagram/MonkeyType
d582ee3914f9eee1fdfb76a57bb9f4206e017ceb
monkeytype/typing.py
python
field_annotations
(typed_dict)
return (typed_dict.__annotations__["required_fields"].__annotations__, typed_dict.__annotations__["optional_fields"].__annotations__)
Return the required and optional fields in the TypedDict.
Return the required and optional fields in the TypedDict.
[ "Return", "the", "required", "and", "optional", "fields", "in", "the", "TypedDict", "." ]
def field_annotations(typed_dict) -> Tuple[Dict[str, type], Dict[str, type]]: """Return the required and optional fields in the TypedDict.""" return (typed_dict.__annotations__["required_fields"].__annotations__, typed_dict.__annotations__["optional_fields"].__annotations__)
[ "def", "field_annotations", "(", "typed_dict", ")", "->", "Tuple", "[", "Dict", "[", "str", ",", "type", "]", ",", "Dict", "[", "str", ",", "type", "]", "]", ":", "return", "(", "typed_dict", ".", "__annotations__", "[", "\"required_fields\"", "]", ".", "__annotations__", ",", "typed_dict", ".", "__annotations__", "[", "\"optional_fields\"", "]", ".", "__annotations__", ")" ]
https://github.com/Instagram/MonkeyType/blob/d582ee3914f9eee1fdfb76a57bb9f4206e017ceb/monkeytype/typing.py#L67-L70
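The argument is not a PEP 589 TypedDict but monkeytype's internal two-level representation, whose annotations hold two nested classes; hypothetical stand-ins make the expected shape concrete, given field_annotations above:

class _Required:
    __annotations__ = {'id': int}

class _Optional:
    __annotations__ = {'nickname': str}

class _FakeTypedDict:
    __annotations__ = {'required_fields': _Required, 'optional_fields': _Optional}

print(field_annotations(_FakeTypedDict))  # ({'id': int}, {'nickname': str})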
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/modules/inspectlib/fsdb.py
python
CsvDB.__criteria
(self, obj, matches=None, mt=None, lt=None, eq=None)
return True
Returns True if object is aligned to the criteria. :param obj: :param matches: :param mt: :param lt: :param eq: :return: Boolean
Returns True if object is aligned to the criteria.
[ "Returns", "True", "if", "object", "is", "aligned", "to", "the", "criteria", "." ]
def __criteria(self, obj, matches=None, mt=None, lt=None, eq=None): """ Returns True if object is aligned to the criteria. :param obj: :param matches: :param mt: :param lt: :param eq: :return: Boolean """ # Fail matcher if "less than" for field, value in (mt or {}).items(): if getattr(obj, field) <= value: return False # Fail matcher if "more than" for field, value in (lt or {}).items(): if getattr(obj, field) >= value: return False # Fail matcher if "not equal" for field, value in (eq or {}).items(): if getattr(obj, field) != value: return False # Fail matcher if "doesn't match" for field, value in (matches or {}).items(): if not re.search(value, str(getattr(obj, field))): return False return True
[ "def", "__criteria", "(", "self", ",", "obj", ",", "matches", "=", "None", ",", "mt", "=", "None", ",", "lt", "=", "None", ",", "eq", "=", "None", ")", ":", "# Fail matcher if \"less than\"", "for", "field", ",", "value", "in", "(", "mt", "or", "{", "}", ")", ".", "items", "(", ")", ":", "if", "getattr", "(", "obj", ",", "field", ")", "<=", "value", ":", "return", "False", "# Fail matcher if \"more than\"", "for", "field", ",", "value", "in", "(", "lt", "or", "{", "}", ")", ".", "items", "(", ")", ":", "if", "getattr", "(", "obj", ",", "field", ")", ">=", "value", ":", "return", "False", "# Fail matcher if \"not equal\"", "for", "field", ",", "value", "in", "(", "eq", "or", "{", "}", ")", ".", "items", "(", ")", ":", "if", "getattr", "(", "obj", ",", "field", ")", "!=", "value", ":", "return", "False", "# Fail matcher if \"doesn't match\"", "for", "field", ",", "value", "in", "(", "matches", "or", "{", "}", ")", ".", "items", "(", ")", ":", "if", "not", "re", ".", "search", "(", "value", ",", "str", "(", "getattr", "(", "obj", ",", "field", ")", ")", ")", ":", "return", "False", "return", "True" ]
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/inspectlib/fsdb.py#L276-L307
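The matcher semantics (mt strictly greater, lt strictly less, eq equality, matches regex search) in a standalone, runnable form:

import re
from types import SimpleNamespace

def criteria(obj, matches=None, mt=None, lt=None, eq=None):
    for field, value in (mt or {}).items():       # must be strictly greater
        if getattr(obj, field) <= value:
            return False
    for field, value in (lt or {}).items():       # must be strictly less
        if getattr(obj, field) >= value:
            return False
    for field, value in (eq or {}).items():       # must be equal
        if getattr(obj, field) != value:
            return False
    for field, value in (matches or {}).items():  # must match the regex
        if not re.search(value, str(getattr(obj, field))):
            return False
    return True

entry = SimpleNamespace(size=10, name='duck.txt')
print(criteria(entry, mt={'size': 5}, matches={'name': r'\.txt$'}))  # True
print(criteria(entry, lt={'size': 10}))                              # False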
leo-editor/leo-editor
383d6776d135ef17d73d935a2f0ecb3ac0e99494
leo/plugins/backlink.py
python
backlinkController.markDst
(self)
Mark current position as 'destination' (called by UI)
Mark current position as 'destination' (called by UI)
[ "Mark", "current", "position", "as", "destination", "(", "called", "by", "UI", ")" ]
def markDst(self): """Mark current position as 'destination' (called by UI)""" self.linkDestination = self.c.p.copy() self.showMessage('Dest. marked')
[ "def", "markDst", "(", "self", ")", ":", "self", ".", "linkDestination", "=", "self", ".", "c", ".", "p", ".", "copy", "(", ")", "self", ".", "showMessage", "(", "'Dest. marked'", ")" ]
https://github.com/leo-editor/leo-editor/blob/383d6776d135ef17d73d935a2f0ecb3ac0e99494/leo/plugins/backlink.py#L482-L485
aws-cloudformation/cfn-lint
16df5d0ca0d8ebcf9330ebea701e83d883b47217
src/cfnlint/rules/resources/properties/ValuePrimitiveType.py
python
ValuePrimitiveType.check_value
(self, value, path, **kwargs)
return matches
Check Value
Check Value
[ "Check", "Value" ]
def check_value(self, value, path, **kwargs): """Check Value""" matches = [] primitive_type = kwargs.get('primitive_type', {}) item_type = kwargs.get('item_type', {}) strict_check = kwargs.get('non_strict', self.config['strict']) if item_type in ['Map']: if isinstance(value, dict): for map_key, map_value in value.items(): if not isinstance(map_value, dict): matches.extend(self.check_primitive_type( map_value, primitive_type, path + [map_key], strict_check)) else: # some properties support primitive types and objects # skip in the case it could be an object and the value is a object if (item_type or primitive_type) and isinstance(value, dict): return matches matches.extend(self.check_primitive_type(value, primitive_type, path, strict_check)) return matches
[ "def", "check_value", "(", "self", ",", "value", ",", "path", ",", "*", "*", "kwargs", ")", ":", "matches", "=", "[", "]", "primitive_type", "=", "kwargs", ".", "get", "(", "'primitive_type'", ",", "{", "}", ")", "item_type", "=", "kwargs", ".", "get", "(", "'item_type'", ",", "{", "}", ")", "strict_check", "=", "kwargs", ".", "get", "(", "'non_strict'", ",", "self", ".", "config", "[", "'strict'", "]", ")", "if", "item_type", "in", "[", "'Map'", "]", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "for", "map_key", ",", "map_value", "in", "value", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "map_value", ",", "dict", ")", ":", "matches", ".", "extend", "(", "self", ".", "check_primitive_type", "(", "map_value", ",", "primitive_type", ",", "path", "+", "[", "map_key", "]", ",", "strict_check", ")", ")", "else", ":", "# some properties support primitive types and objects", "# skip in the case it could be an object and the value is a object", "if", "(", "item_type", "or", "primitive_type", ")", "and", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "matches", "matches", ".", "extend", "(", "self", ".", "check_primitive_type", "(", "value", ",", "primitive_type", ",", "path", ",", "strict_check", ")", ")", "return", "matches" ]
https://github.com/aws-cloudformation/cfn-lint/blob/16df5d0ca0d8ebcf9330ebea701e83d883b47217/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py#L123-L142
tensorflow/lingvo
ce10019243d954c3c3ebe739f7589b5eebfdf907
lingvo/core/gshard_builder.py
python
MoEBuilder.Attention
(self, name)
return self._Graph( name, ['_q', '_k', '_v', 'bias'], ['outputs'], ('_q->q', self.Split('_q')), ('_k->k', self.Split('_k')), ('_v->v', self.Split('_v')), ('q,k->l', self._Fn('logits', fn=_LogitsFnF32)), ('l,bias->logits', self._Fn('bias', fn=_AddBiasF32)), ('logits->w', self._Fn('weights', _SoftmaxF32)), ('w->weights', self._Dropout('dropout', 1 - self.params.attention_dropout_prob)), ('weights,v->outputs', self._Fn( 'outputs', fn=lambda weights, v: tf.einsum('BLHM,BMHD->BLHD', weights, v))), )
Attention with multiple attention heads. Keys, values share same dimensionality params.self.params.attention_key_value_dim. Args: name: name of the layer Returns: The Attention layer params.
Attention with multiple attention heads.
[ "Attention", "with", "multiple", "attention", "heads", "." ]
def Attention(self, name): """Attention with multiple attention heads. Keys, values share same dimensionality params.self.params.attention_key_value_dim. Args: name: name of the layer Returns: The Attention layer params. """ p = self.params def _AddBiasF32(logits, bias): # logits: BLHM [batch, length, heads, memory_length] # bias: BLHM [batch, length, heads, memory_length] # (in case of attention with relative bias) OR # # BLM [batch, length, memory_length] # (default masking bias with very negative logits). bias = tf.cast(bias, logits.dtype) if bias.shape.ndims == 3: # Expanding the 'heads' dimension retval = logits + tf.expand_dims(bias, 2) else: assert bias.shape.ndims == 4 retval = logits + bias return retval def _ReduceLogsumexp(x): max_logit = tf.math.reduce_max( tf.stop_gradient(x), axis=-1, keepdims=True) extra_logit = p.attention_extra_logit if extra_logit is not None: extra_logit = tf.convert_to_tensor(extra_logit, max_logit.dtype) max_logit = tf.math.maximum(max_logit, extra_logit) x -= max_logit exp_x = tf.math.exp(x) sum_exp_x = tf.math.reduce_sum(exp_x, axis=-1, keepdims=True) if extra_logit is not None: sum_exp_x += tf.math.exp(extra_logit - max_logit) return tf.math.log(sum_exp_x) + max_logit def _LogSoftmax(x): return x - _ReduceLogsumexp(x) def _LogitsFnF32(q, k): # logits.dtype == tf.float32 leads to better training stability if p.attention_logits_dtype is not None: q = tf.cast(q, p.attention_logits_dtype) k = tf.cast(k, p.attention_logits_dtype) return tf.einsum('BLHD,BMHD->BLHM', q, k) def _SoftmaxF32(x): # expecting x.dtype == tf.float32 # # TODO(lepikhin): consider # if p.attention_extra_logit is None: # return tf.nn.softmax(x) softmax = tf.math.exp(_LogSoftmax(x)) softmax = tf.cast(softmax, py_utils.FPropDtype(self.params)) return softmax return self._Graph( name, ['_q', '_k', '_v', 'bias'], ['outputs'], ('_q->q', self.Split('_q')), ('_k->k', self.Split('_k')), ('_v->v', self.Split('_v')), ('q,k->l', self._Fn('logits', fn=_LogitsFnF32)), ('l,bias->logits', self._Fn('bias', fn=_AddBiasF32)), ('logits->w', self._Fn('weights', _SoftmaxF32)), ('w->weights', self._Dropout('dropout', 1 - self.params.attention_dropout_prob)), ('weights,v->outputs', self._Fn( 'outputs', fn=lambda weights, v: tf.einsum('BLHM,BMHD->BLHD', weights, v))), )
[ "def", "Attention", "(", "self", ",", "name", ")", ":", "p", "=", "self", ".", "params", "def", "_AddBiasF32", "(", "logits", ",", "bias", ")", ":", "# logits: BLHM [batch, length, heads, memory_length]", "# bias: BLHM [batch, length, heads, memory_length]", "# (in case of attention with relative bias) OR", "#", "# BLM [batch, length, memory_length]", "# (default masking bias with very negative logits).", "bias", "=", "tf", ".", "cast", "(", "bias", ",", "logits", ".", "dtype", ")", "if", "bias", ".", "shape", ".", "ndims", "==", "3", ":", "# Expanding the 'heads' dimension", "retval", "=", "logits", "+", "tf", ".", "expand_dims", "(", "bias", ",", "2", ")", "else", ":", "assert", "bias", ".", "shape", ".", "ndims", "==", "4", "retval", "=", "logits", "+", "bias", "return", "retval", "def", "_ReduceLogsumexp", "(", "x", ")", ":", "max_logit", "=", "tf", ".", "math", ".", "reduce_max", "(", "tf", ".", "stop_gradient", "(", "x", ")", ",", "axis", "=", "-", "1", ",", "keepdims", "=", "True", ")", "extra_logit", "=", "p", ".", "attention_extra_logit", "if", "extra_logit", "is", "not", "None", ":", "extra_logit", "=", "tf", ".", "convert_to_tensor", "(", "extra_logit", ",", "max_logit", ".", "dtype", ")", "max_logit", "=", "tf", ".", "math", ".", "maximum", "(", "max_logit", ",", "extra_logit", ")", "x", "-=", "max_logit", "exp_x", "=", "tf", ".", "math", ".", "exp", "(", "x", ")", "sum_exp_x", "=", "tf", ".", "math", ".", "reduce_sum", "(", "exp_x", ",", "axis", "=", "-", "1", ",", "keepdims", "=", "True", ")", "if", "extra_logit", "is", "not", "None", ":", "sum_exp_x", "+=", "tf", ".", "math", ".", "exp", "(", "extra_logit", "-", "max_logit", ")", "return", "tf", ".", "math", ".", "log", "(", "sum_exp_x", ")", "+", "max_logit", "def", "_LogSoftmax", "(", "x", ")", ":", "return", "x", "-", "_ReduceLogsumexp", "(", "x", ")", "def", "_LogitsFnF32", "(", "q", ",", "k", ")", ":", "# logits.dtype == tf.float32 leads to better training stability", "if", "p", ".", "attention_logits_dtype", "is", "not", "None", ":", "q", "=", "tf", ".", "cast", "(", "q", ",", "p", ".", "attention_logits_dtype", ")", "k", "=", "tf", ".", "cast", "(", "k", ",", "p", ".", "attention_logits_dtype", ")", "return", "tf", ".", "einsum", "(", "'BLHD,BMHD->BLHM'", ",", "q", ",", "k", ")", "def", "_SoftmaxF32", "(", "x", ")", ":", "# expecting x.dtype == tf.float32", "#", "# TODO(lepikhin): consider", "# if p.attention_extra_logit is None:", "# return tf.nn.softmax(x)", "softmax", "=", "tf", ".", "math", ".", "exp", "(", "_LogSoftmax", "(", "x", ")", ")", "softmax", "=", "tf", ".", "cast", "(", "softmax", ",", "py_utils", ".", "FPropDtype", "(", "self", ".", "params", ")", ")", "return", "softmax", "return", "self", ".", "_Graph", "(", "name", ",", "[", "'_q'", ",", "'_k'", ",", "'_v'", ",", "'bias'", "]", ",", "[", "'outputs'", "]", ",", "(", "'_q->q'", ",", "self", ".", "Split", "(", "'_q'", ")", ")", ",", "(", "'_k->k'", ",", "self", ".", "Split", "(", "'_k'", ")", ")", ",", "(", "'_v->v'", ",", "self", ".", "Split", "(", "'_v'", ")", ")", ",", "(", "'q,k->l'", ",", "self", ".", "_Fn", "(", "'logits'", ",", "fn", "=", "_LogitsFnF32", ")", ")", ",", "(", "'l,bias->logits'", ",", "self", ".", "_Fn", "(", "'bias'", ",", "fn", "=", "_AddBiasF32", ")", ")", ",", "(", "'logits->w'", ",", "self", ".", "_Fn", "(", "'weights'", ",", "_SoftmaxF32", ")", ")", ",", "(", "'w->weights'", ",", "self", ".", "_Dropout", "(", "'dropout'", ",", "1", "-", "self", ".", "params", ".", "attention_dropout_prob", ")", ")", ",", "(", 
"'weights,v->outputs'", ",", "self", ".", "_Fn", "(", "'outputs'", ",", "fn", "=", "lambda", "weights", ",", "v", ":", "tf", ".", "einsum", "(", "'BLHM,BMHD->BLHD'", ",", "weights", ",", "v", ")", ")", ")", ",", ")" ]
https://github.com/tensorflow/lingvo/blob/ce10019243d954c3c3ebe739f7589b5eebfdf907/lingvo/core/gshard_builder.py#L888-L969
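A minimal NumPy sketch of the max-subtracted softmax with an optional extra logit, mirroring the arithmetic of _ReduceLogsumexp/_SoftmaxF32 above; the input values are illustrative and the sharded-graph plumbing is omitted.

import numpy as np

def softmax_with_extra_logit(x, extra_logit=None, axis=-1):
    # Subtract the running maximum for numerical stability.
    max_logit = np.max(x, axis=axis, keepdims=True)
    if extra_logit is not None:
        max_logit = np.maximum(max_logit, extra_logit)
    shifted = x - max_logit
    exp_x = np.exp(shifted)
    denom = np.sum(exp_x, axis=axis, keepdims=True)
    if extra_logit is not None:
        # The extra logit adds probability mass to the denominator only,
        # so the returned weights can sum to less than 1.
        denom = denom + np.exp(extra_logit - max_logit)
    return exp_x / denom

logits = np.array([[1.0, 2.0, 3.0]])
print(softmax_with_extra_logit(logits))                   # ordinary softmax
print(softmax_with_extra_logit(logits, extra_logit=0.0))  # dampened weights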
mypaint/mypaint
90b36dbc7b8bd2f323383f7edf608a5e0a3a1a33
lib/floodfill.py
python
enqueue_overflows
(queue, tile_coord, seeds, tiles_bbox, *p)
Conditionally add (coordinate, seed list, data...) tuples to a queue. :param queue: the queue which may be appended to :type queue: list :param tile_coord: the 2d coordinate in the middle of the seed coordinates :type tile_coord: (int, int) :param seeds: 4-tuple of seed lists for n, e, s, w, relative to tile_coord :type seeds: (list, list, list, list) :param tiles_bbox: the bounding box of the fill operation :type tiles_bbox: lib.fill_common.TileBoundingBox :param p: tuples of length >= 4, items added to queue items with the same index NOTE: This function improves readability significantly in exchange for a small performance hit. Replace with explicit queueing if too slow.
Conditionally add (coordinate, seed list, data...) tuples to a queue.
[ "Conditionally", "add", "(", "coordinate", "seed", "list", "data", "...", ")", "tuples", "to", "a", "queue", "." ]
def enqueue_overflows(queue, tile_coord, seeds, tiles_bbox, *p): """ Conditionally add (coordinate, seed list, data...) tuples to a queue. :param queue: the queue which may be appended to :type queue: list :param tile_coord: the 2d coordinate in the middle of the seed coordinates :type tile_coord: (int, int) :param seeds: 4-tuple of seed lists for n, e, s, w, relative to tile_coord :type seeds: (list, list, list, list) :param tiles_bbox: the bounding box of the fill operation :type tiles_bbox: lib.fill_common.TileBoundingBox :param p: tuples of length >= 4, items added to queue items with the same index NOTE: This function improves readability significantly in exchange for a small performance hit. Replace with explicit queueing if too slow. """ for edge in zip(*(fc.orthogonal(tile_coord), seeds) + p): edge_coord = edge[0] edge_seeds = edge[1] if edge_seeds and not tiles_bbox.outside(edge_coord): queue.append(edge)
[ "def", "enqueue_overflows", "(", "queue", ",", "tile_coord", ",", "seeds", ",", "tiles_bbox", ",", "*", "p", ")", ":", "for", "edge", "in", "zip", "(", "*", "(", "fc", ".", "orthogonal", "(", "tile_coord", ")", ",", "seeds", ")", "+", "p", ")", ":", "edge_coord", "=", "edge", "[", "0", "]", "edge_seeds", "=", "edge", "[", "1", "]", "if", "edge_seeds", "and", "not", "tiles_bbox", ".", "outside", "(", "edge_coord", ")", ":", "queue", ".", "append", "(", "edge", ")" ]
https://github.com/mypaint/mypaint/blob/90b36dbc7b8bd2f323383f7edf608a5e0a3a1a33/lib/floodfill.py#L56-L76
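A self-contained sketch of the queueing pattern above; `orthogonal` and the bounding-box class are stand-ins for fc.orthogonal and lib.fill_common.TileBoundingBox, whose exact semantics are assumed here (four orthogonal neighbours in n, e, s, w order, and an inclusive rectangle test).

def orthogonal(tc):
    x, y = tc
    return [(x, y - 1), (x + 1, y), (x, y + 1), (x - 1, y)]  # n, e, s, w

class Bbox:
    def __init__(self, x0, y0, x1, y1):
        self.x0, self.y0, self.x1, self.y1 = x0, y0, x1, y1
    def outside(self, tc):
        x, y = tc
        return not (self.x0 <= x <= self.x1 and self.y0 <= y <= self.y1)

queue = []
bbox = Bbox(0, 0, 2, 2)
seeds = ([(0, 0)], [], [(1, 1)], [(2, 2)])  # n, e, s, w seed lists
for edge_coord, edge_seeds in zip(orthogonal((0, 0)), seeds):
    # Skip empty seed lists and tiles that fall outside the fill bbox.
    if edge_seeds and not bbox.outside(edge_coord):
        queue.append((edge_coord, edge_seeds))
print(queue)  # only the south edge (0, 1) survives; n and w are out of bounds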
general03/flask-autoindex
424246242c9f40aeb9ac2c8c63f4d2234024256e
.eggs/click-7.1.1-py3.7.egg/click/formatting.py
python
HelpFormatter.write_text
(self, text)
Writes re-indented text into the buffer. This rewraps and preserves paragraphs.
Writes re-indented text into the buffer. This rewraps and preserves paragraphs.
[ "Writes", "re", "-", "indented", "text", "into", "the", "buffer", ".", "This", "rewraps", "and", "preserves", "paragraphs", "." ]
def write_text(self, text): """Writes re-indented text into the buffer. This rewraps and preserves paragraphs. """ text_width = max(self.width - self.current_indent, 11) indent = " " * self.current_indent self.write( wrap_text( text, text_width, initial_indent=indent, subsequent_indent=indent, preserve_paragraphs=True, ) ) self.write("\n")
[ "def", "write_text", "(", "self", ",", "text", ")", ":", "text_width", "=", "max", "(", "self", ".", "width", "-", "self", ".", "current_indent", ",", "11", ")", "indent", "=", "\" \"", "*", "self", ".", "current_indent", "self", ".", "write", "(", "wrap_text", "(", "text", ",", "text_width", ",", "initial_indent", "=", "indent", ",", "subsequent_indent", "=", "indent", ",", "preserve_paragraphs", "=", "True", ",", ")", ")", "self", ".", "write", "(", "\"\\n\"", ")" ]
https://github.com/general03/flask-autoindex/blob/424246242c9f40aeb9ac2c8c63f4d2234024256e/.eggs/click-7.1.1-py3.7.egg/click/formatting.py#L173-L188
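A usage sketch, assuming click's public export of HelpFormatter; it shows the rewrapping and indent preservation that write_text performs (the width of 40 is illustrative).

from click import HelpFormatter

formatter = HelpFormatter(width=40)
formatter.indent()  # raise current_indent by one level
formatter.write_text(
    "A long help paragraph that will be rewrapped to the formatter "
    "width while the current indentation level is preserved."
)
print(formatter.getvalue())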
RedTeamOperations/PivotSuite
9078d1ede1f076d30b6d72ca14e05ddf991f51f4
pivot_suite/ntlm_auth/compute_response.py
python
ComputeResponse._get_channel_bindings_value
(server_certificate_hash)
return channel_bindings
https://msdn.microsoft.com/en-us/library/windows/desktop/dd919963%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396 https://blogs.msdn.microsoft.com/openspecification/2013/03/26/ntlm-and-channel-binding-hash-aka-extended-protection-for-authentication/ Gets the MD5 hash of the gss_channel_bindings_struct to add to the AV_PAIR MSV_AV_CHANNEL_BINDINGS. This method takes in the SHA256 hash (Hash of the DER encoded certificate of the server we are connecting to) and adds it to the gss_channel_bindings_struct. It then gets the MD5 hash and converts this to a byte array in preparation of adding it to the AV_PAIR structure. :param server_certificate_hash: The SHA256 hash of the server certificate (DER encoded) NTLM is authenticated to :return channel_bindings: An MD5 hash of the gss_channel_bindings_struct to add to the AV_PAIR MsvChannelBindings
https://msdn.microsoft.com/en-us/library/windows/desktop/dd919963%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396 https://blogs.msdn.microsoft.com/openspecification/2013/03/26/ntlm-and-channel-binding-hash-aka-extended-protection-for-authentication/
[ "https", ":", "//", "msdn", ".", "microsoft", ".", "com", "/", "en", "-", "us", "/", "library", "/", "windows", "/", "desktop", "/", "dd919963%28v", "=", "vs", ".", "85%29", ".", "aspx?f", "=", "255&MSPPError", "=", "-", "2147217396", "https", ":", "//", "blogs", ".", "msdn", ".", "microsoft", ".", "com", "/", "openspecification", "/", "2013", "/", "03", "/", "26", "/", "ntlm", "-", "and", "-", "channel", "-", "binding", "-", "hash", "-", "aka", "-", "extended", "-", "protection", "-", "for", "-", "authentication", "/" ]
def _get_channel_bindings_value(server_certificate_hash): """ https://msdn.microsoft.com/en-us/library/windows/desktop/dd919963%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396 https://blogs.msdn.microsoft.com/openspecification/2013/03/26/ntlm-and-channel-binding-hash-aka-extended-protection-for-authentication/ Gets the MD5 hash of the gss_channel_bindings_struct to add to the AV_PAIR MSV_AV_CHANNEL_BINDINGS. This method takes in the SHA256 hash (Hash of the DER encoded certificate of the server we are connecting to) and adds it to the gss_channel_bindings_struct. It then gets the MD5 hash and converts this to a byte array in preparation of adding it to the AV_PAIR structure. :param server_certificate_hash: The SHA256 hash of the server certificate (DER encoded) NTLM is authenticated to :return channel_bindings: An MD5 hash of the gss_channel_bindings_struct to add to the AV_PAIR MsvChannelBindings """ # Channel Binding Tokens support, used for NTLMv2 # Decode the SHA256 certificate hash certificate_digest = base64.b16decode(server_certificate_hash) # Initialise the GssChannelBindingsStruct and add the certificate_digest to the application_data field gss_channel_bindings = GssChannelBindingsStruct() gss_channel_bindings[gss_channel_bindings.APPLICATION_DATA] = 'tls-server-end-point:'.encode() + certificate_digest # Get the gss_channel_bindings_struct and create an MD5 hash channel_bindings_struct_data = gss_channel_bindings.get_data() channel_bindings_hash = hashlib.md5(channel_bindings_struct_data).hexdigest() try: cbt_value = bytearray.fromhex(channel_bindings_hash) except TypeError: # Work-around for Python 2.6 bug cbt_value = bytearray.fromhex(unicode(channel_bindings_hash)) channel_bindings = bytes(cbt_value) return channel_bindings
[ "def", "_get_channel_bindings_value", "(", "server_certificate_hash", ")", ":", "# Channel Binding Tokens support, used for NTLMv2", "# Decode the SHA256 certificate hash", "certificate_digest", "=", "base64", ".", "b16decode", "(", "server_certificate_hash", ")", "# Initialise the GssChannelBindingsStruct and add the certificate_digest to the application_data field", "gss_channel_bindings", "=", "GssChannelBindingsStruct", "(", ")", "gss_channel_bindings", "[", "gss_channel_bindings", ".", "APPLICATION_DATA", "]", "=", "'tls-server-end-point:'", ".", "encode", "(", ")", "+", "certificate_digest", "# Get the gss_channel_bindings_struct and create an MD5 hash", "channel_bindings_struct_data", "=", "gss_channel_bindings", ".", "get_data", "(", ")", "channel_bindings_hash", "=", "hashlib", ".", "md5", "(", "channel_bindings_struct_data", ")", ".", "hexdigest", "(", ")", "try", ":", "cbt_value", "=", "bytearray", ".", "fromhex", "(", "channel_bindings_hash", ")", "except", "TypeError", ":", "# Work-around for Python 2.6 bug", "cbt_value", "=", "bytearray", ".", "fromhex", "(", "unicode", "(", "channel_bindings_hash", ")", ")", "channel_bindings", "=", "bytes", "(", "cbt_value", ")", "return", "channel_bindings" ]
https://github.com/RedTeamOperations/PivotSuite/blob/9078d1ede1f076d30b6d72ca14e05ddf991f51f4/pivot_suite/ntlm_auth/compute_response.py#L360-L392
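A hedged sketch of producing the server_certificate_hash argument expected above: a base16 (hex) SHA256 digest of the server's DER-encoded certificate. The host and port are illustrative, and a live TLS connection is assumed.

import hashlib
import ssl

# Fetch the server certificate as PEM, convert to DER, then hash it.
pem_cert = ssl.get_server_certificate(("example.com", 443))
der_cert = ssl.PEM_cert_to_DER_cert(pem_cert)
# base64.b16decode (used above) expects upper-case hex digits.
server_certificate_hash = hashlib.sha256(der_cert).hexdigest().upper()
print(server_certificate_hash)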
ethereum/trinity
6383280c5044feb06695ac2f7bc1100b7bcf4fe0
p2p/kademlia.py
python
Address.is_unspecified
(self)
return self._ip.is_unspecified
[]
def is_unspecified(self) -> bool: return self._ip.is_unspecified
[ "def", "is_unspecified", "(", "self", ")", "->", "bool", ":", "return", "self", ".", "_ip", ".", "is_unspecified" ]
https://github.com/ethereum/trinity/blob/6383280c5044feb06695ac2f7bc1100b7bcf4fe0/p2p/kademlia.py#L72-L73
cagbal/ros_people_object_detection_tensorflow
982ffd4a54b8059638f5cd4aa167299c7fc9e61f
src/object_detection/create_pascal_tf_record.py
python
dict_to_tf_example
(data, dataset_directory, label_map_dict, ignore_difficult_instances=False, image_subdirectory='JPEGImages')
return example
Convert XML derived dict to tf.Example proto. Notice that this function normalizes the bounding box coordinates provided by the raw data. Args: data: dict holding PASCAL XML fields for a single image (obtained by running dataset_util.recursive_parse_xml_to_dict) dataset_directory: Path to root directory holding PASCAL dataset label_map_dict: A map from string label names to integers ids. ignore_difficult_instances: Whether to skip difficult instances in the dataset (default: False). image_subdirectory: String specifying subdirectory within the PASCAL dataset directory holding the actual image data. Returns: example: The converted tf.Example. Raises: ValueError: if the image pointed to by data['filename'] is not a valid JPEG
Convert XML derived dict to tf.Example proto.
[ "Convert", "XML", "derived", "dict", "to", "tf", ".", "Example", "proto", "." ]
def dict_to_tf_example(data, dataset_directory, label_map_dict, ignore_difficult_instances=False, image_subdirectory='JPEGImages'): """Convert XML derived dict to tf.Example proto. Notice that this function normalizes the bounding box coordinates provided by the raw data. Args: data: dict holding PASCAL XML fields for a single image (obtained by running dataset_util.recursive_parse_xml_to_dict) dataset_directory: Path to root directory holding PASCAL dataset label_map_dict: A map from string label names to integers ids. ignore_difficult_instances: Whether to skip difficult instances in the dataset (default: False). image_subdirectory: String specifying subdirectory within the PASCAL dataset directory holding the actual image data. Returns: example: The converted tf.Example. Raises: ValueError: if the image pointed to by data['filename'] is not a valid JPEG """ img_path = os.path.join(data['folder'], image_subdirectory, data['filename']) full_path = os.path.join(dataset_directory, img_path) with tf.gfile.GFile(full_path, 'rb') as fid: encoded_jpg = fid.read() encoded_jpg_io = io.BytesIO(encoded_jpg) image = PIL.Image.open(encoded_jpg_io) if image.format != 'JPEG': raise ValueError('Image format not JPEG') key = hashlib.sha256(encoded_jpg).hexdigest() width = int(data['size']['width']) height = int(data['size']['height']) xmin = [] ymin = [] xmax = [] ymax = [] classes = [] classes_text = [] truncated = [] poses = [] difficult_obj = [] for obj in data['object']: difficult = bool(int(obj['difficult'])) if ignore_difficult_instances and difficult: continue difficult_obj.append(int(difficult)) xmin.append(float(obj['bndbox']['xmin']) / width) ymin.append(float(obj['bndbox']['ymin']) / height) xmax.append(float(obj['bndbox']['xmax']) / width) ymax.append(float(obj['bndbox']['ymax']) / height) classes_text.append(obj['name'].encode('utf8')) classes.append(label_map_dict[obj['name']]) truncated.append(int(obj['truncated'])) poses.append(obj['pose'].encode('utf8')) example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': dataset_util.int64_feature(height), 'image/width': dataset_util.int64_feature(width), 'image/filename': dataset_util.bytes_feature( data['filename'].encode('utf8')), 'image/source_id': dataset_util.bytes_feature( data['filename'].encode('utf8')), 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')), 'image/encoded': dataset_util.bytes_feature(encoded_jpg), 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin), 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax), 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin), 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax), 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), 'image/object/class/label': dataset_util.int64_list_feature(classes), 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj), 'image/object/truncated': dataset_util.int64_list_feature(truncated), 'image/object/view': dataset_util.bytes_list_feature(poses), })) return example
[ "def", "dict_to_tf_example", "(", "data", ",", "dataset_directory", ",", "label_map_dict", ",", "ignore_difficult_instances", "=", "False", ",", "image_subdirectory", "=", "'JPEGImages'", ")", ":", "img_path", "=", "os", ".", "path", ".", "join", "(", "data", "[", "'folder'", "]", ",", "image_subdirectory", ",", "data", "[", "'filename'", "]", ")", "full_path", "=", "os", ".", "path", ".", "join", "(", "dataset_directory", ",", "img_path", ")", "with", "tf", ".", "gfile", ".", "GFile", "(", "full_path", ",", "'rb'", ")", "as", "fid", ":", "encoded_jpg", "=", "fid", ".", "read", "(", ")", "encoded_jpg_io", "=", "io", ".", "BytesIO", "(", "encoded_jpg", ")", "image", "=", "PIL", ".", "Image", ".", "open", "(", "encoded_jpg_io", ")", "if", "image", ".", "format", "!=", "'JPEG'", ":", "raise", "ValueError", "(", "'Image format not JPEG'", ")", "key", "=", "hashlib", ".", "sha256", "(", "encoded_jpg", ")", ".", "hexdigest", "(", ")", "width", "=", "int", "(", "data", "[", "'size'", "]", "[", "'width'", "]", ")", "height", "=", "int", "(", "data", "[", "'size'", "]", "[", "'height'", "]", ")", "xmin", "=", "[", "]", "ymin", "=", "[", "]", "xmax", "=", "[", "]", "ymax", "=", "[", "]", "classes", "=", "[", "]", "classes_text", "=", "[", "]", "truncated", "=", "[", "]", "poses", "=", "[", "]", "difficult_obj", "=", "[", "]", "for", "obj", "in", "data", "[", "'object'", "]", ":", "difficult", "=", "bool", "(", "int", "(", "obj", "[", "'difficult'", "]", ")", ")", "if", "ignore_difficult_instances", "and", "difficult", ":", "continue", "difficult_obj", ".", "append", "(", "int", "(", "difficult", ")", ")", "xmin", ".", "append", "(", "float", "(", "obj", "[", "'bndbox'", "]", "[", "'xmin'", "]", ")", "/", "width", ")", "ymin", ".", "append", "(", "float", "(", "obj", "[", "'bndbox'", "]", "[", "'ymin'", "]", ")", "/", "height", ")", "xmax", ".", "append", "(", "float", "(", "obj", "[", "'bndbox'", "]", "[", "'xmax'", "]", ")", "/", "width", ")", "ymax", ".", "append", "(", "float", "(", "obj", "[", "'bndbox'", "]", "[", "'ymax'", "]", ")", "/", "height", ")", "classes_text", ".", "append", "(", "obj", "[", "'name'", "]", ".", "encode", "(", "'utf8'", ")", ")", "classes", ".", "append", "(", "label_map_dict", "[", "obj", "[", "'name'", "]", "]", ")", "truncated", ".", "append", "(", "int", "(", "obj", "[", "'truncated'", "]", ")", ")", "poses", ".", "append", "(", "obj", "[", "'pose'", "]", ".", "encode", "(", "'utf8'", ")", ")", "example", "=", "tf", ".", "train", ".", "Example", "(", "features", "=", "tf", ".", "train", ".", "Features", "(", "feature", "=", "{", "'image/height'", ":", "dataset_util", ".", "int64_feature", "(", "height", ")", ",", "'image/width'", ":", "dataset_util", ".", "int64_feature", "(", "width", ")", ",", "'image/filename'", ":", "dataset_util", ".", "bytes_feature", "(", "data", "[", "'filename'", "]", ".", "encode", "(", "'utf8'", ")", ")", ",", "'image/source_id'", ":", "dataset_util", ".", "bytes_feature", "(", "data", "[", "'filename'", "]", ".", "encode", "(", "'utf8'", ")", ")", ",", "'image/key/sha256'", ":", "dataset_util", ".", "bytes_feature", "(", "key", ".", "encode", "(", "'utf8'", ")", ")", ",", "'image/encoded'", ":", "dataset_util", ".", "bytes_feature", "(", "encoded_jpg", ")", ",", "'image/format'", ":", "dataset_util", ".", "bytes_feature", "(", "'jpeg'", ".", "encode", "(", "'utf8'", ")", ")", ",", "'image/object/bbox/xmin'", ":", "dataset_util", ".", "float_list_feature", "(", "xmin", ")", ",", "'image/object/bbox/xmax'", ":", "dataset_util", ".", 
"float_list_feature", "(", "xmax", ")", ",", "'image/object/bbox/ymin'", ":", "dataset_util", ".", "float_list_feature", "(", "ymin", ")", ",", "'image/object/bbox/ymax'", ":", "dataset_util", ".", "float_list_feature", "(", "ymax", ")", ",", "'image/object/class/text'", ":", "dataset_util", ".", "bytes_list_feature", "(", "classes_text", ")", ",", "'image/object/class/label'", ":", "dataset_util", ".", "int64_list_feature", "(", "classes", ")", ",", "'image/object/difficult'", ":", "dataset_util", ".", "int64_list_feature", "(", "difficult_obj", ")", ",", "'image/object/truncated'", ":", "dataset_util", ".", "int64_list_feature", "(", "truncated", ")", ",", "'image/object/view'", ":", "dataset_util", ".", "bytes_list_feature", "(", "poses", ")", ",", "}", ")", ")", "return", "example" ]
https://github.com/cagbal/ros_people_object_detection_tensorflow/blob/982ffd4a54b8059638f5cd4aa167299c7fc9e61f/src/object_detection/create_pascal_tf_record.py#L58-L142
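The normalisation step above in isolation: PASCAL VOC stores absolute pixel coordinates, while the record keeps them as [0, 1] fractions of the image size. The numbers below are illustrative.

# width/height come from data['size']; bndbox from one data['object'] entry.
width, height = 500, 375
bndbox = {"xmin": "48", "ymin": "240", "xmax": "195", "ymax": "371"}
xmin = float(bndbox["xmin"]) / width    # 0.096
ymin = float(bndbox["ymin"]) / height   # 0.64
xmax = float(bndbox["xmax"]) / width    # 0.39
ymax = float(bndbox["ymax"]) / height   # 0.9893...
print(xmin, ymin, xmax, ymax)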
facebookresearch/pysparnn
c299c825fd99f263f3957e9b31197daf23a1e7a3
pysparnn/matrix_distance.py
python
DenseCosineDistance._distance
(self, a_matrix)
return 1 - (dprod * magnitude)
Vectorised cosine distance
Vectorised cosine distance
[ "Vectorised", "cosine", "distance" ]
def _distance(self, a_matrix): """Vectorised cosine distance""" # what is the implmentation of transpose? can i change the order? dprod = self.matrix.dot(a_matrix.transpose()).transpose() * 1.0 a_root_sum_square = (a_matrix ** 2).sum(axis=1).reshape(-1) a_root_sum_square = a_root_sum_square.reshape(len(a_root_sum_square), 1) a_root_sum_square = _np.sqrt(a_root_sum_square) magnitude = 1.0 / (a_root_sum_square * self.matrix_root_sum_square) return 1 - (dprod * magnitude)
[ "def", "_distance", "(", "self", ",", "a_matrix", ")", ":", "# what is the implmentation of transpose? can i change the order?", "dprod", "=", "self", ".", "matrix", ".", "dot", "(", "a_matrix", ".", "transpose", "(", ")", ")", ".", "transpose", "(", ")", "*", "1.0", "a_root_sum_square", "=", "(", "a_matrix", "**", "2", ")", ".", "sum", "(", "axis", "=", "1", ")", ".", "reshape", "(", "-", "1", ")", "a_root_sum_square", "=", "a_root_sum_square", ".", "reshape", "(", "len", "(", "a_root_sum_square", ")", ",", "1", ")", "a_root_sum_square", "=", "_np", ".", "sqrt", "(", "a_root_sum_square", ")", "magnitude", "=", "1.0", "/", "(", "a_root_sum_square", "*", "self", ".", "matrix_root_sum_square", ")", "return", "1", "-", "(", "dprod", "*", "magnitude", ")" ]
https://github.com/facebookresearch/pysparnn/blob/c299c825fd99f263f3957e9b31197daf23a1e7a3/pysparnn/matrix_distance.py#L331-L342
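A NumPy sketch of the vectorised cosine distance above on small dense matrices; the data is illustrative and the sparse-matrix plumbing is omitted.

import numpy as np

matrix = np.array([[1.0, 0.0], [1.0, 1.0]])   # indexed records
a_matrix = np.array([[0.0, 2.0]])             # query rows

dprod = matrix.dot(a_matrix.T).T                          # queries x records
a_norm = np.sqrt((a_matrix ** 2).sum(axis=1)).reshape(-1, 1)
m_norm = np.sqrt((matrix ** 2).sum(axis=1)).reshape(1, -1)
dist = 1 - dprod / (a_norm * m_norm)
print(dist)  # [[1.0, 0.2928...]]: orthogonal -> 1, 45 degrees -> 1 - cos(45)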
nosmokingbandit/Watcher3
0217e75158b563bdefc8e01c3be7620008cf3977
lib/requests/packages/urllib3/poolmanager.py
python
PoolManager.connection_from_host
(self, host, port=None, scheme='http')
return self.connection_from_context(request_context)
Get a :class:`ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``.
Get a :class:`ConnectionPool` based on the host, port, and scheme.
[ "Get", "a", ":", "class", ":", "ConnectionPool", "based", "on", "the", "host", "port", "and", "scheme", "." ]
def connection_from_host(self, host, port=None, scheme='http'): """ Get a :class:`ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. """ if not host: raise LocationValueError("No host specified.") request_context = self.connection_pool_kw.copy() request_context['scheme'] = scheme or 'http' if not port: port = port_by_scheme.get(request_context['scheme'].lower(), 80) request_context['port'] = port request_context['host'] = host return self.connection_from_context(request_context)
[ "def", "connection_from_host", "(", "self", ",", "host", ",", "port", "=", "None", ",", "scheme", "=", "'http'", ")", ":", "if", "not", "host", ":", "raise", "LocationValueError", "(", "\"No host specified.\"", ")", "request_context", "=", "self", ".", "connection_pool_kw", ".", "copy", "(", ")", "request_context", "[", "'scheme'", "]", "=", "scheme", "or", "'http'", "if", "not", "port", ":", "port", "=", "port_by_scheme", ".", "get", "(", "request_context", "[", "'scheme'", "]", ".", "lower", "(", ")", ",", "80", ")", "request_context", "[", "'port'", "]", "=", "port", "request_context", "[", "'host'", "]", "=", "host", "return", "self", ".", "connection_from_context", "(", "request_context", ")" ]
https://github.com/nosmokingbandit/Watcher3/blob/0217e75158b563bdefc8e01c3be7620008cf3977/lib/requests/packages/urllib3/poolmanager.py#L158-L176
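A usage sketch against urllib3's public API, showing the port being derived from the scheme when it is omitted.

import urllib3

http = urllib3.PoolManager(num_pools=10)
pool = http.connection_from_host("example.com", scheme="https")
print(pool.host, pool.port)  # example.com 443 (443 comes from port_by_scheme)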
pymedusa/Medusa
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
lib/pymediainfo/__init__.py
python
MediaInfo.tracks
(self)
return self._tracks
A list of :py:class:`Track` objects which the media file contains. For instance: >>> mi = pymediainfo.MediaInfo.parse("/path/to/file.mp4") >>> for t in mi.tracks: ... print(t) <Track track_id='None', track_type='General'> <Track track_id='1', track_type='Text'>
A list of :py:class:`Track` objects which the media file contains.
[ "A", "list", "of", ":", "py", ":", "class", ":", "Track", "objects", "which", "the", "media", "file", "contains", "." ]
def tracks(self): """ A list of :py:class:`Track` objects which the media file contains. For instance: >>> mi = pymediainfo.MediaInfo.parse("/path/to/file.mp4") >>> for t in mi.tracks: ... print(t) <Track track_id='None', track_type='General'> <Track track_id='1', track_type='Text'> """ if not hasattr(self, "_tracks"): self._populate_tracks() return self._tracks
[ "def", "tracks", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"_tracks\"", ")", ":", "self", ".", "_populate_tracks", "(", ")", "return", "self", ".", "_tracks" ]
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/lib/pymediainfo/__init__.py#L285-L299
openstack/openstacksdk
58384268487fa854f21c470b101641ab382c9897
openstack/clustering/v1/_proxy.py
python
Proxy.policy_types
(self, **query)
return self._list(_policy_type.PolicyType, **query)
Get a generator of policy types. :returns: A generator of objects that are of type :class:`~openstack.clustering.v1.policy_type.PolicyType`
Get a generator of policy types.
[ "Get", "a", "generator", "of", "policy", "types", "." ]
def policy_types(self, **query): """Get a generator of policy types. :returns: A generator of objects that are of type :class:`~openstack.clustering.v1.policy_type.PolicyType` """ return self._list(_policy_type.PolicyType, **query)
[ "def", "policy_types", "(", "self", ",", "*", "*", "query", ")", ":", "return", "self", ".", "_list", "(", "_policy_type", ".", "PolicyType", ",", "*", "*", "query", ")" ]
https://github.com/openstack/openstacksdk/blob/58384268487fa854f21c470b101641ab382c9897/openstack/clustering/v1/_proxy.py#L60-L66
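A usage sketch, assuming an openstacksdk Connection with the clustering (senlin) service available; the cloud profile name is illustrative.

import openstack

# connect() reads clouds.yaml; "mycloud" is a placeholder profile name.
conn = openstack.connect(cloud="mycloud")
for policy_type in conn.clustering.policy_types():
    print(policy_type.name)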
QCoDeS/Qcodes
3cda2cef44812e2aa4672781f2423bf5f816f9f9
qcodes/instrument_drivers/tektronix/Keithley_6500.py
python
Keithley_6500._get_mode_param
(self, parameter: str, parser: Callable[[str], T])
return parser(self.ask(cmd))
Reads the current mode of the multimeter and asks for the given parameter. Args: parameter: The asked parameter after getting the current mode. parser: A function that parses the input buffer read. Returns: Any: the parsed ask command. The parser determines the return data-type.
Reads the current mode of the multimeter and asks for the given parameter.
[ "Reads", "the", "current", "mode", "of", "the", "multimeter", "and", "asks", "for", "the", "given", "parameter", "." ]
def _get_mode_param(self, parameter: str, parser: Callable[[str], T]) -> T: """ Reads the current mode of the multimeter and asks for the given parameter. Args: parameter: The asked parameter after getting the current mode. parser: A function that parses the input buffer read. Returns: Any: the parsed ask command. The parser determines the return data-type. """ mode = _parse_output_string(self._mode_map[self.mode()]) cmd = f'{mode}:{parameter}?' return parser(self.ask(cmd))
[ "def", "_get_mode_param", "(", "self", ",", "parameter", ":", "str", ",", "parser", ":", "Callable", "[", "[", "str", "]", ",", "T", "]", ")", "->", "T", ":", "mode", "=", "_parse_output_string", "(", "self", ".", "_mode_map", "[", "self", ".", "mode", "(", ")", "]", ")", "cmd", "=", "f'{mode}:{parameter}?'", "return", "parser", "(", "self", ".", "ask", "(", "cmd", ")", ")" ]
https://github.com/QCoDeS/Qcodes/blob/3cda2cef44812e2aa4672781f2423bf5f816f9f9/qcodes/instrument_drivers/tektronix/Keithley_6500.py#L203-L215
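A self-contained sketch of the SCPI query construction above: the current mode string selects the subsystem, and the parser coerces the instrument's reply. Mode, parameter, and reply values are illustrative.

mode = "VOLT:DC"              # what _parse_output_string(...) might yield
parameter = "NPLC"
cmd = f"{mode}:{parameter}?"  # -> "VOLT:DC:NPLC?"
reply = "1.0"                 # what self.ask(cmd) might return
print(float(reply))           # the parser turns the raw reply into a float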
Ericsson/codechecker
c4e43f62dc3acbf71d3109b337db7c97f7852f43
tools/report-converter/codechecker_report_converter/analyzers/sanitizers/thread/analyzer_result.py
python
AnalyzerResult.get_reports
(self, file_path: str)
return Parser().get_reports(file_path)
Get reports from the given analyzer result.
Get reports from the given analyzer result.
[ "Get", "reports", "from", "the", "given", "analyzer", "result", "." ]
def get_reports(self, file_path: str) -> List[Report]: """ Get reports from the given analyzer result. """ return Parser().get_reports(file_path)
[ "def", "get_reports", "(", "self", ",", "file_path", ":", "str", ")", "->", "List", "[", "Report", "]", ":", "return", "Parser", "(", ")", ".", "get_reports", "(", "file_path", ")" ]
https://github.com/Ericsson/codechecker/blob/c4e43f62dc3acbf71d3109b337db7c97f7852f43/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/thread/analyzer_result.py#L24-L26
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/states/file.py
python
serialize
( name, dataset=None, dataset_pillar=None, user=None, group=None, mode=None, backup="", makedirs=False, show_changes=True, create=True, merge_if_exists=False, encoding=None, encoding_errors="strict", serializer=None, serializer_opts=None, deserializer_opts=None, **kwargs )
return __salt__["file.manage_file"]( name=name, sfn="", ret=ret, source=None, source_sum={}, user=user, group=group, mode=mode, attrs=None, saltenv=__env__, backup=backup, makedirs=makedirs, template=None, show_changes=show_changes, encoding=encoding, encoding_errors=encoding_errors, contents=contents, )
Serializes dataset and stores it into managed file. Useful for sharing simple configuration files. name The location of the file to create dataset The dataset that will be serialized dataset_pillar Operates like ``dataset``, but draws from a value stored in pillar, using the pillar path syntax used in :mod:`pillar.get <salt.modules.pillar.get>`. This is useful when the pillar value contains newlines, as referencing a pillar variable using a jinja/mako template can result in YAML formatting issues due to the newlines causing indentation mismatches. .. versionadded:: 2015.8.0 serializer (or formatter) Write the data as this format. See the list of :ref:`all-salt.serializers` for supported output formats. .. versionchanged:: 3002 ``serializer`` argument added as an alternative to ``formatter``. Both are accepted, but using both will result in an error. encoding If specified, then the specified encoding will be used. Otherwise, the file will be encoded using the system locale (usually UTF-8). See https://docs.python.org/3/library/codecs.html#standard-encodings for the list of available encodings. .. versionadded:: 2017.7.0 encoding_errors Error encoding scheme. Default is ``'strict'``. See https://docs.python.org/2/library/codecs.html#codec-base-classes for the list of available schemes. .. versionadded:: 2017.7.0 user The user to own the directory, this defaults to the user salt is running as on the minion group The group ownership set for the directory, this defaults to the group salt is running as on the minion mode The permissions to set on this file, e.g. ``644``, ``0775``, or ``4664``. The default mode for new files and directories corresponds to the umask of the salt process. For existing files and directories it's not enforced. .. note:: This option is **not** supported on Windows. backup Overrides the default backup mode for this specific file. makedirs Create parent directories for destination file. .. versionadded:: 2014.1.3 show_changes Output a unified diff of the old file and the new file. If ``False`` return a boolean if any changes were made. create Default is True, if create is set to False then the file will only be managed if the file already exists on the system. merge_if_exists Default is False, if merge_if_exists is True then the existing file will be parsed and the dataset passed in will be merged with the existing content .. versionadded:: 2014.7.0 serializer_opts Pass through options to serializer. For example: .. code-block:: yaml /etc/dummy/package.yaml file.serialize: - serializer: yaml - serializer_opts: - explicit_start: True - default_flow_style: True - indent: 4 The valid opts are the additional opts (i.e. not the data being serialized) for the function used to serialize the data. Documentation for these functions can be found in the list below: - For **yaml**: `yaml.dump()`_ - For **json**: `json.dumps()`_ - For **python**: `pprint.pformat()`_ - For **msgpack**: Run ``python -c 'import msgpack; help(msgpack.Packer)'`` to see the available options (``encoding``, ``unicode_errors``, etc.) .. _`yaml.dump()`: https://pyyaml.org/wiki/PyYAMLDocumentation .. _`json.dumps()`: https://docs.python.org/2/library/json.html#json.dumps .. _`pprint.pformat()`: https://docs.python.org/2/library/pprint.html#pprint.pformat deserializer_opts Like ``serializer_opts`` above, but only used when merging with an existing file (i.e. when ``merge_if_exists`` is set to ``True``). The options specified here will be passed to the deserializer to load the existing data, before merging with the specified data and re-serializing. .. code-block:: yaml /etc/dummy/package.yaml file.serialize: - serializer: yaml - serializer_opts: - explicit_start: True - default_flow_style: True - indent: 4 - deserializer_opts: - encoding: latin-1 - merge_if_exists: True The valid opts are the additional opts (i.e. not the data being deserialized) for the function used to deserialize the data. Documentation for these functions can be found in the list below: - For **yaml**: `yaml.load()`_ - For **json**: `json.loads()`_ .. _`yaml.load()`: https://pyyaml.org/wiki/PyYAMLDocumentation .. _`json.loads()`: https://docs.python.org/2/library/json.html#json.loads However, note that not all arguments are supported. For example, when deserializing JSON, arguments like ``parse_float`` and ``parse_int`` which accept a callable object cannot be handled in an SLS file. .. versionadded:: 2019.2.0 For example, this state: .. code-block:: yaml /etc/dummy/package.json: file.serialize: - dataset: name: naive description: A package using naive versioning author: A confused individual <iam@confused.com> dependencies: express: '>= 1.2.0' optimist: '>= 0.1.0' engine: node 0.4.1 - serializer: json will manage the file ``/etc/dummy/package.json``: .. code-block:: json { "author": "A confused individual <iam@confused.com>", "dependencies": { "express": ">= 1.2.0", "optimist": ">= 0.1.0" }, "description": "A package using naive versioning", "engine": "node 0.4.1", "name": "naive" }
Serializes dataset and stores it into managed file. Useful for sharing simple configuration files.
[ "Serializes", "dataset", "and", "store", "it", "into", "managed", "file", ".", "Useful", "for", "sharing", "simple", "configuration", "files", "." ]
def serialize( name, dataset=None, dataset_pillar=None, user=None, group=None, mode=None, backup="", makedirs=False, show_changes=True, create=True, merge_if_exists=False, encoding=None, encoding_errors="strict", serializer=None, serializer_opts=None, deserializer_opts=None, **kwargs ): """ Serializes dataset and stores it into managed file. Useful for sharing simple configuration files. name The location of the file to create dataset The dataset that will be serialized dataset_pillar Operates like ``dataset``, but draws from a value stored in pillar, using the pillar path syntax used in :mod:`pillar.get <salt.modules.pillar.get>`. This is useful when the pillar value contains newlines, as referencing a pillar variable using a jinja/mako template can result in YAML formatting issues due to the newlines causing indentation mismatches. .. versionadded:: 2015.8.0 serializer (or formatter) Write the data as this format. See the list of :ref:`all-salt.serializers` for supported output formats. .. versionchanged:: 3002 ``serializer`` argument added as an alternative to ``formatter``. Both are accepted, but using both will result in an error. encoding If specified, then the specified encoding will be used. Otherwise, the file will be encoded using the system locale (usually UTF-8). See https://docs.python.org/3/library/codecs.html#standard-encodings for the list of available encodings. .. versionadded:: 2017.7.0 encoding_errors Error encoding scheme. Default is ``'strict'``. See https://docs.python.org/2/library/codecs.html#codec-base-classes for the list of available schemes. .. versionadded:: 2017.7.0 user The user to own the directory, this defaults to the user salt is running as on the minion group The group ownership set for the directory, this defaults to the group salt is running as on the minion mode The permissions to set on this file, e.g. ``644``, ``0775``, or ``4664``. The default mode for new files and directories corresponds to the umask of the salt process. For existing files and directories it's not enforced. .. note:: This option is **not** supported on Windows. backup Overrides the default backup mode for this specific file. makedirs Create parent directories for destination file. .. versionadded:: 2014.1.3 show_changes Output a unified diff of the old file and the new file. If ``False`` return a boolean if any changes were made. create Default is True, if create is set to False then the file will only be managed if the file already exists on the system. merge_if_exists Default is False, if merge_if_exists is True then the existing file will be parsed and the dataset passed in will be merged with the existing content .. versionadded:: 2014.7.0 serializer_opts Pass through options to serializer. For example: .. code-block:: yaml /etc/dummy/package.yaml file.serialize: - serializer: yaml - serializer_opts: - explicit_start: True - default_flow_style: True - indent: 4 The valid opts are the additional opts (i.e. not the data being serialized) for the function used to serialize the data. Documentation for these functions can be found in the list below: - For **yaml**: `yaml.dump()`_ - For **json**: `json.dumps()`_ - For **python**: `pprint.pformat()`_ - For **msgpack**: Run ``python -c 'import msgpack; help(msgpack.Packer)'`` to see the available options (``encoding``, ``unicode_errors``, etc.) .. _`yaml.dump()`: https://pyyaml.org/wiki/PyYAMLDocumentation .. _`json.dumps()`: https://docs.python.org/2/library/json.html#json.dumps .. _`pprint.pformat()`: https://docs.python.org/2/library/pprint.html#pprint.pformat deserializer_opts Like ``serializer_opts`` above, but only used when merging with an existing file (i.e. when ``merge_if_exists`` is set to ``True``). The options specified here will be passed to the deserializer to load the existing data, before merging with the specified data and re-serializing. .. code-block:: yaml /etc/dummy/package.yaml file.serialize: - serializer: yaml - serializer_opts: - explicit_start: True - default_flow_style: True - indent: 4 - deserializer_opts: - encoding: latin-1 - merge_if_exists: True The valid opts are the additional opts (i.e. not the data being deserialized) for the function used to deserialize the data. Documentation for these functions can be found in the list below: - For **yaml**: `yaml.load()`_ - For **json**: `json.loads()`_ .. _`yaml.load()`: https://pyyaml.org/wiki/PyYAMLDocumentation .. _`json.loads()`: https://docs.python.org/2/library/json.html#json.loads However, note that not all arguments are supported. For example, when deserializing JSON, arguments like ``parse_float`` and ``parse_int`` which accept a callable object cannot be handled in an SLS file. .. versionadded:: 2019.2.0 For example, this state: .. code-block:: yaml /etc/dummy/package.json: file.serialize: - dataset: name: naive description: A package using naive versioning author: A confused individual <iam@confused.com> dependencies: express: '>= 1.2.0' optimist: '>= 0.1.0' engine: node 0.4.1 - serializer: json will manage the file ``/etc/dummy/package.json``: .. code-block:: json { "author": "A confused individual <iam@confused.com>", "dependencies": { "express": ">= 1.2.0", "optimist": ">= 0.1.0" }, "description": "A package using naive versioning", "engine": "node 0.4.1", "name": "naive" } """ if "env" in kwargs: # "env" is not supported; Use "saltenv". kwargs.pop("env") name = os.path.expanduser(name) # Set some defaults serializer_options = { "yaml.serialize": {"default_flow_style": False}, "json.serialize": {"indent": 2, "separators": (",", ": "), "sort_keys": True}, } deserializer_options = { "yaml.deserialize": {}, "json.deserialize": {}, } if encoding: serializer_options["yaml.serialize"].update({"allow_unicode": True}) serializer_options["json.serialize"].update({"ensure_ascii": False}) ret = {"changes": {}, "comment": "", "name": name, "result": True} if not name: return _error(ret, "Must provide name to file.serialize") if not create: if not os.path.isfile(name): # Don't create a file that is not already present ret[ "comment" ] = "File {} is not present and is not set for creation".format(name) return ret formatter = kwargs.pop("formatter", None) if serializer and formatter: return _error(ret, "Only one of serializer and formatter are allowed") serializer = str(serializer or formatter or "yaml").lower() if len([x for x in (dataset, dataset_pillar) if x]) > 1: return _error(ret, "Only one of 'dataset' and 'dataset_pillar' is permitted") if dataset_pillar: dataset = __salt__["pillar.get"](dataset_pillar) if dataset is None: return _error(ret, "Neither 'dataset' nor 'dataset_pillar' was defined") if salt.utils.platform.is_windows(): if group is not None: log.warning( "The group argument for %s has been ignored as this " "is a Windows system.", name, ) group = user serializer_name = "{}.serialize".format(serializer) deserializer_name = "{}.deserialize".format(serializer) if serializer_name not in __serializers__: return { "changes": {}, "comment": ( "The {} serializer could not be found. It either does " "not exist or its prerequisites are not installed.".format(serializer) ), "name": name, "result": False, } if serializer_opts: serializer_options.setdefault(serializer_name, {}).update( salt.utils.data.repack_dictlist(serializer_opts) ) if deserializer_opts: deserializer_options.setdefault(deserializer_name, {}).update( salt.utils.data.repack_dictlist(deserializer_opts) ) if merge_if_exists: if os.path.isfile(name): if deserializer_name not in __serializers__: return { "changes": {}, "comment": ( "merge_if_exists is not supported for the {} serializer".format( serializer ) ), "name": name, "result": False, } open_args = "r" if serializer == "plist": open_args += "b" with salt.utils.files.fopen(name, open_args) as fhr: try: existing_data = __serializers__[deserializer_name]( fhr, **deserializer_options.get(deserializer_name, {}) ) except (TypeError, DeserializationError) as exc: ret["result"] = False ret["comment"] = "Failed to deserialize existing data: {}".format( exc ) return False if existing_data is not None: merged_data = salt.utils.dictupdate.merge_recurse( existing_data, dataset ) if existing_data == merged_data: ret["result"] = True ret["comment"] = "The file {} is in the correct state".format(name) return ret dataset = merged_data else: if deserializer_opts: ret.setdefault("warnings", []).append( "The 'deserializer_opts' option is ignored unless " "merge_if_exists is set to True." ) contents = __serializers__[serializer_name]( dataset, **serializer_options.get(serializer_name, {}) ) # Insert a newline, but only if the serialized contents are not a # bytestring. If it's a bytestring, it's almost certainly serialized into a # binary format that does not take kindly to additional bytes being foisted # upon it. try: contents += "\n" except TypeError: pass # Make sure that any leading zeros stripped by YAML loader are added back mode = salt.utils.files.normalize_mode(mode) if __opts__["test"]: ret["changes"] = __salt__["file.check_managed_changes"]( name=name, source=None, source_hash={}, source_hash_name=None, user=user, group=group, mode=mode, attrs=None, template=None, context=None, defaults=None, saltenv=__env__, contents=contents, skip_verify=False, **kwargs ) if ret["changes"]: ret["result"] = None ret["comment"] = "Dataset will be serialized and stored into {}".format( name ) if not show_changes: ret["changes"]["diff"] = "<show_changes=False>" else: ret["result"] = True ret["comment"] = "The file {} is in the correct state".format(name) return ret return __salt__["file.manage_file"]( name=name, sfn="", ret=ret, source=None, source_sum={}, user=user, group=group, mode=mode, attrs=None, saltenv=__env__, backup=backup, makedirs=makedirs, template=None, show_changes=show_changes, encoding=encoding, encoding_errors=encoding_errors, contents=contents, )
[ "def", "serialize", "(", "name", ",", "dataset", "=", "None", ",", "dataset_pillar", "=", "None", ",", "user", "=", "None", ",", "group", "=", "None", ",", "mode", "=", "None", ",", "backup", "=", "\"\"", ",", "makedirs", "=", "False", ",", "show_changes", "=", "True", ",", "create", "=", "True", ",", "merge_if_exists", "=", "False", ",", "encoding", "=", "None", ",", "encoding_errors", "=", "\"strict\"", ",", "serializer", "=", "None", ",", "serializer_opts", "=", "None", ",", "deserializer_opts", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "\"env\"", "in", "kwargs", ":", "# \"env\" is not supported; Use \"saltenv\".", "kwargs", ".", "pop", "(", "\"env\"", ")", "name", "=", "os", ".", "path", ".", "expanduser", "(", "name", ")", "# Set some defaults", "serializer_options", "=", "{", "\"yaml.serialize\"", ":", "{", "\"default_flow_style\"", ":", "False", "}", ",", "\"json.serialize\"", ":", "{", "\"indent\"", ":", "2", ",", "\"separators\"", ":", "(", "\",\"", ",", "\": \"", ")", ",", "\"sort_keys\"", ":", "True", "}", ",", "}", "deserializer_options", "=", "{", "\"yaml.deserialize\"", ":", "{", "}", ",", "\"json.deserialize\"", ":", "{", "}", ",", "}", "if", "encoding", ":", "serializer_options", "[", "\"yaml.serialize\"", "]", ".", "update", "(", "{", "\"allow_unicode\"", ":", "True", "}", ")", "serializer_options", "[", "\"json.serialize\"", "]", ".", "update", "(", "{", "\"ensure_ascii\"", ":", "False", "}", ")", "ret", "=", "{", "\"changes\"", ":", "{", "}", ",", "\"comment\"", ":", "\"\"", ",", "\"name\"", ":", "name", ",", "\"result\"", ":", "True", "}", "if", "not", "name", ":", "return", "_error", "(", "ret", ",", "\"Must provide name to file.serialize\"", ")", "if", "not", "create", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "name", ")", ":", "# Don't create a file that is not already present", "ret", "[", "\"comment\"", "]", "=", "\"File {} is not present and is not set for creation\"", ".", "format", "(", "name", ")", "return", "ret", "formatter", "=", "kwargs", ".", "pop", "(", "\"formatter\"", ",", "None", ")", "if", "serializer", "and", "formatter", ":", "return", "_error", "(", "ret", ",", "\"Only one of serializer and formatter are allowed\"", ")", "serializer", "=", "str", "(", "serializer", "or", "formatter", "or", "\"yaml\"", ")", ".", "lower", "(", ")", "if", "len", "(", "[", "x", "for", "x", "in", "(", "dataset", ",", "dataset_pillar", ")", "if", "x", "]", ")", ">", "1", ":", "return", "_error", "(", "ret", ",", "\"Only one of 'dataset' and 'dataset_pillar' is permitted\"", ")", "if", "dataset_pillar", ":", "dataset", "=", "__salt__", "[", "\"pillar.get\"", "]", "(", "dataset_pillar", ")", "if", "dataset", "is", "None", ":", "return", "_error", "(", "ret", ",", "\"Neither 'dataset' nor 'dataset_pillar' was defined\"", ")", "if", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "if", "group", "is", "not", "None", ":", "log", ".", "warning", "(", "\"The group argument for %s has been ignored as this \"", "\"is a Windows system.\"", ",", "name", ",", ")", "group", "=", "user", "serializer_name", "=", "\"{}.serialize\"", ".", "format", "(", "serializer", ")", "deserializer_name", "=", "\"{}.deserialize\"", ".", "format", "(", "serializer", ")", "if", "serializer_name", "not", "in", "__serializers__", ":", "return", "{", "\"changes\"", ":", "{", "}", ",", "\"comment\"", ":", "(", "\"The {} serializer could not be found. 
It either does \"", "\"not exist or its prerequisites are not installed.\"", ".", "format", "(", "serializer", ")", ")", ",", "\"name\"", ":", "name", ",", "\"result\"", ":", "False", ",", "}", "if", "serializer_opts", ":", "serializer_options", ".", "setdefault", "(", "serializer_name", ",", "{", "}", ")", ".", "update", "(", "salt", ".", "utils", ".", "data", ".", "repack_dictlist", "(", "serializer_opts", ")", ")", "if", "deserializer_opts", ":", "deserializer_options", ".", "setdefault", "(", "deserializer_name", ",", "{", "}", ")", ".", "update", "(", "salt", ".", "utils", ".", "data", ".", "repack_dictlist", "(", "deserializer_opts", ")", ")", "if", "merge_if_exists", ":", "if", "os", ".", "path", ".", "isfile", "(", "name", ")", ":", "if", "deserializer_name", "not", "in", "__serializers__", ":", "return", "{", "\"changes\"", ":", "{", "}", ",", "\"comment\"", ":", "(", "\"merge_if_exists is not supported for the {} serializer\"", ".", "format", "(", "serializer", ")", ")", ",", "\"name\"", ":", "name", ",", "\"result\"", ":", "False", ",", "}", "open_args", "=", "\"r\"", "if", "serializer", "==", "\"plist\"", ":", "open_args", "+=", "\"b\"", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "name", ",", "open_args", ")", "as", "fhr", ":", "try", ":", "existing_data", "=", "__serializers__", "[", "deserializer_name", "]", "(", "fhr", ",", "*", "*", "deserializer_options", ".", "get", "(", "deserializer_name", ",", "{", "}", ")", ")", "except", "(", "TypeError", ",", "DeserializationError", ")", "as", "exc", ":", "ret", "[", "\"result\"", "]", "=", "False", "ret", "[", "\"comment\"", "]", "=", "\"Failed to deserialize existing data: {}\"", ".", "format", "(", "exc", ")", "return", "False", "if", "existing_data", "is", "not", "None", ":", "merged_data", "=", "salt", ".", "utils", ".", "dictupdate", ".", "merge_recurse", "(", "existing_data", ",", "dataset", ")", "if", "existing_data", "==", "merged_data", ":", "ret", "[", "\"result\"", "]", "=", "True", "ret", "[", "\"comment\"", "]", "=", "\"The file {} is in the correct state\"", ".", "format", "(", "name", ")", "return", "ret", "dataset", "=", "merged_data", "else", ":", "if", "deserializer_opts", ":", "ret", ".", "setdefault", "(", "\"warnings\"", ",", "[", "]", ")", ".", "append", "(", "\"The 'deserializer_opts' option is ignored unless \"", "\"merge_if_exists is set to True.\"", ")", "contents", "=", "__serializers__", "[", "serializer_name", "]", "(", "dataset", ",", "*", "*", "serializer_options", ".", "get", "(", "serializer_name", ",", "{", "}", ")", ")", "# Insert a newline, but only if the serialized contents are not a", "# bytestring. 
If it's a bytestring, it's almost certainly serialized into a", "# binary format that does not take kindly to additional bytes being foisted", "# upon it.", "try", ":", "contents", "+=", "\"\\n\"", "except", "TypeError", ":", "pass", "# Make sure that any leading zeros stripped by YAML loader are added back", "mode", "=", "salt", ".", "utils", ".", "files", ".", "normalize_mode", "(", "mode", ")", "if", "__opts__", "[", "\"test\"", "]", ":", "ret", "[", "\"changes\"", "]", "=", "__salt__", "[", "\"file.check_managed_changes\"", "]", "(", "name", "=", "name", ",", "source", "=", "None", ",", "source_hash", "=", "{", "}", ",", "source_hash_name", "=", "None", ",", "user", "=", "user", ",", "group", "=", "group", ",", "mode", "=", "mode", ",", "attrs", "=", "None", ",", "template", "=", "None", ",", "context", "=", "None", ",", "defaults", "=", "None", ",", "saltenv", "=", "__env__", ",", "contents", "=", "contents", ",", "skip_verify", "=", "False", ",", "*", "*", "kwargs", ")", "if", "ret", "[", "\"changes\"", "]", ":", "ret", "[", "\"result\"", "]", "=", "None", "ret", "[", "\"comment\"", "]", "=", "\"Dataset will be serialized and stored into {}\"", ".", "format", "(", "name", ")", "if", "not", "show_changes", ":", "ret", "[", "\"changes\"", "]", "[", "\"diff\"", "]", "=", "\"<show_changes=False>\"", "else", ":", "ret", "[", "\"result\"", "]", "=", "True", "ret", "[", "\"comment\"", "]", "=", "\"The file {} is in the correct state\"", ".", "format", "(", "name", ")", "return", "ret", "return", "__salt__", "[", "\"file.manage_file\"", "]", "(", "name", "=", "name", ",", "sfn", "=", "\"\"", ",", "ret", "=", "ret", ",", "source", "=", "None", ",", "source_sum", "=", "{", "}", ",", "user", "=", "user", ",", "group", "=", "group", ",", "mode", "=", "mode", ",", "attrs", "=", "None", ",", "saltenv", "=", "__env__", ",", "backup", "=", "backup", ",", "makedirs", "=", "makedirs", ",", "template", "=", "None", ",", "show_changes", "=", "show_changes", ",", "encoding", "=", "encoding", ",", "encoding_errors", "=", "encoding_errors", ",", "contents", "=", "contents", ",", ")" ]
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/states/file.py#L7458-L7844
vertexproject/synapse
8173f43cb5fba5ca2648d12a659afb432139b0a7
synapse/lib/cell.py
python
CellApi.rotateNexsLog
(self)
return await self.cell.rotateNexsLog()
Rotate the Nexus log at the current offset. Returns: int: The starting index of the active Nexus log
Rotate the Nexus log at the current offset.
[ "Rotate", "the", "Nexus", "log", "at", "the", "current", "offset", "." ]
async def rotateNexsLog(self): ''' Rotate the Nexus log at the current offset. Returns: int: The starting index of the active Nexus log ''' return await self.cell.rotateNexsLog()
[ "async", "def", "rotateNexsLog", "(", "self", ")", ":", "return", "await", "self", ".", "cell", ".", "rotateNexsLog", "(", ")" ]
https://github.com/vertexproject/synapse/blob/8173f43cb5fba5ca2648d12a659afb432139b0a7/synapse/lib/cell.py#L246-L253
pfalcon/pycopy-lib
56ebf2110f3caa63a3785d439ce49b11e13c75c0
email.internal/email/_policybase.py
python
_PolicyBase.__init__
(self, **kw)
Create new Policy, possibly overriding some defaults. See class docstring for a list of overridable attributes.
Create new Policy, possibly overriding some defaults.
[ "Create", "new", "Policy", "possibly", "overriding", "some", "defaults", "." ]
def __init__(self, **kw): """Create new Policy, possibly overriding some defaults. See class docstring for a list of overridable attributes. """ for name, value in kw.items(): if hasattr(self, name): super(_PolicyBase,self).__setattr__(name, value) else: raise TypeError( "{!r} is an invalid keyword argument for {}".format( name, self.__class__.__name__))
[ "def", "__init__", "(", "self", ",", "*", "*", "kw", ")", ":", "for", "name", ",", "value", "in", "kw", ".", "items", "(", ")", ":", "if", "hasattr", "(", "self", ",", "name", ")", ":", "super", "(", "_PolicyBase", ",", "self", ")", ".", "__setattr__", "(", "name", ",", "value", ")", "else", ":", "raise", "TypeError", "(", "\"{!r} is an invalid keyword argument for {}\"", ".", "format", "(", "name", ",", "self", ".", "__class__", ".", "__name__", ")", ")" ]
https://github.com/pfalcon/pycopy-lib/blob/56ebf2110f3caa63a3785d439ce49b11e13c75c0/email.internal/email/_policybase.py#L41-L53
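A self-contained sketch of the keyword-override pattern above: known attributes may be overridden at construction, unknown ones raise TypeError. The real class routes assignment through super().__setattr__ to bypass its immutability guard; plain setattr is enough for the sketch.

class PolicyLike:
    # Illustrative defaults standing in for the policy's documented attributes.
    linesep = "\n"
    max_line_length = 78

    def __init__(self, **kw):
        for name, value in kw.items():
            if hasattr(self, name):
                setattr(self, name, value)
            else:
                raise TypeError(
                    "{!r} is an invalid keyword argument for {}".format(
                        name, self.__class__.__name__))

print(repr(PolicyLike(linesep="\r\n").linesep))  # '\r\n'
try:
    PolicyLike(no_such_option=True)
except TypeError as exc:
    print(exc)  # 'no_such_option' is an invalid keyword argument for PolicyLike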
XX-net/XX-Net
a9898cfcf0084195fb7e69b6bc834e59aecdf14f
python3.8.2/Lib/warnings.py
python
showwarning
(message, category, filename, lineno, file=None, line=None)
Hook to write a warning to a file; replace if you like.
Hook to write a warning to a file; replace if you like.
[ "Hook", "to", "write", "a", "warning", "to", "a", "file", ";", "replace", "if", "you", "like", "." ]
def showwarning(message, category, filename, lineno, file=None, line=None): """Hook to write a warning to a file; replace if you like.""" msg = WarningMessage(message, category, filename, lineno, file, line) _showwarnmsg_impl(msg)
[ "def", "showwarning", "(", "message", ",", "category", ",", "filename", ",", "lineno", ",", "file", "=", "None", ",", "line", "=", "None", ")", ":", "msg", "=", "WarningMessage", "(", "message", ",", "category", ",", "filename", ",", "lineno", ",", "file", ",", "line", ")", "_showwarnmsg_impl", "(", "msg", ")" ]
https://github.com/XX-net/XX-Net/blob/a9898cfcf0084195fb7e69b6bc834e59aecdf14f/python3.8.2/Lib/warnings.py#L10-L13
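A usage sketch: showwarning is a documented replaceable hook, so assigning a new function redirects formatted warnings (here into a list instead of stderr).

import warnings

captured = []

def my_showwarning(message, category, filename, lineno, file=None, line=None):
    # formatwarning produces the same one-line text the default hook prints.
    captured.append(warnings.formatwarning(message, category, filename, lineno, line))

warnings.showwarning = my_showwarning
warnings.warn("something looks off", UserWarning)
print(captured[0])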
lululxvi/deepxde
730c97282636e86c845ce2ba3253482f2178469e
deepxde/optimizers/tensorflow/optimizers.py
python
get
(optimizer, learning_rate=None, decay=None)
Retrieves a Keras Optimizer instance.
Retrieves a Keras Optimizer instance.
[ "Retrieves", "a", "Keras", "Optimizer", "instance", "." ]
def get(optimizer, learning_rate=None, decay=None): """Retrieves a Keras Optimizer instance.""" if isinstance(optimizer, tf.keras.optimizers.Optimizer): return optimizer if is_external_optimizer(optimizer): if learning_rate is not None or decay is not None: print("Warning: learning rate is ignored for {}".format(optimizer)) return lbfgs_minimize if learning_rate is None: raise ValueError("No learning rate for {}.".format(optimizer)) lr_schedule = _get_learningrate(learning_rate, decay) if optimizer == "adam": return tf.keras.optimizers.Adam(learning_rate=lr_schedule) if optimizer == "nadam": return tf.keras.optimizers.Nadam(learning_rate=lr_schedule) if optimizer == "sgd": return tf.keras.optimizers.SGD(learning_rate=lr_schedule) raise NotImplementedError(f"{optimizer} to be implemented for backend tensorflow.")
[ "def", "get", "(", "optimizer", ",", "learning_rate", "=", "None", ",", "decay", "=", "None", ")", ":", "if", "isinstance", "(", "optimizer", ",", "tf", ".", "keras", ".", "optimizers", ".", "Optimizer", ")", ":", "return", "optimizer", "if", "is_external_optimizer", "(", "optimizer", ")", ":", "if", "learning_rate", "is", "not", "None", "or", "decay", "is", "not", "None", ":", "print", "(", "\"Warning: learning rate is ignored for {}\"", ".", "format", "(", "optimizer", ")", ")", "return", "lbfgs_minimize", "if", "learning_rate", "is", "None", ":", "raise", "ValueError", "(", "\"No learning rate for {}.\"", ".", "format", "(", "optimizer", ")", ")", "lr_schedule", "=", "_get_learningrate", "(", "learning_rate", ",", "decay", ")", "if", "optimizer", "==", "\"adam\"", ":", "return", "tf", ".", "keras", ".", "optimizers", ".", "Adam", "(", "learning_rate", "=", "lr_schedule", ")", "if", "optimizer", "==", "\"nadam\"", ":", "return", "tf", ".", "keras", ".", "optimizers", ".", "Nadam", "(", "learning_rate", "=", "lr_schedule", ")", "if", "optimizer", "==", "\"sgd\"", ":", "return", "tf", ".", "keras", ".", "optimizers", ".", "SGD", "(", "learning_rate", "=", "lr_schedule", ")", "raise", "NotImplementedError", "(", "f\"{optimizer} to be implemented for backend tensorflow.\"", ")" ]
https://github.com/lululxvi/deepxde/blob/730c97282636e86c845ce2ba3253482f2178469e/deepxde/optimizers/tensorflow/optimizers.py#L11-L31
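A hedged sketch of what the "adam" branch resolves to: a Keras Adam optimizer driven by a learning-rate schedule. _get_learningrate is internal to deepxde, so the InverseTimeDecay schedule below is an illustrative stand-in.

import tensorflow as tf

# A decay schedule in place of whatever _get_learningrate builds internally.
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
    initial_learning_rate=1e-3, decay_steps=1000, decay_rate=0.5)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
print(type(optimizer).__name__)  # Adam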
cltk/cltk
1a8c2f5ef72389e2579dfce1fa5af8e59ebc9ec1
src/cltk/prosody/non.py
python
UnspecifiedStanza.to_phonetics
(self, with_squared_brackets=True)
>>> stanza = "Ein sat hon úti,\\nþá er inn aldni kom\\nyggjungr ása\\nok í augu leit.\\nHvers fregnið mik?\\nHví freistið mín?\\nAllt veit ek, Óðinn,\\nhvar þú auga falt,\\ní inum mæra\\nMímisbrunni.\\nDrekkr mjöð Mímir\\nmorgun hverjan\\naf veði Valföðrs.\\nVituð ér enn - eða hvat?" >>> us = UnspecifiedStanza() >>> us.from_short_lines_text(stanza) >>> us.to_phonetics(False) >>> us.transcribed_text [['ɛin', 'sat', 'hɔn', 'uːti'], ['θaː', 'ɛr', 'inː', 'aldni', 'kɔm'], ['ygːjunɣr', 'aːsa'], ['ɔk', 'iː', 'ɒuɣu', 'lɛit'], ['hvɛrs', 'frɛɣnið', 'mik'], ['hviː', 'frɛistið', 'miːn'], ['alːt', 'vɛit', 'ɛk', 'oːðinː'], ['hvar', 'θuː', 'ɒuɣa', 'falt'], ['iː', 'inum', 'mɛːra'], ['miːmisbrunːi'], ['drɛkːr', 'mjœð', 'miːmir'], ['mɔrɣun', 'hvɛrjan'], ['av', 'vɛði', 'valvœðrs'], ['vituð', 'eːr', 'ɛnː', 'ɛða', 'hvat']] :return:
>>> stanza = "Ein sat hon úti,\\nþá er inn aldni kom\\nyggjungr ása\\nok í augu leit.\\nHvers fregnið mik?\\nHví freistið mín?\\nAllt veit ek, Óðinn,\\nhvar þú auga falt,\\ní inum mæra\\nMímisbrunni.\\nDrekkr mjöð Mímir\\nmorgun hverjan\\naf veði Valföðrs.\\nVituð ér enn - eða hvat?" >>> us = UnspecifiedStanza() >>> us.from_short_lines_text(stanza) >>> us.to_phonetics(False) >>> us.transcribed_text [['ɛin', 'sat', 'hɔn', 'uːti'], ['θaː', 'ɛr', 'inː', 'aldni', 'kɔm'], ['ygːjunɣr', 'aːsa'], ['ɔk', 'iː', 'ɒuɣu', 'lɛit'], ['hvɛrs', 'frɛɣnið', 'mik'], ['hviː', 'frɛistið', 'miːn'], ['alːt', 'vɛit', 'ɛk', 'oːðinː'], ['hvar', 'θuː', 'ɒuɣa', 'falt'], ['iː', 'inum', 'mɛːra'], ['miːmisbrunːi'], ['drɛkːr', 'mjœð', 'miːmir'], ['mɔrɣun', 'hvɛrjan'], ['av', 'vɛði', 'valvœðrs'], ['vituð', 'eːr', 'ɛnː', 'ɛða', 'hvat']]
[ ">>>", "stanza", "=", "Ein", "sat", "hon", "úti", "\\\\", "nþá", "er", "inn", "aldni", "kom", "\\\\", "nyggjungr", "ása", "\\\\", "nok", "í", "augu", "leit", ".", "\\\\", "nHvers", "fregnið", "mik?", "\\\\", "nHví", "freistið", "mín?", "\\\\", "nAllt", "veit", "ek", "Óðinn", "\\\\", "nhvar", "þú", "auga", "falt", "\\\\", "ní", "inum", "mæra", "\\\\", "nMímisbrunni", ".", "\\\\", "nDrekkr", "mjöð", "Mímir", "\\\\", "nmorgun", "hverjan", "\\\\", "naf", "veði", "Valföðrs", ".", "\\\\", "nVituð", "ér", "enn", "-", "eða", "hvat?", ">>>", "us", "=", "UnspecifiedStanza", "()", ">>>", "us", ".", "from_short_lines_text", "(", "stanza", ")", ">>>", "us", ".", "to_phonetics", "(", "False", ")", ">>>", "us", ".", "transcribed_text", "[[", "ɛin", "sat", "hɔn", "uːti", "]", "[", "θaː", "ɛr", "inː", "aldni", "kɔm", "]", "[", "ygːjunɣr", "aːsa", "]", "[", "ɔk", "iː", "ɒuɣu", "lɛit", "]", "[", "hvɛrs", "frɛɣnið", "mik", "]", "[", "hviː", "frɛistið", "miːn", "]", "[", "alːt", "vɛit", "ɛk", "oːðinː", "]", "[", "hvar", "θuː", "ɒuɣa", "falt", "]", "[", "iː", "inum", "mɛːra", "]", "[", "miːmisbrunːi", "]", "[", "drɛkːr", "mjœð", "miːmir", "]", "[", "mɔrɣun", "hvɛrjan", "]", "[", "av", "vɛði", "valvœðrs", "]", "[", "vituð", "eːr", "ɛnː", "ɛða", "hvat", "]]" ]
def to_phonetics(self, with_squared_brackets=True): """ >>> stanza = "Ein sat hon úti,\\nþá er inn aldni kom\\nyggjungr ása\\nok í augu leit.\\nHvers fregnið mik?\\nHví freistið mín?\\nAllt veit ek, Óðinn,\\nhvar þú auga falt,\\ní inum mæra\\nMímisbrunni.\\nDrekkr mjöð Mímir\\nmorgun hverjan\\naf veði Valföðrs.\\nVituð ér enn - eða hvat?" >>> us = UnspecifiedStanza() >>> us.from_short_lines_text(stanza) >>> us.to_phonetics(False) >>> us.transcribed_text [['ɛin', 'sat', 'hɔn', 'uːti'], ['θaː', 'ɛr', 'inː', 'aldni', 'kɔm'], ['ygːjunɣr', 'aːsa'], ['ɔk', 'iː', 'ɒuɣu', 'lɛit'], ['hvɛrs', 'frɛɣnið', 'mik'], ['hviː', 'frɛistið', 'miːn'], ['alːt', 'vɛit', 'ɛk', 'oːðinː'], ['hvar', 'θuː', 'ɒuɣa', 'falt'], ['iː', 'inum', 'mɛːra'], ['miːmisbrunːi'], ['drɛkːr', 'mjœð', 'miːmir'], ['mɔrɣun', 'hvɛrjan'], ['av', 'vɛði', 'valvœðrs'], ['vituð', 'eːr', 'ɛnː', 'ɛða', 'hvat']] :return: """ transcriber = Transcriber( old_norse_transcription.DIPHTHONGS_IPA, old_norse_transcription.DIPHTHONGS_IPA_class, old_norse_transcription.IPA_class, old_norse_transcription.old_norse_rules, ) transcribed_text = [] phonological_features_text = [] for short_line in self.short_lines: assert isinstance(short_line, ShortLine) or isinstance(short_line, LongLine) short_line.to_phonetics(transcriber, with_squared_brackets) transcribed_text.append(short_line.transcribed) phonological_features_text.append(short_line.phonological_features_text) self.transcribed_text = transcribed_text self.phonological_features_text = phonological_features_text
[ "def", "to_phonetics", "(", "self", ",", "with_squared_brackets", "=", "True", ")", ":", "transcriber", "=", "Transcriber", "(", "old_norse_transcription", ".", "DIPHTHONGS_IPA", ",", "old_norse_transcription", ".", "DIPHTHONGS_IPA_class", ",", "old_norse_transcription", ".", "IPA_class", ",", "old_norse_transcription", ".", "old_norse_rules", ",", ")", "transcribed_text", "=", "[", "]", "phonological_features_text", "=", "[", "]", "for", "short_line", "in", "self", ".", "short_lines", ":", "assert", "isinstance", "(", "short_line", ",", "ShortLine", ")", "or", "isinstance", "(", "short_line", ",", "LongLine", ")", "short_line", ".", "to_phonetics", "(", "transcriber", ",", "with_squared_brackets", ")", "transcribed_text", ".", "append", "(", "short_line", ".", "transcribed", ")", "phonological_features_text", ".", "append", "(", "short_line", ".", "phonological_features_text", ")", "self", ".", "transcribed_text", "=", "transcribed_text", "self", ".", "phonological_features_text", "=", "phonological_features_text" ]
https://github.com/cltk/cltk/blob/1a8c2f5ef72389e2579dfce1fa5af8e59ebc9ec1/src/cltk/prosody/non.py#L388-L413
r9y9/wavenet_vocoder
a35fff76ea3687b05e1a10023cad3f7f64fa25a3
lrschedule.py
python
cyclic_cosine_annealing
(init_lr, global_step, T, M)
return init_lr / 2.0 * (np.cos(np.pi * ((global_step - 1) % TdivM) / TdivM) + 1.0)
Cyclic cosine annealing https://arxiv.org/pdf/1704.00109.pdf Args: init_lr (float): Initial learning rate global_step (int): Current iteration number T (int): Total iteration number (i.e. nepoch) M (int): Number of ensembles we want Returns: float: Annealed learning rate
Cyclic cosine annealing
[ "Cyclic", "cosine", "annealing" ]
def cyclic_cosine_annealing(init_lr, global_step, T, M): """Cyclic cosine annealing https://arxiv.org/pdf/1704.00109.pdf Args: init_lr (float): Initial learning rate global_step (int): Current iteration number T (int): Total iteration number (i.e. nepoch) M (int): Number of ensembles we want Returns: float: Annealed learning rate """ TdivM = T // M return init_lr / 2.0 * (np.cos(np.pi * ((global_step - 1) % TdivM) / TdivM) + 1.0)
[ "def", "cyclic_cosine_annealing", "(", "init_lr", ",", "global_step", ",", "T", ",", "M", ")", ":", "TdivM", "=", "T", "//", "M", "return", "init_lr", "/", "2.0", "*", "(", "np", ".", "cos", "(", "np", ".", "pi", "*", "(", "(", "global_step", "-", "1", ")", "%", "TdivM", ")", "/", "TdivM", ")", "+", "1.0", ")" ]
https://github.com/r9y9/wavenet_vocoder/blob/a35fff76ea3687b05e1a10023cad3f7f64fa25a3/lrschedule.py#L20-L35
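A quick numeric check of the schedule above (numpy only): with T=100 total steps and M=4 ensembles the cycle length is T // M = 25, so the rate decays toward zero and then warm-restarts.

import numpy as np

def cyclic_cosine_annealing(init_lr, global_step, T, M):
    TdivM = T // M
    return init_lr / 2.0 * (np.cos(np.pi * ((global_step - 1) % TdivM) / TdivM) + 1.0)

print(cyclic_cosine_annealing(0.1, 1, 100, 4))   # 0.1     start of cycle 1
print(cyclic_cosine_annealing(0.1, 25, 100, 4))  # ~0.0004 end of cycle 1
print(cyclic_cosine_annealing(0.1, 26, 100, 4))  # 0.1     warm restart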
jellyfin/jellyfin-kodi
e21e059e000f06890b33e2794a7e57959fdf19a3
jellyfin_kodi/objects/kodi/movies.py
python
Movies.add_ratings
(self, *args)
Add ratings, rating type and votes.
Add ratings, rating type and votes.
[ "Add", "ratings", "rating", "type", "and", "votes", "." ]
def add_ratings(self, *args): ''' Add ratings, rating type and votes. ''' self.cursor.execute(QU.add_rating, args)
[ "def", "add_ratings", "(", "self", ",", "*", "args", ")", ":", "self", ".", "cursor", ".", "execute", "(", "QU", ".", "add_rating", ",", "args", ")" ]
https://github.com/jellyfin/jellyfin-kodi/blob/e21e059e000f06890b33e2794a7e57959fdf19a3/jellyfin_kodi/objects/kodi/movies.py#L68-L72
jgagneastro/coffeegrindsize
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/pandas/core/arrays/categorical.py
python
Categorical.as_unordered
(self, inplace=False)
return self.set_ordered(False, inplace=inplace)
Set the Categorical to be unordered. Parameters ---------- inplace : boolean (default: False) Whether or not to set the ordered attribute inplace or return a copy of this categorical with ordered set to False
Set the Categorical to be unordered.
[ "Set", "the", "Categorical", "to", "be", "unordered", "." ]
def as_unordered(self, inplace=False): """ Set the Categorical to be unordered. Parameters ---------- inplace : boolean (default: False) Whether or not to set the ordered attribute inplace or return a copy of this categorical with ordered set to False """ inplace = validate_bool_kwarg(inplace, 'inplace') return self.set_ordered(False, inplace=inplace)
[ "def", "as_unordered", "(", "self", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "return", "self", ".", "set_ordered", "(", "False", ",", "inplace", "=", "inplace", ")" ]
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/pandas/core/arrays/categorical.py#L772-L783
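A small illustration (assuming a pandas install): with the default inplace=False the call returns an unordered copy and leaves the original categorical untouched.

import pandas as pd

cat = pd.Categorical(["a", "b", "a"], categories=["a", "b"], ordered=True)
print(cat.ordered)                 # True
print(cat.as_unordered().ordered)  # False -- a copy; cat itself is unchanged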
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/ie/v20200304/models.py
python
MediaJoiningTaskResult.__init__
(self)
r""" :param File: 拼接结果文件。 注意:此字段可能返回 null,表示取不到有效值。 :type File: :class:`tencentcloud.ie.v20200304.models.TaskResultFile`
r""" :param File: 拼接结果文件。 注意:此字段可能返回 null,表示取不到有效值。 :type File: :class:`tencentcloud.ie.v20200304.models.TaskResultFile`
[ "r", ":", "param", "File", ":", "拼接结果文件。", "注意:此字段可能返回", "null,表示取不到有效值。", ":", "type", "File", ":", ":", "class", ":", "tencentcloud", ".", "ie", ".", "v20200304", ".", "models", ".", "TaskResultFile" ]
def __init__(self): r""" :param File: 拼接结果文件。 注意:此字段可能返回 null,表示取不到有效值。 :type File: :class:`tencentcloud.ie.v20200304.models.TaskResultFile` """ self.File = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "File", "=", "None" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/ie/v20200304/models.py#L1955-L1961
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/gdata/src/gdata/tlslite/integration/HTTPTLSConnection.py
python
HTTPTLSConnection.__init__
(self, host, port=None, username=None, password=None, sharedKey=None, certChain=None, privateKey=None, cryptoID=None, protocol=None, x509Fingerprint=None, x509TrustList=None, x509CommonName=None, settings = None)
Create a new HTTPTLSConnection. For client authentication, use one of these argument combinations: - username, password (SRP) - username, sharedKey (shared-key) - certChain, privateKey (certificate) For server authentication, you can either rely on the implicit mutual authentication performed by SRP or shared-keys, or you can do certificate-based server authentication with one of these argument combinations: - cryptoID[, protocol] (requires cryptoIDlib) - x509Fingerprint - x509TrustList[, x509CommonName] (requires cryptlib_py) Certificate-based server authentication is compatible with SRP or certificate-based client authentication. It is not compatible with shared-keys. The constructor does not perform the TLS handshake itself, but simply stores these arguments for later. The handshake is performed only when this class needs to connect with the server. Thus you should be prepared to handle TLS-specific exceptions when calling methods inherited from L{httplib.HTTPConnection} such as request(), connect(), and send(). See the client handshake functions in L{tlslite.TLSConnection.TLSConnection} for details on which exceptions might be raised. @type host: str @param host: Server to connect to. @type port: int @param port: Port to connect to. @type username: str @param username: SRP or shared-key username. Requires the 'password' or 'sharedKey' argument. @type password: str @param password: SRP password for mutual authentication. Requires the 'username' argument. @type sharedKey: str @param sharedKey: Shared key for mutual authentication. Requires the 'username' argument. @type certChain: L{tlslite.X509CertChain.X509CertChain} or L{cryptoIDlib.CertChain.CertChain} @param certChain: Certificate chain for client authentication. Requires the 'privateKey' argument. Excludes the SRP or shared-key related arguments. @type privateKey: L{tlslite.utils.RSAKey.RSAKey} @param privateKey: Private key for client authentication. Requires the 'certChain' argument. Excludes the SRP or shared-key related arguments. @type cryptoID: str @param cryptoID: cryptoID for server authentication. Mutually exclusive with the 'x509...' arguments. @type protocol: str @param protocol: cryptoID protocol URI for server authentication. Requires the 'cryptoID' argument. @type x509Fingerprint: str @param x509Fingerprint: Hex-encoded X.509 fingerprint for server authentication. Mutually exclusive with the 'cryptoID' and 'x509TrustList' arguments. @type x509TrustList: list of L{tlslite.X509.X509} @param x509TrustList: A list of trusted root certificates. The other party must present a certificate chain which extends to one of these root certificates. The cryptlib_py module must be installed to use this parameter. Mutually exclusive with the 'cryptoID' and 'x509Fingerprint' arguments. @type x509CommonName: str @param x509CommonName: The end-entity certificate's 'CN' field must match this value. For a web server, this is typically a server name such as 'www.amazon.com'. Mutually exclusive with the 'cryptoID' and 'x509Fingerprint' arguments. Requires the 'x509TrustList' argument. @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} @param settings: Various settings which can be used to control the ciphersuites, certificate types, and SSL/TLS versions offered by the client.
Create a new HTTPTLSConnection.
[ "Create", "a", "new", "HTTPTLSConnection", "." ]
def __init__(self, host, port=None, username=None, password=None, sharedKey=None, certChain=None, privateKey=None, cryptoID=None, protocol=None, x509Fingerprint=None, x509TrustList=None, x509CommonName=None, settings = None): """Create a new HTTPTLSConnection. For client authentication, use one of these argument combinations: - username, password (SRP) - username, sharedKey (shared-key) - certChain, privateKey (certificate) For server authentication, you can either rely on the implicit mutual authentication performed by SRP or shared-keys, or you can do certificate-based server authentication with one of these argument combinations: - cryptoID[, protocol] (requires cryptoIDlib) - x509Fingerprint - x509TrustList[, x509CommonName] (requires cryptlib_py) Certificate-based server authentication is compatible with SRP or certificate-based client authentication. It is not compatible with shared-keys. The constructor does not perform the TLS handshake itself, but simply stores these arguments for later. The handshake is performed only when this class needs to connect with the server. Thus you should be prepared to handle TLS-specific exceptions when calling methods inherited from L{httplib.HTTPConnection} such as request(), connect(), and send(). See the client handshake functions in L{tlslite.TLSConnection.TLSConnection} for details on which exceptions might be raised. @type host: str @param host: Server to connect to. @type port: int @param port: Port to connect to. @type username: str @param username: SRP or shared-key username. Requires the 'password' or 'sharedKey' argument. @type password: str @param password: SRP password for mutual authentication. Requires the 'username' argument. @type sharedKey: str @param sharedKey: Shared key for mutual authentication. Requires the 'username' argument. @type certChain: L{tlslite.X509CertChain.X509CertChain} or L{cryptoIDlib.CertChain.CertChain} @param certChain: Certificate chain for client authentication. Requires the 'privateKey' argument. Excludes the SRP or shared-key related arguments. @type privateKey: L{tlslite.utils.RSAKey.RSAKey} @param privateKey: Private key for client authentication. Requires the 'certChain' argument. Excludes the SRP or shared-key related arguments. @type cryptoID: str @param cryptoID: cryptoID for server authentication. Mutually exclusive with the 'x509...' arguments. @type protocol: str @param protocol: cryptoID protocol URI for server authentication. Requires the 'cryptoID' argument. @type x509Fingerprint: str @param x509Fingerprint: Hex-encoded X.509 fingerprint for server authentication. Mutually exclusive with the 'cryptoID' and 'x509TrustList' arguments. @type x509TrustList: list of L{tlslite.X509.X509} @param x509TrustList: A list of trusted root certificates. The other party must present a certificate chain which extends to one of these root certificates. The cryptlib_py module must be installed to use this parameter. Mutually exclusive with the 'cryptoID' and 'x509Fingerprint' arguments. @type x509CommonName: str @param x509CommonName: The end-entity certificate's 'CN' field must match this value. For a web server, this is typically a server name such as 'www.amazon.com'. Mutually exclusive with the 'cryptoID' and 'x509Fingerprint' arguments. Requires the 'x509TrustList' argument. @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} @param settings: Various settings which can be used to control the ciphersuites, certificate types, and SSL/TLS versions offered by the client. """ HTTPBaseTLSConnection.__init__(self, host, port) ClientHelper.__init__(self, username, password, sharedKey, certChain, privateKey, cryptoID, protocol, x509Fingerprint, x509TrustList, x509CommonName, settings)
[ "def", "__init__", "(", "self", ",", "host", ",", "port", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "sharedKey", "=", "None", ",", "certChain", "=", "None", ",", "privateKey", "=", "None", ",", "cryptoID", "=", "None", ",", "protocol", "=", "None", ",", "x509Fingerprint", "=", "None", ",", "x509TrustList", "=", "None", ",", "x509CommonName", "=", "None", ",", "settings", "=", "None", ")", ":", "HTTPBaseTLSConnection", ".", "__init__", "(", "self", ",", "host", ",", "port", ")", "ClientHelper", ".", "__init__", "(", "self", ",", "username", ",", "password", ",", "sharedKey", ",", "certChain", ",", "privateKey", ",", "cryptoID", ",", "protocol", ",", "x509Fingerprint", ",", "x509TrustList", ",", "x509CommonName", ",", "settings", ")" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/gdata/src/gdata/tlslite/integration/HTTPTLSConnection.py#L59-L166
natashamjaques/neural_chat
ddb977bb4602a67c460d02231e7bbf7b2cb49a97
ParlAI/parlai/core/torch_agent.py
python
TorchAgent.match_batch
(self, batch_reply, valid_inds, output=None)
return batch_reply
Match sub-batch of predictions to the original batch indices. Batches may be only partially filled (i.e. when completing the remainder at the end of the validation or test set), or we may want to sort by e.g. the length of the input sequences if using pack_padded_sequence. This matches rows back with their original row in the batch for calculating metrics like accuracy. If output is None (model choosing not to provide any predictions), we will just return the batch of replies. Otherwise, output should be a parlai.core.torch_agent.Output object. This is a namedtuple, which can provide text predictions and/or text_candidates predictions. If you would like to map additional fields into the batch_reply, you can override this method as well as providing your own namedtuple with additional fields. :param batch_reply: Full-batchsize list of message dictionaries to put responses into. :param valid_inds: Original indices of the predictions. :param output: Output namedtuple which contains sub-batchsize list of text outputs from model. May be None (default) if model chooses not to answer. This method will check for ``text`` and ``text_candidates`` fields.
Match sub-batch of predictions to the original batch indices.
[ "Match", "sub", "-", "batch", "of", "predictions", "to", "the", "original", "batch", "indices", "." ]
def match_batch(self, batch_reply, valid_inds, output=None): """ Match sub-batch of predictions to the original batch indices. Batches may be only partially filled (i.e. when completing the remainder at the end of the validation or test set), or we may want to sort by e.g. the length of the input sequences if using pack_padded_sequence. This matches rows back with their original row in the batch for calculating metrics like accuracy. If output is None (model choosing not to provide any predictions), we will just return the batch of replies. Otherwise, output should be a parlai.core.torch_agent.Output object. This is a namedtuple, which can provide text predictions and/or text_candidates predictions. If you would like to map additional fields into the batch_reply, you can override this method as well as providing your own namedtuple with additional fields. :param batch_reply: Full-batchsize list of message dictionaries to put responses into. :param valid_inds: Original indices of the predictions. :param output: Output namedtuple which contains sub-batchsize list of text outputs from model. May be None (default) if model chooses not to answer. This method will check for ``text`` and ``text_candidates`` fields. """ if output is None: return batch_reply if output.text is not None: for i, response in zip(valid_inds, output.text): batch_reply[i]['text'] = response if output.text_candidates is not None: for i, cands in zip(valid_inds, output.text_candidates): batch_reply[i]['text_candidates'] = cands return batch_reply
[ "def", "match_batch", "(", "self", ",", "batch_reply", ",", "valid_inds", ",", "output", "=", "None", ")", ":", "if", "output", "is", "None", ":", "return", "batch_reply", "if", "output", ".", "text", "is", "not", "None", ":", "for", "i", ",", "response", "in", "zip", "(", "valid_inds", ",", "output", ".", "text", ")", ":", "batch_reply", "[", "i", "]", "[", "'text'", "]", "=", "response", "if", "output", ".", "text_candidates", "is", "not", "None", ":", "for", "i", ",", "cands", "in", "zip", "(", "valid_inds", ",", "output", ".", "text_candidates", ")", ":", "batch_reply", "[", "i", "]", "[", "'text_candidates'", "]", "=", "cands", "return", "batch_reply" ]
https://github.com/natashamjaques/neural_chat/blob/ddb977bb4602a67c460d02231e7bbf7b2cb49a97/ParlAI/parlai/core/torch_agent.py#L1486-L1525
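The index bookkeeping above, reduced to plain Python (no ParlAI required): sub-batch outputs land back in their original batch-row slots, and rows that were skipped keep empty replies.

batch_reply = [{} for _ in range(4)]  # full batch of 4 message dicts
valid_inds = [0, 2]                   # only rows 0 and 2 were valid input
text_out = ["hello", "world"]         # model replies for the sub-batch

for i, response in zip(valid_inds, text_out):
    batch_reply[i]["text"] = response

print(batch_reply)  # [{'text': 'hello'}, {}, {'text': 'world'}, {}]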
tensorflow/tfx
b4a6b83269815ed12ba9df9e9154c7376fef2ea0
tfx/dsl/compiler/compiler.py
python
_compile_resolver_node
( resolver_node: base_node.BaseNode, )
return result
Converts Resolver node to a corresponding ResolverSteps.
Converts Resolver node to a corresponding ResolverSteps.
[ "Converts", "Resolver", "node", "to", "a", "corresponding", "ResolverSteps", "." ]
def _compile_resolver_node( resolver_node: base_node.BaseNode, ) -> List[pipeline_pb2.ResolverConfig.ResolverStep]: """Converts Resolver node to a corresponding ResolverSteps.""" assert compiler_utils.is_resolver(resolver_node) resolver_node = cast(resolver.Resolver, resolver_node) result = _compile_resolver_function(resolver_node.resolver_function) for step in result: step.input_keys.extend(resolver_node.inputs.keys()) return result
[ "def", "_compile_resolver_node", "(", "resolver_node", ":", "base_node", ".", "BaseNode", ",", ")", "->", "List", "[", "pipeline_pb2", ".", "ResolverConfig", ".", "ResolverStep", "]", ":", "assert", "compiler_utils", ".", "is_resolver", "(", "resolver_node", ")", "resolver_node", "=", "cast", "(", "resolver", ".", "Resolver", ",", "resolver_node", ")", "result", "=", "_compile_resolver_function", "(", "resolver_node", ".", "resolver_function", ")", "for", "step", "in", "result", ":", "step", ".", "input_keys", ".", "extend", "(", "resolver_node", ".", "inputs", ".", "keys", "(", ")", ")", "return", "result" ]
https://github.com/tensorflow/tfx/blob/b4a6b83269815ed12ba9df9e9154c7376fef2ea0/tfx/dsl/compiler/compiler.py#L621-L630
inspurer/WorkAttendanceSystem
1221e2d67bdf5bb15fe99517cc3ded58ccb066df
V2.0/venv/Lib/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/requests/utils.py
python
requote_uri
(uri)
Re-quote the given URI. This function passes the given URI through an unquote/quote cycle to ensure that it is fully and consistently quoted. :rtype: str
Re-quote the given URI.
[ "Re", "-", "quote", "the", "given", "URI", "." ]
def requote_uri(uri): """Re-quote the given URI. This function passes the given URI through an unquote/quote cycle to ensure that it is fully and consistently quoted. :rtype: str """ safe_with_percent = "!#$%&'()*+,/:;=?@[]~" safe_without_percent = "!#$&'()*+,/:;=?@[]~" try: # Unquote only the unreserved characters # Then quote only illegal characters (do not quote reserved, # unreserved, or '%') return quote(unquote_unreserved(uri), safe=safe_with_percent) except InvalidURL: # We couldn't unquote the given URI, so let's try quoting it, but # there may be unquoted '%'s in the URI. We need to make sure they're # properly quoted so they do not cause issues elsewhere. return quote(uri, safe=safe_without_percent)
[ "def", "requote_uri", "(", "uri", ")", ":", "safe_with_percent", "=", "\"!#$%&'()*+,/:;=?@[]~\"", "safe_without_percent", "=", "\"!#$&'()*+,/:;=?@[]~\"", "try", ":", "# Unquote only the unreserved characters", "# Then quote only illegal characters (do not quote reserved,", "# unreserved, or '%')", "return", "quote", "(", "unquote_unreserved", "(", "uri", ")", ",", "safe", "=", "safe_with_percent", ")", "except", "InvalidURL", ":", "# We couldn't unquote the given URI, so let's try quoting it, but", "# there may be unquoted '%'s in the URI. We need to make sure they're", "# properly quoted so they do not cause issues elsewhere.", "return", "quote", "(", "uri", ",", "safe", "=", "safe_without_percent", ")" ]
https://github.com/inspurer/WorkAttendanceSystem/blob/1221e2d67bdf5bb15fe99517cc3ded58ccb066df/V2.0/venv/Lib/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/requests/utils.py#L462-L481
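The unquote/quote cycle at the heart of requote_uri, sketched with the standard library. This is a simplification: requests vendors its own quote/unquote, and its unquote_unreserved step unquotes only unreserved characters, a distinction plain unquote does not make.

from urllib.parse import quote, unquote

uri = "http://example.com/a%20b/c d"  # one escaped space, one raw space
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
print(quote(unquote(uri), safe=safe_with_percent))
# http://example.com/a%20b/c%20d -- both spaces end up consistently quoted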
twilio/stashboard
3e4b18a8168c102d1e1d7f88fec22bcbfc530d23
stashboard/contrib/httplib2/__init__.py
python
Http.__init__
(self, cache=None, timeout=None, proxy_info=None)
The value of proxy_info is a ProxyInfo instance. If 'cache' is a string then it is used as a directory name for a disk cache. Otherwise it must be an object that supports the same interface as FileCache. All timeouts are in seconds. If None is passed for timeout then Python's default timeout for sockets will be used. See for example the docs of socket.setdefaulttimeout(): http://docs.python.org/library/socket.html#socket.setdefaulttimeout
The value of proxy_info is a ProxyInfo instance.
[ "The", "value", "of", "proxy_info", "is", "a", "ProxyInfo", "instance", "." ]
def __init__(self, cache=None, timeout=None, proxy_info=None): """ The value of proxy_info is a ProxyInfo instance. If 'cache' is a string then it is used as a directory name for a disk cache. Otherwise it must be an object that supports the same interface as FileCache. All timeouts are in seconds. If None is passed for timeout then Python's default timeout for sockets will be used. See for example the docs of socket.setdefaulttimeout(): http://docs.python.org/library/socket.html#socket.setdefaulttimeout """ self.proxy_info = proxy_info # Map domain name to an httplib connection self.connections = {} # The location of the cache, for now a directory # where cached responses are held. if cache and isinstance(cache, str): self.cache = FileCache(cache) else: self.cache = cache # Name/password self.credentials = Credentials() # Key/cert self.certificates = KeyCerts() # authorization objects self.authorizations = [] # If set to False then no redirects are followed, even safe ones. self.follow_redirects = True # Which HTTP methods do we apply optimistic concurrency to, i.e. # which methods get an "if-match:" etag header added to them. self.optimistic_concurrency_methods = ["PUT"] # If 'follow_redirects' is True, and this is set to True then # all redirecs are followed, including unsafe ones. self.follow_all_redirects = False self.ignore_etag = False self.force_exception_to_status_code = False self.timeout = timeout
[ "def", "__init__", "(", "self", ",", "cache", "=", "None", ",", "timeout", "=", "None", ",", "proxy_info", "=", "None", ")", ":", "self", ".", "proxy_info", "=", "proxy_info", "# Map domain name to an httplib connection", "self", ".", "connections", "=", "{", "}", "# The location of the cache, for now a directory", "# where cached responses are held.", "if", "cache", "and", "isinstance", "(", "cache", ",", "str", ")", ":", "self", ".", "cache", "=", "FileCache", "(", "cache", ")", "else", ":", "self", ".", "cache", "=", "cache", "# Name/password", "self", ".", "credentials", "=", "Credentials", "(", ")", "# Key/cert", "self", ".", "certificates", "=", "KeyCerts", "(", ")", "# authorization objects", "self", ".", "authorizations", "=", "[", "]", "# If set to False then no redirects are followed, even safe ones.", "self", ".", "follow_redirects", "=", "True", "# Which HTTP methods do we apply optimistic concurrency to, i.e.", "# which methods get an \"if-match:\" etag header added to them.", "self", ".", "optimistic_concurrency_methods", "=", "[", "\"PUT\"", "]", "# If 'follow_redirects' is True, and this is set to True then", "# all redirecs are followed, including unsafe ones.", "self", ".", "follow_all_redirects", "=", "False", "self", ".", "ignore_etag", "=", "False", "self", ".", "force_exception_to_status_code", "=", "False", "self", ".", "timeout", "=", "timeout" ]
https://github.com/twilio/stashboard/blob/3e4b18a8168c102d1e1d7f88fec22bcbfc530d23/stashboard/contrib/httplib2/__init__.py#L831-L878
samuelclay/NewsBlur
2c45209df01a1566ea105e04d499367f32ac9ad2
vendor/mms-agent/confPull.py
python
ConfPullThread._pullRemoteConf
( self )
Pull the remote configuration data
Pull the remote configuration data
[ "Pull", "the", "remote", "configuration", "data" ]
def _pullRemoteConf( self ): """ Pull the remote configuration data """ uniqueHostnames = [] res = None try: res = urllib.request.urlopen( self.confUrl ) resBson = None try: resBson = bson.decode_all( res.read() ) finally: if res is not None: res.close() res = None if len(resBson) != 1: return confResponse = resBson[0] if 'hosts' not in confResponse: self.mmsAgent.stopAll() return if 'disableDbstats' in confResponse: self.mmsAgent.disableDbstats = confResponse['disableDbstats'] else: self.mmsAgent.disableDbstats = False hosts = confResponse['hosts'] self.mmsAgent.serverHostDefsLock.acquire() try: # Extract the host information if hosts is not None: for host in hosts: hostDef, hostDefLast = self.mmsAgent.extractHostDef( host ) hostKey = hostDef['hostKey'] uniqueHostnames.append( hostKey ) if hostKey not in self.mmsAgent.serverHostDefs: self.mmsAgent.startMonitoringThreads( hostDef ) else: self.mmsAgent.checkChangedHostDef( hostDef, hostDefLast ) hostDef = None hostDefLast = None # Check to see if anything was removed for hostDef in list(self.mmsAgent.serverHostDefs.values()): if hostDef['hostKey'] not in uniqueHostnames: self.mmsAgent.stopAndClearHost( hostDef['hostKey'] ) finally: self.mmsAgent.serverHostDefsLock.release() except Exception as e: if res is not None: try: res.close() res = None except: pass self.logger.warning( "Problem pulling configuration data from MMS (check firewall and network): " + traceback.format_exc( e ) )
[ "def", "_pullRemoteConf", "(", "self", ")", ":", "uniqueHostnames", "=", "[", "]", "res", "=", "None", "try", ":", "res", "=", "urllib", ".", "request", ".", "urlopen", "(", "self", ".", "confUrl", ")", "resBson", "=", "None", "try", ":", "resBson", "=", "bson", ".", "decode_all", "(", "res", ".", "read", "(", ")", ")", "finally", ":", "if", "res", "is", "not", "None", ":", "res", ".", "close", "(", ")", "res", "=", "None", "if", "len", "(", "resBson", ")", "!=", "1", ":", "return", "confResponse", "=", "resBson", "[", "0", "]", "if", "'hosts'", "not", "in", "confResponse", ":", "self", ".", "mmsAgent", ".", "stopAll", "(", ")", "return", "if", "'disableDbstats'", "in", "confResponse", ":", "self", ".", "mmsAgent", ".", "disableDbstats", "=", "confResponse", "[", "'disableDbstats'", "]", "else", ":", "self", ".", "mmsAgent", ".", "disableDbstats", "=", "False", "hosts", "=", "confResponse", "[", "'hosts'", "]", "self", ".", "mmsAgent", ".", "serverHostDefsLock", ".", "acquire", "(", ")", "try", ":", "# Extract the host information", "if", "hosts", "is", "not", "None", ":", "for", "host", "in", "hosts", ":", "hostDef", ",", "hostDefLast", "=", "self", ".", "mmsAgent", ".", "extractHostDef", "(", "host", ")", "hostKey", "=", "hostDef", "[", "'hostKey'", "]", "uniqueHostnames", ".", "append", "(", "hostKey", ")", "if", "hostKey", "not", "in", "self", ".", "mmsAgent", ".", "serverHostDefs", ":", "self", ".", "mmsAgent", ".", "startMonitoringThreads", "(", "hostDef", ")", "else", ":", "self", ".", "mmsAgent", ".", "checkChangedHostDef", "(", "hostDef", ",", "hostDefLast", ")", "hostDef", "=", "None", "hostDefLast", "=", "None", "# Check to see if anything was removed", "for", "hostDef", "in", "list", "(", "self", ".", "mmsAgent", ".", "serverHostDefs", ".", "values", "(", ")", ")", ":", "if", "hostDef", "[", "'hostKey'", "]", "not", "in", "uniqueHostnames", ":", "self", ".", "mmsAgent", ".", "stopAndClearHost", "(", "hostDef", "[", "'hostKey'", "]", ")", "finally", ":", "self", ".", "mmsAgent", ".", "serverHostDefsLock", ".", "release", "(", ")", "except", "Exception", "as", "e", ":", "if", "res", "is", "not", "None", ":", "try", ":", "res", ".", "close", "(", ")", "res", "=", "None", "except", ":", "pass", "self", ".", "logger", ".", "warning", "(", "\"Problem pulling configuration data from MMS (check firewall and network): \"", "+", "traceback", ".", "format_exc", "(", "e", ")", ")" ]
https://github.com/samuelclay/NewsBlur/blob/2c45209df01a1566ea105e04d499367f32ac9ad2/vendor/mms-agent/confPull.py#L39-L108
whoosh-community/whoosh
5421f1ab3bb802114105b3181b7ce4f44ad7d0bb
src/whoosh/writing.py
python
IndexWriter.cancel
(self)
Cancels any documents/deletions added by this object and unlocks the index.
Cancels any documents/deletions added by this object and unlocks the index.
[ "Cancels", "any", "documents", "/", "deletions", "added", "by", "this", "object", "and", "unlocks", "the", "index", "." ]
def cancel(self): """Cancels any documents/deletions added by this object and unlocks the index. """ pass
[ "def", "cancel", "(", "self", ")", ":", "pass" ]
https://github.com/whoosh-community/whoosh/blob/5421f1ab3bb802114105b3181b7ce4f44ad7d0bb/src/whoosh/writing.py#L496-L500
zwczou/weixin-python
4d0964b1fbaad270abb2a64037b173894e66019d
weixin/pay.py
python
WeixinPay.close_order
(self, out_trade_no, **data)
return self._fetch(url, data)
Close an order. out_trade_no is required; appid, mchid, and nonce_str need not be filled in.
Close an order. out_trade_no is required; appid, mchid, and nonce_str need not be filled in.
[ "Close", "an", "order.", "out_trade_no", "is", "required", "appid", "mchid", "nonce_str", "need", "not", "be", "filled", "in" ]
def close_order(self, out_trade_no, **data): """ Close an order. out_trade_no is required; appid, mchid, and nonce_str need not be filled in. """ url = self.PAY_HOST + '/pay/closeorder' data.setdefault("out_trade_no", out_trade_no) return self._fetch(url, data)
[ "def", "close_order", "(", "self", ",", "out_trade_no", ",", "*", "*", "data", ")", ":", "url", "=", "self", ".", "PAY_HOST", "+", "'/pay/closeorder'", "data", ".", "setdefault", "(", "\"out_trade_no\"", ",", "out_trade_no", ")", "return", "self", ".", "_fetch", "(", "url", ",", "data", ")" ]
https://github.com/zwczou/weixin-python/blob/4d0964b1fbaad270abb2a64037b173894e66019d/weixin/pay.py#L212-L222
aws-samples/aws-kube-codesuite
ab4e5ce45416b83bffb947ab8d234df5437f4fca
src/rsa/transform.py
python
bytes2int
(raw_bytes)
return int(binascii.hexlify(raw_bytes), 16)
r"""Converts a list of bytes or an 8-bit string to an integer. When using unicode strings, encode it to some encoding like UTF8 first. >>> (((128 * 256) + 64) * 256) + 15 8405007 >>> bytes2int(b'\x80@\x0f') 8405007
r"""Converts a list of bytes or an 8-bit string to an integer.
[ "r", "Converts", "a", "list", "of", "bytes", "or", "an", "8", "-", "bit", "string", "to", "an", "integer", "." ]
def bytes2int(raw_bytes): r"""Converts a list of bytes or an 8-bit string to an integer. When using unicode strings, encode it to some encoding like UTF8 first. >>> (((128 * 256) + 64) * 256) + 15 8405007 >>> bytes2int(b'\x80@\x0f') 8405007 """ return int(binascii.hexlify(raw_bytes), 16)
[ "def", "bytes2int", "(", "raw_bytes", ")", ":", "return", "int", "(", "binascii", ".", "hexlify", "(", "raw_bytes", ")", ",", "16", ")" ]
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/rsa/transform.py#L40-L52
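The doctest value above checks out, and the conversion round-trips with int.to_bytes (standard library only):

import binascii

def bytes2int(raw_bytes):
    return int(binascii.hexlify(raw_bytes), 16)

n = bytes2int(b"\x80@\x0f")
print(n)                     # 8405007
print(n.to_bytes(3, "big"))  # b'\x80@\x0f' -- round trip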
replit-archive/empythoned
977ec10ced29a3541a4973dc2b59910805695752
cpython/Lib/lib-tk/Tkinter.py
python
Misc._options
(self, cnf, kw = None)
return res
Internal function.
Internal function.
[ "Internal", "function", "." ]
def _options(self, cnf, kw = None): """Internal function.""" if kw: cnf = _cnfmerge((cnf, kw)) else: cnf = _cnfmerge(cnf) res = () for k, v in cnf.items(): if v is not None: if k[-1] == '_': k = k[:-1] if hasattr(v, '__call__'): v = self._register(v) elif isinstance(v, (tuple, list)): nv = [] for item in v: if not isinstance(item, (basestring, int)): break elif isinstance(item, int): nv.append('%d' % item) else: # format it to proper Tcl code if it contains space nv.append(('{%s}' if ' ' in item else '%s') % item) else: v = ' '.join(nv) res = res + ('-'+k, v) return res
[ "def", "_options", "(", "self", ",", "cnf", ",", "kw", "=", "None", ")", ":", "if", "kw", ":", "cnf", "=", "_cnfmerge", "(", "(", "cnf", ",", "kw", ")", ")", "else", ":", "cnf", "=", "_cnfmerge", "(", "cnf", ")", "res", "=", "(", ")", "for", "k", ",", "v", "in", "cnf", ".", "items", "(", ")", ":", "if", "v", "is", "not", "None", ":", "if", "k", "[", "-", "1", "]", "==", "'_'", ":", "k", "=", "k", "[", ":", "-", "1", "]", "if", "hasattr", "(", "v", ",", "'__call__'", ")", ":", "v", "=", "self", ".", "_register", "(", "v", ")", "elif", "isinstance", "(", "v", ",", "(", "tuple", ",", "list", ")", ")", ":", "nv", "=", "[", "]", "for", "item", "in", "v", ":", "if", "not", "isinstance", "(", "item", ",", "(", "basestring", ",", "int", ")", ")", ":", "break", "elif", "isinstance", "(", "item", ",", "int", ")", ":", "nv", ".", "append", "(", "'%d'", "%", "item", ")", "else", ":", "# format it to proper Tcl code if it contains space", "nv", ".", "append", "(", "(", "'{%s}'", "if", "' '", "in", "item", "else", "'%s'", ")", "%", "item", ")", "else", ":", "v", "=", "' '", ".", "join", "(", "nv", ")", "res", "=", "res", "+", "(", "'-'", "+", "k", ",", "v", ")", "return", "res" ]
https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/cpython/Lib/lib-tk/Tkinter.py#L1040-L1065
cclib/cclib
81cd4a81cc4a3bbed7016b3e417ca9bff8ad3a92
cclib/method/density.py
python
Density.__str__
(self)
return "Density matrix of %s" % (self.data)
Return a string representation of the object.
Return a string representation of the object.
[ "Return", "a", "string", "representation", "of", "the", "object", "." ]
def __str__(self): """Return a string representation of the object.""" return "Density matrix of %s" % (self.data)
[ "def", "__str__", "(", "self", ")", ":", "return", "\"Density matrix of %s\"", "%", "(", "self", ".", "data", ")" ]
https://github.com/cclib/cclib/blob/81cd4a81cc4a3bbed7016b3e417ca9bff8ad3a92/cclib/method/density.py#L26-L28
SteveDoyle2/pyNastran
eda651ac2d4883d95a34951f8a002ff94f642a1a
pyNastran/dev/bdf_vectorized/bdf.py
python
BDF._parse_darea
(self, card_name, cards)
adds dareas
adds dareas
[ "adds", "dareas" ]
def _parse_darea(self, card_name, cards): """adds dareas""" self._parse_multi(card_name, cards, self.darea, [5])
[ "def", "_parse_darea", "(", "self", ",", "card_name", ",", "cards", ")", ":", "self", ".", "_parse_multi", "(", "card_name", ",", "cards", ",", "self", ".", "darea", ",", "[", "5", "]", ")" ]
https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/dev/bdf_vectorized/bdf.py#L3103-L3105
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/speaklater.py
python
_LazyString.__copy__
(self)
return self
[]
def __copy__(self): return self
[ "def", "__copy__", "(", "self", ")", ":", "return", "self" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/speaklater.py#L188-L189
ray-project/ray
703c1610348615dcb8c2d141a0c46675084660f5
python/ray/tune/automl/search_space.py
python
ContinuousSpace.__init__
(self, name, start, end, num, distribution=LINEAR)
Initialize ContinuousSpace. Arguments: name (str): Name of the parameter. start: Start of the continuous space included. end: End of the continuous space included. num: Sampling count if possible. distribution: Sampling distribution, should be in [LINEAR]
Initialize ContinuousSpace.
[ "Initialize", "ContinuousSpace", "." ]
def __init__(self, name, start, end, num, distribution=LINEAR): """Initialize ContinuousSpace. Arguments: name (str): Name of the parameter. start: Start of the continuous space included. end: End of the continuous space included. num: Sampling count if possible. distribution: Sampling distribution, should be in [LINEAR] """ super(ContinuousSpace, self).__init__(name) self.start = float(start) self.end = float(end) self.num = num if distribution == ContinuousSpace.LINEAR: self.choices = np.linspace(start, end, num) else: raise NotImplementedError( "Distribution %s not supported" % distribution) self.distribution = distribution
[ "def", "__init__", "(", "self", ",", "name", ",", "start", ",", "end", ",", "num", ",", "distribution", "=", "LINEAR", ")", ":", "super", "(", "ContinuousSpace", ",", "self", ")", ".", "__init__", "(", "name", ")", "self", ".", "start", "=", "float", "(", "start", ")", "self", ".", "end", "=", "float", "(", "end", ")", "self", ".", "num", "=", "num", "if", "distribution", "==", "ContinuousSpace", ".", "LINEAR", ":", "self", ".", "choices", "=", "np", ".", "linspace", "(", "start", ",", "end", ",", "num", ")", "else", ":", "raise", "NotImplementedError", "(", "\"Distribution %s not supported\"", "%", "distribution", ")", "self", ".", "distribution", "=", "distribution" ]
https://github.com/ray-project/ray/blob/703c1610348615dcb8c2d141a0c46675084660f5/python/ray/tune/automl/search_space.py#L70-L91
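What the LINEAR branch stores in self.choices, shown directly with numpy: num evenly spaced samples over [start, end], both endpoints included.

import numpy as np

choices = np.linspace(0.001, 0.1, 5)
print(choices)  # 0.001, 0.02575, 0.0505, 0.07525, 0.1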
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/databases/findstat.py
python
FindStatCollection.from_string
(self)
return self._data["Code"].string_to_element
r""" Return a function that returns the object given a FindStat representation. OUTPUT: The function that produces the sage object given its FindStat representation as a string. EXAMPLES:: sage: from sage.databases.findstat import FindStatCollection sage: c = FindStatCollection("Posets") # optional -- internet sage: p = c.from_string()('([(0, 2), (2, 1)], 3)') # optional -- internet sage: p.cover_relations() # optional -- internet [[0, 2], [2, 1]] sage: c = FindStatCollection("Binary Words") # optional -- internet sage: w = c.from_string()('010101') # optional -- internet sage: w in c._data["Code"].elements_on_level(6) # optional -- internet True
r""" Return a function that returns the object given a FindStat representation.
[ "r", "Return", "a", "function", "that", "returns", "the", "object", "given", "a", "FindStat", "representation", "." ]
def from_string(self): r""" Return a function that returns the object given a FindStat representation. OUTPUT: The function that produces the sage object given its FindStat representation as a string. EXAMPLES:: sage: from sage.databases.findstat import FindStatCollection sage: c = FindStatCollection("Posets") # optional -- internet sage: p = c.from_string()('([(0, 2), (2, 1)], 3)') # optional -- internet sage: p.cover_relations() # optional -- internet [[0, 2], [2, 1]] sage: c = FindStatCollection("Binary Words") # optional -- internet sage: w = c.from_string()('010101') # optional -- internet sage: w in c._data["Code"].elements_on_level(6) # optional -- internet True """ return self._data["Code"].string_to_element
[ "def", "from_string", "(", "self", ")", ":", "return", "self", ".", "_data", "[", "\"Code\"", "]", ".", "string_to_element" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/databases/findstat.py#L4359-L4382
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/redis/connection.py
python
Connection._connect
(self)
Create a TCP socket connection
Create a TCP socket connection
[ "Create", "a", "TCP", "socket", "connection" ]
def _connect(self): "Create a TCP socket connection" # we want to mimic what socket.create_connection does to support # ipv4/ipv6, but we want to set options prior to calling # socket.connect() err = None for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM): family, socktype, proto, canonname, socket_address = res sock = None try: sock = socket.socket(family, socktype, proto) # TCP_NODELAY sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) # TCP_KEEPALIVE if self.socket_keepalive: sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) for k, v in iteritems(self.socket_keepalive_options): sock.setsockopt(socket.SOL_TCP, k, v) # set the socket_connect_timeout before we connect sock.settimeout(self.socket_connect_timeout) # connect sock.connect(socket_address) # set the socket_timeout now that we're connected sock.settimeout(self.socket_timeout) return sock except socket.error as _: err = _ if sock is not None: sock.close() if err is not None: raise err raise socket.error("socket.getaddrinfo returned an empty list")
[ "def", "_connect", "(", "self", ")", ":", "# we want to mimic what socket.create_connection does to support", "# ipv4/ipv6, but we want to set options prior to calling", "# socket.connect()", "err", "=", "None", "for", "res", "in", "socket", ".", "getaddrinfo", "(", "self", ".", "host", ",", "self", ".", "port", ",", "0", ",", "socket", ".", "SOCK_STREAM", ")", ":", "family", ",", "socktype", ",", "proto", ",", "canonname", ",", "socket_address", "=", "res", "sock", "=", "None", "try", ":", "sock", "=", "socket", ".", "socket", "(", "family", ",", "socktype", ",", "proto", ")", "# TCP_NODELAY", "sock", ".", "setsockopt", "(", "socket", ".", "IPPROTO_TCP", ",", "socket", ".", "TCP_NODELAY", ",", "1", ")", "# TCP_KEEPALIVE", "if", "self", ".", "socket_keepalive", ":", "sock", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_KEEPALIVE", ",", "1", ")", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "socket_keepalive_options", ")", ":", "sock", ".", "setsockopt", "(", "socket", ".", "SOL_TCP", ",", "k", ",", "v", ")", "# set the socket_connect_timeout before we connect", "sock", ".", "settimeout", "(", "self", ".", "socket_connect_timeout", ")", "# connect", "sock", ".", "connect", "(", "socket_address", ")", "# set the socket_timeout now that we're connected", "sock", ".", "settimeout", "(", "self", ".", "socket_timeout", ")", "return", "sock", "except", "socket", ".", "error", "as", "_", ":", "err", "=", "_", "if", "sock", "is", "not", "None", ":", "sock", ".", "close", "(", ")", "if", "err", "is", "not", "None", ":", "raise", "err", "raise", "socket", ".", "error", "(", "\"socket.getaddrinfo returned an empty list\"", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/redis/connection.py#L504-L542
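The ipv4/ipv6 fallback pattern used above, reduced to a standalone standard-library sketch. The function name connect_nodelay is ours; it mirrors socket.create_connection while allowing socket options to be set before connect, which is exactly why the original does not call create_connection.

import socket

def connect_nodelay(host, port, timeout=5.0):
    err = None
    for family, socktype, proto, _, addr in socket.getaddrinfo(
            host, port, 0, socket.SOCK_STREAM):
        sock = None
        try:
            sock = socket.socket(family, socktype, proto)
            # option set *before* connecting
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            sock.settimeout(timeout)
            sock.connect(addr)
            return sock
        except socket.error as e:
            err = e
            if sock is not None:
                sock.close()
    if err is not None:
        raise err
    raise socket.error("socket.getaddrinfo returned an empty list")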
OptMLGroup/VRP-RL
b794fb1e4c4bb70a62cfa54504ee7a247adbc2a0
VRP/vrp_utils.py
python
DataGenerator.__init__
(self, args)
This class generates VRP problems for training and test Inputs: args: the parameter dictionary. It should include: args['random_seed']: random seed args['test_size']: number of problems to test args['n_nodes']: number of nodes args['n_cust']: number of customers args['batch_size']: batchsize for training
This class generates VRP problems for training and test Inputs: args: the parameter dictionary. It should include: args['random_seed']: random seed args['test_size']: number of problems to test args['n_nodes']: number of nodes args['n_cust']: number of customers args['batch_size']: batchsize for training
[ "This", "class", "generates", "VRP", "problems", "for", "training", "and", "test", "Inputs", ":", "args", ":", "the", "parameter", "dictionary", ".", "It", "should", "include", ":", "args", "[", "random_seed", "]", ":", "random", "seed", "args", "[", "test_size", "]", ":", "number", "of", "problems", "to", "test", "args", "[", "n_nodes", "]", ":", "number", "of", "nodes", "args", "[", "n_cust", "]", ":", "number", "of", "customers", "args", "[", "batch_size", "]", ":", "batchsize", "for", "training" ]
def __init__(self, args): ''' This class generates VRP problems for training and test Inputs: args: the parameter dictionary. It should include: args['random_seed']: random seed args['test_size']: number of problems to test args['n_nodes']: number of nodes args['n_cust']: number of customers args['batch_size']: batchsize for training ''' self.args = args self.rnd = np.random.RandomState(seed= args['random_seed']) print('Created train iterator.') # create test data self.n_problems = args['test_size'] self.test_data = create_VRP_dataset(self.n_problems,args['n_cust'],'./data', seed = args['random_seed']+1,data_type='test') self.reset()
[ "def", "__init__", "(", "self", ",", "args", ")", ":", "self", ".", "args", "=", "args", "self", ".", "rnd", "=", "np", ".", "random", ".", "RandomState", "(", "seed", "=", "args", "[", "'random_seed'", "]", ")", "print", "(", "'Created train iterator.'", ")", "# create test data", "self", ".", "n_problems", "=", "args", "[", "'test_size'", "]", "self", ".", "test_data", "=", "create_VRP_dataset", "(", "self", ".", "n_problems", ",", "args", "[", "'n_cust'", "]", ",", "'./data'", ",", "seed", "=", "args", "[", "'random_seed'", "]", "+", "1", ",", "data_type", "=", "'test'", ")", "self", ".", "reset", "(", ")" ]
https://github.com/OptMLGroup/VRP-RL/blob/b794fb1e4c4bb70a62cfa54504ee7a247adbc2a0/VRP/vrp_utils.py#L57-L80
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_serviceaccount_secret.py
python
Yedit.parse_value
(inc_value, vtype='')
return inc_value
determine value type passed
determine value type passed
[ "determine", "value", "type", "passed" ]
def parse_value(inc_value, vtype=''): '''determine value type passed''' true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', 'on', 'On', 'ON', ] false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', 'off', 'Off', 'OFF'] # It came in as a string but you didn't specify value_type as string # we will convert to bool if it matches any of the above cases if isinstance(inc_value, str) and 'bool' in vtype: if inc_value not in true_bools and inc_value not in false_bools: raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype)) elif isinstance(inc_value, bool) and 'str' in vtype: inc_value = str(inc_value) # There is a special case where '' will turn into None after yaml loading it so skip if isinstance(inc_value, str) and inc_value == '': pass # If vtype is not str then go ahead and attempt to yaml load it. elif isinstance(inc_value, str) and 'str' not in vtype: try: inc_value = yaml.safe_load(inc_value) except Exception: raise YeditException('Could not determine type of incoming value. ' + 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype)) return inc_value
[ "def", "parse_value", "(", "inc_value", ",", "vtype", "=", "''", ")", ":", "true_bools", "=", "[", "'y'", ",", "'Y'", ",", "'yes'", ",", "'Yes'", ",", "'YES'", ",", "'true'", ",", "'True'", ",", "'TRUE'", ",", "'on'", ",", "'On'", ",", "'ON'", ",", "]", "false_bools", "=", "[", "'n'", ",", "'N'", ",", "'no'", ",", "'No'", ",", "'NO'", ",", "'false'", ",", "'False'", ",", "'FALSE'", ",", "'off'", ",", "'Off'", ",", "'OFF'", "]", "# It came in as a string but you didn't specify value_type as string", "# we will convert to bool if it matches any of the above cases", "if", "isinstance", "(", "inc_value", ",", "str", ")", "and", "'bool'", "in", "vtype", ":", "if", "inc_value", "not", "in", "true_bools", "and", "inc_value", "not", "in", "false_bools", ":", "raise", "YeditException", "(", "'Not a boolean type. str=[{}] vtype=[{}]'", ".", "format", "(", "inc_value", ",", "vtype", ")", ")", "elif", "isinstance", "(", "inc_value", ",", "bool", ")", "and", "'str'", "in", "vtype", ":", "inc_value", "=", "str", "(", "inc_value", ")", "# There is a special case where '' will turn into None after yaml loading it so skip", "if", "isinstance", "(", "inc_value", ",", "str", ")", "and", "inc_value", "==", "''", ":", "pass", "# If vtype is not str then go ahead and attempt to yaml load it.", "elif", "isinstance", "(", "inc_value", ",", "str", ")", "and", "'str'", "not", "in", "vtype", ":", "try", ":", "inc_value", "=", "yaml", ".", "safe_load", "(", "inc_value", ")", "except", "Exception", ":", "raise", "YeditException", "(", "'Could not determine type of incoming value. '", "+", "'value=[{}] vtype=[{}]'", ".", "format", "(", "type", "(", "inc_value", ")", ",", "vtype", ")", ")", "return", "inc_value" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_serviceaccount_secret.py#L671-L697
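How the yaml.safe_load branch coerces strings in practice (assumes PyYAML, which the original module already depends on):

import yaml

print(yaml.safe_load("3"))       # 3 (int)
print(yaml.safe_load("true"))    # True (bool)
print(yaml.safe_load("[a, b]"))  # ['a', 'b'] (list)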
openstack/designate
bff3d5f6e31fe595a77143ec4ac779c187bf72a8
designate/api/admin/views/base.py
python
BaseView.list_basic
(self, context, request, items)
return [self.show_basic(context, request, i) for i in items]
Non-detailed list of items
Non-detailed list of items
[ "Non", "-", "detailed", "list", "of", "items" ]
def list_basic(self, context, request, items): """Non-detailed list of items""" return [self.show_basic(context, request, i) for i in items]
[ "def", "list_basic", "(", "self", ",", "context", ",", "request", ",", "items", ")", ":", "return", "[", "self", ".", "show_basic", "(", "context", ",", "request", ",", "i", ")", "for", "i", "in", "items", "]" ]
https://github.com/openstack/designate/blob/bff3d5f6e31fe595a77143ec4ac779c187bf72a8/designate/api/admin/views/base.py#L69-L71
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/google/appengine/api/datastore_distributed.py
python
DatastoreDistributed._maybeSetDefaultAuthDomain
(self)
Sets default auth domain if not set.
Sets default auth domain if not set.
[ "Sets", "default", "auth", "domain", "if", "not", "set", "." ]
def _maybeSetDefaultAuthDomain(self): """ Sets default auth domain if not set. """ auth_domain = os.environ.get("AUTH_DOMAIN") if not auth_domain: os.environ['AUTH_DOMAIN'] = "appscale.com"
[ "def", "_maybeSetDefaultAuthDomain", "(", "self", ")", ":", "auth_domain", "=", "os", ".", "environ", ".", "get", "(", "\"AUTH_DOMAIN\"", ")", "if", "not", "auth_domain", ":", "os", ".", "environ", "[", "'AUTH_DOMAIN'", "]", "=", "\"appscale.com\"" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/appengine/api/datastore_distributed.py#L329-L333
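The same default-if-missing idiom is available as one standard-library call, with a subtle difference worth noting:

import os

# setdefault only fills a *missing* key; the original's falsy check would
# also overwrite an empty string.
os.environ.setdefault("AUTH_DOMAIN", "appscale.com")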
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/docutils/docutils/parsers/rst/states.py
python
SpecializedBody.invalid_input
(self, match=None, context=None, next_state=None)
Not a compound element member. Abort this state machine.
Not a compound element member. Abort this state machine.
[ "Not", "a", "compound", "element", "member", ".", "Abort", "this", "state", "machine", "." ]
def invalid_input(self, match=None, context=None, next_state=None): """Not a compound element member. Abort this state machine.""" self.state_machine.previous_line() # back up so parent SM can reassess raise EOFError
[ "def", "invalid_input", "(", "self", ",", "match", "=", "None", ",", "context", "=", "None", ",", "next_state", "=", "None", ")", ":", "self", ".", "state_machine", ".", "previous_line", "(", ")", "# back up so parent SM can reassess", "raise", "EOFError" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/docutils/docutils/parsers/rst/states.py#L2450-L2453
quantumlib/OpenFermion
6187085f2a7707012b68370b625acaeed547e62b
src/openfermion/hamiltonians/special_operators.py
python
s_squared_operator
(n_spatial_orbitals: int)
return operator
r"""Return the s^{2} operator. $$ \begin{align} S^{2} = S^{-} S^{+} + S^{z}( S^{z} + 1) \end{align} $$ Args: n_spatial_orbitals: number of spatial orbitals (n_qubits + 1 // 2). Returns: operator (FermionOperator): corresponding to the s+ operator over n_spatial_orbitals. Note: The indexing convention used is that even indices correspond to spin-up (alpha) modes and odd indices correspond to spin-down (beta) modes.
r"""Return the s^{2} operator.
[ "r", "Return", "the", "s^", "{", "2", "}", "operator", "." ]
def s_squared_operator(n_spatial_orbitals: int) -> FermionOperator:
    r"""Return the s^{2} operator.

    $$
    \begin{align}
    S^{2} = S^{-} S^{+} + S^{z}( S^{z} + 1)
    \end{align}
    $$

    Args:
        n_spatial_orbitals: number of spatial orbitals ((n_qubits + 1) // 2).

    Returns:
        operator (FermionOperator): corresponding to the s^{2} operator over
            n_spatial_orbitals.

    Note:
        The indexing convention used is that even indices correspond to
        spin-up (alpha) modes and odd indices correspond to spin-down (beta)
        modes.
    """
    if not isinstance(n_spatial_orbitals, int):
        raise TypeError("n_orbitals must be specified as an integer")

    fermion_identity = FermionOperator(())
    operator = (s_minus_operator(n_spatial_orbitals) *
                s_plus_operator(n_spatial_orbitals))
    operator += (sz_operator(n_spatial_orbitals) *
                 (sz_operator(n_spatial_orbitals) + fermion_identity))
    return operator
[ "def", "s_squared_operator", "(", "n_spatial_orbitals", ":", "int", ")", "->", "FermionOperator", ":", "if", "not", "isinstance", "(", "n_spatial_orbitals", ",", "int", ")", ":", "raise", "TypeError", "(", "\"n_orbitals must be specified as an integer\"", ")", "fermion_identity", "=", "FermionOperator", "(", "(", ")", ")", "operator", "=", "(", "s_minus_operator", "(", "n_spatial_orbitals", ")", "*", "s_plus_operator", "(", "n_spatial_orbitals", ")", ")", "operator", "+=", "(", "sz_operator", "(", "n_spatial_orbitals", ")", "*", "(", "sz_operator", "(", "n_spatial_orbitals", ")", "+", "fermion_identity", ")", ")", "return", "operator" ]
https://github.com/quantumlib/OpenFermion/blob/6187085f2a7707012b68370b625acaeed547e62b/src/openfermion/hamiltonians/special_operators.py#L182-L211
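Usage is direct once OpenFermion is installed; a short sketch (assuming the package's public import path, which may vary by version):

```python
from openfermion.hamiltonians import s_squared_operator

# Two spatial orbitals -> four spin-orbital modes (0..3).
s2 = s_squared_operator(2)
print(s2)  # FermionOperator terms for S^- S^+ + S^z (S^z + 1)
```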
prompt-toolkit/pymux
3f66e62b9de4b2251c7f9afad6c516dc5a30ec67
pymux/layout.py
python
_draw_number
(screen, x_offset, y_offset, number, style='class:clock', transparent=False)
Write number at position.
Write number at position.
[ "Write", "number", "at", "position", "." ]
def _draw_number(screen, x_offset, y_offset, number, style='class:clock',
                 transparent=False):
    " Write number at position. "
    fg = Char(' ', 'class:clock')
    bg = Char(' ', '')

    for y, row in enumerate(_numbers[number]):
        screen_row = screen.data_buffer[y + y_offset]
        for x, n in enumerate(row):
            if n == '#':
                screen_row[x + x_offset] = fg
            elif not transparent:
                screen_row[x + x_offset] = bg
[ "def", "_draw_number", "(", "screen", ",", "x_offset", ",", "y_offset", ",", "number", ",", "style", "=", "'class:clock'", ",", "transparent", "=", "False", ")", ":", "fg", "=", "Char", "(", "' '", ",", "'class:clock'", ")", "bg", "=", "Char", "(", "' '", ",", "''", ")", "for", "y", ",", "row", "in", "enumerate", "(", "_numbers", "[", "number", "]", ")", ":", "screen_row", "=", "screen", ".", "data_buffer", "[", "y", "+", "y_offset", "]", "for", "x", ",", "n", "in", "enumerate", "(", "row", ")", ":", "if", "n", "==", "'#'", ":", "screen_row", "[", "x", "+", "x_offset", "]", "=", "fg", "elif", "not", "transparent", ":", "screen_row", "[", "x", "+", "x_offset", "]", "=", "bg" ]
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/layout.py#L102-L114
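The function depends on pymux internals (`Char`, `_numbers`, the screen buffer), but the raster technique generalizes; a self-contained sketch with a hypothetical glyph table:

```python
# Stamp a '#'-bitmap glyph into a 2D character buffer at (x_offset, y_offset).
GLYPH_ONE = [
    " # ",
    "## ",
    " # ",
    " # ",
    "###",
]

def draw_glyph(buffer, x_offset, y_offset, glyph, fg="#", bg="."):
    for y, row in enumerate(glyph):
        for x, cell in enumerate(row):
            buffer[y + y_offset][x + x_offset] = fg if cell == "#" else bg

buf = [[" "] * 10 for _ in range(6)]
draw_glyph(buf, 2, 0, GLYPH_ONE)
print("\n".join("".join(row) for row in buf))
```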
tp4a/teleport
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
server/www/packages/packages-darwin/x64/cryptography/hazmat/bindings/openssl/_conditional.py
python
cryptography_has_ec2m
()
return [ "EC_POINT_set_affine_coordinates_GF2m", "EC_POINT_get_affine_coordinates_GF2m", "EC_POINT_set_compressed_coordinates_GF2m", ]
[]
def cryptography_has_ec2m():
    return [
        "EC_POINT_set_affine_coordinates_GF2m",
        "EC_POINT_get_affine_coordinates_GF2m",
        "EC_POINT_set_compressed_coordinates_GF2m",
    ]
[ "def", "cryptography_has_ec2m", "(", ")", ":", "return", "[", "\"EC_POINT_set_affine_coordinates_GF2m\"", ",", "\"EC_POINT_get_affine_coordinates_GF2m\"", ",", "\"EC_POINT_set_compressed_coordinates_GF2m\"", ",", "]" ]
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-darwin/x64/cryptography/hazmat/bindings/openssl/_conditional.py#L8-L13
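In `_conditional.py`, probes like this are typically gathered into a mapping from feature flag to the symbols to drop when the flag is false; a hedged sketch of that consumption pattern (assumes `cryptography_has_ec2m` from the record is in scope; the dict and helper names are illustrative):

```python
CONDITIONAL_NAMES = {
    "Cryptography_HAS_EC2M": cryptography_has_ec2m,
}

def prune_missing_symbols(available_flags, all_symbols):
    """Drop symbols whose guarding feature flag is false."""
    symbols = set(all_symbols)
    for flag, names_fn in CONDITIONAL_NAMES.items():
        if not available_flags.get(flag):
            symbols -= set(names_fn())
    return symbols
```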
JacquesLucke/animation_nodes
b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1
animation_nodes/nodes/mesh/mesh_object_input.py
python
MeshObjectInputNode.getVertexLocations
(self, mesh, object, useWorldSpace)
return vertices
[]
def getVertexLocations(self, mesh, object, useWorldSpace):
    vertices = mesh.an.getVertices()
    if useWorldSpace:
        vertices.transform(object.matrix_world)
    return vertices
[ "def", "getVertexLocations", "(", "self", ",", "mesh", ",", "object", ",", "useWorldSpace", ")", ":", "vertices", "=", "mesh", ".", "an", ".", "getVertices", "(", ")", "if", "useWorldSpace", ":", "vertices", ".", "transform", "(", "object", ".", "matrix_world", ")", "return", "vertices" ]
https://github.com/JacquesLucke/animation_nodes/blob/b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1/animation_nodes/nodes/mesh/mesh_object_input.py#L113-L117
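Outside Blender, the `useWorldSpace` branch amounts to applying a 4x4 object-to-world matrix to local vertex positions; a hedged NumPy sketch:

```python
import numpy as np

def to_world_space(vertices, matrix_world):
    verts = np.asarray(vertices, dtype=float)            # (N, 3) local coords
    homo = np.hstack([verts, np.ones((len(verts), 1))])  # (N, 4) homogeneous
    return (homo @ np.asarray(matrix_world).T)[:, :3]

local = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)]
translate_x = np.array([[1, 0, 0, 2],
                        [0, 1, 0, 0],
                        [0, 0, 1, 0],
                        [0, 0, 0, 1]], dtype=float)
print(to_world_space(local, translate_x))  # x coordinates shifted by 2
```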
timonwong/OmniMarkupPreviewer
21921ac7a99d2b5924a2219b33679a5b53621392
OmniMarkupLib/Renderers/libs/python2/docutils/parsers/rst/states.py
python
build_regexp
(definition, compile=True)
Build, compile and return a regular expression based on `definition`.

:Parameter: `definition`: a 4-tuple (group name, prefix, suffix, parts),
    where "parts" is a list of regular expressions and/or regular
    expression definitions to be joined into an or-group.
Build, compile and return a regular expression based on `definition`.
[ "Build", "compile", "and", "return", "a", "regular", "expression", "based", "on", "definition", "." ]
def build_regexp(definition, compile=True):
    """
    Build, compile and return a regular expression based on `definition`.

    :Parameter: `definition`: a 4-tuple (group name, prefix, suffix, parts),
        where "parts" is a list of regular expressions and/or regular
        expression definitions to be joined into an or-group.
    """
    name, prefix, suffix, parts = definition
    part_strings = []
    for part in parts:
        if type(part) is tuple:
            part_strings.append(build_regexp(part, None))
        else:
            part_strings.append(part)
    or_group = '|'.join(part_strings)
    regexp = '%(prefix)s(?P<%(name)s>%(or_group)s)%(suffix)s' % locals()
    if compile:
        return re.compile(regexp, re.UNICODE)
    else:
        return regexp
[ "def", "build_regexp", "(", "definition", ",", "compile", "=", "True", ")", ":", "name", ",", "prefix", ",", "suffix", ",", "parts", "=", "definition", "part_strings", "=", "[", "]", "for", "part", "in", "parts", ":", "if", "type", "(", "part", ")", "is", "tuple", ":", "part_strings", ".", "append", "(", "build_regexp", "(", "part", ",", "None", ")", ")", "else", ":", "part_strings", ".", "append", "(", "part", ")", "or_group", "=", "'|'", ".", "join", "(", "part_strings", ")", "regexp", "=", "'%(prefix)s(?P<%(name)s>%(or_group)s)%(suffix)s'", "%", "locals", "(", ")", "if", "compile", ":", "return", "re", ".", "compile", "(", "regexp", ",", "re", ".", "UNICODE", ")", "else", ":", "return", "regexp" ]
https://github.com/timonwong/OmniMarkupPreviewer/blob/21921ac7a99d2b5924a2219b33679a5b53621392/OmniMarkupLib/Renderers/libs/python2/docutils/parsers/rst/states.py#L441-L461
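A usage sketch, assuming `build_regexp` from the record above is in scope; note how a nested definition tuple becomes an inner named group inside the or-group:

```python
import re

definition = ('number', r'\b', r'\b',
              [r'\d+', ('hexnum', '0x', '', ['[0-9a-fA-F]+'])])
pattern = build_regexp(definition)
# Compiles to: \b(?P<number>\d+|0x(?P<hexnum>[0-9a-fA-F]+))\b
match = pattern.search('value = 0xFF')
print(match.group('number'), match.group('hexnum'))  # 0xFF FF
```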
scrapy/scrapy
b04cfa48328d5d5749dca6f50fa34e0cfc664c89
scrapy/linkextractors/lxmlhtml.py
python
LxmlParserLinkExtractor._extract_links
(self, selector, response_url, response_encoding, base_url)
return self._deduplicate_if_needed(links)
[]
def _extract_links(self, selector, response_url, response_encoding, base_url):
    links = []
    # hacky way to get the underlying lxml parsed document
    for el, attr, attr_val in self._iter_links(selector.root):
        # pseudo lxml.html.HtmlElement.make_links_absolute(base_url)
        try:
            if self.strip:
                attr_val = strip_html5_whitespace(attr_val)
            attr_val = urljoin(base_url, attr_val)
        except ValueError:
            continue  # skipping bogus links
        else:
            url = self.process_attr(attr_val)
            if url is None:
                continue
        url = safe_url_string(url, encoding=response_encoding)
        # to fix relative links after process_value
        url = urljoin(response_url, url)
        link = Link(url, _collect_string_content(el) or '',
                    nofollow=rel_has_nofollow(el.get('rel')))
        links.append(link)
    return self._deduplicate_if_needed(links)
[ "def", "_extract_links", "(", "self", ",", "selector", ",", "response_url", ",", "response_encoding", ",", "base_url", ")", ":", "links", "=", "[", "]", "# hacky way to get the underlying lxml parsed document", "for", "el", ",", "attr", ",", "attr_val", "in", "self", ".", "_iter_links", "(", "selector", ".", "root", ")", ":", "# pseudo lxml.html.HtmlElement.make_links_absolute(base_url)", "try", ":", "if", "self", ".", "strip", ":", "attr_val", "=", "strip_html5_whitespace", "(", "attr_val", ")", "attr_val", "=", "urljoin", "(", "base_url", ",", "attr_val", ")", "except", "ValueError", ":", "continue", "# skipping bogus links", "else", ":", "url", "=", "self", ".", "process_attr", "(", "attr_val", ")", "if", "url", "is", "None", ":", "continue", "url", "=", "safe_url_string", "(", "url", ",", "encoding", "=", "response_encoding", ")", "# to fix relative links after process_value", "url", "=", "urljoin", "(", "response_url", ",", "url", ")", "link", "=", "Link", "(", "url", ",", "_collect_string_content", "(", "el", ")", "or", "''", ",", "nofollow", "=", "rel_has_nofollow", "(", "el", ".", "get", "(", "'rel'", ")", ")", ")", "links", ".", "append", "(", "link", ")", "return", "self", ".", "_deduplicate_if_needed", "(", "links", ")" ]
https://github.com/scrapy/scrapy/blob/b04cfa48328d5d5749dca6f50fa34e0cfc664c89/scrapy/linkextractors/lxmlhtml.py#L61-L82
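`LxmlParserLinkExtractor` is internal; the usual entry point is the public `LinkExtractor`. A short sketch against a synthetic response (URL and markup are illustrative):

```python
from scrapy.linkextractors import LinkExtractor
from scrapy.http import HtmlResponse

html = b'<html><body><a href="/about" rel="nofollow">About</a></body></html>'
response = HtmlResponse(url='https://example.com/', body=html)
for link in LinkExtractor().extract_links(response):
    print(link.url, link.text, link.nofollow)
    # https://example.com/about About True
```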
sahana/eden
1696fa50e90ce967df69f66b571af45356cc18da
modules/s3db/project.py
python
ProjectPlanningModel.project_indicator_activity_create_onaccept
(self, form)
Default all weightings to an even spread
Default all weightings to an even spread
[ "Default", "all", "weightings", "to", "an", "even", "spread" ]
def project_indicator_activity_create_onaccept(self, form):
    """
        Default all weightings to an even spread
    """

    db = current.db
    record_id = form.vars.id

    # Find the indicator_id
    table = current.s3db.project_indicator_activity
    record = db(table.id == record_id).select(table.indicator_id,
                                              limitby = (0, 1),
                                              ).first()
    try:
        indicator_id = record.indicator_id
    except AttributeError:
        current.log.error("Cannot find Project Indicator Activity record (no record for this ID), so cannot setup default weightings")
        return

    # Read the records
    query = (table.indicator_id == indicator_id) & \
            (table.deleted == False)
    records = db(query).select(table.id)
    weighting = 1.0 / len(records)
    for r in records:
        # Set the weighting
        r.update_record(weighting = weighting)

    # Fire normal onaccept
    self.project_indicator_activity_onaccept(form, create=True)
[ "def", "project_indicator_activity_create_onaccept", "(", "self", ",", "form", ")", ":", "db", "=", "current", ".", "db", "record_id", "=", "form", ".", "vars", ".", "id", "# Find the indicator_id", "table", "=", "current", ".", "s3db", ".", "project_indicator_activity", "record", "=", "db", "(", "table", ".", "id", "==", "record_id", ")", ".", "select", "(", "table", ".", "indicator_id", ",", "limitby", "=", "(", "0", ",", "1", ")", ",", ")", ".", "first", "(", ")", "try", ":", "indicator_id", "=", "record", ".", "indicator_id", "except", "AttributeError", ":", "current", ".", "log", ".", "error", "(", "\"Cannot find Project Indicator Activity record (no record for this ID), so cannot setup default weightings\"", ")", "return", "# Read the records", "query", "=", "(", "table", ".", "indicator_id", "==", "indicator_id", ")", "&", "(", "table", ".", "deleted", "==", "False", ")", "records", "=", "db", "(", "query", ")", ".", "select", "(", "table", ".", "id", ")", "weighting", "=", "1.0", "/", "len", "(", "records", ")", "for", "r", "in", "records", ":", "# Set the weighting", "r", ".", "update_record", "(", "weighting", "=", "weighting", ")", "# Fire normal onaccept", "self", ".", "project_indicator_activity_onaccept", "(", "form", ",", "create", "=", "True", ")" ]
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/s3db/project.py#L5996-L6025
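The weighting rule in isolation: with N sibling activities each receives 1/N, so the weights always sum to 1.0.

```python
records = ["activity-1", "activity-2", "activity-3", "activity-4"]
weighting = 1.0 / len(records)
print(weighting)                        # 0.25
print(weighting * len(records) == 1.0)  # True
```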
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/random.py
python
Random.choice
(self, seq)
return seq[i]
Choose a random element from a non-empty sequence.
Choose a random element from a non-empty sequence.
[ "Choose", "a", "random", "element", "from", "a", "non", "-", "empty", "sequence", "." ]
def choice(self, seq):
    """Choose a random element from a non-empty sequence."""
    try:
        i = self._randbelow(len(seq))
    except ValueError:
        raise IndexError('Cannot choose from an empty sequence') from None
    return seq[i]
[ "def", "choice", "(", "self", ",", "seq", ")", ":", "try", ":", "i", "=", "self", ".", "_randbelow", "(", "len", "(", "seq", ")", ")", "except", "ValueError", ":", "raise", "IndexError", "(", "'Cannot choose from an empty sequence'", ")", "from", "None", "return", "seq", "[", "i", "]" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/random.py#L252-L258
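The same method is exposed on the module-level `random` instance; the empty-sequence behavior is easy to confirm:

```python
import random

print(random.choice(["red", "green", "blue"]))  # one element, uniformly

try:
    random.choice([])
except IndexError as exc:
    print(exc)  # Cannot choose from an empty sequence
```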
jgagneastro/coffeegrindsize
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/interpolate/fitpack.py
python
splev
(x, tck, der=0, ext=0)
Evaluate a B-spline or its derivatives.

Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives.  This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.

Parameters
----------
x : array_like
    An array of points at which to return the value of the smoothed
    spline or its derivatives.  If `tck` was returned from `splprep`,
    then the parameter values, u should be given.
tck : 3-tuple or a BSpline object
    If a tuple, then it should be a sequence of length 3 returned by
    `splrep` or `splprep` containing the knots, coefficients, and degree
    of the spline. (Also see Notes.)
der : int, optional
    The order of derivative of the spline to compute (must be less than
    or equal to k, the degree of the spline).
ext : int, optional
    Controls the value returned for elements of ``x`` not in the
    interval defined by the knot sequence.

    * if ext=0, return the extrapolated value.
    * if ext=1, return 0
    * if ext=2, raise a ValueError
    * if ext=3, return the boundary value.

    The default value is 0.

Returns
-------
y : ndarray or list of ndarrays
    An array of values representing the spline function evaluated at
    the points in `x`.  If `tck` was returned from `splprep`, then this
    is a list of arrays representing the curve in N-dimensional space.

Notes
-----
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using `BSpline` objects.

See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
BSpline

References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
    Theory, 6, p.50-62, 1972.
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst.
    Maths Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
    on Numerical Analysis, Oxford University Press, 1993.
Evaluate a B-spline or its derivatives.
[ "Evaluate", "a", "B", "-", "spline", "or", "its", "derivatives", "." ]
def splev(x, tck, der=0, ext=0):
    """
    Evaluate a B-spline or its derivatives.

    Given the knots and coefficients of a B-spline representation, evaluate
    the value of the smoothing polynomial and its derivatives.  This is a
    wrapper around the FORTRAN routines splev and splder of FITPACK.

    Parameters
    ----------
    x : array_like
        An array of points at which to return the value of the smoothed
        spline or its derivatives.  If `tck` was returned from `splprep`,
        then the parameter values, u should be given.
    tck : 3-tuple or a BSpline object
        If a tuple, then it should be a sequence of length 3 returned by
        `splrep` or `splprep` containing the knots, coefficients, and degree
        of the spline. (Also see Notes.)
    der : int, optional
        The order of derivative of the spline to compute (must be less than
        or equal to k, the degree of the spline).
    ext : int, optional
        Controls the value returned for elements of ``x`` not in the
        interval defined by the knot sequence.

        * if ext=0, return the extrapolated value.
        * if ext=1, return 0
        * if ext=2, raise a ValueError
        * if ext=3, return the boundary value.

        The default value is 0.

    Returns
    -------
    y : ndarray or list of ndarrays
        An array of values representing the spline function evaluated at
        the points in `x`.  If `tck` was returned from `splprep`, then this
        is a list of arrays representing the curve in N-dimensional space.

    Notes
    -----
    Manipulating the tck-tuples directly is not recommended. In new code,
    prefer using `BSpline` objects.

    See Also
    --------
    splprep, splrep, sproot, spalde, splint
    bisplrep, bisplev
    BSpline

    References
    ----------
    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
        Theory, 6, p.50-62, 1972.
    .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst.
        Maths Applics, 10, p.134-149, 1972.
    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
        on Numerical Analysis, Oxford University Press, 1993.
    """
    if isinstance(tck, BSpline):
        if tck.c.ndim > 1:
            mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
                    "not recommended. Use BSpline.__call__(x) instead.")
            warnings.warn(mesg, DeprecationWarning)

        # remap the out-of-bounds behavior
        try:
            extrapolate = {0: True, }[ext]
        except KeyError:
            raise ValueError("Extrapolation mode %s is not supported "
                             "by BSpline." % ext)

        return tck(x, der, extrapolate=extrapolate)
    else:
        return _impl.splev(x, tck, der, ext)
[ "def", "splev", "(", "x", ",", "tck", ",", "der", "=", "0", ",", "ext", "=", "0", ")", ":", "if", "isinstance", "(", "tck", ",", "BSpline", ")", ":", "if", "tck", ".", "c", ".", "ndim", ">", "1", ":", "mesg", "=", "(", "\"Calling splev() with BSpline objects with c.ndim > 1 is \"", "\"not recommended. Use BSpline.__call__(x) instead.\"", ")", "warnings", ".", "warn", "(", "mesg", ",", "DeprecationWarning", ")", "# remap the out-of-bounds behavior", "try", ":", "extrapolate", "=", "{", "0", ":", "True", ",", "}", "[", "ext", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Extrapolation mode %s is not supported \"", "\"by BSpline.\"", "%", "ext", ")", "return", "tck", "(", "x", ",", "der", ",", "extrapolate", "=", "extrapolate", ")", "else", ":", "return", "_impl", ".", "splev", "(", "x", ",", "tck", ",", "der", ",", "ext", ")" ]
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/interpolate/fitpack.py#L293-L368
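A typical SciPy round-trip: fit with `splrep`, evaluate (and differentiate) with `splev`:

```python
import numpy as np
from scipy.interpolate import splrep, splev

x = np.linspace(0, 2 * np.pi, 20)
tck = splrep(x, np.sin(x))       # (knots, coefficients, degree)
xnew = np.linspace(0, 2 * np.pi, 5)
print(splev(xnew, tck))          # spline values, close to sin(xnew)
print(splev(xnew, tck, der=1))   # first derivative, close to cos(xnew)
```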
xmengli/H-DenseUNet
06cc436a43196310fe933d114a353839907cc176
Keras-2.0.8/keras/backend/tensorflow_backend.py
python
argmax
(x, axis=-1)
return tf.argmax(x, axis)
Returns the index of the maximum value along an axis.

# Arguments
    x: Tensor or variable.
    axis: axis along which to perform the reduction.

# Returns
    A tensor.
Returns the index of the maximum value along an axis.
[ "Returns", "the", "index", "of", "the", "maximum", "value", "along", "an", "axis", "." ]
def argmax(x, axis=-1):
    """Returns the index of the maximum value along an axis.

    # Arguments
        x: Tensor or variable.
        axis: axis along which to perform the reduction.

    # Returns
        A tensor.
    """
    return tf.argmax(x, axis)
[ "def", "argmax", "(", "x", ",", "axis", "=", "-", "1", ")", ":", "return", "tf", ".", "argmax", "(", "x", ",", "axis", ")" ]
https://github.com/xmengli/H-DenseUNet/blob/06cc436a43196310fe933d114a353839907cc176/Keras-2.0.8/keras/backend/tensorflow_backend.py#L1323-L1333
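With a TensorFlow backend the wrapper behaves like `tf.argmax`; a short sketch using the Keras backend API of that era (Keras 2.0.x):

```python
import numpy as np
from keras import backend as K

x = K.variable(np.array([[0.1, 0.9, 0.0],
                         [0.7, 0.2, 0.1]]))
print(K.eval(K.argmax(x, axis=-1)))  # [1 0]
```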