repository_name (string, 7-54 chars) | func_path_in_repository (string, 4-175 chars) | func_name (string, 1-129 chars) | whole_func_string (string, 91-50.9k chars) | language (string, 1 value) | func_code_string (string, 91-50.9k chars) | func_code_tokens (sequence) | func_documentation_string (string, 1-31.6k chars) | func_documentation_tokens (sequence) | split_name (string, 1 value) | func_code_url (string, 89-268 chars) | score (float64, 0-0.09) |
---|---|---|---|---|---|---|---|---|---|---|---|
rflamary/POT | ot/smooth.py | solve_semi_dual | def solve_semi_dual(a, b, C, regul, method="L-BFGS-B", tol=1e-3, max_iter=500,
verbose=False):
"""
Solve the "smoothed" semi-dual objective.
Parameters
----------
a: array, shape = len(a)
b: array, shape = len(b)
Input histograms (should be non-negative and sum to 1).
C: array, shape = len(a) x len(b)
Ground cost matrix.
regul: Regularization object
Should implement a max_Omega(X) method.
method: str
Solver to be used (passed to `scipy.optimize.minimize`).
tol: float
Tolerance parameter.
max_iter: int
Maximum number of iterations.
Returns
-------
alpha: array, shape = len(a)
Semi-dual potentials.
"""
def _func(alpha):
obj, grad = semi_dual_obj_grad(alpha, a, b, C, regul)
# We need to maximize the semi-dual.
return -obj, -grad
alpha_init = np.zeros(len(a))
res = minimize(_func, alpha_init, method=method, jac=True,
tol=tol, options=dict(maxiter=max_iter, disp=verbose))
return res.x, res | python | def solve_semi_dual(a, b, C, regul, method="L-BFGS-B", tol=1e-3, max_iter=500,
verbose=False):
"""
Solve the "smoothed" semi-dual objective.
Parameters
----------
a: array, shape = len(a)
b: array, shape = len(b)
Input histograms (should be non-negative and sum to 1).
C: array, shape = len(a) x len(b)
Ground cost matrix.
regul: Regularization object
Should implement a max_Omega(X) method.
method: str
Solver to be used (passed to `scipy.optimize.minimize`).
tol: float
Tolerance parameter.
max_iter: int
Maximum number of iterations.
Returns
-------
alpha: array, shape = len(a)
Semi-dual potentials.
"""
def _func(alpha):
obj, grad = semi_dual_obj_grad(alpha, a, b, C, regul)
# We need to maximize the semi-dual.
return -obj, -grad
alpha_init = np.zeros(len(a))
res = minimize(_func, alpha_init, method=method, jac=True,
tol=tol, options=dict(maxiter=max_iter, disp=verbose))
return res.x, res | [
"def",
"solve_semi_dual",
"(",
"a",
",",
"b",
",",
"C",
",",
"regul",
",",
"method",
"=",
"\"L-BFGS-B\"",
",",
"tol",
"=",
"1e-3",
",",
"max_iter",
"=",
"500",
",",
"verbose",
"=",
"False",
")",
":",
"def",
"_func",
"(",
"alpha",
")",
":",
"obj",
",",
"grad",
"=",
"semi_dual_obj_grad",
"(",
"alpha",
",",
"a",
",",
"b",
",",
"C",
",",
"regul",
")",
"# We need to maximize the semi-dual.",
"return",
"-",
"obj",
",",
"-",
"grad",
"alpha_init",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"a",
")",
")",
"res",
"=",
"minimize",
"(",
"_func",
",",
"alpha_init",
",",
"method",
"=",
"method",
",",
"jac",
"=",
"True",
",",
"tol",
"=",
"tol",
",",
"options",
"=",
"dict",
"(",
"maxiter",
"=",
"max_iter",
",",
"disp",
"=",
"verbose",
")",
")",
"return",
"res",
".",
"x",
",",
"res"
] | Solve the "smoothed" semi-dual objective.
Parameters
----------
a: array, shape = len(a)
b: array, shape = len(b)
Input histograms (should be non-negative and sum to 1).
C: array, shape = len(a) x len(b)
Ground cost matrix.
regul: Regularization object
Should implement a max_Omega(X) method.
method: str
Solver to be used (passed to `scipy.optimize.minimize`).
tol: float
Tolerance parameter.
max_iter: int
Maximum number of iterations.
Returns
-------
alpha: array, shape = len(a)
Semi-dual potentials. | [
"Solve",
"the",
"smoothed",
"semi",
"-",
"dual",
"objective",
"."
] | train | https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/smooth.py#L331-L368 | 0.000911 |
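
A minimal usage sketch for the `solve_semi_dual` record above, assuming POT's `ot.smooth` also provides the `SquaredL2` regularizer; the histograms and cost matrix are illustrative:

```python
import numpy as np
from ot.smooth import SquaredL2, solve_semi_dual   # SquaredL2 regularizer assumed available here

# Toy histograms (non-negative, summing to 1) and a random ground cost matrix.
a = np.array([0.5, 0.5])
b = np.array([0.25, 0.25, 0.5])
C = np.random.rand(2, 3)

regul = SquaredL2(gamma=1.0)   # provides the max_Omega(X) method the solver requires

alpha, res = solve_semi_dual(a, b, C, regul, tol=1e-6, max_iter=1000)
print(alpha)         # semi-dual potentials, shape (len(a),)
print(res.success)   # full scipy.optimize result, returned alongside the potentials
```
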
SiLab-Bonn/basil | basil/HL/FEI4QuadModuleAdapterCard.py | DacMax5380._set_dac_value | def _set_dac_value(self, channel, value):
'''Write DAC
'''
self._intf.write(self._base_addr + self.MAX_5380_ADD, array('B', pack('B', value))) | python | def _set_dac_value(self, channel, value):
'''Write DAC
'''
self._intf.write(self._base_addr + self.MAX_5380_ADD, array('B', pack('B', value))) | [
"def",
"_set_dac_value",
"(",
"self",
",",
"channel",
",",
"value",
")",
":",
"self",
".",
"_intf",
".",
"write",
"(",
"self",
".",
"_base_addr",
"+",
"self",
".",
"MAX_5380_ADD",
",",
"array",
"(",
"'B'",
",",
"pack",
"(",
"'B'",
",",
"value",
")",
")",
")"
] | Write DAC | [
"Write",
"DAC"
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4QuadModuleAdapterCard.py#L32-L35 | 0.018072 |
saltstack/salt | salt/states/pushover.py | post_message | def post_message(name,
user=None,
device=None,
message=None,
title=None,
priority=None,
expire=None,
retry=None,
sound=None,
api_version=1,
token=None):
'''
Send a message to a PushOver channel.
.. code-block:: yaml
pushover-message:
pushover.post_message:
- user: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- title: Salt Returner
- device: phone
- priority: -1
- expire: 3600
- retry: 5
The following parameters are required:
name
The unique name for this event.
user
The user or group of users to send the message to. Must be ID of user, not name
or email address.
message
The message that is to be sent to the PushOver channel.
The following parameters are optional:
title
The title to use for the message.
device
The device for the user to send the message to.
priority
The priority for the message.
expire
The message should expire after specified amount of seconds.
retry
The message should be resent this many times.
token
The token for PushOver to use for authentication,
if not specified in the configuration options of master or minion.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if __opts__['test']:
ret['comment'] = 'The following message is to be sent to PushOver: {0}'.format(message)
ret['result'] = None
return ret
if not user:
ret['comment'] = 'PushOver user is missing: {0}'.format(user)
return ret
if not message:
ret['comment'] = 'PushOver message is missing: {0}'.format(message)
return ret
result = __salt__['pushover.post_message'](
user=user,
message=message,
title=title,
device=device,
priority=priority,
expire=expire,
retry=retry,
token=token,
)
if result:
ret['result'] = True
ret['comment'] = 'Sent message: {0}'.format(name)
else:
ret['comment'] = 'Failed to send message: {0}'.format(name)
return ret | python | def post_message(name,
user=None,
device=None,
message=None,
title=None,
priority=None,
expire=None,
retry=None,
sound=None,
api_version=1,
token=None):
'''
Send a message to a PushOver channel.
.. code-block:: yaml
pushover-message:
pushover.post_message:
- user: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- title: Salt Returner
- device: phone
- priority: -1
- expire: 3600
- retry: 5
The following parameters are required:
name
The unique name for this event.
user
The user or group of users to send the message to. Must be ID of user, not name
or email address.
message
The message that is to be sent to the PushOver channel.
The following parameters are optional:
title
The title to use for the message.
device
The device for the user to send the message to.
priority
The priority for the message.
expire
The message should expire after specified amount of seconds.
retry
The message should be resent this many times.
token
The token for PushOver to use for authentication,
if not specified in the configuration options of master or minion.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if __opts__['test']:
ret['comment'] = 'The following message is to be sent to PushOver: {0}'.format(message)
ret['result'] = None
return ret
if not user:
ret['comment'] = 'PushOver user is missing: {0}'.format(user)
return ret
if not message:
ret['comment'] = 'PushOver message is missing: {0}'.format(message)
return ret
result = __salt__['pushover.post_message'](
user=user,
message=message,
title=title,
device=device,
priority=priority,
expire=expire,
retry=retry,
token=token,
)
if result:
ret['result'] = True
ret['comment'] = 'Sent message: {0}'.format(name)
else:
ret['comment'] = 'Failed to send message: {0}'.format(name)
return ret | [
"def",
"post_message",
"(",
"name",
",",
"user",
"=",
"None",
",",
"device",
"=",
"None",
",",
"message",
"=",
"None",
",",
"title",
"=",
"None",
",",
"priority",
"=",
"None",
",",
"expire",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"sound",
"=",
"None",
",",
"api_version",
"=",
"1",
",",
"token",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"False",
",",
"'comment'",
":",
"''",
"}",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'The following message is to be sent to PushOver: {0}'",
".",
"format",
"(",
"message",
")",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"if",
"not",
"user",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'PushOver user is missing: {0}'",
".",
"format",
"(",
"user",
")",
"return",
"ret",
"if",
"not",
"message",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'PushOver message is missing: {0}'",
".",
"format",
"(",
"message",
")",
"return",
"ret",
"result",
"=",
"__salt__",
"[",
"'pushover.post_message'",
"]",
"(",
"user",
"=",
"user",
",",
"message",
"=",
"message",
",",
"title",
"=",
"title",
",",
"device",
"=",
"device",
",",
"priority",
"=",
"priority",
",",
"expire",
"=",
"expire",
",",
"retry",
"=",
"retry",
",",
"token",
"=",
"token",
",",
")",
"if",
"result",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Sent message: {0}'",
".",
"format",
"(",
"name",
")",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to send message: {0}'",
".",
"format",
"(",
"name",
")",
"return",
"ret"
] | Send a message to a PushOver channel.
.. code-block:: yaml
pushover-message:
pushover.post_message:
- user: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- title: Salt Returner
- device: phone
- priority: -1
- expire: 3600
- retry: 5
The following parameters are required:
name
The unique name for this event.
user
The user or group of users to send the message to. Must be ID of user, not name
or email address.
message
The message that is to be sent to the PushOver channel.
The following parameters are optional:
title
The title to use for the message.
device
The device for the user to send the message to.
priority
The priority for the message.
expire
The message should expire after specified amount of seconds.
retry
The message should be resent this many times.
token
The token for PushOver to use for authentication,
if not specified in the configuration options of master or minion. | [
"Send",
"a",
"message",
"to",
"a",
"PushOver",
"channel",
"."
] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pushover.py#L42-L137 | 0.001229 |
materialsvirtuallab/monty | monty/bisect.py | find_ge | def find_ge(a, x):
"""Find leftmost item greater than or equal to x."""
i = bs.bisect_left(a, x)
if i != len(a): return i
raise ValueError | python | def find_ge(a, x):
"""Find leftmost item greater than or equal to x."""
i = bs.bisect_left(a, x)
if i != len(a): return i
raise ValueError | [
"def",
"find_ge",
"(",
"a",
",",
"x",
")",
":",
"i",
"=",
"bs",
".",
"bisect_left",
"(",
"a",
",",
"x",
")",
"if",
"i",
"!=",
"len",
"(",
"a",
")",
":",
"return",
"i",
"raise",
"ValueError"
] | Find leftmost item greater than or equal to x. | [
"Find",
"leftmost",
"item",
"greater",
"than",
"or",
"equal",
"to",
"x",
"."
] | train | https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/bisect.py#L56-L60 | 0.012987 |
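
`find_ge` is the classic bisect recipe; a short standard-library check of the same behaviour (sample data is illustrative):

```python
import bisect

a = [1, 2, 4, 4, 8]

# Leftmost index whose value is >= x, which is what find_ge returns.
i = bisect.bisect_left(a, 4)
assert i == 2 and a[i] == 4

# If every element is smaller than x, bisect_left returns len(a) and find_ge raises ValueError.
assert bisect.bisect_left(a, 9) == len(a)
```
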
jucacrispim/pylint-mongoengine | pylint_mongoengine/suppression.py | _is_call2custom_manager | def _is_call2custom_manager(node):
"""Checks if the call is being done to a custom queryset manager."""
called = safe_infer(node.func)
funcdef = getattr(called, '_proxied', None)
return _is_custom_qs_manager(funcdef) | python | def _is_call2custom_manager(node):
"""Checks if the call is being done to a custom queryset manager."""
called = safe_infer(node.func)
funcdef = getattr(called, '_proxied', None)
return _is_custom_qs_manager(funcdef) | [
"def",
"_is_call2custom_manager",
"(",
"node",
")",
":",
"called",
"=",
"safe_infer",
"(",
"node",
".",
"func",
")",
"funcdef",
"=",
"getattr",
"(",
"called",
",",
"'_proxied'",
",",
"None",
")",
"return",
"_is_custom_qs_manager",
"(",
"funcdef",
")"
] | Checks if the call is being done to a custom queryset manager. | [
"Checks",
"if",
"the",
"call",
"is",
"being",
"done",
"to",
"a",
"custom",
"queryset",
"manager",
"."
] | train | https://github.com/jucacrispim/pylint-mongoengine/blob/b873653d1224a5748f75dd507f492f8c60d95ce3/pylint_mongoengine/suppression.py#L43-L47 | 0.00431 |
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_0/symbol/symbol_client.py | SymbolClient.update_requests_request_name | def update_requests_request_name(self, update_request, request_name):
"""UpdateRequestsRequestName.
[Preview API] Update a symbol request by request name.
:param :class:`<Request> <azure.devops.v5_0.symbol.models.Request>` update_request: The symbol request.
:param str request_name:
:rtype: :class:`<Request> <azure.devops.v5_0.symbol.models.Request>`
"""
query_parameters = {}
if request_name is not None:
query_parameters['requestName'] = self._serialize.query('request_name', request_name, 'str')
content = self._serialize.body(update_request, 'Request')
response = self._send(http_method='PATCH',
location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
version='5.0-preview.1',
query_parameters=query_parameters,
content=content)
return self._deserialize('Request', response) | python | def update_requests_request_name(self, update_request, request_name):
"""UpdateRequestsRequestName.
[Preview API] Update a symbol request by request name.
:param :class:`<Request> <azure.devops.v5_0.symbol.models.Request>` update_request: The symbol request.
:param str request_name:
:rtype: :class:`<Request> <azure.devops.v5_0.symbol.models.Request>`
"""
query_parameters = {}
if request_name is not None:
query_parameters['requestName'] = self._serialize.query('request_name', request_name, 'str')
content = self._serialize.body(update_request, 'Request')
response = self._send(http_method='PATCH',
location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
version='5.0-preview.1',
query_parameters=query_parameters,
content=content)
return self._deserialize('Request', response) | [
"def",
"update_requests_request_name",
"(",
"self",
",",
"update_request",
",",
"request_name",
")",
":",
"query_parameters",
"=",
"{",
"}",
"if",
"request_name",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'requestName'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'request_name'",
",",
"request_name",
",",
"'str'",
")",
"content",
"=",
"self",
".",
"_serialize",
".",
"body",
"(",
"update_request",
",",
"'Request'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'PATCH'",
",",
"location_id",
"=",
"'ebc09fe3-1b20-4667-abc5-f2b60fe8de52'",
",",
"version",
"=",
"'5.0-preview.1'",
",",
"query_parameters",
"=",
"query_parameters",
",",
"content",
"=",
"content",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'Request'",
",",
"response",
")"
] | UpdateRequestsRequestName.
[Preview API] Update a symbol request by request name.
:param :class:`<Request> <azure.devops.v5_0.symbol.models.Request>` update_request: The symbol request.
:param str request_name:
:rtype: :class:`<Request> <azure.devops.v5_0.symbol.models.Request>` | [
"UpdateRequestsRequestName",
".",
"[",
"Preview",
"API",
"]",
"Update",
"a",
"symbol",
"request",
"by",
"request",
"name",
".",
":",
"param",
":",
"class",
":",
"<Request",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"symbol",
".",
"models",
".",
"Request",
">",
"update_request",
":",
"The",
"symbol",
"request",
".",
":",
"param",
"str",
"request_name",
":",
":",
"rtype",
":",
":",
"class",
":",
"<Request",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"symbol",
".",
"models",
".",
"Request",
">"
] | train | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/symbol/symbol_client.py#L198-L214 | 0.00502 |
contains-io/typet | typet/objects.py | _is_propertyable | def _is_propertyable(
names, # type: List[str]
attrs, # type: Dict[str, Any]
annotations, # type: Dict[str, type]
attr, # Dict[str, Any]
):
# type: (...) -> bool
"""Determine if an attribute can be replaced with a property.
Args:
names: The complete list of all attribute names for the class.
attrs: The attribute dict returned by __prepare__.
annotations: A mapping of all defined annotations for the class.
attr: The attribute to test.
Returns:
True if the attribute can be replaced with a property; else False.
"""
return (
attr in annotations
and not attr.startswith("_")
and not attr.isupper()
and "__{}".format(attr) not in names
and not isinstance(getattr(attrs, attr, None), types.MethodType)
) | python | def _is_propertyable(
names, # type: List[str]
attrs, # type: Dict[str, Any]
annotations, # type: Dict[str, type]
attr, # Dict[str, Any]
):
# type: (...) -> bool
"""Determine if an attribute can be replaced with a property.
Args:
names: The complete list of all attribute names for the class.
attrs: The attribute dict returned by __prepare__.
annotations: A mapping of all defined annotations for the class.
attr: The attribute to test.
Returns:
True if the attribute can be replaced with a property; else False.
"""
return (
attr in annotations
and not attr.startswith("_")
and not attr.isupper()
and "__{}".format(attr) not in names
and not isinstance(getattr(attrs, attr, None), types.MethodType)
) | [
"def",
"_is_propertyable",
"(",
"names",
",",
"# type: List[str]",
"attrs",
",",
"# type: Dict[str, Any]",
"annotations",
",",
"# type: Dict[str, type]",
"attr",
",",
"# Dict[str, Any]",
")",
":",
"# type: (...) -> bool",
"return",
"(",
"attr",
"in",
"annotations",
"and",
"not",
"attr",
".",
"startswith",
"(",
"\"_\"",
")",
"and",
"not",
"attr",
".",
"isupper",
"(",
")",
"and",
"\"__{}\"",
".",
"format",
"(",
"attr",
")",
"not",
"in",
"names",
"and",
"not",
"isinstance",
"(",
"getattr",
"(",
"attrs",
",",
"attr",
",",
"None",
")",
",",
"types",
".",
"MethodType",
")",
")"
] | Determine if an attribute can be replaced with a property.
Args:
names: The complete list of all attribute names for the class.
attrs: The attribute dict returned by __prepare__.
annotations: A mapping of all defined annotations for the class.
attr: The attribute to test.
Returns:
True if the attribute can be replaced with a property; else False. | [
"Determine",
"if",
"an",
"attribute",
"can",
"be",
"replaced",
"with",
"a",
"property",
"."
] | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/objects.py#L110-L134 | 0.001202 |
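
A small sketch of how `_is_propertyable` classifies class-namespace entries; the sample names, annotations, and the direct import of the private helper are illustrative assumptions:

```python
from typet.objects import _is_propertyable   # private helper; imported directly only for illustration

names = ["x", "CONSTANT", "_private"]
annotations = {"x": int, "CONSTANT": int, "_private": str}
attrs = {"x": 5, "CONSTANT": 10, "_private": "hidden"}   # plain dict: getattr() falls back to None

assert _is_propertyable(names, attrs, annotations, "x")             # annotated public attribute
assert not _is_propertyable(names, attrs, annotations, "CONSTANT")  # upper-case constants are skipped
assert not _is_propertyable(names, attrs, annotations, "_private")  # leading underscore is skipped
assert not _is_propertyable(names, attrs, annotations, "y")         # unannotated names are skipped
```
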
SBRG/ssbio | ssbio/pipeline/gempro.py | GEMPRO.get_scratch_predictions | def get_scratch_predictions(self, path_to_scratch, results_dir, scratch_basename='scratch', num_cores=1,
exposed_buried_cutoff=25, custom_gene_mapping=None):
"""Run and parse ``SCRATCH`` results to predict secondary structure and solvent accessibility.
Annotations are stored in the protein's representative sequence at:
* ``.annotations``
* ``.letter_annotations``
Args:
path_to_scratch (str): Path to SCRATCH executable
results_dir (str): Path to SCRATCH results folder, which will have the files (scratch.ss, scratch.ss8,
scratch.acc, scratch.acc20)
scratch_basename (str): Basename of the SCRATCH results ('scratch' is default)
num_cores (int): Number of cores to use to parallelize SCRATCH run
exposed_buried_cutoff (int): Cutoff of exposed/buried for the acc20 predictions
custom_gene_mapping (dict): Default parsing of SCRATCH output files is to look for the model gene IDs. If
your output files contain IDs which differ from the model gene IDs, use this dictionary to map model
gene IDs to result file IDs. Dictionary keys must match model genes.
"""
if not self.genome_path:
# Write all sequences as one file
all_seqs = self.write_representative_sequences_file(outname=self.id)
# Runs SCRATCH or loads existing results in results_dir
scratch = SCRATCH(project_name=scratch_basename, seq_file=self.genome_path)
scratch.run_scratch(path_to_scratch=path_to_scratch, num_cores=num_cores, outdir=results_dir)
sspro_summary = scratch.sspro_summary()
sspro8_summary = scratch.sspro8_summary()
sspro_results = scratch.sspro_results()
sspro8_results = scratch.sspro8_results()
accpro_summary = scratch.accpro_summary()
accpro20_summary = scratch.accpro20_summary(exposed_buried_cutoff)
accpro_results = scratch.accpro_results()
accpro20_results = scratch.accpro20_results()
counter = 0
# Adding the scratch annotations to the representative_sequences letter_annotations
for g in tqdm(self.genes_with_a_representative_sequence):
if custom_gene_mapping:
g_id = custom_gene_mapping[g.id]
else:
g_id = g.id
if g_id in sspro_summary:
# Secondary structure
g.protein.representative_sequence.annotations.update(sspro_summary[g_id])
g.protein.representative_sequence.annotations.update(sspro8_summary[g_id])
try:
g.protein.representative_sequence.letter_annotations['SS-sspro'] = sspro_results[g_id]
g.protein.representative_sequence.letter_annotations['SS-sspro8'] = sspro8_results[g_id]
except TypeError:
log.error('Gene {}, SeqProp {}: sequence length mismatch between SCRATCH results and representative '
'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))
# Solvent accessibility
g.protein.representative_sequence.annotations.update(accpro_summary[g_id])
g.protein.representative_sequence.annotations.update(accpro20_summary[g_id])
try:
g.protein.representative_sequence.letter_annotations['RSA-accpro'] = accpro_results[g_id]
g.protein.representative_sequence.letter_annotations['RSA-accpro20'] = accpro20_results[g_id]
except TypeError:
log.error('Gene {}, SeqProp {}: sequence length mismatch between SCRATCH results and representative '
'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))
counter += 1
else:
log.error('{}: missing SCRATCH results'.format(g.id))
log.info('{}/{}: number of genes with SCRATCH predictions loaded'.format(counter, len(self.genes))) | python | def get_scratch_predictions(self, path_to_scratch, results_dir, scratch_basename='scratch', num_cores=1,
exposed_buried_cutoff=25, custom_gene_mapping=None):
"""Run and parse ``SCRATCH`` results to predict secondary structure and solvent accessibility.
Annotations are stored in the protein's representative sequence at:
* ``.annotations``
* ``.letter_annotations``
Args:
path_to_scratch (str): Path to SCRATCH executable
results_dir (str): Path to SCRATCH results folder, which will have the files (scratch.ss, scratch.ss8,
scratch.acc, scratch.acc20)
scratch_basename (str): Basename of the SCRATCH results ('scratch' is default)
num_cores (int): Number of cores to use to parallelize SCRATCH run
exposed_buried_cutoff (int): Cutoff of exposed/buried for the acc20 predictions
custom_gene_mapping (dict): Default parsing of SCRATCH output files is to look for the model gene IDs. If
your output files contain IDs which differ from the model gene IDs, use this dictionary to map model
gene IDs to result file IDs. Dictionary keys must match model genes.
"""
if not self.genome_path:
# Write all sequences as one file
all_seqs = self.write_representative_sequences_file(outname=self.id)
# Runs SCRATCH or loads existing results in results_dir
scratch = SCRATCH(project_name=scratch_basename, seq_file=self.genome_path)
scratch.run_scratch(path_to_scratch=path_to_scratch, num_cores=num_cores, outdir=results_dir)
sspro_summary = scratch.sspro_summary()
sspro8_summary = scratch.sspro8_summary()
sspro_results = scratch.sspro_results()
sspro8_results = scratch.sspro8_results()
accpro_summary = scratch.accpro_summary()
accpro20_summary = scratch.accpro20_summary(exposed_buried_cutoff)
accpro_results = scratch.accpro_results()
accpro20_results = scratch.accpro20_results()
counter = 0
# Adding the scratch annotations to the representative_sequences letter_annotations
for g in tqdm(self.genes_with_a_representative_sequence):
if custom_gene_mapping:
g_id = custom_gene_mapping[g.id]
else:
g_id = g.id
if g_id in sspro_summary:
# Secondary structure
g.protein.representative_sequence.annotations.update(sspro_summary[g_id])
g.protein.representative_sequence.annotations.update(sspro8_summary[g_id])
try:
g.protein.representative_sequence.letter_annotations['SS-sspro'] = sspro_results[g_id]
g.protein.representative_sequence.letter_annotations['SS-sspro8'] = sspro8_results[g_id]
except TypeError:
log.error('Gene {}, SeqProp {}: sequence length mismatch between SCRATCH results and representative '
'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))
# Solvent accessibility
g.protein.representative_sequence.annotations.update(accpro_summary[g_id])
g.protein.representative_sequence.annotations.update(accpro20_summary[g_id])
try:
g.protein.representative_sequence.letter_annotations['RSA-accpro'] = accpro_results[g_id]
g.protein.representative_sequence.letter_annotations['RSA-accpro20'] = accpro20_results[g_id]
except TypeError:
log.error('Gene {}, SeqProp {}: sequence length mismatch between SCRATCH results and representative '
'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))
counter += 1
else:
log.error('{}: missing SCRATCH results'.format(g.id))
log.info('{}/{}: number of genes with SCRATCH predictions loaded'.format(counter, len(self.genes))) | [
"def",
"get_scratch_predictions",
"(",
"self",
",",
"path_to_scratch",
",",
"results_dir",
",",
"scratch_basename",
"=",
"'scratch'",
",",
"num_cores",
"=",
"1",
",",
"exposed_buried_cutoff",
"=",
"25",
",",
"custom_gene_mapping",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"genome_path",
":",
"# Write all sequences as one file",
"all_seqs",
"=",
"self",
".",
"write_representative_sequences_file",
"(",
"outname",
"=",
"self",
".",
"id",
")",
"# Runs SCRATCH or loads existing results in results_dir",
"scratch",
"=",
"SCRATCH",
"(",
"project_name",
"=",
"scratch_basename",
",",
"seq_file",
"=",
"self",
".",
"genome_path",
")",
"scratch",
".",
"run_scratch",
"(",
"path_to_scratch",
"=",
"path_to_scratch",
",",
"num_cores",
"=",
"num_cores",
",",
"outdir",
"=",
"results_dir",
")",
"sspro_summary",
"=",
"scratch",
".",
"sspro_summary",
"(",
")",
"sspro8_summary",
"=",
"scratch",
".",
"sspro8_summary",
"(",
")",
"sspro_results",
"=",
"scratch",
".",
"sspro_results",
"(",
")",
"sspro8_results",
"=",
"scratch",
".",
"sspro8_results",
"(",
")",
"accpro_summary",
"=",
"scratch",
".",
"accpro_summary",
"(",
")",
"accpro20_summary",
"=",
"scratch",
".",
"accpro20_summary",
"(",
"exposed_buried_cutoff",
")",
"accpro_results",
"=",
"scratch",
".",
"accpro_results",
"(",
")",
"accpro20_results",
"=",
"scratch",
".",
"accpro20_results",
"(",
")",
"counter",
"=",
"0",
"# Adding the scratch annotations to the representative_sequences letter_annotations",
"for",
"g",
"in",
"tqdm",
"(",
"self",
".",
"genes_with_a_representative_sequence",
")",
":",
"if",
"custom_gene_mapping",
":",
"g_id",
"=",
"custom_gene_mapping",
"[",
"g",
".",
"id",
"]",
"else",
":",
"g_id",
"=",
"g",
".",
"id",
"if",
"g_id",
"in",
"sspro_summary",
":",
"# Secondary structure",
"g",
".",
"protein",
".",
"representative_sequence",
".",
"annotations",
".",
"update",
"(",
"sspro_summary",
"[",
"g_id",
"]",
")",
"g",
".",
"protein",
".",
"representative_sequence",
".",
"annotations",
".",
"update",
"(",
"sspro8_summary",
"[",
"g_id",
"]",
")",
"try",
":",
"g",
".",
"protein",
".",
"representative_sequence",
".",
"letter_annotations",
"[",
"'SS-sspro'",
"]",
"=",
"sspro_results",
"[",
"g_id",
"]",
"g",
".",
"protein",
".",
"representative_sequence",
".",
"letter_annotations",
"[",
"'SS-sspro8'",
"]",
"=",
"sspro8_results",
"[",
"g_id",
"]",
"except",
"TypeError",
":",
"log",
".",
"error",
"(",
"'Gene {}, SeqProp {}: sequence length mismatch between SCRATCH results and representative '",
"'sequence, unable to set letter annotation'",
".",
"format",
"(",
"g_id",
",",
"g",
".",
"protein",
".",
"representative_sequence",
".",
"id",
")",
")",
"# Solvent accessibility",
"g",
".",
"protein",
".",
"representative_sequence",
".",
"annotations",
".",
"update",
"(",
"accpro_summary",
"[",
"g_id",
"]",
")",
"g",
".",
"protein",
".",
"representative_sequence",
".",
"annotations",
".",
"update",
"(",
"accpro20_summary",
"[",
"g_id",
"]",
")",
"try",
":",
"g",
".",
"protein",
".",
"representative_sequence",
".",
"letter_annotations",
"[",
"'RSA-accpro'",
"]",
"=",
"accpro_results",
"[",
"g_id",
"]",
"g",
".",
"protein",
".",
"representative_sequence",
".",
"letter_annotations",
"[",
"'RSA-accpro20'",
"]",
"=",
"accpro20_results",
"[",
"g_id",
"]",
"except",
"TypeError",
":",
"log",
".",
"error",
"(",
"'Gene {}, SeqProp {}: sequence length mismatch between SCRATCH results and representative '",
"'sequence, unable to set letter annotation'",
".",
"format",
"(",
"g_id",
",",
"g",
".",
"protein",
".",
"representative_sequence",
".",
"id",
")",
")",
"counter",
"+=",
"1",
"else",
":",
"log",
".",
"error",
"(",
"'{}: missing SCRATCH results'",
".",
"format",
"(",
"g",
".",
"id",
")",
")",
"log",
".",
"info",
"(",
"'{}/{}: number of genes with SCRATCH predictions loaded'",
".",
"format",
"(",
"counter",
",",
"len",
"(",
"self",
".",
"genes",
")",
")",
")"
] | Run and parse ``SCRATCH`` results to predict secondary structure and solvent accessibility.
Annotations are stored in the protein's representative sequence at:
* ``.annotations``
* ``.letter_annotations``
Args:
path_to_scratch (str): Path to SCRATCH executable
results_dir (str): Path to SCRATCH results folder, which will have the files (scratch.ss, scratch.ss8,
scratch.acc, scratch.acc20)
scratch_basename (str): Basename of the SCRATCH results ('scratch' is default)
num_cores (int): Number of cores to use to parallelize SCRATCH run
exposed_buried_cutoff (int): Cutoff of exposed/buried for the acc20 predictions
custom_gene_mapping (dict): Default parsing of SCRATCH output files is to look for the model gene IDs. If
your output files contain IDs which differ from the model gene IDs, use this dictionary to map model
gene IDs to result file IDs. Dictionary keys must match model genes. | [
"Run",
"and",
"parse",
"SCRATCH",
"results",
"to",
"predict",
"secondary",
"structure",
"and",
"solvent",
"accessibility",
".",
"Annotations",
"are",
"stored",
"in",
"the",
"protein",
"s",
"representative",
"sequence",
"at",
":"
] | train | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L775-L844 | 0.006953 |
tomnor/channelpack | channelpack/pulltxt.py | PatternPull.file_rows | def file_rows(self, fo):
"""Return the lines in the file as a list.
fo is the open file object."""
rows = []
for i in range(NUMROWS):
line = fo.readline()
if not line:
break
rows += [line]
return rows | python | def file_rows(self, fo):
"""Return the lines in the file as a list.
fo is the open file object."""
rows = []
for i in range(NUMROWS):
line = fo.readline()
if not line:
break
rows += [line]
return rows | [
"def",
"file_rows",
"(",
"self",
",",
"fo",
")",
":",
"rows",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"NUMROWS",
")",
":",
"line",
"=",
"fo",
".",
"readline",
"(",
")",
"if",
"not",
"line",
":",
"break",
"rows",
"+=",
"[",
"line",
"]",
"return",
"rows"
] | Return the lines in the file as a list.
fo is the open file object. | [
"Return",
"the",
"lines",
"in",
"the",
"file",
"as",
"a",
"list",
"."
] | train | https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pulltxt.py#L101-L113 | 0.00678 |
pandas-dev/pandas | pandas/core/dtypes/concat.py | _concat_compat | def _concat_compat(to_concat, axis=0):
"""
provide concatenation of an array of arrays each of which is a single
'normalized' dtypes (in that for example, if it's object, then it is a
non-datetimelike and provide a combined dtype for the resulting array that
preserves the overall dtype if possible)
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
Returns
-------
a single array, preserving the combined dtypes
"""
# filter empty arrays
# 1-d dtypes always are included here
def is_nonempty(x):
try:
return x.shape[axis] > 0
except Exception:
return True
# If all arrays are empty, there's nothing to convert, just short-cut to
# the concatenation, #3121.
#
# Creating an empty array directly is tempting, but the winnings would be
# marginal given that it would still require shape & dtype calculation and
# np.concatenate which has them both implemented is compiled.
typs = get_dtype_kinds(to_concat)
_contains_datetime = any(typ.startswith('datetime') for typ in typs)
_contains_period = any(typ.startswith('period') for typ in typs)
if 'category' in typs:
# this must be priort to _concat_datetime,
# to support Categorical + datetime-like
return _concat_categorical(to_concat, axis=axis)
elif _contains_datetime or 'timedelta' in typs or _contains_period:
return _concat_datetime(to_concat, axis=axis, typs=typs)
# these are mandated to handle empties as well
elif 'sparse' in typs:
return _concat_sparse(to_concat, axis=axis, typs=typs)
all_empty = all(not is_nonempty(x) for x in to_concat)
if any(is_extension_array_dtype(x) for x in to_concat) and axis == 1:
to_concat = [np.atleast_2d(x.astype('object')) for x in to_concat]
if all_empty:
# we have all empties, but may need to coerce the result dtype to
# object if we have non-numeric type operands (numpy would otherwise
# cast this to float)
typs = get_dtype_kinds(to_concat)
if len(typs) != 1:
if (not len(typs - {'i', 'u', 'f'}) or
not len(typs - {'bool', 'i', 'u'})):
# let numpy coerce
pass
else:
# coerce to object
to_concat = [x.astype('object') for x in to_concat]
return np.concatenate(to_concat, axis=axis) | python | def _concat_compat(to_concat, axis=0):
"""
provide concatenation of an array of arrays each of which is a single
'normalized' dtypes (in that for example, if it's object, then it is a
non-datetimelike and provide a combined dtype for the resulting array that
preserves the overall dtype if possible)
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
Returns
-------
a single array, preserving the combined dtypes
"""
# filter empty arrays
# 1-d dtypes always are included here
def is_nonempty(x):
try:
return x.shape[axis] > 0
except Exception:
return True
# If all arrays are empty, there's nothing to convert, just short-cut to
# the concatenation, #3121.
#
# Creating an empty array directly is tempting, but the winnings would be
# marginal given that it would still require shape & dtype calculation and
# np.concatenate which has them both implemented is compiled.
typs = get_dtype_kinds(to_concat)
_contains_datetime = any(typ.startswith('datetime') for typ in typs)
_contains_period = any(typ.startswith('period') for typ in typs)
if 'category' in typs:
# this must be priort to _concat_datetime,
# to support Categorical + datetime-like
return _concat_categorical(to_concat, axis=axis)
elif _contains_datetime or 'timedelta' in typs or _contains_period:
return _concat_datetime(to_concat, axis=axis, typs=typs)
# these are mandated to handle empties as well
elif 'sparse' in typs:
return _concat_sparse(to_concat, axis=axis, typs=typs)
all_empty = all(not is_nonempty(x) for x in to_concat)
if any(is_extension_array_dtype(x) for x in to_concat) and axis == 1:
to_concat = [np.atleast_2d(x.astype('object')) for x in to_concat]
if all_empty:
# we have all empties, but may need to coerce the result dtype to
# object if we have non-numeric type operands (numpy would otherwise
# cast this to float)
typs = get_dtype_kinds(to_concat)
if len(typs) != 1:
if (not len(typs - {'i', 'u', 'f'}) or
not len(typs - {'bool', 'i', 'u'})):
# let numpy coerce
pass
else:
# coerce to object
to_concat = [x.astype('object') for x in to_concat]
return np.concatenate(to_concat, axis=axis) | [
"def",
"_concat_compat",
"(",
"to_concat",
",",
"axis",
"=",
"0",
")",
":",
"# filter empty arrays",
"# 1-d dtypes always are included here",
"def",
"is_nonempty",
"(",
"x",
")",
":",
"try",
":",
"return",
"x",
".",
"shape",
"[",
"axis",
"]",
">",
"0",
"except",
"Exception",
":",
"return",
"True",
"# If all arrays are empty, there's nothing to convert, just short-cut to",
"# the concatenation, #3121.",
"#",
"# Creating an empty array directly is tempting, but the winnings would be",
"# marginal given that it would still require shape & dtype calculation and",
"# np.concatenate which has them both implemented is compiled.",
"typs",
"=",
"get_dtype_kinds",
"(",
"to_concat",
")",
"_contains_datetime",
"=",
"any",
"(",
"typ",
".",
"startswith",
"(",
"'datetime'",
")",
"for",
"typ",
"in",
"typs",
")",
"_contains_period",
"=",
"any",
"(",
"typ",
".",
"startswith",
"(",
"'period'",
")",
"for",
"typ",
"in",
"typs",
")",
"if",
"'category'",
"in",
"typs",
":",
"# this must be priort to _concat_datetime,",
"# to support Categorical + datetime-like",
"return",
"_concat_categorical",
"(",
"to_concat",
",",
"axis",
"=",
"axis",
")",
"elif",
"_contains_datetime",
"or",
"'timedelta'",
"in",
"typs",
"or",
"_contains_period",
":",
"return",
"_concat_datetime",
"(",
"to_concat",
",",
"axis",
"=",
"axis",
",",
"typs",
"=",
"typs",
")",
"# these are mandated to handle empties as well",
"elif",
"'sparse'",
"in",
"typs",
":",
"return",
"_concat_sparse",
"(",
"to_concat",
",",
"axis",
"=",
"axis",
",",
"typs",
"=",
"typs",
")",
"all_empty",
"=",
"all",
"(",
"not",
"is_nonempty",
"(",
"x",
")",
"for",
"x",
"in",
"to_concat",
")",
"if",
"any",
"(",
"is_extension_array_dtype",
"(",
"x",
")",
"for",
"x",
"in",
"to_concat",
")",
"and",
"axis",
"==",
"1",
":",
"to_concat",
"=",
"[",
"np",
".",
"atleast_2d",
"(",
"x",
".",
"astype",
"(",
"'object'",
")",
")",
"for",
"x",
"in",
"to_concat",
"]",
"if",
"all_empty",
":",
"# we have all empties, but may need to coerce the result dtype to",
"# object if we have non-numeric type operands (numpy would otherwise",
"# cast this to float)",
"typs",
"=",
"get_dtype_kinds",
"(",
"to_concat",
")",
"if",
"len",
"(",
"typs",
")",
"!=",
"1",
":",
"if",
"(",
"not",
"len",
"(",
"typs",
"-",
"{",
"'i'",
",",
"'u'",
",",
"'f'",
"}",
")",
"or",
"not",
"len",
"(",
"typs",
"-",
"{",
"'bool'",
",",
"'i'",
",",
"'u'",
"}",
")",
")",
":",
"# let numpy coerce",
"pass",
"else",
":",
"# coerce to object",
"to_concat",
"=",
"[",
"x",
".",
"astype",
"(",
"'object'",
")",
"for",
"x",
"in",
"to_concat",
"]",
"return",
"np",
".",
"concatenate",
"(",
"to_concat",
",",
"axis",
"=",
"axis",
")"
] | provide concatenation of an array of arrays each of which is a single
'normalized' dtypes (in that for example, if it's object, then it is a
non-datetimelike and provide a combined dtype for the resulting array that
preserves the overall dtype if possible)
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
Returns
-------
a single array, preserving the combined dtypes | [
"provide",
"concatenation",
"of",
"an",
"array",
"of",
"arrays",
"each",
"of",
"which",
"is",
"a",
"single",
"normalized",
"dtypes",
"(",
"in",
"that",
"for",
"example",
"if",
"it",
"s",
"object",
"then",
"it",
"is",
"a",
"non",
"-",
"datetimelike",
"and",
"provide",
"a",
"combined",
"dtype",
"for",
"the",
"resulting",
"array",
"that",
"preserves",
"the",
"overall",
"dtype",
"if",
"possible",
")"
] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/concat.py#L98-L165 | 0.0004 |
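
`_concat_compat` is internal to pandas, but the dtype rules it implements are visible through the public `pd.concat`; a brief sketch (exact dtypes may vary between pandas versions):

```python
import pandas as pd

# Numeric kinds ('i', 'u', 'f') are left to numpy's own coercion rules...
print(pd.concat([pd.Series([1, 2]), pd.Series([0.5])]).dtype)    # float64

# ...while mixing numeric and non-numeric operands falls back to object.
print(pd.concat([pd.Series([1, 2]), pd.Series(["a"])]).dtype)    # object

# Categorical operands are routed to _concat_categorical before the datetime branch.
cat = pd.Series(pd.Categorical(["x", "y"]))
dt = pd.Series(pd.to_datetime(["2019-01-01"]))
print(pd.concat([cat, dt]).dtype)                                # object
```
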
FactoryBoy/factory_boy | factory/random.py | set_random_state | def set_random_state(state):
"""Force-set the state of factory.fuzzy's random generator."""
randgen.state_set = True
randgen.setstate(state)
faker.generator.random.setstate(state) | python | def set_random_state(state):
"""Force-set the state of factory.fuzzy's random generator."""
randgen.state_set = True
randgen.setstate(state)
faker.generator.random.setstate(state) | [
"def",
"set_random_state",
"(",
"state",
")",
":",
"randgen",
".",
"state_set",
"=",
"True",
"randgen",
".",
"setstate",
"(",
"state",
")",
"faker",
".",
"generator",
".",
"random",
".",
"setstate",
"(",
"state",
")"
] | Force-set the state of factory.fuzzy's random generator. | [
"Force",
"-",
"set",
"the",
"state",
"of",
"factory",
".",
"fuzzy",
"s",
"random",
"generator",
"."
] | train | https://github.com/FactoryBoy/factory_boy/blob/edaa7c7f5a14065b229927903bd7989cc93cd069/factory/random.py#L16-L21 | 0.005102 |
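
A reproducibility sketch around `set_random_state`; `reseed_random` and `get_random_state` are its documented counterparts in `factory.random` and are assumed available here:

```python
import factory
from factory import random as factory_random

class UserFactory(factory.DictFactory):
    name = factory.Faker("name")

# Seed the shared generators, capture the state, build, then rewind and build again.
factory_random.reseed_random("demo-seed")      # assumed helper: seeds factory and Faker together
state = factory_random.get_random_state()      # assumed counterpart getter

first = UserFactory()
factory_random.set_random_state(state)         # the function shown in this row
second = UserFactory()

assert first == second                         # identical Faker output after restoring the state
```
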
tadashi-aikawa/owlmixin | owlmixin/transformers.py | YamlTransformer.to_yaml | def to_yaml(self, ignore_none: bool=True, ignore_empty: bool=False) -> str:
"""From instance to yaml string
:param ignore_none: Properties which is None are excluded if True
:param ignore_empty: Properties which is empty are excluded if True
:return: Yaml string
Usage:
>>> from owlmixin.samples import Human
>>> human = Human.from_dict({
... "id": 1,
... "name": "Tom",
... "favorites": [
... {"name": "Apple", "names_by_lang": {"en": "Apple", "de": "Apfel"}},
... {"name": "Orange"}
... ]
... })
>>> print(human.to_yaml())
favorites:
- name: Apple
names_by_lang:
de: Apfel
en: Apple
- name: Orange
id: 1
name: Tom
<BLANKLINE>
"""
return util.dump_yaml(traverse(self, ignore_none, force_value=True, ignore_empty=ignore_empty)) | python | def to_yaml(self, ignore_none: bool=True, ignore_empty: bool=False) -> str:
"""From instance to yaml string
:param ignore_none: Properties which is None are excluded if True
:param ignore_empty: Properties which is empty are excluded if True
:return: Yaml string
Usage:
>>> from owlmixin.samples import Human
>>> human = Human.from_dict({
... "id": 1,
... "name": "Tom",
... "favorites": [
... {"name": "Apple", "names_by_lang": {"en": "Apple", "de": "Apfel"}},
... {"name": "Orange"}
... ]
... })
>>> print(human.to_yaml())
favorites:
- name: Apple
names_by_lang:
de: Apfel
en: Apple
- name: Orange
id: 1
name: Tom
<BLANKLINE>
"""
return util.dump_yaml(traverse(self, ignore_none, force_value=True, ignore_empty=ignore_empty)) | [
"def",
"to_yaml",
"(",
"self",
",",
"ignore_none",
":",
"bool",
"=",
"True",
",",
"ignore_empty",
":",
"bool",
"=",
"False",
")",
"->",
"str",
":",
"return",
"util",
".",
"dump_yaml",
"(",
"traverse",
"(",
"self",
",",
"ignore_none",
",",
"force_value",
"=",
"True",
",",
"ignore_empty",
"=",
"ignore_empty",
")",
")"
] | From instance to yaml string
:param ignore_none: Properties which is None are excluded if True
:param ignore_empty: Properties which is empty are excluded if True
:return: Yaml string
Usage:
>>> from owlmixin.samples import Human
>>> human = Human.from_dict({
... "id": 1,
... "name": "Tom",
... "favorites": [
... {"name": "Apple", "names_by_lang": {"en": "Apple", "de": "Apfel"}},
... {"name": "Orange"}
... ]
... })
>>> print(human.to_yaml())
favorites:
- name: Apple
names_by_lang:
de: Apfel
en: Apple
- name: Orange
id: 1
name: Tom
<BLANKLINE> | [
"From",
"instance",
"to",
"yaml",
"string"
] | train | https://github.com/tadashi-aikawa/owlmixin/blob/7c4a042c3008abddc56a8e8e55ae930d276071f5/owlmixin/transformers.py#L273-L302 | 0.007505 |
mpdavis/python-jose | jose/backends/rsa_backend.py | _gcd | def _gcd(a, b):
"""Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive).
"""
while b:
a, b = b, (a % b)
return a | python | def _gcd(a, b):
"""Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive).
"""
while b:
a, b = b, (a % b)
return a | [
"def",
"_gcd",
"(",
"a",
",",
"b",
")",
":",
"while",
"b",
":",
"a",
",",
"b",
"=",
"b",
",",
"(",
"a",
"%",
"b",
")",
"return",
"a"
] | Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive). | [
"Calculate",
"the",
"Greatest",
"Common",
"Divisor",
"of",
"a",
"and",
"b",
"."
] | train | https://github.com/mpdavis/python-jose/blob/deea7600eeea47aeb1bf5053a96de51cf2b9c639/jose/backends/rsa_backend.py#L37-L45 | 0.003846 |
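
The helper is the iterative Euclidean algorithm; a quick self-contained check against the standard library (values are illustrative):

```python
import math

def _gcd(a, b):
    """Iterative Euclidean algorithm, copied from the snippet above."""
    while b:
        a, b = b, (a % b)
    return a

assert _gcd(54, 24) == 6
assert _gcd(0, 7) == 7
assert _gcd(270, 192) == math.gcd(270, 192) == 6
```
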
mozilla/treeherder | treeherder/etl/jobs.py | _remove_existing_jobs | def _remove_existing_jobs(data):
"""
Remove jobs from data where we already have them in the same state.
1. split the incoming jobs into pending, running and complete.
2. fetch the ``job_guids`` from the db that are in the same state as they
are in ``data``.
3. build a new list of jobs in ``new_data`` that are not already in
the db and pass that back. It could end up empty at that point.
"""
new_data = []
guids = [datum['job']['job_guid'] for datum in data]
state_map = {
guid: state for (guid, state) in Job.objects.filter(
guid__in=guids).values_list('guid', 'state')
}
for datum in data:
job = datum['job']
if not state_map.get(job['job_guid']):
new_data.append(datum)
else:
# should not transition from running to pending,
# or completed to any other state
current_state = state_map[job['job_guid']]
if current_state == 'completed' or (
job['state'] == 'pending' and
current_state == 'running'):
continue
new_data.append(datum)
return new_data | python | def _remove_existing_jobs(data):
"""
Remove jobs from data where we already have them in the same state.
1. split the incoming jobs into pending, running and complete.
2. fetch the ``job_guids`` from the db that are in the same state as they
are in ``data``.
3. build a new list of jobs in ``new_data`` that are not already in
the db and pass that back. It could end up empty at that point.
"""
new_data = []
guids = [datum['job']['job_guid'] for datum in data]
state_map = {
guid: state for (guid, state) in Job.objects.filter(
guid__in=guids).values_list('guid', 'state')
}
for datum in data:
job = datum['job']
if not state_map.get(job['job_guid']):
new_data.append(datum)
else:
# should not transition from running to pending,
# or completed to any other state
current_state = state_map[job['job_guid']]
if current_state == 'completed' or (
job['state'] == 'pending' and
current_state == 'running'):
continue
new_data.append(datum)
return new_data | [
"def",
"_remove_existing_jobs",
"(",
"data",
")",
":",
"new_data",
"=",
"[",
"]",
"guids",
"=",
"[",
"datum",
"[",
"'job'",
"]",
"[",
"'job_guid'",
"]",
"for",
"datum",
"in",
"data",
"]",
"state_map",
"=",
"{",
"guid",
":",
"state",
"for",
"(",
"guid",
",",
"state",
")",
"in",
"Job",
".",
"objects",
".",
"filter",
"(",
"guid__in",
"=",
"guids",
")",
".",
"values_list",
"(",
"'guid'",
",",
"'state'",
")",
"}",
"for",
"datum",
"in",
"data",
":",
"job",
"=",
"datum",
"[",
"'job'",
"]",
"if",
"not",
"state_map",
".",
"get",
"(",
"job",
"[",
"'job_guid'",
"]",
")",
":",
"new_data",
".",
"append",
"(",
"datum",
")",
"else",
":",
"# should not transition from running to pending,",
"# or completed to any other state",
"current_state",
"=",
"state_map",
"[",
"job",
"[",
"'job_guid'",
"]",
"]",
"if",
"current_state",
"==",
"'completed'",
"or",
"(",
"job",
"[",
"'state'",
"]",
"==",
"'pending'",
"and",
"current_state",
"==",
"'running'",
")",
":",
"continue",
"new_data",
".",
"append",
"(",
"datum",
")",
"return",
"new_data"
] | Remove jobs from data where we already have them in the same state.
1. split the incoming jobs into pending, running and complete.
2. fetch the ``job_guids`` from the db that are in the same state as they
are in ``data``.
3. build a new list of jobs in ``new_data`` that are not already in
the db and pass that back. It could end up empty at that point. | [
"Remove",
"jobs",
"from",
"data",
"where",
"we",
"already",
"have",
"them",
"in",
"the",
"same",
"state",
"."
] | train | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/etl/jobs.py#L40-L72 | 0.00084 |
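
The state-transition filter in `_remove_existing_jobs` can be exercised without the Django ORM; a standalone sketch of the same rule with illustrative job payloads:

```python
# Jobs already stored, keyed by guid -> current state (stand-in for the Job.objects query).
state_map = {"guid-1": "running", "guid-2": "completed"}

incoming = [
    {"job": {"job_guid": "guid-0", "state": "pending"}},    # unknown guid -> keep
    {"job": {"job_guid": "guid-1", "state": "pending"}},    # running -> pending is backwards -> drop
    {"job": {"job_guid": "guid-1", "state": "completed"}},  # running -> completed -> keep
    {"job": {"job_guid": "guid-2", "state": "running"}},    # already completed -> drop
]

new_data = []
for datum in incoming:
    job = datum["job"]
    current = state_map.get(job["job_guid"])
    if current is None:
        new_data.append(datum)
    elif current == "completed" or (job["state"] == "pending" and current == "running"):
        continue
    else:
        new_data.append(datum)

assert [d["job"]["state"] for d in new_data] == ["pending", "completed"]
```
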
Galarzaa90/tibia.py | tibiapy/world.py | WorldOverview.from_content | def from_content(cls, content):
"""Parses the content of the World Overview section from Tibia.com into an object of this class.
Parameters
----------
content: :class:`str`
The HTML content of the World Overview page in Tibia.com
Returns
-------
:class:`WorldOverview`
An instance of this class containing all the information.
Raises
------
InvalidContent
If the provided content is not the HTML content of the worlds section in Tibia.com
"""
parsed_content = parse_tibiacom_content(content, html_class="TableContentAndRightShadow")
world_overview = WorldOverview()
try:
record_row, titles_row, *rows = parsed_content.find_all("tr")
m = record_regexp.search(record_row.text)
if not m:
raise InvalidContent("content does not belong to the World Overview section in Tibia.com")
world_overview.record_count = int(m.group("count"))
world_overview.record_date = parse_tibia_datetime(m.group("date"))
world_rows = rows
world_overview._parse_worlds(world_rows)
return world_overview
except (AttributeError, KeyError, ValueError):
raise InvalidContent("content does not belong to the World Overview section in Tibia.com") | python | def from_content(cls, content):
"""Parses the content of the World Overview section from Tibia.com into an object of this class.
Parameters
----------
content: :class:`str`
The HTML content of the World Overview page in Tibia.com
Returns
-------
:class:`WorldOverview`
An instance of this class containing all the information.
Raises
------
InvalidContent
If the provided content is not the HTML content of the worlds section in Tibia.com
"""
parsed_content = parse_tibiacom_content(content, html_class="TableContentAndRightShadow")
world_overview = WorldOverview()
try:
record_row, titles_row, *rows = parsed_content.find_all("tr")
m = record_regexp.search(record_row.text)
if not m:
raise InvalidContent("content does not belong to the World Overview section in Tibia.com")
world_overview.record_count = int(m.group("count"))
world_overview.record_date = parse_tibia_datetime(m.group("date"))
world_rows = rows
world_overview._parse_worlds(world_rows)
return world_overview
except (AttributeError, KeyError, ValueError):
raise InvalidContent("content does not belong to the World Overview section in Tibia.com") | [
"def",
"from_content",
"(",
"cls",
",",
"content",
")",
":",
"parsed_content",
"=",
"parse_tibiacom_content",
"(",
"content",
",",
"html_class",
"=",
"\"TableContentAndRightShadow\"",
")",
"world_overview",
"=",
"WorldOverview",
"(",
")",
"try",
":",
"record_row",
",",
"titles_row",
",",
"",
"*",
"rows",
"=",
"parsed_content",
".",
"find_all",
"(",
"\"tr\"",
")",
"m",
"=",
"record_regexp",
".",
"search",
"(",
"record_row",
".",
"text",
")",
"if",
"not",
"m",
":",
"raise",
"InvalidContent",
"(",
"\"content does not belong to the World Overview section in Tibia.com\"",
")",
"world_overview",
".",
"record_count",
"=",
"int",
"(",
"m",
".",
"group",
"(",
"\"count\"",
")",
")",
"world_overview",
".",
"record_date",
"=",
"parse_tibia_datetime",
"(",
"m",
".",
"group",
"(",
"\"date\"",
")",
")",
"world_rows",
"=",
"rows",
"world_overview",
".",
"_parse_worlds",
"(",
"world_rows",
")",
"return",
"world_overview",
"except",
"(",
"AttributeError",
",",
"KeyError",
",",
"ValueError",
")",
":",
"raise",
"InvalidContent",
"(",
"\"content does not belong to the World Overview section in Tibia.com\"",
")"
] | Parses the content of the World Overview section from Tibia.com into an object of this class.
Parameters
----------
content: :class:`str`
The HTML content of the World Overview page in Tibia.com
Returns
-------
:class:`WorldOverview`
An instance of this class containing all the information.
Raises
------
InvalidContent
If the provided content is not the HTML content of the worlds section in Tibia.com | [
"Parses",
"the",
"content",
"of",
"the",
"World",
"Overview",
"section",
"from",
"Tibia",
".",
"com",
"into",
"an",
"object",
"of",
"this",
"class",
"."
] | train | https://github.com/Galarzaa90/tibia.py/blob/02ba1a8f1e18177ef5c7dcd44affc8d761d59e12/tibiapy/world.py#L448-L479 | 0.005014 |
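
A hedged usage sketch for `WorldOverview.from_content`, assuming the page is fetched with `requests` and that `WorldOverview` (with `worlds`, `name` and `online_count` attributes) is exposed as shown; the URL may change:

```python
import requests
from tibiapy import WorldOverview   # assumed re-exported at package level; otherwise tibiapy.world

# Worlds section on Tibia.com (URL assumed; the library also exposes a get_url() helper).
content = requests.get("https://www.tibia.com/community/?subtopic=worlds").text

overview = WorldOverview.from_content(content)
print(overview.record_count, overview.record_date)
for world in overview.worlds:       # parsed per-world entries
    print(world.name, world.online_count)
```
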
okzach/bofhexcuse | bofhexcuse/__init__.py | generate_random_string | def generate_random_string(template_dict, key='start'):
"""Generates a random excuse from a simple template dict.
Based off of drow's generator.js (public domain).
Grok it here: http://donjon.bin.sh/code/random/generator.js
Args:
template_dict: Dict with template strings.
key: String with the starting index for the dict. (Default: 'start')
Returns:
Generated string.
"""
data = template_dict.get(key)
#if isinstance(data, list):
result = random.choice(data)
#else:
#result = random.choice(data.values())
for match in token_regex.findall(result):
word = generate_random_string(template_dict, match) or match
result = result.replace('{{{0}}}'.format(match), word)
return result | python | def generate_random_string(template_dict, key='start'):
"""Generates a random excuse from a simple template dict.
Based off of drow's generator.js (public domain).
Grok it here: http://donjon.bin.sh/code/random/generator.js
Args:
template_dict: Dict with template strings.
key: String with the starting index for the dict. (Default: 'start')
Returns:
Generated string.
"""
data = template_dict.get(key)
#if isinstance(data, list):
result = random.choice(data)
#else:
#result = random.choice(data.values())
for match in token_regex.findall(result):
word = generate_random_string(template_dict, match) or match
result = result.replace('{{{0}}}'.format(match), word)
return result | [
"def",
"generate_random_string",
"(",
"template_dict",
",",
"key",
"=",
"'start'",
")",
":",
"data",
"=",
"template_dict",
".",
"get",
"(",
"key",
")",
"#if isinstance(data, list):",
"result",
"=",
"random",
".",
"choice",
"(",
"data",
")",
"#else:",
"#result = random.choice(data.values())",
"for",
"match",
"in",
"token_regex",
".",
"findall",
"(",
"result",
")",
":",
"word",
"=",
"generate_random_string",
"(",
"template_dict",
",",
"match",
")",
"or",
"match",
"result",
"=",
"result",
".",
"replace",
"(",
"'{{{0}}}'",
".",
"format",
"(",
"match",
")",
",",
"word",
")",
"return",
"result"
] | Generates a random excuse from a simple template dict.
Based off of drow's generator.js (public domain).
Grok it here: http://donjon.bin.sh/code/random/generator.js
Args:
template_dict: Dict with template strings.
key: String with the starting index for the dict. (Default: 'start')
Returns:
Generated string. | [
"Generates",
"a",
"random",
"excuse",
"from",
"a",
"simple",
"template",
"dict",
"."
] | train | https://github.com/okzach/bofhexcuse/blob/ea241b8a63c4baa44cfb17c6acd47b6e12e822af/bofhexcuse/__init__.py#L14-L39 | 0.00641 |
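
A self-contained sketch of the recursive template expansion performed by `generate_random_string`; the token regex and excuse templates below are illustrative stand-ins for the module-level ones:

```python
import random
import re

token_regex = re.compile(r"\{(\w+)\}")   # stand-in for the module-level pattern

def generate_random_string(template_dict, key="start"):
    # Pick a template for this key, then recursively expand every {token} it contains.
    result = random.choice(template_dict.get(key))
    for match in token_regex.findall(result):
        word = generate_random_string(template_dict, match) or match
        result = result.replace("{{{0}}}".format(match), word)
    return result

excuses = {
    "start": ["{cause} caused a {problem}"],
    "cause": ["solar flares", "a loose cable"],
    "problem": ["kernel panic", "packet storm"],
}

print(generate_random_string(excuses))   # e.g. "solar flares caused a kernel panic"
```
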
theosysbio/means | src/means/inference/distances.py | _eval_density | def _eval_density(means, variances,observed_values, distribution):
"""
Calculates gamma/lognormal/normal pdf given mean variance, x
where x is the experimental species number measured at a particular timepoint. Returns ln(pdf)
:param mean: mean
:param var: variance
:param observed_values: experimental species number measured at a particular timepoint
:param distribution: distribution to consider. Either 'gamma', 'normal' or 'lognormal'
:return: normal log of the pdf
"""
means = np.array(means, dtype=NP_FLOATING_POINT_PRECISION)
variances = np.array(variances, dtype=NP_FLOATING_POINT_PRECISION)
observed_values = np.array(observed_values, dtype=NP_FLOATING_POINT_PRECISION)
# Remove data about unobserved datapoints
means = means[~np.isnan(observed_values)]
variances = variances[~np.isnan(observed_values)]
observed_values = observed_values[~np.isnan(observed_values)]
# Remove data for when variance is zero as we cannot estimate distributions that way
non_zero_varianes = ~(variances == 0)
means = means[non_zero_varianes]
variances = variances[~(variances == 0)]
observed_values = observed_values[non_zero_varianes]
if distribution == 'gamma':
b = variances / means
a = means / b
log_observed_values = np.log(observed_values)
log_density = (a - 1.0) * log_observed_values - (observed_values / b) - a * np.log(b) - gammaln(a)
elif distribution == 'normal':
log_density = -(observed_values - means) ** 2 / (2 * variances) - np.log(np.sqrt(2 * np.pi * variances))
elif distribution == 'lognormal':
log_density = -(np.log(observed_values) - means) ** 2 / (2 * variances) - np.log(observed_values * np.sqrt(2 * np.pi * variances))
else:
raise ValueError('Unsupported distribution {0!r}'.format(distribution))
total_log_density = np.sum(log_density)
return total_log_density | python | def _eval_density(means, variances,observed_values, distribution):
"""
Calculates gamma/lognormal/normal pdf given mean variance, x
where x is the experimental species number measured at a particular timepoint. Returns ln(pdf)
:param mean: mean
:param var: variance
:param observed_values: experimental species number measured at a particular timepoint
:param distribution: distribution to consider. Either 'gamma', 'normal' or 'lognormal'
:return: normal log of the pdf
"""
means = np.array(means, dtype=NP_FLOATING_POINT_PRECISION)
variances = np.array(variances, dtype=NP_FLOATING_POINT_PRECISION)
observed_values = np.array(observed_values, dtype=NP_FLOATING_POINT_PRECISION)
# Remove data about unobserved datapoints
means = means[~np.isnan(observed_values)]
variances = variances[~np.isnan(observed_values)]
observed_values = observed_values[~np.isnan(observed_values)]
# Remove data for when variance is zero as we cannot estimate distributions that way
non_zero_varianes = ~(variances == 0)
means = means[non_zero_varianes]
variances = variances[~(variances == 0)]
observed_values = observed_values[non_zero_varianes]
if distribution == 'gamma':
b = variances / means
a = means / b
log_observed_values = np.log(observed_values)
log_density = (a - 1.0) * log_observed_values - (observed_values / b) - a * np.log(b) - gammaln(a)
elif distribution == 'normal':
log_density = -(observed_values - means) ** 2 / (2 * variances) - np.log(np.sqrt(2 * np.pi * variances))
elif distribution == 'lognormal':
log_density = -(np.log(observed_values) - means) ** 2 / (2 * variances) - np.log(observed_values * np.sqrt(2 * np.pi * variances))
else:
raise ValueError('Unsupported distribution {0!r}'.format(distribution))
total_log_density = np.sum(log_density)
return total_log_density | [
"def",
"_eval_density",
"(",
"means",
",",
"variances",
",",
"observed_values",
",",
"distribution",
")",
":",
"means",
"=",
"np",
".",
"array",
"(",
"means",
",",
"dtype",
"=",
"NP_FLOATING_POINT_PRECISION",
")",
"variances",
"=",
"np",
".",
"array",
"(",
"variances",
",",
"dtype",
"=",
"NP_FLOATING_POINT_PRECISION",
")",
"observed_values",
"=",
"np",
".",
"array",
"(",
"observed_values",
",",
"dtype",
"=",
"NP_FLOATING_POINT_PRECISION",
")",
"# Remove data about unobserved datapoints",
"means",
"=",
"means",
"[",
"~",
"np",
".",
"isnan",
"(",
"observed_values",
")",
"]",
"variances",
"=",
"variances",
"[",
"~",
"np",
".",
"isnan",
"(",
"observed_values",
")",
"]",
"observed_values",
"=",
"observed_values",
"[",
"~",
"np",
".",
"isnan",
"(",
"observed_values",
")",
"]",
"# Remove data for when variance is zero as we cannot estimate distributions that way",
"non_zero_varianes",
"=",
"~",
"(",
"variances",
"==",
"0",
")",
"means",
"=",
"means",
"[",
"non_zero_varianes",
"]",
"variances",
"=",
"variances",
"[",
"~",
"(",
"variances",
"==",
"0",
")",
"]",
"observed_values",
"=",
"observed_values",
"[",
"non_zero_varianes",
"]",
"if",
"distribution",
"==",
"'gamma'",
":",
"b",
"=",
"variances",
"/",
"means",
"a",
"=",
"means",
"/",
"b",
"log_observed_values",
"=",
"np",
".",
"log",
"(",
"observed_values",
")",
"log_density",
"=",
"(",
"a",
"-",
"1.0",
")",
"*",
"log_observed_values",
"-",
"(",
"observed_values",
"/",
"b",
")",
"-",
"a",
"*",
"np",
".",
"log",
"(",
"b",
")",
"-",
"gammaln",
"(",
"a",
")",
"elif",
"distribution",
"==",
"'normal'",
":",
"log_density",
"=",
"-",
"(",
"observed_values",
"-",
"means",
")",
"**",
"2",
"/",
"(",
"2",
"*",
"variances",
")",
"-",
"np",
".",
"log",
"(",
"np",
".",
"sqrt",
"(",
"2",
"*",
"np",
".",
"pi",
"*",
"variances",
")",
")",
"elif",
"distribution",
"==",
"'lognormal'",
":",
"log_density",
"=",
"-",
"(",
"np",
".",
"log",
"(",
"observed_values",
")",
"-",
"means",
")",
"**",
"2",
"/",
"(",
"2",
"*",
"variances",
")",
"-",
"np",
".",
"log",
"(",
"observed_values",
"*",
"np",
".",
"sqrt",
"(",
"2",
"*",
"np",
".",
"pi",
"*",
"variances",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported distribution {0!r}'",
".",
"format",
"(",
"distribution",
")",
")",
"total_log_density",
"=",
"np",
".",
"sum",
"(",
"log_density",
")",
"return",
"total_log_density"
] | Calculates gamma/lognormal/normal pdf given mean variance, x
where x is the experimental species number measured at a particular timepoint. Returns ln(pdf)
:param mean: mean
:param var: variance
:param observed_values: experimental species number measured at a particular timepoint
:param distribution: distribution to consider. Either 'gamma', 'normal' or 'lognormal'
:return: normal log of the pdf | [
"Calculates",
"gamma",
"/",
"lognormal",
"/",
"normal",
"pdf",
"given",
"mean",
"variance",
"x",
"where",
"x",
"is",
"the",
"experimental",
"species",
"number",
"measured",
"at",
"a",
"particular",
"timepoint",
".",
"Returns",
"ln",
"(",
"pdf",
")",
":",
"param",
"mean",
":",
"mean",
":",
"param",
"var",
":",
"variance",
":",
"param",
"observed_values",
":",
"experimental",
"species",
"number",
"measured",
"at",
"a",
"particular",
"timepoint",
":",
"param",
"distribution",
":",
"distribution",
"to",
"consider",
".",
"Either",
"gamma",
"normal",
"or",
"lognormal",
":",
"return",
":",
"normal",
"log",
"of",
"the",
"pdf"
] | train | https://github.com/theosysbio/means/blob/fe164916a1d84ab2a4fa039871d38ccdf638b1db/src/means/inference/distances.py#L163-L203 | 0.005133 |
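A small illustrative call. The numbers are invented and the helper is private to means.inference.distances, so this is a sketch rather than documented usage; as the code shows, NaN observations and zero-variance points are dropped before the log densities are summed, and the ":return:" note above most likely means the natural log of the pdf.

# Illustrative call (assumes the means package is importable; the arrays are made up).
import numpy as np
from means.inference.distances import _eval_density

means_ = [2.0, 3.0, 4.0]
variances = [0.5, 0.0, 1.5]      # the zero-variance point is filtered out internally
observed = [2.1, 2.9, np.nan]    # the NaN observation is treated as missing and dropped

total_log_density = _eval_density(means_, variances, observed, 'normal')
print(total_log_density)         # sum of the per-point log densities that survive filtering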
MichaelAquilina/hashedindex | hashedindex/__init__.py | HashedIndex.get_document_length | def get_document_length(self, document):
"""
Returns the number of terms found within the specified document.
"""
if document in self._documents:
return self._documents[document]
else:
raise IndexError(DOCUMENT_DOES_NOT_EXIST) | python | def get_document_length(self, document):
"""
Returns the number of terms found within the specified document.
"""
if document in self._documents:
return self._documents[document]
else:
raise IndexError(DOCUMENT_DOES_NOT_EXIST) | [
"def",
"get_document_length",
"(",
"self",
",",
"document",
")",
":",
"if",
"document",
"in",
"self",
".",
"_documents",
":",
"return",
"self",
".",
"_documents",
"[",
"document",
"]",
"else",
":",
"raise",
"IndexError",
"(",
"DOCUMENT_DOES_NOT_EXIST",
")"
] | Returns the number of terms found within the specified document. | [
"Returns",
"the",
"number",
"of",
"terms",
"found",
"within",
"the",
"specified",
"document",
"."
] | train | https://github.com/MichaelAquilina/hashedindex/blob/5a84dcd6c697ea04162cf7b2683fa2723845b51c/hashedindex/__init__.py#L123-L130 | 0.006897 |
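A brief sketch of how the method is reached in normal use; add_term_occurrence is hashedindex's usual way of populating the index, but treat the exact call as an assumption rather than something shown in the record above:

# Minimal sketch (assumes the hashedindex package is installed).
from hashedindex import HashedIndex

index = HashedIndex()
index.add_term_occurrence('fox', 'document1.txt')
index.add_term_occurrence('dog', 'document1.txt')

print(index.get_document_length('document1.txt'))     # 2: two term occurrences recorded
# index.get_document_length('missing.txt')            # would raise IndexError (DOCUMENT_DOES_NOT_EXIST)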
CamDavidsonPilon/lifetimes | lifetimes/utils.py | _find_first_transactions | def _find_first_transactions(
transactions,
customer_id_col,
datetime_col,
monetary_value_col=None,
datetime_format=None,
observation_period_end=None,
freq="D",
):
"""
Return dataframe with first transactions.
This takes a DataFrame of transaction data of the form:
customer_id, datetime [, monetary_value]
and appends a column named 'repeated' to the transaction log which indicates which rows
are repeated transactions for that customer_id.
Parameters
----------
transactions: :obj: DataFrame
a Pandas DataFrame that contains the customer_id col and the datetime col.
customer_id_col: string
the column in transactions DataFrame that denotes the customer_id
datetime_col: string
the column in transactions that denotes the datetime the purchase was made.
monetary_value_col: string, optional
the column in transactions that denotes the monetary value of the transaction.
Optional, only needed for customer lifetime value estimation models.
observation_period_end: :obj: datetime
a string or datetime to denote the final date of the study.
Events after this date are truncated. If not given, defaults to the max 'datetime_col'.
datetime_format: string, optional
a string that represents the timestamp format. Useful if Pandas can't understand
the provided format.
freq: string, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc. Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
"""
if observation_period_end is None:
observation_period_end = transactions[datetime_col].max()
if type(observation_period_end) == pd.Period:
observation_period_end = observation_period_end.to_timestamp()
select_columns = [customer_id_col, datetime_col]
if monetary_value_col:
select_columns.append(monetary_value_col)
transactions = transactions[select_columns].sort_values(select_columns).copy()
# make sure the date column uses datetime objects, and use Pandas' DateTimeIndex.to_period()
# to convert the column to a PeriodIndex which is useful for time-wise grouping and truncating
transactions[datetime_col] = pd.to_datetime(transactions[datetime_col], format=datetime_format)
transactions = transactions.set_index(datetime_col).to_period(freq).to_timestamp()
transactions = transactions.loc[(transactions.index <= observation_period_end)].reset_index()
period_groupby = transactions.groupby([datetime_col, customer_id_col], sort=False, as_index=False)
if monetary_value_col:
# when we have a monetary column, make sure to sum together any values in the same period
period_transactions = period_groupby.sum()
else:
# by calling head() on the groupby object, the datetime_col and customer_id_col columns
# will be reduced
period_transactions = period_groupby.head(1)
# initialize a new column where we will indicate which are the first transactions
period_transactions["first"] = False
# find all of the initial transactions and store as an index
first_transactions = period_transactions.groupby(customer_id_col, sort=True, as_index=False).head(1).index
# mark the initial transactions as True
period_transactions.loc[first_transactions, "first"] = True
select_columns.append("first")
# reset datetime_col to period
period_transactions[datetime_col] = pd.Index(period_transactions[datetime_col]).to_period(freq)
return period_transactions[select_columns] | python | def _find_first_transactions(
transactions,
customer_id_col,
datetime_col,
monetary_value_col=None,
datetime_format=None,
observation_period_end=None,
freq="D",
):
"""
Return dataframe with first transactions.
This takes a DataFrame of transaction data of the form:
customer_id, datetime [, monetary_value]
and appends a column named 'repeated' to the transaction log which indicates which rows
are repeated transactions for that customer_id.
Parameters
----------
transactions: :obj: DataFrame
a Pandas DataFrame that contains the customer_id col and the datetime col.
customer_id_col: string
the column in transactions DataFrame that denotes the customer_id
datetime_col: string
the column in transactions that denotes the datetime the purchase was made.
monetary_value_col: string, optional
the column in transactions that denotes the monetary value of the transaction.
Optional, only needed for customer lifetime value estimation models.
observation_period_end: :obj: datetime
a string or datetime to denote the final date of the study.
Events after this date are truncated. If not given, defaults to the max 'datetime_col'.
datetime_format: string, optional
a string that represents the timestamp format. Useful if Pandas can't understand
the provided format.
freq: string, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc. Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
"""
if observation_period_end is None:
observation_period_end = transactions[datetime_col].max()
if type(observation_period_end) == pd.Period:
observation_period_end = observation_period_end.to_timestamp()
select_columns = [customer_id_col, datetime_col]
if monetary_value_col:
select_columns.append(monetary_value_col)
transactions = transactions[select_columns].sort_values(select_columns).copy()
# make sure the date column uses datetime objects, and use Pandas' DateTimeIndex.to_period()
# to convert the column to a PeriodIndex which is useful for time-wise grouping and truncating
transactions[datetime_col] = pd.to_datetime(transactions[datetime_col], format=datetime_format)
transactions = transactions.set_index(datetime_col).to_period(freq).to_timestamp()
transactions = transactions.loc[(transactions.index <= observation_period_end)].reset_index()
period_groupby = transactions.groupby([datetime_col, customer_id_col], sort=False, as_index=False)
if monetary_value_col:
# when we have a monetary column, make sure to sum together any values in the same period
period_transactions = period_groupby.sum()
else:
# by calling head() on the groupby object, the datetime_col and customer_id_col columns
# will be reduced
period_transactions = period_groupby.head(1)
# initialize a new column where we will indicate which are the first transactions
period_transactions["first"] = False
# find all of the initial transactions and store as an index
first_transactions = period_transactions.groupby(customer_id_col, sort=True, as_index=False).head(1).index
# mark the initial transactions as True
period_transactions.loc[first_transactions, "first"] = True
select_columns.append("first")
# reset datetime_col to period
period_transactions[datetime_col] = pd.Index(period_transactions[datetime_col]).to_period(freq)
return period_transactions[select_columns] | [
"def",
"_find_first_transactions",
"(",
"transactions",
",",
"customer_id_col",
",",
"datetime_col",
",",
"monetary_value_col",
"=",
"None",
",",
"datetime_format",
"=",
"None",
",",
"observation_period_end",
"=",
"None",
",",
"freq",
"=",
"\"D\"",
",",
")",
":",
"if",
"observation_period_end",
"is",
"None",
":",
"observation_period_end",
"=",
"transactions",
"[",
"datetime_col",
"]",
".",
"max",
"(",
")",
"if",
"type",
"(",
"observation_period_end",
")",
"==",
"pd",
".",
"Period",
":",
"observation_period_end",
"=",
"observation_period_end",
".",
"to_timestamp",
"(",
")",
"select_columns",
"=",
"[",
"customer_id_col",
",",
"datetime_col",
"]",
"if",
"monetary_value_col",
":",
"select_columns",
".",
"append",
"(",
"monetary_value_col",
")",
"transactions",
"=",
"transactions",
"[",
"select_columns",
"]",
".",
"sort_values",
"(",
"select_columns",
")",
".",
"copy",
"(",
")",
"# make sure the date column uses datetime objects, and use Pandas' DateTimeIndex.to_period()",
"# to convert the column to a PeriodIndex which is useful for time-wise grouping and truncating",
"transactions",
"[",
"datetime_col",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"transactions",
"[",
"datetime_col",
"]",
",",
"format",
"=",
"datetime_format",
")",
"transactions",
"=",
"transactions",
".",
"set_index",
"(",
"datetime_col",
")",
".",
"to_period",
"(",
"freq",
")",
".",
"to_timestamp",
"(",
")",
"transactions",
"=",
"transactions",
".",
"loc",
"[",
"(",
"transactions",
".",
"index",
"<=",
"observation_period_end",
")",
"]",
".",
"reset_index",
"(",
")",
"period_groupby",
"=",
"transactions",
".",
"groupby",
"(",
"[",
"datetime_col",
",",
"customer_id_col",
"]",
",",
"sort",
"=",
"False",
",",
"as_index",
"=",
"False",
")",
"if",
"monetary_value_col",
":",
"# when we have a monetary column, make sure to sum together any values in the same period",
"period_transactions",
"=",
"period_groupby",
".",
"sum",
"(",
")",
"else",
":",
"# by calling head() on the groupby object, the datetime_col and customer_id_col columns",
"# will be reduced",
"period_transactions",
"=",
"period_groupby",
".",
"head",
"(",
"1",
")",
"# initialize a new column where we will indicate which are the first transactions",
"period_transactions",
"[",
"\"first\"",
"]",
"=",
"False",
"# find all of the initial transactions and store as an index",
"first_transactions",
"=",
"period_transactions",
".",
"groupby",
"(",
"customer_id_col",
",",
"sort",
"=",
"True",
",",
"as_index",
"=",
"False",
")",
".",
"head",
"(",
"1",
")",
".",
"index",
"# mark the initial transactions as True",
"period_transactions",
".",
"loc",
"[",
"first_transactions",
",",
"\"first\"",
"]",
"=",
"True",
"select_columns",
".",
"append",
"(",
"\"first\"",
")",
"# reset datetime_col to period",
"period_transactions",
"[",
"datetime_col",
"]",
"=",
"pd",
".",
"Index",
"(",
"period_transactions",
"[",
"datetime_col",
"]",
")",
".",
"to_period",
"(",
"freq",
")",
"return",
"period_transactions",
"[",
"select_columns",
"]"
] | Return dataframe with first transactions.
This takes a DataFrame of transaction data of the form:
customer_id, datetime [, monetary_value]
and appends a column named 'repeated' to the transaction log which indicates which rows
are repeated transactions for that customer_id.
Parameters
----------
transactions: :obj: DataFrame
a Pandas DataFrame that contains the customer_id col and the datetime col.
customer_id_col: string
the column in transactions DataFrame that denotes the customer_id
datetime_col: string
the column in transactions that denotes the datetime the purchase was made.
monetary_value_col: string, optional
the column in transactions that denotes the monetary value of the transaction.
Optional, only needed for customer lifetime value estimation models.
observation_period_end: :obj: datetime
a string or datetime to denote the final date of the study.
Events after this date are truncated. If not given, defaults to the max 'datetime_col'.
datetime_format: string, optional
a string that represents the timestamp format. Useful if Pandas can't understand
the provided format.
freq: string, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc. Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects | [
"Return",
"dataframe",
"with",
"first",
"transactions",
"."
] | train | https://github.com/CamDavidsonPilon/lifetimes/blob/f926308bc03c17c1d12fead729de43885cf13321/lifetimes/utils.py#L130-L209 | 0.00547 |
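A sketch of running the helper on a toy transaction log. It is a private function (the public entry point in lifetimes is summary_data_from_transaction_data), so this is purely illustrative and the column names and values are made up:

# Illustrative call on a tiny, made-up transaction log (assumes pandas and lifetimes are installed).
import pandas as pd
from lifetimes.utils import _find_first_transactions

transactions = pd.DataFrame({
    'id': [1, 1, 1, 2],
    'date': ['2019-01-01', '2019-01-01', '2019-02-10', '2019-01-05'],
    'spend': [10.0, 5.0, 7.5, 3.0],
})

first = _find_first_transactions(
    transactions,
    customer_id_col='id',
    datetime_col='date',
    monetary_value_col='spend',
    freq='D',
)

# Same-period purchases are summed (customer 1 on 2019-01-01 becomes 15.0) and the
# 'first' column marks each customer's initial transaction in the observation window.
print(first)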
KeplerGO/K2fov | K2fov/plot.py | K2FootprintPlot.plot_campaign_outline | def plot_campaign_outline(self, campaign=0, facecolor="#666666", text=None):
"""Plot the outline of a campaign as a contiguous gray patch.
Parameters
----------
campaign : int
K2 Campaign number.
facecolor : str
Color of the patch.
"""
# The outline is composed of two filled rectangles,
# defined by the first coordinate of the corner of four channels each
fov = getKeplerFov(campaign)
corners = fov.getCoordsOfChannelCorners()
for rectangle in [[4, 75, 84, 11], [15, 56, 71, 32]]:
ra_outline, dec_outline = [], []
for channel in rectangle:
idx = np.where(corners[::, 2] == channel)
ra_outline.append(corners[idx, 3][0][0])
dec_outline.append(corners[idx, 4][0][0])
ra = np.array(ra_outline + ra_outline[:1])
dec = np.array(dec_outline + dec_outline[:1])
if campaign == 1002: # Overlaps the meridian
ra[ra > 180] -= 360
myfill = self.ax.fill(ra, dec,
facecolor=facecolor,
zorder=151, lw=0)
# Print the campaign number on top of the outline
if text is None:
text = "{}".format(campaign)
ra_center, dec_center, _ = fov.getBoresight()
if campaign == 6:
dec_center -= 2
elif campaign == 12:
ra_center += 0.5
dec_center -= 1.7
elif campaign == 13:
dec_center -= 1.5
elif campaign == 16:
dec_center += 1.5
elif campaign == 18:
dec_center -= 1.5
elif campaign == 19:
dec_center += 1.7
elif campaign == 20:
dec_center += 1.5
offsets = {5: (40, -20), 16: (-20, 40), 18: (-15, -50)}
if campaign in [5]:
pl.annotate(text, xy=(ra_center, dec_center),
xycoords='data', ha='center',
xytext=offsets[campaign], textcoords='offset points',
size=18, zorder=0, color=facecolor,
arrowprops=dict(arrowstyle="-", ec=facecolor, lw=2))
else:
self.ax.text(ra_center, dec_center, text,
fontsize=18, color="white",
ha="center", va="center",
zorder=155)
return myfill | python | def plot_campaign_outline(self, campaign=0, facecolor="#666666", text=None):
"""Plot the outline of a campaign as a contiguous gray patch.
Parameters
----------
campaign : int
K2 Campaign number.
facecolor : str
Color of the patch.
"""
# The outline is composed of two filled rectangles,
# defined by the first coordinate of the corner of four channels each
fov = getKeplerFov(campaign)
corners = fov.getCoordsOfChannelCorners()
for rectangle in [[4, 75, 84, 11], [15, 56, 71, 32]]:
ra_outline, dec_outline = [], []
for channel in rectangle:
idx = np.where(corners[::, 2] == channel)
ra_outline.append(corners[idx, 3][0][0])
dec_outline.append(corners[idx, 4][0][0])
ra = np.array(ra_outline + ra_outline[:1])
dec = np.array(dec_outline + dec_outline[:1])
if campaign == 1002: # Overlaps the meridian
ra[ra > 180] -= 360
myfill = self.ax.fill(ra, dec,
facecolor=facecolor,
zorder=151, lw=0)
# Print the campaign number on top of the outline
if text is None:
text = "{}".format(campaign)
ra_center, dec_center, _ = fov.getBoresight()
if campaign == 6:
dec_center -= 2
elif campaign == 12:
ra_center += 0.5
dec_center -= 1.7
elif campaign == 13:
dec_center -= 1.5
elif campaign == 16:
dec_center += 1.5
elif campaign == 18:
dec_center -= 1.5
elif campaign == 19:
dec_center += 1.7
elif campaign == 20:
dec_center += 1.5
offsets = {5: (40, -20), 16: (-20, 40), 18: (-15, -50)}
if campaign in [5]:
pl.annotate(text, xy=(ra_center, dec_center),
xycoords='data', ha='center',
xytext=offsets[campaign], textcoords='offset points',
size=18, zorder=0, color=facecolor,
arrowprops=dict(arrowstyle="-", ec=facecolor, lw=2))
else:
self.ax.text(ra_center, dec_center, text,
fontsize=18, color="white",
ha="center", va="center",
zorder=155)
return myfill | [
"def",
"plot_campaign_outline",
"(",
"self",
",",
"campaign",
"=",
"0",
",",
"facecolor",
"=",
"\"#666666\"",
",",
"text",
"=",
"None",
")",
":",
"# The outline is composed of two filled rectangles,",
"# defined by the first coordinate of the corner of four channels each",
"fov",
"=",
"getKeplerFov",
"(",
"campaign",
")",
"corners",
"=",
"fov",
".",
"getCoordsOfChannelCorners",
"(",
")",
"for",
"rectangle",
"in",
"[",
"[",
"4",
",",
"75",
",",
"84",
",",
"11",
"]",
",",
"[",
"15",
",",
"56",
",",
"71",
",",
"32",
"]",
"]",
":",
"ra_outline",
",",
"dec_outline",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"channel",
"in",
"rectangle",
":",
"idx",
"=",
"np",
".",
"where",
"(",
"corners",
"[",
":",
":",
",",
"2",
"]",
"==",
"channel",
")",
"ra_outline",
".",
"append",
"(",
"corners",
"[",
"idx",
",",
"3",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"dec_outline",
".",
"append",
"(",
"corners",
"[",
"idx",
",",
"4",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"ra",
"=",
"np",
".",
"array",
"(",
"ra_outline",
"+",
"ra_outline",
"[",
":",
"1",
"]",
")",
"dec",
"=",
"np",
".",
"array",
"(",
"dec_outline",
"+",
"dec_outline",
"[",
":",
"1",
"]",
")",
"if",
"campaign",
"==",
"1002",
":",
"# Overlaps the meridian",
"ra",
"[",
"ra",
">",
"180",
"]",
"-=",
"360",
"myfill",
"=",
"self",
".",
"ax",
".",
"fill",
"(",
"ra",
",",
"dec",
",",
"facecolor",
"=",
"facecolor",
",",
"zorder",
"=",
"151",
",",
"lw",
"=",
"0",
")",
"# Print the campaign number on top of the outline",
"if",
"text",
"is",
"None",
":",
"text",
"=",
"\"{}\"",
".",
"format",
"(",
"campaign",
")",
"ra_center",
",",
"dec_center",
",",
"_",
"=",
"fov",
".",
"getBoresight",
"(",
")",
"if",
"campaign",
"==",
"6",
":",
"dec_center",
"-=",
"2",
"elif",
"campaign",
"==",
"12",
":",
"ra_center",
"+=",
"0.5",
"dec_center",
"-=",
"1.7",
"elif",
"campaign",
"==",
"13",
":",
"dec_center",
"-=",
"1.5",
"elif",
"campaign",
"==",
"16",
":",
"dec_center",
"+=",
"1.5",
"elif",
"campaign",
"==",
"18",
":",
"dec_center",
"-=",
"1.5",
"elif",
"campaign",
"==",
"19",
":",
"dec_center",
"+=",
"1.7",
"elif",
"campaign",
"==",
"20",
":",
"dec_center",
"+=",
"1.5",
"offsets",
"=",
"{",
"5",
":",
"(",
"40",
",",
"-",
"20",
")",
",",
"16",
":",
"(",
"-",
"20",
",",
"40",
")",
",",
"18",
":",
"(",
"-",
"15",
",",
"-",
"50",
")",
"}",
"if",
"campaign",
"in",
"[",
"5",
"]",
":",
"pl",
".",
"annotate",
"(",
"text",
",",
"xy",
"=",
"(",
"ra_center",
",",
"dec_center",
")",
",",
"xycoords",
"=",
"'data'",
",",
"ha",
"=",
"'center'",
",",
"xytext",
"=",
"offsets",
"[",
"campaign",
"]",
",",
"textcoords",
"=",
"'offset points'",
",",
"size",
"=",
"18",
",",
"zorder",
"=",
"0",
",",
"color",
"=",
"facecolor",
",",
"arrowprops",
"=",
"dict",
"(",
"arrowstyle",
"=",
"\"-\"",
",",
"ec",
"=",
"facecolor",
",",
"lw",
"=",
"2",
")",
")",
"else",
":",
"self",
".",
"ax",
".",
"text",
"(",
"ra_center",
",",
"dec_center",
",",
"text",
",",
"fontsize",
"=",
"18",
",",
"color",
"=",
"\"white\"",
",",
"ha",
"=",
"\"center\"",
",",
"va",
"=",
"\"center\"",
",",
"zorder",
"=",
"155",
")",
"return",
"myfill"
] | Plot the outline of a campaign as a contiguous gray patch.
Parameters
----------
campaign : int
K2 Campaign number.
facecolor : str
Color of the patch. | [
"Plot",
"the",
"outline",
"of",
"a",
"campaign",
"as",
"a",
"contiguous",
"gray",
"patch",
"."
] | train | https://github.com/KeplerGO/K2fov/blob/fb122b35687340e0357cba9e0dd47b3be0760693/K2fov/plot.py#L67-L126 | 0.000808 |
saltstack/salt | salt/minion.py | Minion._handle_tag_pillar_refresh | def _handle_tag_pillar_refresh(self, tag, data):
'''
Handle a pillar_refresh event
'''
yield self.pillar_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
) | python | def _handle_tag_pillar_refresh(self, tag, data):
'''
Handle a pillar_refresh event
'''
yield self.pillar_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
) | [
"def",
"_handle_tag_pillar_refresh",
"(",
"self",
",",
"tag",
",",
"data",
")",
":",
"yield",
"self",
".",
"pillar_refresh",
"(",
"force_refresh",
"=",
"data",
".",
"get",
"(",
"'force_refresh'",
",",
"False",
")",
",",
"notify",
"=",
"data",
".",
"get",
"(",
"'notify'",
",",
"False",
")",
")"
] | Handle a pillar_refresh event | [
"Handle",
"a",
"pillar_refresh",
"event"
] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2405-L2412 | 0.007692 |
nchopin/particles | particles/smc_samplers.py | ThetaParticles.copyto_at | def copyto_at(self, n, src, m):
"""Copy to at a given location.
Parameters
----------
n: int
index where to copy
src: `ThetaParticles` object
source
m: int
index of the element to be copied
Note
----
Basically, does self[n] <- src[m]
"""
for k in self.containers:
self.__dict__[k][n] = src.__dict__[k][m] | python | def copyto_at(self, n, src, m):
"""Copy to at a given location.
Parameters
----------
n: int
index where to copy
src: `ThetaParticles` object
source
m: int
index of the element to be copied
Note
----
Basically, does self[n] <- src[m]
"""
for k in self.containers:
self.__dict__[k][n] = src.__dict__[k][m] | [
"def",
"copyto_at",
"(",
"self",
",",
"n",
",",
"src",
",",
"m",
")",
":",
"for",
"k",
"in",
"self",
".",
"containers",
":",
"self",
".",
"__dict__",
"[",
"k",
"]",
"[",
"n",
"]",
"=",
"src",
".",
"__dict__",
"[",
"k",
"]",
"[",
"m",
"]"
] | Copy to at a given location.
Parameters
----------
n: int
index where to copy
src: `ThetaParticles` object
source
m: int
index of the element to be copied
Note
----
Basically, does self[n] <- src[m] | [
"Copy",
"to",
"at",
"a",
"given",
"location",
"."
] | train | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L272-L289 | 0.011236 |
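The method amounts to an element-wise copy across every container held by the object; the following stand-alone sketch (plain dicts of NumPy arrays, no particles import) mirrors the self[n] <- src[m] behaviour described in the docstring:

# Stand-alone illustration of the copyto_at semantics, without the particles package:
# for every shared container, element n of the destination is overwritten by
# element m of the source.
import numpy as np

dest = {'theta': np.array([0.0, 1.0, 2.0]), 'lpost': np.array([-3.0, -2.0, -1.0])}
src = {'theta': np.array([9.0, 8.0, 7.0]), 'lpost': np.array([-9.0, -8.0, -7.0])}

def copyto_at(dest, n, src, m):
    for k in dest:               # 'containers' in the real class lists these keys
        dest[k][n] = src[k][m]

copyto_at(dest, 0, src, 2)
print(dest['theta'])   # [7. 1. 2.]
print(dest['lpost'])   # [-7. -2. -1.]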
hazelcast/hazelcast-remote-controller | python-controller/hzrc/RemoteController.py | Client.terminateMember | def terminateMember(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
self.send_terminateMember(clusterId, memberId)
return self.recv_terminateMember() | python | def terminateMember(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
self.send_terminateMember(clusterId, memberId)
return self.recv_terminateMember() | [
"def",
"terminateMember",
"(",
"self",
",",
"clusterId",
",",
"memberId",
")",
":",
"self",
".",
"send_terminateMember",
"(",
"clusterId",
",",
"memberId",
")",
"return",
"self",
".",
"recv_terminateMember",
"(",
")"
] | Parameters:
- clusterId
- memberId | [
"Parameters",
":",
"-",
"clusterId",
"-",
"memberId"
] | train | https://github.com/hazelcast/hazelcast-remote-controller/blob/41b9e7d2d722b69ff79642eb34b702c9a6087635/python-controller/hzrc/RemoteController.py#L300-L307 | 0.008696 |
ultradns/python_rest_api_client | ultra_rest_client/ultra_rest_client.py | RestApiClient.create_web_forward | def create_web_forward(self, zone_name, request_to, redirect_to, forward_type):
"""Create a web forward record.
Arguments:
zone_name -- The zone in which the web forward is to be created.
request_to -- The URL to be redirected. You may use http:// and ftp://.
forward_type -- The type of forward. Valid options include:
Framed
HTTP_301_REDIRECT
HTTP_302_REDIRECT
HTTP_303_REDIRECT
HTTP_307_REDIRECT
"""
web_forward = {"requestTo": request_to, "defaultRedirectTo": redirect_to, "defaultForwardType": forward_type}
return self.rest_api_connection.post("/v1/zones/" + zone_name + "/webforwards", json.dumps(web_forward)) | python | def create_web_forward(self, zone_name, request_to, redirect_to, forward_type):
"""Create a web forward record.
Arguments:
zone_name -- The zone in which the web forward is to be created.
request_to -- The URL to be redirected. You may use http:// and ftp://.
forward_type -- The type of forward. Valid options include:
Framed
HTTP_301_REDIRECT
HTTP_302_REDIRECT
HTTP_303_REDIRECT
HTTP_307_REDIRECT
"""
web_forward = {"requestTo": request_to, "defaultRedirectTo": redirect_to, "defaultForwardType": forward_type}
return self.rest_api_connection.post("/v1/zones/" + zone_name + "/webforwards", json.dumps(web_forward)) | [
"def",
"create_web_forward",
"(",
"self",
",",
"zone_name",
",",
"request_to",
",",
"redirect_to",
",",
"forward_type",
")",
":",
"web_forward",
"=",
"{",
"\"requestTo\"",
":",
"request_to",
",",
"\"defaultRedirectTo\"",
":",
"redirect_to",
",",
"\"defaultForwardType\"",
":",
"forward_type",
"}",
"return",
"self",
".",
"rest_api_connection",
".",
"post",
"(",
"\"/v1/zones/\"",
"+",
"zone_name",
"+",
"\"/webforwards\"",
",",
"json",
".",
"dumps",
"(",
"web_forward",
")",
")"
] | Create a web forward record.
Arguments:
zone_name -- The zone in which the web forward is to be created.
request_to -- The URL to be redirected. You may use http:// and ftp://.
forward_type -- The type of forward. Valid options include:
Framed
HTTP_301_REDIRECT
HTTP_302_REDIRECT
HTTP_303_REDIRECT
HTTP_307_REDIRECT | [
"Create",
"a",
"web",
"forward",
"record",
"."
] | train | https://github.com/ultradns/python_rest_api_client/blob/e4095f28f5cb5e258b768c06ef7cf8b1915aa5ec/ultra_rest_client/ultra_rest_client.py#L413-L428 | 0.004662 |
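A hypothetical call; note that the Arguments list above omits redirect_to, which is the destination URL. Credentials, zone and URLs below are placeholders, and the client constructor taking username/password as its first arguments is an assumption:

# Hypothetical usage (all values are placeholders; the constructor arguments are an assumption).
from ultra_rest_client import RestApiClient

client = RestApiClient('my_username', 'my_password')

resp = client.create_web_forward(
    'example.com.',                   # zone_name: zone in which the web forward is created
    'www.example.com/*',              # request_to: URL (pattern) to be redirected
    'https://landing.example.org/',   # redirect_to: destination URL (not listed in the docstring above)
    'HTTP_301_REDIRECT',              # forward_type
)
print(resp)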
materialsproject/pymatgen | pymatgen/electronic_structure/boltztrap.py | BoltztrapAnalyzer.get_zt | def get_zt(self, output='eigs', doping_levels=True, relaxation_time=1e-14,
kl=1.0):
"""
Gives the ZT coefficient (S^2*cond*T/thermal cond) in either a full
3x3 tensor form,
as 3 eigenvalues, or as the average value (trace/3.0) If
doping_levels=True, the results are given at
different p and n doping levels (given by self.doping), otherwise it
is given as a series of
electron chemical potential values. We assume a constant relaxation
time and a constant
lattice thermal conductivity
Args:
output (string): the type of output. 'tensor' give the full 3x3
tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
k_l (float): lattice thermal cond in W/(m*K)
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The
'p' links to ZT
at p-type doping and 'n' to the ZT at n-type doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either the
sorted three eigenvalues of the symmetric
ZT tensor (format='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time and lattice
thermal conductivity
"""
result = None
result_doping = None
if doping_levels:
result_doping = {doping: {t: [] for t in
self._seebeck_doping[doping]} for
doping in self._seebeck_doping}
for doping in result_doping:
for t in result_doping[doping]:
for i in range(len(self.doping[doping])):
pf_tensor = np.dot(self._cond_doping[doping][t][i],
np.dot(
self._seebeck_doping[doping][t][
i],
self._seebeck_doping[doping][t][
i]))
thermal_conduct = (self._kappa_doping[doping][t][i]
- pf_tensor * t) * relaxation_time
result_doping[doping][t].append(
np.dot(pf_tensor * relaxation_time * t,
np.linalg.inv(
thermal_conduct + kl * np.eye(3, 3))))
else:
result = {t: [] for t in self._seebeck}
for t in result:
for i in range(len(self.mu_steps)):
pf_tensor = np.dot(self._cond[t][i],
np.dot(self._seebeck[t][i],
self._seebeck[t][i]))
thermal_conduct = (self._kappa[t][i]
- pf_tensor * t) * relaxation_time
result[t].append(np.dot(pf_tensor * relaxation_time * t,
np.linalg.inv(
thermal_conduct + kl *
np.eye(3, 3))))
return BoltztrapAnalyzer._format_to_output(result, result_doping,
output, doping_levels) | python | def get_zt(self, output='eigs', doping_levels=True, relaxation_time=1e-14,
kl=1.0):
"""
Gives the ZT coefficient (S^2*cond*T/thermal cond) in either a full
3x3 tensor form,
as 3 eigenvalues, or as the average value (trace/3.0) If
doping_levels=True, the results are given at
different p and n doping levels (given by self.doping), otherwise it
is given as a series of
electron chemical potential values. We assume a constant relaxation
time and a constant
lattice thermal conductivity
Args:
output (string): the type of output. 'tensor' give the full 3x3
tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
k_l (float): lattice thermal cond in W/(m*K)
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The
'p' links to ZT
at p-type doping and 'n' to the ZT at n-type doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either the
sorted three eigenvalues of the symmetric
ZT tensor (format='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time and lattice
thermal conductivity
"""
result = None
result_doping = None
if doping_levels:
result_doping = {doping: {t: [] for t in
self._seebeck_doping[doping]} for
doping in self._seebeck_doping}
for doping in result_doping:
for t in result_doping[doping]:
for i in range(len(self.doping[doping])):
pf_tensor = np.dot(self._cond_doping[doping][t][i],
np.dot(
self._seebeck_doping[doping][t][
i],
self._seebeck_doping[doping][t][
i]))
thermal_conduct = (self._kappa_doping[doping][t][i]
- pf_tensor * t) * relaxation_time
result_doping[doping][t].append(
np.dot(pf_tensor * relaxation_time * t,
np.linalg.inv(
thermal_conduct + kl * np.eye(3, 3))))
else:
result = {t: [] for t in self._seebeck}
for t in result:
for i in range(len(self.mu_steps)):
pf_tensor = np.dot(self._cond[t][i],
np.dot(self._seebeck[t][i],
self._seebeck[t][i]))
thermal_conduct = (self._kappa[t][i]
- pf_tensor * t) * relaxation_time
result[t].append(np.dot(pf_tensor * relaxation_time * t,
np.linalg.inv(
thermal_conduct + kl *
np.eye(3, 3))))
return BoltztrapAnalyzer._format_to_output(result, result_doping,
output, doping_levels) | [
"def",
"get_zt",
"(",
"self",
",",
"output",
"=",
"'eigs'",
",",
"doping_levels",
"=",
"True",
",",
"relaxation_time",
"=",
"1e-14",
",",
"kl",
"=",
"1.0",
")",
":",
"result",
"=",
"None",
"result_doping",
"=",
"None",
"if",
"doping_levels",
":",
"result_doping",
"=",
"{",
"doping",
":",
"{",
"t",
":",
"[",
"]",
"for",
"t",
"in",
"self",
".",
"_seebeck_doping",
"[",
"doping",
"]",
"}",
"for",
"doping",
"in",
"self",
".",
"_seebeck_doping",
"}",
"for",
"doping",
"in",
"result_doping",
":",
"for",
"t",
"in",
"result_doping",
"[",
"doping",
"]",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"doping",
"[",
"doping",
"]",
")",
")",
":",
"pf_tensor",
"=",
"np",
".",
"dot",
"(",
"self",
".",
"_cond_doping",
"[",
"doping",
"]",
"[",
"t",
"]",
"[",
"i",
"]",
",",
"np",
".",
"dot",
"(",
"self",
".",
"_seebeck_doping",
"[",
"doping",
"]",
"[",
"t",
"]",
"[",
"i",
"]",
",",
"self",
".",
"_seebeck_doping",
"[",
"doping",
"]",
"[",
"t",
"]",
"[",
"i",
"]",
")",
")",
"thermal_conduct",
"=",
"(",
"self",
".",
"_kappa_doping",
"[",
"doping",
"]",
"[",
"t",
"]",
"[",
"i",
"]",
"-",
"pf_tensor",
"*",
"t",
")",
"*",
"relaxation_time",
"result_doping",
"[",
"doping",
"]",
"[",
"t",
"]",
".",
"append",
"(",
"np",
".",
"dot",
"(",
"pf_tensor",
"*",
"relaxation_time",
"*",
"t",
",",
"np",
".",
"linalg",
".",
"inv",
"(",
"thermal_conduct",
"+",
"kl",
"*",
"np",
".",
"eye",
"(",
"3",
",",
"3",
")",
")",
")",
")",
"else",
":",
"result",
"=",
"{",
"t",
":",
"[",
"]",
"for",
"t",
"in",
"self",
".",
"_seebeck",
"}",
"for",
"t",
"in",
"result",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"mu_steps",
")",
")",
":",
"pf_tensor",
"=",
"np",
".",
"dot",
"(",
"self",
".",
"_cond",
"[",
"t",
"]",
"[",
"i",
"]",
",",
"np",
".",
"dot",
"(",
"self",
".",
"_seebeck",
"[",
"t",
"]",
"[",
"i",
"]",
",",
"self",
".",
"_seebeck",
"[",
"t",
"]",
"[",
"i",
"]",
")",
")",
"thermal_conduct",
"=",
"(",
"self",
".",
"_kappa",
"[",
"t",
"]",
"[",
"i",
"]",
"-",
"pf_tensor",
"*",
"t",
")",
"*",
"relaxation_time",
"result",
"[",
"t",
"]",
".",
"append",
"(",
"np",
".",
"dot",
"(",
"pf_tensor",
"*",
"relaxation_time",
"*",
"t",
",",
"np",
".",
"linalg",
".",
"inv",
"(",
"thermal_conduct",
"+",
"kl",
"*",
"np",
".",
"eye",
"(",
"3",
",",
"3",
")",
")",
")",
")",
"return",
"BoltztrapAnalyzer",
".",
"_format_to_output",
"(",
"result",
",",
"result_doping",
",",
"output",
",",
"doping_levels",
")"
] | Gives the ZT coefficient (S^2*cond*T/thermal cond) in either a full
3x3 tensor form,
as 3 eigenvalues, or as the average value (trace/3.0) If
doping_levels=True, the results are given at
different p and n doping levels (given by self.doping), otherwise it
is given as a series of
electron chemical potential values. We assume a constant relaxation
time and a constant
lattice thermal conductivity
Args:
output (string): the type of output. 'tensor' give the full 3x3
tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
k_l (float): lattice thermal cond in W/(m*K)
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The
'p' links to ZT
at p-type doping and 'n' to the ZT at n-type doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either the
sorted three eigenvalues of the symmetric
ZT tensor (format='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time and lattice
thermal conductivity | [
"Gives",
"the",
"ZT",
"coefficient",
"(",
"S^2",
"*",
"cond",
"*",
"T",
"/",
"thermal",
"cond",
")",
"in",
"either",
"a",
"full",
"3x3",
"tensor",
"form",
"as",
"3",
"eigenvalues",
"or",
"as",
"the",
"average",
"value",
"(",
"trace",
"/",
"3",
".",
"0",
")",
"If",
"doping_levels",
"=",
"True",
"the",
"results",
"are",
"given",
"at",
"different",
"p",
"and",
"n",
"doping",
"levels",
"(",
"given",
"by",
"self",
".",
"doping",
")",
"otherwise",
"it",
"is",
"given",
"as",
"a",
"series",
"of",
"electron",
"chemical",
"potential",
"values",
".",
"We",
"assume",
"a",
"constant",
"relaxation",
"time",
"and",
"a",
"constant",
"lattice",
"thermal",
"conductivity"
] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/electronic_structure/boltztrap.py#L1198-L1270 | 0.000791 |
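A sketch of pulling averaged ZT values out of a parsed BoltzTraP run; the output directory is a placeholder and from_files is assumed to be the usual way a BoltztrapAnalyzer is constructed:

# Illustrative use (the path is a placeholder; assumes a completed BoltzTraP calculation
# that pymatgen's BoltztrapAnalyzer can parse).
from pymatgen.electronic_structure.boltztrap import BoltztrapAnalyzer

an = BoltztrapAnalyzer.from_files("path/to/boltztrap_outputs")
zt = an.get_zt(output='average', doping_levels=True,
               relaxation_time=1e-14, kl=1.0)

# With doping_levels=True the result maps temperature -> {'p': [...], 'n': [...]},
# one averaged ZT value per doping level in the analyzer's doping lists.
for temp in sorted(zt):
    print(temp, zt[temp]['n'])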
orbingol/NURBS-Python | geomdl/linalg.py | vector_angle_between | def vector_angle_between(vector1, vector2, **kwargs):
""" Computes the angle between the two input vectors.
If the keyword argument ``degrees`` is set to *True*, then the angle will be in degrees. Otherwise, it will be
in radians. By default, ``degrees`` is set to *True*.
:param vector1: vector
:type vector1: list, tuple
:param vector2: vector
:type vector2: list, tuple
:return: angle between the vectors
:rtype: float
"""
degrees = kwargs.get('degrees', True)
magn1 = vector_magnitude(vector1)
magn2 = vector_magnitude(vector2)
acos_val = vector_dot(vector1, vector2) / (magn1 * magn2)
angle_radians = math.acos(acos_val)
if degrees:
return math.degrees(angle_radians)
else:
return angle_radians | python | def vector_angle_between(vector1, vector2, **kwargs):
""" Computes the angle between the two input vectors.
If the keyword argument ``degrees`` is set to *True*, then the angle will be in degrees. Otherwise, it will be
in radians. By default, ``degrees`` is set to *True*.
:param vector1: vector
:type vector1: list, tuple
:param vector2: vector
:type vector2: list, tuple
:return: angle between the vectors
:rtype: float
"""
degrees = kwargs.get('degrees', True)
magn1 = vector_magnitude(vector1)
magn2 = vector_magnitude(vector2)
acos_val = vector_dot(vector1, vector2) / (magn1 * magn2)
angle_radians = math.acos(acos_val)
if degrees:
return math.degrees(angle_radians)
else:
return angle_radians | [
"def",
"vector_angle_between",
"(",
"vector1",
",",
"vector2",
",",
"*",
"*",
"kwargs",
")",
":",
"degrees",
"=",
"kwargs",
".",
"get",
"(",
"'degrees'",
",",
"True",
")",
"magn1",
"=",
"vector_magnitude",
"(",
"vector1",
")",
"magn2",
"=",
"vector_magnitude",
"(",
"vector2",
")",
"acos_val",
"=",
"vector_dot",
"(",
"vector1",
",",
"vector2",
")",
"/",
"(",
"magn1",
"*",
"magn2",
")",
"angle_radians",
"=",
"math",
".",
"acos",
"(",
"acos_val",
")",
"if",
"degrees",
":",
"return",
"math",
".",
"degrees",
"(",
"angle_radians",
")",
"else",
":",
"return",
"angle_radians"
] | Computes the angle between the two input vectors.
If the keyword argument ``degrees`` is set to *True*, then the angle will be in degrees. Otherwise, it will be
in radians. By default, ``degrees`` is set to *True*.
:param vector1: vector
:type vector1: list, tuple
:param vector2: vector
:type vector2: list, tuple
:return: angle between the vectors
:rtype: float | [
"Computes",
"the",
"angle",
"between",
"the",
"two",
"input",
"vectors",
"."
] | train | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L237-L258 | 0.002548 |
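A quick numeric check of the helper on simple vectors (geomdl.linalg is the module shown above):

# Minimal check of vector_angle_between (assumes geomdl is installed).
from geomdl import linalg

print(linalg.vector_angle_between((1.0, 0.0, 0.0), (0.0, 1.0, 0.0)))                 # 90.0 (degrees by default)
print(linalg.vector_angle_between((1.0, 0.0, 0.0), (1.0, 1.0, 0.0), degrees=False))  # ~0.785398 rad (pi/4)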
lvh/txeasymail | txeasymail/mime.py | _encodeHeader | def _encodeHeader(headerValue):
"""
Encodes a header value.
Returns ASCII if possible, else returns an UTF-8 encoded e-mail header.
"""
try:
return headerValue.encode('ascii', 'strict')
except UnicodeError:
encoded = headerValue.encode("utf-8")
return header.Header(encoded, "UTF-8").encode() | python | def _encodeHeader(headerValue):
"""
Encodes a header value.
Returns ASCII if possible, else returns an UTF-8 encoded e-mail header.
"""
try:
return headerValue.encode('ascii', 'strict')
except UnicodeError:
encoded = headerValue.encode("utf-8")
return header.Header(encoded, "UTF-8").encode() | [
"def",
"_encodeHeader",
"(",
"headerValue",
")",
":",
"try",
":",
"return",
"headerValue",
".",
"encode",
"(",
"'ascii'",
",",
"'strict'",
")",
"except",
"UnicodeError",
":",
"encoded",
"=",
"headerValue",
".",
"encode",
"(",
"\"utf-8\"",
")",
"return",
"header",
".",
"Header",
"(",
"encoded",
",",
"\"UTF-8\"",
")",
".",
"encode",
"(",
")"
] | Encodes a header value.
Returns ASCII if possible, else returns an UTF-8 encoded e-mail header. | [
"Encodes",
"a",
"header",
"value",
"."
] | train | https://github.com/lvh/txeasymail/blob/7b845a5238b1371824854468646d54653a426f09/txeasymail/mime.py#L37-L47 | 0.002933 |
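The helper is private to txeasymail.mime; the stand-alone sketch below reproduces the same two branches using only the stdlib email.header module, so the RFC 2047 behaviour can be seen directly:

# Stand-alone sketch of the same behaviour using only the stdlib (mirrors the private helper above).
from email import header

def encode_header(value):
    try:
        return value.encode('ascii', 'strict')           # plain ASCII passes through as bytes
    except UnicodeError:
        encoded = value.encode('utf-8')
        return header.Header(encoded, 'UTF-8').encode()  # non-ASCII becomes an RFC 2047 header

print(encode_header(u'Hello'))     # b'Hello'
print(encode_header(u'H\xe9llo'))  # something like '=?utf-8?b?SMOpbGxv?='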
lreis2415/PyGeoC | pygeoc/utils.py | MathClass.rsr | def rsr(obsvalues, # type: Union[numpy.ndarray, List[Union[float, int]]]
simvalues # type: Union[numpy.ndarray, List[Union[float, int]]]
):
# type: (...) -> Union[float, numpy.ScalarType]
"""Calculate RSR (RMSE-to-SD Ratio).
Programmed according to equation (3) in
        Moriasi et al. 2007. Model evaluation guidelines for systematic quantification of accuracy
in watershed simulations. Transactions of the ASABE 50(3): 885-900.
Args:
obsvalues: observe values array
simvalues: simulate values array
Examples:
>>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\
4.00, 2.24, 29.28, 5.88, 0.86, 13.21]
>>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\
2.78, 2.76, 13.40, 2.70, 2.09, 1.62]
>>> MathClass.rsr(obs, sim) # doctest: +ELLIPSIS
0.7404026155824978...
Returns:
RSR value, or raise exception
"""
if len(obsvalues) != len(simvalues):
raise ValueError("The size of observed and simulated values must be"
" the same for RSR calculation!")
mean_obs = sum(obsvalues) / len(obsvalues)
return sqrt(sum(map(lambda x, y: (x - y) ** 2, obsvalues, simvalues))) / \
sqrt(sum(map(lambda x, y: (x - y) ** 2, obsvalues, [mean_obs] * len(obsvalues)))) | python | def rsr(obsvalues, # type: Union[numpy.ndarray, List[Union[float, int]]]
simvalues # type: Union[numpy.ndarray, List[Union[float, int]]]
):
# type: (...) -> Union[float, numpy.ScalarType]
"""Calculate RSR (RMSE-to-SD Ratio).
Programmed according to equation (3) in
        Moriasi et al. 2007. Model evaluation guidelines for systematic quantification of accuracy
in watershed simulations. Transactions of the ASABE 50(3): 885-900.
Args:
obsvalues: observe values array
simvalues: simulate values array
Examples:
>>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\
4.00, 2.24, 29.28, 5.88, 0.86, 13.21]
>>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\
2.78, 2.76, 13.40, 2.70, 2.09, 1.62]
>>> MathClass.rsr(obs, sim) # doctest: +ELLIPSIS
0.7404026155824978...
Returns:
RSR value, or raise exception
"""
if len(obsvalues) != len(simvalues):
raise ValueError("The size of observed and simulated values must be"
" the same for RSR calculation!")
mean_obs = sum(obsvalues) / len(obsvalues)
return sqrt(sum(map(lambda x, y: (x - y) ** 2, obsvalues, simvalues))) / \
sqrt(sum(map(lambda x, y: (x - y) ** 2, obsvalues, [mean_obs] * len(obsvalues)))) | [
"def",
"rsr",
"(",
"obsvalues",
",",
"# type: Union[numpy.ndarray, List[Union[float, int]]]",
"simvalues",
"# type: Union[numpy.ndarray, List[Union[float, int]]]",
")",
":",
"# type: (...) -> Union[float, numpy.ScalarType]",
"if",
"len",
"(",
"obsvalues",
")",
"!=",
"len",
"(",
"simvalues",
")",
":",
"raise",
"ValueError",
"(",
"\"The size of observed and simulated values must be\"",
"\" the same for RSR calculation!\"",
")",
"mean_obs",
"=",
"sum",
"(",
"obsvalues",
")",
"/",
"len",
"(",
"obsvalues",
")",
"return",
"sqrt",
"(",
"sum",
"(",
"map",
"(",
"lambda",
"x",
",",
"y",
":",
"(",
"x",
"-",
"y",
")",
"**",
"2",
",",
"obsvalues",
",",
"simvalues",
")",
")",
")",
"/",
"sqrt",
"(",
"sum",
"(",
"map",
"(",
"lambda",
"x",
",",
"y",
":",
"(",
"x",
"-",
"y",
")",
"**",
"2",
",",
"obsvalues",
",",
"[",
"mean_obs",
"]",
"*",
"len",
"(",
"obsvalues",
")",
")",
")",
")"
] | Calculate RSR (RMSE-to-SD Ratio).
Programmed according to equation (3) in
        Moriasi et al. 2007. Model evaluation guidelines for systematic quantification of accuracy
in watershed simulations. Transactions of the ASABE 50(3): 885-900.
Args:
obsvalues: observe values array
simvalues: simulate values array
Examples:
>>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\
4.00, 2.24, 29.28, 5.88, 0.86, 13.21]
>>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\
2.78, 2.76, 13.40, 2.70, 2.09, 1.62]
>>> MathClass.rsr(obs, sim) # doctest: +ELLIPSIS
0.7404026155824978...
Returns:
RSR value, or raise exception | [
"Calculate",
"RSR",
"(",
"RMSE",
"-",
"to",
"-",
"SD",
"Ratio",
")",
"."
] | train | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L345-L375 | 0.007555 |
aouyar/PyMunin | pysysinfo/freeswitch.py | FSinfo._execShowCountCmd | def _execShowCountCmd(self, showcmd):
"""Execute 'show' command and return result dictionary.
@param cmd: Command string.
@return: Result dictionary.
"""
result = None
lines = self._execCmd("show", showcmd + " count")
for line in lines:
mobj = re.match('\s*(\d+)\s+total', line)
if mobj:
return int(mobj.group(1))
return result | python | def _execShowCountCmd(self, showcmd):
"""Execute 'show' command and return result dictionary.
@param cmd: Command string.
@return: Result dictionary.
"""
result = None
lines = self._execCmd("show", showcmd + " count")
for line in lines:
mobj = re.match('\s*(\d+)\s+total', line)
if mobj:
return int(mobj.group(1))
return result | [
"def",
"_execShowCountCmd",
"(",
"self",
",",
"showcmd",
")",
":",
"result",
"=",
"None",
"lines",
"=",
"self",
".",
"_execCmd",
"(",
"\"show\"",
",",
"showcmd",
"+",
"\" count\"",
")",
"for",
"line",
"in",
"lines",
":",
"mobj",
"=",
"re",
".",
"match",
"(",
"'\\s*(\\d+)\\s+total'",
",",
"line",
")",
"if",
"mobj",
":",
"return",
"int",
"(",
"mobj",
".",
"group",
"(",
"1",
")",
")",
"return",
"result"
] | Execute 'show' command and return result dictionary.
@param cmd: Command string.
@return: Result dictionary. | [
"Execute",
"show",
"command",
"and",
"return",
"result",
"dictionary",
"."
] | train | https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/freeswitch.py#L109-L122 | 0.015184 |
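The useful detail here is the regex that extracts the count from FreeSWITCH's "show <something> count" output (note the docstring's "@param cmd"/"@return: Result dictionary." wording does not quite match the code, which takes showcmd and returns an integer count or None). A stand-alone sketch of that parsing, with a made-up output line:

# Stand-alone sketch of the parsing step; the sample lines imitate what
# "fs_cli -x 'show channels count'" prints and are made up here.
import re

sample_output = [
    "",
    "3 total.",
]

count = None
for line in sample_output:
    mobj = re.match(r'\s*(\d+)\s+total', line)
    if mobj:
        count = int(mobj.group(1))
        break

print(count)   # -> 3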
vertexproject/synapse | synapse/lib/jupyter.py | getTempCoreProx | async def getTempCoreProx(mods=None):
'''
    Get a Telepath Proxy to a Cortex instance which is backed by a temporary Cortex.
Args:
mods (list): A list of additional CoreModules to load in the Cortex.
Notes:
The Proxy returned by this should be fini()'d to tear down the temporary Cortex.
Returns:
s_telepath.Proxy
'''
acm = genTempCoreProxy(mods)
prox = await acm.__aenter__()
# Use object.__setattr__ to hulk smash and avoid proxy getattr magick
object.__setattr__(prox, '_acm', acm)
async def onfini():
await prox._acm.__aexit__(None, None, None)
prox.onfini(onfini)
return prox | python | async def getTempCoreProx(mods=None):
'''
    Get a Telepath Proxy to a Cortex instance which is backed by a temporary Cortex.
Args:
mods (list): A list of additional CoreModules to load in the Cortex.
Notes:
The Proxy returned by this should be fini()'d to tear down the temporary Cortex.
Returns:
s_telepath.Proxy
'''
acm = genTempCoreProxy(mods)
prox = await acm.__aenter__()
# Use object.__setattr__ to hulk smash and avoid proxy getattr magick
object.__setattr__(prox, '_acm', acm)
async def onfini():
await prox._acm.__aexit__(None, None, None)
prox.onfini(onfini)
return prox | [
"async",
"def",
"getTempCoreProx",
"(",
"mods",
"=",
"None",
")",
":",
"acm",
"=",
"genTempCoreProxy",
"(",
"mods",
")",
"prox",
"=",
"await",
"acm",
".",
"__aenter__",
"(",
")",
"# Use object.__setattr__ to hulk smash and avoid proxy getattr magick",
"object",
".",
"__setattr__",
"(",
"prox",
",",
"'_acm'",
",",
"acm",
")",
"async",
"def",
"onfini",
"(",
")",
":",
"await",
"prox",
".",
"_acm",
".",
"__aexit__",
"(",
"None",
",",
"None",
",",
"None",
")",
"prox",
".",
"onfini",
"(",
"onfini",
")",
"return",
"prox"
] | Get a Telepath Proxy to a Cortex instance which is backed by a temporary Cortex.
Args:
mods (list): A list of additional CoreModules to load in the Cortex.
Notes:
The Proxy returned by this should be fini()'d to tear down the temporary Cortex.
Returns:
s_telepath.Proxy | [
"Get",
"a",
"Telepath",
"Proxt",
"to",
"a",
"Cortex",
"instance",
"which",
"is",
"backed",
"by",
"a",
"temporary",
"Cortex",
"."
] | train | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/jupyter.py#L219-L239 | 0.006015 |
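A hedged async usage sketch; the import path matches the file shown above, and tearing the proxy down with fini() also tears down the temporary Cortex via the onfini hook:

# Illustrative async usage (assumes synapse is installed).
import asyncio
from synapse.lib.jupyter import getTempCoreProx

async def main():
    prox = await getTempCoreProx()
    # ... exercise the Telepath proxy here (e.g. run Storm queries against the temp Cortex) ...
    await prox.fini()    # fini()'ing the proxy also exits the temporary-Cortex context manager

asyncio.run(main())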
apple/turicreate | src/external/xgboost/python-package/xgboost/core.py | DMatrix._init_from_csc | def _init_from_csc(self, csc):
"""
Initialize data from a CSC matrix.
"""
if len(csc.indices) != len(csc.data):
raise ValueError('length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data)))
self.handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromCSC(c_array(ctypes.c_ulong, csc.indptr),
c_array(ctypes.c_uint, csc.indices),
c_array(ctypes.c_float, csc.data),
len(csc.indptr), len(csc.data),
ctypes.byref(self.handle))) | python | def _init_from_csc(self, csc):
"""
Initialize data from a CSC matrix.
"""
if len(csc.indices) != len(csc.data):
raise ValueError('length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data)))
self.handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromCSC(c_array(ctypes.c_ulong, csc.indptr),
c_array(ctypes.c_uint, csc.indices),
c_array(ctypes.c_float, csc.data),
len(csc.indptr), len(csc.data),
ctypes.byref(self.handle))) | [
"def",
"_init_from_csc",
"(",
"self",
",",
"csc",
")",
":",
"if",
"len",
"(",
"csc",
".",
"indices",
")",
"!=",
"len",
"(",
"csc",
".",
"data",
")",
":",
"raise",
"ValueError",
"(",
"'length mismatch: {} vs {}'",
".",
"format",
"(",
"len",
"(",
"csc",
".",
"indices",
")",
",",
"len",
"(",
"csc",
".",
"data",
")",
")",
")",
"self",
".",
"handle",
"=",
"ctypes",
".",
"c_void_p",
"(",
")",
"_check_call",
"(",
"_LIB",
".",
"XGDMatrixCreateFromCSC",
"(",
"c_array",
"(",
"ctypes",
".",
"c_ulong",
",",
"csc",
".",
"indptr",
")",
",",
"c_array",
"(",
"ctypes",
".",
"c_uint",
",",
"csc",
".",
"indices",
")",
",",
"c_array",
"(",
"ctypes",
".",
"c_float",
",",
"csc",
".",
"data",
")",
",",
"len",
"(",
"csc",
".",
"indptr",
")",
",",
"len",
"(",
"csc",
".",
"data",
")",
",",
"ctypes",
".",
"byref",
"(",
"self",
".",
"handle",
")",
")",
")"
] | Initialize data from a CSC matrix. | [
"Initialize",
"data",
"from",
"a",
"CSC",
"matrix",
"."
] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L248-L259 | 0.008696 |
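This private initializer is reached when a scipy.sparse CSC matrix is handed to the DMatrix constructor; a small sketch of that public path (using the standalone xgboost package rather than the vendored copy above):

# Typical way this code path is exercised (assumes numpy, scipy and xgboost are installed).
import numpy as np
import scipy.sparse as sp
import xgboost as xgb

dense = np.array([[1.0, 0.0, 2.0],
                  [0.0, 3.0, 0.0]])
csc = sp.csc_matrix(dense)      # indptr / indices / data are exactly what _init_from_csc consumes

dtrain = xgb.DMatrix(csc, label=[0, 1])
print(dtrain.num_row(), dtrain.num_col())   # 2 3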
dmwm/DBS | Server/Python/src/dbs/business/DBSFile.py | DBSFile.listFileParents | def listFileParents(self, logical_file_name="", block_id=0, block_name=""):
"""
required parameter: logical_file_name or block_name
returns: this_logical_file_name, parent_logical_file_name, parent_file_id
"""
#self.logger.debug("lfn %s, block_name %s, block_id :%s" % (logical_file_name, block_name, block_id))
if not logical_file_name and not block_name and not block_id:
dbsExceptionHandler('dbsException-invalid-input', \
"Logical_file_name, block_id or block_name is required for fileparents api", self.logger.exception )
with self.dbi.connection() as conn:
sqlresult = self.fileparentlist.execute(conn, logical_file_name, block_id, block_name)
d = {}
#self.logger.debug(sqlresult)
for i in sqlresult:
k = i['this_logical_file_name']
v = i['parent_logical_file_name']
d.setdefault(k, []).append(v)
for k, v in d.iteritems():
yield {'logical_file_name':k, 'parent_logical_file_name': v}
del d | python | def listFileParents(self, logical_file_name="", block_id=0, block_name=""):
"""
required parameter: logical_file_name or block_name
returns: this_logical_file_name, parent_logical_file_name, parent_file_id
"""
#self.logger.debug("lfn %s, block_name %s, block_id :%s" % (logical_file_name, block_name, block_id))
if not logical_file_name and not block_name and not block_id:
dbsExceptionHandler('dbsException-invalid-input', \
"Logical_file_name, block_id or block_name is required for fileparents api", self.logger.exception )
with self.dbi.connection() as conn:
sqlresult = self.fileparentlist.execute(conn, logical_file_name, block_id, block_name)
d = {}
#self.logger.debug(sqlresult)
for i in sqlresult:
k = i['this_logical_file_name']
v = i['parent_logical_file_name']
d.setdefault(k, []).append(v)
for k, v in d.iteritems():
yield {'logical_file_name':k, 'parent_logical_file_name': v}
del d | [
"def",
"listFileParents",
"(",
"self",
",",
"logical_file_name",
"=",
"\"\"",
",",
"block_id",
"=",
"0",
",",
"block_name",
"=",
"\"\"",
")",
":",
"#self.logger.debug(\"lfn %s, block_name %s, block_id :%s\" % (logical_file_name, block_name, block_id))",
"if",
"not",
"logical_file_name",
"and",
"not",
"block_name",
"and",
"not",
"block_id",
":",
"dbsExceptionHandler",
"(",
"'dbsException-invalid-input'",
",",
"\"Logical_file_name, block_id or block_name is required for fileparents api\"",
",",
"self",
".",
"logger",
".",
"exception",
")",
"with",
"self",
".",
"dbi",
".",
"connection",
"(",
")",
"as",
"conn",
":",
"sqlresult",
"=",
"self",
".",
"fileparentlist",
".",
"execute",
"(",
"conn",
",",
"logical_file_name",
",",
"block_id",
",",
"block_name",
")",
"d",
"=",
"{",
"}",
"#self.logger.debug(sqlresult)",
"for",
"i",
"in",
"sqlresult",
":",
"k",
"=",
"i",
"[",
"'this_logical_file_name'",
"]",
"v",
"=",
"i",
"[",
"'parent_logical_file_name'",
"]",
"d",
".",
"setdefault",
"(",
"k",
",",
"[",
"]",
")",
".",
"append",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"d",
".",
"iteritems",
"(",
")",
":",
"yield",
"{",
"'logical_file_name'",
":",
"k",
",",
"'parent_logical_file_name'",
":",
"v",
"}",
"del",
"d"
] | required parameter: logical_file_name or block_name
returns: this_logical_file_name, parent_logical_file_name, parent_file_id | [
"required",
"parameter",
":",
"logical_file_name",
"or",
"block_name",
"returns",
":",
"this_logical_file_name",
"parent_logical_file_name",
"parent_file_id"
] | train | https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/business/DBSFile.py#L97-L116 | 0.010753 |
honzamach/pynspect | pynspect/compilers.py | IDEAFilterCompiler._compile_operation_rule | def _compile_operation_rule(self, rule, left, right, result_class):
"""
        Compile given operation rule, when possible for given combination of
operation operands.
"""
# Make sure variables always have constant with correct datatype on the
# opposite side of operation.
if isinstance(left, VariableRule) and isinstance(right, (ConstantRule, ListRule)):
return self._cor_compile(
rule,
left,
right,
result_class,
clean_variable(left.value),
self.compilations_variable
)
if isinstance(right, VariableRule) and isinstance(left, (ConstantRule, ListRule)):
return self._cor_compile(
rule,
right,
left,
result_class,
clean_variable(right.value),
self.compilations_variable
)
# Make sure functions always have constant with correct datatype on the
# opposite side of operation.
if isinstance(left, FunctionRule) and isinstance(right, (ConstantRule, ListRule)):
return self._cor_compile(
rule,
left,
right,
result_class,
left.function,
self.compilations_function
)
if isinstance(right, FunctionRule) and isinstance(left, (ConstantRule, ListRule)):
return self._cor_compile(
rule,
right,
left,
result_class,
right.function,
self.compilations_function
)
# In all other cases just keep things the way they are.
return result_class(rule.operation, left, right) | python | def _compile_operation_rule(self, rule, left, right, result_class):
"""
        Compile given operation rule, when possible for given combination of
operation operands.
"""
# Make sure variables always have constant with correct datatype on the
# opposite side of operation.
if isinstance(left, VariableRule) and isinstance(right, (ConstantRule, ListRule)):
return self._cor_compile(
rule,
left,
right,
result_class,
clean_variable(left.value),
self.compilations_variable
)
if isinstance(right, VariableRule) and isinstance(left, (ConstantRule, ListRule)):
return self._cor_compile(
rule,
right,
left,
result_class,
clean_variable(right.value),
self.compilations_variable
)
# Make sure functions always have constant with correct datatype on the
# opposite side of operation.
if isinstance(left, FunctionRule) and isinstance(right, (ConstantRule, ListRule)):
return self._cor_compile(
rule,
left,
right,
result_class,
left.function,
self.compilations_function
)
if isinstance(right, FunctionRule) and isinstance(left, (ConstantRule, ListRule)):
return self._cor_compile(
rule,
right,
left,
result_class,
right.function,
self.compilations_function
)
# In all other cases just keep things the way they are.
return result_class(rule.operation, left, right) | [
"def",
"_compile_operation_rule",
"(",
"self",
",",
"rule",
",",
"left",
",",
"right",
",",
"result_class",
")",
":",
"# Make sure variables always have constant with correct datatype on the",
"# opposite side of operation.",
"if",
"isinstance",
"(",
"left",
",",
"VariableRule",
")",
"and",
"isinstance",
"(",
"right",
",",
"(",
"ConstantRule",
",",
"ListRule",
")",
")",
":",
"return",
"self",
".",
"_cor_compile",
"(",
"rule",
",",
"left",
",",
"right",
",",
"result_class",
",",
"clean_variable",
"(",
"left",
".",
"value",
")",
",",
"self",
".",
"compilations_variable",
")",
"if",
"isinstance",
"(",
"right",
",",
"VariableRule",
")",
"and",
"isinstance",
"(",
"left",
",",
"(",
"ConstantRule",
",",
"ListRule",
")",
")",
":",
"return",
"self",
".",
"_cor_compile",
"(",
"rule",
",",
"right",
",",
"left",
",",
"result_class",
",",
"clean_variable",
"(",
"right",
".",
"value",
")",
",",
"self",
".",
"compilations_variable",
")",
"# Make sure functions always have constant with correct datatype on the",
"# opposite side of operation.",
"if",
"isinstance",
"(",
"left",
",",
"FunctionRule",
")",
"and",
"isinstance",
"(",
"right",
",",
"(",
"ConstantRule",
",",
"ListRule",
")",
")",
":",
"return",
"self",
".",
"_cor_compile",
"(",
"rule",
",",
"left",
",",
"right",
",",
"result_class",
",",
"left",
".",
"function",
",",
"self",
".",
"compilations_function",
")",
"if",
"isinstance",
"(",
"right",
",",
"FunctionRule",
")",
"and",
"isinstance",
"(",
"left",
",",
"(",
"ConstantRule",
",",
"ListRule",
")",
")",
":",
"return",
"self",
".",
"_cor_compile",
"(",
"rule",
",",
"right",
",",
"left",
",",
"result_class",
",",
"right",
".",
"function",
",",
"self",
".",
"compilations_function",
")",
"# In all other cases just keep things the way they are.",
"return",
"result_class",
"(",
"rule",
".",
"operation",
",",
"left",
",",
"right",
")"
] | Compile given operation rule, when possible for given combination of
operation operands. | [
"Compile",
"given",
"operation",
"rule",
"when",
"possible",
"for",
"given",
"compination",
"of",
"operation",
"operands",
"."
] | train | https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/compilers.py#L293-L342 | 0.003261 |
hfaran/progressive | progressive/bar.py | Bar.max_width | def max_width(self):
"""Get maximum width of progress bar
:rtype: int
:returns: Maximum column width of progress bar
"""
value, unit = float(self._width_str[:-1]), self._width_str[-1]
ensure(unit in ["c", "%"], ValueError,
"Width unit must be either 'c' or '%'")
if unit == "c":
ensure(value <= self.columns, ValueError,
"Terminal only has {} columns, cannot draw "
"bar of size {}.".format(self.columns, value))
retval = value
else: # unit == "%"
ensure(0 < value <= 100, ValueError,
"value=={} does not satisfy 0 < value <= 100".format(value))
dec = value / 100
retval = dec * self.columns
return floor(retval) | python | def max_width(self):
"""Get maximum width of progress bar
:rtype: int
:returns: Maximum column width of progress bar
"""
value, unit = float(self._width_str[:-1]), self._width_str[-1]
ensure(unit in ["c", "%"], ValueError,
"Width unit must be either 'c' or '%'")
if unit == "c":
ensure(value <= self.columns, ValueError,
"Terminal only has {} columns, cannot draw "
"bar of size {}.".format(self.columns, value))
retval = value
else: # unit == "%"
ensure(0 < value <= 100, ValueError,
"value=={} does not satisfy 0 < value <= 100".format(value))
dec = value / 100
retval = dec * self.columns
return floor(retval) | [
"def",
"max_width",
"(",
"self",
")",
":",
"value",
",",
"unit",
"=",
"float",
"(",
"self",
".",
"_width_str",
"[",
":",
"-",
"1",
"]",
")",
",",
"self",
".",
"_width_str",
"[",
"-",
"1",
"]",
"ensure",
"(",
"unit",
"in",
"[",
"\"c\"",
",",
"\"%\"",
"]",
",",
"ValueError",
",",
"\"Width unit must be either 'c' or '%'\"",
")",
"if",
"unit",
"==",
"\"c\"",
":",
"ensure",
"(",
"value",
"<=",
"self",
".",
"columns",
",",
"ValueError",
",",
"\"Terminal only has {} columns, cannot draw \"",
"\"bar of size {}.\"",
".",
"format",
"(",
"self",
".",
"columns",
",",
"value",
")",
")",
"retval",
"=",
"value",
"else",
":",
"# unit == \"%\"",
"ensure",
"(",
"0",
"<",
"value",
"<=",
"100",
",",
"ValueError",
",",
"\"value=={} does not satisfy 0 < value <= 100\"",
".",
"format",
"(",
"value",
")",
")",
"dec",
"=",
"value",
"/",
"100",
"retval",
"=",
"dec",
"*",
"self",
".",
"columns",
"return",
"floor",
"(",
"retval",
")"
] | Get maximum width of progress bar
:rtype: int
:returns: Maximum column width of progress bar | [
"Get",
"maximum",
"width",
"of",
"progress",
"bar"
] | train | https://github.com/hfaran/progressive/blob/e39c7fb17405dbe997c3417a5993b94ef16dab0a/progressive/bar.py#L144-L166 | 0.002436 |
tethysplatform/condorpy | condorpy/job.py | Job._update_status | def _update_status(self, sub_job_num=None):
"""Gets the job status.
Return:
str: The current status of the job
"""
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '')
log.info('Job %s status: %s', job_id, out)
if not sub_job_num:
if len(out) >= self.num_jobs:
out = out[:self.num_jobs]
else:
msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
log.error(msg)
raise HTCondorError(msg)
#initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for status_code_str in out:
status_code = 0
try:
status_code = int(status_code_str)
except ValueError:
pass
key = CONDOR_JOB_STATUSES[status_code]
status_dict[key] += 1
return status_dict | python | def _update_status(self, sub_job_num=None):
"""Gets the job status.
Return:
str: The current status of the job
"""
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '')
log.info('Job %s status: %s', job_id, out)
if not sub_job_num:
if len(out) >= self.num_jobs:
out = out[:self.num_jobs]
else:
msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
log.error(msg)
raise HTCondorError(msg)
#initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for status_code_str in out:
status_code = 0
try:
status_code = int(status_code_str)
except ValueError:
pass
key = CONDOR_JOB_STATUSES[status_code]
status_dict[key] += 1
return status_dict | [
"def",
"_update_status",
"(",
"self",
",",
"sub_job_num",
"=",
"None",
")",
":",
"job_id",
"=",
"'%s.%s'",
"%",
"(",
"self",
".",
"cluster_id",
",",
"sub_job_num",
")",
"if",
"sub_job_num",
"else",
"str",
"(",
"self",
".",
"cluster_id",
")",
"format",
"=",
"[",
"'-format'",
",",
"'\"%d\"'",
",",
"'JobStatus'",
"]",
"cmd",
"=",
"'condor_q {0} {1} && condor_history {0} {1}'",
".",
"format",
"(",
"job_id",
",",
"' '",
".",
"join",
"(",
"format",
")",
")",
"args",
"=",
"[",
"cmd",
"]",
"out",
",",
"err",
"=",
"self",
".",
"_execute",
"(",
"args",
",",
"shell",
"=",
"True",
",",
"run_in_job_dir",
"=",
"False",
")",
"if",
"err",
":",
"log",
".",
"error",
"(",
"'Error while updating status for job %s: %s'",
",",
"job_id",
",",
"err",
")",
"raise",
"HTCondorError",
"(",
"err",
")",
"if",
"not",
"out",
":",
"log",
".",
"error",
"(",
"'Error while updating status for job %s: Job not found.'",
",",
"job_id",
")",
"raise",
"HTCondorError",
"(",
"'Job not found.'",
")",
"out",
"=",
"out",
".",
"replace",
"(",
"'\\\"'",
",",
"''",
")",
"log",
".",
"info",
"(",
"'Job %s status: %s'",
",",
"job_id",
",",
"out",
")",
"if",
"not",
"sub_job_num",
":",
"if",
"len",
"(",
"out",
")",
">=",
"self",
".",
"num_jobs",
":",
"out",
"=",
"out",
"[",
":",
"self",
".",
"num_jobs",
"]",
"else",
":",
"msg",
"=",
"'There are {0} sub-jobs, but {1} status(es).'",
".",
"format",
"(",
"self",
".",
"num_jobs",
",",
"len",
"(",
"out",
")",
")",
"log",
".",
"error",
"(",
"msg",
")",
"raise",
"HTCondorError",
"(",
"msg",
")",
"#initialize status dictionary",
"status_dict",
"=",
"dict",
"(",
")",
"for",
"val",
"in",
"CONDOR_JOB_STATUSES",
".",
"values",
"(",
")",
":",
"status_dict",
"[",
"val",
"]",
"=",
"0",
"for",
"status_code_str",
"in",
"out",
":",
"status_code",
"=",
"0",
"try",
":",
"status_code",
"=",
"int",
"(",
"status_code_str",
")",
"except",
"ValueError",
":",
"pass",
"key",
"=",
"CONDOR_JOB_STATUSES",
"[",
"status_code",
"]",
"status_dict",
"[",
"key",
"]",
"+=",
"1",
"return",
"status_dict"
] | Gets the job status.
Return:
str: The current status of the job | [
"Gets",
"the",
"job",
"status",
"."
] | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L350-L394 | 0.004944 |
google/grr | grr/server/grr_response_server/client_index.py | BulkLabel | def BulkLabel(label, hostnames, owner=None, token=None, client_index=None):
"""Assign a label to a group of clients based on hostname.
Sets a label as an identifier to a group of clients. Removes the label from
other clients.
This can be used to automate labeling clients based on externally derived
attributes, for example machines assigned to particular users, or machines
fulfilling particular roles.
Args:
label: The label to apply.
hostnames: The collection of hostnames that should have the label.
owner: The owner for the newly created labels. Defaults to token.username.
token: The authentication token.
client_index: An optional client index to use. If not provided, use the
default client index.
"""
if client_index is None:
client_index = CreateClientIndex(token=token)
fqdns = set()
for hostname in hostnames:
fqdns.add(hostname.lower())
labelled_urns = client_index.LookupClients(["+label:%s" % label])
# If a labelled client fqdn isn't in the set of target fqdns remove the label.
# Labelled clients with a target fqdn need no action and are removed from the
# set of target fqdns.
for client in aff4.FACTORY.MultiOpen(
labelled_urns, token=token, aff4_type=aff4_grr.VFSGRRClient, mode="rw"):
fqdn = utils.SmartStr(client.Get("FQDN")).lower()
if fqdn not in fqdns:
client_index.RemoveClientLabels(client)
client.RemoveLabel(label, owner=owner)
client.Flush()
client_index.AddClient(client)
else:
fqdns.discard(fqdn)
# The residual set of fqdns needs labelling.
# Get the latest URN for these clients and open them to add the label.
urns = []
keywords = ["+host:%s" % fqdn for fqdn in fqdns]
for client_list in client_index.ReadClientPostingLists(keywords).itervalues():
for client_id in client_list:
urns.append(rdfvalue.RDFURN(client_id))
for client in aff4.FACTORY.MultiOpen(
urns, token=token, aff4_type=aff4_grr.VFSGRRClient, mode="rw"):
client.AddLabel(label, owner=owner)
client.Flush()
client_index.AddClient(client) | python | def BulkLabel(label, hostnames, owner=None, token=None, client_index=None):
"""Assign a label to a group of clients based on hostname.
Sets a label as an identifier to a group of clients. Removes the label from
other clients.
This can be used to automate labeling clients based on externally derived
attributes, for example machines assigned to particular users, or machines
fulfilling particular roles.
Args:
label: The label to apply.
hostnames: The collection of hostnames that should have the label.
owner: The owner for the newly created labels. Defaults to token.username.
token: The authentication token.
client_index: An optional client index to use. If not provided, use the
default client index.
"""
if client_index is None:
client_index = CreateClientIndex(token=token)
fqdns = set()
for hostname in hostnames:
fqdns.add(hostname.lower())
labelled_urns = client_index.LookupClients(["+label:%s" % label])
# If a labelled client fqdn isn't in the set of target fqdns remove the label.
# Labelled clients with a target fqdn need no action and are removed from the
# set of target fqdns.
for client in aff4.FACTORY.MultiOpen(
labelled_urns, token=token, aff4_type=aff4_grr.VFSGRRClient, mode="rw"):
fqdn = utils.SmartStr(client.Get("FQDN")).lower()
if fqdn not in fqdns:
client_index.RemoveClientLabels(client)
client.RemoveLabel(label, owner=owner)
client.Flush()
client_index.AddClient(client)
else:
fqdns.discard(fqdn)
# The residual set of fqdns needs labelling.
# Get the latest URN for these clients and open them to add the label.
urns = []
keywords = ["+host:%s" % fqdn for fqdn in fqdns]
for client_list in client_index.ReadClientPostingLists(keywords).itervalues():
for client_id in client_list:
urns.append(rdfvalue.RDFURN(client_id))
for client in aff4.FACTORY.MultiOpen(
urns, token=token, aff4_type=aff4_grr.VFSGRRClient, mode="rw"):
client.AddLabel(label, owner=owner)
client.Flush()
client_index.AddClient(client) | [
"def",
"BulkLabel",
"(",
"label",
",",
"hostnames",
",",
"owner",
"=",
"None",
",",
"token",
"=",
"None",
",",
"client_index",
"=",
"None",
")",
":",
"if",
"client_index",
"is",
"None",
":",
"client_index",
"=",
"CreateClientIndex",
"(",
"token",
"=",
"token",
")",
"fqdns",
"=",
"set",
"(",
")",
"for",
"hostname",
"in",
"hostnames",
":",
"fqdns",
".",
"add",
"(",
"hostname",
".",
"lower",
"(",
")",
")",
"labelled_urns",
"=",
"client_index",
".",
"LookupClients",
"(",
"[",
"\"+label:%s\"",
"%",
"label",
"]",
")",
"# If a labelled client fqdn isn't in the set of target fqdns remove the label.",
"# Labelled clients with a target fqdn need no action and are removed from the",
"# set of target fqdns.",
"for",
"client",
"in",
"aff4",
".",
"FACTORY",
".",
"MultiOpen",
"(",
"labelled_urns",
",",
"token",
"=",
"token",
",",
"aff4_type",
"=",
"aff4_grr",
".",
"VFSGRRClient",
",",
"mode",
"=",
"\"rw\"",
")",
":",
"fqdn",
"=",
"utils",
".",
"SmartStr",
"(",
"client",
".",
"Get",
"(",
"\"FQDN\"",
")",
")",
".",
"lower",
"(",
")",
"if",
"fqdn",
"not",
"in",
"fqdns",
":",
"client_index",
".",
"RemoveClientLabels",
"(",
"client",
")",
"client",
".",
"RemoveLabel",
"(",
"label",
",",
"owner",
"=",
"owner",
")",
"client",
".",
"Flush",
"(",
")",
"client_index",
".",
"AddClient",
"(",
"client",
")",
"else",
":",
"fqdns",
".",
"discard",
"(",
"fqdn",
")",
"# The residual set of fqdns needs labelling.",
"# Get the latest URN for these clients and open them to add the label.",
"urns",
"=",
"[",
"]",
"keywords",
"=",
"[",
"\"+host:%s\"",
"%",
"fqdn",
"for",
"fqdn",
"in",
"fqdns",
"]",
"for",
"client_list",
"in",
"client_index",
".",
"ReadClientPostingLists",
"(",
"keywords",
")",
".",
"itervalues",
"(",
")",
":",
"for",
"client_id",
"in",
"client_list",
":",
"urns",
".",
"append",
"(",
"rdfvalue",
".",
"RDFURN",
"(",
"client_id",
")",
")",
"for",
"client",
"in",
"aff4",
".",
"FACTORY",
".",
"MultiOpen",
"(",
"urns",
",",
"token",
"=",
"token",
",",
"aff4_type",
"=",
"aff4_grr",
".",
"VFSGRRClient",
",",
"mode",
"=",
"\"rw\"",
")",
":",
"client",
".",
"AddLabel",
"(",
"label",
",",
"owner",
"=",
"owner",
")",
"client",
".",
"Flush",
"(",
")",
"client_index",
".",
"AddClient",
"(",
"client",
")"
] | Assign a label to a group of clients based on hostname.
Sets a label as an identifier to a group of clients. Removes the label from
other clients.
This can be used to automate labeling clients based on externally derived
attributes, for example machines assigned to particular users, or machines
fulfilling particular roles.
Args:
label: The label to apply.
hostnames: The collection of hostnames that should have the label.
owner: The owner for the newly created labels. Defaults to token.username.
token: The authentication token.
client_index: An optional client index to use. If not provided, use the
default client index. | [
"Assign",
"a",
"label",
"to",
"a",
"group",
"of",
"clients",
"based",
"on",
"hostname",
"."
] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/client_index.py#L556-L609 | 0.012387 |
pennlabs/penn-sdk-python | penn/laundry.py | Laundry.parse_a_hall | def parse_a_hall(self, hall):
"""Return names, hall numbers, and the washers/dryers available for a certain hall.
:param hall:
The ID of the hall to retrieve data for.
:type hall: int
"""
if hall not in self.hall_to_link:
return None # change to to empty json idk
page = requests.get(self.hall_to_link[hall], timeout=60)
soup = BeautifulSoup(page.content, 'html.parser')
soup.prettify()
washers = {"open": 0, "running": 0, "out_of_order": 0, "offline": 0, "time_remaining": []}
dryers = {"open": 0, "running": 0, "out_of_order": 0, "offline": 0, "time_remaining": []}
detailed = []
rows = soup.find_all('tr')
for row in rows:
cols = row.find_all('td')
if len(cols) > 1:
machine_type = cols[1].getText()
if machine_type == "Washer":
washers = Laundry.update_machine_object(cols, washers)
elif machine_type == "Dryer":
dryers = Laundry.update_machine_object(cols, dryers)
if machine_type in ["Washer", "Dryer"]:
try:
time = int(cols[3].getText().split(" ")[0])
except ValueError:
time = 0
detailed.append({
"id": int(cols[0].getText().split(" ")[1][1:]),
"type": cols[1].getText().lower(),
"status": cols[2].getText(),
"time_remaining": time
})
machines = {"washers": washers, "dryers": dryers, "details": detailed}
return machines | python | def parse_a_hall(self, hall):
"""Return names, hall numbers, and the washers/dryers available for a certain hall.
:param hall:
The ID of the hall to retrieve data for.
:type hall: int
"""
if hall not in self.hall_to_link:
return None # change to to empty json idk
page = requests.get(self.hall_to_link[hall], timeout=60)
soup = BeautifulSoup(page.content, 'html.parser')
soup.prettify()
washers = {"open": 0, "running": 0, "out_of_order": 0, "offline": 0, "time_remaining": []}
dryers = {"open": 0, "running": 0, "out_of_order": 0, "offline": 0, "time_remaining": []}
detailed = []
rows = soup.find_all('tr')
for row in rows:
cols = row.find_all('td')
if len(cols) > 1:
machine_type = cols[1].getText()
if machine_type == "Washer":
washers = Laundry.update_machine_object(cols, washers)
elif machine_type == "Dryer":
dryers = Laundry.update_machine_object(cols, dryers)
if machine_type in ["Washer", "Dryer"]:
try:
time = int(cols[3].getText().split(" ")[0])
except ValueError:
time = 0
detailed.append({
"id": int(cols[0].getText().split(" ")[1][1:]),
"type": cols[1].getText().lower(),
"status": cols[2].getText(),
"time_remaining": time
})
machines = {"washers": washers, "dryers": dryers, "details": detailed}
return machines | [
"def",
"parse_a_hall",
"(",
"self",
",",
"hall",
")",
":",
"if",
"hall",
"not",
"in",
"self",
".",
"hall_to_link",
":",
"return",
"None",
"# change to to empty json idk",
"page",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"hall_to_link",
"[",
"hall",
"]",
",",
"timeout",
"=",
"60",
")",
"soup",
"=",
"BeautifulSoup",
"(",
"page",
".",
"content",
",",
"'html.parser'",
")",
"soup",
".",
"prettify",
"(",
")",
"washers",
"=",
"{",
"\"open\"",
":",
"0",
",",
"\"running\"",
":",
"0",
",",
"\"out_of_order\"",
":",
"0",
",",
"\"offline\"",
":",
"0",
",",
"\"time_remaining\"",
":",
"[",
"]",
"}",
"dryers",
"=",
"{",
"\"open\"",
":",
"0",
",",
"\"running\"",
":",
"0",
",",
"\"out_of_order\"",
":",
"0",
",",
"\"offline\"",
":",
"0",
",",
"\"time_remaining\"",
":",
"[",
"]",
"}",
"detailed",
"=",
"[",
"]",
"rows",
"=",
"soup",
".",
"find_all",
"(",
"'tr'",
")",
"for",
"row",
"in",
"rows",
":",
"cols",
"=",
"row",
".",
"find_all",
"(",
"'td'",
")",
"if",
"len",
"(",
"cols",
")",
">",
"1",
":",
"machine_type",
"=",
"cols",
"[",
"1",
"]",
".",
"getText",
"(",
")",
"if",
"machine_type",
"==",
"\"Washer\"",
":",
"washers",
"=",
"Laundry",
".",
"update_machine_object",
"(",
"cols",
",",
"washers",
")",
"elif",
"machine_type",
"==",
"\"Dryer\"",
":",
"dryers",
"=",
"Laundry",
".",
"update_machine_object",
"(",
"cols",
",",
"dryers",
")",
"if",
"machine_type",
"in",
"[",
"\"Washer\"",
",",
"\"Dryer\"",
"]",
":",
"try",
":",
"time",
"=",
"int",
"(",
"cols",
"[",
"3",
"]",
".",
"getText",
"(",
")",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
")",
"except",
"ValueError",
":",
"time",
"=",
"0",
"detailed",
".",
"append",
"(",
"{",
"\"id\"",
":",
"int",
"(",
"cols",
"[",
"0",
"]",
".",
"getText",
"(",
")",
".",
"split",
"(",
"\" \"",
")",
"[",
"1",
"]",
"[",
"1",
":",
"]",
")",
",",
"\"type\"",
":",
"cols",
"[",
"1",
"]",
".",
"getText",
"(",
")",
".",
"lower",
"(",
")",
",",
"\"status\"",
":",
"cols",
"[",
"2",
"]",
".",
"getText",
"(",
")",
",",
"\"time_remaining\"",
":",
"time",
"}",
")",
"machines",
"=",
"{",
"\"washers\"",
":",
"washers",
",",
"\"dryers\"",
":",
"dryers",
",",
"\"details\"",
":",
"detailed",
"}",
"return",
"machines"
] | Return names, hall numbers, and the washers/dryers available for a certain hall.
:param hall:
The ID of the hall to retrieve data for.
:type hall: int | [
"Return",
"names",
"hall",
"numbers",
"and",
"the",
"washers",
"/",
"dryers",
"available",
"for",
"a",
"certain",
"hall",
"."
] | train | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/laundry.py#L77-L116 | 0.00289 |
rigetti/pyquil | pyquil/gates.py | EQ | def EQ(classical_reg1, classical_reg2, classical_reg3):
"""
Produce an EQ instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalEqual instance.
"""
classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(classical_reg1,
classical_reg2,
classical_reg3)
return ClassicalEqual(classical_reg1, classical_reg2, classical_reg3) | python | def EQ(classical_reg1, classical_reg2, classical_reg3):
"""
Produce an EQ instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalEqual instance.
"""
classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(classical_reg1,
classical_reg2,
classical_reg3)
return ClassicalEqual(classical_reg1, classical_reg2, classical_reg3) | [
"def",
"EQ",
"(",
"classical_reg1",
",",
"classical_reg2",
",",
"classical_reg3",
")",
":",
"classical_reg1",
",",
"classical_reg2",
",",
"classical_reg3",
"=",
"prepare_ternary_operands",
"(",
"classical_reg1",
",",
"classical_reg2",
",",
"classical_reg3",
")",
"return",
"ClassicalEqual",
"(",
"classical_reg1",
",",
"classical_reg2",
",",
"classical_reg3",
")"
] | Produce an EQ instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalEqual instance. | [
"Produce",
"an",
"EQ",
"instruction",
"."
] | train | https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/gates.py#L703-L716 | 0.007267 |
AbdealiJK/pycolorname | pycolorname/color_system.py | ColorSystem.load | def load(self, filename=None, refresh=False):
"""
Try to load the data from a pre existing data file if it exists.
If the data file does not exist, refresh the data and save it in
the data file for future use.
The data file is a json file.
:param filename: The filename to save or fetch the data from.
:param refresh: Whether to force refresh the data or not
"""
filename = filename or self.data_file()
dirname = os.path.dirname(filename)
if refresh is False:
try:
data = None
with open(filename) as fp:
data = json.load(fp)
self.clear()
self.update(data)
return
except (ValueError, IOError):
# Refresh data if reading gave errors
pass
data = self.refresh()
self.clear()
self.update(data)
if not os.path.isdir(dirname):
os.makedirs(dirname)
with open(filename, 'w') as fp:
json.dump(data, fp,
sort_keys=True,
indent=2,
separators=(',', ': ')) | python | def load(self, filename=None, refresh=False):
"""
Try to load the data from a pre existing data file if it exists.
If the data file does not exist, refresh the data and save it in
the data file for future use.
The data file is a json file.
:param filename: The filename to save or fetch the data from.
:param refresh: Whether to force refresh the data or not
"""
filename = filename or self.data_file()
dirname = os.path.dirname(filename)
if refresh is False:
try:
data = None
with open(filename) as fp:
data = json.load(fp)
self.clear()
self.update(data)
return
except (ValueError, IOError):
# Refresh data if reading gave errors
pass
data = self.refresh()
self.clear()
self.update(data)
if not os.path.isdir(dirname):
os.makedirs(dirname)
with open(filename, 'w') as fp:
json.dump(data, fp,
sort_keys=True,
indent=2,
separators=(',', ': ')) | [
"def",
"load",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"refresh",
"=",
"False",
")",
":",
"filename",
"=",
"filename",
"or",
"self",
".",
"data_file",
"(",
")",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
")",
"if",
"refresh",
"is",
"False",
":",
"try",
":",
"data",
"=",
"None",
"with",
"open",
"(",
"filename",
")",
"as",
"fp",
":",
"data",
"=",
"json",
".",
"load",
"(",
"fp",
")",
"self",
".",
"clear",
"(",
")",
"self",
".",
"update",
"(",
"data",
")",
"return",
"except",
"(",
"ValueError",
",",
"IOError",
")",
":",
"# Refresh data if reading gave errors",
"pass",
"data",
"=",
"self",
".",
"refresh",
"(",
")",
"self",
".",
"clear",
"(",
")",
"self",
".",
"update",
"(",
"data",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dirname",
")",
":",
"os",
".",
"makedirs",
"(",
"dirname",
")",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"fp",
":",
"json",
".",
"dump",
"(",
"data",
",",
"fp",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"2",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")"
] | Try to load the data from a pre existing data file if it exists.
If the data file does not exist, refresh the data and save it in
the data file for future use.
The data file is a json file.
:param filename: The filename to save or fetch the data from.
:param refresh: Whether to force refresh the data or not | [
"Try",
"to",
"load",
"the",
"data",
"from",
"a",
"pre",
"existing",
"data",
"file",
"if",
"it",
"exists",
".",
"If",
"the",
"data",
"file",
"does",
"not",
"exist",
"refresh",
"the",
"data",
"and",
"save",
"it",
"in",
"the",
"data",
"file",
"for",
"future",
"use",
".",
"The",
"data",
"file",
"is",
"a",
"json",
"file",
"."
] | train | https://github.com/AbdealiJK/pycolorname/blob/d535de3d340a1673906cb484cc4c49c87d296ec0/pycolorname/color_system.py#L35-L70 | 0.001638 |
tensorflow/tensor2tensor | tensor2tensor/data_generators/vqa_utils.py | vqa_v2_preprocess_image | def vqa_v2_preprocess_image(
image,
height,
width,
mode,
resize_side=512,
distort=True,
image_model_fn="resnet_v1_152",
):
"""vqa v2 preprocess image."""
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
assert resize_side > 0
if resize_side:
image = _aspect_preserving_resize(image, resize_side)
if mode == tf.estimator.ModeKeys.TRAIN:
image = tf.random_crop(image, [height, width, 3])
else:
# Central crop, assuming resize_height > height, resize_width > width.
image = tf.image.resize_image_with_crop_or_pad(image, height, width)
image = tf.clip_by_value(image, 0.0, 1.0)
if mode == tf.estimator.ModeKeys.TRAIN and distort:
image = _flip(image)
num_distort_cases = 4
# pylint: disable=unnecessary-lambda
image = _apply_with_random_selector(
image, lambda x, ordering: _distort_color(x, ordering),
num_cases=num_distort_cases)
if image_model_fn.startswith("resnet_v1"):
# resnet_v1 uses vgg preprocessing
image = image * 255.
image = _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
elif image_model_fn.startswith("resnet_v2"):
# resnet v2 uses inception preprocessing
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image | python | def vqa_v2_preprocess_image(
image,
height,
width,
mode,
resize_side=512,
distort=True,
image_model_fn="resnet_v1_152",
):
"""vqa v2 preprocess image."""
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
assert resize_side > 0
if resize_side:
image = _aspect_preserving_resize(image, resize_side)
if mode == tf.estimator.ModeKeys.TRAIN:
image = tf.random_crop(image, [height, width, 3])
else:
# Central crop, assuming resize_height > height, resize_width > width.
image = tf.image.resize_image_with_crop_or_pad(image, height, width)
image = tf.clip_by_value(image, 0.0, 1.0)
if mode == tf.estimator.ModeKeys.TRAIN and distort:
image = _flip(image)
num_distort_cases = 4
# pylint: disable=unnecessary-lambda
image = _apply_with_random_selector(
image, lambda x, ordering: _distort_color(x, ordering),
num_cases=num_distort_cases)
if image_model_fn.startswith("resnet_v1"):
# resnet_v1 uses vgg preprocessing
image = image * 255.
image = _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
elif image_model_fn.startswith("resnet_v2"):
# resnet v2 uses inception preprocessing
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image | [
"def",
"vqa_v2_preprocess_image",
"(",
"image",
",",
"height",
",",
"width",
",",
"mode",
",",
"resize_side",
"=",
"512",
",",
"distort",
"=",
"True",
",",
"image_model_fn",
"=",
"\"resnet_v1_152\"",
",",
")",
":",
"image",
"=",
"tf",
".",
"image",
".",
"convert_image_dtype",
"(",
"image",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"assert",
"resize_side",
">",
"0",
"if",
"resize_side",
":",
"image",
"=",
"_aspect_preserving_resize",
"(",
"image",
",",
"resize_side",
")",
"if",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
":",
"image",
"=",
"tf",
".",
"random_crop",
"(",
"image",
",",
"[",
"height",
",",
"width",
",",
"3",
"]",
")",
"else",
":",
"# Central crop, assuming resize_height > height, resize_width > width.",
"image",
"=",
"tf",
".",
"image",
".",
"resize_image_with_crop_or_pad",
"(",
"image",
",",
"height",
",",
"width",
")",
"image",
"=",
"tf",
".",
"clip_by_value",
"(",
"image",
",",
"0.0",
",",
"1.0",
")",
"if",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
"and",
"distort",
":",
"image",
"=",
"_flip",
"(",
"image",
")",
"num_distort_cases",
"=",
"4",
"# pylint: disable=unnecessary-lambda",
"image",
"=",
"_apply_with_random_selector",
"(",
"image",
",",
"lambda",
"x",
",",
"ordering",
":",
"_distort_color",
"(",
"x",
",",
"ordering",
")",
",",
"num_cases",
"=",
"num_distort_cases",
")",
"if",
"image_model_fn",
".",
"startswith",
"(",
"\"resnet_v1\"",
")",
":",
"# resnet_v1 uses vgg preprocessing",
"image",
"=",
"image",
"*",
"255.",
"image",
"=",
"_mean_image_subtraction",
"(",
"image",
",",
"[",
"_R_MEAN",
",",
"_G_MEAN",
",",
"_B_MEAN",
"]",
")",
"elif",
"image_model_fn",
".",
"startswith",
"(",
"\"resnet_v2\"",
")",
":",
"# resnet v2 uses inception preprocessing",
"image",
"=",
"tf",
".",
"subtract",
"(",
"image",
",",
"0.5",
")",
"image",
"=",
"tf",
".",
"multiply",
"(",
"image",
",",
"2.0",
")",
"return",
"image"
] | vqa v2 preprocess image. | [
"vqa",
"v2",
"preprocess",
"image",
"."
] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/vqa_utils.py#L196-L236 | 0.009252 |
indico/indico-plugins | piwik/indico_piwik/piwik.py | PiwikRequest._perform_call | def _perform_call(self, query_url, default_response=None, timeout=10):
"""Returns the raw results from the API"""
try:
response = requests.get(query_url, timeout=timeout)
except socket.timeout:
current_plugin.logger.warning("Timeout contacting Piwik server")
return default_response
except Exception:
current_plugin.logger.exception("Unable to connect")
return default_response
return response.content | python | def _perform_call(self, query_url, default_response=None, timeout=10):
"""Returns the raw results from the API"""
try:
response = requests.get(query_url, timeout=timeout)
except socket.timeout:
current_plugin.logger.warning("Timeout contacting Piwik server")
return default_response
except Exception:
current_plugin.logger.exception("Unable to connect")
return default_response
return response.content | [
"def",
"_perform_call",
"(",
"self",
",",
"query_url",
",",
"default_response",
"=",
"None",
",",
"timeout",
"=",
"10",
")",
":",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"query_url",
",",
"timeout",
"=",
"timeout",
")",
"except",
"socket",
".",
"timeout",
":",
"current_plugin",
".",
"logger",
".",
"warning",
"(",
"\"Timeout contacting Piwik server\"",
")",
"return",
"default_response",
"except",
"Exception",
":",
"current_plugin",
".",
"logger",
".",
"exception",
"(",
"\"Unable to connect\"",
")",
"return",
"default_response",
"return",
"response",
".",
"content"
] | Returns the raw results from the API | [
"Returns",
"the",
"raw",
"results",
"from",
"the",
"API"
] | train | https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/piwik/indico_piwik/piwik.py#L72-L82 | 0.003992 |
saltstack/salt | salt/modules/aws_sqs.py | delete_message | def delete_message(queue, region, receipthandle, opts=None, user=None):
'''
Delete one or more messages from a queue in a region
queue
The name of the queue to delete messages from
region
        Region where SQS queues exist
receipthandle
The ReceiptHandle of the message to delete. The ReceiptHandle
is obtained in the return from receive_message
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.delete_message <sqs queue> <region> receipthandle='<sqs ReceiptHandle>'
.. versionadded:: 2014.7.0
'''
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return False
out = _run_aws('delete-message', region, opts, user,
receipthandle=receipthandle, queue=url_map[queue],)
return True | python | def delete_message(queue, region, receipthandle, opts=None, user=None):
'''
Delete one or more messages from a queue in a region
queue
The name of the queue to delete messages from
region
        Region where SQS queues exist
receipthandle
The ReceiptHandle of the message to delete. The ReceiptHandle
is obtained in the return from receive_message
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.delete_message <sqs queue> <region> receipthandle='<sqs ReceiptHandle>'
.. versionadded:: 2014.7.0
'''
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return False
out = _run_aws('delete-message', region, opts, user,
receipthandle=receipthandle, queue=url_map[queue],)
return True | [
"def",
"delete_message",
"(",
"queue",
",",
"region",
",",
"receipthandle",
",",
"opts",
"=",
"None",
",",
"user",
"=",
"None",
")",
":",
"queues",
"=",
"list_queues",
"(",
"region",
",",
"opts",
",",
"user",
")",
"url_map",
"=",
"_parse_queue_list",
"(",
"queues",
")",
"if",
"queue",
"not",
"in",
"url_map",
":",
"log",
".",
"info",
"(",
"'\"%s\" queue does not exist.'",
",",
"queue",
")",
"return",
"False",
"out",
"=",
"_run_aws",
"(",
"'delete-message'",
",",
"region",
",",
"opts",
",",
"user",
",",
"receipthandle",
"=",
"receipthandle",
",",
"queue",
"=",
"url_map",
"[",
"queue",
"]",
",",
")",
"return",
"True"
] | Delete one or more messages from a queue in a region
queue
The name of the queue to delete messages from
region
        Region where SQS queues exist
receipthandle
The ReceiptHandle of the message to delete. The ReceiptHandle
is obtained in the return from receive_message
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.delete_message <sqs queue> <region> receipthandle='<sqs ReceiptHandle>'
.. versionadded:: 2014.7.0 | [
"Delete",
"one",
"or",
"more",
"messages",
"from",
"a",
"queue",
"in",
"a",
"region"
] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aws_sqs.py#L115-L152 | 0.001883 |
marcotcr/lime | lime/lime_base.py | LimeBase.forward_selection | def forward_selection(self, data, labels, weights, num_features):
"""Iteratively adds features to the model"""
clf = Ridge(alpha=0, fit_intercept=True, random_state=self.random_state)
used_features = []
for _ in range(min(num_features, data.shape[1])):
max_ = -100000000
best = 0
for feature in range(data.shape[1]):
if feature in used_features:
continue
clf.fit(data[:, used_features + [feature]], labels,
sample_weight=weights)
score = clf.score(data[:, used_features + [feature]],
labels,
sample_weight=weights)
if score > max_:
best = feature
max_ = score
used_features.append(best)
return np.array(used_features) | python | def forward_selection(self, data, labels, weights, num_features):
"""Iteratively adds features to the model"""
clf = Ridge(alpha=0, fit_intercept=True, random_state=self.random_state)
used_features = []
for _ in range(min(num_features, data.shape[1])):
max_ = -100000000
best = 0
for feature in range(data.shape[1]):
if feature in used_features:
continue
clf.fit(data[:, used_features + [feature]], labels,
sample_weight=weights)
score = clf.score(data[:, used_features + [feature]],
labels,
sample_weight=weights)
if score > max_:
best = feature
max_ = score
used_features.append(best)
return np.array(used_features) | [
"def",
"forward_selection",
"(",
"self",
",",
"data",
",",
"labels",
",",
"weights",
",",
"num_features",
")",
":",
"clf",
"=",
"Ridge",
"(",
"alpha",
"=",
"0",
",",
"fit_intercept",
"=",
"True",
",",
"random_state",
"=",
"self",
".",
"random_state",
")",
"used_features",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"min",
"(",
"num_features",
",",
"data",
".",
"shape",
"[",
"1",
"]",
")",
")",
":",
"max_",
"=",
"-",
"100000000",
"best",
"=",
"0",
"for",
"feature",
"in",
"range",
"(",
"data",
".",
"shape",
"[",
"1",
"]",
")",
":",
"if",
"feature",
"in",
"used_features",
":",
"continue",
"clf",
".",
"fit",
"(",
"data",
"[",
":",
",",
"used_features",
"+",
"[",
"feature",
"]",
"]",
",",
"labels",
",",
"sample_weight",
"=",
"weights",
")",
"score",
"=",
"clf",
".",
"score",
"(",
"data",
"[",
":",
",",
"used_features",
"+",
"[",
"feature",
"]",
"]",
",",
"labels",
",",
"sample_weight",
"=",
"weights",
")",
"if",
"score",
">",
"max_",
":",
"best",
"=",
"feature",
"max_",
"=",
"score",
"used_features",
".",
"append",
"(",
"best",
")",
"return",
"np",
".",
"array",
"(",
"used_features",
")"
] | Iteratively adds features to the model | [
"Iteratively",
"adds",
"features",
"to",
"the",
"model"
] | train | https://github.com/marcotcr/lime/blob/08133d47df00ed918e22005e0c98f6eefd5a1d71/lime/lime_base.py#L49-L68 | 0.003257 |
bitesofcode/projexui | projexui/widgets/xmenutemplatewidget/xmenutemplatewidget.py | XMenuTemplateWidget.saveXml | def saveXml( self, xparent, item ):
"""
Saves the information from the tree item to xml.
:param xparent | <xml.etree.ElementTree.Element>
item | <QTreeWidgetItem>
"""
key = nativestring(unwrapVariant(item.data(0, Qt.UserRole)))
if ( key == 'separator' ):
ElementTree.SubElement(xparent, 'separator')
elif ( key == 'menu' ):
elem = ElementTree.SubElement(xparent, 'menu')
elem.set('title', nativestring(item.text(0)))
for c in range(item.childCount()):
self.saveXml(elem, item.child(c))
else:
elem = ElementTree.SubElement(xparent, 'action')
elem.set('name', key) | python | def saveXml( self, xparent, item ):
"""
Saves the information from the tree item to xml.
:param xparent | <xml.etree.ElementTree.Element>
item | <QTreeWidgetItem>
"""
key = nativestring(unwrapVariant(item.data(0, Qt.UserRole)))
if ( key == 'separator' ):
ElementTree.SubElement(xparent, 'separator')
elif ( key == 'menu' ):
elem = ElementTree.SubElement(xparent, 'menu')
elem.set('title', nativestring(item.text(0)))
for c in range(item.childCount()):
self.saveXml(elem, item.child(c))
else:
elem = ElementTree.SubElement(xparent, 'action')
elem.set('name', key) | [
"def",
"saveXml",
"(",
"self",
",",
"xparent",
",",
"item",
")",
":",
"key",
"=",
"nativestring",
"(",
"unwrapVariant",
"(",
"item",
".",
"data",
"(",
"0",
",",
"Qt",
".",
"UserRole",
")",
")",
")",
"if",
"(",
"key",
"==",
"'separator'",
")",
":",
"ElementTree",
".",
"SubElement",
"(",
"xparent",
",",
"'separator'",
")",
"elif",
"(",
"key",
"==",
"'menu'",
")",
":",
"elem",
"=",
"ElementTree",
".",
"SubElement",
"(",
"xparent",
",",
"'menu'",
")",
"elem",
".",
"set",
"(",
"'title'",
",",
"nativestring",
"(",
"item",
".",
"text",
"(",
"0",
")",
")",
")",
"for",
"c",
"in",
"range",
"(",
"item",
".",
"childCount",
"(",
")",
")",
":",
"self",
".",
"saveXml",
"(",
"elem",
",",
"item",
".",
"child",
"(",
"c",
")",
")",
"else",
":",
"elem",
"=",
"ElementTree",
".",
"SubElement",
"(",
"xparent",
",",
"'action'",
")",
"elem",
".",
"set",
"(",
"'name'",
",",
"key",
")"
] | Saves the information from the tree item to xml.
:param xparent | <xml.etree.ElementTree.Element>
item | <QTreeWidgetItem> | [
"Saves",
"the",
"information",
"from",
"the",
"tree",
"item",
"to",
"xml",
".",
":",
"param",
"xparent",
"|",
"<xml",
".",
"etree",
".",
"ElementTree",
".",
"Element",
">",
"item",
"|",
"<QTreeWidgetItem",
">"
] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xmenutemplatewidget/xmenutemplatewidget.py#L285-L304 | 0.01423 |
althonos/pronto | pronto/parser/owl.py | OwlXMLParser._extract_obo_synonyms | def _extract_obo_synonyms(rawterm):
"""Extract the synonyms defined in the rawterm.
"""
synonyms = set()
# keys in rawterm that define a synonym
keys = set(owl_synonyms).intersection(rawterm.keys())
for k in keys:
for s in rawterm[k]:
synonyms.add(Synonym(s, owl_synonyms[k]))
return synonyms | python | def _extract_obo_synonyms(rawterm):
"""Extract the synonyms defined in the rawterm.
"""
synonyms = set()
# keys in rawterm that define a synonym
keys = set(owl_synonyms).intersection(rawterm.keys())
for k in keys:
for s in rawterm[k]:
synonyms.add(Synonym(s, owl_synonyms[k]))
return synonyms | [
"def",
"_extract_obo_synonyms",
"(",
"rawterm",
")",
":",
"synonyms",
"=",
"set",
"(",
")",
"# keys in rawterm that define a synonym",
"keys",
"=",
"set",
"(",
"owl_synonyms",
")",
".",
"intersection",
"(",
"rawterm",
".",
"keys",
"(",
")",
")",
"for",
"k",
"in",
"keys",
":",
"for",
"s",
"in",
"rawterm",
"[",
"k",
"]",
":",
"synonyms",
".",
"add",
"(",
"Synonym",
"(",
"s",
",",
"owl_synonyms",
"[",
"k",
"]",
")",
")",
"return",
"synonyms"
] | Extract the synonyms defined in the rawterm. | [
"Extract",
"the",
"synonyms",
"defined",
"in",
"the",
"rawterm",
"."
] | train | https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/parser/owl.py#L152-L161 | 0.005319 |
Alignak-monitoring/alignak | alignak/http/generic_interface.py | GenericInterface._results | def _results(self, scheduler_instance_id):
"""Get the results of the executed actions for the scheduler which instance id is provided
        Calling this method for daemons that are not configured as passive does not make sense.
Indeed, this service should only be exposed on poller and reactionner daemons.
:param scheduler_instance_id: instance id of the scheduler
:type scheduler_instance_id: string
:return: serialized list
:rtype: str
"""
with self.app.lock:
res = self.app.get_results_from_passive(scheduler_instance_id)
return serialize(res, True) | python | def _results(self, scheduler_instance_id):
"""Get the results of the executed actions for the scheduler which instance id is provided
        Calling this method for daemons that are not configured as passive does not make sense.
Indeed, this service should only be exposed on poller and reactionner daemons.
:param scheduler_instance_id: instance id of the scheduler
:type scheduler_instance_id: string
:return: serialized list
:rtype: str
"""
with self.app.lock:
res = self.app.get_results_from_passive(scheduler_instance_id)
return serialize(res, True) | [
"def",
"_results",
"(",
"self",
",",
"scheduler_instance_id",
")",
":",
"with",
"self",
".",
"app",
".",
"lock",
":",
"res",
"=",
"self",
".",
"app",
".",
"get_results_from_passive",
"(",
"scheduler_instance_id",
")",
"return",
"serialize",
"(",
"res",
",",
"True",
")"
] | Get the results of the executed actions for the scheduler which instance id is provided
        Calling this method for daemons that are not configured as passive does not make sense.
Indeed, this service should only be exposed on poller and reactionner daemons.
:param scheduler_instance_id: instance id of the scheduler
:type scheduler_instance_id: string
:return: serialized list
:rtype: str | [
"Get",
"the",
"results",
"of",
"the",
"executed",
"actions",
"for",
"the",
"scheduler",
"which",
"instance",
"id",
"is",
"provided"
] | train | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/http/generic_interface.py#L367-L380 | 0.007825 |
davidchua/pymessenger | pymessenger/bot.py | Bot.send_action | def send_action(self, recipient_id, action, notification_type=NotificationType.regular):
"""Send typing indicators or send read receipts to the specified recipient.
https://developers.facebook.com/docs/messenger-platform/send-api-reference/sender-actions
Input:
recipient_id: recipient id to send to
action: action type (mark_seen, typing_on, typing_off)
Output:
Response from API as <dict>
"""
return self.send_recipient(recipient_id, {
'sender_action': action
}, notification_type) | python | def send_action(self, recipient_id, action, notification_type=NotificationType.regular):
"""Send typing indicators or send read receipts to the specified recipient.
https://developers.facebook.com/docs/messenger-platform/send-api-reference/sender-actions
Input:
recipient_id: recipient id to send to
action: action type (mark_seen, typing_on, typing_off)
Output:
Response from API as <dict>
"""
return self.send_recipient(recipient_id, {
'sender_action': action
}, notification_type) | [
"def",
"send_action",
"(",
"self",
",",
"recipient_id",
",",
"action",
",",
"notification_type",
"=",
"NotificationType",
".",
"regular",
")",
":",
"return",
"self",
".",
"send_recipient",
"(",
"recipient_id",
",",
"{",
"'sender_action'",
":",
"action",
"}",
",",
"notification_type",
")"
] | Send typing indicators or send read receipts to the specified recipient.
https://developers.facebook.com/docs/messenger-platform/send-api-reference/sender-actions
Input:
recipient_id: recipient id to send to
action: action type (mark_seen, typing_on, typing_off)
Output:
Response from API as <dict> | [
"Send",
"typing",
"indicators",
"or",
"send",
"read",
"receipts",
"to",
"the",
"specified",
"recipient",
".",
"https",
":",
"//",
"developers",
".",
"facebook",
".",
"com",
"/",
"docs",
"/",
"messenger",
"-",
"platform",
"/",
"send",
"-",
"api",
"-",
"reference",
"/",
"sender",
"-",
"actions"
] | train | https://github.com/davidchua/pymessenger/blob/c3aedb65b7a50e0ec82c0df39a566fceec734c85/pymessenger/bot.py#L163-L175 | 0.006803 |
mongodb/mongo-python-driver | gridfs/__init__.py | GridFS.get_version | def get_version(self, filename=None, version=-1, session=None, **kwargs):
"""Get a file from GridFS by ``"filename"`` or metadata fields.
Returns a version of the file in GridFS whose filename matches
`filename` and whose metadata fields match the supplied keyword
arguments, as an instance of :class:`~gridfs.grid_file.GridOut`.
Version numbering is a convenience atop the GridFS API provided
by MongoDB. If more than one file matches the query (either by
`filename` alone, by metadata fields, or by a combination of
both), then version ``-1`` will be the most recently uploaded
matching file, ``-2`` the second most recently
uploaded, etc. Version ``0`` will be the first version
uploaded, ``1`` the second version, etc. So if three versions
have been uploaded, then version ``0`` is the same as version
``-3``, version ``1`` is the same as version ``-2``, and
version ``2`` is the same as version ``-1``.
Raises :class:`~gridfs.errors.NoFile` if no such version of
that file exists.
:Parameters:
- `filename`: ``"filename"`` of the file to get, or `None`
- `version` (optional): version of the file to get (defaults
to -1, the most recent version uploaded)
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`
- `**kwargs` (optional): find files by custom metadata.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.1
``get_version`` no longer ensures indexes.
"""
query = kwargs
if filename is not None:
query["filename"] = filename
cursor = self.__files.find(query, session=session)
if version < 0:
skip = abs(version) - 1
cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING)
else:
cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING)
try:
doc = next(cursor)
return GridOut(
self.__collection, file_document=doc, session=session)
except StopIteration:
raise NoFile("no version %d for filename %r" % (version, filename)) | python | def get_version(self, filename=None, version=-1, session=None, **kwargs):
"""Get a file from GridFS by ``"filename"`` or metadata fields.
Returns a version of the file in GridFS whose filename matches
`filename` and whose metadata fields match the supplied keyword
arguments, as an instance of :class:`~gridfs.grid_file.GridOut`.
Version numbering is a convenience atop the GridFS API provided
by MongoDB. If more than one file matches the query (either by
`filename` alone, by metadata fields, or by a combination of
both), then version ``-1`` will be the most recently uploaded
matching file, ``-2`` the second most recently
uploaded, etc. Version ``0`` will be the first version
uploaded, ``1`` the second version, etc. So if three versions
have been uploaded, then version ``0`` is the same as version
``-3``, version ``1`` is the same as version ``-2``, and
version ``2`` is the same as version ``-1``.
Raises :class:`~gridfs.errors.NoFile` if no such version of
that file exists.
:Parameters:
- `filename`: ``"filename"`` of the file to get, or `None`
- `version` (optional): version of the file to get (defaults
to -1, the most recent version uploaded)
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`
- `**kwargs` (optional): find files by custom metadata.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.1
``get_version`` no longer ensures indexes.
"""
query = kwargs
if filename is not None:
query["filename"] = filename
cursor = self.__files.find(query, session=session)
if version < 0:
skip = abs(version) - 1
cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING)
else:
cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING)
try:
doc = next(cursor)
return GridOut(
self.__collection, file_document=doc, session=session)
except StopIteration:
raise NoFile("no version %d for filename %r" % (version, filename)) | [
"def",
"get_version",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"version",
"=",
"-",
"1",
",",
"session",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"query",
"=",
"kwargs",
"if",
"filename",
"is",
"not",
"None",
":",
"query",
"[",
"\"filename\"",
"]",
"=",
"filename",
"cursor",
"=",
"self",
".",
"__files",
".",
"find",
"(",
"query",
",",
"session",
"=",
"session",
")",
"if",
"version",
"<",
"0",
":",
"skip",
"=",
"abs",
"(",
"version",
")",
"-",
"1",
"cursor",
".",
"limit",
"(",
"-",
"1",
")",
".",
"skip",
"(",
"skip",
")",
".",
"sort",
"(",
"\"uploadDate\"",
",",
"DESCENDING",
")",
"else",
":",
"cursor",
".",
"limit",
"(",
"-",
"1",
")",
".",
"skip",
"(",
"version",
")",
".",
"sort",
"(",
"\"uploadDate\"",
",",
"ASCENDING",
")",
"try",
":",
"doc",
"=",
"next",
"(",
"cursor",
")",
"return",
"GridOut",
"(",
"self",
".",
"__collection",
",",
"file_document",
"=",
"doc",
",",
"session",
"=",
"session",
")",
"except",
"StopIteration",
":",
"raise",
"NoFile",
"(",
"\"no version %d for filename %r\"",
"%",
"(",
"version",
",",
"filename",
")",
")"
] | Get a file from GridFS by ``"filename"`` or metadata fields.
Returns a version of the file in GridFS whose filename matches
`filename` and whose metadata fields match the supplied keyword
arguments, as an instance of :class:`~gridfs.grid_file.GridOut`.
Version numbering is a convenience atop the GridFS API provided
by MongoDB. If more than one file matches the query (either by
`filename` alone, by metadata fields, or by a combination of
both), then version ``-1`` will be the most recently uploaded
matching file, ``-2`` the second most recently
uploaded, etc. Version ``0`` will be the first version
uploaded, ``1`` the second version, etc. So if three versions
have been uploaded, then version ``0`` is the same as version
``-3``, version ``1`` is the same as version ``-2``, and
version ``2`` is the same as version ``-1``.
Raises :class:`~gridfs.errors.NoFile` if no such version of
that file exists.
:Parameters:
- `filename`: ``"filename"`` of the file to get, or `None`
- `version` (optional): version of the file to get (defaults
to -1, the most recent version uploaded)
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`
- `**kwargs` (optional): find files by custom metadata.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.1
``get_version`` no longer ensures indexes. | [
"Get",
"a",
"file",
"from",
"GridFS",
"by",
"filename",
"or",
"metadata",
"fields",
"."
] | train | https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/gridfs/__init__.py#L156-L206 | 0.000873 |
tcalmant/ipopo | pelix/misc/jabsorb.py | _compute_jsonclass | def _compute_jsonclass(obj):
"""
Compute the content of the __jsonclass__ field for the given object
:param obj: An object
:return: The content of the __jsonclass__ field
"""
# It's not a standard type, so it needs __jsonclass__
module_name = inspect.getmodule(obj).__name__
json_class = obj.__class__.__name__
if module_name not in ("", "__main__"):
json_class = "{0}.{1}".format(module_name, json_class)
return [json_class, []] | python | def _compute_jsonclass(obj):
"""
Compute the content of the __jsonclass__ field for the given object
:param obj: An object
:return: The content of the __jsonclass__ field
"""
# It's not a standard type, so it needs __jsonclass__
module_name = inspect.getmodule(obj).__name__
json_class = obj.__class__.__name__
if module_name not in ("", "__main__"):
json_class = "{0}.{1}".format(module_name, json_class)
return [json_class, []] | [
"def",
"_compute_jsonclass",
"(",
"obj",
")",
":",
"# It's not a standard type, so it needs __jsonclass__",
"module_name",
"=",
"inspect",
".",
"getmodule",
"(",
"obj",
")",
".",
"__name__",
"json_class",
"=",
"obj",
".",
"__class__",
".",
"__name__",
"if",
"module_name",
"not",
"in",
"(",
"\"\"",
",",
"\"__main__\"",
")",
":",
"json_class",
"=",
"\"{0}.{1}\"",
".",
"format",
"(",
"module_name",
",",
"json_class",
")",
"return",
"[",
"json_class",
",",
"[",
"]",
"]"
] | Compute the content of the __jsonclass__ field for the given object
:param obj: An object
:return: The content of the __jsonclass__ field | [
"Compute",
"the",
"content",
"of",
"the",
"__jsonclass__",
"field",
"for",
"the",
"given",
"object"
] | train | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/misc/jabsorb.py#L141-L154 | 0.002088 |
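A standalone sketch of the same __jsonclass__ computation, using a hypothetical class defined in a script (so the module name is '__main__' and is dropped):

import inspect

class Bean(object):
    pass

obj = Bean()
module_name = inspect.getmodule(obj).__name__   # '__main__' when run as a script
json_class = obj.__class__.__name__             # 'Bean'
if module_name not in ("", "__main__"):
    json_class = "{0}.{1}".format(module_name, json_class)
print([json_class, []])                         # ['Bean', []]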
openxc/openxc-python | openxc/vehicle.py | Vehicle.get | def get(self, measurement_class):
"""Return the latest measurement for the given class or None if nothing
has been received from the vehicle.
"""
name = Measurement.name_from_class(measurement_class)
return self._construct_measurement(name) | python | def get(self, measurement_class):
"""Return the latest measurement for the given class or None if nothing
has been received from the vehicle.
"""
name = Measurement.name_from_class(measurement_class)
return self._construct_measurement(name) | [
"def",
"get",
"(",
"self",
",",
"measurement_class",
")",
":",
"name",
"=",
"Measurement",
".",
"name_from_class",
"(",
"measurement_class",
")",
"return",
"self",
".",
"_construct_measurement",
"(",
"name",
")"
] | Return the latest measurement for the given class or None if nothing
has been received from the vehicle. | [
"Return",
"the",
"latest",
"measurement",
"for",
"the",
"given",
"class",
"or",
"None",
"if",
"nothing",
"has",
"been",
"received",
"from",
"the",
"vehicle",
"."
] | train | https://github.com/openxc/openxc-python/blob/4becb4a6310bd658c125195ef6ffea4deaf7d7e7/openxc/vehicle.py#L39-L44 | 0.007143 |
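Hedged usage sketch for Vehicle.get; it assumes `vehicle` is an openxc Vehicle already attached to a data source and that speed messages have been received.

from openxc.measurements import VehicleSpeed

speed = vehicle.get(VehicleSpeed)   # latest VehicleSpeed instance, or None
if speed is not None:
    print(speed.value)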
brbsix/python-batchpath | batchpath.py | isvalid | def isvalid(path, access=None, extensions=None, filetype=None, minsize=None):
"""Check whether file meets access, extension, size, and type criteria."""
return ((access is None or os.access(path, access)) and
(extensions is None or checkext(path, extensions)) and
(((filetype == 'all' and os.path.exists(path)) or
(filetype == 'dir' and os.path.isdir(path)) or
(filetype == 'file' and os.path.isfile(path))) or
filetype is None) and
(minsize is None or (not os.path.isfile(path) or
os.path.getsize(path) > minsize))) | python | def isvalid(path, access=None, extensions=None, filetype=None, minsize=None):
"""Check whether file meets access, extension, size, and type criteria."""
return ((access is None or os.access(path, access)) and
(extensions is None or checkext(path, extensions)) and
(((filetype == 'all' and os.path.exists(path)) or
(filetype == 'dir' and os.path.isdir(path)) or
(filetype == 'file' and os.path.isfile(path))) or
filetype is None) and
(minsize is None or (not os.path.isfile(path) or
os.path.getsize(path) > minsize))) | [
"def",
"isvalid",
"(",
"path",
",",
"access",
"=",
"None",
",",
"extensions",
"=",
"None",
",",
"filetype",
"=",
"None",
",",
"minsize",
"=",
"None",
")",
":",
"return",
"(",
"(",
"access",
"is",
"None",
"or",
"os",
".",
"access",
"(",
"path",
",",
"access",
")",
")",
"and",
"(",
"extensions",
"is",
"None",
"or",
"checkext",
"(",
"path",
",",
"extensions",
")",
")",
"and",
"(",
"(",
"(",
"filetype",
"==",
"'all'",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
")",
"or",
"(",
"filetype",
"==",
"'dir'",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
")",
"or",
"(",
"filetype",
"==",
"'file'",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
")",
")",
"or",
"filetype",
"is",
"None",
")",
"and",
"(",
"minsize",
"is",
"None",
"or",
"(",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
"or",
"os",
".",
"path",
".",
"getsize",
"(",
"path",
")",
">",
"minsize",
")",
")",
")"
] | Check whether file meets access, extension, size, and type criteria. | [
"Check",
"whether",
"file",
"meets",
"access",
"extension",
"size",
"and",
"type",
"criteria",
"."
] | train | https://github.com/brbsix/python-batchpath/blob/e4426c7946189aa41f0c99d37bf843799fb00c33/batchpath.py#L175-L184 | 0.001577 |
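Hypothetical call for isvalid above, assuming the module is importable as `batchpath`; the path is a placeholder.

import os
from batchpath import isvalid

ok = isvalid('/var/log/syslog',
             access=os.R_OK,       # must be readable by the current user
             filetype='file',      # must be a regular file
             minsize=1)            # and larger than one byte
print(ok)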
jrfonseca/gprof2dot | gprof2dot.py | Profile.integrate | def integrate(self, outevent, inevent):
"""Propagate function time ratio along the function calls.
Must be called after finding the cycles.
See also:
- http://citeseer.ist.psu.edu/graham82gprof.html
"""
# Sanity checking
assert outevent not in self
for function in compat_itervalues(self.functions):
assert outevent not in function
assert inevent in function
for call in compat_itervalues(function.calls):
assert outevent not in call
if call.callee_id != function.id:
assert call.ratio is not None
# Aggregate the input for each cycle
for cycle in self.cycles:
total = inevent.null()
for function in compat_itervalues(self.functions):
total = inevent.aggregate(total, function[inevent])
self[inevent] = total
# Integrate along the edges
total = inevent.null()
for function in compat_itervalues(self.functions):
total = inevent.aggregate(total, function[inevent])
self._integrate_function(function, outevent, inevent)
self[outevent] = total | python | def integrate(self, outevent, inevent):
"""Propagate function time ratio along the function calls.
Must be called after finding the cycles.
See also:
- http://citeseer.ist.psu.edu/graham82gprof.html
"""
# Sanity checking
assert outevent not in self
for function in compat_itervalues(self.functions):
assert outevent not in function
assert inevent in function
for call in compat_itervalues(function.calls):
assert outevent not in call
if call.callee_id != function.id:
assert call.ratio is not None
# Aggregate the input for each cycle
for cycle in self.cycles:
total = inevent.null()
for function in compat_itervalues(self.functions):
total = inevent.aggregate(total, function[inevent])
self[inevent] = total
# Integrate along the edges
total = inevent.null()
for function in compat_itervalues(self.functions):
total = inevent.aggregate(total, function[inevent])
self._integrate_function(function, outevent, inevent)
self[outevent] = total | [
"def",
"integrate",
"(",
"self",
",",
"outevent",
",",
"inevent",
")",
":",
"# Sanity checking",
"assert",
"outevent",
"not",
"in",
"self",
"for",
"function",
"in",
"compat_itervalues",
"(",
"self",
".",
"functions",
")",
":",
"assert",
"outevent",
"not",
"in",
"function",
"assert",
"inevent",
"in",
"function",
"for",
"call",
"in",
"compat_itervalues",
"(",
"function",
".",
"calls",
")",
":",
"assert",
"outevent",
"not",
"in",
"call",
"if",
"call",
".",
"callee_id",
"!=",
"function",
".",
"id",
":",
"assert",
"call",
".",
"ratio",
"is",
"not",
"None",
"# Aggregate the input for each cycle ",
"for",
"cycle",
"in",
"self",
".",
"cycles",
":",
"total",
"=",
"inevent",
".",
"null",
"(",
")",
"for",
"function",
"in",
"compat_itervalues",
"(",
"self",
".",
"functions",
")",
":",
"total",
"=",
"inevent",
".",
"aggregate",
"(",
"total",
",",
"function",
"[",
"inevent",
"]",
")",
"self",
"[",
"inevent",
"]",
"=",
"total",
"# Integrate along the edges",
"total",
"=",
"inevent",
".",
"null",
"(",
")",
"for",
"function",
"in",
"compat_itervalues",
"(",
"self",
".",
"functions",
")",
":",
"total",
"=",
"inevent",
".",
"aggregate",
"(",
"total",
",",
"function",
"[",
"inevent",
"]",
")",
"self",
".",
"_integrate_function",
"(",
"function",
",",
"outevent",
",",
"inevent",
")",
"self",
"[",
"outevent",
"]",
"=",
"total"
] | Propagate function time ratio along the function calls.
Must be called after finding the cycles.
See also:
- http://citeseer.ist.psu.edu/graham82gprof.html | [
"Propagate",
"function",
"time",
"ratio",
"along",
"the",
"function",
"calls",
"."
] | train | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L484-L515 | 0.002457 |
globocom/GloboNetworkAPI-client-python | networkapiclient/ApiEnvironment.py | ApiEnvironment.create_environment | def create_environment(self, environment):
"""
Method to create environment
"""
uri = 'api/v3/environment/'
data = dict()
data['environments'] = list()
data['environments'].append(environment)
return super(ApiEnvironment, self).post(uri, data) | python | def create_environment(self, environment):
"""
Method to create environment
"""
uri = 'api/v3/environment/'
data = dict()
data['environments'] = list()
data['environments'].append(environment)
return super(ApiEnvironment, self).post(uri, data) | [
"def",
"create_environment",
"(",
"self",
",",
"environment",
")",
":",
"uri",
"=",
"'api/v3/environment/'",
"data",
"=",
"dict",
"(",
")",
"data",
"[",
"'environments'",
"]",
"=",
"list",
"(",
")",
"data",
"[",
"'environments'",
"]",
".",
"append",
"(",
"environment",
")",
"return",
"super",
"(",
"ApiEnvironment",
",",
"self",
")",
".",
"post",
"(",
"uri",
",",
"data",
")"
] | Method to create environment | [
"Method",
"to",
"create",
"environment"
] | train | https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiEnvironment.py#L54-L65 | 0.006452 |
SeattleTestbed/seash | pyreadline/modes/basemode.py | BaseMode.forward_word | def forward_word(self, e): # (M-f)
u"""Move forward to the end of the next word. Words are composed of
letters and digits."""
self.l_buffer.forward_word(self.argument_reset)
self.finalize() | python | def forward_word(self, e): # (M-f)
u"""Move forward to the end of the next word. Words are composed of
letters and digits."""
self.l_buffer.forward_word(self.argument_reset)
self.finalize() | [
"def",
"forward_word",
"(",
"self",
",",
"e",
")",
":",
"# (M-f)\r",
"self",
".",
"l_buffer",
".",
"forward_word",
"(",
"self",
".",
"argument_reset",
")",
"self",
".",
"finalize",
"(",
")"
] | u"""Move forward to the end of the next word. Words are composed of
letters and digits. | [
"u",
"Move",
"forward",
"to",
"the",
"end",
"of",
"the",
"next",
"word",
".",
"Words",
"are",
"composed",
"of",
"letters",
"and",
"digits",
"."
] | train | https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/pyreadline/modes/basemode.py#L333-L337 | 0.013333 |
hugapi/hug | examples/secure_auth_with_db_example.py | add_user | def add_user(username, password):
"""
CLI Parameter to add a user to the database
:param username:
:param password:
:return: JSON status output
"""
user_model = Query()
if db.search(user_model.username == username):
return {
'error': 'User {0} already exists'.format(username)
}
salt = hashlib.sha512(str(os.urandom(64)).encode('utf-8')).hexdigest()
password = hash_password(password, salt)
api_key = gen_api_key(username)
user = {
'username': username,
'password': password,
'salt': salt,
'api_key': api_key
}
user_id = db.insert(user)
return {
'result': 'success',
'eid': user_id,
'user_created': user
} | python | def add_user(username, password):
"""
CLI Parameter to add a user to the database
:param username:
:param password:
:return: JSON status output
"""
user_model = Query()
if db.search(user_model.username == username):
return {
'error': 'User {0} already exists'.format(username)
}
salt = hashlib.sha512(str(os.urandom(64)).encode('utf-8')).hexdigest()
password = hash_password(password, salt)
api_key = gen_api_key(username)
user = {
'username': username,
'password': password,
'salt': salt,
'api_key': api_key
}
user_id = db.insert(user)
return {
'result': 'success',
'eid': user_id,
'user_created': user
} | [
"def",
"add_user",
"(",
"username",
",",
"password",
")",
":",
"user_model",
"=",
"Query",
"(",
")",
"if",
"db",
".",
"search",
"(",
"user_model",
".",
"username",
"==",
"username",
")",
":",
"return",
"{",
"'error'",
":",
"'User {0} already exists'",
".",
"format",
"(",
"username",
")",
"}",
"salt",
"=",
"hashlib",
".",
"sha512",
"(",
"str",
"(",
"os",
".",
"urandom",
"(",
"64",
")",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"password",
"=",
"hash_password",
"(",
"password",
",",
"salt",
")",
"api_key",
"=",
"gen_api_key",
"(",
"username",
")",
"user",
"=",
"{",
"'username'",
":",
"username",
",",
"'password'",
":",
"password",
",",
"'salt'",
":",
"salt",
",",
"'api_key'",
":",
"api_key",
"}",
"user_id",
"=",
"db",
".",
"insert",
"(",
"user",
")",
"return",
"{",
"'result'",
":",
"'success'",
",",
"'eid'",
":",
"user_id",
",",
"'user_created'",
":",
"user",
"}"
] | CLI Parameter to add a user to the database
:param username:
:param password:
:return: JSON status output | [
"CLI",
"Parameter",
"to",
"add",
"a",
"user",
"to",
"the",
"database",
":",
"param",
"username",
":",
":",
"param",
"password",
":",
":",
"return",
":",
"JSON",
"status",
"output"
] | train | https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/examples/secure_auth_with_db_example.py#L82-L112 | 0.001326 |
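Sketch of the expected behaviour when add_user is called twice with the same username (values abridged; the eid depends on TinyDB state):

first = add_user('alice', 's3cr3t')
print(first['result'])     # 'success'
second = add_user('alice', 'other-password')
print(second['error'])     # 'User alice already exists'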
ellmetha/django-machina | machina/apps/forum_tracking/handler.py | TrackingHandler.get_unread_topics | def get_unread_topics(self, topics, user):
""" Returns a list of unread topics for the given user from a given set of topics. """
unread_topics = []
# A user which is not authenticated will never see a topic as unread.
# If there are no topics to consider, we stop here.
if not user.is_authenticated or topics is None or not len(topics):
return unread_topics
# A topic can be unread if a track for itself exists with a mark time that
# is less important than its update date.
topic_ids = [topic.id for topic in topics]
topic_tracks = TopicReadTrack.objects.filter(topic__in=topic_ids, user=user)
tracked_topics = dict(topic_tracks.values_list('topic__pk', 'mark_time'))
if tracked_topics:
for topic in topics:
topic_last_modification_date = topic.last_post_on or topic.created
if (
topic.id in tracked_topics.keys() and
topic_last_modification_date > tracked_topics[topic.id]
):
unread_topics.append(topic)
# A topic can be unread if a track for its associated forum exists with
# a mark time that is less important than its creation or update date.
forum_ids = [topic.forum_id for topic in topics]
forum_tracks = ForumReadTrack.objects.filter(forum_id__in=forum_ids, user=user)
tracked_forums = dict(forum_tracks.values_list('forum__pk', 'mark_time'))
if tracked_forums:
for topic in topics:
topic_last_modification_date = topic.last_post_on or topic.created
if (
(topic.forum_id in tracked_forums.keys() and topic.id not in tracked_topics) and
topic_last_modification_date > tracked_forums[topic.forum_id]
):
unread_topics.append(topic)
# A topic can be unread if no tracks exists for it
for topic in topics:
if topic.forum_id not in tracked_forums and topic.id not in tracked_topics:
unread_topics.append(topic)
return list(set(unread_topics)) | python | def get_unread_topics(self, topics, user):
""" Returns a list of unread topics for the given user from a given set of topics. """
unread_topics = []
# A user which is not authenticated will never see a topic as unread.
# If there are no topics to consider, we stop here.
if not user.is_authenticated or topics is None or not len(topics):
return unread_topics
# A topic can be unread if a track for itself exists with a mark time that
# is less important than its update date.
topic_ids = [topic.id for topic in topics]
topic_tracks = TopicReadTrack.objects.filter(topic__in=topic_ids, user=user)
tracked_topics = dict(topic_tracks.values_list('topic__pk', 'mark_time'))
if tracked_topics:
for topic in topics:
topic_last_modification_date = topic.last_post_on or topic.created
if (
topic.id in tracked_topics.keys() and
topic_last_modification_date > tracked_topics[topic.id]
):
unread_topics.append(topic)
# A topic can be unread if a track for its associated forum exists with
# a mark time that is less important than its creation or update date.
forum_ids = [topic.forum_id for topic in topics]
forum_tracks = ForumReadTrack.objects.filter(forum_id__in=forum_ids, user=user)
tracked_forums = dict(forum_tracks.values_list('forum__pk', 'mark_time'))
if tracked_forums:
for topic in topics:
topic_last_modification_date = topic.last_post_on or topic.created
if (
(topic.forum_id in tracked_forums.keys() and topic.id not in tracked_topics) and
topic_last_modification_date > tracked_forums[topic.forum_id]
):
unread_topics.append(topic)
# A topic can be unread if no tracks exists for it
for topic in topics:
if topic.forum_id not in tracked_forums and topic.id not in tracked_topics:
unread_topics.append(topic)
return list(set(unread_topics)) | [
"def",
"get_unread_topics",
"(",
"self",
",",
"topics",
",",
"user",
")",
":",
"unread_topics",
"=",
"[",
"]",
"# A user which is not authenticated will never see a topic as unread.",
"# If there are no topics to consider, we stop here.",
"if",
"not",
"user",
".",
"is_authenticated",
"or",
"topics",
"is",
"None",
"or",
"not",
"len",
"(",
"topics",
")",
":",
"return",
"unread_topics",
"# A topic can be unread if a track for itself exists with a mark time that",
"# is less important than its update date.",
"topic_ids",
"=",
"[",
"topic",
".",
"id",
"for",
"topic",
"in",
"topics",
"]",
"topic_tracks",
"=",
"TopicReadTrack",
".",
"objects",
".",
"filter",
"(",
"topic__in",
"=",
"topic_ids",
",",
"user",
"=",
"user",
")",
"tracked_topics",
"=",
"dict",
"(",
"topic_tracks",
".",
"values_list",
"(",
"'topic__pk'",
",",
"'mark_time'",
")",
")",
"if",
"tracked_topics",
":",
"for",
"topic",
"in",
"topics",
":",
"topic_last_modification_date",
"=",
"topic",
".",
"last_post_on",
"or",
"topic",
".",
"created",
"if",
"(",
"topic",
".",
"id",
"in",
"tracked_topics",
".",
"keys",
"(",
")",
"and",
"topic_last_modification_date",
">",
"tracked_topics",
"[",
"topic",
".",
"id",
"]",
")",
":",
"unread_topics",
".",
"append",
"(",
"topic",
")",
"# A topic can be unread if a track for its associated forum exists with",
"# a mark time that is less important than its creation or update date.",
"forum_ids",
"=",
"[",
"topic",
".",
"forum_id",
"for",
"topic",
"in",
"topics",
"]",
"forum_tracks",
"=",
"ForumReadTrack",
".",
"objects",
".",
"filter",
"(",
"forum_id__in",
"=",
"forum_ids",
",",
"user",
"=",
"user",
")",
"tracked_forums",
"=",
"dict",
"(",
"forum_tracks",
".",
"values_list",
"(",
"'forum__pk'",
",",
"'mark_time'",
")",
")",
"if",
"tracked_forums",
":",
"for",
"topic",
"in",
"topics",
":",
"topic_last_modification_date",
"=",
"topic",
".",
"last_post_on",
"or",
"topic",
".",
"created",
"if",
"(",
"(",
"topic",
".",
"forum_id",
"in",
"tracked_forums",
".",
"keys",
"(",
")",
"and",
"topic",
".",
"id",
"not",
"in",
"tracked_topics",
")",
"and",
"topic_last_modification_date",
">",
"tracked_forums",
"[",
"topic",
".",
"forum_id",
"]",
")",
":",
"unread_topics",
".",
"append",
"(",
"topic",
")",
"# A topic can be unread if no tracks exists for it",
"for",
"topic",
"in",
"topics",
":",
"if",
"topic",
".",
"forum_id",
"not",
"in",
"tracked_forums",
"and",
"topic",
".",
"id",
"not",
"in",
"tracked_topics",
":",
"unread_topics",
".",
"append",
"(",
"topic",
")",
"return",
"list",
"(",
"set",
"(",
"unread_topics",
")",
")"
] | Returns a list of unread topics for the given user from a given set of topics. | [
"Returns",
"a",
"list",
"of",
"unread",
"topics",
"for",
"the",
"given",
"user",
"from",
"a",
"given",
"set",
"of",
"topics",
"."
] | train | https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_tracking/handler.py#L54-L98 | 0.005928 |
bristosoft/financial | finance.py | npv | def npv(ico, nci, r, n):
""" This capital budgeting function computes the net present
value on a cash flow generating investment.
ico = Initial Capital Outlay
nci = net cash inflows per period
r = discounted rate
n = number of periods
Example: npv(100000, 15000, .03, 10)
"""
pv_nci = 0
for x in range(n):
pv_nci = pv_nci + (nci/((1 + r) ** (x + 1)))
return pv_nci - ico | python | def npv(ico, nci, r, n):
""" This capital budgeting function computes the net present
value on a cash flow generating investment.
ico = Initial Capital Outlay
nci = net cash inflows per period
r = discounted rate
n = number of periods
Example: npv(100000, 15000, .03, 10)
"""
pv_nci = 0
for x in range(n):
pv_nci = pv_nci + (nci/((1 + r) ** (x + 1)))
return pv_nci - ico | [
"def",
"npv",
"(",
"ico",
",",
"nci",
",",
"r",
",",
"n",
")",
":",
"pv_nci",
"=",
"0",
"for",
"x",
"in",
"range",
"(",
"n",
")",
":",
"pv_nci",
"=",
"pv_nci",
"+",
"(",
"nci",
"/",
"(",
"(",
"1",
"+",
"r",
")",
"**",
"(",
"x",
"+",
"1",
")",
")",
")",
"return",
"pv_nci",
"-",
"ico"
] | This capital budgeting function computes the net present
value on a cash flow generating investment.
ico = Initial Capital Outlay
nci = net cash inflows per period
r = discounted rate
n = number of periods
Example: npv(100000, 15000, .03, 10) | [
"This",
"capital",
"budgeting",
"function",
"computes",
"the",
"net",
"present",
"value",
"on",
"a",
"cash",
"flow",
"generating",
"investment",
".",
"ico",
"=",
"Initial",
"Capital",
"Outlay",
"nci",
"=",
"net",
"cash",
"inflows",
"per",
"period",
"r",
"=",
"discounted",
"rate",
"n",
"=",
"number",
"of",
"periods",
"Example",
":",
"npv",
"(",
"100000",
"15000",
".",
"03",
"10",
")"
] | train | https://github.com/bristosoft/financial/blob/382c4fef610d67777d7109d9d0ae230ab67ca20f/finance.py#L136-L148 | 0.004706 |
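Worked check of the docstring's own example -- ten yearly inflows of 15,000 discounted at 3% against a 100,000 outlay -- first by hand, then through the function:

pv_inflows = sum(15000 / (1 + 0.03) ** t for t in range(1, 11))
print(round(pv_inflows - 100000, 2))           # about 27953
print(round(npv(100000, 15000, .03, 10), 2))   # same value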
datadotworld/data.world-py | datadotworld/datadotworld.py | DataDotWorld.load_dataset | def load_dataset(self, dataset_key, force_update=False, auto_update=False):
"""Load a dataset from the local filesystem, downloading it from
data.world first, if necessary.
This function returns an object of type `LocalDataset`. The object
allows access to metadata via its `describe()` method and to all the
data via three properties `raw_data`, `tables` and `dataframes`, all
of which are mappings (dict-like structures).
:param dataset_key: Dataset identifier, in the form of owner/id or of
a url
:type dataset_key: str
:param force_update: Flag, indicating if a new copy of the dataset
should be downloaded replacing any previously downloaded copy
(Default value = False)
:type force_update: bool
:param auto_update: Flag, indicating that dataset be updated to the
latest version
:type auto_update: bool
:returns: The object representing the dataset
:rtype: LocalDataset
:raises RestApiError: If a server error occurs
"""
owner_id, dataset_id = parse_dataset_key(dataset_key)
cache_dir = path.join(self._config.cache_dir, owner_id, dataset_id,
'latest')
backup_dir = None
if path.isdir(cache_dir) and force_update:
backup_dir = path.join(self._config.cache_dir, owner_id,
dataset_id, 'backup')
move_cache_dir_to_backup_dir(backup_dir, cache_dir)
descriptor_file = path.join(cache_dir, 'datapackage.json')
if not path.isfile(descriptor_file):
try:
descriptor_file = self.api_client.download_datapackage(
dataset_key, cache_dir)
except RestApiError as e:
if backup_dir is not None:
shutil.move(backup_dir, cache_dir)
warn('Unable to download datapackage ({}). '
'Loading previously saved version.'.format(e.reason))
else:
raise
else:
try:
dataset_info = self.api_client.get_dataset(dataset_key)
except RestApiError as e:
return LocalDataset(descriptor_file)
last_modified = datetime.strptime(dataset_info['updated'],
'%Y-%m-%dT%H:%M:%S.%fZ')
if (last_modified > datetime.utcfromtimestamp(
path.getmtime(str(descriptor_file)))):
if auto_update:
try:
backup_dir = path.join(self._config.cache_dir,
owner_id, dataset_id,
'backup')
move_cache_dir_to_backup_dir(backup_dir,
cache_dir)
descriptor_file = self.api_client. \
download_datapackage(dataset_key, cache_dir)
except RestApiError as e:
if backup_dir is not None:
shutil.move(backup_dir, cache_dir)
warn('Unable to auto update datapackage ({}). '
'Loading previously saved version.'
.format(e.reason))
else:
raise
else:
filterwarnings('always',
message='You are using an outdated copy')
warn('You are using an outdated copy of {}. '
'If you wish to use the latest version, call this '
'function with the argument '
'auto_update=True or '
'force_update=True'.format(dataset_key))
if backup_dir is not None:
shutil.rmtree(backup_dir, ignore_errors=True)
return LocalDataset(descriptor_file) | python | def load_dataset(self, dataset_key, force_update=False, auto_update=False):
"""Load a dataset from the local filesystem, downloading it from
data.world first, if necessary.
This function returns an object of type `LocalDataset`. The object
allows access to metadata via its `describe()` method and to all the
data via three properties `raw_data`, `tables` and `dataframes`, all
of which are mappings (dict-like structures).
:param dataset_key: Dataset identifier, in the form of owner/id or of
a url
:type dataset_key: str
:param force_update: Flag, indicating if a new copy of the dataset
should be downloaded replacing any previously downloaded copy
(Default value = False)
:type force_update: bool
:param auto_update: Flag, indicating that dataset be updated to the
latest version
:type auto_update: bool
:returns: The object representing the dataset
:rtype: LocalDataset
:raises RestApiError: If a server error occurs
"""
owner_id, dataset_id = parse_dataset_key(dataset_key)
cache_dir = path.join(self._config.cache_dir, owner_id, dataset_id,
'latest')
backup_dir = None
if path.isdir(cache_dir) and force_update:
backup_dir = path.join(self._config.cache_dir, owner_id,
dataset_id, 'backup')
move_cache_dir_to_backup_dir(backup_dir, cache_dir)
descriptor_file = path.join(cache_dir, 'datapackage.json')
if not path.isfile(descriptor_file):
try:
descriptor_file = self.api_client.download_datapackage(
dataset_key, cache_dir)
except RestApiError as e:
if backup_dir is not None:
shutil.move(backup_dir, cache_dir)
warn('Unable to download datapackage ({}). '
'Loading previously saved version.'.format(e.reason))
else:
raise
else:
try:
dataset_info = self.api_client.get_dataset(dataset_key)
except RestApiError as e:
return LocalDataset(descriptor_file)
last_modified = datetime.strptime(dataset_info['updated'],
'%Y-%m-%dT%H:%M:%S.%fZ')
if (last_modified > datetime.utcfromtimestamp(
path.getmtime(str(descriptor_file)))):
if auto_update:
try:
backup_dir = path.join(self._config.cache_dir,
owner_id, dataset_id,
'backup')
move_cache_dir_to_backup_dir(backup_dir,
cache_dir)
descriptor_file = self.api_client. \
download_datapackage(dataset_key, cache_dir)
except RestApiError as e:
if backup_dir is not None:
shutil.move(backup_dir, cache_dir)
warn('Unable to auto update datapackage ({}). '
'Loading previously saved version.'
.format(e.reason))
else:
raise
else:
filterwarnings('always',
message='You are using an outdated copy')
warn('You are using an outdated copy of {}. '
'If you wish to use the latest version, call this '
'function with the argument '
'auto_update=True or '
'force_update=True'.format(dataset_key))
if backup_dir is not None:
shutil.rmtree(backup_dir, ignore_errors=True)
return LocalDataset(descriptor_file) | [
"def",
"load_dataset",
"(",
"self",
",",
"dataset_key",
",",
"force_update",
"=",
"False",
",",
"auto_update",
"=",
"False",
")",
":",
"owner_id",
",",
"dataset_id",
"=",
"parse_dataset_key",
"(",
"dataset_key",
")",
"cache_dir",
"=",
"path",
".",
"join",
"(",
"self",
".",
"_config",
".",
"cache_dir",
",",
"owner_id",
",",
"dataset_id",
",",
"'latest'",
")",
"backup_dir",
"=",
"None",
"if",
"path",
".",
"isdir",
"(",
"cache_dir",
")",
"and",
"force_update",
":",
"backup_dir",
"=",
"path",
".",
"join",
"(",
"self",
".",
"_config",
".",
"cache_dir",
",",
"owner_id",
",",
"dataset_id",
",",
"'backup'",
")",
"move_cache_dir_to_backup_dir",
"(",
"backup_dir",
",",
"cache_dir",
")",
"descriptor_file",
"=",
"path",
".",
"join",
"(",
"cache_dir",
",",
"'datapackage.json'",
")",
"if",
"not",
"path",
".",
"isfile",
"(",
"descriptor_file",
")",
":",
"try",
":",
"descriptor_file",
"=",
"self",
".",
"api_client",
".",
"download_datapackage",
"(",
"dataset_key",
",",
"cache_dir",
")",
"except",
"RestApiError",
"as",
"e",
":",
"if",
"backup_dir",
"is",
"not",
"None",
":",
"shutil",
".",
"move",
"(",
"backup_dir",
",",
"cache_dir",
")",
"warn",
"(",
"'Unable to download datapackage ({}). '",
"'Loading previously saved version.'",
".",
"format",
"(",
"e",
".",
"reason",
")",
")",
"else",
":",
"raise",
"else",
":",
"try",
":",
"dataset_info",
"=",
"self",
".",
"api_client",
".",
"get_dataset",
"(",
"dataset_key",
")",
"except",
"RestApiError",
"as",
"e",
":",
"return",
"LocalDataset",
"(",
"descriptor_file",
")",
"last_modified",
"=",
"datetime",
".",
"strptime",
"(",
"dataset_info",
"[",
"'updated'",
"]",
",",
"'%Y-%m-%dT%H:%M:%S.%fZ'",
")",
"if",
"(",
"last_modified",
">",
"datetime",
".",
"utcfromtimestamp",
"(",
"path",
".",
"getmtime",
"(",
"str",
"(",
"descriptor_file",
")",
")",
")",
")",
":",
"if",
"auto_update",
":",
"try",
":",
"backup_dir",
"=",
"path",
".",
"join",
"(",
"self",
".",
"_config",
".",
"cache_dir",
",",
"owner_id",
",",
"dataset_id",
",",
"'backup'",
")",
"move_cache_dir_to_backup_dir",
"(",
"backup_dir",
",",
"cache_dir",
")",
"descriptor_file",
"=",
"self",
".",
"api_client",
".",
"download_datapackage",
"(",
"dataset_key",
",",
"cache_dir",
")",
"except",
"RestApiError",
"as",
"e",
":",
"if",
"backup_dir",
"is",
"not",
"None",
":",
"shutil",
".",
"move",
"(",
"backup_dir",
",",
"cache_dir",
")",
"warn",
"(",
"'Unable to auto update datapackage ({}). '",
"'Loading previously saved version.'",
".",
"format",
"(",
"e",
".",
"reason",
")",
")",
"else",
":",
"raise",
"else",
":",
"filterwarnings",
"(",
"'always'",
",",
"message",
"=",
"'You are using an outdated copy'",
")",
"warn",
"(",
"'You are using an outdated copy of {}. '",
"'If you wish to use the latest version, call this '",
"'function with the argument '",
"'auto_update=True or '",
"'force_update=True'",
".",
"format",
"(",
"dataset_key",
")",
")",
"if",
"backup_dir",
"is",
"not",
"None",
":",
"shutil",
".",
"rmtree",
"(",
"backup_dir",
",",
"ignore_errors",
"=",
"True",
")",
"return",
"LocalDataset",
"(",
"descriptor_file",
")"
] | Load a dataset from the local filesystem, downloading it from
data.world first, if necessary.
This function returns an object of type `LocalDataset`. The object
allows access to metadata via its `describe()` method and to all the
data via three properties `raw_data`, `tables` and `dataframes`, all
of which are mappings (dict-like structures).
:param dataset_key: Dataset identifier, in the form of owner/id or of
a url
:type dataset_key: str
:param force_update: Flag, indicating if a new copy of the dataset
should be downloaded replacing any previously downloaded copy
(Default value = False)
:type force_update: bool
:param auto_update: Flag, indicating that dataset be updated to the
latest version
:type auto_update: bool
:returns: The object representing the dataset
:rtype: LocalDataset
:raises RestApiError: If a server error occurs | [
"Load",
"a",
"dataset",
"from",
"the",
"local",
"filesystem",
"downloading",
"it",
"from",
"data",
".",
"world",
"first",
"if",
"necessary",
"."
] | train | https://github.com/datadotworld/data.world-py/blob/ffaeb115f358731ab0b805b0c43b7ff2e3cf0a77/datadotworld/datadotworld.py#L116-L199 | 0.000486 |
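Hedged usage sketch for load_dataset; it assumes a configured data.world API token, and the dataset key is illustrative.

import datadotworld as dw

dataset = dw.load_dataset('jonloyens/an-intro-to-dataworld-dataset')
print(dataset.describe()['name'])
print(list(dataset.dataframes))   # table names, each mapped to a pandas DataFrame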
Shizmob/pydle | pydle/features/isupport.py | ISUPPORTSupport.on_isupport_maxlist | async def on_isupport_maxlist(self, value):
""" Limits on channel modes involving lists. """
self._list_limits = {}
for entry in value.split(','):
modes, limit = entry.split(':')
# Assign limit to mode group and add lookup entry for mode.
self._list_limits[frozenset(modes)] = int(limit)
for mode in modes:
self._list_limit_groups[mode] = frozenset(modes) | python | async def on_isupport_maxlist(self, value):
""" Limits on channel modes involving lists. """
self._list_limits = {}
for entry in value.split(','):
modes, limit = entry.split(':')
# Assign limit to mode group and add lookup entry for mode.
self._list_limits[frozenset(modes)] = int(limit)
for mode in modes:
self._list_limit_groups[mode] = frozenset(modes) | [
"async",
"def",
"on_isupport_maxlist",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_list_limits",
"=",
"{",
"}",
"for",
"entry",
"in",
"value",
".",
"split",
"(",
"','",
")",
":",
"modes",
",",
"limit",
"=",
"entry",
".",
"split",
"(",
"':'",
")",
"# Assign limit to mode group and add lookup entry for mode.",
"self",
".",
"_list_limits",
"[",
"frozenset",
"(",
"modes",
")",
"]",
"=",
"int",
"(",
"limit",
")",
"for",
"mode",
"in",
"modes",
":",
"self",
".",
"_list_limit_groups",
"[",
"mode",
"]",
"=",
"frozenset",
"(",
"modes",
")"
] | Limits on channel modes involving lists. | [
"Limits",
"on",
"channel",
"modes",
"involving",
"lists",
"."
] | train | https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/features/isupport.py#L162-L172 | 0.004494 |
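Standalone illustration of the same MAXLIST parsing outside the class, for a value such as 'beI:100,q:50':

value = 'beI:100,q:50'
list_limits, list_limit_groups = {}, {}
for entry in value.split(','):
    modes, limit = entry.split(':')
    list_limits[frozenset(modes)] = int(limit)
    for mode in modes:
        list_limit_groups[mode] = frozenset(modes)
print(list_limits[list_limit_groups['b']])   # 100
print(list_limits[list_limit_groups['q']])   # 50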
pandas-dev/pandas | pandas/core/generic.py | NDFrame._to_dict_of_blocks | def _to_dict_of_blocks(self, copy=True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
Internal ONLY
"""
return {k: self._constructor(v).__finalize__(self)
for k, v, in self._data.to_dict(copy=copy).items()} | python | def _to_dict_of_blocks(self, copy=True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
Internal ONLY
"""
return {k: self._constructor(v).__finalize__(self)
for k, v, in self._data.to_dict(copy=copy).items()} | [
"def",
"_to_dict_of_blocks",
"(",
"self",
",",
"copy",
"=",
"True",
")",
":",
"return",
"{",
"k",
":",
"self",
".",
"_constructor",
"(",
"v",
")",
".",
"__finalize__",
"(",
"self",
")",
"for",
"k",
",",
"v",
",",
"in",
"self",
".",
"_data",
".",
"to_dict",
"(",
"copy",
"=",
"copy",
")",
".",
"items",
"(",
")",
"}"
] | Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
Internal ONLY | [
"Return",
"a",
"dict",
"of",
"dtype",
"-",
">",
"Constructor",
"Types",
"that",
"each",
"is",
"a",
"homogeneous",
"dtype",
"."
] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5604-L5612 | 0.006494 |
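Small illustration of the dtype -> homogeneous-frame mapping the method returns; it is an internal pandas helper, so behaviour may differ across versions.

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [1.5, 2.5]})
blocks = df._to_dict_of_blocks()
print(sorted(blocks))                  # ['float64', 'int64']
print(list(blocks['int64'].columns))   # ['a']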
jermnelson/flask-fedora-commons | flask_fedora_commons/__init__.py | copy_graph | def copy_graph(subject, existing_graph):
"""Function takes a subject and an existing graph, returns a new graph with
all predicates and objects of the existing graph copied to the new_graph with
subject as the new subject
Args:
subject(rdflib.URIRef): A URIRef subject
existing_graph(rdflib.Graph): A rdflib.Graph
Returns:
rdflib.Graph
"""
new_graph = rdflib.Graph()
for predicate, object_ in existing_graph.predicate_objects():
new_graph.add((subject, predicate, object_))
return new_graph | python | def copy_graph(subject, existing_graph):
"""Function takes a subject and an existing graph, returns a new graph with
all predicates and objects of the existing graph copied to the new_graph with
subject as the new subject
Args:
subject(rdflib.URIRef): A URIRef subject
existing_graph(rdflib.Graph): A rdflib.Graph
Returns:
rdflib.Graph
"""
new_graph = rdflib.Graph()
for predicate, object_ in existing_graph.predicate_objects():
new_graph.add((subject, predicate, object_))
return new_graph | [
"def",
"copy_graph",
"(",
"subject",
",",
"existing_graph",
")",
":",
"new_graph",
"=",
"rdflib",
".",
"Graph",
"(",
")",
"for",
"predicate",
",",
"object_",
"in",
"existing_graph",
".",
"predicate_objects",
"(",
")",
":",
"new_graph",
".",
"add",
"(",
"(",
"subject",
",",
"predicate",
",",
"object_",
")",
")",
"return",
"new_graph"
] | Function takes a subject and an existing graph, returns a new graph with
all predicates and objects of the existing graph copied to the new_graph with
subject as the new subject
Args:
subject(rdflib.URIRef): A URIRef subject
existing_graph(rdflib.Graph): A rdflib.Graph
Returns:
rdflib.Graph | [
"Function",
"takes",
"a",
"subject",
"and",
"an",
"existing",
"graph",
"returns",
"a",
"new",
"graph",
"with",
"all",
"predicate",
"and",
"objects",
"of",
"the",
"existing",
"graph",
"copied",
"to",
"the",
"new_graph",
"with",
"subject",
"as",
"the",
"new",
"subject"
] | train | https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L81-L96 | 0.003578 |
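Minimal self-contained use of copy_graph with rdflib; the URIs are placeholders.

import rdflib

g = rdflib.Graph()
old = rdflib.URIRef('http://example.org/old')
g.add((old, rdflib.RDFS.label, rdflib.Literal('demo')))

new_subject = rdflib.URIRef('http://example.org/new')
copied = copy_graph(new_subject, g)   # every (predicate, object) re-attached to new_subject
print(len(copied))                    # 1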
pixelogik/NearPy | nearpy/distances/euclidean.py | EuclideanDistance.distance | def distance(self, x, y):
"""
Computes distance measure between vectors x and y. Returns float.
"""
if scipy.sparse.issparse(x):
return numpy.linalg.norm((x-y).toarray().ravel())
else:
return numpy.linalg.norm(x-y) | python | def distance(self, x, y):
"""
Computes distance measure between vectors x and y. Returns float.
"""
if scipy.sparse.issparse(x):
return numpy.linalg.norm((x-y).toarray().ravel())
else:
return numpy.linalg.norm(x-y) | [
"def",
"distance",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"if",
"scipy",
".",
"sparse",
".",
"issparse",
"(",
"x",
")",
":",
"return",
"numpy",
".",
"linalg",
".",
"norm",
"(",
"(",
"x",
"-",
"y",
")",
".",
"toarray",
"(",
")",
".",
"ravel",
"(",
")",
")",
"else",
":",
"return",
"numpy",
".",
"linalg",
".",
"norm",
"(",
"x",
"-",
"y",
")"
] | Computes distance measure between vectors x and y. Returns float. | [
"Computes",
"distance",
"measure",
"between",
"vectors",
"x",
"and",
"y",
".",
"Returns",
"float",
"."
] | train | https://github.com/pixelogik/NearPy/blob/1b534b864d320d875508e95cd2b76b6d8c07a90b/nearpy/distances/euclidean.py#L32-L39 | 0.007194 |
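Quick check of the dense branch (the sparse branch gives the same result after .toarray().ravel()):

import numpy
from nearpy.distances import EuclideanDistance

dist = EuclideanDistance()
print(dist.distance(numpy.array([0.0, 3.0]), numpy.array([4.0, 0.0])))   # 5.0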
openthread/openthread | tools/harness-thci/OpenThread.py | OpenThread.setUdpJoinerPort | def setUdpJoinerPort(self, portNumber):
"""set Joiner UDP Port
Args:
portNumber: Joiner UDP Port number
Returns:
True: successful to set Joiner UDP Port
False: fail to set Joiner UDP Port
"""
print '%s call setUdpJoinerPort' % self.port
cmd = 'joinerport %d' % portNumber
print cmd
return self.__sendCommand(cmd)[0] == 'Done' | python | def setUdpJoinerPort(self, portNumber):
"""set Joiner UDP Port
Args:
portNumber: Joiner UDP Port number
Returns:
True: successful to set Joiner UDP Port
False: fail to set Joiner UDP Port
"""
print '%s call setUdpJoinerPort' % self.port
cmd = 'joinerport %d' % portNumber
print cmd
return self.__sendCommand(cmd)[0] == 'Done' | [
"def",
"setUdpJoinerPort",
"(",
"self",
",",
"portNumber",
")",
":",
"print",
"'%s call setUdpJoinerPort'",
"%",
"self",
".",
"port",
"cmd",
"=",
"'joinerport %d'",
"%",
"portNumber",
"print",
"cmd",
"return",
"self",
".",
"__sendCommand",
"(",
"cmd",
")",
"[",
"0",
"]",
"==",
"'Done'"
] | set Joiner UDP Port
Args:
portNumber: Joiner UDP Port number
Returns:
True: successful to set Joiner UDP Port
False: fail to set Joiner UDP Port | [
"set",
"Joiner",
"UDP",
"Port"
] | train | https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread.py#L2567-L2580 | 0.004684 |
cmbruns/pyopenvr | src/openvr/__init__.py | IVRInput.getPoseActionData | def getPoseActionData(self, action, eOrigin, fPredictedSecondsFromNow, unActionDataSize, ulRestrictToDevice):
"""Reads the state of a pose action given its handle."""
fn = self.function_table.getPoseActionData
pActionData = InputPoseActionData_t()
result = fn(action, eOrigin, fPredictedSecondsFromNow, byref(pActionData), unActionDataSize, ulRestrictToDevice)
return result, pActionData | python | def getPoseActionData(self, action, eOrigin, fPredictedSecondsFromNow, unActionDataSize, ulRestrictToDevice):
"""Reads the state of a pose action given its handle."""
fn = self.function_table.getPoseActionData
pActionData = InputPoseActionData_t()
result = fn(action, eOrigin, fPredictedSecondsFromNow, byref(pActionData), unActionDataSize, ulRestrictToDevice)
return result, pActionData | [
"def",
"getPoseActionData",
"(",
"self",
",",
"action",
",",
"eOrigin",
",",
"fPredictedSecondsFromNow",
",",
"unActionDataSize",
",",
"ulRestrictToDevice",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"getPoseActionData",
"pActionData",
"=",
"InputPoseActionData_t",
"(",
")",
"result",
"=",
"fn",
"(",
"action",
",",
"eOrigin",
",",
"fPredictedSecondsFromNow",
",",
"byref",
"(",
"pActionData",
")",
",",
"unActionDataSize",
",",
"ulRestrictToDevice",
")",
"return",
"result",
",",
"pActionData"
] | Reads the state of a pose action given its handle. | [
"Reads",
"the",
"state",
"of",
"a",
"pose",
"action",
"given",
"its",
"handle",
"."
] | train | https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L6012-L6018 | 0.009346 |
MonsieurV/PiPocketGeiger | PiPocketGeiger/__init__.py | RadiationWatch.status | def status(self):
"""Return current readings, as a dictionary with:
duration -- the duration of the measurements, in seconds;
cpm -- the radiation count by minute;
uSvh -- the radiation dose, expressed in Sievert per hour (uSv/h);
uSvhError -- the incertitude for the radiation dose."""
minutes = min(self.duration, MAX_CPM_TIME) / 1000 / 60.0
cpm = self.count / minutes if minutes > 0 else 0
return dict(
duration=round(self.duration / 1000.0, 2),
cpm=round(cpm, 2),
uSvh=round(cpm / K_ALPHA, 3),
uSvhError=round(math.sqrt(self.count) / minutes / K_ALPHA, 3)
if minutes > 0
else 0,
) | python | def status(self):
"""Return current readings, as a dictionary with:
duration -- the duration of the measurements, in seconds;
cpm -- the radiation count by minute;
uSvh -- the radiation dose, expressed in Sievert per hour (uSv/h);
uSvhError -- the incertitude for the radiation dose."""
minutes = min(self.duration, MAX_CPM_TIME) / 1000 / 60.0
cpm = self.count / minutes if minutes > 0 else 0
return dict(
duration=round(self.duration / 1000.0, 2),
cpm=round(cpm, 2),
uSvh=round(cpm / K_ALPHA, 3),
uSvhError=round(math.sqrt(self.count) / minutes / K_ALPHA, 3)
if minutes > 0
else 0,
) | [
"def",
"status",
"(",
"self",
")",
":",
"minutes",
"=",
"min",
"(",
"self",
".",
"duration",
",",
"MAX_CPM_TIME",
")",
"/",
"1000",
"/",
"60.0",
"cpm",
"=",
"self",
".",
"count",
"/",
"minutes",
"if",
"minutes",
">",
"0",
"else",
"0",
"return",
"dict",
"(",
"duration",
"=",
"round",
"(",
"self",
".",
"duration",
"/",
"1000.0",
",",
"2",
")",
",",
"cpm",
"=",
"round",
"(",
"cpm",
",",
"2",
")",
",",
"uSvh",
"=",
"round",
"(",
"cpm",
"/",
"K_ALPHA",
",",
"3",
")",
",",
"uSvhError",
"=",
"round",
"(",
"math",
".",
"sqrt",
"(",
"self",
".",
"count",
")",
"/",
"minutes",
"/",
"K_ALPHA",
",",
"3",
")",
"if",
"minutes",
">",
"0",
"else",
"0",
",",
")"
] | Return current readings, as a dictionary with:
duration -- the duration of the measurements, in seconds;
cpm -- the radiation count by minute;
uSvh -- the radiation dose, expressed in Sievert per hour (uSv/h);
uSvhError -- the incertitude for the radiation dose. | [
"Return",
"current",
"readings",
"as",
"a",
"dictionary",
"with",
":",
"duration",
"--",
"the",
"duration",
"of",
"the",
"measurements",
"in",
"seconds",
";",
"cpm",
"--",
"the",
"radiation",
"count",
"by",
"minute",
";",
"uSvh",
"--",
"the",
"radiation",
"dose",
"exprimed",
"in",
"Sievert",
"per",
"house",
"(",
"uSv",
"/",
"h",
")",
";",
"uSvhError",
"--",
"the",
"incertitude",
"for",
"the",
"radiation",
"dose",
"."
] | train | https://github.com/MonsieurV/PiPocketGeiger/blob/b0e7c303df46deeea3715fb8da3ebbefaf660f91/PiPocketGeiger/__init__.py#L60-L75 | 0.002688 |
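Back-of-envelope check of the conversions in status(), assuming K_ALPHA = 53.032 (the calibration constant the library uses to turn counts per minute into uSv/h) and ten minutes of counting:

import math

count, minutes, K_ALPHA = 120, 10.0, 53.032
cpm = count / minutes                                    # 12.0 counts per minute
print(round(cpm / K_ALPHA, 3))                           # dose rate, about 0.226 uSv/h
print(round(math.sqrt(count) / minutes / K_ALPHA, 3))    # statistical error, about 0.021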
roanuz/py-cricket | src/pycricket.py | RcaApp.get_overs_summary | def get_overs_summary(self, match_key):
"""
Calling Overs Summary API
Arg:
match_key: key of the match
Return:
json data
"""
overs_summary_url = self.api_path + "match/" + match_key + "/overs_summary/"
response = self.get_response(overs_summary_url)
return response | python | def get_overs_summary(self, match_key):
"""
Calling Overs Summary API
Arg:
match_key: key of the match
Return:
json data
"""
overs_summary_url = self.api_path + "match/" + match_key + "/overs_summary/"
response = self.get_response(overs_summary_url)
return response | [
"def",
"get_overs_summary",
"(",
"self",
",",
"match_key",
")",
":",
"overs_summary_url",
"=",
"self",
".",
"api_path",
"+",
"\"match/\"",
"+",
"match_key",
"+",
"\"/overs_summary/\"",
"response",
"=",
"self",
".",
"get_response",
"(",
"overs_summary_url",
")",
"return",
"response"
] | Calling Overs Summary API
Arg:
match_key: key of the match
Return:
json data | [
"Calling",
"Overs",
"Summary",
"API"
] | train | https://github.com/roanuz/py-cricket/blob/fa47fe2e92915fc58db38898213e974742af55d4/src/pycricket.py#L365-L376 | 0.008523 |
zeroSteiner/smoke-zephyr | smoke_zephyr/configuration.py | MemoryConfiguration.get | def get(self, item_name):
"""
Retrieve the value of an option.
:param str item_name: The name of the option to retrieve.
:return: The value of *item_name* in the configuration.
"""
if self.prefix:
item_name = self.prefix + self.seperator + item_name
item_names = item_name.split(self.seperator)
node = self._storage
for item_name in item_names:
node = node[item_name]
return node | python | def get(self, item_name):
"""
Retrieve the value of an option.
:param str item_name: The name of the option to retrieve.
:return: The value of *item_name* in the configuration.
"""
if self.prefix:
item_name = self.prefix + self.seperator + item_name
item_names = item_name.split(self.seperator)
node = self._storage
for item_name in item_names:
node = node[item_name]
return node | [
"def",
"get",
"(",
"self",
",",
"item_name",
")",
":",
"if",
"self",
".",
"prefix",
":",
"item_name",
"=",
"self",
".",
"prefix",
"+",
"self",
".",
"seperator",
"+",
"item_name",
"item_names",
"=",
"item_name",
".",
"split",
"(",
"self",
".",
"seperator",
")",
"node",
"=",
"self",
".",
"_storage",
"for",
"item_name",
"in",
"item_names",
":",
"node",
"=",
"node",
"[",
"item_name",
"]",
"return",
"node"
] | Retrieve the value of an option.
:param str item_name: The name of the option to retrieve.
:return: The value of *item_name* in the configuration. | [
"Retrieve",
"the",
"value",
"of",
"an",
"option",
"."
] | train | https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/configuration.py#L80-L93 | 0.034483 |
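Hedged usage sketch for MemoryConfiguration.get; that the constructor takes the backing dictionary and that the default separator is '.' are both assumptions about the surrounding class.

settings = {'server': {'host': '127.0.0.1', 'port': 8080}}
config = MemoryConfiguration(settings)   # assumed constructor signature
print(config.get('server.port'))         # 8080 -- each separator-delimited part walks one level down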
BlueBrain/NeuroM | neurom/core/_neuron.py | iter_sections | def iter_sections(neurites,
iterator_type=Tree.ipreorder,
neurite_filter=None,
neurite_order=NeuriteIter.FileOrder):
'''Iterator to the sections in a neurite, neuron or neuron population.
Parameters:
neurites: neuron, population, neurite, or iterable containing neurite objects
iterator_type: section iteration order within a given neurite. Must be one of:
Tree.ipreorder: Depth-first pre-order iteration of tree nodes
Tree.ipreorder: Depth-first post-order iteration of tree nodes
Tree.iupstream: Iterate from a tree node to the root nodes
Tree.ibifurcation_point: Iterator to bifurcation points
Tree.ileaf: Iterator to all leaves of a tree
neurite_filter: optional top level filter on properties of neurite neurite objects.
neurite_order (NeuriteIter): order upon which neurites should be iterated
- NeuriteIter.FileOrder: order of appearance in the file
- NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical
Examples:
Get the number of points in each section of all the axons in a neuron population
>>> import neurom as nm
>>> from neurom.core import iter_sections
>>> filter = lambda n : n.type == nm.AXON
>>> n_points = [len(s.points) for s in iter_sections(pop, neurite_filter=filter)]
'''
return chain.from_iterable(
iterator_type(neurite.root_node) for neurite in
iter_neurites(neurites, filt=neurite_filter, neurite_order=neurite_order)) | python | def iter_sections(neurites,
iterator_type=Tree.ipreorder,
neurite_filter=None,
neurite_order=NeuriteIter.FileOrder):
'''Iterator to the sections in a neurite, neuron or neuron population.
Parameters:
neurites: neuron, population, neurite, or iterable containing neurite objects
iterator_type: section iteration order within a given neurite. Must be one of:
Tree.ipreorder: Depth-first pre-order iteration of tree nodes
Tree.ipreorder: Depth-first post-order iteration of tree nodes
Tree.iupstream: Iterate from a tree node to the root nodes
Tree.ibifurcation_point: Iterator to bifurcation points
Tree.ileaf: Iterator to all leaves of a tree
neurite_filter: optional top level filter on properties of neurite neurite objects.
neurite_order (NeuriteIter): order upon which neurites should be iterated
- NeuriteIter.FileOrder: order of appearance in the file
- NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical
Examples:
Get the number of points in each section of all the axons in a neuron population
>>> import neurom as nm
>>> from neurom.core import iter_sections
>>> filter = lambda n : n.type == nm.AXON
>>> n_points = [len(s.points) for s in iter_sections(pop, neurite_filter=filter)]
'''
return chain.from_iterable(
iterator_type(neurite.root_node) for neurite in
iter_neurites(neurites, filt=neurite_filter, neurite_order=neurite_order)) | [
"def",
"iter_sections",
"(",
"neurites",
",",
"iterator_type",
"=",
"Tree",
".",
"ipreorder",
",",
"neurite_filter",
"=",
"None",
",",
"neurite_order",
"=",
"NeuriteIter",
".",
"FileOrder",
")",
":",
"return",
"chain",
".",
"from_iterable",
"(",
"iterator_type",
"(",
"neurite",
".",
"root_node",
")",
"for",
"neurite",
"in",
"iter_neurites",
"(",
"neurites",
",",
"filt",
"=",
"neurite_filter",
",",
"neurite_order",
"=",
"neurite_order",
")",
")"
] | Iterator to the sections in a neurite, neuron or neuron population.
Parameters:
neurites: neuron, population, neurite, or iterable containing neurite objects
iterator_type: section iteration order within a given neurite. Must be one of:
Tree.ipreorder: Depth-first pre-order iteration of tree nodes
Tree.ipreorder: Depth-first post-order iteration of tree nodes
Tree.iupstream: Iterate from a tree node to the root nodes
Tree.ibifurcation_point: Iterator to bifurcation points
Tree.ileaf: Iterator to all leaves of a tree
neurite_filter: optional top level filter on properties of neurite neurite objects.
neurite_order (NeuriteIter): order upon which neurites should be iterated
- NeuriteIter.FileOrder: order of appearance in the file
- NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical
Examples:
Get the number of points in each section of all the axons in a neuron population
>>> import neurom as nm
>>> from neurom.core import iter_sections
>>> filter = lambda n : n.type == nm.AXON
>>> n_points = [len(s.points) for s in iter_sections(pop, neurite_filter=filter)] | [
"Iterator",
"to",
"the",
"sections",
"in",
"a",
"neurite",
"neuron",
"or",
"neuron",
"population",
"."
] | train | https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/_neuron.py#L93-L126 | 0.005562 |
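Usage sketch along the lines of the docstring example; the morphology path is a placeholder.

import neurom as nm
from neurom.core import iter_sections

nrn = nm.load_neuron('path/to/neuron.swc')
axon_only = lambda n: n.type == nm.AXON
n_points = [len(s.points) for s in iter_sections(nrn, neurite_filter=axon_only)]
print(sum(n_points))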
log2timeline/dfvfs | dfvfs/lib/fvde.py | FVDEVolumeOpen | def FVDEVolumeOpen(fvde_volume, path_spec, file_object, key_chain):
"""Opens the FVDE volume using the path specification.
Args:
fvde_volume (pyfvde.volume): FVDE volume.
path_spec (PathSpec): path specification.
file_object (FileIO): file-like object.
key_chain (KeyChain): key chain.
"""
encrypted_root_plist = key_chain.GetCredential(
path_spec, 'encrypted_root_plist')
if encrypted_root_plist:
fvde_volume.read_encrypted_root_plist(encrypted_root_plist)
password = key_chain.GetCredential(path_spec, 'password')
if password:
fvde_volume.set_password(password)
recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')
if recovery_password:
fvde_volume.set_recovery_password(recovery_password)
fvde_volume.open_file_object(file_object) | python | def FVDEVolumeOpen(fvde_volume, path_spec, file_object, key_chain):
"""Opens the FVDE volume using the path specification.
Args:
fvde_volume (pyfvde.volume): FVDE volume.
path_spec (PathSpec): path specification.
file_object (FileIO): file-like object.
key_chain (KeyChain): key chain.
"""
encrypted_root_plist = key_chain.GetCredential(
path_spec, 'encrypted_root_plist')
if encrypted_root_plist:
fvde_volume.read_encrypted_root_plist(encrypted_root_plist)
password = key_chain.GetCredential(path_spec, 'password')
if password:
fvde_volume.set_password(password)
recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')
if recovery_password:
fvde_volume.set_recovery_password(recovery_password)
fvde_volume.open_file_object(file_object) | [
"def",
"FVDEVolumeOpen",
"(",
"fvde_volume",
",",
"path_spec",
",",
"file_object",
",",
"key_chain",
")",
":",
"encrypted_root_plist",
"=",
"key_chain",
".",
"GetCredential",
"(",
"path_spec",
",",
"'encrypted_root_plist'",
")",
"if",
"encrypted_root_plist",
":",
"fvde_volume",
".",
"read_encrypted_root_plist",
"(",
"encrypted_root_plist",
")",
"password",
"=",
"key_chain",
".",
"GetCredential",
"(",
"path_spec",
",",
"'password'",
")",
"if",
"password",
":",
"fvde_volume",
".",
"set_password",
"(",
"password",
")",
"recovery_password",
"=",
"key_chain",
".",
"GetCredential",
"(",
"path_spec",
",",
"'recovery_password'",
")",
"if",
"recovery_password",
":",
"fvde_volume",
".",
"set_recovery_password",
"(",
"recovery_password",
")",
"fvde_volume",
".",
"open_file_object",
"(",
"file_object",
")"
] | Opens the FVDE volume using the path specification.
Args:
fvde_volume (pyfvde.volume): FVDE volume.
path_spec (PathSpec): path specification.
file_object (FileIO): file-like object.
key_chain (KeyChain): key chain. | [
"Opens",
"the",
"FVDE",
"volume",
"using",
"the",
"path",
"specification",
"."
] | train | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/lib/fvde.py#L7-L29 | 0.011057 |
basler/pypylon | scripts/builddoxy2swig/doxy2swig/doxy2swig.py | Doxy2SWIG.handle_typical_memberdefs_no_overload | def handle_typical_memberdefs_no_overload(self, signature, memberdef_nodes):
"""Produce standard documentation for memberdef_nodes."""
for n in memberdef_nodes:
self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n'])
if self.with_function_signature:
self.add_line_with_subsequent_indent(self.get_function_signature(n))
self.subnode_parse(n, pieces=[], ignore=['definition', 'name'])
self.add_text(['";', '\n']) | python | def handle_typical_memberdefs_no_overload(self, signature, memberdef_nodes):
"""Produce standard documentation for memberdef_nodes."""
for n in memberdef_nodes:
self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n'])
if self.with_function_signature:
self.add_line_with_subsequent_indent(self.get_function_signature(n))
self.subnode_parse(n, pieces=[], ignore=['definition', 'name'])
self.add_text(['";', '\n']) | [
"def",
"handle_typical_memberdefs_no_overload",
"(",
"self",
",",
"signature",
",",
"memberdef_nodes",
")",
":",
"for",
"n",
"in",
"memberdef_nodes",
":",
"self",
".",
"add_text",
"(",
"[",
"'\\n'",
",",
"'%feature(\"docstring\") '",
",",
"signature",
",",
"' \"'",
",",
"'\\n'",
"]",
")",
"if",
"self",
".",
"with_function_signature",
":",
"self",
".",
"add_line_with_subsequent_indent",
"(",
"self",
".",
"get_function_signature",
"(",
"n",
")",
")",
"self",
".",
"subnode_parse",
"(",
"n",
",",
"pieces",
"=",
"[",
"]",
",",
"ignore",
"=",
"[",
"'definition'",
",",
"'name'",
"]",
")",
"self",
".",
"add_text",
"(",
"[",
"'\";'",
",",
"'\\n'",
"]",
")"
] | Produce standard documentation for memberdef_nodes. | [
"Produce",
"standard",
"documentation",
"for",
"memberdef_nodes",
"."
] | train | https://github.com/basler/pypylon/blob/d3510fa419b1c2b17f3f0b80a5fbb720c7b84008/scripts/builddoxy2swig/doxy2swig/doxy2swig.py#L431-L438 | 0.007921 |
thehq/python-crossbarhttp | crossbarhttp/crossbarhttp.py | Client.publish | def publish(self, topic, *args, **kwargs):
"""
Publishes the request to the bridge service
:param topic: The topic to publish to
:param args: The arguments
:param kwargs: The key/word arguments
:return: The ID of the publish
"""
assert topic is not None
params = {
"topic": topic,
"args": args,
"kwargs": kwargs
}
response = self._make_api_call("POST", self.url, json_params=params)
return response["id"] | python | def publish(self, topic, *args, **kwargs):
"""
Publishes the request to the bridge service
:param topic: The topic to publish to
:param args: The arguments
:param kwargs: The key/word arguments
:return: The ID of the publish
"""
assert topic is not None
params = {
"topic": topic,
"args": args,
"kwargs": kwargs
}
response = self._make_api_call("POST", self.url, json_params=params)
return response["id"] | [
"def",
"publish",
"(",
"self",
",",
"topic",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"topic",
"is",
"not",
"None",
"params",
"=",
"{",
"\"topic\"",
":",
"topic",
",",
"\"args\"",
":",
"args",
",",
"\"kwargs\"",
":",
"kwargs",
"}",
"response",
"=",
"self",
".",
"_make_api_call",
"(",
"\"POST\"",
",",
"self",
".",
"url",
",",
"json_params",
"=",
"params",
")",
"return",
"response",
"[",
"\"id\"",
"]"
] | Publishes the request to the bridge service
:param topic: The topic to publish to
:param args: The arguments
:param kwargs: The key/word arguments
:return: The ID of the publish | [
"Publishes",
"the",
"request",
"to",
"the",
"bridge",
"service",
":",
"param",
"topic",
":",
"The",
"topic",
"to",
"publish",
"to",
":",
"param",
"args",
":",
"The",
"arguments",
":",
"param",
"kwargs",
":",
"The",
"key",
"/",
"word",
"arguments",
":",
"return",
":",
"The",
"ID",
"of",
"the",
"publish"
] | train | https://github.com/thehq/python-crossbarhttp/blob/15af59969ca1ace3b11f2d94ef966e0017cb1c77/crossbarhttp/crossbarhttp.py#L76-L93 | 0.003717 |
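A minimal usage sketch for the publish call documented in the entry above. The bridge URL, topic name, payload and the import path are illustrative assumptions, not values taken from the source.

from crossbarhttp import Client  # import path assumed

# Hypothetical Crossbar.io HTTP bridge endpoint exposing the publisher service.
client = Client("http://127.0.0.1:8080/publish")
publish_id = client.publish("com.example.heartbeat", "ping", sequence=1)
print(publish_id)  # ID assigned to the publish, taken from response["id"]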
poppy-project/pypot | pypot/vrep/io.py | VrepIO.get_collision_handle | def get_collision_handle(self, collision):
""" Gets a vrep collisions handle. """
if collision not in self._object_handles:
h = self._get_collision_handle(collision)
self._object_handles[collision] = h
return self._object_handles[collision] | python | def get_collision_handle(self, collision):
""" Gets a vrep collisions handle. """
if collision not in self._object_handles:
h = self._get_collision_handle(collision)
self._object_handles[collision] = h
return self._object_handles[collision] | [
"def",
"get_collision_handle",
"(",
"self",
",",
"collision",
")",
":",
"if",
"collision",
"not",
"in",
"self",
".",
"_object_handles",
":",
"h",
"=",
"self",
".",
"_get_collision_handle",
"(",
"collision",
")",
"self",
".",
"_object_handles",
"[",
"collision",
"]",
"=",
"h",
"return",
"self",
".",
"_object_handles",
"[",
"collision",
"]"
] | Gets a vrep collisions handle. | [
"Gets",
"a",
"vrep",
"collisions",
"handle",
"."
] | train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L216-L222 | 0.00692 |
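A hedged call sketch for the handle lookup above; it assumes a V-REP simulator reachable on the default local port and a collision object with the given (hypothetical) name defined in the scene.

from pypot.vrep.io import VrepIO

vrep_io = VrepIO(vrep_host='127.0.0.1', vrep_port=19997)
handle = vrep_io.get_collision_handle('head_collision')  # hypothetical collision name
print(handle)  # repeated lookups for the same name are served from the handle cache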
aeguana/PyFileMaker | PyFileMaker/FMServer.py | FMServer._preFind | def _preFind(self, WHAT={}, SORT=[], SKIP=None, MAX=None, LOP='AND'):
"""This function will process attributtes for all -find* commands."""
if hasattr(WHAT, '_modified'):
self._addDBParam('RECORDID', WHAT.RECORDID)
elif type(WHAT)==dict:
for key in WHAT:
self._addDBParam(key, WHAT[key])
else:
raise FMError, 'Python Runtime: Object type (%s) given to on of function doFind* as argument WHAT cannot be used.' % type(WHAT)
for key in SORT:
self._addSortParam(key, SORT[key])
if SKIP: self._setSkipRecords(SKIP)
if MAX: self._setMaxRecords(MAX)
if LOP: self._setLogicalOperator(LOP)
if self._layout == '':
raise FMError, 'No layout was selected' | python | def _preFind(self, WHAT={}, SORT=[], SKIP=None, MAX=None, LOP='AND'):
"""This function will process attributtes for all -find* commands."""
if hasattr(WHAT, '_modified'):
self._addDBParam('RECORDID', WHAT.RECORDID)
elif type(WHAT)==dict:
for key in WHAT:
self._addDBParam(key, WHAT[key])
else:
raise FMError, 'Python Runtime: Object type (%s) given to on of function doFind* as argument WHAT cannot be used.' % type(WHAT)
for key in SORT:
self._addSortParam(key, SORT[key])
if SKIP: self._setSkipRecords(SKIP)
if MAX: self._setMaxRecords(MAX)
if LOP: self._setLogicalOperator(LOP)
if self._layout == '':
raise FMError, 'No layout was selected' | [
"def",
"_preFind",
"(",
"self",
",",
"WHAT",
"=",
"{",
"}",
",",
"SORT",
"=",
"[",
"]",
",",
"SKIP",
"=",
"None",
",",
"MAX",
"=",
"None",
",",
"LOP",
"=",
"'AND'",
")",
":",
"if",
"hasattr",
"(",
"WHAT",
",",
"'_modified'",
")",
":",
"self",
".",
"_addDBParam",
"(",
"'RECORDID'",
",",
"WHAT",
".",
"RECORDID",
")",
"elif",
"type",
"(",
"WHAT",
")",
"==",
"dict",
":",
"for",
"key",
"in",
"WHAT",
":",
"self",
".",
"_addDBParam",
"(",
"key",
",",
"WHAT",
"[",
"key",
"]",
")",
"else",
":",
"raise",
"FMError",
",",
"'Python Runtime: Object type (%s) given to on of function doFind* as argument WHAT cannot be used.'",
"%",
"type",
"(",
"WHAT",
")",
"for",
"key",
"in",
"SORT",
":",
"self",
".",
"_addSortParam",
"(",
"key",
",",
"SORT",
"[",
"key",
"]",
")",
"if",
"SKIP",
":",
"self",
".",
"_setSkipRecords",
"(",
"SKIP",
")",
"if",
"MAX",
":",
"self",
".",
"_setMaxRecords",
"(",
"MAX",
")",
"if",
"LOP",
":",
"self",
".",
"_setLogicalOperator",
"(",
"LOP",
")",
"if",
"self",
".",
"_layout",
"==",
"''",
":",
"raise",
"FMError",
",",
"'No layout was selected'"
] | This function will process attributes for all -find* commands. | [
"This",
"function",
"will",
"process",
"attributtes",
"for",
"all",
"-",
"find",
"*",
"commands",
"."
] | train | https://github.com/aeguana/PyFileMaker/blob/ef269b52a97e329d91da3c4851ddac800d7fd7e6/PyFileMaker/FMServer.py#L399-L418 | 0.035088 |
Autodesk/aomi | aomi/seed_action.py | diff | def diff(vault_client, opt):
"""Derive a comparison between what is represented in the Secretfile
and what is actually live on a Vault instance"""
if opt.thaw_from:
opt.secrets = tempfile.mkdtemp('aomi-thaw')
auto_thaw(vault_client, opt)
ctx = Context.load(get_secretfile(opt), opt) \
.fetch(vault_client)
for backend in ctx.mounts():
diff_a_thing(backend, opt)
for resource in ctx.resources():
diff_a_thing(resource, opt)
if opt.thaw_from:
rmtree(opt.secrets) | python | def diff(vault_client, opt):
"""Derive a comparison between what is represented in the Secretfile
and what is actually live on a Vault instance"""
if opt.thaw_from:
opt.secrets = tempfile.mkdtemp('aomi-thaw')
auto_thaw(vault_client, opt)
ctx = Context.load(get_secretfile(opt), opt) \
.fetch(vault_client)
for backend in ctx.mounts():
diff_a_thing(backend, opt)
for resource in ctx.resources():
diff_a_thing(resource, opt)
if opt.thaw_from:
rmtree(opt.secrets) | [
"def",
"diff",
"(",
"vault_client",
",",
"opt",
")",
":",
"if",
"opt",
".",
"thaw_from",
":",
"opt",
".",
"secrets",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"'aomi-thaw'",
")",
"auto_thaw",
"(",
"vault_client",
",",
"opt",
")",
"ctx",
"=",
"Context",
".",
"load",
"(",
"get_secretfile",
"(",
"opt",
")",
",",
"opt",
")",
".",
"fetch",
"(",
"vault_client",
")",
"for",
"backend",
"in",
"ctx",
".",
"mounts",
"(",
")",
":",
"diff_a_thing",
"(",
"backend",
",",
"opt",
")",
"for",
"resource",
"in",
"ctx",
".",
"resources",
"(",
")",
":",
"diff_a_thing",
"(",
"resource",
",",
"opt",
")",
"if",
"opt",
".",
"thaw_from",
":",
"rmtree",
"(",
"opt",
".",
"secrets",
")"
] | Derive a comparison between what is represented in the Secretfile
and what is actually live on a Vault instance | [
"Derive",
"a",
"comparison",
"between",
"what",
"is",
"represented",
"in",
"the",
"Secretfile",
"and",
"what",
"is",
"actually",
"live",
"on",
"a",
"Vault",
"instance"
] | train | https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/seed_action.py#L200-L217 | 0.001821 |
grst/geos | geos/kml.py | kml_element_name | def kml_element_name(grid_coords, elem_id="KML"):
"""
Create a unique element name for KML
Args:
grid_coords (GridCoordinate):
elem_id (str):
>>> kml_element_name(GridCoordinate(zoom=5, x=42, y=60), elem_id="NL")
'NL_5_42_60'
"""
return "_".join(str(x) for x in [elem_id, grid_coords.zoom, grid_coords.x, grid_coords.y]) | python | def kml_element_name(grid_coords, elem_id="KML"):
"""
Create a unique element name for KML
Args:
grid_coords (GridCoordinate):
elem_id (str):
>>> kml_element_name(GridCoordinate(zoom=5, x=42, y=60), elem_id="NL")
'NL_5_42_60'
"""
return "_".join(str(x) for x in [elem_id, grid_coords.zoom, grid_coords.x, grid_coords.y]) | [
"def",
"kml_element_name",
"(",
"grid_coords",
",",
"elem_id",
"=",
"\"KML\"",
")",
":",
"return",
"\"_\"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"[",
"elem_id",
",",
"grid_coords",
".",
"zoom",
",",
"grid_coords",
".",
"x",
",",
"grid_coords",
".",
"y",
"]",
")"
] | Create a unique element name for KML
Args:
grid_coords (GridCoordinate):
elem_id (str):
>>> kml_element_name(GridCoordinate(zoom=5, x=42, y=60), elem_id="NL")
'NL_5_42_60' | [
"Create",
"a",
"unique",
"element",
"name",
"for",
"KML"
] | train | https://github.com/grst/geos/blob/ea15abcc5d8f86c9051df55e489b7d941b51a638/geos/kml.py#L62-L73 | 0.005464 |
aiortc/aiortc | aiortc/rtcrtpsender.py | RTCRtpSender.send | async def send(self, parameters: RTCRtpSendParameters):
"""
Attempt to set the parameters controlling the sending of media.
        :param parameters: The :class:`RTCRtpParameters` for the sender.
"""
if not self.__started:
self.__cname = parameters.rtcp.cname
self.__mid = parameters.muxId
# make note of the RTP header extension IDs
self.__transport._register_rtp_sender(self, parameters)
self.__rtp_header_extensions_map.configure(parameters)
# make note of RTX payload type
for codec in parameters.codecs:
if is_rtx(codec) and codec.parameters['apt'] == parameters.codecs[0].payloadType:
self.__rtx_payload_type = codec.payloadType
break
self.__rtp_task = asyncio.ensure_future(self._run_rtp(parameters.codecs[0]))
self.__rtcp_task = asyncio.ensure_future(self._run_rtcp())
self.__started = True | python | async def send(self, parameters: RTCRtpSendParameters):
"""
Attempt to set the parameters controlling the sending of media.
        :param parameters: The :class:`RTCRtpParameters` for the sender.
"""
if not self.__started:
self.__cname = parameters.rtcp.cname
self.__mid = parameters.muxId
# make note of the RTP header extension IDs
self.__transport._register_rtp_sender(self, parameters)
self.__rtp_header_extensions_map.configure(parameters)
# make note of RTX payload type
for codec in parameters.codecs:
if is_rtx(codec) and codec.parameters['apt'] == parameters.codecs[0].payloadType:
self.__rtx_payload_type = codec.payloadType
break
self.__rtp_task = asyncio.ensure_future(self._run_rtp(parameters.codecs[0]))
self.__rtcp_task = asyncio.ensure_future(self._run_rtcp())
self.__started = True | [
"async",
"def",
"send",
"(",
"self",
",",
"parameters",
":",
"RTCRtpSendParameters",
")",
":",
"if",
"not",
"self",
".",
"__started",
":",
"self",
".",
"__cname",
"=",
"parameters",
".",
"rtcp",
".",
"cname",
"self",
".",
"__mid",
"=",
"parameters",
".",
"muxId",
"# make note of the RTP header extension IDs",
"self",
".",
"__transport",
".",
"_register_rtp_sender",
"(",
"self",
",",
"parameters",
")",
"self",
".",
"__rtp_header_extensions_map",
".",
"configure",
"(",
"parameters",
")",
"# make note of RTX payload type",
"for",
"codec",
"in",
"parameters",
".",
"codecs",
":",
"if",
"is_rtx",
"(",
"codec",
")",
"and",
"codec",
".",
"parameters",
"[",
"'apt'",
"]",
"==",
"parameters",
".",
"codecs",
"[",
"0",
"]",
".",
"payloadType",
":",
"self",
".",
"__rtx_payload_type",
"=",
"codec",
".",
"payloadType",
"break",
"self",
".",
"__rtp_task",
"=",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"_run_rtp",
"(",
"parameters",
".",
"codecs",
"[",
"0",
"]",
")",
")",
"self",
".",
"__rtcp_task",
"=",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"_run_rtcp",
"(",
")",
")",
"self",
".",
"__started",
"=",
"True"
] | Attempt to set the parameters controlling the sending of media.
        :param parameters: The :class:`RTCRtpParameters` for the sender. | [
"Attempt",
"to",
"set",
"the",
"parameters",
"controlling",
"the",
"sending",
"of",
"media",
"."
] | train | https://github.com/aiortc/aiortc/blob/60ed036abf4575bd63985724b4493d569e6da29b/aiortc/rtcrtpsender.py#L140-L162 | 0.003953 |
ambitioninc/django-query-builder | querybuilder/query.py | Query.find_table | def find_table(self, table):
"""
Finds a table by name or alias. The FROM tables and JOIN tables
are included in the search.
:type table: str or :class:`ModelBase <django:django.db.models.base.ModelBase>`
:param table: string of the table name or alias or a ModelBase instance
:return: The table if it is found, otherwise None
:rtype: Table or None
"""
table = TableFactory(table)
identifier = table.get_identifier()
join_tables = [join_item.right_table for join_item in self.joins]
for table in (self.tables + join_tables):
if table.get_identifier() == identifier:
return table
return None | python | def find_table(self, table):
"""
Finds a table by name or alias. The FROM tables and JOIN tables
are included in the search.
:type table: str or :class:`ModelBase <django:django.db.models.base.ModelBase>`
:param table: string of the table name or alias or a ModelBase instance
:return: The table if it is found, otherwise None
:rtype: Table or None
"""
table = TableFactory(table)
identifier = table.get_identifier()
join_tables = [join_item.right_table for join_item in self.joins]
for table in (self.tables + join_tables):
if table.get_identifier() == identifier:
return table
return None | [
"def",
"find_table",
"(",
"self",
",",
"table",
")",
":",
"table",
"=",
"TableFactory",
"(",
"table",
")",
"identifier",
"=",
"table",
".",
"get_identifier",
"(",
")",
"join_tables",
"=",
"[",
"join_item",
".",
"right_table",
"for",
"join_item",
"in",
"self",
".",
"joins",
"]",
"for",
"table",
"in",
"(",
"self",
".",
"tables",
"+",
"join_tables",
")",
":",
"if",
"table",
".",
"get_identifier",
"(",
")",
"==",
"identifier",
":",
"return",
"table",
"return",
"None"
] | Finds a table by name or alias. The FROM tables and JOIN tables
are included in the search.
:type table: str or :class:`ModelBase <django:django.db.models.base.ModelBase>`
:param table: string of the table name or alias or a ModelBase instance
:return: The table if it is found, otherwise None
:rtype: Table or None | [
"Finds",
"a",
"table",
"by",
"name",
"or",
"alias",
".",
"The",
"FROM",
"tables",
"and",
"JOIN",
"tables",
"are",
"included",
"in",
"the",
"search",
"."
] | train | https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/query.py#L1492-L1509 | 0.004144 |
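A short sketch of the lookup documented above; it assumes a configured Django project and that the table was previously added with from_table.

from querybuilder.query import Query

query = Query().from_table('auth_user', fields=['id', 'username'])
table = query.find_table('auth_user')
if table is not None:
    print(table.get_identifier())  # same identifier the lookup compared against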
benfred/implicit | implicit/als.py | AlternatingLeastSquares._fit_gpu | def _fit_gpu(self, Ciu_host, Cui_host, show_progress=True):
""" specialized training on the gpu. copies inputs to/from cuda device """
if not implicit.cuda.HAS_CUDA:
raise ValueError("No CUDA extension has been built, can't train on GPU.")
if self.dtype == np.float64:
log.warning("Factors of dtype float64 aren't supported with gpu fitting. "
"Converting factors to float32")
self.item_factors = self.item_factors.astype(np.float32)
self.user_factors = self.user_factors.astype(np.float32)
Ciu = implicit.cuda.CuCSRMatrix(Ciu_host)
Cui = implicit.cuda.CuCSRMatrix(Cui_host)
X = implicit.cuda.CuDenseMatrix(self.user_factors.astype(np.float32))
Y = implicit.cuda.CuDenseMatrix(self.item_factors.astype(np.float32))
solver = implicit.cuda.CuLeastSquaresSolver(self.factors)
log.debug("Running %i ALS iterations", self.iterations)
with tqdm.tqdm(total=self.iterations, disable=not show_progress) as progress:
for iteration in range(self.iterations):
s = time.time()
solver.least_squares(Cui, X, Y, self.regularization, self.cg_steps)
progress.update(.5)
solver.least_squares(Ciu, Y, X, self.regularization, self.cg_steps)
progress.update(.5)
if self.fit_callback:
self.fit_callback(iteration, time.time() - s)
if self.calculate_training_loss:
loss = solver.calculate_loss(Cui, X, Y, self.regularization)
progress.set_postfix({"loss": loss})
if self.calculate_training_loss:
log.info("Final training loss %.4f", loss)
X.to_host(self.user_factors)
Y.to_host(self.item_factors) | python | def _fit_gpu(self, Ciu_host, Cui_host, show_progress=True):
""" specialized training on the gpu. copies inputs to/from cuda device """
if not implicit.cuda.HAS_CUDA:
raise ValueError("No CUDA extension has been built, can't train on GPU.")
if self.dtype == np.float64:
log.warning("Factors of dtype float64 aren't supported with gpu fitting. "
"Converting factors to float32")
self.item_factors = self.item_factors.astype(np.float32)
self.user_factors = self.user_factors.astype(np.float32)
Ciu = implicit.cuda.CuCSRMatrix(Ciu_host)
Cui = implicit.cuda.CuCSRMatrix(Cui_host)
X = implicit.cuda.CuDenseMatrix(self.user_factors.astype(np.float32))
Y = implicit.cuda.CuDenseMatrix(self.item_factors.astype(np.float32))
solver = implicit.cuda.CuLeastSquaresSolver(self.factors)
log.debug("Running %i ALS iterations", self.iterations)
with tqdm.tqdm(total=self.iterations, disable=not show_progress) as progress:
for iteration in range(self.iterations):
s = time.time()
solver.least_squares(Cui, X, Y, self.regularization, self.cg_steps)
progress.update(.5)
solver.least_squares(Ciu, Y, X, self.regularization, self.cg_steps)
progress.update(.5)
if self.fit_callback:
self.fit_callback(iteration, time.time() - s)
if self.calculate_training_loss:
loss = solver.calculate_loss(Cui, X, Y, self.regularization)
progress.set_postfix({"loss": loss})
if self.calculate_training_loss:
log.info("Final training loss %.4f", loss)
X.to_host(self.user_factors)
Y.to_host(self.item_factors) | [
"def",
"_fit_gpu",
"(",
"self",
",",
"Ciu_host",
",",
"Cui_host",
",",
"show_progress",
"=",
"True",
")",
":",
"if",
"not",
"implicit",
".",
"cuda",
".",
"HAS_CUDA",
":",
"raise",
"ValueError",
"(",
"\"No CUDA extension has been built, can't train on GPU.\"",
")",
"if",
"self",
".",
"dtype",
"==",
"np",
".",
"float64",
":",
"log",
".",
"warning",
"(",
"\"Factors of dtype float64 aren't supported with gpu fitting. \"",
"\"Converting factors to float32\"",
")",
"self",
".",
"item_factors",
"=",
"self",
".",
"item_factors",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"self",
".",
"user_factors",
"=",
"self",
".",
"user_factors",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"Ciu",
"=",
"implicit",
".",
"cuda",
".",
"CuCSRMatrix",
"(",
"Ciu_host",
")",
"Cui",
"=",
"implicit",
".",
"cuda",
".",
"CuCSRMatrix",
"(",
"Cui_host",
")",
"X",
"=",
"implicit",
".",
"cuda",
".",
"CuDenseMatrix",
"(",
"self",
".",
"user_factors",
".",
"astype",
"(",
"np",
".",
"float32",
")",
")",
"Y",
"=",
"implicit",
".",
"cuda",
".",
"CuDenseMatrix",
"(",
"self",
".",
"item_factors",
".",
"astype",
"(",
"np",
".",
"float32",
")",
")",
"solver",
"=",
"implicit",
".",
"cuda",
".",
"CuLeastSquaresSolver",
"(",
"self",
".",
"factors",
")",
"log",
".",
"debug",
"(",
"\"Running %i ALS iterations\"",
",",
"self",
".",
"iterations",
")",
"with",
"tqdm",
".",
"tqdm",
"(",
"total",
"=",
"self",
".",
"iterations",
",",
"disable",
"=",
"not",
"show_progress",
")",
"as",
"progress",
":",
"for",
"iteration",
"in",
"range",
"(",
"self",
".",
"iterations",
")",
":",
"s",
"=",
"time",
".",
"time",
"(",
")",
"solver",
".",
"least_squares",
"(",
"Cui",
",",
"X",
",",
"Y",
",",
"self",
".",
"regularization",
",",
"self",
".",
"cg_steps",
")",
"progress",
".",
"update",
"(",
".5",
")",
"solver",
".",
"least_squares",
"(",
"Ciu",
",",
"Y",
",",
"X",
",",
"self",
".",
"regularization",
",",
"self",
".",
"cg_steps",
")",
"progress",
".",
"update",
"(",
".5",
")",
"if",
"self",
".",
"fit_callback",
":",
"self",
".",
"fit_callback",
"(",
"iteration",
",",
"time",
".",
"time",
"(",
")",
"-",
"s",
")",
"if",
"self",
".",
"calculate_training_loss",
":",
"loss",
"=",
"solver",
".",
"calculate_loss",
"(",
"Cui",
",",
"X",
",",
"Y",
",",
"self",
".",
"regularization",
")",
"progress",
".",
"set_postfix",
"(",
"{",
"\"loss\"",
":",
"loss",
"}",
")",
"if",
"self",
".",
"calculate_training_loss",
":",
"log",
".",
"info",
"(",
"\"Final training loss %.4f\"",
",",
"loss",
")",
"X",
".",
"to_host",
"(",
"self",
".",
"user_factors",
")",
"Y",
".",
"to_host",
"(",
"self",
".",
"item_factors",
")"
] | specialized training on the gpu. copies inputs to/from cuda device | [
"specialized",
"training",
"on",
"the",
"gpu",
".",
"copies",
"inputs",
"to",
"/",
"from",
"cuda",
"device"
] | train | https://github.com/benfred/implicit/blob/6b16c50d1d514a814f2e5b8cf2a829ff23dbba63/implicit/als.py#L177-L214 | 0.004862 |
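_fit_gpu is an internal step, so a hedged sketch of how it is reached through the public fit() call is more useful here; it assumes the package was built with its CUDA extension.

import numpy as np
from scipy.sparse import csr_matrix
from implicit.als import AlternatingLeastSquares

# Tiny random item-user confidence matrix, only to exercise the code path.
item_user = csr_matrix(np.random.randint(0, 3, size=(100, 50)).astype(np.float32))
model = AlternatingLeastSquares(factors=16, iterations=5, use_gpu=True)
model.fit(item_user)  # dispatches to _fit_gpu because use_gpu=True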
rootpy/rootpy | rootpy/tree/treeobject.py | TreeCollection.getitem | def getitem(self, index):
"""
direct access without going through self.selection
"""
if index >= getattr(self.tree, self.size):
raise IndexError(index)
if self.__cache_objects and index in self.__cache:
return self.__cache[index]
obj = self.tree_object_cls(self.tree, self.name, self.prefix, index)
if self.__cache_objects:
self.__cache[index] = obj
return obj | python | def getitem(self, index):
"""
direct access without going through self.selection
"""
if index >= getattr(self.tree, self.size):
raise IndexError(index)
if self.__cache_objects and index in self.__cache:
return self.__cache[index]
obj = self.tree_object_cls(self.tree, self.name, self.prefix, index)
if self.__cache_objects:
self.__cache[index] = obj
return obj | [
"def",
"getitem",
"(",
"self",
",",
"index",
")",
":",
"if",
"index",
">=",
"getattr",
"(",
"self",
".",
"tree",
",",
"self",
".",
"size",
")",
":",
"raise",
"IndexError",
"(",
"index",
")",
"if",
"self",
".",
"__cache_objects",
"and",
"index",
"in",
"self",
".",
"__cache",
":",
"return",
"self",
".",
"__cache",
"[",
"index",
"]",
"obj",
"=",
"self",
".",
"tree_object_cls",
"(",
"self",
".",
"tree",
",",
"self",
".",
"name",
",",
"self",
".",
"prefix",
",",
"index",
")",
"if",
"self",
".",
"__cache_objects",
":",
"self",
".",
"__cache",
"[",
"index",
"]",
"=",
"obj",
"return",
"obj"
] | direct access without going through self.selection | [
"direct",
"access",
"without",
"going",
"through",
"self",
".",
"selection"
] | train | https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/tree/treeobject.py#L219-L230 | 0.004348 |
nion-software/nionswift-io | nionswift_plugin/TIFF_IO/tifffile.py | clean_offsetscounts | def clean_offsetscounts(offsets, counts):
"""Return cleaned offsets and byte counts.
Remove zero offsets and counts.
Use to sanitize StripOffsets and StripByteCounts tag values.
"""
# TODO: cythonize this
offsets = list(offsets)
counts = list(counts)
size = len(offsets)
if size != len(counts):
raise ValueError('StripOffsets and StripByteCounts mismatch')
j = 0
for i, (o, b) in enumerate(zip(offsets, counts)):
if b > 0:
if o > 0:
if i > j:
offsets[j] = o
counts[j] = b
j += 1
continue
raise ValueError('invalid offset')
log.warning('clean_offsetscounts: empty bytecount')
    if size == j:
return offsets, counts
if j == 0:
return [offsets[0]], [counts[0]]
return offsets[:j], counts[:j] | python | def clean_offsetscounts(offsets, counts):
"""Return cleaned offsets and byte counts.
Remove zero offsets and counts.
Use to sanitize StripOffsets and StripByteCounts tag values.
"""
# TODO: cythonize this
offsets = list(offsets)
counts = list(counts)
size = len(offsets)
if size != len(counts):
raise ValueError('StripOffsets and StripByteCounts mismatch')
j = 0
for i, (o, b) in enumerate(zip(offsets, counts)):
if b > 0:
if o > 0:
if i > j:
offsets[j] = o
counts[j] = b
j += 1
continue
raise ValueError('invalid offset')
log.warning('clean_offsetscounts: empty bytecount')
    if size == j:
return offsets, counts
if j == 0:
return [offsets[0]], [counts[0]]
return offsets[:j], counts[:j] | [
"def",
"clean_offsetscounts",
"(",
"offsets",
",",
"counts",
")",
":",
"# TODO: cythonize this",
"offsets",
"=",
"list",
"(",
"offsets",
")",
"counts",
"=",
"list",
"(",
"counts",
")",
"size",
"=",
"len",
"(",
"offsets",
")",
"if",
"size",
"!=",
"len",
"(",
"counts",
")",
":",
"raise",
"ValueError",
"(",
"'StripOffsets and StripByteCounts mismatch'",
")",
"j",
"=",
"0",
"for",
"i",
",",
"(",
"o",
",",
"b",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"offsets",
",",
"counts",
")",
")",
":",
"if",
"b",
">",
"0",
":",
"if",
"o",
">",
"0",
":",
"if",
"i",
">",
"j",
":",
"offsets",
"[",
"j",
"]",
"=",
"o",
"counts",
"[",
"j",
"]",
"=",
"b",
"j",
"+=",
"1",
"continue",
"raise",
"ValueError",
"(",
"'invalid offset'",
")",
"log",
".",
"warning",
"(",
"'clean_offsetscounts: empty bytecount'",
")",
"if",
"size",
"==",
"len",
"(",
"offsets",
")",
":",
"return",
"offsets",
",",
"counts",
"if",
"j",
"==",
"0",
":",
"return",
"[",
"offsets",
"[",
"0",
"]",
"]",
",",
"[",
"counts",
"[",
"0",
"]",
"]",
"return",
"offsets",
"[",
":",
"j",
"]",
",",
"counts",
"[",
":",
"j",
"]"
] | Return cleaned offsets and byte counts.
Remove zero offsets and counts.
Use to sanitize StripOffsets and StripByteCounts tag values. | [
"Return",
"cleaned",
"offsets",
"and",
"byte",
"counts",
"."
] | train | https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9851-L9879 | 0.001103 |
benjamin-hodgson/asynqp | src/asynqp/queue.py | Queue.get | def get(self, *, no_ack=False):
"""
Synchronously get a message from the queue.
This method is a :ref:`coroutine <coroutine>`.
:keyword bool no_ack: if true, the broker does not require acknowledgement of receipt of the message.
:return: an :class:`~asynqp.message.IncomingMessage`,
or ``None`` if there were no messages on the queue.
"""
if self.deleted:
raise Deleted("Queue {} was deleted".format(self.name))
self.sender.send_BasicGet(self.name, no_ack)
tag_msg = yield from self.synchroniser.wait(spec.BasicGetOK, spec.BasicGetEmpty)
if tag_msg is not None:
consumer_tag, msg = tag_msg
assert consumer_tag is None
else:
msg = None
self.reader.ready()
return msg | python | def get(self, *, no_ack=False):
"""
Synchronously get a message from the queue.
This method is a :ref:`coroutine <coroutine>`.
:keyword bool no_ack: if true, the broker does not require acknowledgement of receipt of the message.
:return: an :class:`~asynqp.message.IncomingMessage`,
or ``None`` if there were no messages on the queue.
"""
if self.deleted:
raise Deleted("Queue {} was deleted".format(self.name))
self.sender.send_BasicGet(self.name, no_ack)
tag_msg = yield from self.synchroniser.wait(spec.BasicGetOK, spec.BasicGetEmpty)
if tag_msg is not None:
consumer_tag, msg = tag_msg
assert consumer_tag is None
else:
msg = None
self.reader.ready()
return msg | [
"def",
"get",
"(",
"self",
",",
"*",
",",
"no_ack",
"=",
"False",
")",
":",
"if",
"self",
".",
"deleted",
":",
"raise",
"Deleted",
"(",
"\"Queue {} was deleted\"",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"self",
".",
"sender",
".",
"send_BasicGet",
"(",
"self",
".",
"name",
",",
"no_ack",
")",
"tag_msg",
"=",
"yield",
"from",
"self",
".",
"synchroniser",
".",
"wait",
"(",
"spec",
".",
"BasicGetOK",
",",
"spec",
".",
"BasicGetEmpty",
")",
"if",
"tag_msg",
"is",
"not",
"None",
":",
"consumer_tag",
",",
"msg",
"=",
"tag_msg",
"assert",
"consumer_tag",
"is",
"None",
"else",
":",
"msg",
"=",
"None",
"self",
".",
"reader",
".",
"ready",
"(",
")",
"return",
"msg"
] | Synchronously get a message from the queue.
This method is a :ref:`coroutine <coroutine>`.
:keyword bool no_ack: if true, the broker does not require acknowledgement of receipt of the message.
:return: an :class:`~asynqp.message.IncomingMessage`,
or ``None`` if there were no messages on the queue. | [
"Synchronously",
"get",
"a",
"message",
"from",
"the",
"queue",
"."
] | train | https://github.com/benjamin-hodgson/asynqp/blob/ea8630d1803d10d4fd64b1a0e50f3097710b34d1/src/asynqp/queue.py#L119-L142 | 0.004796 |
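A hedged end-to-end sketch showing where the get call above fits; the broker address, credentials and queue name are assumptions.

import asyncio
import asynqp

@asyncio.coroutine
def poll_once():
    connection = yield from asynqp.connect('localhost', 5672, username='guest', password='guest')
    channel = yield from connection.open_channel()
    queue = yield from channel.declare_queue('example.queue')
    msg = yield from queue.get(no_ack=True)
    print(None if msg is None else msg.body)
    yield from connection.close()

asyncio.get_event_loop().run_until_complete(poll_once())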
pywbem/pywbem | wbemcli.py | im | def im(mn, op, params, **kwparams):
"""
This function is a wrapper for
:meth:`~pywbem.WBEMConnection.InvokeMethod`.
Invoke a method on a target instance or on a target class.
The methods that can be invoked are static and non-static methods defined
in a class (also known as *extrinsic* methods). Static methods can be
invoked on instances and on classes. Non-static methods can be invoked only
on instances.
Parameters:
mn (:term:`string`):
Method name.
op (:class:`~pywbem.CIMInstanceName`):
Target instance path.
op (:class:`~pywbem.CIMClassName`):
Target class path.
params (:term:`py:iterable`):
Input parameters for the method.
Each item in the iterable is a single parameter value and can be any
of:
* :class:`~pywbem.CIMParameter` representing a parameter value. The
`name`, `value`, `type` and `embedded_object` attributes of this
object are used.
* tuple of name, value, with:
- name (:term:`string`): Parameter name (case independent)
- value (:term:`CIM data type`): Parameter value
**kwparams (named/keyword arguments):
Input parameters for the method.
* key (:term:`string`): Parameter name (case independent)
* value (:term:`CIM data type`): Parameter value
Returns:
tuple(rv, out), with these tuple items:
* rv (:term:`CIM data type`):
Return value of the CIM method.
* out (:ref:`NocaseDict`):
Dictionary with all provided output parameters of the CIM method,
with:
* key (:term:`unicode string`):
Parameter name
* value (:term:`CIM data type`):
Parameter value
"""
return CONN.InvokeMethod(mn, op, params, **kwparams) | python | def im(mn, op, params, **kwparams):
"""
This function is a wrapper for
:meth:`~pywbem.WBEMConnection.InvokeMethod`.
Invoke a method on a target instance or on a target class.
The methods that can be invoked are static and non-static methods defined
in a class (also known as *extrinsic* methods). Static methods can be
invoked on instances and on classes. Non-static methods can be invoked only
on instances.
Parameters:
mn (:term:`string`):
Method name.
op (:class:`~pywbem.CIMInstanceName`):
Target instance path.
op (:class:`~pywbem.CIMClassName`):
Target class path.
params (:term:`py:iterable`):
Input parameters for the method.
Each item in the iterable is a single parameter value and can be any
of:
* :class:`~pywbem.CIMParameter` representing a parameter value. The
`name`, `value`, `type` and `embedded_object` attributes of this
object are used.
* tuple of name, value, with:
- name (:term:`string`): Parameter name (case independent)
- value (:term:`CIM data type`): Parameter value
**kwparams (named/keyword arguments):
Input parameters for the method.
* key (:term:`string`): Parameter name (case independent)
* value (:term:`CIM data type`): Parameter value
Returns:
tuple(rv, out), with these tuple items:
* rv (:term:`CIM data type`):
Return value of the CIM method.
* out (:ref:`NocaseDict`):
Dictionary with all provided output parameters of the CIM method,
with:
* key (:term:`unicode string`):
Parameter name
* value (:term:`CIM data type`):
Parameter value
"""
return CONN.InvokeMethod(mn, op, params, **kwparams) | [
"def",
"im",
"(",
"mn",
",",
"op",
",",
"params",
",",
"*",
"*",
"kwparams",
")",
":",
"return",
"CONN",
".",
"InvokeMethod",
"(",
"mn",
",",
"op",
",",
"params",
",",
"*",
"*",
"kwparams",
")"
] | This function is a wrapper for
:meth:`~pywbem.WBEMConnection.InvokeMethod`.
Invoke a method on a target instance or on a target class.
The methods that can be invoked are static and non-static methods defined
in a class (also known as *extrinsic* methods). Static methods can be
invoked on instances and on classes. Non-static methods can be invoked only
on instances.
Parameters:
mn (:term:`string`):
Method name.
op (:class:`~pywbem.CIMInstanceName`):
Target instance path.
op (:class:`~pywbem.CIMClassName`):
Target class path.
params (:term:`py:iterable`):
Input parameters for the method.
Each item in the iterable is a single parameter value and can be any
of:
* :class:`~pywbem.CIMParameter` representing a parameter value. The
`name`, `value`, `type` and `embedded_object` attributes of this
object are used.
* tuple of name, value, with:
- name (:term:`string`): Parameter name (case independent)
- value (:term:`CIM data type`): Parameter value
**kwparams (named/keyword arguments):
Input parameters for the method.
* key (:term:`string`): Parameter name (case independent)
* value (:term:`CIM data type`): Parameter value
Returns:
tuple(rv, out), with these tuple items:
* rv (:term:`CIM data type`):
Return value of the CIM method.
* out (:ref:`NocaseDict`):
Dictionary with all provided output parameters of the CIM method,
with:
* key (:term:`unicode string`):
Parameter name
* value (:term:`CIM data type`):
Parameter value | [
"This",
"function",
"is",
"a",
"wrapper",
"for",
":",
"meth",
":",
"~pywbem",
".",
"WBEMConnection",
".",
"InvokeMethod",
"."
] | train | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/wbemcli.py#L875-L935 | 0.000525 |
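A short sketch of the wrapper above as it would be used inside a wbemcli session (where the CONN global is already set); the method name and the instance path below are illustrative assumptions.

from pywbem import CIMInstanceName

# Hypothetical service instance path on the connected server.
op = CIMInstanceName('CIM_Service', keybindings={'Name': 'sshd'}, namespace='root/cimv2')
rv, out = im('StartService', op, [])
print(rv)          # return value of the CIM method
print(dict(out))   # output parameters, keyed by parameter name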
edx/django-config-models | config_models/views.py | AtomicMixin.create_atomic_wrapper | def create_atomic_wrapper(cls, wrapped_func):
"""Returns a wrapped function."""
def _create_atomic_wrapper(*args, **kwargs):
"""Actual wrapper."""
# When a view call fails due to a permissions error, it raises an exception.
# An uncaught exception breaks the DB transaction for any following DB operations
# unless it's wrapped in a atomic() decorator or context manager.
with transaction.atomic():
return wrapped_func(*args, **kwargs)
return _create_atomic_wrapper | python | def create_atomic_wrapper(cls, wrapped_func):
"""Returns a wrapped function."""
def _create_atomic_wrapper(*args, **kwargs):
"""Actual wrapper."""
# When a view call fails due to a permissions error, it raises an exception.
# An uncaught exception breaks the DB transaction for any following DB operations
# unless it's wrapped in a atomic() decorator or context manager.
with transaction.atomic():
return wrapped_func(*args, **kwargs)
return _create_atomic_wrapper | [
"def",
"create_atomic_wrapper",
"(",
"cls",
",",
"wrapped_func",
")",
":",
"def",
"_create_atomic_wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Actual wrapper.\"\"\"",
"# When a view call fails due to a permissions error, it raises an exception.",
"# An uncaught exception breaks the DB transaction for any following DB operations",
"# unless it's wrapped in a atomic() decorator or context manager.",
"with",
"transaction",
".",
"atomic",
"(",
")",
":",
"return",
"wrapped_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_create_atomic_wrapper"
] | Returns a wrapped function. | [
"Returns",
"a",
"wrapped",
"function",
"."
] | train | https://github.com/edx/django-config-models/blob/f22c05fe3ccb182a6be4dbe313e9d6749dffd3e4/config_models/views.py#L23-L33 | 0.007067 |
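A standalone sketch of what the classmethod above produces; it assumes a configured Django project and that AtomicMixin is importable from the module path given in this entry.

from django.contrib.auth.models import User
from config_models.views import AtomicMixin

def create_user(username):
    User.objects.create(username=username)

atomic_create = AtomicMixin.create_atomic_wrapper(create_user)
atomic_create('example')  # the create() runs inside one transaction.atomic() block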
wuher/devil | devil/fields/__init__.py | serialize | def serialize(self, value, entity=None, request=None):
""" Validate and serialize the value.
This is the default implementation
"""
ret = self.from_python(value)
self.validate(ret)
self.run_validators(value)
return ret | python | def serialize(self, value, entity=None, request=None):
""" Validate and serialize the value.
This is the default implementation
"""
ret = self.from_python(value)
self.validate(ret)
self.run_validators(value)
return ret | [
"def",
"serialize",
"(",
"self",
",",
"value",
",",
"entity",
"=",
"None",
",",
"request",
"=",
"None",
")",
":",
"ret",
"=",
"self",
".",
"from_python",
"(",
"value",
")",
"self",
".",
"validate",
"(",
"ret",
")",
"self",
".",
"run_validators",
"(",
"value",
")",
"return",
"ret"
] | Validate and serialize the value.
This is the default implementation | [
"Validate",
"and",
"serialize",
"the",
"value",
"."
] | train | https://github.com/wuher/devil/blob/a8834d4f88d915a21754c6b96f99d0ad9123ad4d/devil/fields/__init__.py#L23-L32 | 0.004032 |
klen/muffin-rest | muffin_rest/filters.py | Filter.apply | def apply(self, collection, ops, **kwargs):
"""Apply the filter to collection."""
validator = lambda obj: all(op(obj, val) for (op, val) in ops) # noqa
return [o for o in collection if validator(o)] | python | def apply(self, collection, ops, **kwargs):
"""Apply the filter to collection."""
validator = lambda obj: all(op(obj, val) for (op, val) in ops) # noqa
return [o for o in collection if validator(o)] | [
"def",
"apply",
"(",
"self",
",",
"collection",
",",
"ops",
",",
"*",
"*",
"kwargs",
")",
":",
"validator",
"=",
"lambda",
"obj",
":",
"all",
"(",
"op",
"(",
"obj",
",",
"val",
")",
"for",
"(",
"op",
",",
"val",
")",
"in",
"ops",
")",
"# noqa",
"return",
"[",
"o",
"for",
"o",
"in",
"collection",
"if",
"validator",
"(",
"o",
")",
"]"
] | Apply the filter to collection. | [
"Apply",
"the",
"filter",
"to",
"collection",
"."
] | train | https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/filters.py#L61-L64 | 0.013453 |
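The Filter constructor is not part of this entry, so this is a standalone illustration of the predicate used by apply above: ops is a list of (operator, value) pairs and an item survives only if every pair holds.

import operator

ops = [(operator.ge, 3), (operator.lt, 8)]
collection = [1, 4, 7, 9]
kept = [obj for obj in collection if all(op(obj, val) for op, val in ops)]
print(kept)  # [4, 7]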
sethmlarson/virtualbox-python | virtualbox/library.py | IPerformanceCollector.setup_metrics | def setup_metrics(self, metric_names, objects, period, count):
"""Sets parameters of specified base metrics for a set of objects. Returns
an array of :py:class:`IPerformanceMetric` describing the metrics
have been affected.
@c Null or empty metric name array means all metrics. @c Null or
empty object array means all existing objects. If metric name array
contains a single element and object array contains many, the single
metric name array element is applied to each object array element to
form metric/object pairs.
in metric_names of type str
Metric name filter. Comma-separated list of metrics with wildcard
support.
in objects of type Interface
Set of objects to setup metric parameters for.
in period of type int
Time interval in seconds between two consecutive samples of
performance data.
in count of type int
Number of samples to retain in performance data history. Older
samples get discarded.
return affected_metrics of type :class:`IPerformanceMetric`
Array of metrics that have been modified by the call to this method.
"""
if not isinstance(metric_names, list):
raise TypeError("metric_names can only be an instance of type list")
for a in metric_names[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
if not isinstance(objects, list):
raise TypeError("objects can only be an instance of type list")
for a in objects[:10]:
if not isinstance(a, Interface):
raise TypeError(
"array can only contain objects of type Interface")
if not isinstance(period, baseinteger):
raise TypeError("period can only be an instance of type baseinteger")
if not isinstance(count, baseinteger):
raise TypeError("count can only be an instance of type baseinteger")
affected_metrics = self._call("setupMetrics",
in_p=[metric_names, objects, period, count])
affected_metrics = [IPerformanceMetric(a) for a in affected_metrics]
return affected_metrics | python | def setup_metrics(self, metric_names, objects, period, count):
"""Sets parameters of specified base metrics for a set of objects. Returns
an array of :py:class:`IPerformanceMetric` describing the metrics
have been affected.
@c Null or empty metric name array means all metrics. @c Null or
empty object array means all existing objects. If metric name array
contains a single element and object array contains many, the single
metric name array element is applied to each object array element to
form metric/object pairs.
in metric_names of type str
Metric name filter. Comma-separated list of metrics with wildcard
support.
in objects of type Interface
Set of objects to setup metric parameters for.
in period of type int
Time interval in seconds between two consecutive samples of
performance data.
in count of type int
Number of samples to retain in performance data history. Older
samples get discarded.
return affected_metrics of type :class:`IPerformanceMetric`
Array of metrics that have been modified by the call to this method.
"""
if not isinstance(metric_names, list):
raise TypeError("metric_names can only be an instance of type list")
for a in metric_names[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
if not isinstance(objects, list):
raise TypeError("objects can only be an instance of type list")
for a in objects[:10]:
if not isinstance(a, Interface):
raise TypeError(
"array can only contain objects of type Interface")
if not isinstance(period, baseinteger):
raise TypeError("period can only be an instance of type baseinteger")
if not isinstance(count, baseinteger):
raise TypeError("count can only be an instance of type baseinteger")
affected_metrics = self._call("setupMetrics",
in_p=[metric_names, objects, period, count])
affected_metrics = [IPerformanceMetric(a) for a in affected_metrics]
return affected_metrics | [
"def",
"setup_metrics",
"(",
"self",
",",
"metric_names",
",",
"objects",
",",
"period",
",",
"count",
")",
":",
"if",
"not",
"isinstance",
"(",
"metric_names",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"metric_names can only be an instance of type list\"",
")",
"for",
"a",
"in",
"metric_names",
"[",
":",
"10",
"]",
":",
"if",
"not",
"isinstance",
"(",
"a",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"\"array can only contain objects of type basestring\"",
")",
"if",
"not",
"isinstance",
"(",
"objects",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"objects can only be an instance of type list\"",
")",
"for",
"a",
"in",
"objects",
"[",
":",
"10",
"]",
":",
"if",
"not",
"isinstance",
"(",
"a",
",",
"Interface",
")",
":",
"raise",
"TypeError",
"(",
"\"array can only contain objects of type Interface\"",
")",
"if",
"not",
"isinstance",
"(",
"period",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"period can only be an instance of type baseinteger\"",
")",
"if",
"not",
"isinstance",
"(",
"count",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"count can only be an instance of type baseinteger\"",
")",
"affected_metrics",
"=",
"self",
".",
"_call",
"(",
"\"setupMetrics\"",
",",
"in_p",
"=",
"[",
"metric_names",
",",
"objects",
",",
"period",
",",
"count",
"]",
")",
"affected_metrics",
"=",
"[",
"IPerformanceMetric",
"(",
"a",
")",
"for",
"a",
"in",
"affected_metrics",
"]",
"return",
"affected_metrics"
] | Sets parameters of specified base metrics for a set of objects. Returns
an array of :py:class:`IPerformanceMetric` describing the metrics
have been affected.
@c Null or empty metric name array means all metrics. @c Null or
empty object array means all existing objects. If metric name array
contains a single element and object array contains many, the single
metric name array element is applied to each object array element to
form metric/object pairs.
in metric_names of type str
Metric name filter. Comma-separated list of metrics with wildcard
support.
in objects of type Interface
Set of objects to setup metric parameters for.
in period of type int
Time interval in seconds between two consecutive samples of
performance data.
in count of type int
Number of samples to retain in performance data history. Older
samples get discarded.
return affected_metrics of type :class:`IPerformanceMetric`
Array of metrics that have been modified by the call to this method. | [
"Sets",
"parameters",
"of",
"specified",
"base",
"metrics",
"for",
"a",
"set",
"of",
"objects",
".",
"Returns",
"an",
"array",
"of",
":",
"py",
":",
"class",
":",
"IPerformanceMetric",
"describing",
"the",
"metrics",
"have",
"been",
"affected",
".",
"@c",
"Null",
"or",
"empty",
"metric",
"name",
"array",
"means",
"all",
"metrics",
".",
"@c",
"Null",
"or",
"empty",
"object",
"array",
"means",
"all",
"existing",
"objects",
".",
"If",
"metric",
"name",
"array",
"contains",
"a",
"single",
"element",
"and",
"object",
"array",
"contains",
"many",
"the",
"single",
"metric",
"name",
"array",
"element",
"is",
"applied",
"to",
"each",
"object",
"array",
"element",
"to",
"form",
"metric",
"/",
"object",
"pairs",
"."
] | train | https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L29486-L29535 | 0.003793 |
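A hedged sketch of calling the method above through the Python bindings; it assumes a local VirtualBox installation, and the metric name and attribute spellings are assumptions.

import virtualbox

vbox = virtualbox.VirtualBox()
collector = vbox.performance_collector
# Sample host CPU load every 5 seconds, keeping the last 12 samples.
affected = collector.setup_metrics(['CPU/Load/User'], [vbox.host], 5, 12)
for metric in affected:
    print(metric.metric_name)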
astropy/photutils | photutils/centroids/core.py | gaussian1d_moments | def gaussian1d_moments(data, mask=None):
"""
Estimate 1D Gaussian parameters from the moments of 1D data.
This function can be useful for providing initial parameter values
when fitting a 1D Gaussian to the ``data``.
Parameters
----------
data : array_like (1D)
The 1D array.
mask : array_like (1D bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
amplitude, mean, stddev : float
The estimated parameters of a 1D Gaussian.
"""
if np.any(~np.isfinite(data)):
data = np.ma.masked_invalid(data)
        warnings.warn('Input data contains invalid values (e.g. NaNs or infs), '
'which were automatically masked.', AstropyUserWarning)
else:
data = np.ma.array(data)
if mask is not None and mask is not np.ma.nomask:
mask = np.asanyarray(mask)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data.mask |= mask
data.fill_value = 0.
data = data.filled()
x = np.arange(data.size)
x_mean = np.sum(x * data) / np.sum(data)
x_stddev = np.sqrt(abs(np.sum(data * (x - x_mean)**2) / np.sum(data)))
amplitude = np.ptp(data)
return amplitude, x_mean, x_stddev | python | def gaussian1d_moments(data, mask=None):
"""
Estimate 1D Gaussian parameters from the moments of 1D data.
This function can be useful for providing initial parameter values
when fitting a 1D Gaussian to the ``data``.
Parameters
----------
data : array_like (1D)
The 1D array.
mask : array_like (1D bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
amplitude, mean, stddev : float
The estimated parameters of a 1D Gaussian.
"""
if np.any(~np.isfinite(data)):
data = np.ma.masked_invalid(data)
        warnings.warn('Input data contains invalid values (e.g. NaNs or infs), '
'which were automatically masked.', AstropyUserWarning)
else:
data = np.ma.array(data)
if mask is not None and mask is not np.ma.nomask:
mask = np.asanyarray(mask)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data.mask |= mask
data.fill_value = 0.
data = data.filled()
x = np.arange(data.size)
x_mean = np.sum(x * data) / np.sum(data)
x_stddev = np.sqrt(abs(np.sum(data * (x - x_mean)**2) / np.sum(data)))
amplitude = np.ptp(data)
return amplitude, x_mean, x_stddev | [
"def",
"gaussian1d_moments",
"(",
"data",
",",
"mask",
"=",
"None",
")",
":",
"if",
"np",
".",
"any",
"(",
"~",
"np",
".",
"isfinite",
"(",
"data",
")",
")",
":",
"data",
"=",
"np",
".",
"ma",
".",
"masked_invalid",
"(",
"data",
")",
"warnings",
".",
"warn",
"(",
"'Input data contains input values (e.g. NaNs or infs), '",
"'which were automatically masked.'",
",",
"AstropyUserWarning",
")",
"else",
":",
"data",
"=",
"np",
".",
"ma",
".",
"array",
"(",
"data",
")",
"if",
"mask",
"is",
"not",
"None",
"and",
"mask",
"is",
"not",
"np",
".",
"ma",
".",
"nomask",
":",
"mask",
"=",
"np",
".",
"asanyarray",
"(",
"mask",
")",
"if",
"data",
".",
"shape",
"!=",
"mask",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'data and mask must have the same shape.'",
")",
"data",
".",
"mask",
"|=",
"mask",
"data",
".",
"fill_value",
"=",
"0.",
"data",
"=",
"data",
".",
"filled",
"(",
")",
"x",
"=",
"np",
".",
"arange",
"(",
"data",
".",
"size",
")",
"x_mean",
"=",
"np",
".",
"sum",
"(",
"x",
"*",
"data",
")",
"/",
"np",
".",
"sum",
"(",
"data",
")",
"x_stddev",
"=",
"np",
".",
"sqrt",
"(",
"abs",
"(",
"np",
".",
"sum",
"(",
"data",
"*",
"(",
"x",
"-",
"x_mean",
")",
"**",
"2",
")",
"/",
"np",
".",
"sum",
"(",
"data",
")",
")",
")",
"amplitude",
"=",
"np",
".",
"ptp",
"(",
"data",
")",
"return",
"amplitude",
",",
"x_mean",
",",
"x_stddev"
] | Estimate 1D Gaussian parameters from the moments of 1D data.
This function can be useful for providing initial parameter values
when fitting a 1D Gaussian to the ``data``.
Parameters
----------
data : array_like (1D)
The 1D array.
mask : array_like (1D bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
amplitude, mean, stddev : float
The estimated parameters of a 1D Gaussian. | [
"Estimate",
"1D",
"Gaussian",
"parameters",
"from",
"the",
"moments",
"of",
"1D",
"data",
"."
] | train | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/centroids/core.py#L120-L163 | 0.000717 |
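A quick numerical check of the estimator documented above; the import path follows the file path given in this entry.

import numpy as np
from photutils.centroids.core import gaussian1d_moments

x = np.arange(100)
data = 12.0 * np.exp(-0.5 * ((x - 40.0) / 3.0) ** 2)
amplitude, mean, stddev = gaussian1d_moments(data)
print(amplitude, mean, stddev)  # approximately 12, 40 and 3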
asweigart/pyautogui | pyautogui/__init__.py | onScreen | def onScreen(x, y=None):
"""Returns whether the given xy coordinates are on the screen or not.
Args:
Either the arguments are two separate values, first arg for x and second
for y, or there is a single argument of a sequence with two values, the
first x and the second y.
Example: onScreen(x, y) or onScreen([x, y])
Returns:
bool: True if the xy coordinates are on the screen at its current
resolution, otherwise False.
"""
x, y = _unpackXY(x, y)
x = int(x)
y = int(y)
width, height = platformModule._size()
return 0 <= x < width and 0 <= y < height | python | def onScreen(x, y=None):
"""Returns whether the given xy coordinates are on the screen or not.
Args:
Either the arguments are two separate values, first arg for x and second
for y, or there is a single argument of a sequence with two values, the
first x and the second y.
Example: onScreen(x, y) or onScreen([x, y])
Returns:
bool: True if the xy coordinates are on the screen at its current
resolution, otherwise False.
"""
x, y = _unpackXY(x, y)
x = int(x)
y = int(y)
width, height = platformModule._size()
return 0 <= x < width and 0 <= y < height | [
"def",
"onScreen",
"(",
"x",
",",
"y",
"=",
"None",
")",
":",
"x",
",",
"y",
"=",
"_unpackXY",
"(",
"x",
",",
"y",
")",
"x",
"=",
"int",
"(",
"x",
")",
"y",
"=",
"int",
"(",
"y",
")",
"width",
",",
"height",
"=",
"platformModule",
".",
"_size",
"(",
")",
"return",
"0",
"<=",
"x",
"<",
"width",
"and",
"0",
"<=",
"y",
"<",
"height"
] | Returns whether the given xy coordinates are on the screen or not.
Args:
Either the arguments are two separate values, first arg for x and second
for y, or there is a single argument of a sequence with two values, the
first x and the second y.
Example: onScreen(x, y) or onScreen([x, y])
Returns:
bool: True if the xy coordinates are on the screen at its current
resolution, otherwise False. | [
"Returns",
"whether",
"the",
"given",
"xy",
"coordinates",
"are",
"on",
"the",
"screen",
"or",
"not",
"."
] | train | https://github.com/asweigart/pyautogui/blob/77524bd47334a89024013fd48e05151c3ac9289a/pyautogui/__init__.py#L266-L284 | 0.001582 |
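A small interactive check of the coordinate test above; it needs a running display, and the printed values follow from the 0 <= x < width rule in the code.

import pyautogui

width, height = pyautogui.size()
print(pyautogui.onScreen(0, 0))                      # True: top-left pixel
print(pyautogui.onScreen(width, height))             # False: one past the bottom-right pixel
print(pyautogui.onScreen((width - 1, height - 1)))   # True: a sequence argument also works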
saltstack/salt | salt/modules/boto_route53.py | delete_zone | def delete_zone(zone, region=None, key=None, keyid=None, profile=None):
'''
Delete a Route53 hosted zone.
.. versionadded:: 2015.8.0
CLI Example::
salt myminion boto_route53.delete_zone example.org
'''
if region is None:
region = 'universal'
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
_zone = conn.get_zone(zone)
if _zone:
conn.delete_hosted_zone(_zone.id)
return True
return False | python | def delete_zone(zone, region=None, key=None, keyid=None, profile=None):
'''
Delete a Route53 hosted zone.
.. versionadded:: 2015.8.0
CLI Example::
salt myminion boto_route53.delete_zone example.org
'''
if region is None:
region = 'universal'
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
_zone = conn.get_zone(zone)
if _zone:
conn.delete_hosted_zone(_zone.id)
return True
return False | [
"def",
"delete_zone",
"(",
"zone",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"if",
"region",
"is",
"None",
":",
"region",
"=",
"'universal'",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"_zone",
"=",
"conn",
".",
"get_zone",
"(",
"zone",
")",
"if",
"_zone",
":",
"conn",
".",
"delete_hosted_zone",
"(",
"_zone",
".",
"id",
")",
"return",
"True",
"return",
"False"
] | Delete a Route53 hosted zone.
.. versionadded:: 2015.8.0
CLI Example::
salt myminion boto_route53.delete_zone example.org | [
"Delete",
"a",
"Route53",
"hosted",
"zone",
"."
] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_route53.py#L442-L462 | 0.002053 |
n1analytics/python-paillier | examples/logistic_regression_encrypted_model.py | Bob.encrypted_score | def encrypted_score(self, x):
"""Compute the score of `x` by multiplying with the encrypted model,
which is a vector of `paillier.EncryptedNumber`"""
score = self.intercept
_, idx = x.nonzero()
for i in idx:
score += x[0, i] * self.weights[i]
return score | python | def encrypted_score(self, x):
"""Compute the score of `x` by multiplying with the encrypted model,
which is a vector of `paillier.EncryptedNumber`"""
score = self.intercept
_, idx = x.nonzero()
for i in idx:
score += x[0, i] * self.weights[i]
return score | [
"def",
"encrypted_score",
"(",
"self",
",",
"x",
")",
":",
"score",
"=",
"self",
".",
"intercept",
"_",
",",
"idx",
"=",
"x",
".",
"nonzero",
"(",
")",
"for",
"i",
"in",
"idx",
":",
"score",
"+=",
"x",
"[",
"0",
",",
"i",
"]",
"*",
"self",
".",
"weights",
"[",
"i",
"]",
"return",
"score"
] | Compute the score of `x` by multiplying with the encrypted model,
which is a vector of `paillier.EncryptedNumber` | [
"Compute",
"the",
"score",
"of",
"x",
"by",
"multiplying",
"with",
"the",
"encrypted",
"model",
"which",
"is",
"a",
"vector",
"of",
"paillier",
".",
"EncryptedNumber"
] | train | https://github.com/n1analytics/python-paillier/blob/955f8c0bfa9623be15b75462b121d28acf70f04b/examples/logistic_regression_encrypted_model.py#L170-L177 | 0.006349 |
enkore/i3pystatus | i3pystatus/weather/wunderground.py | Wunderground.get_forecast | def get_forecast(self):
'''
If configured to do so, make an API request to retrieve the forecast
data for the configured/queried weather station, and return the low and
high temperatures. Otherwise, return two empty strings.
'''
no_data = ('', '')
if self.forecast:
query_url = STATION_QUERY_URL % (self.api_key,
'forecast',
self.station_id)
try:
response = self.api_request(query_url)['forecast']
response = response['simpleforecast']['forecastday'][0]
except (KeyError, IndexError, TypeError):
self.logger.error(
'No forecast data found for %s', self.station_id)
self.data['update_error'] = self.update_error
return no_data
unit = 'celsius' if self.units == 'metric' else 'fahrenheit'
low_temp = response.get('low', {}).get(unit, '')
high_temp = response.get('high', {}).get(unit, '')
return low_temp, high_temp
else:
return no_data | python | def get_forecast(self):
'''
If configured to do so, make an API request to retrieve the forecast
data for the configured/queried weather station, and return the low and
high temperatures. Otherwise, return two empty strings.
'''
no_data = ('', '')
if self.forecast:
query_url = STATION_QUERY_URL % (self.api_key,
'forecast',
self.station_id)
try:
response = self.api_request(query_url)['forecast']
response = response['simpleforecast']['forecastday'][0]
except (KeyError, IndexError, TypeError):
self.logger.error(
'No forecast data found for %s', self.station_id)
self.data['update_error'] = self.update_error
return no_data
unit = 'celsius' if self.units == 'metric' else 'fahrenheit'
low_temp = response.get('low', {}).get(unit, '')
high_temp = response.get('high', {}).get(unit, '')
return low_temp, high_temp
else:
return no_data | [
"def",
"get_forecast",
"(",
"self",
")",
":",
"no_data",
"=",
"(",
"''",
",",
"''",
")",
"if",
"self",
".",
"forecast",
":",
"query_url",
"=",
"STATION_QUERY_URL",
"%",
"(",
"self",
".",
"api_key",
",",
"'forecast'",
",",
"self",
".",
"station_id",
")",
"try",
":",
"response",
"=",
"self",
".",
"api_request",
"(",
"query_url",
")",
"[",
"'forecast'",
"]",
"response",
"=",
"response",
"[",
"'simpleforecast'",
"]",
"[",
"'forecastday'",
"]",
"[",
"0",
"]",
"except",
"(",
"KeyError",
",",
"IndexError",
",",
"TypeError",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'No forecast data found for %s'",
",",
"self",
".",
"station_id",
")",
"self",
".",
"data",
"[",
"'update_error'",
"]",
"=",
"self",
".",
"update_error",
"return",
"no_data",
"unit",
"=",
"'celsius'",
"if",
"self",
".",
"units",
"==",
"'metric'",
"else",
"'fahrenheit'",
"low_temp",
"=",
"response",
".",
"get",
"(",
"'low'",
",",
"{",
"}",
")",
".",
"get",
"(",
"unit",
",",
"''",
")",
"high_temp",
"=",
"response",
".",
"get",
"(",
"'high'",
",",
"{",
"}",
")",
".",
"get",
"(",
"unit",
",",
"''",
")",
"return",
"low_temp",
",",
"high_temp",
"else",
":",
"return",
"no_data"
] | If configured to do so, make an API request to retrieve the forecast
data for the configured/queried weather station, and return the low and
high temperatures. Otherwise, return two empty strings. | [
"If",
"configured",
"to",
"do",
"so",
"make",
"an",
"API",
"request",
"to",
"retrieve",
"the",
"forecast",
"data",
"for",
"the",
"configured",
"/",
"queried",
"weather",
"station",
"and",
"return",
"the",
"low",
"and",
"high",
"temperatures",
".",
"Otherwise",
"return",
"two",
"empty",
"strings",
"."
] | train | https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/weather/wunderground.py#L130-L155 | 0.001688 |
ionelmc/python-aspectlib | src/aspectlib/debug.py | log | def log(func=None,
stacktrace=10,
stacktrace_align=60,
attributes=(),
module=True,
call=True,
call_args=True,
call_args_repr=repr,
result=True,
exception=True,
exception_repr=repr,
result_repr=strip_non_ascii,
use_logging='CRITICAL',
print_to=None):
"""
Decorates `func` to have logging.
Args
func (function):
Function to decorate. If missing log returns a partial which you can use as a decorator.
stacktrace (int):
Number of frames to show.
stacktrace_align (int):
Column to align the framelist to.
attributes (list):
            List of instance attributes to show, in case the function is an instance method.
module (bool):
Show the module.
call (bool):
If ``True``, then show calls. If ``False`` only show the call details on exceptions (if ``exception`` is
enabled) (default: ``True``)
call_args (bool):
If ``True``, then show call arguments. (default: ``True``)
call_args_repr (bool):
Function to convert one argument to a string. (default: ``repr``)
result (bool):
If ``True``, then show result. (default: ``True``)
exception (bool):
If ``True``, then show exceptions. (default: ``True``)
exception_repr (function):
Function to convert an exception to a string. (default: ``repr``)
result_repr (function):
Function to convert the result object to a string. (default: ``strip_non_ascii`` - like ``str`` but nonascii
characters are replaced with dots.)
use_logging (string):
Emit log messages with the given loglevel. (default: ``"CRITICAL"``)
print_to (fileobject):
File object to write to, in case you don't want to use logging module. (default: ``None`` - printing is
disabled)
Returns:
A decorator or a wrapper.
Example::
>>> @log(print_to=sys.stdout)
... def a(weird=False):
... if weird:
... raise RuntimeError('BOOM!')
>>> a()
a() <<< ...
a => None
>>> try:
... a(weird=True)
... except Exception:
... pass # naughty code!
a(weird=True) <<< ...
a ~ raised RuntimeError('BOOM!',)
    You can conveniently use this to log just errors, or just results, example::
>>> import aspectlib
>>> with aspectlib.weave(float, log(call=False, result=False, print_to=sys.stdout)):
... try:
... float('invalid')
... except Exception as e:
... pass # naughty code!
float('invalid') <<< ...
float ~ raised ValueError(...float...invalid...)
This makes debugging naughty code easier.
PS: Without the weaving it looks like this::
>>> try:
... log(call=False, result=False, print_to=sys.stdout)(float)('invalid')
... except Exception:
... pass # naughty code!
float('invalid') <<< ...
float ~ raised ValueError(...float...invalid...)
.. versionchanged:: 0.5.0
Renamed `arguments` to `call_args`.
Renamed `arguments_repr` to `call_args_repr`.
Added `call` option.
"""
loglevel = use_logging and (
logging._levelNames if hasattr(logging, '_levelNames') else logging._nameToLevel
).get(use_logging, logging.CRITICAL)
_missing = object()
def dump(buf):
try:
if use_logging:
logger._log(loglevel, buf, ())
if print_to:
buf += '\n'
print_to.write(buf)
except Exception as exc:
logger.critical('Failed to log a message: %s', exc, exc_info=True)
class __logged__(Aspect):
__slots__ = 'cutpoint_function', 'final_function', 'binding', '__name__', '__weakref__'
bind = False
def __init__(self, cutpoint_function, binding=None):
mimic(self, cutpoint_function)
self.cutpoint_function = cutpoint_function
self.final_function = super(__logged__, self).__call__(cutpoint_function)
self.binding = binding
def __get__(self, instance, owner):
return __logged__(self.cutpoint_function.__get__(instance, owner), instance)
def __call__(self, *args, **kwargs):
return self.final_function(*args, **kwargs)
def advising_function(self, *args, **kwargs):
name = self.cutpoint_function.__name__
instance = self.binding
if instance is not None:
if isinstance(instance, InstanceType):
instance_type = instance.__class__
else:
instance_type = type(instance)
info = []
for key in attributes:
if key.endswith('()'):
callarg = key = key.rstrip('()')
else:
callarg = False
val = getattr(instance, key, _missing)
if val is not _missing and key != name:
info.append(' %s=%s' % (
key, call_args_repr(val() if callarg else val)
))
sig = buf = '{%s%s%s}.%s' % (
instance_type.__module__ + '.' if module else '',
instance_type.__name__,
''.join(info),
name
)
else:
sig = buf = name
if call_args:
buf += '(%s%s)' % (
', '.join(repr(i) for i in (args if call_args is True else args[:call_args])),
((', ' if args else '') + ', '.join('%s=%r' % i for i in kwargs.items()))
if kwargs and call_args is True
else '',
)
if stacktrace:
buf = ("%%-%ds <<< %%s" % stacktrace_align) % (buf, format_stack(skip=1, length=stacktrace))
if call:
dump(buf)
try:
res = yield
except Exception as exc:
if exception:
if not call:
dump(buf)
dump('%s ~ raised %s' % (sig, exception_repr(exc)))
raise
if result:
dump('%s => %s' % (sig, result_repr(res)))
if func:
return __logged__(func)
else:
return __logged__ | python | def log(func=None,
stacktrace=10,
stacktrace_align=60,
attributes=(),
module=True,
call=True,
call_args=True,
call_args_repr=repr,
result=True,
exception=True,
exception_repr=repr,
result_repr=strip_non_ascii,
use_logging='CRITICAL',
print_to=None):
"""
Decorates `func` to have logging.
Args
func (function):
Function to decorate. If missing log returns a partial which you can use as a decorator.
stacktrace (int):
Number of frames to show.
stacktrace_align (int):
Column to align the framelist to.
attributes (list):
            List of instance attributes to show, in case the function is an instance method.
module (bool):
Show the module.
call (bool):
If ``True``, then show calls. If ``False`` only show the call details on exceptions (if ``exception`` is
enabled) (default: ``True``)
call_args (bool):
If ``True``, then show call arguments. (default: ``True``)
call_args_repr (bool):
Function to convert one argument to a string. (default: ``repr``)
result (bool):
If ``True``, then show result. (default: ``True``)
exception (bool):
If ``True``, then show exceptions. (default: ``True``)
exception_repr (function):
Function to convert an exception to a string. (default: ``repr``)
result_repr (function):
Function to convert the result object to a string. (default: ``strip_non_ascii`` - like ``str`` but nonascii
characters are replaced with dots.)
use_logging (string):
Emit log messages with the given loglevel. (default: ``"CRITICAL"``)
print_to (fileobject):
File object to write to, in case you don't want to use logging module. (default: ``None`` - printing is
disabled)
Returns:
A decorator or a wrapper.
Example::
>>> @log(print_to=sys.stdout)
... def a(weird=False):
... if weird:
... raise RuntimeError('BOOM!')
>>> a()
a() <<< ...
a => None
>>> try:
... a(weird=True)
... except Exception:
... pass # naughty code!
a(weird=True) <<< ...
a ~ raised RuntimeError('BOOM!',)
    You can conveniently use this to log just errors, or just results, example::
>>> import aspectlib
>>> with aspectlib.weave(float, log(call=False, result=False, print_to=sys.stdout)):
... try:
... float('invalid')
... except Exception as e:
... pass # naughty code!
float('invalid') <<< ...
float ~ raised ValueError(...float...invalid...)
This makes debugging naughty code easier.
PS: Without the weaving it looks like this::
>>> try:
... log(call=False, result=False, print_to=sys.stdout)(float)('invalid')
... except Exception:
... pass # naughty code!
float('invalid') <<< ...
float ~ raised ValueError(...float...invalid...)
.. versionchanged:: 0.5.0
Renamed `arguments` to `call_args`.
Renamed `arguments_repr` to `call_args_repr`.
Added `call` option.
"""
loglevel = use_logging and (
logging._levelNames if hasattr(logging, '_levelNames') else logging._nameToLevel
).get(use_logging, logging.CRITICAL)
_missing = object()
def dump(buf):
try:
if use_logging:
logger._log(loglevel, buf, ())
if print_to:
buf += '\n'
print_to.write(buf)
except Exception as exc:
logger.critical('Failed to log a message: %s', exc, exc_info=True)
class __logged__(Aspect):
__slots__ = 'cutpoint_function', 'final_function', 'binding', '__name__', '__weakref__'
bind = False
def __init__(self, cutpoint_function, binding=None):
mimic(self, cutpoint_function)
self.cutpoint_function = cutpoint_function
self.final_function = super(__logged__, self).__call__(cutpoint_function)
self.binding = binding
def __get__(self, instance, owner):
return __logged__(self.cutpoint_function.__get__(instance, owner), instance)
def __call__(self, *args, **kwargs):
return self.final_function(*args, **kwargs)
def advising_function(self, *args, **kwargs):
name = self.cutpoint_function.__name__
instance = self.binding
if instance is not None:
if isinstance(instance, InstanceType):
instance_type = instance.__class__
else:
instance_type = type(instance)
info = []
for key in attributes:
if key.endswith('()'):
callarg = key = key.rstrip('()')
else:
callarg = False
val = getattr(instance, key, _missing)
if val is not _missing and key != name:
info.append(' %s=%s' % (
key, call_args_repr(val() if callarg else val)
))
sig = buf = '{%s%s%s}.%s' % (
instance_type.__module__ + '.' if module else '',
instance_type.__name__,
''.join(info),
name
)
else:
sig = buf = name
if call_args:
buf += '(%s%s)' % (
', '.join(repr(i) for i in (args if call_args is True else args[:call_args])),
((', ' if args else '') + ', '.join('%s=%r' % i for i in kwargs.items()))
if kwargs and call_args is True
else '',
)
if stacktrace:
buf = ("%%-%ds <<< %%s" % stacktrace_align) % (buf, format_stack(skip=1, length=stacktrace))
if call:
dump(buf)
try:
res = yield
except Exception as exc:
if exception:
if not call:
dump(buf)
dump('%s ~ raised %s' % (sig, exception_repr(exc)))
raise
if result:
dump('%s => %s' % (sig, result_repr(res)))
if func:
return __logged__(func)
else:
return __logged__ | [
"def",
"log",
"(",
"func",
"=",
"None",
",",
"stacktrace",
"=",
"10",
",",
"stacktrace_align",
"=",
"60",
",",
"attributes",
"=",
"(",
")",
",",
"module",
"=",
"True",
",",
"call",
"=",
"True",
",",
"call_args",
"=",
"True",
",",
"call_args_repr",
"=",
"repr",
",",
"result",
"=",
"True",
",",
"exception",
"=",
"True",
",",
"exception_repr",
"=",
"repr",
",",
"result_repr",
"=",
"strip_non_ascii",
",",
"use_logging",
"=",
"'CRITICAL'",
",",
"print_to",
"=",
"None",
")",
":",
"loglevel",
"=",
"use_logging",
"and",
"(",
"logging",
".",
"_levelNames",
"if",
"hasattr",
"(",
"logging",
",",
"'_levelNames'",
")",
"else",
"logging",
".",
"_nameToLevel",
")",
".",
"get",
"(",
"use_logging",
",",
"logging",
".",
"CRITICAL",
")",
"_missing",
"=",
"object",
"(",
")",
"def",
"dump",
"(",
"buf",
")",
":",
"try",
":",
"if",
"use_logging",
":",
"logger",
".",
"_log",
"(",
"loglevel",
",",
"buf",
",",
"(",
")",
")",
"if",
"print_to",
":",
"buf",
"+=",
"'\\n'",
"print_to",
".",
"write",
"(",
"buf",
")",
"except",
"Exception",
"as",
"exc",
":",
"logger",
".",
"critical",
"(",
"'Failed to log a message: %s'",
",",
"exc",
",",
"exc_info",
"=",
"True",
")",
"class",
"__logged__",
"(",
"Aspect",
")",
":",
"__slots__",
"=",
"'cutpoint_function'",
",",
"'final_function'",
",",
"'binding'",
",",
"'__name__'",
",",
"'__weakref__'",
"bind",
"=",
"False",
"def",
"__init__",
"(",
"self",
",",
"cutpoint_function",
",",
"binding",
"=",
"None",
")",
":",
"mimic",
"(",
"self",
",",
"cutpoint_function",
")",
"self",
".",
"cutpoint_function",
"=",
"cutpoint_function",
"self",
".",
"final_function",
"=",
"super",
"(",
"__logged__",
",",
"self",
")",
".",
"__call__",
"(",
"cutpoint_function",
")",
"self",
".",
"binding",
"=",
"binding",
"def",
"__get__",
"(",
"self",
",",
"instance",
",",
"owner",
")",
":",
"return",
"__logged__",
"(",
"self",
".",
"cutpoint_function",
".",
"__get__",
"(",
"instance",
",",
"owner",
")",
",",
"instance",
")",
"def",
"__call__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"final_function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"def",
"advising_function",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
"=",
"self",
".",
"cutpoint_function",
".",
"__name__",
"instance",
"=",
"self",
".",
"binding",
"if",
"instance",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"instance",
",",
"InstanceType",
")",
":",
"instance_type",
"=",
"instance",
".",
"__class__",
"else",
":",
"instance_type",
"=",
"type",
"(",
"instance",
")",
"info",
"=",
"[",
"]",
"for",
"key",
"in",
"attributes",
":",
"if",
"key",
".",
"endswith",
"(",
"'()'",
")",
":",
"callarg",
"=",
"key",
"=",
"key",
".",
"rstrip",
"(",
"'()'",
")",
"else",
":",
"callarg",
"=",
"False",
"val",
"=",
"getattr",
"(",
"instance",
",",
"key",
",",
"_missing",
")",
"if",
"val",
"is",
"not",
"_missing",
"and",
"key",
"!=",
"name",
":",
"info",
".",
"append",
"(",
"' %s=%s'",
"%",
"(",
"key",
",",
"call_args_repr",
"(",
"val",
"(",
")",
"if",
"callarg",
"else",
"val",
")",
")",
")",
"sig",
"=",
"buf",
"=",
"'{%s%s%s}.%s'",
"%",
"(",
"instance_type",
".",
"__module__",
"+",
"'.'",
"if",
"module",
"else",
"''",
",",
"instance_type",
".",
"__name__",
",",
"''",
".",
"join",
"(",
"info",
")",
",",
"name",
")",
"else",
":",
"sig",
"=",
"buf",
"=",
"name",
"if",
"call_args",
":",
"buf",
"+=",
"'(%s%s)'",
"%",
"(",
"', '",
".",
"join",
"(",
"repr",
"(",
"i",
")",
"for",
"i",
"in",
"(",
"args",
"if",
"call_args",
"is",
"True",
"else",
"args",
"[",
":",
"call_args",
"]",
")",
")",
",",
"(",
"(",
"', '",
"if",
"args",
"else",
"''",
")",
"+",
"', '",
".",
"join",
"(",
"'%s=%r'",
"%",
"i",
"for",
"i",
"in",
"kwargs",
".",
"items",
"(",
")",
")",
")",
"if",
"kwargs",
"and",
"call_args",
"is",
"True",
"else",
"''",
",",
")",
"if",
"stacktrace",
":",
"buf",
"=",
"(",
"\"%%-%ds <<< %%s\"",
"%",
"stacktrace_align",
")",
"%",
"(",
"buf",
",",
"format_stack",
"(",
"skip",
"=",
"1",
",",
"length",
"=",
"stacktrace",
")",
")",
"if",
"call",
":",
"dump",
"(",
"buf",
")",
"try",
":",
"res",
"=",
"yield",
"except",
"Exception",
"as",
"exc",
":",
"if",
"exception",
":",
"if",
"not",
"call",
":",
"dump",
"(",
"buf",
")",
"dump",
"(",
"'%s ~ raised %s'",
"%",
"(",
"sig",
",",
"exception_repr",
"(",
"exc",
")",
")",
")",
"raise",
"if",
"result",
":",
"dump",
"(",
"'%s => %s'",
"%",
"(",
"sig",
",",
"result_repr",
"(",
"res",
")",
")",
")",
"if",
"func",
":",
"return",
"__logged__",
"(",
"func",
")",
"else",
":",
"return",
"__logged__"
] | Decorates `func` to have logging.
Args
func (function):
Function to decorate. If missing log returns a partial which you can use as a decorator.
stacktrace (int):
Number of frames to show.
stacktrace_align (int):
Column to align the framelist to.
attributes (list):
            List of instance attributes to show, in case the function is an instance method.
module (bool):
Show the module.
call (bool):
If ``True``, then show calls. If ``False`` only show the call details on exceptions (if ``exception`` is
enabled) (default: ``True``)
call_args (bool):
If ``True``, then show call arguments. (default: ``True``)
call_args_repr (bool):
Function to convert one argument to a string. (default: ``repr``)
result (bool):
If ``True``, then show result. (default: ``True``)
exception (bool):
If ``True``, then show exceptions. (default: ``True``)
exception_repr (function):
Function to convert an exception to a string. (default: ``repr``)
result_repr (function):
Function to convert the result object to a string. (default: ``strip_non_ascii`` - like ``str`` but nonascii
characters are replaced with dots.)
use_logging (string):
Emit log messages with the given loglevel. (default: ``"CRITICAL"``)
print_to (fileobject):
File object to write to, in case you don't want to use logging module. (default: ``None`` - printing is
disabled)
Returns:
A decorator or a wrapper.
Example::
>>> @log(print_to=sys.stdout)
... def a(weird=False):
... if weird:
... raise RuntimeError('BOOM!')
>>> a()
a() <<< ...
a => None
>>> try:
... a(weird=True)
... except Exception:
... pass # naughty code!
a(weird=True) <<< ...
a ~ raised RuntimeError('BOOM!',)
    You can conveniently use this to log just errors, or just results, example::
>>> import aspectlib
>>> with aspectlib.weave(float, log(call=False, result=False, print_to=sys.stdout)):
... try:
... float('invalid')
... except Exception as e:
... pass # naughty code!
float('invalid') <<< ...
float ~ raised ValueError(...float...invalid...)
This makes debugging naughty code easier.
PS: Without the weaving it looks like this::
>>> try:
... log(call=False, result=False, print_to=sys.stdout)(float)('invalid')
... except Exception:
... pass # naughty code!
float('invalid') <<< ...
float ~ raised ValueError(...float...invalid...)
.. versionchanged:: 0.5.0
Renamed `arguments` to `call_args`.
Renamed `arguments_repr` to `call_args_repr`.
Added `call` option. | [
"Decorates",
"func",
"to",
"have",
"logging",
"."
] | train | https://github.com/ionelmc/python-aspectlib/blob/7dba7651ab76160b9ce66c9c11e770f7844e95ec/src/aspectlib/debug.py#L48-L233 | 0.002463 |
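To complement the doctests above, here is a small sketch of the `attributes` option applied to an instance method. It assumes the aspectlib package is importable; the exact output prefix (module name, spacing) may differ slightly between versions, so the expected lines are only approximate.

```python
import sys
from aspectlib.debug import log

class Connection:
    def __init__(self, host):
        self.host = host

    # stacktrace=0 drops the "<<< frames" suffix and use_logging=False keeps
    # output on stdout only, so the example stays short.
    @log(print_to=sys.stdout, use_logging=False, stacktrace=0, attributes=('host',))
    def send(self, payload):
        return len(payload)

Connection('example.com').send('ping')
# Roughly:
#   {__main__.Connection host='example.com'}.send('ping')
#   {__main__.Connection host='example.com'}.send => 4
```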
disqus/nydus | nydus/db/promise.py | promise_method | def promise_method(func):
"""
A decorator which ensures that once a method has been marked as resolved
(via Class.__resolved)) will then propagate the attribute (function) call
upstream.
"""
name = func.__name__
@wraps(func)
def wrapped(self, *args, **kwargs):
cls_name = type(self).__name__
if getattr(self, '_%s__resolved' % (cls_name,)):
return getattr(getattr(self, '_%s__wrapped' % (cls_name,)), name)(*args, **kwargs)
return func(self, *args, **kwargs)
return wrapped | python | def promise_method(func):
"""
A decorator which ensures that once a method has been marked as resolved
(via Class.__resolved)) will then propagate the attribute (function) call
upstream.
"""
name = func.__name__
@wraps(func)
def wrapped(self, *args, **kwargs):
cls_name = type(self).__name__
if getattr(self, '_%s__resolved' % (cls_name,)):
return getattr(getattr(self, '_%s__wrapped' % (cls_name,)), name)(*args, **kwargs)
return func(self, *args, **kwargs)
return wrapped | [
"def",
"promise_method",
"(",
"func",
")",
":",
"name",
"=",
"func",
".",
"__name__",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cls_name",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
"if",
"getattr",
"(",
"self",
",",
"'_%s__resolved'",
"%",
"(",
"cls_name",
",",
")",
")",
":",
"return",
"getattr",
"(",
"getattr",
"(",
"self",
",",
"'_%s__wrapped'",
"%",
"(",
"cls_name",
",",
")",
")",
",",
"name",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped"
] | A decorator which ensures that once a method has been marked as resolved
(via Class.__resolved)) will then propagate the attribute (function) call
upstream. | [
"A",
"decorator",
"which",
"ensures",
"that",
"once",
"a",
"method",
"has",
"been",
"marked",
"as",
"resolved",
"(",
"via",
"Class",
".",
"__resolved",
"))",
"will",
"then",
"propagate",
"the",
"attribute",
"(",
"function",
")",
"call",
"upstream",
"."
] | train | https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/promise.py#L13-L27 | 0.003663 |
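A self-contained sketch of the name-mangling convention `promise_method` relies on; the `EagerPromise` class below is hypothetical and exists only to show when the call is proxied to the wrapped object.

```python
from functools import wraps

def promise_method(func):
    # Same decorator as above, reproduced so the sketch runs on its own.
    name = func.__name__

    @wraps(func)
    def wrapped(self, *args, **kwargs):
        cls_name = type(self).__name__
        if getattr(self, '_%s__resolved' % (cls_name,)):
            return getattr(getattr(self, '_%s__wrapped' % (cls_name,)), name)(*args, **kwargs)
        return func(self, *args, **kwargs)
    return wrapped

class EagerPromise:
    """Hypothetical promise: resolved immediately when constructed with a value."""
    def __init__(self, wrapped=None):
        self.__resolved = wrapped is not None   # mangled to _EagerPromise__resolved
        self.__wrapped = wrapped                # mangled to _EagerPromise__wrapped

    @promise_method
    def upper(self):
        return 'not resolved yet'

print(EagerPromise().upper())         # not resolved yet
print(EagerPromise('hello').upper())  # HELLO - proxied to str.upper on the wrapped value
```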
kottenator/django-compressor-toolkit | compressor_toolkit/precompilers.py | BaseCompiler.input | def input(self, **kwargs):
"""
Specify temporary input file extension.
Browserify requires explicit file extension (".js" or ".json" by default).
https://github.com/substack/node-browserify/issues/1469
"""
if self.infile is None and "{infile}" in self.command:
if self.filename is None:
self.infile = NamedTemporaryFile(mode='wb', suffix=self.infile_ext)
self.infile.write(self.content.encode(self.default_encoding))
self.infile.flush()
self.options += (
('infile', self.infile.name),
)
return super(BaseCompiler, self).input(**kwargs) | python | def input(self, **kwargs):
"""
Specify temporary input file extension.
Browserify requires explicit file extension (".js" or ".json" by default).
https://github.com/substack/node-browserify/issues/1469
"""
if self.infile is None and "{infile}" in self.command:
if self.filename is None:
self.infile = NamedTemporaryFile(mode='wb', suffix=self.infile_ext)
self.infile.write(self.content.encode(self.default_encoding))
self.infile.flush()
self.options += (
('infile', self.infile.name),
)
return super(BaseCompiler, self).input(**kwargs) | [
"def",
"input",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"infile",
"is",
"None",
"and",
"\"{infile}\"",
"in",
"self",
".",
"command",
":",
"if",
"self",
".",
"filename",
"is",
"None",
":",
"self",
".",
"infile",
"=",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'wb'",
",",
"suffix",
"=",
"self",
".",
"infile_ext",
")",
"self",
".",
"infile",
".",
"write",
"(",
"self",
".",
"content",
".",
"encode",
"(",
"self",
".",
"default_encoding",
")",
")",
"self",
".",
"infile",
".",
"flush",
"(",
")",
"self",
".",
"options",
"+=",
"(",
"(",
"'infile'",
",",
"self",
".",
"infile",
".",
"name",
")",
",",
")",
"return",
"super",
"(",
"BaseCompiler",
",",
"self",
")",
".",
"input",
"(",
"*",
"*",
"kwargs",
")"
] | Specify temporary input file extension.
Browserify requires explicit file extension (".js" or ".json" by default).
https://github.com/substack/node-browserify/issues/1469 | [
"Specify",
"temporary",
"input",
"file",
"extension",
"."
] | train | https://github.com/kottenator/django-compressor-toolkit/blob/e7bfdaa354e9c9189db0e4ba4fa049045adad91b/compressor_toolkit/precompilers.py#L38-L53 | 0.005682 |
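The core trick above — writing the content to a temporary file whose suffix the external tool will accept — can be tried in isolation. The command string below is illustrative only; nothing is executed.

```python
from tempfile import NamedTemporaryFile

content = 'console.log("hello");'
with NamedTemporaryFile(mode='wb', suffix='.js') as infile:
    infile.write(content.encode('utf-8'))
    infile.flush()
    # A compiler class would substitute this path into its "{infile}"
    # command placeholder, e.g.:
    command = 'browserify {infile}'.format(infile=infile.name)
    print(command)   # browserify /tmp/tmpXXXXXXXX.js (name varies per run)
```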
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxfile.py | DXFile.upload_part | def upload_part(self, data, index=None, display_progress=False, report_progress_fn=None, **kwargs):
"""
:param data: Data to be uploaded in this part
:type data: str or mmap object, bytes on python3
:param index: Index of part to be uploaded; must be in [1, 10000]
:type index: integer
:param display_progress: Whether to print "." to stderr when done
:type display_progress: boolean
:param report_progress_fn: Optional: a function to call that takes in two arguments (self, # bytes transmitted)
:type report_progress_fn: function or None
:raises: :exc:`dxpy.exceptions.DXFileError` if *index* is given and is not in the correct range, :exc:`requests.exceptions.HTTPError` if upload fails
Uploads the data in *data* as part number *index* for the
associated file. If no value for *index* is given, *index*
defaults to 1. This probably only makes sense if this is the
only part to be uploaded.
"""
if not USING_PYTHON2:
# In python3, the underlying system methods use the 'bytes' type, not 'string'
assert(isinstance(data, bytes))
req_input = {}
if index is not None:
req_input["index"] = int(index)
md5 = hashlib.md5()
if hasattr(data, 'seek') and hasattr(data, 'tell'):
# data is a buffer; record initial position (so we can rewind back)
rewind_input_buffer_offset = data.tell()
while True:
bytes_read = data.read(MD5_READ_CHUNK_SIZE)
if bytes_read:
md5.update(bytes_read)
else:
break
# rewind the buffer to original position
data.seek(rewind_input_buffer_offset)
else:
md5.update(data)
req_input["md5"] = md5.hexdigest()
req_input["size"] = len(data)
def get_upload_url_and_headers():
# This function is called from within a retry loop, so to avoid amplifying the number of retries
# geometrically, we decrease the allowed number of retries for the nested API call every time.
if 'max_retries' not in kwargs:
kwargs['max_retries'] = dxpy.DEFAULT_RETRIES
elif kwargs['max_retries'] > 0:
kwargs['max_retries'] -= 1
if "timeout" not in kwargs:
kwargs["timeout"] = FILE_REQUEST_TIMEOUT
resp = dxpy.api.file_upload(self._dxid, req_input, **kwargs)
url = resp["url"]
return url, _validate_headers(resp.get("headers", {}))
# The file upload API requires us to get a pre-authenticated upload URL (and headers for it) every time we
# attempt an upload. Because DXHTTPRequest will retry requests under retryable conditions, we give it a callback
# to ask us for a new upload URL every time it attempts a request (instead of giving them directly).
dxpy.DXHTTPRequest(get_upload_url_and_headers,
data,
jsonify_data=False,
prepend_srv=False,
always_retry=True,
timeout=FILE_REQUEST_TIMEOUT,
auth=None,
method='PUT')
self._num_uploaded_parts += 1
if display_progress:
warn(".")
if report_progress_fn is not None:
report_progress_fn(self, len(data)) | python | def upload_part(self, data, index=None, display_progress=False, report_progress_fn=None, **kwargs):
"""
:param data: Data to be uploaded in this part
:type data: str or mmap object, bytes on python3
:param index: Index of part to be uploaded; must be in [1, 10000]
:type index: integer
:param display_progress: Whether to print "." to stderr when done
:type display_progress: boolean
:param report_progress_fn: Optional: a function to call that takes in two arguments (self, # bytes transmitted)
:type report_progress_fn: function or None
:raises: :exc:`dxpy.exceptions.DXFileError` if *index* is given and is not in the correct range, :exc:`requests.exceptions.HTTPError` if upload fails
Uploads the data in *data* as part number *index* for the
associated file. If no value for *index* is given, *index*
defaults to 1. This probably only makes sense if this is the
only part to be uploaded.
"""
if not USING_PYTHON2:
# In python3, the underlying system methods use the 'bytes' type, not 'string'
assert(isinstance(data, bytes))
req_input = {}
if index is not None:
req_input["index"] = int(index)
md5 = hashlib.md5()
if hasattr(data, 'seek') and hasattr(data, 'tell'):
# data is a buffer; record initial position (so we can rewind back)
rewind_input_buffer_offset = data.tell()
while True:
bytes_read = data.read(MD5_READ_CHUNK_SIZE)
if bytes_read:
md5.update(bytes_read)
else:
break
# rewind the buffer to original position
data.seek(rewind_input_buffer_offset)
else:
md5.update(data)
req_input["md5"] = md5.hexdigest()
req_input["size"] = len(data)
def get_upload_url_and_headers():
# This function is called from within a retry loop, so to avoid amplifying the number of retries
# geometrically, we decrease the allowed number of retries for the nested API call every time.
if 'max_retries' not in kwargs:
kwargs['max_retries'] = dxpy.DEFAULT_RETRIES
elif kwargs['max_retries'] > 0:
kwargs['max_retries'] -= 1
if "timeout" not in kwargs:
kwargs["timeout"] = FILE_REQUEST_TIMEOUT
resp = dxpy.api.file_upload(self._dxid, req_input, **kwargs)
url = resp["url"]
return url, _validate_headers(resp.get("headers", {}))
# The file upload API requires us to get a pre-authenticated upload URL (and headers for it) every time we
# attempt an upload. Because DXHTTPRequest will retry requests under retryable conditions, we give it a callback
# to ask us for a new upload URL every time it attempts a request (instead of giving them directly).
dxpy.DXHTTPRequest(get_upload_url_and_headers,
data,
jsonify_data=False,
prepend_srv=False,
always_retry=True,
timeout=FILE_REQUEST_TIMEOUT,
auth=None,
method='PUT')
self._num_uploaded_parts += 1
if display_progress:
warn(".")
if report_progress_fn is not None:
report_progress_fn(self, len(data)) | [
"def",
"upload_part",
"(",
"self",
",",
"data",
",",
"index",
"=",
"None",
",",
"display_progress",
"=",
"False",
",",
"report_progress_fn",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"USING_PYTHON2",
":",
"# In python3, the underlying system methods use the 'bytes' type, not 'string'",
"assert",
"(",
"isinstance",
"(",
"data",
",",
"bytes",
")",
")",
"req_input",
"=",
"{",
"}",
"if",
"index",
"is",
"not",
"None",
":",
"req_input",
"[",
"\"index\"",
"]",
"=",
"int",
"(",
"index",
")",
"md5",
"=",
"hashlib",
".",
"md5",
"(",
")",
"if",
"hasattr",
"(",
"data",
",",
"'seek'",
")",
"and",
"hasattr",
"(",
"data",
",",
"'tell'",
")",
":",
"# data is a buffer; record initial position (so we can rewind back)",
"rewind_input_buffer_offset",
"=",
"data",
".",
"tell",
"(",
")",
"while",
"True",
":",
"bytes_read",
"=",
"data",
".",
"read",
"(",
"MD5_READ_CHUNK_SIZE",
")",
"if",
"bytes_read",
":",
"md5",
".",
"update",
"(",
"bytes_read",
")",
"else",
":",
"break",
"# rewind the buffer to original position",
"data",
".",
"seek",
"(",
"rewind_input_buffer_offset",
")",
"else",
":",
"md5",
".",
"update",
"(",
"data",
")",
"req_input",
"[",
"\"md5\"",
"]",
"=",
"md5",
".",
"hexdigest",
"(",
")",
"req_input",
"[",
"\"size\"",
"]",
"=",
"len",
"(",
"data",
")",
"def",
"get_upload_url_and_headers",
"(",
")",
":",
"# This function is called from within a retry loop, so to avoid amplifying the number of retries",
"# geometrically, we decrease the allowed number of retries for the nested API call every time.",
"if",
"'max_retries'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'max_retries'",
"]",
"=",
"dxpy",
".",
"DEFAULT_RETRIES",
"elif",
"kwargs",
"[",
"'max_retries'",
"]",
">",
"0",
":",
"kwargs",
"[",
"'max_retries'",
"]",
"-=",
"1",
"if",
"\"timeout\"",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"\"timeout\"",
"]",
"=",
"FILE_REQUEST_TIMEOUT",
"resp",
"=",
"dxpy",
".",
"api",
".",
"file_upload",
"(",
"self",
".",
"_dxid",
",",
"req_input",
",",
"*",
"*",
"kwargs",
")",
"url",
"=",
"resp",
"[",
"\"url\"",
"]",
"return",
"url",
",",
"_validate_headers",
"(",
"resp",
".",
"get",
"(",
"\"headers\"",
",",
"{",
"}",
")",
")",
"# The file upload API requires us to get a pre-authenticated upload URL (and headers for it) every time we",
"# attempt an upload. Because DXHTTPRequest will retry requests under retryable conditions, we give it a callback",
"# to ask us for a new upload URL every time it attempts a request (instead of giving them directly).",
"dxpy",
".",
"DXHTTPRequest",
"(",
"get_upload_url_and_headers",
",",
"data",
",",
"jsonify_data",
"=",
"False",
",",
"prepend_srv",
"=",
"False",
",",
"always_retry",
"=",
"True",
",",
"timeout",
"=",
"FILE_REQUEST_TIMEOUT",
",",
"auth",
"=",
"None",
",",
"method",
"=",
"'PUT'",
")",
"self",
".",
"_num_uploaded_parts",
"+=",
"1",
"if",
"display_progress",
":",
"warn",
"(",
"\".\"",
")",
"if",
"report_progress_fn",
"is",
"not",
"None",
":",
"report_progress_fn",
"(",
"self",
",",
"len",
"(",
"data",
")",
")"
] | :param data: Data to be uploaded in this part
:type data: str or mmap object, bytes on python3
:param index: Index of part to be uploaded; must be in [1, 10000]
:type index: integer
:param display_progress: Whether to print "." to stderr when done
:type display_progress: boolean
:param report_progress_fn: Optional: a function to call that takes in two arguments (self, # bytes transmitted)
:type report_progress_fn: function or None
:raises: :exc:`dxpy.exceptions.DXFileError` if *index* is given and is not in the correct range, :exc:`requests.exceptions.HTTPError` if upload fails
Uploads the data in *data* as part number *index* for the
associated file. If no value for *index* is given, *index*
defaults to 1. This probably only makes sense if this is the
only part to be uploaded. | [
":",
"param",
"data",
":",
"Data",
"to",
"be",
"uploaded",
"in",
"this",
"part",
":",
"type",
"data",
":",
"str",
"or",
"mmap",
"object",
"bytes",
"on",
"python3",
":",
"param",
"index",
":",
"Index",
"of",
"part",
"to",
"be",
"uploaded",
";",
"must",
"be",
"in",
"[",
"1",
"10000",
"]",
":",
"type",
"index",
":",
"integer",
":",
"param",
"display_progress",
":",
"Whether",
"to",
"print",
".",
"to",
"stderr",
"when",
"done",
":",
"type",
"display_progress",
":",
"boolean",
":",
"param",
"report_progress_fn",
":",
"Optional",
":",
"a",
"function",
"to",
"call",
"that",
"takes",
"in",
"two",
"arguments",
"(",
"self",
"#",
"bytes",
"transmitted",
")",
":",
"type",
"report_progress_fn",
":",
"function",
"or",
"None",
":",
"raises",
":",
":",
"exc",
":",
"dxpy",
".",
"exceptions",
".",
"DXFileError",
"if",
"*",
"index",
"*",
"is",
"given",
"and",
"is",
"not",
"in",
"the",
"correct",
"range",
":",
"exc",
":",
"requests",
".",
"exceptions",
".",
"HTTPError",
"if",
"upload",
"fails"
] | train | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxfile.py#L651-L727 | 0.003382 |
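Only the checksum step is easy to exercise without a DNAnexus project. The standalone sketch below reproduces the hash-then-rewind behaviour; the chunk size is an assumed value (the real constant is defined elsewhere in dxpy).

```python
import hashlib
import io

MD5_READ_CHUNK_SIZE = 1024 * 1024  # assumed; dxpy defines the real constant

def part_md5(data):
    """Hash bytes or a seekable buffer in chunks, leaving the buffer position unchanged."""
    md5 = hashlib.md5()
    if hasattr(data, 'seek') and hasattr(data, 'tell'):
        start = data.tell()
        while True:
            chunk = data.read(MD5_READ_CHUNK_SIZE)
            if not chunk:
                break
            md5.update(chunk)
        data.seek(start)   # rewind so the upload can re-read the same part
    else:
        md5.update(data)
    return md5.hexdigest()

buf = io.BytesIO(b'example part payload')
print(part_md5(buf))
print(buf.read())          # position unchanged: b'example part payload'
```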
JNRowe/upoints | upoints/geonames.py | Locations.import_locations | def import_locations(self, data):
"""Parse geonames.org country database exports.
``import_locations()`` returns a list of :class:`trigpoints.Trigpoint`
objects generated from the data exported by geonames.org_.
It expects data files in the following tab separated format::
2633441 Afon Wyre Afon Wyre River Wayrai,River Wyrai,Wyre 52.3166667 -4.1666667 H STM GB GB 00 0 -9999 Europe/London 1994-01-13
2633442 Wyre Wyre Viera 59.1166667 -2.9666667 T ISL GB GB V9 0 1 Europe/London 2004-09-24
2633443 Wraysbury Wraysbury Wyrardisbury 51.45 -0.55 P PPL GB P9 0 28 Europe/London 2006-08-21
Files containing the data in this format can be downloaded from the
geonames.org_ site in their `database export page`_.
Files downloaded from the geonames site when processed by
``import_locations()`` will return ``list`` objects of the following
style::
[Location(2633441, "Afon Wyre", "Afon Wyre",
['River Wayrai', 'River Wyrai', 'Wyre'],
52.3166667, -4.1666667, "H", "STM", "GB", ['GB'], "00",
None, None, None, 0, None, -9999, "Europe/London",
datetime.date(1994, 1, 13)),
Location(2633442, "Wyre", "Wyre", ['Viera'], 59.1166667,
-2.9666667, "T", "ISL", "GB", ['GB'], "V9", None, None,
None, 0, None, 1, "Europe/London",
datetime.date(2004, 9, 24)),
Location(2633443, "Wraysbury", "Wraysbury", ['Wyrardisbury'],
51.45, -0.55, "P", "PPL", "GB", None, "P9", None, None,
None, 0, None, 28, "Europe/London",
datetime.date(2006, 8, 21))]
Args:
data (iter): geonames.org locations data to read
Returns:
list: geonames.org identifiers with :class:`Location` objects
Raises:
FileFormatError: Unknown file format
.. _geonames.org: http://www.geonames.org/
.. _database export page: http://download.geonames.org/export/dump/
"""
self._data = data
field_names = ('geonameid', 'name', 'asciiname', 'alt_names',
'latitude', 'longitude', 'feature_class',
'feature_code', 'country', 'alt_country', 'admin1',
'admin2', 'admin3', 'admin4', 'population', 'altitude',
'gtopo30', 'tzname', 'modified_date')
comma_split = lambda s: s.split(',')
date_parse = lambda s: datetime.date(*map(int, s.split('-')))
or_none = lambda x, s: x(s) if s else None
str_or_none = lambda s: or_none(str, s)
float_or_none = lambda s: or_none(float, s)
int_or_none = lambda s: or_none(int, s)
tz_parse = lambda s: self.timezones[s][0] if self.timezones else None
field_parsers = (int_or_none, str_or_none, str_or_none, comma_split,
float_or_none, float_or_none, str_or_none,
str_or_none, str_or_none, comma_split, str_or_none,
str_or_none, str_or_none, str_or_none, int_or_none,
int_or_none, int_or_none, tz_parse, date_parse)
data = utils.prepare_csv_read(data, field_names, delimiter=r" ")
for row in data:
try:
for name, parser in zip(field_names, field_parsers):
row[name] = parser(row[name])
except ValueError:
raise utils.FileFormatError('geonames.org')
self.append(Location(**row)) | python | def import_locations(self, data):
"""Parse geonames.org country database exports.
``import_locations()`` returns a list of :class:`trigpoints.Trigpoint`
objects generated from the data exported by geonames.org_.
It expects data files in the following tab separated format::
2633441 Afon Wyre Afon Wyre River Wayrai,River Wyrai,Wyre 52.3166667 -4.1666667 H STM GB GB 00 0 -9999 Europe/London 1994-01-13
2633442 Wyre Wyre Viera 59.1166667 -2.9666667 T ISL GB GB V9 0 1 Europe/London 2004-09-24
2633443 Wraysbury Wraysbury Wyrardisbury 51.45 -0.55 P PPL GB P9 0 28 Europe/London 2006-08-21
Files containing the data in this format can be downloaded from the
geonames.org_ site in their `database export page`_.
Files downloaded from the geonames site when processed by
``import_locations()`` will return ``list`` objects of the following
style::
[Location(2633441, "Afon Wyre", "Afon Wyre",
['River Wayrai', 'River Wyrai', 'Wyre'],
52.3166667, -4.1666667, "H", "STM", "GB", ['GB'], "00",
None, None, None, 0, None, -9999, "Europe/London",
datetime.date(1994, 1, 13)),
Location(2633442, "Wyre", "Wyre", ['Viera'], 59.1166667,
-2.9666667, "T", "ISL", "GB", ['GB'], "V9", None, None,
None, 0, None, 1, "Europe/London",
datetime.date(2004, 9, 24)),
Location(2633443, "Wraysbury", "Wraysbury", ['Wyrardisbury'],
51.45, -0.55, "P", "PPL", "GB", None, "P9", None, None,
None, 0, None, 28, "Europe/London",
datetime.date(2006, 8, 21))]
Args:
data (iter): geonames.org locations data to read
Returns:
list: geonames.org identifiers with :class:`Location` objects
Raises:
FileFormatError: Unknown file format
.. _geonames.org: http://www.geonames.org/
.. _database export page: http://download.geonames.org/export/dump/
"""
self._data = data
field_names = ('geonameid', 'name', 'asciiname', 'alt_names',
'latitude', 'longitude', 'feature_class',
'feature_code', 'country', 'alt_country', 'admin1',
'admin2', 'admin3', 'admin4', 'population', 'altitude',
'gtopo30', 'tzname', 'modified_date')
comma_split = lambda s: s.split(',')
date_parse = lambda s: datetime.date(*map(int, s.split('-')))
or_none = lambda x, s: x(s) if s else None
str_or_none = lambda s: or_none(str, s)
float_or_none = lambda s: or_none(float, s)
int_or_none = lambda s: or_none(int, s)
tz_parse = lambda s: self.timezones[s][0] if self.timezones else None
field_parsers = (int_or_none, str_or_none, str_or_none, comma_split,
float_or_none, float_or_none, str_or_none,
str_or_none, str_or_none, comma_split, str_or_none,
str_or_none, str_or_none, str_or_none, int_or_none,
int_or_none, int_or_none, tz_parse, date_parse)
data = utils.prepare_csv_read(data, field_names, delimiter=r" ")
for row in data:
try:
for name, parser in zip(field_names, field_parsers):
row[name] = parser(row[name])
except ValueError:
raise utils.FileFormatError('geonames.org')
self.append(Location(**row)) | [
"def",
"import_locations",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_data",
"=",
"data",
"field_names",
"=",
"(",
"'geonameid'",
",",
"'name'",
",",
"'asciiname'",
",",
"'alt_names'",
",",
"'latitude'",
",",
"'longitude'",
",",
"'feature_class'",
",",
"'feature_code'",
",",
"'country'",
",",
"'alt_country'",
",",
"'admin1'",
",",
"'admin2'",
",",
"'admin3'",
",",
"'admin4'",
",",
"'population'",
",",
"'altitude'",
",",
"'gtopo30'",
",",
"'tzname'",
",",
"'modified_date'",
")",
"comma_split",
"=",
"lambda",
"s",
":",
"s",
".",
"split",
"(",
"','",
")",
"date_parse",
"=",
"lambda",
"s",
":",
"datetime",
".",
"date",
"(",
"*",
"map",
"(",
"int",
",",
"s",
".",
"split",
"(",
"'-'",
")",
")",
")",
"or_none",
"=",
"lambda",
"x",
",",
"s",
":",
"x",
"(",
"s",
")",
"if",
"s",
"else",
"None",
"str_or_none",
"=",
"lambda",
"s",
":",
"or_none",
"(",
"str",
",",
"s",
")",
"float_or_none",
"=",
"lambda",
"s",
":",
"or_none",
"(",
"float",
",",
"s",
")",
"int_or_none",
"=",
"lambda",
"s",
":",
"or_none",
"(",
"int",
",",
"s",
")",
"tz_parse",
"=",
"lambda",
"s",
":",
"self",
".",
"timezones",
"[",
"s",
"]",
"[",
"0",
"]",
"if",
"self",
".",
"timezones",
"else",
"None",
"field_parsers",
"=",
"(",
"int_or_none",
",",
"str_or_none",
",",
"str_or_none",
",",
"comma_split",
",",
"float_or_none",
",",
"float_or_none",
",",
"str_or_none",
",",
"str_or_none",
",",
"str_or_none",
",",
"comma_split",
",",
"str_or_none",
",",
"str_or_none",
",",
"str_or_none",
",",
"str_or_none",
",",
"int_or_none",
",",
"int_or_none",
",",
"int_or_none",
",",
"tz_parse",
",",
"date_parse",
")",
"data",
"=",
"utils",
".",
"prepare_csv_read",
"(",
"data",
",",
"field_names",
",",
"delimiter",
"=",
"r\"\t\"",
")",
"for",
"row",
"in",
"data",
":",
"try",
":",
"for",
"name",
",",
"parser",
"in",
"zip",
"(",
"field_names",
",",
"field_parsers",
")",
":",
"row",
"[",
"name",
"]",
"=",
"parser",
"(",
"row",
"[",
"name",
"]",
")",
"except",
"ValueError",
":",
"raise",
"utils",
".",
"FileFormatError",
"(",
"'geonames.org'",
")",
"self",
".",
"append",
"(",
"Location",
"(",
"*",
"*",
"row",
")",
")"
] | Parse geonames.org country database exports.
``import_locations()`` returns a list of :class:`trigpoints.Trigpoint`
objects generated from the data exported by geonames.org_.
It expects data files in the following tab separated format::
2633441 Afon Wyre Afon Wyre River Wayrai,River Wyrai,Wyre 52.3166667 -4.1666667 H STM GB GB 00 0 -9999 Europe/London 1994-01-13
2633442 Wyre Wyre Viera 59.1166667 -2.9666667 T ISL GB GB V9 0 1 Europe/London 2004-09-24
2633443 Wraysbury Wraysbury Wyrardisbury 51.45 -0.55 P PPL GB P9 0 28 Europe/London 2006-08-21
Files containing the data in this format can be downloaded from the
geonames.org_ site in their `database export page`_.
Files downloaded from the geonames site when processed by
``import_locations()`` will return ``list`` objects of the following
style::
[Location(2633441, "Afon Wyre", "Afon Wyre",
['River Wayrai', 'River Wyrai', 'Wyre'],
52.3166667, -4.1666667, "H", "STM", "GB", ['GB'], "00",
None, None, None, 0, None, -9999, "Europe/London",
datetime.date(1994, 1, 13)),
Location(2633442, "Wyre", "Wyre", ['Viera'], 59.1166667,
-2.9666667, "T", "ISL", "GB", ['GB'], "V9", None, None,
None, 0, None, 1, "Europe/London",
datetime.date(2004, 9, 24)),
Location(2633443, "Wraysbury", "Wraysbury", ['Wyrardisbury'],
51.45, -0.55, "P", "PPL", "GB", None, "P9", None, None,
None, 0, None, 28, "Europe/London",
datetime.date(2006, 8, 21))]
Args:
data (iter): geonames.org locations data to read
Returns:
list: geonames.org identifiers with :class:`Location` objects
Raises:
FileFormatError: Unknown file format
.. _geonames.org: http://www.geonames.org/
.. _database export page: http://download.geonames.org/export/dump/ | [
"Parse",
"geonames",
".",
"org",
"country",
"database",
"exports",
"."
] | train | https://github.com/JNRowe/upoints/blob/1e4b7a53ed2a06cd854523d54c36aabdccea3830/upoints/geonames.py#L167-L237 | 0.003253 |
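A stripped-down, runnable sketch of the parser-table approach used above, applied to one tab-separated row; only a handful of the geonames columns are kept to stay brief.

```python
import csv
import datetime

field_names = ('geonameid', 'name', 'latitude', 'longitude', 'modified_date')
row_text = '2633442\tWyre\t59.1166667\t-2.9666667\t2004-09-24'

date_parse = lambda s: datetime.date(*map(int, s.split('-')))
field_parsers = (int, str, float, float, date_parse)

for row in csv.DictReader([row_text], fieldnames=field_names, delimiter='\t'):
    # Apply one parser per column, exactly like the zip() loop above.
    for name, parser in zip(field_names, field_parsers):
        row[name] = parser(row[name])
    print(row['name'], row['latitude'], row['modified_date'])
    # Wyre 59.1166667 2004-09-24
```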
Microsoft/nni | src/sdk/pynni/nni/networkmorphism_tuner/bayesian.py | skip_connections_distance | def skip_connections_distance(list_a, list_b):
"""The distance between the skip-connections of two neural networks."""
distance_matrix = np.zeros((len(list_a), len(list_b)))
for i, a in enumerate(list_a):
for j, b in enumerate(list_b):
distance_matrix[i][j] = skip_connection_distance(a, b)
return distance_matrix[linear_sum_assignment(distance_matrix)].sum() + abs(
len(list_a) - len(list_b)
) | python | def skip_connections_distance(list_a, list_b):
"""The distance between the skip-connections of two neural networks."""
distance_matrix = np.zeros((len(list_a), len(list_b)))
for i, a in enumerate(list_a):
for j, b in enumerate(list_b):
distance_matrix[i][j] = skip_connection_distance(a, b)
return distance_matrix[linear_sum_assignment(distance_matrix)].sum() + abs(
len(list_a) - len(list_b)
) | [
"def",
"skip_connections_distance",
"(",
"list_a",
",",
"list_b",
")",
":",
"distance_matrix",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"list_a",
")",
",",
"len",
"(",
"list_b",
")",
")",
")",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"list_a",
")",
":",
"for",
"j",
",",
"b",
"in",
"enumerate",
"(",
"list_b",
")",
":",
"distance_matrix",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"skip_connection_distance",
"(",
"a",
",",
"b",
")",
"return",
"distance_matrix",
"[",
"linear_sum_assignment",
"(",
"distance_matrix",
")",
"]",
".",
"sum",
"(",
")",
"+",
"abs",
"(",
"len",
"(",
"list_a",
")",
"-",
"len",
"(",
"list_b",
")",
")"
] | The distance between the skip-connections of two neural networks. | [
"The",
"distance",
"between",
"the",
"skip",
"-",
"connections",
"of",
"two",
"neural",
"networks",
"."
] | train | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/networkmorphism_tuner/bayesian.py#L101-L109 | 0.002262 |
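The function above only needs a pairwise distance and SciPy's Hungarian assignment. Here is a standalone sketch with `skip_connection_distance` replaced by a toy absolute-difference metric so the numbers can be checked by hand.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def toy_distance(a, b):
    # Stand-in for skip_connection_distance, just for illustration.
    return abs(a - b)

def skip_connections_distance(list_a, list_b):
    distance_matrix = np.zeros((len(list_a), len(list_b)))
    for i, a in enumerate(list_a):
        for j, b in enumerate(list_b):
            distance_matrix[i][j] = toy_distance(a, b)
    # Minimum-cost matching plus a penalty for the size mismatch.
    return distance_matrix[linear_sum_assignment(distance_matrix)].sum() + abs(
        len(list_a) - len(list_b))

print(skip_connections_distance([1, 4], [2, 3, 9]))  # 2.0 matched cost + 1 penalty = 3.0
```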
runfalk/spans | spans/types.py | Range.endsbefore | def endsbefore(self, other):
"""
Test if this range ends before `other`. `other` may be either range or
scalar. This only takes the upper end of the ranges into consideration.
If the scalar or the upper end of the given range is less than or equal
to this range's upper end, ``True`` is returned.
>>> intrange(1, 5).endsbefore(5)
True
>>> intrange(1, 5).endsbefore(intrange(1, 5))
True
:param other: Range or scalar to test.
:return: ``True`` if this range ends before `other`, otherwise ``False``
:raises TypeError: If `other` is of the wrong type.
"""
if self.is_valid_range(other):
if self.upper == other.upper:
return not self.upper_inc or other.upper_inc
elif self.upper_inf:
return False
elif other.upper_inf:
return True
else:
return self.upper <= other.upper
elif self.is_valid_scalar(other):
return self.upper <= other
else:
raise TypeError(
"Unsupported type to test for ends before '{}'".format(
other.__class__.__name__)) | python | def endsbefore(self, other):
"""
Test if this range ends before `other`. `other` may be either range or
scalar. This only takes the upper end of the ranges into consideration.
If the scalar or the upper end of the given range is less than or equal
to this range's upper end, ``True`` is returned.
>>> intrange(1, 5).endsbefore(5)
True
>>> intrange(1, 5).endsbefore(intrange(1, 5))
True
:param other: Range or scalar to test.
:return: ``True`` if this range ends before `other`, otherwise ``False``
:raises TypeError: If `other` is of the wrong type.
"""
if self.is_valid_range(other):
if self.upper == other.upper:
return not self.upper_inc or other.upper_inc
elif self.upper_inf:
return False
elif other.upper_inf:
return True
else:
return self.upper <= other.upper
elif self.is_valid_scalar(other):
return self.upper <= other
else:
raise TypeError(
"Unsupported type to test for ends before '{}'".format(
other.__class__.__name__)) | [
"def",
"endsbefore",
"(",
"self",
",",
"other",
")",
":",
"if",
"self",
".",
"is_valid_range",
"(",
"other",
")",
":",
"if",
"self",
".",
"upper",
"==",
"other",
".",
"upper",
":",
"return",
"not",
"self",
".",
"upper_inc",
"or",
"other",
".",
"upper_inc",
"elif",
"self",
".",
"upper_inf",
":",
"return",
"False",
"elif",
"other",
".",
"upper_inf",
":",
"return",
"True",
"else",
":",
"return",
"self",
".",
"upper",
"<=",
"other",
".",
"upper",
"elif",
"self",
".",
"is_valid_scalar",
"(",
"other",
")",
":",
"return",
"self",
".",
"upper",
"<=",
"other",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unsupported type to test for ends before '{}'\"",
".",
"format",
"(",
"other",
".",
"__class__",
".",
"__name__",
")",
")"
] | Test if this range ends before `other`. `other` may be either range or
scalar. This only takes the upper end of the ranges into consideration.
If the scalar or the upper end of the given range is less than or equal
to this range's upper end, ``True`` is returned.
>>> intrange(1, 5).endsbefore(5)
True
>>> intrange(1, 5).endsbefore(intrange(1, 5))
True
:param other: Range or scalar to test.
:return: ``True`` if this range ends before `other`, otherwise ``False``
:raises TypeError: If `other` is of the wrong type. | [
"Test",
"if",
"this",
"range",
"ends",
"before",
"other",
".",
"other",
"may",
"be",
"either",
"range",
"or",
"scalar",
".",
"This",
"only",
"takes",
"the",
"upper",
"end",
"of",
"the",
"ranges",
"into",
"consideration",
".",
"If",
"the",
"scalar",
"or",
"the",
"upper",
"end",
"of",
"the",
"given",
"range",
"is",
"less",
"than",
"or",
"equal",
"to",
"this",
"range",
"s",
"upper",
"end",
"True",
"is",
"returned",
"."
] | train | https://github.com/runfalk/spans/blob/59ed73407a569c3be86cfdb4b8f438cb8c794540/spans/types.py#L684-L715 | 0.002396 |
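A few more calls against the behaviour documented above, assuming the `spans` package is installed; comments give the expected results for half-open integer ranges.

```python
from spans import intrange

print(intrange(1, 5).endsbefore(4))                 # False - upper bound is 5
print(intrange(1, 5).endsbefore(intrange(1, 10)))   # True
print(intrange(1).endsbefore(intrange(1, 10)))      # False - left range is unbounded above
```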
sammchardy/python-binance | binance/client.py | Client.withdraw | def withdraw(self, **params):
"""Submit a withdraw request.
https://www.binance.com/restapipub.html
Assumptions:
- You must have Withdraw permissions enabled on your API key
- You must have withdrawn to the address specified through the website and approved the transaction via email
:param asset: required
:type asset: str
:type address: required
:type address: str
:type addressTag: optional - Secondary address identifier for coins like XRP,XMR etc.
:type address: str
:param amount: required
:type amount: decimal
:param name: optional - Description of the address, default asset value passed will be used
:type name: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"msg": "success",
"success": true,
"id":"7213fea8e94b4a5593d507237e5a555b"
}
:raises: BinanceRequestException, BinanceAPIException, BinanceWithdrawException
"""
# force a name for the withdrawal if one not set
if 'asset' in params and 'name' not in params:
params['name'] = params['asset']
res = self._request_withdraw_api('post', 'withdraw.html', True, data=params)
if not res['success']:
raise BinanceWithdrawException(res['msg'])
return res | python | def withdraw(self, **params):
"""Submit a withdraw request.
https://www.binance.com/restapipub.html
Assumptions:
- You must have Withdraw permissions enabled on your API key
- You must have withdrawn to the address specified through the website and approved the transaction via email
:param asset: required
:type asset: str
:type address: required
:type address: str
:type addressTag: optional - Secondary address identifier for coins like XRP,XMR etc.
:type address: str
:param amount: required
:type amount: decimal
:param name: optional - Description of the address, default asset value passed will be used
:type name: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"msg": "success",
"success": true,
"id":"7213fea8e94b4a5593d507237e5a555b"
}
:raises: BinanceRequestException, BinanceAPIException, BinanceWithdrawException
"""
# force a name for the withdrawal if one not set
if 'asset' in params and 'name' not in params:
params['name'] = params['asset']
res = self._request_withdraw_api('post', 'withdraw.html', True, data=params)
if not res['success']:
raise BinanceWithdrawException(res['msg'])
return res | [
"def",
"withdraw",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"# force a name for the withdrawal if one not set",
"if",
"'asset'",
"in",
"params",
"and",
"'name'",
"not",
"in",
"params",
":",
"params",
"[",
"'name'",
"]",
"=",
"params",
"[",
"'asset'",
"]",
"res",
"=",
"self",
".",
"_request_withdraw_api",
"(",
"'post'",
",",
"'withdraw.html'",
",",
"True",
",",
"data",
"=",
"params",
")",
"if",
"not",
"res",
"[",
"'success'",
"]",
":",
"raise",
"BinanceWithdrawException",
"(",
"res",
"[",
"'msg'",
"]",
")",
"return",
"res"
] | Submit a withdraw request.
https://www.binance.com/restapipub.html
Assumptions:
- You must have Withdraw permissions enabled on your API key
- You must have withdrawn to the address specified through the website and approved the transaction via email
:param asset: required
:type asset: str
:type address: required
:type address: str
:type addressTag: optional - Secondary address identifier for coins like XRP,XMR etc.
:type address: str
:param amount: required
:type amount: decimal
:param name: optional - Description of the address, default asset value passed will be used
:type name: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"msg": "success",
"success": true,
"id":"7213fea8e94b4a5593d507237e5a555b"
}
:raises: BinanceRequestException, BinanceAPIException, BinanceWithdrawException | [
"Submit",
"a",
"withdraw",
"request",
"."
] | train | https://github.com/sammchardy/python-binance/blob/31c0d0a32f9edd528c6c2c1dd3044d9a34ce43cc/binance/client.py#L1839-L1881 | 0.004596 |
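The two client-side steps that can be checked without live credentials are the name defaulting and the success check. The sketch below imitates them with a fabricated response dict and a local stand-in exception class; it does not call the Binance API.

```python
class WithdrawError(Exception):
    """Local stand-in for BinanceWithdrawException."""

def prepare_and_check(params, response):
    # Same defaulting rule as Client.withdraw(): name falls back to the asset.
    if 'asset' in params and 'name' not in params:
        params['name'] = params['asset']
    if not response['success']:
        raise WithdrawError(response['msg'])
    return response

params = {'asset': 'ETH', 'address': '0xabc...', 'amount': 0.05}
reply = {'msg': 'success', 'success': True, 'id': '7213fea8e94b4a5593d507237e5a555b'}
print(prepare_and_check(params, reply)['id'])
print(params['name'])   # 'ETH' - defaulted from the asset
```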
hasgeek/coaster | coaster/utils/misc.py | check_password | def check_password(reference, attempt):
"""
Compare a reference password with the user attempt.
>>> check_password('{PLAIN}foo', 'foo')
True
>>> check_password(u'{PLAIN}bar', 'bar')
True
>>> check_password(u'{UNKNOWN}baz', 'baz')
False
>>> check_password(u'no-encoding', u'no-encoding')
False
>>> check_password(u'{SSHA}q/uVU8r15k/9QhRi92CWUwMJu2DM6TUSpp25', u're-foo')
True
>>> check_password(u'{BCRYPT}$2b$12$NfKivgz7njR3/rWZ56EsDe7..PPum.fcmFLbdkbP.chtMTcS1s01C', 'foo')
True
"""
if reference.startswith(u'{PLAIN}'):
if reference[7:] == attempt:
return True
elif reference.startswith(u'{SSHA}'):
# In python3 b64decode takes inputtype as bytes as opposed to str in python 2, and returns
# binascii.Error as opposed to TypeError
if six.PY3: # pragma: no cover
try:
if isinstance(reference, six.text_type):
ref = b64decode(reference[6:].encode('utf-8'))
else:
ref = b64decode(reference[6:])
except binascii.Error:
return False # Not Base64
else: # pragma: no cover
try:
ref = b64decode(reference[6:])
except TypeError:
return False # Not Base64
if isinstance(attempt, six.text_type):
attempt = attempt.encode('utf-8')
salt = ref[20:]
b64_encoded = b64encode(hashlib.sha1(attempt + salt).digest() + salt)
if six.PY3: # pragma: no cover
# type(b64_encoded) is bytes and can't be compared with type(reference) which is str
compare = six.text_type('{SSHA}%s' % b64_encoded.decode('utf-8') if type(b64_encoded) is bytes else b64_encoded)
else: # pragma: no cover
compare = six.text_type('{SSHA}%s' % b64_encoded)
return compare == reference
elif reference.startswith(u'{BCRYPT}'):
# bcrypt.hashpw() accepts either a unicode encoded string or the basic string (python 2)
if isinstance(attempt, six.text_type) or isinstance(reference, six.text_type):
attempt = attempt.encode('utf-8')
reference = reference.encode('utf-8')
if six.PY3: # pragma: no cover
return bcrypt.hashpw(attempt, reference[8:]) == reference[8:]
else: # pragma: no cover
return bcrypt.hashpw(
attempt.encode('utf-8') if isinstance(attempt, six.text_type) else attempt,
str(reference[8:])) == reference[8:]
return False | python | def check_password(reference, attempt):
"""
Compare a reference password with the user attempt.
>>> check_password('{PLAIN}foo', 'foo')
True
>>> check_password(u'{PLAIN}bar', 'bar')
True
>>> check_password(u'{UNKNOWN}baz', 'baz')
False
>>> check_password(u'no-encoding', u'no-encoding')
False
>>> check_password(u'{SSHA}q/uVU8r15k/9QhRi92CWUwMJu2DM6TUSpp25', u're-foo')
True
>>> check_password(u'{BCRYPT}$2b$12$NfKivgz7njR3/rWZ56EsDe7..PPum.fcmFLbdkbP.chtMTcS1s01C', 'foo')
True
"""
if reference.startswith(u'{PLAIN}'):
if reference[7:] == attempt:
return True
elif reference.startswith(u'{SSHA}'):
# In python3 b64decode takes inputtype as bytes as opposed to str in python 2, and returns
# binascii.Error as opposed to TypeError
if six.PY3: # pragma: no cover
try:
if isinstance(reference, six.text_type):
ref = b64decode(reference[6:].encode('utf-8'))
else:
ref = b64decode(reference[6:])
except binascii.Error:
return False # Not Base64
else: # pragma: no cover
try:
ref = b64decode(reference[6:])
except TypeError:
return False # Not Base64
if isinstance(attempt, six.text_type):
attempt = attempt.encode('utf-8')
salt = ref[20:]
b64_encoded = b64encode(hashlib.sha1(attempt + salt).digest() + salt)
if six.PY3: # pragma: no cover
# type(b64_encoded) is bytes and can't be compared with type(reference) which is str
compare = six.text_type('{SSHA}%s' % b64_encoded.decode('utf-8') if type(b64_encoded) is bytes else b64_encoded)
else: # pragma: no cover
compare = six.text_type('{SSHA}%s' % b64_encoded)
return compare == reference
elif reference.startswith(u'{BCRYPT}'):
# bcrypt.hashpw() accepts either a unicode encoded string or the basic string (python 2)
if isinstance(attempt, six.text_type) or isinstance(reference, six.text_type):
attempt = attempt.encode('utf-8')
reference = reference.encode('utf-8')
if six.PY3: # pragma: no cover
return bcrypt.hashpw(attempt, reference[8:]) == reference[8:]
else: # pragma: no cover
return bcrypt.hashpw(
attempt.encode('utf-8') if isinstance(attempt, six.text_type) else attempt,
str(reference[8:])) == reference[8:]
return False | [
"def",
"check_password",
"(",
"reference",
",",
"attempt",
")",
":",
"if",
"reference",
".",
"startswith",
"(",
"u'{PLAIN}'",
")",
":",
"if",
"reference",
"[",
"7",
":",
"]",
"==",
"attempt",
":",
"return",
"True",
"elif",
"reference",
".",
"startswith",
"(",
"u'{SSHA}'",
")",
":",
"# In python3 b64decode takes inputtype as bytes as opposed to str in python 2, and returns",
"# binascii.Error as opposed to TypeError",
"if",
"six",
".",
"PY3",
":",
"# pragma: no cover",
"try",
":",
"if",
"isinstance",
"(",
"reference",
",",
"six",
".",
"text_type",
")",
":",
"ref",
"=",
"b64decode",
"(",
"reference",
"[",
"6",
":",
"]",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"else",
":",
"ref",
"=",
"b64decode",
"(",
"reference",
"[",
"6",
":",
"]",
")",
"except",
"binascii",
".",
"Error",
":",
"return",
"False",
"# Not Base64",
"else",
":",
"# pragma: no cover",
"try",
":",
"ref",
"=",
"b64decode",
"(",
"reference",
"[",
"6",
":",
"]",
")",
"except",
"TypeError",
":",
"return",
"False",
"# Not Base64",
"if",
"isinstance",
"(",
"attempt",
",",
"six",
".",
"text_type",
")",
":",
"attempt",
"=",
"attempt",
".",
"encode",
"(",
"'utf-8'",
")",
"salt",
"=",
"ref",
"[",
"20",
":",
"]",
"b64_encoded",
"=",
"b64encode",
"(",
"hashlib",
".",
"sha1",
"(",
"attempt",
"+",
"salt",
")",
".",
"digest",
"(",
")",
"+",
"salt",
")",
"if",
"six",
".",
"PY3",
":",
"# pragma: no cover",
"# type(b64_encoded) is bytes and can't be compared with type(reference) which is str",
"compare",
"=",
"six",
".",
"text_type",
"(",
"'{SSHA}%s'",
"%",
"b64_encoded",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"type",
"(",
"b64_encoded",
")",
"is",
"bytes",
"else",
"b64_encoded",
")",
"else",
":",
"# pragma: no cover",
"compare",
"=",
"six",
".",
"text_type",
"(",
"'{SSHA}%s'",
"%",
"b64_encoded",
")",
"return",
"compare",
"==",
"reference",
"elif",
"reference",
".",
"startswith",
"(",
"u'{BCRYPT}'",
")",
":",
"# bcrypt.hashpw() accepts either a unicode encoded string or the basic string (python 2)",
"if",
"isinstance",
"(",
"attempt",
",",
"six",
".",
"text_type",
")",
"or",
"isinstance",
"(",
"reference",
",",
"six",
".",
"text_type",
")",
":",
"attempt",
"=",
"attempt",
".",
"encode",
"(",
"'utf-8'",
")",
"reference",
"=",
"reference",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"six",
".",
"PY3",
":",
"# pragma: no cover",
"return",
"bcrypt",
".",
"hashpw",
"(",
"attempt",
",",
"reference",
"[",
"8",
":",
"]",
")",
"==",
"reference",
"[",
"8",
":",
"]",
"else",
":",
"# pragma: no cover",
"return",
"bcrypt",
".",
"hashpw",
"(",
"attempt",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"isinstance",
"(",
"attempt",
",",
"six",
".",
"text_type",
")",
"else",
"attempt",
",",
"str",
"(",
"reference",
"[",
"8",
":",
"]",
")",
")",
"==",
"reference",
"[",
"8",
":",
"]",
"return",
"False"
] | Compare a reference password with the user attempt.
>>> check_password('{PLAIN}foo', 'foo')
True
>>> check_password(u'{PLAIN}bar', 'bar')
True
>>> check_password(u'{UNKNOWN}baz', 'baz')
False
>>> check_password(u'no-encoding', u'no-encoding')
False
>>> check_password(u'{SSHA}q/uVU8r15k/9QhRi92CWUwMJu2DM6TUSpp25', u're-foo')
True
>>> check_password(u'{BCRYPT}$2b$12$NfKivgz7njR3/rWZ56EsDe7..PPum.fcmFLbdkbP.chtMTcS1s01C', 'foo')
True | [
"Compare",
"a",
"reference",
"password",
"with",
"the",
"user",
"attempt",
"."
] | train | https://github.com/hasgeek/coaster/blob/07f7eb5d5f516e22fa14fdf4dc70e0ae13ee398d/coaster/utils/misc.py#L341-L398 | 0.003468 |
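To make the `{SSHA}` doctest above less magical, here is a standalone sketch that builds such a reference; the 4-byte random salt is an arbitrary choice, and any reference produced this way verifies against the same password with `check_password`.

```python
import hashlib
import os
from base64 import b64encode

def make_ssha(password, salt=None):
    salt = os.urandom(4) if salt is None else salt
    digest = hashlib.sha1(password.encode('utf-8') + salt).digest()
    # check_password() splits the decoded value into digest ref[:20] + salt ref[20:]
    return '{SSHA}' + b64encode(digest + salt).decode('utf-8')

reference = make_ssha('re-foo')
print(reference)
# check_password(reference, 're-foo') returns True for any salt chosen here.
```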