repo
stringlengths 1
29
| path
stringlengths 24
332
| code
stringlengths 39
579k
|
---|---|---|
def interactive(state):
    """Set (not toggle) interactive mode to *state*.

    The value is stored in the module-level ``_interactive`` flag, which is
    read elsewhere in this module.
    """
    global _interactive
    _interactive = state
|
def arg_auto_int(value):
    """Parse a CLI integer argument; base 0 honours 0x/0o/0b prefixes."""
    return int(value, base=0)
|
bidon-1.0.7 | bidon-1.0.7//bidon/util/terminal.pyclass:ProgressPrinter/percentage | @staticmethod
def percentage(current, total, time_remaining):
"""Returns the progress percentage."""
return '{}% completed'.format(int(current / total * 100))
|
@classmethod
def help(cls, nipype_interface, returnhelp=False):
    """ Method to print the full wrapped nipype interface help.
    Parameters
    ----------
    cls: process class (mandatory)
        a nipype process class
    nipype_interface: nipype interface (mandatory)
        a nipype interface object that will be documented.
    returnhelp: bool (optional, default False)
        if True return the help string message,
        otherwise display it on the console.
    """
    # Deferred import avoids a circular dependency with nipype_process.
    from .nipype_process import nipype_factory
    cls_instance = nipype_factory(nipype_interface)
    return cls_instance.get_help(returnhelp)
|
def is_tensor(obj):
    """Returns true if the given object is a framework-specific tensor.

    Backend-interface stub: appears to be overridden per framework backend
    (confirm against the concrete backends); this placeholder intentionally
    does nothing and returns None.
    """
    pass
|
@classmethod
def list(cls, context):
    """Return all ExtARQ objects built from the stored database rows."""
    rows = cls.dbapi.extarq_list(context)
    return cls._from_db_object_list(rows, context)
|
gallery_dl | gallery_dl//extractor/common.pyclass:Extractor/_get_tests | @classmethod
def _get_tests(cls):
"""Yield an extractor's test cases as (URL, RESULTS) tuples"""
tests = cls.test
if not tests:
return
if len(tests) == 2 and (not tests[1] or isinstance(tests[1], dict)):
tests = tests,
for test in tests:
if isinstance(test, str):
test = test, None
yield test
|
neurodocker | neurodocker//utils.pyfile:/utils.py:function:_string_vals_to_list/_string_vals_to_list | def _string_vals_to_list(dictionary):
"""Convert string values to lists."""
list_keys = ['conda_install', 'pip_install']
for kk in list_keys:
if kk in dictionary.keys():
dictionary[kk] = dictionary[kk].split()
|
@classmethod
def filter(cls, o):
    """
    Filter object with this class in data.

    Returns True when *o* contains this class's datablock name, False
    otherwise.  Usable both as ``class_name.filter(object)`` from the
    outside world and ``self.__class__.filter(object)`` from an instance.
    """
    try:
        return cls.__name__ in o
    except Exception:
        # *o* may not support containment tests at all; treat that as
        # "no datablock" instead of propagating (was a bare `except:`).
        return False
|
def load_data_file(path):
    """Load *path* via the module ``__loader__`` when one exists, else
    straight from the filesystem; returns bytes either way."""
    loader = globals().get('__loader__')
    if loader:
        return loader.get_data(path)
    with open(path, 'rb') as handle:
        return handle.read()
|
def censorrevision(tr, node, tombstone=b''):
    """Remove the content of a single revision.
    The specified ``node`` will have its content purged from storage.
    Future attempts to access the revision data for this node will
    result in failure.
    A ``tombstone`` message can optionally be stored. This message may be
    displayed to users when they attempt to access the missing revision
    data.
    Storage backends may have stored deltas against the previous content
    in this revision. As part of censoring a revision, these storage
    backends are expected to rewrite any internally stored deltas such
    that they no longer reference the deleted content.
    """
    # Interface declaration only: concrete storage backends supply the body.
|
def msg_delayed_filter(message, now):
    """Return True IFF the message is currently delayed."""
    return message.delay_expires > now
|
def parse_string(value):
    """
    Parse a string for python keywords or a numeric value.

    Parameters
    ----------
    value : str or float or None
        the value to be parsed (true, false, none, null, na, or a number)

    Returns
    -------
    bool, None, int, float, or the original value when nothing matched
    """
    if value is None:
        return None
    # The docstring allows non-string input (e.g. float); pass it through
    # instead of crashing on .strip().
    if not isinstance(value, str):
        return value
    value = value.strip()
    lowered = value.lower()  # hoisted: was recomputed per comparison
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    if lowered in ('none', 'null', 'na'):
        return None
    try:
        return float(value) if '.' in value else int(value)
    except ValueError:
        return value
|
def rectangle_props(event):
    """
    Return geometry and label info for a picked rectangle artist.

    Parameters
    ----------
    event : PickEvent
        The pick event to process.

    Returns
    -------
    dict with keys ``width``, ``height``, ``left``, ``right``, ``bottom``,
    ``top``, ``xcenter``, ``ycenter`` and ``label`` (None when no usable
    label exists).
    """
    rect = event.artist
    w = rect.get_width()
    h = rect.get_height()
    x0, y0 = rect.xy
    label = rect.get_label()
    # Matplotlib auto-labels start with '_nolegend'; fall back to the
    # datacursor-specific attribute when present.
    if label is None or label.startswith('_nolegend'):
        label = getattr(rect, '_mpldatacursor_label', None)
    return {
        'width': w, 'height': h,
        'left': x0, 'right': x0 + w,
        'bottom': y0, 'top': y0 + h,
        'xcenter': x0 + 0.5 * w, 'ycenter': y0 + 0.5 * h,
        'label': label,
    }
|
@classmethod
def _get_schema(cls, translator, schema, parent_key_type=None):
    """Returns named tuple with values:
    schema: the schema of a translator,
    id_type: the data type of the id-col, or None if absent
    Note: this method uses the argument schema to store
    data in since this method works recursively. It might
    be worthwhile in the future to refactor this code so this
    is not required.
    :param parent_key_type: passes down the column data type which the
        translator refers to as parent-key
    """
    # Validate the translator's keys before dispatching on its type.
    cls.check_translation_type(translator.keys())
    translation_type = translator[cls.TRANSLATION_TYPE]
    # Dispatch to the type-specific schema builder; each may recurse back
    # into _get_schema for nested translators.
    if translation_type == cls.HDICT:
        return cls._get_schema_hdict(translator, schema, parent_key_type)
    elif translation_type == cls.VDICT:
        return cls._get_schema_vdict(translator, schema, parent_key_type)
    elif translation_type == cls.LIST:
        return cls._get_schema_list(translator, schema, parent_key_type)
    elif translation_type == cls.VALUE:
        # VALUE translators contribute no id column.
        return cls.SCHEMA_RETURN_TUPLE(schema, None)
    else:
        raise AssertionError('Unexpected translator type %s' % translation_type
            )
|
def on_segment(p, q, r):
    """Given three colinear points p, q, r, return True when point q lies
    on line segment 'pr' (endpoints inclusive)."""
    within_x = min(p.x, r.x) <= q.x <= max(p.x, r.x)
    within_y = min(p.y, r.y) <= q.y <= max(p.y, r.y)
    return within_x and within_y
|
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces['closest-tag']
    if tag:
        rendered = '%s-%d-g%s' % (tag, pieces['distance'], pieces['short'])
    else:
        rendered = pieces['short']
    return rendered + '-dirty' if pieces['dirty'] else rendered
|
def set_rule_priorities(RulePriorities=None):
    """
    Sets the priorities of the specified rules.
    You can reorder the rules as long as there are no priority conflicts in the new order. Any existing rules that you do not specify retain their current priority.
    See also: AWS API Documentation
    Examples
    This example sets the priority of the specified rule.
    Expected Output:
    :example: response = client.set_rule_priorities(
        RulePriorities=[
            {
                'RuleArn': 'string',
                'Priority': 123
            },
        ]
    )
    :type RulePriorities: list
    :param RulePriorities: [REQUIRED]
        The rule priorities.
        (dict) --Information about the priorities for the rules for a listener.
        RuleArn (string) --The Amazon Resource Name (ARN) of the rule.
        Priority (integer) --The rule priority.
    :rtype: dict
    :return: {
        'Rules': [
            {
                'RuleArn': 'string',
                'Priority': 'string',
                'Conditions': [
                    {
                        'Field': 'string',
                        'Values': [
                            'string',
                        ]
                    },
                ],
                'Actions': [
                    {
                        'Type': 'forward',
                        'TargetGroupArn': 'string'
                    },
                ],
                'IsDefault': True|False
            },
        ]
    }
    :returns:
        A-Z, a-z, 0-9
        _ - . $ / ~ " ' @ : +
        (using amp;)
        (matches 0 or more characters)
        ? (matches exactly 1 character)
    """
    # Documentation-only stub (pyboto3): the real request is issued by the
    # boto3 client at runtime; this body intentionally does nothing.
    pass
|
def inverse_pose(pose):
    """
    Same as ``pose.inverse()``: invert via the transform representation.
    :param pose: instance of ``util.Pose``
    """
    transform = pose.transform()
    return transform.inverse().pose()
|
def zero_insert(input_string):
    """
    Left-pad a single-character digit string with one '0'.

    Longer (or empty) input is returned unchanged.
    :param input_string: input digit as string
    :type input_string: str
    :return: modified output as str
    """
    return '0' + input_string if len(input_string) == 1 else input_string
|
@staticmethod
def _set_constraints(kerns, STS):
    """
    Sets hyperparameter constraints on kernels.

    kerns: list of GPy-style kernel objects, one per input dimension.
    STS: settings object; reads thetabounds, dim, kerntype and periods
    (field semantics inferred from usage -- confirm against the caller).
    """
    # thetabounds[0] bounds the (shared) variance; thetabounds[i+1] bounds
    # the lengthscale of dimension i.
    if STS.thetabounds is not None:
        kerns[0].variance.constrain_bounded(STS.thetabounds[0][0], STS.
            thetabounds[0][1], warning=False)
        for i in range(STS.dim):
            kerns[i].lengthscale.constrain_bounded(STS.thetabounds[i + 1][0
                ], STS.thetabounds[i + 1][1], warning=False)
    # Standard-periodic kernels get their period fixed rather than optimized.
    for i in range(STS.dim):
        if STS.kerntype[i] == 'stdp':
            kerns[i].period.constrain_fixed(STS.periods[i], warning=False)
    # In the multi-dimensional case only the first kernel's variance stays
    # free; the rest are pinned to 1 (presumably a product-kernel
    # convention -- confirm).
    if STS.dim > 1:
        for i in range(1, STS.dim):
            kerns[i].variance.constrain_fixed(1.0, warning=False)
|
def is_valid_xml_char_ordinal(i):
    """
    Defines whether char is valid to use in xml document
    XML standard defines a valid char as::
    Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
    """
    # Hex constants mirror the ranges in the XML grammar above.
    if i in (0x9, 0xA, 0xD):
        return True
    return (0x20 <= i <= 0xD7FF or 0xE000 <= i <= 0xFFFD
            or 0x10000 <= i <= 0x10FFFF)
|
def write(sid0_list, sid1_list, pvalue_list, output_file):
    """
    Given three arrays of the same length [as per the output of
    epistasis(...)], writes a tab-separated header and the values to the
    given output file.
    """
    with open(output_file, 'w') as out_fp:
        out_fp.write('{0}\t{1}\t{2}\n'.format('sid0', 'sid1', 'pvalue'))
        # zip iterates the three parallel arrays in lockstep (idiomatic
        # replacement for indexing via range(len(...))).
        for sid0, sid1, pvalue in zip(sid0_list, sid1_list, pvalue_list):
            out_fp.write('{0}\t{1}\t{2}\n'.format(sid0, sid1, pvalue))
|
def match_keyword(token, keywords):
    """
    Return True when *token* is a keyword token whose (upper-cased) value
    is one of *keywords*; False for missing or non-keyword tokens.
    """
    if not token or not token.is_keyword:
        return False
    return token.value.upper() in keywords
|
confcollect-0.3.1 | confcollect-0.3.1//confcollect.pyclass:convert/list | @staticmethod
def list(value):
"""Support comma-separated lists.
"""
if not value:
return []
return value.split(',')
|
def isSame(a, b):
    """Type (b) is the same as (a) if (a) and (b) are identicial, that means
    that you can use b where you use a, and this also means that isSame(a,b) ==
    isSame(b,a). Basically, when (b) is same as (a), (b) can be considered as an
    alias for (a)."""
    # Bug fix: the delegated result was computed but never returned, so this
    # predicate always yielded None.
    return a.isSame(b)
|
def to_string(value):
    """Render *value* as a string the balancer manager understands.

    Booleans map to '1'/'0'; other truthy values go through str(); any
    falsy value (None, 0, '', ...) becomes the empty string.
    :param value: Any value.
    """
    if isinstance(value, bool):
        return '1' if value else '0'
    if value:
        return str(value)
    return ''
|
def dissolve_verts():
    """Delete selected control points, correcting surrounding handles

    Stub from a fake bpy module: exists for IDE/autocomplete support only
    and deliberately does nothing.
    """
    pass
|
@classmethod
def save(cls, info, instance, cleaned_input=None):
    """Save the instance to the database.
    To do something with the instance "before" or "after" saving it,
    override either :meth:`.before_save` and/or :meth:`.after_save`.
    """
    # Hook -> persist -> hook; both hooks receive the same cleaned_input.
    cls.before_save(info, instance, cleaned_input=cleaned_input)
    instance.save()
    cls.after_save(info, instance, cleaned_input=cleaned_input)
|
def _set_preferred_array(module, array):
    """Set preferred array list. Only called when supported"""
    # ['delete'] is a sentinel meaning "clear the preferred-array setting";
    # resetting personality to '' appears to be how the clearing is done --
    # confirm against the purestorage set_host API.
    if module.params['preferred_array'] != ['delete']:
        array.set_host(module.params['host'], preferred_array=module.params
            ['preferred_array'])
    else:
        array.set_host(module.params['host'], personality='')
|
bibble-0.0.2 | bibble-0.0.2//bibble/main.pyfile:/bibble/main.py:function:_venue/_venue | def _venue(entry):
"""Format an entry's venue data."""
f = entry.fields
venue = ''
if entry.type == 'article':
venue = f['journal']
try:
if f['volume'] and f['number']:
venue += ' {0}({1})'.format(f['volume'], f['number'])
except KeyError:
pass
elif entry.type == 'inproceedings':
venue = f['booktitle']
try:
if f['series']:
venue += ' ({})'.format(f['series'])
except KeyError:
pass
elif entry.type == 'inbook':
venue = f['title']
elif entry.type == 'techreport':
venue = '{0}, {1}'.format(f['number'], f['institution'])
elif entry.type == 'phdthesis' or entry.type == 'mastersthesis':
venue = ''
else:
venue = 'Unknown venue (type={})'.format(entry.type)
venue = venue.replace('{', '').replace('}', '')
return venue
|
mentat-ng-2.6.5 | mentat-ng-2.6.5//lib/mentat/idea/sqldb.pyclass:Idea/_aggr_iplist | @staticmethod
def _aggr_iplist(ranges, rngcls):
"""
Helper method for creating aggregated IP range from given list of IP ranges.
"""
if not ranges:
return None
ipmin = None
ipmax = None
for rng in ranges:
if ipmin is None or rng.low() < ipmin:
ipmin = rng.low()
if ipmax is None or rng.high() > ipmax:
ipmax = rng.high()
return rngcls((ipmin, ipmax))
|
@classmethod
def get_member_group(cls):
    """Returns the first member group.

    A "member" group is one with every role flag disabled.  The ``== False``
    comparisons are intentional: they build SQLAlchemy column expressions,
    so they must not be rewritten as ``is False`` or ``not``.
    """
    return cls.query.filter(cls.admin == False, cls.super_mod == False, cls
        .mod == False, cls.guest == False, cls.banned == False).first()
|
docx | docx//enum/base.pyclass:MetaEnumeration/_add_enum_members | @classmethod
def _add_enum_members(meta, clsdict):
"""
Dispatch ``.add_to_enum()`` call to each member so it can do its
thing to properly add itself to the enumeration class. This
delegation allows member sub-classes to add specialized behaviors.
"""
enum_members = clsdict['__members__']
for member in enum_members:
member.add_to_enum(clsdict)
|
def do_OP_2OVER(stack):
    """Copy the pair two slots down onto the top of the stack.

    >>> s = [1, 2, 3, 4]
    >>> do_OP_2OVER(s)
    >>> print(s)
    [1, 2, 3, 4, 1, 2]
    """
    stack.extend(stack[-4:-2])
|
def describe_operation(specification, operation):
    """
    Get a human readable string which describes an operation.
    :param specification: openapi_core Specification
    :param operation: openapi_core Operation
    :return: str representation of the operation.
    """
    method = operation.http_method.upper()
    url = specification.default_url + operation.path_name
    return '{} {}'.format(method, url)
|
@classmethod
def team_shared_dropbox_error(cls, val):
    """
    Create an instance of this class set to the
    ``team_shared_dropbox_error`` tag with value ``val``.

    Tag-union constructor (generated SDK code): the first constructor
    argument selects the variant, the second carries its payload.

    :param TeamFolderTeamSharedDropboxError val:
    :rtype: BaseTeamFolderError
    """
    return cls('team_shared_dropbox_error', val)
|
def uid2str(uid):
    """Convert a PyDicom uid to a plain string."""
    # repr() wraps the UID in quotes; strip them from both ends.
    text = repr(uid)
    return text.strip("'")
|
def winsorized_rank_correlation(X, rowvar=False, weighted=False):
    """
    Computes rank correlations using a winsorized ranks.
    Relevant to high dimensional settings where the n_samples < n_features
    resulting too large variance in the ranks.
    Parameters
    ----------
    X: array-like, shape = [n_samples, n_features]
        Data matrix using which we compute the empirical
        correlation
    rowvar, weighted:
        unused in this stub; semantics presumably match the sibling
        estimators in this module -- confirm before implementing.
    Returns
    -------
    rank_correlation
    References
    ----------
    Liu, Han, John Lafferty, and Larry Wasserman.
    "The nonparanormal: Semiparametric estimation of high dimensional
    undirected graphs."
    Journal of Machine Learning Research 10.Oct (2009): 2295-2328.
    """
    # Unimplemented placeholder: currently returns None.
    pass
|
def content():
    """Return the rendered result of the page, to be included inside
    the layout.

    Interface declaration only; implementations provide the body.
    """
|
def set_working_directory(working_directory: str):
    """
    Sets working directory of python application
    :param working_directory: full name of working directory
    """
    # Stored in a module-level variable read by the other config helpers.
    global _working_directory
    _working_directory = working_directory
|
def edge_subgraph(G, ebunch):
    """Return the subgraph induced on edges in `ebunch`.
    The induced subgraph of the graph contains the edges appearing in `ebunch`
    and only the nodes that appear in some edge in `ebunch`.
    Parameters
    ----------
    ebunch : list, iterable
        A container of edges as 3-tuples (u, v, key), which will be iterated
        through exactly once.
    Returns
    -------
    H : MultiDiGraph
        A subgraph of the graph with the same graph, node, and edge attributes.
    Notes
    -----
    The graph, edge or node attributes just point to the original graph.
    So changes to the node or edge structure will not be reflected in
    the original graph while changes to the attributes will.
    To create a subgraph with its own copy of the edge/node attributes use:
    nx.Graph(G.subgraph(nbunch))
    If edge attributes are containers, a deep copy can be obtained using:
    G.subgraph(nbunch).copy()
    For an inplace reduction of a graph to a subgraph you can remove nodes:
    G.remove_nodes_from([ n in G if n not in set(nbunch)])
    Examples
    --------
    >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_path([0,1,2,3])
    >>> H = G.subgraph([0,1,2])
    >>> H.edges()
    [(0, 1), (1, 2)]
    """
    H = G.__class__()  # empty graph of the same concrete type as G
    G_succ = G.succ  # successor adjacency: requires a directed multigraph
    for u, v, key in ebunch:
        try:
            attrs = G_succ[u][v][key]
        except KeyError:
            # Re-raise with a message naming the offending edge triple.
            raise KeyError('Invalid edge: ({0}, {1}, {2})'.format(u, v, key))
        # The edge-attribute dict is passed through (shared, not copied).
        H.add_edge(u, v, key=key, **attrs)
    for u in H:
        # Shallow-copy node attribute dicts (networkx 1.x ``.node`` API).
        H.node[u] = G.node[u].copy()
    H.graph = G.graph.copy()  # shallow copy of graph-level attributes
    return H
|
IPython | IPython//core/completer.pyfile:/core/completer.py:function:_formatparamchildren/_formatparamchildren | def _formatparamchildren(parameter) ->str:
"""
Get parameter name and value from Jedi Private API
Jedi does not expose a simple way to get `param=value` from its API.
Parameter
=========
parameter:
Jedi's function `Param`
Returns
=======
A string like 'a', 'b=1', '*args', '**kwargs'
"""
description = parameter.description
if not description.startswith('param '):
raise ValueError(
'Jedi function parameter description have change format.Expected "param ...", found %r".'
% description)
return description[6:]
|
@classmethod
def from_file(cls, f, parent=None):
    """Construct an HFSSNode from an hfss file.

    Consumes the next line of iterator *f* as this node's header and
    delegates the remaining parse to ``_from_file``.
    """
    return cls._from_file(next(f).strip(), f, parent)
|
@classmethod
def format_pair(cls, pair):
    """Normalize a 'base-quote' pair into the remote API's format:
    '-' becomes the exchange delimiter and the result is upper-cased."""
    normalized = pair.replace('-', cls.delimiter)
    return normalized.upper()
|
argus_cli | argus_cli//helpers/collections.pyfile:/helpers/collections.py:function:get_dotnotation/get_dotnotation | def get_dotnotation(original: dict, dotnotation: str) ->dict:
"""Use dotted notation to create set values in a dict,
e.g:
>>> dotattr({}, 'some.path.in.dict', 'value')
{ "some": { "path": { "in": {"dict": "value" }}}}
>>> dotattr({'credentials': {'username': 'test'}}, 'credentials.username')
'test'
"""
original = original or {}
parts = dotnotation.split('.')
current = original
for part in parts:
if part is parts[-1]:
return current[part]
elif part not in current:
current[part] = {}
current = current[part]
return original
|
def hashable(data, v):
    """Return True when ``data[v]`` succeeds, i.e. *v* is usable as a
    key/index into *data*.

    Despite the name, this tests indexability -- which, for dict-like
    *data*, implies that *v* is hashable.
    """
    try:
        data[v]
    except (TypeError, KeyError, IndexError):
        return False
    return True
|
def load_numpy_distutils_command_scons(finder, module):
    """the numpy.distutils.command.scons module optionally imports the numscons
    module; ignore the error if the module cannot be found."""
    # cx_Freeze hook: 'finder' is unused; marking the name ignorable keeps
    # the module finder from reporting numscons as a missing dependency.
    module.IgnoreName('numscons')
|
def get_p2p_ssqr_diff_over_var(model):
    """
    Get sum of squared differences of consecutive values as a fraction of the
    variance of the data.

    Simply reads the precomputed 'ssqr_diff_over_var' entry of *model*,
    a mapping produced elsewhere in the feature pipeline.
    """
    return model['ssqr_diff_over_var']
|
def compute_all_shapes(v):
    """Compute the tensor-, index-, and total shape of an expr.

    Returns the 3-tuple (shape, index_shape, total_shape); note that the
    corresponding sizes are NOT returned, contrary to what an earlier
    docstring claimed.
    """
    shape = v.ufl_shape
    index_shape = v.ufl_index_dimensions
    # Total shape is the tensor shape extended by the free-index dimensions.
    total_shape = shape + index_shape
    return shape, index_shape, total_shape
|
def almosteq(ctx, s, t, rel_eps=None, abs_eps=None):
    """
    Determine whether the difference between `s` and `t` is smaller
    than a given epsilon, either relatively or absolutely.
    Both a maximum relative difference and a maximum difference
    ('epsilons') may be specified. The absolute difference is
    defined as `|s-t|` and the relative difference is defined
    as `|s-t|/\\max(|s|, |t|)`.
    If only one epsilon is given, both are set to the same value.
    If none is given, both epsilons are set to `2^{-p+m}` where
    `p` is the current working precision and `m` is a small
    integer. The default setting typically allows :func:`~mpmath.almosteq`
    to be used to check for mathematical equality
    in the presence of small rounding errors.
    **Examples**
    >>> from mpmath import *
    >>> mp.dps = 15
    >>> almosteq(3.141592653589793, 3.141592653589790)
    True
    >>> almosteq(3.141592653589793, 3.141592653589700)
    False
    >>> almosteq(3.141592653589793, 3.141592653589700, 1e-10)
    True
    >>> almosteq(1e-20, 2e-20)
    True
    >>> almosteq(1e-20, 2e-20, rel_eps=0, abs_eps=0)
    False
    """
    t = ctx.convert(t)
    # Default: both epsilons become 2**(-prec+4).
    if abs_eps is None and rel_eps is None:
        rel_eps = abs_eps = ctx.ldexp(1, -ctx.prec + 4)
    # A single given epsilon is mirrored onto the other.
    if abs_eps is None:
        abs_eps = rel_eps
    elif rel_eps is None:
        rel_eps = abs_eps
    diff = abs(s - t)
    if diff <= abs_eps:
        return True
    abss = abs(s)
    abst = abs(t)
    # Relative error divides by the larger magnitude, per the docstring.
    if abss < abst:
        err = diff / abst
    else:
        err = diff / abss
    return err <= rel_eps
|
def matchFamilyIsland(genesO, gene2FamIslandD, searchStr):
    """Return the island number, family number, and gene name(s)
    associated with searchStr in genesO.geneInfoD. Searches for a match in all
    fields of geneInfoD."""
    # A gene matches when any of its string-valued info fields contains
    # searchStr (any() replaces the old inner loop with break).
    matching_genes = [geneNum for geneNum in genesO.geneInfoD
                      if any(isinstance(field, str) and searchStr in field
                             for field in genesO.numToGeneInfo(geneNum))]
    outL = []
    for geneNum in matching_genes:
        locusIslandNum, famNum, locusFamNum = gene2FamIslandD[geneNum]
        outL.append((genesO.numToName(geneNum), locusIslandNum, famNum,
                     locusFamNum))
    return outL
|
def fai_to_chromosome_info(fai_entries):
    """Convert the given fai-entries to dinopy chromosome info format.
    Converts from a list of ``[chr_name, chr_len, chr_start, line_length,
    line_length_bytes]`` (file-view) to a list of
    ``[chr_name, chr_len, (chr_start, chr_stop)]`` entries
    (genome-array-view, i.e. without names, newlines, and '>').

    Arguments:
        fai_entries(Iterable): An iterable of valid fai-entries.

    Returns:
        list: A list containing valid chromosome info entries.  Empty input
        now yields an empty list (a leftover unpack of ``fai_entries[0]``
        previously raised IndexError and served no other purpose).
    """
    chromosome_infos = []
    offset = 0  # running start index within the concatenated genome array
    for chr_name, chr_len, *_ in fai_entries:
        if isinstance(chr_name, str):
            chr_name = chr_name.encode()
        end = offset + chr_len
        chromosome_infos.append([chr_name, chr_len, (offset, end)])
        offset = end
    return chromosome_infos
|
def w2bhp(value):
    """float: power: Watts to Brake Horse Power."""
    watts_per_bhp = 745.699872
    return value / watts_per_bhp
|
def vert_connect():
    """Connect selected vertices of faces, splitting the face

    Stub from a fake bpy module: exists for IDE/autocomplete support only
    and deliberately does nothing.
    """
    pass
|
enablebanking | enablebanking//models/priority_code.pyclass:PriorityCode/__repr__ | def __repr__(A):
"""For `print` and `pprint`"""
return A.to_str()
|
def provides_facts():
    """
    Returns a dictionary keyed on the facts provided by this module. The value
    of each key is the doc string describing the fact.
    """
    # Single fact: the current user's home directory on the device.
    return {'HOME':
        'A string indicating the home directory of the current user.'}
|
def last_continuous_indexes_slice(ol, value):
    """Return the indexes (ascending) of the last contiguous run of *value*
    in *ol*, or None when *value* does not occur.

    from elist.elist import *
    ol = [1,"a","a",2,3,"a",4,"a","a","a",5]
    last_continuous_indexes_slice(ol,"a")   # -> [7, 8, 9]
    """
    # Fixes: no longer shadows the builtin `slice`, uses len()/`is None`
    # instead of __len__()/`== None`, and extends the run directly instead
    # of a second reversed scan followed by reverse().
    last = None
    for i in range(len(ol) - 1, -1, -1):
        if ol[i] == value:
            last = i
            break
    if last is None:
        return None
    first = last
    while first > 0 and ol[first - 1] == value:
        first -= 1
    return list(range(first, last + 1))
|
def has_lookahead(parser):
    """ Return True if the parser has the ability to perform lookahead.

    Detection is duck-typed: any object exposing a 'lookahead' attribute
    counts as lookahead-capable.
    """
    return hasattr(parser, 'lookahead')
|
def breadcrumbs():
    """Returns a tuple like ({'name': name, 'url': url}, ...)

    Name is the name to display for that segment of the breadcrumbs.
    URL is the link for that segment of the breadcrumbs.

    Interface declaration only; implementations provide the body.
    """
|
def create_embedded():
    """
    Imports the phovea_server and creates an application

    The import is deferred to call time so that merely importing this
    module does not pull in the server stack.
    """
    from .server import create_application
    return create_application()
|
@classmethod
def showcase_remove_member(cls, val):
    """
    Create an instance of this class set to the ``showcase_remove_member``
    tag with value ``val``.

    Tag-union constructor (generated SDK code): the first constructor
    argument selects the variant, the second carries its payload.

    :param ShowcaseRemoveMemberType val:
    :rtype: EventType
    """
    return cls('showcase_remove_member', val)
|
@classmethod
def from_xml_file(cls, filename):
    """
    Instantiate an AVM object from an xml file.

    :param filename: path of the XML file, read in binary mode.
    """
    # Context manager closes the handle deterministically; the previous
    # open(...).read() leaked the file object until garbage collection.
    with open(filename, 'rb') as xml_file:
        return cls.from_xml(xml_file.read())
|
def _store_loads(model, unused_card_type, unused_ids, nids_used, eids_used,
    cids_used):
    """helper for ``remove_unused``: record which node, element and
    coordinate-system ids are referenced by the model's loads.

    Mutates the nids_used / eids_used / cids_used sets in place; raises
    NotImplementedError for an unrecognized load type so new card types
    fail loudly instead of being silently dropped.
    """
    for loads in model.loads.values():
        for load in loads:
            # Dispatch on the BDF card type; cards that reference nothing
            # removable (e.g. RANDPS, PLOAD1/2) deliberately fall through.
            if load.type in ['FORCE', 'MOMENT']:
                nids_used.add(load.node_id)
                cids_used.add(load.Cid())
            elif load.type in ['FORCE1', 'FORCE2', 'MOMENT1', 'MOMENT2']:
                nids_used.update(load.node_ids)
            elif load.type == 'GRAV':
                cids_used.add(load.Cid())
            elif load.type == 'RANDPS':
                pass
            elif load.type == 'PLOAD':
                nids_used.update(load.node_ids)
            elif load.type == 'PLOAD1':
                pass
            elif load.type == 'PLOAD2':
                pass
            elif load.type == 'PLOAD4':
                cids_used.add(load.Cid())
            elif load.type == 'DEFORM':
                eids_used.add(load.Eid())
            elif load.type == 'SPCD':
                nids_used.update(load.node_ids)
            elif load.type == 'GMLOAD':
                cids_used.add(load.Cid())
            elif load.type in ['RFORCE', 'RFORCE1']:
                nids_used.add(load.node_id)
                cids_used.add(load.Cid())
            elif load.type == 'TEMP':
                # Temperature cards key their dict by node id.
                nids_used.update(list(load.temperatures.keys()))
            elif load.type == 'ACCEL':
                cids_used.add(load.Cid())
            elif load.type == 'ACCEL1':
                cids_used.add(load.Cid())
            elif load.type in ['QBDY1', 'QBDY2', 'QBDY3', 'QHBDY']:
                pass
            elif load.type in ['PLOADX1']:
                nids_used.update(load.node_ids)
            elif load.type in ['SLOAD']:
                nids_used.update(load.node_ids)
            elif load.type in ['LOAD', 'LSEQ', 'LOADCYN']:
                pass
            elif load.type in ['QVOL', 'TEMPRB']:
                pass
            elif load.type in ['TEMPAX']:
                pass
            else:
                raise NotImplementedError(load)
|
def getClusters(fileLocation):
    """Returns the latlongs of the coordinates stored in the coordinates file.

    Parameters
    ----------
    fileLocation : filepath
        Coordinates file "I" generated by the clusterAndAggregate routine
        (the docstring previously named this parameter incorrectly).

    Returns
    -------
    list
        Two parallel lists ``[lats, longs]`` of the cluster centroids.
    """
    coordinates = [[], []]
    # Context manager closes the file (the old version leaked the handle);
    # only the first two comma-separated columns of each line are used.
    with open(fileLocation, 'r') as cluster_file:
        for line in cluster_file:
            tokens = line.split(',')
            coordinates[0].append(float(tokens[0]))
            coordinates[1].append(float(tokens[1]))
    return coordinates
|
def outshift():
    """shift incoming things to outgoing things

    Service-interface declaration only; implementations provide the body.
    """
|
@classmethod
def supports_metadata(cls):
    """returns True if this audio type supports MetaData

    APE-tagged formats always do, hence the unconditional True.
    """
    return True
|
inary-1.0.1 | inary-1.0.1//inary/util.pyfile:/inary/util.py:function:same/same | def same(l):
"""Check if all elements of a sequence are equal."""
if len(l) == 0:
return True
else:
last = l.pop()
for x in l:
if x != last:
return False
return True
|
mercurial-5.4 | mercurial-5.4//mercurial/interfaces/repository.pyclass:imanifeststorage/__iter__ | def __iter__():
    """Interface stub: iterate over revision numbers for this manifest."""
|
pyNastran | pyNastran//gui/utils/qt/checks/qlineedit.pyfile:/gui/utils/qt/checks/qlineedit.py:function:check_int/check_int | def check_int(cell):
"""
Colors the cell red if the integer is invalid
Parameters
----------
cell : QLineEdit()
a PyQt/PySide object
Returns
-------
value : int / None
int : the value as a int
None : is_passed=False
is_passed : bool
is this a valid integer
"""
text = cell.text()
try:
value = int(text)
cell.setStyleSheet('QLineEdit{background: white;}')
return value, True
except ValueError:
cell.setStyleSheet('QLineEdit{background: red;}')
return None, False
|
the_collector | the_collector//extra_numpy.pyfile:/extra_numpy.py:function:array_pack/array_pack | def array_pack(img):
"""
img: numpy array. I primarily use this for OpenCV images
"""
d = img.tobytes()
s = img.shape
t = img.dtype
return s, t, d
|
ulaval_notify | ulaval_notify//utils.pyfile:/utils.py:function:dummy_wrap/dummy_wrap | def dummy_wrap(f, *args, **kwargs):
"""Do nothing except call the function with the parameters.
:param f: The function to be called
"""
f(*args, **kwargs)
|
fake-bpy-module-2.80-20200428 | fake-bpy-module-2.80-20200428//bpy/ops/graph.pyfile:/bpy/ops/graph.py:function:fmodifier_paste/fmodifier_paste | def fmodifier_paste(only_active: bool=True, replace: bool=False):
    """Add copied F-Modifiers to the selected F-Curves.

    Stub generated for IDE support; the real operator runs inside Blender,
    so this implementation does nothing.

    :param only_active: Only Active, Only paste F-Modifiers on active F-Curve
    :type only_active: bool
    :param replace: Replace Existing, Replace existing F-Modifiers, instead of just appending to the end of the existing list
    :type replace: bool
    """
    pass
|
mercurial-5.4 | mercurial-5.4//mercurial/minirst.pyfile:/mercurial/minirst.py:function:prunecomments/prunecomments | def prunecomments(blocks):
"""Remove comments."""
i = 0
while i < len(blocks):
b = blocks[i]
if b[b'type'] == b'paragraph' and (b[b'lines'][0].startswith(b'.. '
) or b[b'lines'] == [b'..']):
del blocks[i]
if i < len(blocks) and blocks[i][b'type'] == b'margin':
del blocks[i]
else:
i += 1
return blocks
|
bdworkbench-3.4 | bdworkbench-3.4//bdworkbench/appconfig/appconfig_autogen/replace.pyfile:/bdworkbench/appconfig/appconfig_autogen/replace.py:function:autogenReplace/autogenReplace | def autogenReplace(autogenCmd, outputLines, autogenDict, entryDict):
"""
Handle any pattern replacements requested by the user.
"""
if not autogenDict.has_key('replace'):
return
for pathDict in autogenDict['replace']:
pattern = pathDict['substitute']['pattern']
containerDst = pathDict['path']
roles = pathDict.get('onroles', None)
macro = pathDict['substitute']['macro']
with autogenCmd.getRoleIfContext(roles, outputLines) as ric:
outputLines.append('%sREPLACE_PATTERN %s %s %s\n' % (ric.spaces,
pattern, containerDst, macro))
outputLines.append('\n')
return
|
tap-surveymonkey-0.1.6 | tap-surveymonkey-0.1.6//tap_surveymonkey/mode.pyfile:/tap_surveymonkey/mode.py:function:get_selected_streams/get_selected_streams | def get_selected_streams(catalog):
"""
Gets selected streams. Checks schema's 'selected' first (legacy)
and then checks metadata (current), looking for an empty breadcrumb
and mdata with a 'selected' entry
"""
selected_streams = []
for stream in catalog['streams']:
stream_metadata = stream['metadata']
if stream['schema'].get('selected', False):
selected_streams.append(stream['tap_stream_id'])
else:
for entry in stream_metadata:
if not entry['breadcrumb'] and entry['metadata'].get('selected'
, None):
selected_streams.append(stream['tap_stream_id'])
return selected_streams
|
libtools-0.3.3 | libtools-0.3.3//libtools/userinput.pyfile:/libtools/userinput.py:function:is_bool/is_bool | def is_bool(variable):
"""Tests if provided parameter is bool data type"""
try:
if type(variable) is bool:
return True
except TypeError:
return False
|
pyramid-1.10.4 | pyramid-1.10.4//src/pyramid/interfaces.pyclass:IViewDeriver/__call__ | def __call__(view, info):
    """Interface stub: derive a new view from the supplied view.

    View options, package information and registry are available on
    ``info``, an instance of :class:`pyramid.interfaces.IViewDeriverInfo`.

    The ``view`` is a callable accepting ``(context, request)``.
    """
|
luckydonald-utils-0.77 | luckydonald-utils-0.77//luckydonaldUtils/iterators.pyfile:/luckydonaldUtils/iterators.py:function:chunks_known_length/chunks_known_length | def chunks_known_length(iterable, size, length=None):
"""
Yield successive `size`-sized chunks from `iterable`,
if the length of `iterable is already known or easy to compute.
https://stackoverflow.com/a/312464/3423324
:param iterable: The object you want to split into pieces.
:param size: The size each of the resulting pieces should have.
:type size: int
:param length: Optional. Length of the `iterable`.
If set to `None` (default), the length get calculated automatically.
:type length: None | int
"""
if length is None:
len(iterable)
for i in range(0, length, size):
yield iterable[i:i + size]
|
pastml-1.9.29.2 | pastml-1.9.29.2//pastml/visualisation/cytoscape_manager.pyfile:/pastml/visualisation/cytoscape_manager.py:function:get_scaling_function/get_scaling_function | def get_scaling_function(y_m, y_M, x_m, x_M):
"""
Returns a linear function y = k x + b, where y \\in [m, M]
:param y_m:
:param y_M:
:param x_m:
:param x_M:
:return:
"""
if x_M <= x_m:
return lambda _: y_m
k = (y_M - y_m) / (x_M - x_m)
b = y_m - k * x_m
return lambda _: int(k * _ + b)
|
csirtg-geo-0.3 | csirtg-geo-0.3//versioneer.pyfile:/versioneer.py:function:plus_or_dot/plus_or_dot | def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if '+' in pieces.get('closest-tag', ''):
return '.'
return '+'
|
homeassistant-0.109.6 | homeassistant-0.109.6//homeassistant/components/smartthings/smartapp.pyfile:/homeassistant/components/smartthings/smartapp.py:function:format_unique_id/format_unique_id | def format_unique_id(app_id: str, location_id: str) ->str:
"""Format the unique id for a config entry."""
return f'{app_id}_{location_id}'
|
nmstate-0.2.10 | nmstate-0.2.10//libnmstate/nm/ipv4.pyfile:/libnmstate/nm/ipv4.py:function:get_ip_profile/get_ip_profile | def get_ip_profile(active_connection):
"""
Get NMSettingIP4Config from NMActiveConnection.
For any error, return None.
"""
remote_conn = active_connection.get_connection()
if remote_conn:
return remote_conn.get_setting_ip4_config()
return None
|
fake-bpy-module-2.78-20200428 | fake-bpy-module-2.78-20200428//bgl.pyfile:/bgl.py:function:glPopClientAttrib/glPopClientAttrib | def glPopClientAttrib():
    """Pop the client attribute stack.

    Stub of the OpenGL ``glPopClientAttrib`` entry point; the fake bgl
    module ships signatures only, so this does nothing.
    """
    pass
|
becca | becca//tools.pyfile:/tools.py:function:str_to_int/str_to_int | def str_to_int(exp):
"""
Convert a string to an integer.
The method is primitive, using a simple hash based on the
ordinal value of the characters and their position in the string.
Parameters
----------
exp : str
The string expression to convert to an int.
Returns
-------
sum : int
An integer that is likely (though not extremely so) to be unique
within the scope of the program.
"""
sum_ = 0
for i, character in enumerate(exp):
sum_ += i + ord(character) + i * ord(character)
return sum_
|
savory-pie-0.4.11 | savory-pie-0.4.11//savory_pie/django/resources.pyclass:ModelResource/create_resource | @classmethod
def create_resource(cls):
    """
    Creates a new ModelResource around a new model_class instance.

    Returns a resource of type ``cls`` wrapping ``cls.model_class()``
    constructed with its default arguments.
    """
    return cls(cls.model_class())
|
dadi | dadi//DFE/Cache2D_mod.pyfile:/DFE/Cache2D_mod.py:function:mixture_symmetric_point_pos/mixture_symmetric_point_pos | def mixture_symmetric_point_pos(params, ns, s1, s2, sel_dist1, sel_dist2,
theta, pts=None):
"""
Weighted summation of 1d and 2d distributions with positive selection.
The 1d distribution is equivalent to assuming selection coefficients are
perfectly correlated.
params: Parameters for potential optimization.
The last parameter is the weight for the 2d dist.
The second-to-last parameter is positive gammma for the point mass.
The third-to-last parameter is the proportion of positive selection.
The fourth-to-last parameter is the correlation coefficient for the
2d distribution.
The remaining parameters as must be shared between the 1d and 2d
distributions.
ns: Ignored
s1: Cache1D object for 1d distribution
s2: Cache2D object for 2d distribution
sel_dist1: Univariate probability distribution for s1
sel_dist2: Bivariate probability distribution for s2
theta: Population-scaled mutation rate
pts: Ignored
"""
pdf_params = params[:-4]
rho, ppos, gamma_pos, p2d = params[-4:]
params1 = list(pdf_params) + [ppos, gamma_pos]
fs1 = s1.integrate_point_pos(params1, None, sel_dist1, theta, Npos=1,
pts=None)
params2 = list(pdf_params) + [rho, ppos, gamma_pos]
fs2 = s2.integrate_symmetric_point_pos(params2, None, sel_dist2, theta,
None)
return (1 - p2d) * fs1 + p2d * fs2
|
chromewhip | chromewhip//protocol/indexeddb.pyclass:IndexedDB/disable | @classmethod
def disable(cls):
    """Disables events from backend.

    Returns the pair (payload, None): the 'disable' command built by
    ``build_send_payload`` plus None — presumably meaning no
    response-parsing callback; confirm against the protocol base class.
    """
    return cls.build_send_payload('disable', {}), None
|
atsim.potentials-0.2.1 | atsim.potentials-0.2.1//atsim/potentials/_dlpoly_writeTABLE.pyfile:/atsim/potentials/_dlpoly_writeTABLE.py:function:_calculateForce/_calculateForce | def _calculateForce(pot, r, h=1e-05):
    """Calls pot.force for separation (r) and returns DL_POLY -r dU/dr values rather than
    the dU/dr value normally returned by potentials.Potential.force() method

    NOTE(review): despite the local name 'dUdr', this only equals
    -r dU/dr if Potential.force() returns -dU/dr (the conventional
    force) — confirm against the Potential.force() documentation.

    @param pot potential from which force should be calculated
    @param r Separation at which force should be calculated
    @param h Increment used when performing numerical differentiation to obtain force
    @return -r dU/dr"""
    dUdr = pot.force(r, h=h)
    return r * dUdr
|
apstools | apstools//utils.pyfile:/utils.py:function:_get_pv/_get_pv | def _get_pv(obj):
"""
returns PV name, prefix of None from ophyd object
"""
if hasattr(obj, 'pvname'):
return obj.pvname
elif hasattr(obj, 'prefix'):
return obj.prefix
|
fpd-0.1.10 | fpd-0.1.10//fpd/mag_tools.pyfile:/fpd/mag_tools.py:function:tesla2Oe/tesla2Oe | def tesla2Oe(tesla):
"""
Field (Oe) from induction (T).
Parameters
----------
tesla : ndarray or scalar
Induction in Tesla.
Returns
-------
o : ndarray or scalar
field in Oe.
"""
o = tesla * 10000
return o
|
callchain-0.2.6 | callchain-0.2.6//callchain/services/reduce.pyclass:KReduce/zip | def zip():
    """
    Interface stub: smash incoming things into one single thing, pairing
    things by iterable position (cf. the builtin ``zip``).
    """
|
topydo-0.13 | topydo-0.13//topydo/lib/ListFormat.pyfile:/topydo/lib/ListFormat.py:function:_filler/_filler | def _filler(p_str, p_len):
"""
Returns p_str preceded by additional spaces if p_str is shorter than p_len.
"""
to_fill = p_len - len(p_str)
return to_fill * ' ' + p_str
|
codimension-4.8.1 | codimension-4.8.1//codimension/debugger/varfilters.pyfile:/codimension/debugger/varfilters.py:function:filterLocalVariables/filterLocalVariables | def filterLocalVariables(isGlobal, varName, varType):
"""Filters out the local variables"""
del varName
del varType
return not isGlobal
|
das7pad-dns-lexicon-3.3.12 | das7pad-dns-lexicon-3.3.12//lexicon/providers/hetzner.pyfile:/lexicon/providers/hetzner.py:function:provider_parser/provider_parser | def provider_parser(subparser):
"""Configure a provider parser for Hetzner"""
subparser.add_argument('--auth-account', help=
'specify type of Hetzner account: by default Hetzner Robot (robot) or Hetzner konsoleH (konsoleh)'
)
subparser.add_argument('--auth-username', help=
'specify username of Hetzner account')
subparser.add_argument('--auth-password', help=
'specify password of Hetzner account')
subparser.add_argument('--linked', help=
"if exists, uses linked CNAME as A|AAAA|TXT record name for edit actions: by default (yes); Further restriction: Only enabled if record name or raw FQDN record identifier 'type/name/content' is specified, and additionally for update actions the record name remains the same"
, default=str('yes'), choices=['yes', 'no'])
subparser.add_argument('--propagated', help=
'waits until record is publicly propagated after succeeded create|update actions: by default (yes)'
, default=str('yes'), choices=['yes', 'no'])
subparser.add_argument('--latency', help=
'specify latency, used during checks for publicly propagation and additionally for Hetzner Robot after record edits: by default 30s (30)'
, default=int(30), type=int)
|
plone.transformchain-2.0.2 | plone.transformchain-2.0.2//plone/transformchain/interfaces.pyclass:ITransformer/__call__ | def __call__(request, result, encoding):
    """Interface stub: return a modified result.

    `request` is the Zope request. Response headers may be read or
    modified in `request.response`.

    `result` is an iterable of byte strings that represents the response
    body. When unwound, its contents will match the response content type.

    `encoding` is the default encoding used.

    Return the new result iterable, or a string. If a string is returned,
    the Content-Type header will be updated automatically. If a text
    (``six.text_type``) string is returned, it will be encoded with the
    current content encoding.

    Do not call `request.response.setBody()`. It will have no effect.
    """
|
iPOPO-1.0.0 | iPOPO-1.0.0//pelix/misc/mqtt_client.pyclass:MqttClient/on_connect | @staticmethod
def on_connect(client, result_code):
    """
    User callback: called when the client is connected.

    Default implementation does nothing.

    :param client: The Pelix MQTT client which connected
    :param result_code: The MQTT result code
    """
    pass
|
nova-20.2.0 | nova-20.2.0//nova/notifications/objects/metrics.pyclass:MetricPayload/from_monitor_metric_list_obj | @classmethod
def from_monitor_metric_list_obj(cls, monitor_metric_list):
"""Returns a list of MetricPayload objects based on the passed
MonitorMetricList object.
"""
payloads = []
for monitor_metric in monitor_metric_list:
payloads.append(cls(monitor_metric))
return payloads
|
plone.app.portlets-4.4.5 | plone.app.portlets-4.4.5//plone/app/portlets/portlets/rss.pyclass:IFeed/needs_update | def needs_update():
    """Interface stub: return whether this feed needs to be updated."""
|
katdal-0.15 | katdal-0.15//katdal/datasources.pyfile:/katdal/datasources.py:function:_shorten_key/_shorten_key | def _shorten_key(telstate, key):
"""Shorten telstate key by subtracting the first prefix that fits.
Parameters
----------
telstate : :class:`katdal.sensordata.TelstateToStr` object
Telescope state
key : string
Telescope state key
Returns
-------
short_key : string
Suffix of `key` after subtracting first matching prefix, or empty
string if `key` does not start with any of the prefixes (or exactly
matches a prefix, which is also considered pathological)
"""
for prefix in telstate.prefixes:
if key.startswith(prefix):
return key[len(prefix):]
return ''
|