repo
stringlengths 1
29
| path
stringlengths 24
332
| code
stringlengths 39
579k
|
---|---|---|
def commit():
    """Commit the current transaction.

    Interface stub (zope.interface style): declares the contract only and
    intentionally has no implementation body.

    In explicit mode, if a transaction hasn't begun, a
    `NoTransaction` exception will be raised.
    """
|
def ut_haider(dp, mu, phi, rhog, rhos):
    """
    Terminal velocity of a single particle from the Haider and Levenspiel
    correlation [5]_. Applicable for particle sphericities from 0.5 to 1,
    where the particle diameter is the equivalent spherical diameter (the
    diameter of a sphere having the same volume as the particle).

    The correlation works in dimensionless form: a dimensionless diameter

    .. math:: d_{*} = d_p \\left[ \\frac{g\\, \\rho_g (\\rho_s - \\rho_g)}{\\mu^2} \\right]^{1/3}

    and a dimensionless velocity

    .. math:: u_* = \\left[ \\frac{18}{d{_*}^2} + \\frac{2.3348 - 1.7439\\, \\phi}{d{_*}^{0.5}} \\right]^{-1}

    valid for :math:`0.5 \\leq \\phi \\leq 1`, which is converted back to a
    dimensional terminal velocity via

    .. math:: u_t = u_* \\left[ \\frac{g\\, \\mu\\, (\\rho_s - \\rho_g)}{\\rho{_g}^2} \\right]^{1/3}

    Parameters
    ----------
    dp : float
        Equivalent spherical diameter of particle [m]
    mu : float
        Viscosity of gas [kg/(m s)]
    phi : float
        Sphericity of particle [-]
    rhog : float
        Density of gas [kg/m^3]
    rhos : float
        Density of particle [kg/m^3]

    Returns
    -------
    ut : float
        Terminal velocity of a particle [m/s]

    Raises
    ------
    ValueError
        If ``phi`` lies outside the valid range [0.5, 1.0].

    Example
    -------
    >>> ut_haider(0.00016, 1.8e-5, 0.67, 1.2, 2600)
    0.8857

    References
    ----------
    .. [5] A. Haider and O. Levenspiel. Drag coefficient and terminal velocity
       of spherical and nonspherical particles. Powder Technology,
       58:63-70, 1989.
    """
    # Guard clause: the fit is only valid for sphericities in [0.5, 1.0].
    if phi > 1.0 or phi < 0.5:
        raise ValueError('Sphericity must be 0.5 <= phi <= 1.0')

    g = 9.81            # gravitational acceleration [m/s^2]
    drho = rhos - rhog  # particle/gas density difference [kg/m^3]

    # Dimensionless particle diameter.
    d_star = dp * (g * rhog * drho / mu ** 2) ** (1 / 3)
    # Dimensionless terminal velocity (Haider-Levenspiel fit).
    u_star = 1 / (18 / d_star ** 2 + (2.3348 - 1.7439 * phi) / d_star ** 0.5)
    # Convert back to a dimensional terminal velocity [m/s].
    return u_star * (g * drho * mu / rhog ** 2) ** (1 / 3)
|
def wireframe(use_boundary: bool=True, use_even_offset: bool=True,
    use_relative_offset: bool=False, use_replace: bool=True, thickness:
    float=0.01, offset: float=0.01, use_crease: bool=False, crease_weight:
    float=0.01):
    """Create a solid wire-frame from faces.

    API stub from a fake ``bpy`` typing package: the real implementation
    lives inside Blender, so this body intentionally does nothing.

    :param use_boundary: Boundary, Inset face boundaries
    :type use_boundary: bool
    :param use_even_offset: Offset Even, Scale the offset to give more even thickness
    :type use_even_offset: bool
    :param use_relative_offset: Offset Relative, Scale the offset by surrounding geometry
    :type use_relative_offset: bool
    :param use_replace: Replace, Remove original faces
    :type use_replace: bool
    :param thickness: Thickness
    :type thickness: float
    :param offset: Offset
    :type offset: float
    :param use_crease: Crease, Crease hub edges for improved subsurf
    :type use_crease: bool
    :param crease_weight: Crease weight
    :type crease_weight: float
    """
    pass
|
def rupture_force_from_dpdf(dpdf, forces, force_step=None):
    """Calculate the average rupture force for a given probability
    density and the associated force values by numerical integration.

    *f_ext* (the theoretical external force for a rigid molecule) is defined
    by f_ext = 'spring constant' * 'velocity' * 'time' + 'initial force'
    or the real external force for zero spring constant. The result is the
    expectation value sum(dpdf * f) / sum(dpdf) on the uniform force grid.

    Parameters
    ----------
    dpdf: list of floats
        dp/df-values for each *f_ext*-value.
    forces: list of floats
        External forces for each *f_ext* (must be equally spaced).
    force_step: float (optional)
        Step size of *f_ext*; derived from the first two entries of
        ``forces`` when omitted.

    Returns
    -------
    result: float
        Rupture force.

    Raises
    ------
    ValueError
        If the spacing of ``forces`` is not constant.
    """
    if force_step is None:
        force_step = forces[1] - forces[0]
    # Verify the grid is uniform; a non-constant step would invalidate the
    # rectangle-rule integration below.
    for current, previous in zip(forces[2:], forces[1:]):
        if abs(force_step - (current - previous)) > 1e-06:
            raise ValueError('The force steps in list forces should ' +
                'be constant.')
    weighted = sum(p * force_step * f for p, f in zip(dpdf, forces))
    total = sum(p * force_step for p in dpdf)
    return weighted / total
|
def format_file_name(path: str) ->str:
    """
    Strip or replace characters that are illegal in file names.

    ``/``, spaces, ``"`` and ``\\`` are removed; ``+``, ``:`` and ``|``
    are replaced with ``-``.

    :param path: raw name to sanitise
    :return: sanitised name
    """
    # str.translate performs all seven substitutions in a single C-level
    # pass instead of chaining one .replace() call per character.
    table = str.maketrans({'/': None, ' ': None, '"': None, '\\': None,
                           '+': '-', ':': '-', '|': '-'})
    return path.translate(table)
|
@classmethod
def factory(cls, global_config, **local_config):
    """Paste app factory hook for paste.deploy config files.

    Paste passes every key under the ``[app:APPNAME]`` section of the
    config as keyword arguments, and they are forwarded verbatim to the
    class constructor. A hypothetical configuration::

        [app:wadl]
        latest_version = 1.3
        paste.app_factory = masakari.api.fancy_api:Wadl.factory

    results in a call equivalent to ``fancy_api.Wadl(latest_version='1.3')``.

    Subclasses may re-implement ``factory``, but the kwarg pass-through
    usually makes that unnecessary.
    """
    # global_config is deliberately ignored; only the app-local section
    # feeds the constructor.
    return cls(**local_config)
|
def initialize_stats_dict(individual):
    """Seed the ``statistics`` dict of a DEAP individual with defaults.

    Keys written:
      * 'generation'      -- generation of evaluation, starts at 0
      * 'mutation_count'  -- cumulative mutations applied, starts at 0
      * 'crossover_count' -- cumulative crossovers applied, starts at 0
      * 'predecessor'     -- lineage representation, starts at ('ROOT',)

    Parameters
    ----------
    individual: deap individual
        Must already expose a mutable ``statistics`` mapping; it is
        updated in place and nothing is returned.
    """
    defaults = (
        ('generation', 0),
        ('mutation_count', 0),
        ('crossover_count', 0),
        ('predecessor', ('ROOT',)),
    )
    for key, value in defaults:
        individual.statistics[key] = value
|
def _set_index(x, index):
    """Assign ``index`` to ``x.index`` when that is possible.

    A no-op (returning ``x`` unchanged) when ``index`` is None or when
    ``x`` has no ``index`` attribute.
    """
    if index is not None and hasattr(x, 'index'):
        x.index = index
    return x
|
def get_desktop():
    """
    Get the current user's Desktop path (Windows only).

    Reads the 'Desktop' value from the per-user Shell Folders registry
    key via ``winreg``.

    :return: absolute path of the Desktop folder as a string
    """
    import winreg
    # PyHKEY supports the context-manager protocol; `with` guarantees the
    # registry handle is closed even if QueryValueEx raises (the original
    # leaked the handle).
    with winreg.OpenKey(winreg.HKEY_CURRENT_USER,
        'Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders'
        ) as key:
        return winreg.QueryValueEx(key, 'Desktop')[0]
|
def get_rel_path(path, base):
    """Return ``path`` relative to ``base``.

    e.g. ``get_rel_path('abc/de/fg', 'abc')`` => ``'de/fg'``

    :param path: full path; must start with ``base``
    :param base: prefix to strip
    :raises ValueError: if ``path`` does not start with ``base``
    """
    # Explicit exception instead of the original `assert`, which would be
    # silently stripped when Python runs with -O.
    if not path.startswith(base):
        raise ValueError('path %r does not start with base %r' % (path, base))
    lb = len(base)
    if len(path) == lb:
        return ''
    # Drop a single separating '/' so the result is a clean relative path.
    if path[lb] == '/':
        return path[lb + 1:]
    return path[lb:]
|
def med(x, D, mixed=False):
    """Generate fraction representation using Mediant method.

    Approximates the float ``x`` by a fraction whose denominator does not
    exceed ``D``, repeatedly tightening a bracketing pair of fractions
    with their mediant (n1 + n2) / (d1 + d2).

    :param x: value to approximate
    :param D: maximum denominator allowed
    :param mixed: when True return a mixed number [whole, num, den],
        otherwise an improper fraction [0, num, den]
    :return: list of three ints
    """
    # Bracket x between n1/d1 = floor(x)/1 and n2/d2 = (floor(x)+1)/1.
    n1, d1 = int(x), 1
    n2, d2 = n1 + 1, 1
    m = 0.0
    if x != n1:
        # Replace one bracket endpoint with the mediant until a
        # denominator would exceed D.
        while d1 <= D and d2 <= D:
            m = float(n1 + n2) / (d1 + d2)
            if x == m:
                # Exact hit: keep the mediant if its denominator still
                # fits; otherwise keep the endpoint with the larger
                # denominator (the tighter approximation).
                if d1 + d2 <= D:
                    n1, d1 = n1 + n2, d1 + d2
                    d2 = D + 1
                elif d1 > d2:
                    d2 = D + 1
                else:
                    d1 = D + 1
                break
            elif x < m:
                # x lies below the mediant: tighten the upper endpoint.
                n2, d2 = n1 + n2, d1 + d2
            else:
                # x lies above the mediant: tighten the lower endpoint.
                n1, d1 = n1 + n2, d1 + d2
    # The endpoint whose denominator stayed within D is the answer
    # (d1 > D here acts as the "upper endpoint won" marker set above).
    if d1 > D:
        n1, d1 = n2, d2
    if not mixed:
        return [0, n1, d1]
    q = divmod(n1, d1)
    return [q[0], q[1], d1]
|
def d_perspective_camera_d_shape_parameters(shape_pc_uv, warped_uv, camera):
    """
    Calculates the derivative of the perspective projection with respect to the
    shape parameters.

    Parameters
    ----------
    shape_pc_uv : ``(n_points, 3, n_parameters)`` `ndarray`
        The (sampled) basis of the shape model.
    warped_uv : ``(n_points, 3)`` `ndarray`
        The shape instance with the view transform (rotation and translation)
        applied on it.
    camera : `menpo3d.camera.PerspectiveCamera`
        The camera object that is responsible of projecting the model to the
        image plane.

    Returns
    -------
    dw_da : ``(2, n_shape_parameters, n_points)`` `ndarray`
        The derivative of the perspective camera transform with respect to
        the shape parameters.
    """
    n_points, n_dims, n_parameters = shape_pc_uv.shape
    assert n_dims == 3
    # Depth (z) of every point after the view transform.
    z = warped_uv[:, (2)]
    # Rotate each basis vector into camera space; the final .T presumably
    # yields shape (3, n_parameters, n_points), matching the documented
    # return shape -- TODO confirm against rotation_transform.apply.
    dw_da = camera.rotation_transform.apply(shape_pc_uv.transpose(0, 2, 1)).T
    # Chain rule for the perspective divide: subtract the contribution of
    # the depth derivative from the x/y rows (in place).
    dw_da[:2] -= warped_uv[:, :2].T[:, (None)] * dw_da[2] / z
    # Scale by focal length and divide by depth to finish the projection.
    return camera.projection_transform.focal_length * dw_da[:2] / z
|
def transform(request, data, finishing=False):
    """Transform in-coming message entity. request will be updated in
    place. Returns the transformed request data.

    Interface stub: declares the contract only; concrete behaviour is
    supplied by :class:`IHTTPInBound` plugin implementations.

    ``request``,
        :class:`IHTTPRequest` plugin whose `request.response` attribute
        is being transformed.
    ``data``,
        Either request body or chunk data (in case of chunked encoding)
        in byte-string.
    ``finishing``,
        In case of chunked encoding, this denotes whether this is the last
        chunk to be received.
    """
|
@classmethod
def load(cls, key, backend=None, only_hdr=False):
    """Inverse of the ``save`` method: load a stored instance back.

    :param key: A dictionary with string keys and string values (as
        produced by the get_key method) or a Frontend instance
        identifying what to load.
    :param backend=None: Backend to load from. When None,
        ``cls.get_backend()`` is asked to supply one.
    :param only_hdr=False: Whether to only load the header (in which
        case we do not need a backend).

    :return: A new instance corresponding to the one we `save`-ed
        previously if only_hdr is False, or a dictionary representing
        the header if only_hdr is True.
    """
    # Resolve the backend first (may fall back to the class default),
    # then delegate the actual deserialisation to it.
    resolved = cls.get_backend(backend)
    return resolved.load(key, only_hdr=only_hdr, front_cls=cls)
|
@classmethod
def geometric(cls, n, p):
    """
    Visualization for a Geometric distribution.

    Args:
        n (int): count parameter; must be a non-negative integer
        p (float): probability parameter; must satisfy 0 <= p <= 1

    Returns:
        image (IPython.display.Image): The image that can be displayed
        inline in a Jupyter notebook

    Raises:
        ValueError: if ``n`` is not an int, is negative, or ``p`` lies
        outside [0, 1].
    """
    # Validate inputs up front so the backend is only contacted with a
    # well-formed parameter set.
    if not isinstance(n, int):
        raise ValueError('For a Geometric distribution, n should always be an integer.')
    if n < 0:
        raise ValueError('For a Geometric distribution, n should be greater than or equal to zero.')
    if not 0 <= p <= 1:
        raise ValueError('For a Geometric distribution, p should always be between 0 and 1.')
    return cls._call_post(dist='geometric', n=n, p=p)
|
def reshape_1d(df):
    """Return ``df`` as a 2D column matrix, reshaping only 1D inputs.

    Inputs that are already 2D (or higher) are returned untouched.
    """
    if len(df.shape) == 1:
        return df.reshape(df.shape[0], 1)
    return df
|
@staticmethod
def call_mod_ptr_or_value(annotation, value):
    """
    Last step in a call_mod_ for classes which wrap swig types and expose
    them via ``value`` and ``ptr`` properties.

    Returns ``value.ptr`` when the annotation denotes a pointer (ends
    with ``*``), otherwise ``value.value``.
    """
    # endswith('*') states the intent directly; the original reversed the
    # string and used startswith, which is equivalent but obscure.
    if annotation.endswith('*'):
        return value.ptr
    return value.value
|
def et_to_str(node):
    """Get the text value of an Element, returning None if not found."""
    # getattr with a default covers the missing-attribute case the
    # original handled with try/except AttributeError.
    return getattr(node, 'text', None)
|
def copy_prev_settings():
    """Copy settings from previous version.

    API stub from a fake ``bpy`` typing package: the real implementation
    lives inside Blender, so this body intentionally does nothing.
    """
    pass
|
def __call__(environ):
    """ Return an instance of ``pyramid.request.Request``

    Interface stub: declares the request-factory contract only.
    ``environ`` is presumably a WSGI environment mapping -- confirm
    against pyramid's IRequestFactory documentation.
    """
|
@classmethod
def has_resuming(cls):
    """Tell whether this backend can resume an interrupted fetch.

    :returns: always True -- the Confluence backend supports resuming
    """
    return True
|
@staticmethod
def _optimize_printing(pipe, config: dict):
    """
    Render an sklearn-style config dict in a human-readable form.

    Keys of the form ``step__param`` are grouped under their pipeline
    step, which formats the parameter via ``prettify_config_output``.
    Without a pipeline the raw dict repr is returned.

    Raises ValueError when a key references a step that is not part of
    the pipeline.
    """
    if pipe is None:
        return str(config)
    lines = ['' + '\n']
    for full_key, value in config.items():
        # Split "step__rest__of__path" into the step name and the rest.
        step_name, _, param_path = full_key.partition('__')
        if step_name not in pipe.named_steps:
            raise ValueError('Item is not contained in pipeline:' + step_name)
        element = pipe.named_steps[step_name]
        pretty = element.prettify_config_output(param_path, value)
        lines.append(' ' + step_name + '->' + pretty + '\n')
    return ''.join(lines)
|
@staticmethod
def _dimensions_to_values(dimensions):
    """
    Reduce Druid dimension specs to plain dimension values.

    Dict specs keep their full form when they carry an ``extractionFn``;
    otherwise they collapse to their ``dimension`` value. Dict specs with
    neither key are dropped, and non-dict entries pass through untouched.
    """
    values = []
    for spec in dimensions:
        if not isinstance(spec, dict):
            values.append(spec)
        elif 'extractionFn' in spec:
            values.append(spec)
        elif 'dimension' in spec:
            values.append(spec['dimension'])
        # dict specs with neither key are intentionally ignored
    return values
|
def psi_Ca_H_HSO4_HMW84(T, P):
    """c-c'-a: calcium hydrogen bisulfate [HMW84]."""
    # The interaction parameter is zero in this model; the fit is only
    # flagged valid at the 298.15 K reference temperature.
    valid = T == 298.15
    return 0.0, valid
|
def order_points(p1, p2, nd=3):
    """Given two points, swap each axis where p1 > p2 (in place).

    Args:
        p1 (point): starting point
        p2 (point): ending point
        nd (int optional): number of axes to compare, defaults to 3 (x, y, z)

    Returns:
        tuple: the same two (mutated) points with each axis ordered,
        ([x,y,z], [x,y,z])
    """
    for axis in range(nd):
        if p1[axis] > p2[axis]:
            # Tuple assignment swaps the coordinates without a temporary.
            p1[axis], p2[axis] = p2[axis], p1[axis]
    return p1, p2
|
def unauthed_report_predicate(context, request):
    """
    Allow access to the view when the context object's ``public`` flag
    is truthy.

    :param context: object exposing a ``public`` attribute
    :param request: current request (unused)
    :return: bool -- the original implicitly returned None for the
        non-public case; an explicit bool is returned instead, which is
        equivalent under truthiness but unambiguous.
    """
    return bool(context.public)
|
def moment2(x, y, f, nx, ny, nrm_fac=None):
    """Normalised 2D moment: sum(x**nx * y**ny * f) / nrm_fac.

    ``nrm_fac`` defaults to ``f.sum()`` so the default result is the
    (nx, ny) moment of the distribution ``f``.
    """
    norm = f.sum() if nrm_fac is None else nrm_fac
    weighted = x ** nx * y ** ny * f
    return weighted.sum() / norm
|
@classmethod
def from_dict(cls, data_in, model):
    """Construct a Project from a dictionary of values and a model.

    Args:
        data_in (dict): source values; 'name' is required while 'uid',
            'tasks' and 'dependencies' are optional.
        model: The model used to predict the durations of tasks.

    Returns:
        Project: The constructed project.
    """
    return cls(
        data_in['name'],
        model,
        uid=data_in.get('uid', None),
        tasks=data_in.get('tasks', []),
        dependencies=data_in.get('dependencies', []),
    )
|
def predecessors_query(var_name, node_id, node_label, edge_label,
    predecessor_label=None):
    """Generate a Cypher query returning the ids of a node's predecessors.

    Parameters
    ----------
    var_name
        Name of the variable corresponding to the node to match.
        NOTE(review): currently unused -- the generated query always binds
        the node to ``n``; confirm whether callers rely on that.
    node_id
        Id of the node to match
    node_label
        Label of the node to match
    edge_label
        Label of the edge to match
    predecessor_label
        Label of the predecessors we want to find; defaults to
        ``node_label`` when None.
    """
    label = node_label if predecessor_label is None else predecessor_label
    match_clause = (
        "OPTIONAL MATCH (pred:{})-[:{}]-> (n:{} {{id : '{}'}})\n".format(
            label, edge_label, node_label, node_id))
    return match_clause + 'RETURN pred.id as pred'
|
def isoformat(dt):
    """Return an ISO-8601 timestamp, using 'Z' for UTC instead of '+00:00'.

    Behaves like ``datetime.isoformat()`` for non-UTC/naive values.
    """
    stamp = dt.isoformat()
    return stamp.replace('+00:00', 'Z')
|
def inc_key(text):
    """Return ``text`` with its last character advanced by one code point.

    Used to turn a SQL LIKE prefix match into a half-open range
    comparison (key <= x < inc_key(key)).
    """
    head, last = text[:-1], text[-1]
    return head + chr(ord(last) + 1)
|
def find_stab(state, stab_xs, stab_zs):
    """
    Find the sign of the logical operator.

    Determines whether the Pauli operator described by its X support
    (``stab_xs``) and Z support (``stab_zs``) is generated by the
    stabilizers of ``state``.

    Args:
        state: stabilizer state exposing ``stabs``/``destabs`` tables
            with ``row_x``/``row_z`` and ``col_x``/``col_z`` collections
            of qubit-index sets.
        stab_xs: qubit indices carrying an X component.
        stab_zs: qubit indices carrying a Z component.

    Returns:
        bool: True when the accumulated stabilizer product cancels the
        requested supports exactly, False otherwise.
    """
    # The identity operator is trivially generated.
    if len(stab_xs) == 0 and len(stab_zs) == 0:
        return True
    stab_xs = set(stab_xs)
    stab_zs = set(stab_zs)
    stabs = state.stabs
    built_up_xs = set()
    built_up_zs = set()
    # Symmetric difference (^=) accumulates Pauli supports mod 2,
    # mirroring multiplication of Pauli operators.
    for q in stab_xs:
        for stab_id in state.destabs.col_z[q]:
            built_up_xs ^= stabs.row_x[stab_id]
            built_up_zs ^= stabs.row_z[stab_id]
    for q in stab_zs:
        for stab_id in state.destabs.col_x[q]:
            built_up_xs ^= stabs.row_x[stab_id]
            built_up_zs ^= stabs.row_z[stab_id]
    # Cancel against the requested operator; empty leftovers mean the
    # stabilizer product reproduces it exactly.
    built_up_xs ^= stab_xs
    built_up_zs ^= stab_zs
    if len(built_up_xs) != 0 or len(built_up_zs) != 0:
        return False
    else:
        return True
|
def google_has_height(style):
    """
    Report whether the element style explicitly defines the 'height'
    attribute.

    :type style: dict
    :rtype: bool
    """
    return 'height' in style
|
@classmethod
def _new(cls, local_class, cache_class):
    """ when constructing a new remote using _new you MUST
    call init afterward to bind the remote api

    Builds a fresh subclass of this remote class and cross-links it with
    the given local and cache classes so each of the three can reach the
    other two.

    :param local_class: class representing local paths
    :param cache_class: class representing cached metadata
    :return: the newly created remote subclass
    """
    # Create a per-pairing subclass so the class attributes set below do
    # not leak between different (local, cache) configurations.
    newcls = type(cls.__name__, (cls,), dict(_local_class=local_class,
        _cache_class=cache_class))
    # Wire up the triangular references between local, cache and remote.
    local_class._remote_class = newcls
    local_class._cache_class = cache_class
    cache_class._remote_class = newcls
    cache_class._local_class = local_class
    # NOTE(review): weighAnchor presumably resets anchor state on both
    # classes -- confirm against augpathlib's documentation.
    newcls.weighAnchor()
    cache_class.weighAnchor()
    return newcls
|
@classmethod
def normalize_name(cls, name):
    """
    Normalise a command name: lower-case it and turn underscores and
    spaces into hyphens.
    """
    lowered = name.lower()
    return lowered.replace('_', '-').replace(' ', '-')
|
def feed_url(itunes_lookup_response):
    """
    Returns feed URL from the itunes lookup response.

    :param itunes_lookup_response: parsed JSON dict from the iTunes
        lookup API; expected to contain a 'results' list.
    :return: str
    :raises LookupError: when there are no results or the first result
        lacks a 'feedUrl' field.
    """
    results = itunes_lookup_response.get('results')
    # `not results` also covers a missing 'results' key (None), which the
    # original crashed on with a TypeError from len(None).
    if not results:
        raise LookupError('iTunes response has no results')
    url = results[0].get('feedUrl')
    if url is None:
        raise LookupError('feedUrl field is not present in response')
    return url
|
@classmethod
def from_interval(cls, interval):
    """Create the ranges object from `pybedtools.Interval`.

    # Arguments
        interval: `pybedtools.Interval` instance
    """
    return cls(
        chr=interval.chrom,
        start=interval.start,
        end=interval.end,
        id=interval.name,
        strand=interval.strand,
    )
|
@classmethod
def from_json(cls, project, data):
    """Return an instance built from the given parsed-JSON data."""
    instance = cls(project)
    # Delegate field population to the shared JSON-application hook.
    instance._apply_json(data)
    return instance
|
@staticmethod
def _close_data_layers(data_layers):
    """Close the exclusion h5 handler of every data layer that has one.

    Parameters
    ----------
    data_layers : None | dict
        Aggregation data layers; entries carrying an 'fobj' handler get
        it closed. None is accepted and ignored.
    """
    if data_layers is None:
        return
    for layer in data_layers.values():
        if 'fobj' in layer:
            layer['fobj'].close()
|
def parse_image_id(image_ref):
    """Return the image id from a given image ref.

    The id is simply the final '/'-separated component of the ref (the
    whole string when it contains no '/').

    :param str image_ref: a string that includes the image id
    :return: the image id string
    :rtype: string
    """
    # rpartition splits on the last '/' and works unchanged when the
    # separator is absent.
    return image_ref.rpartition('/')[2]
|
def get_attribute_in_object(o, attribute_type=False, attribute_value=False,
    drop=False):
    """Gets the first attribute of a specific type within an object.

    Scans ``o['Attribute']`` for an entry matching ``attribute_type`` or
    ``attribute_value`` and returns a copy of it (an empty placeholder
    ``{'value': ''}`` when nothing matches). With ``drop=True`` the
    matched entry is also removed from the object in place.

    NOTE(review): popping from ``o['Attribute']`` while iterating it can
    skip the element following a removal, and the value-match branches do
    not ``break`` after a hit (so a later match overrides an earlier
    one) -- confirm whether both behaviours are intentional.
    """
    found_attribute = {'value': ''}
    for i, a in enumerate(o['Attribute']):
        # An exact type match wins and stops the scan.
        if a['type'] == attribute_type:
            found_attribute = a.copy()
            if drop:
                o['Attribute'].pop(i)
            break
        if a['value'] == attribute_value:
            found_attribute = a.copy()
            if drop:
                o['Attribute'].pop(i)
        # Composite types ('x|y') and malware samples carry two values
        # separated by '|'; match against either half.
        if '|' in a['type'] or a['type'] == 'malware-sample':
            if attribute_value in a['value'].split('|'):
                found_attribute = a.copy()
                if drop:
                    o['Attribute'].pop(i)
    return found_attribute
|
@staticmethod
def initialise_fluids(nw):
    """
    Fluid initialisation for fluid mixture at outlet of the node.

    Intentionally a no-op for this component: the method only exists to
    satisfy the shared node interface.

    Parameters
    ----------
    nw : tespy.networks.network
        Network using this component object.
    """
    return
|
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces['closest-tag']
    if tag:
        out = '%s-%d-g%s' % (tag, pieces['distance'], pieces['short'])
    else:
        # No tag available: fall back to the bare short hash.
        out = pieces['short']
    if pieces['dirty']:
        out += '-dirty'
    return out
|
@classmethod
def from_item(cls, item):
    """
    Build a SlackResource-like class object from an API item.

    :param item: dict of constructor keyword arguments
    :return: new class instance
    """
    return cls(**item)
|
def reverseString(st):
    """Return the reverse of ``st``, coerced to str."""
    reversed_seq = st[::-1]
    return str(reversed_seq)
|
def put_uri_extension(data):
    """Store the URI-extension block for this share.

    Interface stub: declares the contract only.

    This block of data contains integrity-checking information (hashes
    of plaintext, crypttext, and shares), as well as encoding parameters
    that are necessary to recover the data. This is a serialized dict
    mapping strings to other strings. The hash of this data is kept in
    the URI and verified before any of the data is used. All buckets for
    a given file contain identical copies of this data.

    The serialization format is specified with the following pseudocode:

        for k in sorted(dict.keys()):
            assert re.match(r'^[a-zA-Z_\\-]+$', k)
            write(k + ':' + netstring(dict[k]))

    @param data=URIExtensionData
    @return: a Deferred that fires (with None) when the operation completes
    """
|
def get_batch_event_count(batch_id):
    """
    Return the count of events.

    Interface stub: declares the contract only.

    :param batch_id:
        The batch identifier for the batch to operate on.
    :returns:
        The number of events in the batch.
        If async, a Deferred is returned instead.
    """
|
@classmethod
def generate(cls, *args, **kwargs):
    """Shortcut: hook up a new initial installation and write the
    configuration file to disk.

    Constructs the configuration with ``dev_mode`` forced off,
    initializes it, persists it, and returns it.
    """
    config = cls(*args, dev_mode=False, **kwargs)
    config.initialize()
    config.to_configuration_file()
    return config
|
def hawkesll(lda=None, alpha=None, beta=None, state=None, lags=None, marks=
    None, valid_length=None, max_time=None, out=None, name=None, **kwargs):
    """Computes the log likelihood of a univariate Hawkes process.

    The log likelihood is calculated on point-process observations given
    as *ragged* matrices of ``lags`` (interarrival times w.r.t. the
    previous point) and ``marks`` (process identifiers). Each mark is
    treated as an independent process with conditional intensity

    .. math::
        \\lambda_k^*(t) = \\lambda_k + \\alpha_k \\sum_{\\{t_i < t, y_i = k\\}}
        \\beta_k \\exp(-\\beta_k (t - t_i))

    where :math:`\\lambda_k` is the background intensity ``lda``,
    :math:`\\alpha_k` the branching ratio ``alpha`` and :math:`\\beta_k`
    the decay parameter ``beta``.

    ``lags`` and ``marks`` have shape (N, T) and are "left-aligned": the
    number of valid points per row is given by ``valid_length`` (shape
    (N,)). ``max_time`` is the length of the observation window per
    sample. The ``state`` input carries the exponential-decay memory
    :math:`s_k(0) = \\sum_{t_i < 0} \\exp(\\beta_k t_i)` accumulated from
    events before this batch; :math:`s_k(T)` is returned alongside the
    likelihood.

    Defined in src/operator/contrib/hawkes_ll.cc:L84

    References:
        - Bacry, E., Mastromatteo, I., & Muzy, J. F. (2015).
          Hawkes processes in finance. Market Microstructure and Liquidity,
          1(01), 1550005.

    Parameters
    ----------
    lda : NDArray
        Shape (N, K) The intensity for each of the K processes, for each sample
    alpha : NDArray
        Shape (K,) The infectivity factor (branching ratio) for each process
    beta : NDArray
        Shape (K,) The decay parameter for each process
    state : NDArray
        Shape (N, K) the Hawkes state for each process
    lags : NDArray
        Shape (N, T) the interarrival times
    marks : NDArray
        Shape (N, T) the marks (process ids)
    valid_length : NDArray
        The number of valid points in the process
    max_time : NDArray
        the length of the interval where the processes were sampled
    out : NDArray, optional
        The output NDArray to hold the result.

    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.
    """
    # Auto-generated Python stub: the real operator is dispatched inside
    # the MXNet backend; this placeholder only satisfies the signature.
    return 0,
|
def __getitem__(name):
    """Return the named instance.

    Interface stub: declares mapping-style access by name on the
    container; concrete lookup behaviour is defined by implementers.
    """
|
def option_name_to_variable_name(option: str):
    """
    Convert a CLI option name like `--ec2-user` to the Python identifier
    it gets mapped to, like `ec2_user`.
    """
    # Strip only the first '--' occurrence, then hyphens become underscores.
    stripped = option.replace('--', '', 1)
    return stripped.replace('-', '_')
|
def comment_lines(text, prefix='# '):
    """
    Comment out every line of the input text.

    Parameters
    ----------
    text : str
        Text to comment out.
    prefix : str
        String prepended to the start of each line.
    """
    return '\n'.join(prefix + line for line in text.split('\n'))
|
def print_progress(_, i, n) ->None:
    """Prints the progress of a tournament.

    :param _: unused (callback slot filled by the tournament runner)
    :param i: zero-based index of the world that just completed
    :param n: total number of worlds

    Side effects: updates the module-level ``n_completed``/``n_total``
    counters and prints a flushed progress line to stdout.
    """
    global n_completed, n_total
    n_completed = i + 1
    n_total = n
    print(
        f'{n_completed:04} of {n:04} worlds completed ({n_completed / n:0.2%})'
        , flush=True)
|
def format_trace_id(trace_id: int) ->str:
    """Render a trace id as the 32-hex-digit, zero-padded string required
    by the b3 propagation specification."""
    return f'{trace_id:032x}'
|
def joinExt(*extensions):
    """Join several path/extension parts with a '.' separator."""
    return '.'.join(extensions)
|
@classmethod
def init_project(cls, project):
    """
    Prepare a project for the benchbuild PolyJIT experiment.

    We perform this experiment in 2 steps:
    1. with likwid disabled.
    2. with likwid enabled.

    Args:
        project: The project we initialize (mutated in place).

    Returns:
        The initialized project.
    """
    # Link against the PolyJIT runtime and OpenMP.
    project.ldflags += ['-lpjit', '-lgomp']
    # Clang flags: keep frame pointers, load the Polly and PolyJIT LLVM
    # plugins, and enable PolyJIT instrumentation/logging.
    project.cflags = [
        '-fno-omit-frame-pointer',
        '-rdynamic',
        '-Xclang', '-load', '-Xclang', 'LLVMPolly.so',
        '-Xclang', '-load', '-Xclang', 'LLVMPolyJIT.so',
        '-O3',
        '-mllvm', '-polli-enable-log',
        '-mllvm', '-polli',
    ]
    return project
|
def _reset_regularization_hyperparameter(hparams):
    """Return a copy of ``hparams`` without the regularization key 'lambda'.

    The input mapping is left untouched.
    """
    remaining = hparams.copy()
    # pop with a default is a no-op when the key is absent.
    remaining.pop('lambda', None)
    return remaining
|
@classmethod
def from_dict(cls, uri_dict):
    """
    Create a URI from a dict of constructor keyword arguments.

    :return: URI
    """
    return cls(**uri_dict)
|
def describe_product_as_admin(AcceptLanguage=None, Id=None):
    """
    Retrieves information about a specified product, run with administrator access.
    See also: AWS API Documentation
    :example: response = client.describe_product_as_admin(
    AcceptLanguage='string',
    Id='string'
    )
    :type AcceptLanguage: string
    :param AcceptLanguage: The language code to use for this operation. Supported language codes are as follows:
    'en' (English)
    'jp' (Japanese)
    'zh' (Chinese)
    If no code is specified, 'en' is used as the default.
    :type Id: string
    :param Id: [REQUIRED]
    The identifier of the product for which to retrieve information.
    :rtype: dict
    :return: {
    'ProductViewDetail': {
    'ProductViewSummary': {
    'Id': 'string',
    'ProductId': 'string',
    'Name': 'string',
    'Owner': 'string',
    'ShortDescription': 'string',
    'Type': 'CLOUD_FORMATION_TEMPLATE',
    'Distributor': 'string',
    'HasDefaultPath': True|False,
    'SupportEmail': 'string',
    'SupportDescription': 'string',
    'SupportUrl': 'string'
    },
    'Status': 'AVAILABLE'|'CREATING'|'FAILED',
    'ProductARN': 'string',
    'CreatedTime': datetime(2015, 1, 1)
    },
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    }
    """
    # Stub: pyboto3 ships signatures/docs only; the real call is made by boto3.
    pass
|
avwx | avwx//parsing/remarks.pyfile:/parsing/remarks.py:function:_tdec/_tdec | def _tdec(code: str, unit: str='C') ->str:
"""
Translates a 4-digit decimal temperature representation
Ex: 1045 -> -4.5°C 0237 -> 23.7°C
"""
if not code:
return
ret = f"{'-' if code[0] == '1' else ''}{int(code[1:3])}.{code[3]}"
if unit:
ret += f'°{unit}'
return ret
|
def check_read(requested, result):
    """Check that a file read returned the expected amount of data.

    Raises EOFError if nothing was read at all, or IOError if the read was
    incomplete. A requested size of 0 is never treated as an error.

    Works for both text and binary reads: the original compared
    ``result == ''``, which never matches ``b''`` from a binary read, so an
    end-of-file on a binary stream was misreported as an incomplete read.
    """
    if len(result) != requested and requested != 0:
        if len(result) == 0:
            raise EOFError()
        else:
            raise IOError('Incomplete read, requested %d recieved %d' % (
                requested, len(result)))
|
def mval(f, c):
    """Return the negated (hence positive) value of column ``c`` when it is
    negative, otherwise zero.

    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.

    Returns
    -------
    new_val : float
        ``-f[c]`` when ``f[c] < 0``, else 0.
    """
    value = f[c]
    if value < 0:
        return -value
    return 0
|
def cache_exists(key, cache=None):
    """Check whether the cache holds a value for the given key.

    :param str|unicode key: The cache key to check.
    :param str|unicode cache: Cache name with optional address (if @-syntax is used).
    :rtype: bool
    """
    # Stub implementation: outside a running uWSGI server nothing is cached.
    return False
|
@staticmethod
def metadata_id(item):
    """Return the unique identifier of a ReMo item: its 'remo_url' as a str."""
    remo_url = item['remo_url']
    return str(remo_url)
|
DIRestPlus-0.2.2 | DIRestPlus-0.2.2//direstplus/utils/fh_utils.pyfile:/direstplus/utils/fh_utils.py:function:_get_df_between_date_by_index/_get_df_between_date_by_index | def _get_df_between_date_by_index(data_df, date_frm, date_to):
"""
该函数仅用于 return_risk_analysis 中计算使用
:param data_df:
:param date_frm:
:param date_to:
:return:
"""
if date_frm is not None and date_to is not None:
new_data_df = data_df[(data_df.index >= date_frm) & (data_df.index <=
date_to)]
elif date_frm is not None:
new_data_df = data_df[data_df.index >= date_frm]
elif date_to is not None:
new_data_df = data_df[data_df.index <= date_to]
else:
new_data_df = data_df
return new_data_df
|
def circle_diam_rad(radius):
    """Usage: Compute a circle's diameter from its radius."""
    return 2 * radius
|
def CRRAutilityP(c, gam):
    """
    Evaluate constant relative risk aversion (CRRA) marginal utility of
    consumption c, i.e. c**(-gam), for risk aversion parameter gam.

    Parameters
    ----------
    c : float
        Consumption value
    gam : float
        Risk aversion

    Returns
    -------
    (unnamed) : float
        Marginal utility
    """
    return c ** (-gam)
|
def create_route_short_name(relation):
    """Return the relation's 'ref' tag as the route short name, or '' when
    the tag is missing or empty."""
    ref = relation.tags.get('ref')
    return ref if ref else ''
|
def crossings(y, value, x=None):
    """Find the points where the (poly-)line given by 1D array *y* crosses
    *value*, returning linearly interpolated indices (or x values when an
    x array is supplied).

    Not implemented in this pure-Python backend.
    """
    raise NotImplementedError
|
def tangent(u, lmbda):
    """Computes the normalized arc tangent (du/ds, lmbda/ds). Computation is based on
    the equations

       ||du/ds||^2 + (dlmbda/ds)^2 = 1,
       d/ds f(u(s), lmbda(s)) = 0.

    They can be reduced to the nonlinear equation

        0 = df/du v + df/dlmbda sqrt(1 - ||v||^2)

    for `v := du/ds`. Its Jacobian is

        J(v) = df/du - 2 df/dlmbda v^T / sqrt(1 - ||v||^2),

    a rank-1 update to df/du which can be solved via Sherman-Morrison.

    Note that this does not work at turning points where `dlmbda/ds = 0`. Here, `v` is
    the nontrivial solution to

        0 = df/du v.
    """
    # Not implemented: this is a documented placeholder for the scheme above.
    pass
|
@classmethod
def description(cls):
    """Return the password manager description: the first line of the class
    docstring with its trailing period stripped."""
    first_line = cls.__doc__.split('\n')[0]
    return first_line[:-1]
|
def delete():
    """Remove all selected keyframes.

    No-op stub — presumably the real operator lives in Blender itself
    (this is a fake-bpy API module).
    """
    pass
|
def is_quantity(val):
    """Return True when *val* is quantity-like (int, float, datetime, ...).

    Quantities are detected by the presence of ``__sub__``; strings and
    plain objects lack it.
    """
    try:
        val.__sub__
    except AttributeError:
        return False
    return True
|
def can_edit_file(path):
    """Check whether *path* can be opened in (append) write mode.

    :param path: The path to the file.
    :return: True if the file is writable, False otherwise.

    Note: like the append-mode probe it wraps, this creates the file when
    it does not yet exist.
    """
    try:
        with open(path, 'a'):
            pass
    except IOError:
        return False
    return True
|
@classmethod
def get_config_opts(cls):
    """Override the base class config options: this strategy uses no
    datasource, so it contributes no options."""
    return []
|
pure_protobuf | pure_protobuf//legacy.pyfile:/legacy.py:function:_pack_key/_pack_key | def _pack_key(tag, wire_type):
"""
Packs a tag and a wire_type into single int according to the protobuf spec.
"""
return tag << 3 | wire_type
|
metawards-0.10.0 | metawards-0.10.0//src/metawards/_variableset.pyfile:/src/metawards/_variableset.py:function:_set_uv/_set_uv | def _set_uv(params, name: str, index: int, value: float):
"""Adjust the Parameters.UV parameter"""
if index is not None:
raise IndexError('You cannot index the UV parameter')
params.UV = value
|
def write_2col(filename, data1, data2):
    """Write *data1*/*data2* as two tab-separated columns to *filename*.

    Each output line has the form ``\\t<data1[i]>\\t\\t<data2[i]>\\n``;
    *data2* must be at least as long as *data1*.
    """
    # 'with' guarantees the handle is closed even when a write raises,
    # unlike the previous explicit open()/close() pair.
    with open(filename, 'w') as f:
        for i, value in enumerate(data1):
            f.write('\t' + str(value) + '\t\t' + str(data2[i]) + '\n')
|
def getname(f):
    """Get the name of an object.

    For use in case of callables that are not functions, and thus may not
    have ``__name__`` defined.

    Order: ``f.__name__`` > ``f.name`` > ``str(f)``.
    """
    # Narrowed from a bare `except:` - only a missing attribute should fall
    # through; KeyboardInterrupt/SystemExit must propagate.
    try:
        return f.__name__
    except AttributeError:
        pass
    try:
        return f.name
    except AttributeError:
        pass
    return str(f)
|
Pympler-0.8 | Pympler-0.8//pympler/asizeof.pyfile:/pympler/asizeof.py:function:_power2/_power2 | def _power2(n):
"""Find the next power of 2.
"""
p2 = 16
while n > p2:
p2 += p2
return p2
|
def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]]; the ".dev0" marks a dirty tree.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces['closest-tag']
    if tag:
        rendered = tag
        if pieces['distance'] or pieces['dirty']:
            rendered += '.post%d' % pieces['distance']
            if pieces['dirty']:
                rendered += '.dev0'
    else:
        rendered = '0.post%d' % pieces['distance']
        if pieces['dirty']:
            rendered += '.dev0'
    return rendered
|
def deleteLink(nodeA, nodeB):
    """Delete a link between two node objects.

    Intentionally a no-op in this no-server backend; the hook exists for
    backends that need complex database queries.

    Parameters:
        nodeA (MgvNode): source node.
        nodeB (MgvNode): destination node.
    """
    return None
|
def canonical_gid(gid):
    """Get canonical form of NCBI genome accession.

    Example:
        G005435135 -> G005435135
        GCF_005435135.1 -> G005435135
        GCF_005435135.1_ASM543513v1_genomic -> G005435135
        RS_GCF_005435135.1 -> G005435135
        GB_GCA_005435135.1 -> G005435135

    User genomes (ids starting with 'U') are returned unchanged.
    """
    if gid.startswith('U'):
        return gid
    for repo_prefix in ('RS_', 'GB_'):
        gid = gid.replace(repo_prefix, '')
    gid = gid.replace('GCA_', 'G').replace('GCF_', 'G')
    # Drop the assembly version and anything after it.
    return gid.partition('.')[0]
|
def downgrade():
    """Downgrade database — intentionally a no-op for this branch migration."""
    pass
|
def search_ancestor(node, *node_types):
    """
    Walk up the parent chain of *node* and return the first ancestor whose
    ``type`` is in *node_types*; return ``None`` when the chain is exhausted.

    :param node: The ancestors of this node will be checked.
    :param node_types: type names that are searched for.
    :type node_types: tuple of str
    """
    current = node.parent
    while current is not None and current.type not in node_types:
        current = current.parent
    return current
|
@classmethod
def set_user(cls, value):
    """
    Stores *value* under the 'user' key via ``cls.set`` (the previous
    docstring incorrectly said this sets the cloud).

    :param value: the user name to store; presumably as configured in
        cloudmesh.yaml — confirm against callers.
    :return: None
    """
    cls.set('user', value)
|
def get_content_length(environ):
    """Return CONTENT_LENGTH from a WSGI environ as a non-negative int.

    Missing, malformed, or negative values all yield 0.
    """
    raw = environ.get('CONTENT_LENGTH', 0)
    try:
        length = int(raw)
    except ValueError:
        return 0
    return max(0, length)
|
def run_code(code):
    """Execute the code string `code`.

    Intended to be used in conjunction with :func:`call`.
    """
    # NOTE: exec() runs in this function's scope, so names the code assigns
    # are discarded on return; only side effects (I/O, mutation of objects
    # reachable from globals) persist.
    exec(code)
|
def getRoles():
    """Get a sequence of the global roles assigned to the user.

    Interface declaration only; concrete user implementations provide the body.
    """
|
def set_verbosity(verbosity=0):
    """
    Set the module-wide verbosity level used by other functions.

    :param verbosity: 0=low, 1=details, 2=very verbose, 3=debug
    """
    global VERBOSE
    VERBOSE = int(verbosity)
|
mcmd | mcmd//config/loader.pyfile:/config/loader.py:function:_get_object_id/_get_object_id | def _get_object_id(ordered_dict):
"""
Gets the first value of the ordered dict (that represents a YAML object).
ruamel.YAML uses a CommentedMapValuesView as OrderedDict which doesn't support indexing, so we iterate.
"""
return next(iter(ordered_dict.values()))
|
def get_progress():
    """Returns a float (from 0.0 to 1.0) describing the amount of the
    download that has completed. This value will remain at 0.0 until the
    first byte of plaintext is pushed to the download target.

    Interface declaration only; implementations provide the body."""
|
def get_file_metadata(root):
    """Read and parse ReSpecTh XML file metadata (file author, version, etc.)

    Parameters
    ----------
    root : ``etree.Element``
        root of ReSpecTh XML file

    Returns
    -------
    properties : dict
        Dictionary with file metadata ('file-authors', 'file-version',
        'reference'); missing XML elements produce a printed warning and a
        default/absent entry instead of an exception.
    """
    properties = {}

    # File author (single entry; no ORCID in the source XML).
    properties['file-authors'] = [{'name': '', 'ORCID': ''}]
    elem = root.find('fileAuthor')
    if elem is None:
        print('Warning: no fileAuthor given')
    else:
        properties['file-authors'][0]['name'] = elem.text

    # File version; only overwrite the default on a successful parse.
    # (Previously a failed parse left `version` unbound and `str(version)`
    # raised NameError.)
    properties['file-version'] = '(1, 0)'
    elem = root.find('fileVersion')
    if elem is None:
        print('Warning: no fileVersion element')
    else:
        try:
            version = (int(elem.find('major').text),
                       int(elem.find('minor').text))
        except AttributeError:
            print('Warning: missing fileVersion major/minor')
        else:
            properties['file-version'] = str(version)

    # Bibliographic reference; both attributes are optional.
    # (Previously a missing bibliographyLink element raised AttributeError,
    # which the `except KeyError` clauses did not catch.)
    properties['reference'] = {}
    elem = root.find('bibliographyLink')
    if elem is None:
        print('Warning: no bibliographyLink element')
    else:
        try:
            properties['reference']['citation'] = elem.attrib['preferredKey']
        except KeyError:
            print('Warning: missing preferredKey attribute in bibliographyLink')
        try:
            properties['reference']['doi'] = elem.attrib['doi']
        except KeyError:
            print('Warning: missing doi attribute in bibliographyLink')
    return properties
|
def duplicate():
    """Duplicate selected control points.

    No-op stub — presumably the real operator lives in Blender itself
    (this is a fake-bpy API module).
    """
    pass
|
afkak | afkak//_util.pyfile:/_util.py:function:_coerce_client_id/_coerce_client_id | def _coerce_client_id(client_id):
"""
Ensure the provided client ID is a byte string. If a text string is
provided, it is encoded as UTF-8 bytes.
:param client_id: :class:`bytes` or :class:`str` instance
"""
if isinstance(client_id, type(u'')):
client_id = client_id.encode('utf-8')
if not isinstance(client_id, bytes):
raise TypeError(
'{!r} is not a valid consumer group (must be str or bytes)'.
format(client_id))
return client_id
|
SoftLayer-5.8.7 | SoftLayer-5.8.7//SoftLayer/managers/dns.pyclass:DNSManager/_generate_create_dict | @staticmethod
def _generate_create_dict(record, record_type, data, ttl, **kwargs):
"""Returns a dict appropriate to pass into Dns_Domain_ResourceRecord::createObject"""
resource_record = {'host': record, 'data': data, 'ttl': ttl, 'type':
record_type}
for key, value in kwargs.items():
resource_record.setdefault(key, value)
return resource_record
|
def ge_dsm_precomp(point):
    """void ge_dsm_precomp(ge_dsmp r, const ge_p3 *s)

    Identity in this backend: the point itself serves as its own
    precomputation table.

    :param point: curve point
    :return: the same point
    """
    return point
|
def getServerInfo(pbclient=None, dc_id=None):
    """Return id/name/state/vmstate info for every server in a data center.

    :param pbclient: API client providing ``list_servers`` (required)
    :param dc_id: data center identifier (required)
    :raises ValueError: when either argument is None
    """
    if pbclient is None:
        raise ValueError("argument 'pbclient' must not be None")
    if dc_id is None:
        raise ValueError("argument 'dc_id' must not be None")
    servers = pbclient.list_servers(dc_id, 1)
    return [
        dict(
            id=srv['id'],
            name=srv['properties']['name'],
            state=srv['metadata']['state'],
            vmstate=srv['properties']['vmState'],
        )
        for srv in servers['items']
    ]
|
def p_dictorsetmaker_star2_2(p):
    """dictorsetmaker_star2 : dictorsetmaker_star2 COMMA test"""
    # NOTE: the docstring above is the PLY grammar rule - do not edit it.
    # Append the new `test` value to the accumulated value list.
    key_list, value_list = p[1]
    p[0] = (key_list, value_list + [p[3]])
|
acitoolkit-0.4 | acitoolkit-0.4//acitoolkit/aciphysobject.pyclass:Interface/_parse_physical_dn | @staticmethod
def _parse_physical_dn(dn):
"""
Handles DNs that look like the following:
topology/pod-1/node-103/sys/phys-[eth1/12]
"""
name = dn.split('/')
pod = name[1].split('-')[1]
node = name[2].split('-')[1]
module = name[4].split('[')[1]
interface_type = module[:3]
module = module[3:]
port = name[5].split(']')[0]
return interface_type, pod, node, module, port
|