text stringlengths 75 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 0.18 |
|---|---|---|---|
def clean_promoted_guids(raw_promoted_guids):
""" Verify that the promoted GUIDs are formatted correctly,
otherwise strip it down into an empty list.
"""
valid = True
for row in raw_promoted_guids:
if len(row) != 2:
valid = False
break
if not (
(isinstance(row[0], str) or isinstance(row[0], unicode))
and (isinstance(row[1], int) or isinstance(row[1], float)) # noqa
):
valid = False
break
if valid:
return raw_promoted_guids
return [] | [
"def",
"clean_promoted_guids",
"(",
"raw_promoted_guids",
")",
":",
"valid",
"=",
"True",
"for",
"row",
"in",
"raw_promoted_guids",
":",
"if",
"len",
"(",
"row",
")",
"!=",
"2",
":",
"valid",
"=",
"False",
"break",
"if",
"not",
"(",
"(",
"isinstance",
"(... | 26.285714 | 0.001748 |
def raw_mode():
"""
Enables terminal raw mode during the context.
Note: Currently noop for Windows systems.
Usage: ::
with raw_mode():
do_some_stuff()
"""
if WIN:
# No implementation for windows yet.
yield # needed for the empty context manager to work
else:
# imports are placed here because this will fail under Windows
import tty
import termios
if not isatty(sys.stdin):
f = open("/dev/tty")
fd = f.fileno()
else:
fd = sys.stdin.fileno()
f = None
try:
old_settings = termios.tcgetattr(fd)
tty.setraw(fd)
except termios.error:
pass
try:
yield
finally:
# this block sets the terminal to sane mode again,
# also in case an exception occured in the context manager
try:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
# sys.stdout.flush() # not needed I think.
if f is not None:
f.close()
except termios.error:
pass | [
"def",
"raw_mode",
"(",
")",
":",
"if",
"WIN",
":",
"# No implementation for windows yet.",
"yield",
"# needed for the empty context manager to work",
"else",
":",
"# imports are placed here because this will fail under Windows",
"import",
"tty",
"import",
"termios",
"if",
"no... | 27.404762 | 0.000839 |
def renamed(self, name):
"""
Duplicate the datum and rename it
"""
duplicate = copy(self)
duplicate._name = name
return duplicate | [
"def",
"renamed",
"(",
"self",
",",
"name",
")",
":",
"duplicate",
"=",
"copy",
"(",
"self",
")",
"duplicate",
".",
"_name",
"=",
"name",
"return",
"duplicate"
] | 24.428571 | 0.011299 |
def _network_indicator_percentages(fields, network_indicators):
"""Encapsula el cálculo de indicadores de porcentaje (de errores,
de campos recomendados/optativos utilizados, de datasets actualizados)
sobre la red de nodos entera.
Args:
fields (dict): Diccionario con claves 'recomendado', 'optativo',
'total_recomendado', 'total_optativo', cada uno con valores
que representan la cantidad de c/u en la red de nodos entera.
network_indicators (dict): Diccionario de la red de nodos, con
las cantidades de datasets_meta_ok y datasets_(des)actualizados
calculados previamente. Se modificará este argumento con los
nuevos indicadores.
"""
# Los porcentuales no se pueden sumar, tienen que ser recalculados
percentages = {
'datasets_meta_ok_pct':
(network_indicators.get('datasets_meta_ok_cant'),
network_indicators.get('datasets_meta_error_cant')),
'datasets_actualizados_pct':
(network_indicators.get('datasets_actualizados_cant'),
network_indicators.get('datasets_desactualizados_cant')),
'datasets_federados_pct':
(network_indicators.get('datasets_federados_cant'),
network_indicators.get('datasets_no_federados_cant')),
'datasets_con_datos_pct':
(network_indicators.get('datasets_con_datos_cant'),
network_indicators.get('datasets_sin_datos_cant')),
}
for indicator in percentages:
pct = 0.00
partial = percentages[indicator][0] or 0
total = partial + (percentages[indicator][1] or 0)
# Evita division por 0
if total:
pct = float(partial) / total
network_indicators[indicator] = round(pct, 4)
# % de campos recomendados y optativos utilizados en el catálogo entero
if fields: # 'fields' puede estar vacío si ningún campo es válido
rec_pct = float(fields['recomendado']) / \
fields['total_recomendado']
opt_pct = float(fields['optativo']) / fields['total_optativo']
network_indicators.update({
'campos_recomendados_pct': round(rec_pct, 4),
'campos_optativos_pct': round(opt_pct, 4)
}) | [
"def",
"_network_indicator_percentages",
"(",
"fields",
",",
"network_indicators",
")",
":",
"# Los porcentuales no se pueden sumar, tienen que ser recalculados",
"percentages",
"=",
"{",
"'datasets_meta_ok_pct'",
":",
"(",
"network_indicators",
".",
"get",
"(",
"'datasets_meta... | 42.711538 | 0.00044 |
def multi_dict(pairs):
"""
Given a set of key value pairs, create a dictionary.
If a key occurs multiple times, stack the values into an array.
Can be called like the regular dict(pairs) constructor
Parameters
----------
pairs: (n,2) array of key, value pairs
Returns
----------
result: dict, with all values stored (rather than last with regular dict)
"""
result = collections.defaultdict(list)
for k, v in pairs:
result[k].append(v)
return result | [
"def",
"multi_dict",
"(",
"pairs",
")",
":",
"result",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"k",
",",
"v",
"in",
"pairs",
":",
"result",
"[",
"k",
"]",
".",
"append",
"(",
"v",
")",
"return",
"result"
] | 24.85 | 0.001938 |
def import_recursive(path):
"""
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
"""
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results | [
"def",
"import_recursive",
"(",
"path",
")",
":",
"results",
"=",
"{",
"}",
"obj",
"=",
"importlib",
".",
"import_module",
"(",
"path",
")",
"results",
"[",
"path",
"]",
"=",
"obj",
"path",
"=",
"getattr",
"(",
"obj",
",",
"'__path__'",
",",
"os",
".... | 33.333333 | 0.001621 |
def reset_weights(self):
"""Reset weights after a resampling step.
"""
if self.fk.isAPF:
lw = (rs.log_mean_exp(self.logetat, W=self.W)
- self.logetat[self.A])
self.wgts = rs.Weights(lw=lw)
else:
self.wgts = rs.Weights() | [
"def",
"reset_weights",
"(",
"self",
")",
":",
"if",
"self",
".",
"fk",
".",
"isAPF",
":",
"lw",
"=",
"(",
"rs",
".",
"log_mean_exp",
"(",
"self",
".",
"logetat",
",",
"W",
"=",
"self",
".",
"W",
")",
"-",
"self",
".",
"logetat",
"[",
"self",
"... | 33.111111 | 0.009804 |
def to_string(self):
"""Return a string that tries to reconstitute the variable decl."""
suffix = '%s %s' % (self.type, self.name)
if self.initial_value:
suffix += ' = ' + self.initial_value
return suffix | [
"def",
"to_string",
"(",
"self",
")",
":",
"suffix",
"=",
"'%s %s'",
"%",
"(",
"self",
".",
"type",
",",
"self",
".",
"name",
")",
"if",
"self",
".",
"initial_value",
":",
"suffix",
"+=",
"' = '",
"+",
"self",
".",
"initial_value",
"return",
"suffix"
] | 40.5 | 0.008065 |
def update(self, cur_value, mesg=None):
"""Update progressbar with current value of process
Parameters
----------
cur_value : number
Current value of process. Should be <= max_value (but this is not
enforced). The percent of the progressbar will be computed as
(cur_value / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''.
"""
# Ensure floating-point division so we can get fractions of a percent
# for the progressbar.
self.cur_value = cur_value
progress = float(self.cur_value) / self.max_value
num_chars = int(progress * self.max_chars)
num_left = self.max_chars - num_chars
# Update the message
if mesg is not None:
self.mesg = mesg
# The \r tells the cursor to return to the beginning of the line rather
# than starting a new line. This allows us to have a progressbar-style
# display in the console window.
bar = self.template.format(self.progress_character * num_chars,
' ' * num_left,
progress * 100,
self.spinner_symbols[self.spinner_index],
self.mesg)
sys.stdout.write(bar)
# Increament the spinner
if self.spinner:
self.spinner_index = (self.spinner_index + 1) % self.n_spinner
# Force a flush because sometimes when using bash scripts and pipes,
# the output is not printed until after the program exits.
sys.stdout.flush() | [
"def",
"update",
"(",
"self",
",",
"cur_value",
",",
"mesg",
"=",
"None",
")",
":",
"# Ensure floating-point division so we can get fractions of a percent",
"# for the progressbar.",
"self",
".",
"cur_value",
"=",
"cur_value",
"progress",
"=",
"float",
"(",
"self",
".... | 43.073171 | 0.001107 |
def get_gene_id(gene_name):
'''Retrieve systematic yeast gene name from the common name.
:param gene_name: Common name for yeast gene (e.g. ADE2).
:type gene_name: str
:returns: Systematic name for yeast gene (e.g. YOR128C).
:rtype: str
'''
from intermine.webservice import Service
service = Service('http://yeastmine.yeastgenome.org/yeastmine/service')
# Get a new query on the class (table) you will be querying:
query = service.new_query('Gene')
# The view specifies the output columns
query.add_view('primaryIdentifier', 'secondaryIdentifier', 'symbol',
'name', 'sgdAlias', 'crossReferences.identifier',
'crossReferences.source.name')
# Uncomment and edit the line below (the default) to select a custom sort
# order:
# query.add_sort_order('Gene.primaryIdentifier', 'ASC')
# You can edit the constraint values below
query.add_constraint('organism.shortName', '=', 'S. cerevisiae', code='B')
query.add_constraint('Gene', 'LOOKUP', gene_name, code='A')
# Uncomment and edit the code below to specify your own custom logic:
# query.set_logic('A and B')
for row in query.rows():
gid = row['secondaryIdentifier']
return gid | [
"def",
"get_gene_id",
"(",
"gene_name",
")",
":",
"from",
"intermine",
".",
"webservice",
"import",
"Service",
"service",
"=",
"Service",
"(",
"'http://yeastmine.yeastgenome.org/yeastmine/service'",
")",
"# Get a new query on the class (table) you will be querying:",
"query",
... | 35.2 | 0.00079 |
def flux_randomization(model, threshold, tfba, solver):
"""Find a random flux solution on the boundary of the solution space.
The reactions in the threshold dictionary are constrained with the
associated lower bound.
Args:
model: MetabolicModel to solve.
threshold: dict of additional lower bounds on reaction fluxes.
tfba: If True enable thermodynamic constraints.
solver: LP solver instance to use.
Returns:
An iterator of reaction ID and reaction flux pairs.
"""
optimize = {}
for reaction_id in model.reactions:
if model.is_reversible(reaction_id):
optimize[reaction_id] = 2*random.random() - 1.0
else:
optimize[reaction_id] = random.random()
fba = _get_fba_problem(model, tfba, solver)
for reaction_id, value in iteritems(threshold):
fba.prob.add_linear_constraints(fba.get_flux_var(reaction_id) >= value)
fba.maximize(optimize)
for reaction_id in model.reactions:
yield reaction_id, fba.get_flux(reaction_id) | [
"def",
"flux_randomization",
"(",
"model",
",",
"threshold",
",",
"tfba",
",",
"solver",
")",
":",
"optimize",
"=",
"{",
"}",
"for",
"reaction_id",
"in",
"model",
".",
"reactions",
":",
"if",
"model",
".",
"is_reversible",
"(",
"reaction_id",
")",
":",
"... | 34.5 | 0.00094 |
async def get_json(
self, force: bool=False, silent: bool=False, cache: bool=True,
) -> Any:
"""Parses the body data as JSON and returns it.
Arguments:
force: Force JSON parsing even if the mimetype is not JSON.
silent: Do not trigger error handling if parsing fails, without
this the :meth:`on_json_loading_failed` will be called on
error.
cache: Cache the parsed JSON on this request object.
"""
if cache and self._cached_json is not sentinel:
return self._cached_json
if not (force or self.is_json):
return None
data = await self._load_json_data()
try:
result = loads(data)
except ValueError as error:
if silent:
result = None
else:
self.on_json_loading_failed(error)
if cache:
self._cached_json = result
return result | [
"async",
"def",
"get_json",
"(",
"self",
",",
"force",
":",
"bool",
"=",
"False",
",",
"silent",
":",
"bool",
"=",
"False",
",",
"cache",
":",
"bool",
"=",
"True",
",",
")",
"->",
"Any",
":",
"if",
"cache",
"and",
"self",
".",
"_cached_json",
"is",... | 33.103448 | 0.009109 |
def validate(self):
""" validate: Makes sure topic is valid
Args: None
Returns: boolean indicating if topic is valid
"""
try:
assert self.kind == content_kinds.TOPIC, "Assumption Failed: Node is supposed to be a topic"
return super(TopicNode, self).validate()
except AssertionError as ae:
raise InvalidNodeException("Invalid node ({}): {} - {}".format(ae.args[0], self.title, self.__dict__)) | [
"def",
"validate",
"(",
"self",
")",
":",
"try",
":",
"assert",
"self",
".",
"kind",
"==",
"content_kinds",
".",
"TOPIC",
",",
"\"Assumption Failed: Node is supposed to be a topic\"",
"return",
"super",
"(",
"TopicNode",
",",
"self",
")",
".",
"validate",
"(",
... | 47.4 | 0.008282 |
def GetNeighbors(EPIC, season=None, model=None, neighbors=10,
mag_range=(11., 13.),
cdpp_range=None, aperture_name='k2sff_15',
cadence='lc', **kwargs):
'''
Return `neighbors` random bright stars on the same module as `EPIC`.
:param int EPIC: The EPIC ID number
:param str model: The :py:obj:`everest` model name. Only used when \
imposing CDPP bounds. Default :py:obj:`None`
:param int neighbors: Number of neighbors to return. Default 10
:param str aperture_name: The name of the aperture to use. Select \
`custom` to call \
:py:func:`GetCustomAperture`. Default `k2sff_15`
:param str cadence: The light curve cadence. Default `lc`
:param tuple mag_range: (`low`, `high`) values for the Kepler magnitude. \
Default (11, 13)
:param tuple cdpp_range: (`low`, `high`) values for the de-trended CDPP. \
Default :py:obj:`None`
'''
# Zero neighbors?
if neighbors == 0:
return []
# Get the IDs
# Campaign no.
if season is None:
campaign = Season(EPIC)
if hasattr(campaign, '__len__'):
raise AttributeError(
"Please choose a campaign/season for this target: %s."
% campaign)
else:
campaign = season
epics, kepmags, channels, short_cadence = np.array(GetK2Stars()[
campaign]).T
short_cadence = np.array(short_cadence, dtype=bool)
epics = np.array(epics, dtype=int)
c = GetNeighboringChannels(Channel(EPIC, campaign=season))
# Manage kwargs
if aperture_name is None:
aperture_name = 'k2sff_15'
if mag_range is None:
mag_lo = -np.inf
mag_hi = np.inf
else:
mag_lo = mag_range[0]
mag_hi = mag_range[1]
# K2-specific tweak. The short cadence stars are preferentially
# really bright ones, so we won't get many neighbors if we
# stick to the default magnitude range! I'm
# therefore enforcing a lower magnitude cut-off of 8.
if cadence == 'sc':
mag_lo = 8.
if cdpp_range is None:
cdpp_lo = -np.inf
cdpp_hi = np.inf
else:
cdpp_lo = cdpp_range[0]
cdpp_hi = cdpp_range[1]
targets = []
# First look for nearby targets, then relax the constraint
# If still no targets, widen magnitude range
for n in range(3):
if n == 0:
nearby = True
elif n == 1:
nearby = False
elif n == 2:
mag_lo -= 1
mag_hi += 1
# Loop over all stars
for star, kp, channel, sc in zip(epics, kepmags, channels, short_cadence):
# Preliminary vetting
if not (((channel in c) if nearby else True) and (kp < mag_hi) \
and (kp > mag_lo) and (sc if cadence == 'sc' else True)):
continue
# Reject if self or if already in list
if (star == EPIC) or (star in targets):
continue
# Ensure raw light curve file exists
if not os.path.exists(
os.path.join(TargetDirectory(star, campaign), 'data.npz')):
continue
# Ensure crowding is OK. This is quite conservative, as we
# need to prevent potential astrophysical false positive
# contamination from crowded planet-hosting neighbors when
# doing neighboring PLD.
contam = False
data = np.load(os.path.join(
TargetDirectory(star, campaign), 'data.npz'))
aperture = data['apertures'][()][aperture_name]
# Check that the aperture exists!
if aperture is None:
continue
fpix = data['fpix']
for source in data['nearby'][()]:
# Ignore self
if source['ID'] == star:
continue
# Ignore really dim stars
if source['mag'] < kp - 5:
continue
# Compute source position
x = int(np.round(source['x'] - source['x0']))
y = int(np.round(source['y'] - source['y0']))
# If the source is within two pixels of the edge
# of the target aperture, reject the target
for j in [x - 2, x - 1, x, x + 1, x + 2]:
if j < 0:
# Outside the postage stamp
continue
for i in [y - 2, y - 1, y, y + 1, y + 2]:
if i < 0:
# Outside the postage stamp
continue
try:
if aperture[i][j]:
# Oh-oh!
contam = True
except IndexError:
# Out of bounds... carry on!
pass
if contam:
continue
# HACK: This happens for K2SFF M67 targets in C05.
# Let's skip them
if aperture.shape != fpix.shape[1:]:
continue
# Reject if the model is not present
if model is not None:
if not os.path.exists(os.path.join(
TargetDirectory(star, campaign), model + '.npz')):
continue
# Reject if CDPP out of range
if cdpp_range is not None:
cdpp = np.load(os.path.join(TargetDirectory(
star, campaign), model + '.npz'))['cdpp']
if (cdpp > cdpp_hi) or (cdpp < cdpp_lo):
continue
# Passed all the tests!
targets.append(star)
# Do we have enough? If so, return
if len(targets) == neighbors:
random.shuffle(targets)
return targets
# If we get to this point, we didn't find enough neighbors...
# Return what we have anyway.
return targets | [
"def",
"GetNeighbors",
"(",
"EPIC",
",",
"season",
"=",
"None",
",",
"model",
"=",
"None",
",",
"neighbors",
"=",
"10",
",",
"mag_range",
"=",
"(",
"11.",
",",
"13.",
")",
",",
"cdpp_range",
"=",
"None",
",",
"aperture_name",
"=",
"'k2sff_15'",
",",
... | 36.107784 | 0.000484 |
def print_languages(ctx, param, value):
"""Callback for <all> flag.
Prints formatted sorted list of supported languages and exits
"""
if not value or ctx.resilient_parsing:
return
try:
langs = tts_langs()
langs_str_list = sorted("{}: {}".format(k, langs[k]) for k in langs)
click.echo(' ' + '\n '.join(langs_str_list))
except RuntimeError as e: # pragma: no cover
log.debug(str(e), exc_info=True)
raise click.ClickException("Couldn't fetch language list.")
ctx.exit() | [
"def",
"print_languages",
"(",
"ctx",
",",
"param",
",",
"value",
")",
":",
"if",
"not",
"value",
"or",
"ctx",
".",
"resilient_parsing",
":",
"return",
"try",
":",
"langs",
"=",
"tts_langs",
"(",
")",
"langs_str_list",
"=",
"sorted",
"(",
"\"{}: {}\"",
"... | 38.071429 | 0.001832 |
def parse_pac_file(pacfile):
"""
Reads the pacfile and evaluates it in the Javascript engine created by
init().
"""
try:
with open(pacfile) as f:
pac_script = f.read()
_pacparser.parse_pac_string(pac_script)
except IOError:
raise IOError('Could not read the pacfile: {}'.format(pacfile)) | [
"def",
"parse_pac_file",
"(",
"pacfile",
")",
":",
"try",
":",
"with",
"open",
"(",
"pacfile",
")",
"as",
"f",
":",
"pac_script",
"=",
"f",
".",
"read",
"(",
")",
"_pacparser",
".",
"parse_pac_string",
"(",
"pac_script",
")",
"except",
"IOError",
":",
... | 28.090909 | 0.018809 |
def get(self, path, watch=None):
"""Returns the data of the specified node."""
_log.debug(
"ZK: Getting {path}".format(path=path),
)
return self.zk.get(path, watch) | [
"def",
"get",
"(",
"self",
",",
"path",
",",
"watch",
"=",
"None",
")",
":",
"_log",
".",
"debug",
"(",
"\"ZK: Getting {path}\"",
".",
"format",
"(",
"path",
"=",
"path",
")",
",",
")",
"return",
"self",
".",
"zk",
".",
"get",
"(",
"path",
",",
"... | 33.833333 | 0.009615 |
def xerrorbar(self, canvas, X, Y, error, color=None, label=None, **kwargs):
"""
Make an errorbar along the xaxis for points at (X,Y) on the canvas.
if error is two dimensional, the lower error is error[:,0] and
the upper error is error[:,1]
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library") | [
"def",
"xerrorbar",
"(",
"self",
",",
"canvas",
",",
"X",
",",
"Y",
",",
"error",
",",
"color",
"=",
"None",
",",
"label",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"Implement all plot functions in AbstractPlottin... | 53 | 0.008247 |
def attach_image(field, nested_fields, page, record_keeper=None):
'''
Returns a function that attaches an image to page if it exists
Currenlty assumes that images have already been imported and info
has been stored in record_keeper
'''
if (field in nested_fields) and nested_fields[field]:
foreign_image_id = nested_fields[field]["id"]
# Handle the following
# record keeper may not exist
# record keeper may not have image ref
if record_keeper:
try:
local_image_id = record_keeper.get_local_image(
foreign_image_id)
local_image = Image.objects.get(id=local_image_id)
setattr(page, field, local_image)
except ObjectDoesNotExist:
raise ObjectDoesNotExist(
("executing attach_image: local image referenced"
"in record_keeper does not actually exist."),
None)
except Exception:
raise
else:
raise Exception(
("Attempted to attach image without record_keeper. "
"This functionality is not yet implemented")) | [
"def",
"attach_image",
"(",
"field",
",",
"nested_fields",
",",
"page",
",",
"record_keeper",
"=",
"None",
")",
":",
"if",
"(",
"field",
"in",
"nested_fields",
")",
"and",
"nested_fields",
"[",
"field",
"]",
":",
"foreign_image_id",
"=",
"nested_fields",
"["... | 41 | 0.000822 |
async def create_redis(address, *, db=None, password=None, ssl=None,
encoding=None, commands_factory=Redis,
parser=None, timeout=None,
connection_cls=None, loop=None):
"""Creates high-level Redis interface.
This function is a coroutine.
"""
conn = await create_connection(address, db=db,
password=password,
ssl=ssl,
encoding=encoding,
parser=parser,
timeout=timeout,
connection_cls=connection_cls,
loop=loop)
return commands_factory(conn) | [
"async",
"def",
"create_redis",
"(",
"address",
",",
"*",
",",
"db",
"=",
"None",
",",
"password",
"=",
"None",
",",
"ssl",
"=",
"None",
",",
"encoding",
"=",
"None",
",",
"commands_factory",
"=",
"Redis",
",",
"parser",
"=",
"None",
",",
"timeout",
... | 44.529412 | 0.001294 |
def update(did):
"""Update DDO of an existing asset
---
tags:
- ddo
consumes:
- application/json
parameters:
- in: body
name: body
required: true
description: DDO of the asset.
schema:
type: object
required:
- "@context"
- created
- id
- publicKey
- authentication
- proof
- service
properties:
"@context":
description:
example: https://w3id.org/future-method/v1
type: string
id:
description: ID of the asset.
example: did:op:123456789abcdefghi
type: string
created:
description: date of ddo creation.
example: "2016-02-08T16:02:20Z"
type: string
publicKey:
type: array
description: List of public keys.
example: [{"id": "did:op:123456789abcdefghi#keys-1"},
{"type": "Ed25519VerificationKey2018"},
{"owner": "did:op:123456789abcdefghi"},
{"publicKeyBase58": "H3C2AVvLMv6gmMNam3uVAjZpfkcJCwDwnZn6z3wXmqPV"}]
authentication:
type: array
description: List of authentication mechanisms.
example: [{"type": "RsaSignatureAuthentication2018"},
{"publicKey": "did:op:123456789abcdefghi#keys-1"}]
proof:
type: dictionary
description: Information about the creation and creator of the asset.
example: {"type": "UUIDSignature",
"created": "2016-02-08T16:02:20Z",
"creator": "did:example:8uQhQMGzWxR8vw5P3UWH1ja",
"signatureValue": "QNB13Y7Q9...1tzjn4w=="
}
service:
type: array
description: List of services.
example: [{"type": "Access",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/consume?pubKey=${
pubKey}&serviceId={serviceId}&url={url}"},
{"type": "Compute",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/compute?pubKey=${
pubKey}&serviceId={serviceId}&algo={algo}&container={container}"},
{
"type": "Metadata",
"serviceDefinitionId": "2",
"serviceEndpoint":
"http://myaquarius.org/api/v1/provider/assets/metadata/{did}",
"metadata": {
"base": {
"name": "UK Weather information 2011",
"type": "dataset",
"description": "Weather information of UK including
temperature and humidity",
"dateCreated": "2012-02-01T10:55:11Z",
"author": "Met Office",
"license": "CC-BY",
"copyrightHolder": "Met Office",
"compression": "zip",
"workExample": "stationId,latitude,longitude,datetime,
temperature,humidity/n423432fsd,51.509865,-0.118092,
2011-01-01T10:55:11+00:00,7.2,68",
"files": [{
"contentLength": "4535431",
"contentType": "text/csv",
"encoding": "UTF-8",
"compression": "zip",
"resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932"
}
],
"encryptedFiles": "0x098213xzckasdf089723hjgdasfkjgasfv",
"links": [{
"name": "Sample of Asset Data",
"type": "sample",
"url": "https://foo.com/sample.csv"
},
{
"name": "Data Format Definition",
"type": "format",
"AssetID":
"4d517500da0acb0d65a716f61330969334630363ce4a6a9d39691026ac7908ea"
}
],
"inLanguage": "en",
"tags": "weather, uk, 2011, temperature, humidity",
"price": 10,
"checksum":
"38803b9e6f04fce3fba4b124524672592264d31847182c689095a081c9e85262"
},
"curation": {
"rating": 0.93,
"numVotes": 123,
"schema": "Binary Voting"
},
"additionalInformation": {
"updateFrecuency": "yearly",
"structuredMarkup": [{
"uri": "http://skos.um.es/unescothes/C01194/jsonld",
"mediaType": "application/ld+json"
},
{
"uri": "http://skos.um.es/unescothes/C01194/turtle",
"mediaType": "text/turtle"
}
]
}
}
}]
responses:
200:
description: Asset successfully updated.
201:
description: Asset successfully registered.
400:
description: One of the required attributes is missing.
404:
description: Invalid asset data.
500:
description: Error
"""
required_attributes = ['@context', 'created', 'id', 'publicKey', 'authentication', 'proof',
'service']
required_metadata_base_attributes = ['name', 'dateCreated', 'author', 'license',
'price', 'encryptedFiles', 'type', 'checksum']
required_metadata_curation_attributes = ['rating', 'numVotes']
assert isinstance(request.json, dict), 'invalid payload format.'
data = request.json
if not data:
logger.error(f'request body seems empty, expecting {required_attributes}')
return 400
msg, status = check_required_attributes(required_attributes, data, 'update')
if msg:
return msg, status
msg, status = check_required_attributes(required_metadata_base_attributes,
_get_base_metadata(data['service']), 'update')
if msg:
return msg, status
msg, status = check_required_attributes(required_metadata_curation_attributes,
_get_curation_metadata(data['service']), 'update')
if msg:
return msg, status
msg, status = check_no_urls_in_files(_get_base_metadata(data['service']), 'register')
if msg:
return msg, status
msg, status = validate_date_format(data['created'])
if msg:
return msg, status
_record = dict()
_record = copy.deepcopy(data)
_record['created'] = datetime.strptime(data['created'], '%Y-%m-%dT%H:%M:%SZ')
try:
if dao.get(did) is None:
register()
return _sanitize_record(_record), 201
else:
for service in _record['service']:
service_id = int(service['serviceDefinitionId'])
if service['type'] == 'Metadata':
_record['service'][service_id]['metadata']['base']['datePublished'] = _get_date(
dao.get(did)['service'])
dao.update(_record, did)
return Response(_sanitize_record(_record), 200, content_type='application/json')
except Exception as err:
return f'Some error: {str(err)}', 500 | [
"def",
"update",
"(",
"did",
")",
":",
"required_attributes",
"=",
"[",
"'@context'",
",",
"'created'",
",",
"'id'",
",",
"'publicKey'",
",",
"'authentication'",
",",
"'proof'",
",",
"'service'",
"]",
"required_metadata_base_attributes",
"=",
"[",
"'name'",
",",... | 46.203125 | 0.002428 |
def adjust_weights_discrepancy(self, resfile=None,original_ceiling=True):
"""adjusts the weights of each non-zero weight observation based
on the residual in the pest residual file so each observations contribution
to phi is 1.0
Parameters
----------
resfile : str
residual file name. If None, try to use a residual file
with the Pst case name. Default is None
original_ceiling : bool
flag to keep weights from increasing - this is generally a good idea.
Default is True
"""
if resfile is not None:
self.resfile = resfile
self.__res = None
obs = self.observation_data.loc[self.nnz_obs_names,:]
swr = (self.res.loc[self.nnz_obs_names,:].residual * obs.weight)**2
factors = (1.0/swr).apply(np.sqrt)
if original_ceiling:
factors = factors.apply(lambda x: 1.0 if x > 1.0 else x)
self.observation_data.loc[self.nnz_obs_names,"weight"] *= factors | [
"def",
"adjust_weights_discrepancy",
"(",
"self",
",",
"resfile",
"=",
"None",
",",
"original_ceiling",
"=",
"True",
")",
":",
"if",
"resfile",
"is",
"not",
"None",
":",
"self",
".",
"resfile",
"=",
"resfile",
"self",
".",
"__res",
"=",
"None",
"obs",
"=... | 42.416667 | 0.008646 |
def verbose(f):
""" Add verbose flags and add logging handlers
"""
@click.pass_context
def new_func(ctx, *args, **kwargs):
global log
verbosity = ["critical", "error", "warn", "info", "debug"][
int(min(ctx.obj.get("verbose", 0), 4))
]
log.setLevel(getattr(logging, verbosity.upper()))
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
ch = logging.StreamHandler()
ch.setLevel(getattr(logging, verbosity.upper()))
ch.setFormatter(formatter)
log.addHandler(ch)
# GrapheneAPI logging
if ctx.obj.get("verbose", 0) > 4:
verbosity = ["critical", "error", "warn", "info", "debug"][
int(min(ctx.obj.get("verbose", 4) - 4, 4))
]
log = logging.getLogger("grapheneapi")
log.setLevel(getattr(logging, verbosity.upper()))
log.addHandler(ch)
if ctx.obj.get("verbose", 0) > 8:
verbosity = ["critical", "error", "warn", "info", "debug"][
int(min(ctx.obj.get("verbose", 8) - 8, 4))
]
log = logging.getLogger("graphenebase")
log.setLevel(getattr(logging, verbosity.upper()))
log.addHandler(ch)
return ctx.invoke(f, *args, **kwargs)
return update_wrapper(new_func, f) | [
"def",
"verbose",
"(",
"f",
")",
":",
"@",
"click",
".",
"pass_context",
"def",
"new_func",
"(",
"ctx",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"global",
"log",
"verbosity",
"=",
"[",
"\"critical\"",
",",
"\"error\"",
",",
"\"warn\"",
",... | 34.897436 | 0.000715 |
def _update_trace_info(self, fields, hm):
"""Parses a trace line and updates the :attr:`status_info` attribute.
Parameters
----------
fields : list
List of the tab-seperated elements of the trace line
hm : dict
Maps the column IDs to their position in the fields argument.
This dictionary object is retrieve from :func:`_header_mapping`.
"""
process = fields[hm["process"]]
if process not in self.processes:
return
# Get information from a single line of trace file
info = dict((column, fields[pos]) for column, pos in hm.items())
# The headers that will be used to populate the process
process_tag_headers = ["realtime", "rss", "rchar", "wchar"]
for h in process_tag_headers:
# In the rare occasion the tag is parsed first in the trace
# file than the log file, add the new tag.
if info["tag"] not in self.process_tags[process]:
# If the 'start' tag is present in the trace, use that
# information. If not, it will be parsed in the log file.
try:
timestart = info["start"].split()[1]
except KeyError:
timestart = None
self.process_tags[process][info["tag"]] = {
"workdir": self._expand_path(info["hash"]),
"start": timestart
}
if h in info and info["tag"] != "-":
if h != "realtime" and info[h] != "-":
self.process_tags[process][info["tag"]][h] = \
round(self._size_coverter(info[h]), 2)
else:
self.process_tags[process][info["tag"]][h] = info[h]
# Set allocated cpu and memory information to process
if "cpus" in info and not self.processes[process]["cpus"]:
self.processes[process]["cpus"] = info["cpus"]
if "memory" in info and not self.processes[process]["memory"]:
try:
self.processes[process]["memory"] = self._size_coverter(
info["memory"])
except ValueError:
self.processes[process]["memory"] = None
if info["hash"] in self.stored_ids:
return
# If the task hash code is provided, expand it to the work directory
# and add a new entry
if "hash" in info:
hs = info["hash"]
info["work_dir"] = self._expand_path(hs)
if "tag" in info:
tag = info["tag"]
if tag != "-" and tag not in self.samples and \
tag.split()[0] not in self.samples:
self.samples.append(tag)
self.trace_info[process].append(info)
self.stored_ids.append(info["hash"]) | [
"def",
"_update_trace_info",
"(",
"self",
",",
"fields",
",",
"hm",
")",
":",
"process",
"=",
"fields",
"[",
"hm",
"[",
"\"process\"",
"]",
"]",
"if",
"process",
"not",
"in",
"self",
".",
"processes",
":",
"return",
"# Get information from a single line of tra... | 39.138889 | 0.000692 |
def generate_salt_cmd(target, module, args=None, kwargs=None):
"""
Generates a command (the arguments) for the `salt` or `salt-ssh` CLI
"""
args = args or []
kwargs = kwargs or {}
target = target or '*'
target = '"%s"' % target
cmd = [target, module]
for arg in args:
cmd.append(arg)
for key in kwargs:
cmd.append('{0}={1}'.format(key, kwargs[key]))
return cmd | [
"def",
"generate_salt_cmd",
"(",
"target",
",",
"module",
",",
"args",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"args",
"=",
"args",
"or",
"[",
"]",
"kwargs",
"=",
"kwargs",
"or",
"{",
"}",
"target",
"=",
"target",
"or",
"'*'",
"target",
... | 29.071429 | 0.002381 |
def eigenvectors_rev(T, k, right=True, ncv=None, mu=None):
r"""Compute eigenvectors of reversible transition matrix.
Parameters
----------
T : (M, M) scipy.sparse matrix
Transition matrix (stochastic matrix)
k : int
Number of eigenvalues to compute
right : bool, optional
If True compute right eigenvectors, left eigenvectors otherwise
ncv : int, optional
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
mu : (M,) ndarray, optional
Stationary distribution of T
Returns
-------
eigvec : (M, k) ndarray
k-eigenvectors of T
"""
if mu is None:
mu = stationary_distribution(T)
""" symmetrize T """
smu = np.sqrt(mu)
D = diags(smu, 0)
Dinv = diags(1.0/smu, 0)
S = (D.dot(T)).dot(Dinv)
"""Compute eigenvalues, eigenvecs using a solver for
symmetric/hermititan eigenproblems"""
val, eigvec = scipy.sparse.linalg.eigsh(S, k=k, ncv=ncv, which='LM',
return_eigenvectors=True)
"""Sort eigenvectors"""
ind = np.argsort(np.abs(val))[::-1]
eigvec = eigvec[:, ind]
if right:
return eigvec / smu[:, np.newaxis]
else:
return eigvec * smu[:, np.newaxis] | [
"def",
"eigenvectors_rev",
"(",
"T",
",",
"k",
",",
"right",
"=",
"True",
",",
"ncv",
"=",
"None",
",",
"mu",
"=",
"None",
")",
":",
"if",
"mu",
"is",
"None",
":",
"mu",
"=",
"stationary_distribution",
"(",
"T",
")",
"\"\"\" symmetrize T \"\"\"",
"smu"... | 31.243902 | 0.000757 |
def download(url: str, filename: str,
skip_cert_verify: bool = True) -> None:
"""
Downloads a URL to a file.
Args:
url: URL to download from
filename: file to save to
skip_cert_verify: skip SSL certificate check?
"""
log.info("Downloading from {} to {}", url, filename)
# urllib.request.urlretrieve(url, filename)
# ... sometimes fails (e.g. downloading
# https://www.openssl.org/source/openssl-1.1.0g.tar.gz under Windows) with:
# ssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:777) # noqa
# ... due to this certificate root problem (probably because OpenSSL
# [used by Python] doesn't play entirely by the same rules as others?):
# https://stackoverflow.com/questions/27804710
# So:
ctx = ssl.create_default_context() # type: ssl.SSLContext
if skip_cert_verify:
log.debug("Skipping SSL certificate check for " + url)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with urllib.request.urlopen(url, context=ctx) as u, open(filename,
'wb') as f: # noqa
f.write(u.read()) | [
"def",
"download",
"(",
"url",
":",
"str",
",",
"filename",
":",
"str",
",",
"skip_cert_verify",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"log",
".",
"info",
"(",
"\"Downloading from {} to {}\"",
",",
"url",
",",
"filename",
")",
"# urllib.request... | 41.068966 | 0.00082 |
def search(query, query_type=DEFAULT_QUERY_TYPE):
"""Search database using parsed query.
Executes a database search query from the given ``query``
(a ``Query`` object) and optionally accepts a list of search weights.
By default, the search results are ordered by weight.
:param query: containing terms, filters, and sorts.
:type query: Query
:returns: a sequence of records that match the query conditions
:rtype: QueryResults (which is a sequence of QueryRecord objects)
"""
# Build the SQL statement.
statement, arguments = _build_search(query)
# Execute the SQL.
if statement is None and arguments is None:
return QueryResults([], [], 'AND')
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
cursor.execute(statement, arguments)
search_results = cursor.fetchall()
# Wrap the SQL results.
return QueryResults(search_results, query, query_type) | [
"def",
"search",
"(",
"query",
",",
"query_type",
"=",
"DEFAULT_QUERY_TYPE",
")",
":",
"# Build the SQL statement.",
"statement",
",",
"arguments",
"=",
"_build_search",
"(",
"query",
")",
"# Execute the SQL.",
"if",
"statement",
"is",
"None",
"and",
"arguments",
... | 38.16 | 0.001022 |
def parse_ndxlist(output):
"""Parse output from make_ndx to build list of index groups::
groups = parse_ndxlist(output)
output should be the standard output from ``make_ndx``, e.g.::
rc,output,junk = gromacs.make_ndx(..., input=('', 'q'), stdout=False, stderr=True)
(or simply use
rc,output,junk = cbook.make_ndx_captured(...)
which presets input, stdout and stderr; of course input can be overriden.)
:Returns:
The function returns a list of dicts (``groups``) with fields
name
name of the groups
nr
number of the group (starts at 0)
natoms
number of atoms in the group
"""
m = NDXLIST.search(output) # make sure we pick up a proper full list
grouplist = m.group('LIST')
return parse_groups(grouplist) | [
"def",
"parse_ndxlist",
"(",
"output",
")",
":",
"m",
"=",
"NDXLIST",
".",
"search",
"(",
"output",
")",
"# make sure we pick up a proper full list",
"grouplist",
"=",
"m",
".",
"group",
"(",
"'LIST'",
")",
"return",
"parse_groups",
"(",
"grouplist",
")"
] | 26.766667 | 0.002404 |
def deparagraph(element, doc):
"""Panflute filter function that converts content wrapped in a Para to
Plain.
Use this filter with pandoc as::
pandoc [..] --filter=lsstprojectmeta-deparagraph
Only lone paragraphs are affected. Para elements with siblings (like a
second Para) are left unaffected.
This filter is useful for processing strings like titles or author names so
that the output isn't wrapped in paragraph tags. For example, without
this filter, pandoc converts a string ``"The title"`` to
``<p>The title</p>`` in HTML. These ``<p>`` tags aren't useful if you
intend to put the title text in ``<h1>`` tags using your own templating
system.
"""
if isinstance(element, Para):
# Check if siblings exist; don't process the paragraph in that case.
if element.next is not None:
return element
elif element.prev is not None:
return element
# Remove the Para wrapper from the lone paragraph.
# `Plain` is a container that isn't rendered as a paragraph.
return Plain(*element.content) | [
"def",
"deparagraph",
"(",
"element",
",",
"doc",
")",
":",
"if",
"isinstance",
"(",
"element",
",",
"Para",
")",
":",
"# Check if siblings exist; don't process the paragraph in that case.",
"if",
"element",
".",
"next",
"is",
"not",
"None",
":",
"return",
"elemen... | 39.071429 | 0.000892 |
def _address_rxp(self, addr):
""" Create a regex string for addresses, that matches several representations:
- with(out) '0x' prefix
- `pex` version
This function takes care of maintaining additional lookup keys for substring matches.
In case the given string is no address, it returns the original string.
"""
try:
addr = to_checksum_address(addr)
rxp = '(?:0x)?' + pex(address_checksum_and_decode(addr)) + f'(?:{addr.lower()[10:]})?'
self._extra_keys[pex(address_checksum_and_decode(addr))] = addr.lower()
self._extra_keys[addr[2:].lower()] = addr.lower()
except ValueError:
rxp = addr
return rxp | [
"def",
"_address_rxp",
"(",
"self",
",",
"addr",
")",
":",
"try",
":",
"addr",
"=",
"to_checksum_address",
"(",
"addr",
")",
"rxp",
"=",
"'(?:0x)?'",
"+",
"pex",
"(",
"address_checksum_and_decode",
"(",
"addr",
")",
")",
"+",
"f'(?:{addr.lower()[10:]})?'",
"... | 48.8 | 0.009383 |
def version(self, pretty=False, best=False):
"""
Return the version of the OS distribution, as a string.
For details, see :func:`distro.version`.
"""
versions = [
self.os_release_attr('version_id'),
self.lsb_release_attr('release'),
self.distro_release_attr('version_id'),
self._parse_distro_release_content(
self.os_release_attr('pretty_name')).get('version_id', ''),
self._parse_distro_release_content(
self.lsb_release_attr('description')).get('version_id', ''),
self.uname_attr('release')
]
version = ''
if best:
# This algorithm uses the last version in priority order that has
# the best precision. If the versions are not in conflict, that
# does not matter; otherwise, using the last one instead of the
# first one might be considered a surprise.
for v in versions:
if v.count(".") > version.count(".") or version == '':
version = v
else:
for v in versions:
if v != '':
version = v
break
if pretty and version and self.codename():
version = u'{0} ({1})'.format(version, self.codename())
return version | [
"def",
"version",
"(",
"self",
",",
"pretty",
"=",
"False",
",",
"best",
"=",
"False",
")",
":",
"versions",
"=",
"[",
"self",
".",
"os_release_attr",
"(",
"'version_id'",
")",
",",
"self",
".",
"lsb_release_attr",
"(",
"'release'",
")",
",",
"self",
"... | 40.787879 | 0.001451 |
def get_archives_to_prune(archives, hook_data):
"""Return list of keys to delete."""
files_to_skip = []
for i in ['current_archive_filename', 'old_archive_filename']:
if hook_data.get(i):
files_to_skip.append(hook_data[i])
archives.sort(key=itemgetter('LastModified'),
reverse=False) # sort from oldest to newest
# Drop all but last 15 files
return [i['Key'] for i in archives[:-15] if i['Key'] not in files_to_skip] | [
"def",
"get_archives_to_prune",
"(",
"archives",
",",
"hook_data",
")",
":",
"files_to_skip",
"=",
"[",
"]",
"for",
"i",
"in",
"[",
"'current_archive_filename'",
",",
"'old_archive_filename'",
"]",
":",
"if",
"hook_data",
".",
"get",
"(",
"i",
")",
":",
"fil... | 47 | 0.002088 |
def apply_host_template(resource_root, name, cluster_name, host_ids, start_roles):
"""
Apply a host template identified by name on the specified hosts and
optionally start them.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@param host_ids: List of host ids.
@param start_roles: Whether to start the created roles or not.
@return: An ApiCommand object.
@since: API v3
"""
host_refs = []
for host_id in host_ids:
host_refs.append(ApiHostRef(resource_root, host_id))
params = {"startRoles" : start_roles}
return call(resource_root.post,
APPLY_HOST_TEMPLATE_PATH % (cluster_name, name),
ApiCommand, data=host_refs, params=params, api_version=3) | [
"def",
"apply_host_template",
"(",
"resource_root",
",",
"name",
",",
"cluster_name",
",",
"host_ids",
",",
"start_roles",
")",
":",
"host_refs",
"=",
"[",
"]",
"for",
"host_id",
"in",
"host_ids",
":",
"host_refs",
".",
"append",
"(",
"ApiHostRef",
"(",
"res... | 36.95 | 0.013193 |
def project_events(self, initial_state, domain_events):
"""
Evolves initial state using the sequence of domain events and a mutator function.
"""
return reduce(self._mutator_func or self.mutate, domain_events, initial_state) | [
"def",
"project_events",
"(",
"self",
",",
"initial_state",
",",
"domain_events",
")",
":",
"return",
"reduce",
"(",
"self",
".",
"_mutator_func",
"or",
"self",
".",
"mutate",
",",
"domain_events",
",",
"initial_state",
")"
] | 50.4 | 0.015625 |
def getProgressPercentage(self):
"""Returns the progress percentage of this worksheet
"""
state = api.get_workflow_status_of(self)
if state == "verified":
return 100
steps = 0
query = dict(getWorksheetUID=api.get_uid(self))
analyses = api.search(query, CATALOG_ANALYSIS_LISTING)
max_steps = len(analyses) * 2
for analysis in analyses:
an_state = analysis.review_state
if an_state in ["rejected", "retracted", "cancelled"]:
steps += 2
elif an_state in ["verified", "published"]:
steps += 2
elif an_state == "to_be_verified":
steps += 1
if steps == 0:
return 0
if steps > max_steps:
return 100
return (steps * 100)/max_steps | [
"def",
"getProgressPercentage",
"(",
"self",
")",
":",
"state",
"=",
"api",
".",
"get_workflow_status_of",
"(",
"self",
")",
"if",
"state",
"==",
"\"verified\"",
":",
"return",
"100",
"steps",
"=",
"0",
"query",
"=",
"dict",
"(",
"getWorksheetUID",
"=",
"a... | 34.5 | 0.00235 |
def exit(self):
'''Close connection.'''
self.output.write("Happy hacking!")
self.output.nextLine()
self.output.loseConnection() | [
"def",
"exit",
"(",
"self",
")",
":",
"self",
".",
"output",
".",
"write",
"(",
"\"Happy hacking!\"",
")",
"self",
".",
"output",
".",
"nextLine",
"(",
")",
"self",
".",
"output",
".",
"loseConnection",
"(",
")"
] | 31 | 0.012579 |
def _cache(self, func, func_memory_level=1, **kwargs):
""" Return a joblib.Memory object.
The memory_level determines the level above which the wrapped
function output is cached. By specifying a numeric value for
this level, the user can to control the amount of cache memory
used. This function will cache the function call or not
depending on the cache level.
Parameters
----------
func: function
The function the output of which is to be cached.
memory_level: int
The memory_level from which caching must be enabled for the wrapped
function.
Returns
-------
mem: joblib.Memory
object that wraps the function func. This object may be
a no-op, if the requested level is lower than the value given
to _cache()). For consistency, a joblib.Memory object is always
returned.
"""
verbose = getattr(self, 'verbose', 0)
# Creates attributes if they don't exist
# This is to make creating them in __init__() optional.
if not hasattr(self, "memory_level"):
self.memory_level = 0
if not hasattr(self, "memory"):
self.memory = Memory(cachedir=None, verbose=verbose)
if isinstance(self.memory, _basestring):
self.memory = Memory(cachedir=self.memory, verbose=verbose)
# If cache level is 0 but a memory object has been provided, set
# memory_level to 1 with a warning.
if self.memory_level == 0:
if (isinstance(self.memory, _basestring)
or self.memory.cachedir is not None):
warnings.warn("memory_level is currently set to 0 but "
"a Memory object has been provided. "
"Setting memory_level to 1.")
self.memory_level = 1
return cache(func, self.memory, func_memory_level=func_memory_level,
memory_level=self.memory_level, **kwargs) | [
"def",
"_cache",
"(",
"self",
",",
"func",
",",
"func_memory_level",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"verbose",
"=",
"getattr",
"(",
"self",
",",
"'verbose'",
",",
"0",
")",
"# Creates attributes if they don't exist",
"# This is to make creating the... | 40.72 | 0.000959 |
def get_features_from_equation_file(filename):
"""
returns list of feature names read from equation file given
by ``filename``.
format: one feature per line; comments start with ``#``
Example::
#this is a comment
basefeature
#empty lines are ignored
myfeature
anotherfeature
:param filename:
:return:
"""
features = []
for line in open(filename):
line = line.split('#')[0].strip()
if line:
features.append(line)
return features | [
"def",
"get_features_from_equation_file",
"(",
"filename",
")",
":",
"features",
"=",
"[",
"]",
"for",
"line",
"in",
"open",
"(",
"filename",
")",
":",
"line",
"=",
"line",
".",
"split",
"(",
"'#'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"if",
... | 21.666667 | 0.001842 |
def convert_sklearn_metric_function(scoring):
"""If ``scoring`` is a sklearn metric function, convert it to a
sklearn scorer and return it. Otherwise, return ``scoring`` unchanged."""
if callable(scoring):
module = getattr(scoring, '__module__', None)
if (
hasattr(module, 'startswith') and
module.startswith('sklearn.metrics.') and
not module.startswith('sklearn.metrics.scorer') and
not module.startswith('sklearn.metrics.tests.')
):
return make_scorer(scoring)
return scoring | [
"def",
"convert_sklearn_metric_function",
"(",
"scoring",
")",
":",
"if",
"callable",
"(",
"scoring",
")",
":",
"module",
"=",
"getattr",
"(",
"scoring",
",",
"'__module__'",
",",
"None",
")",
"if",
"(",
"hasattr",
"(",
"module",
",",
"'startswith'",
")",
... | 44.769231 | 0.001684 |
def import_pipeline(url, pipeline_id, auth, json_payload, verify_ssl, overwrite = False):
"""Import a pipeline.
This will completely overwrite the existing pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json payload as a dictionary.
overwrite (bool): overwrite existing pipeline
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
parameters = { 'overwrite' : overwrite }
import_result = requests.post(url + '/' + pipeline_id + '/import', params=parameters,
headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=json_payload)
if import_result.status_code != 200:
logging.error('Import error response: ' + import_result.text)
import_result.raise_for_status()
logging.info('Pipeline import successful.')
return import_result.json() | [
"def",
"import_pipeline",
"(",
"url",
",",
"pipeline_id",
",",
"auth",
",",
"json_payload",
",",
"verify_ssl",
",",
"overwrite",
"=",
"False",
")",
":",
"parameters",
"=",
"{",
"'overwrite'",
":",
"overwrite",
"}",
"import_result",
"=",
"requests",
".",
"pos... | 39.814815 | 0.008174 |
def select_record(self, table, where=None, values=None, orderby=None, limit=None, columns=None):
''' Support these keywords where, values, orderby, limit and columns'''
query = self.schema.query_builder.build_select(table, where, orderby, limit, columns)
return table.to_table(self.execute(query, values), columns=columns) | [
"def",
"select_record",
"(",
"self",
",",
"table",
",",
"where",
"=",
"None",
",",
"values",
"=",
"None",
",",
"orderby",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"columns",
"=",
"None",
")",
":",
"query",
"=",
"self",
".",
"schema",
".",
"quer... | 85.75 | 0.011561 |
def zadd(self, key, score, member, mode, client=None):
"""
Like ZADD, but supports different score update modes, in case the
member already exists in the ZSET:
- "nx": Don't update the score
- "xx": Only update elements that already exist. Never add elements.
- "min": Use the smaller of the given and existing score
- "max": Use the larger of the given and existing score
"""
if mode == 'nx':
f = self._zadd_noupdate
elif mode == 'xx':
f = self._zadd_update_existing
elif mode == 'min':
f = self._zadd_update_min
elif mode == 'max':
f = self._zadd_update_max
else:
raise NotImplementedError('mode "%s" unsupported' % mode)
return f(keys=[key], args=[score, member], client=client) | [
"def",
"zadd",
"(",
"self",
",",
"key",
",",
"score",
",",
"member",
",",
"mode",
",",
"client",
"=",
"None",
")",
":",
"if",
"mode",
"==",
"'nx'",
":",
"f",
"=",
"self",
".",
"_zadd_noupdate",
"elif",
"mode",
"==",
"'xx'",
":",
"f",
"=",
"self",... | 41.7 | 0.002345 |
def grep_iter(target, pattern, **kwargs):
"""
Main grep function, as a memory efficient iterator.
Note: this function does not support the 'quiet' or 'count' flags.
:param target: Target to apply grep on. Can be a single string, an iterable, a function, or an opened file handler.
:param pattern: Grep pattern to search.
:param kwargs: See grep() help for more info.
:return: Next match.
"""
# unify flags (convert shortcuts to full name)
__fix_args(kwargs)
# parse the params that are relevant to this function
f_offset = kwargs.get('byte_offset')
f_line_number = kwargs.get('line_number')
f_trim = kwargs.get('trim')
f_after_context = kwargs.get('after_context')
f_before_context = kwargs.get('before_context')
f_only_matching = kwargs.get('only_matching')
# if target is a callable function, call it first to get value
if callable(target):
target = target()
# if we got a single string convert it to a list
if isinstance(target, _basestring):
target = [target]
# calculate if need to trim end of lines
need_to_trim_eol = not kwargs.get('keep_eol') and hasattr(target, 'readline')
# list of previous lines, used only when f_before_context is set
prev_lines = []
# iterate target and grep
for line_index, line in enumerate(target):
# fix current line
line = __process_line(line, need_to_trim_eol, f_trim)
# do grap
match, offset, endpos = __do_grep(line, pattern, **kwargs)
# nullify return value
value = None
# if matched
if match:
# the textual part we return in response
ret_str = line
# if only return matching
if f_only_matching:
ret_str = ret_str[offset:endpos]
# if 'before_context' is set
if f_before_context:
# make ret_str be a list with previous lines
ret_str = prev_lines + [ret_str]
# if need to return X lines after trailing context
if f_after_context:
# convert return string to list (unless f_before_context is set, in which case its already a list)
if not f_before_context:
ret_str = [ret_str]
# iterate X lines to read after
for i in range(f_after_context):
# if target got next or readline, use next()
# note: unfortunately due to python files next() implementation we can't use tell and seek to
# restore position and not skip next matches.
if hasattr(target, '__next__') or hasattr(target, 'readline'):
try:
val = next(target)
except StopIteration:
break
# if not, try to access next item based on index (for lists)
else:
try:
val = target[line_index+i+1]
except IndexError:
break
# add value to return string
ret_str.append(__process_line(val, need_to_trim_eol, f_trim))
# if requested offset, add offset + line to return list
if f_offset:
value = (offset, ret_str)
# if requested line number, add offset + line to return list
elif f_line_number:
value = (line_index, ret_str)
# default: add line to return list
else:
value = ret_str
# maintain a list of previous lines, if the before-context option is provided
if f_before_context:
prev_lines.append(line)
if len(prev_lines) > f_before_context:
prev_lines.pop(0)
# if we had a match return current value
if value is not None:
yield value
# done iteration
raise StopIteration | [
"def",
"grep_iter",
"(",
"target",
",",
"pattern",
",",
"*",
"*",
"kwargs",
")",
":",
"# unify flags (convert shortcuts to full name)",
"__fix_args",
"(",
"kwargs",
")",
"# parse the params that are relevant to this function",
"f_offset",
"=",
"kwargs",
".",
"get",
"(",... | 34.4 | 0.002211 |
def add_user(self, user_obj):
"""Add a user object to the database
Args:
user_obj(scout.models.User): A dictionary with user information
Returns:
user_info(dict): a copy of what was inserted
"""
LOG.info("Adding user %s to the database", user_obj['email'])
if not '_id' in user_obj:
user_obj['_id'] = user_obj['email']
try:
self.user_collection.insert_one(user_obj)
LOG.debug("User inserted")
except DuplicateKeyError as err:
raise IntegrityError("User {} already exists in database".format(user_obj['email']))
return user_obj | [
"def",
"add_user",
"(",
"self",
",",
"user_obj",
")",
":",
"LOG",
".",
"info",
"(",
"\"Adding user %s to the database\"",
",",
"user_obj",
"[",
"'email'",
"]",
")",
"if",
"not",
"'_id'",
"in",
"user_obj",
":",
"user_obj",
"[",
"'_id'",
"]",
"=",
"user_obj"... | 34.15 | 0.008547 |
def do_windowed(self, line):
"""
Un-fullscreen the current window
"""
self.bot.canvas.sink.trigger_fullscreen_action(False)
print(self.response_prompt, file=self.stdout) | [
"def",
"do_windowed",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"bot",
".",
"canvas",
".",
"sink",
".",
"trigger_fullscreen_action",
"(",
"False",
")",
"print",
"(",
"self",
".",
"response_prompt",
",",
"file",
"=",
"self",
".",
"stdout",
")"
] | 34 | 0.009569 |
def get_model(token_num,
embed_dim,
encoder_num,
decoder_num,
head_num,
hidden_dim,
attention_activation=None,
feed_forward_activation='relu',
dropout_rate=0.0,
use_same_embed=True,
embed_weights=None,
embed_trainable=None,
trainable=True):
"""Get full model without compilation.
:param token_num: Number of distinct tokens.
:param embed_dim: Dimension of token embedding.
:param encoder_num: Number of encoder components.
:param decoder_num: Number of decoder components.
:param head_num: Number of heads in multi-head self-attention.
:param hidden_dim: Hidden dimension of feed forward layer.
:param attention_activation: Activation for multi-head self-attention.
:param feed_forward_activation: Activation for feed-forward layer.
:param dropout_rate: Dropout rate.
:param use_same_embed: Whether to use the same token embedding layer. `token_num`, `embed_weights` and
`embed_trainable` should be lists of two elements if it is False.
:param embed_weights: Initial weights of token embedding.
:param embed_trainable: Whether the token embedding is trainable. It will automatically set to False if the given
value is None when embedding weights has been provided.
:param trainable: Whether the layers are trainable.
:return: Keras model.
"""
if not isinstance(token_num, list):
token_num = [token_num, token_num]
encoder_token_num, decoder_token_num = token_num
if not isinstance(embed_weights, list):
embed_weights = [embed_weights, embed_weights]
encoder_embed_weights, decoder_embed_weights = embed_weights
if encoder_embed_weights is not None:
encoder_embed_weights = [encoder_embed_weights]
if decoder_embed_weights is not None:
decoder_embed_weights = [decoder_embed_weights]
if not isinstance(embed_trainable, list):
embed_trainable = [embed_trainable, embed_trainable]
encoder_embed_trainable, decoder_embed_trainable = embed_trainable
if encoder_embed_trainable is None:
encoder_embed_trainable = encoder_embed_weights is None
if decoder_embed_trainable is None:
decoder_embed_trainable = decoder_embed_weights is None
if use_same_embed:
encoder_embed_layer = decoder_embed_layer = EmbeddingRet(
input_dim=encoder_token_num,
output_dim=embed_dim,
mask_zero=True,
weights=encoder_embed_weights,
trainable=encoder_embed_trainable,
name='Token-Embedding',
)
else:
encoder_embed_layer = EmbeddingRet(
input_dim=encoder_token_num,
output_dim=embed_dim,
mask_zero=True,
weights=encoder_embed_weights,
trainable=encoder_embed_trainable,
name='Encoder-Token-Embedding',
)
decoder_embed_layer = EmbeddingRet(
input_dim=decoder_token_num,
output_dim=embed_dim,
mask_zero=True,
weights=decoder_embed_weights,
trainable=decoder_embed_trainable,
name='Decoder-Token-Embedding',
)
encoder_input = keras.layers.Input(shape=(None,), name='Encoder-Input')
encoder_embed = TrigPosEmbedding(
mode=TrigPosEmbedding.MODE_ADD,
name='Encoder-Embedding',
)(encoder_embed_layer(encoder_input)[0])
encoded_layer = get_encoders(
encoder_num=encoder_num,
input_layer=encoder_embed,
head_num=head_num,
hidden_dim=hidden_dim,
attention_activation=attention_activation,
feed_forward_activation=feed_forward_activation,
dropout_rate=dropout_rate,
trainable=trainable,
)
decoder_input = keras.layers.Input(shape=(None,), name='Decoder-Input')
decoder_embed, decoder_embed_weights = decoder_embed_layer(decoder_input)
decoder_embed = TrigPosEmbedding(
mode=TrigPosEmbedding.MODE_ADD,
name='Decoder-Embedding',
)(decoder_embed)
decoded_layer = get_decoders(
decoder_num=decoder_num,
input_layer=decoder_embed,
encoded_layer=encoded_layer,
head_num=head_num,
hidden_dim=hidden_dim,
attention_activation=attention_activation,
feed_forward_activation=feed_forward_activation,
dropout_rate=dropout_rate,
trainable=trainable,
)
dense_layer = EmbeddingSim(
trainable=trainable,
name='Output',
)([decoded_layer, decoder_embed_weights])
return keras.models.Model(inputs=[encoder_input, decoder_input], outputs=dense_layer) | [
"def",
"get_model",
"(",
"token_num",
",",
"embed_dim",
",",
"encoder_num",
",",
"decoder_num",
",",
"head_num",
",",
"hidden_dim",
",",
"attention_activation",
"=",
"None",
",",
"feed_forward_activation",
"=",
"'relu'",
",",
"dropout_rate",
"=",
"0.0",
",",
"us... | 40.643478 | 0.001253 |
def mount(directory, lower_dir, upper_dir, mount_table=None):
"""Creates a mount"""
return OverlayFS.mount(directory, lower_dir, upper_dir,
mount_table=mount_table) | [
"def",
"mount",
"(",
"directory",
",",
"lower_dir",
",",
"upper_dir",
",",
"mount_table",
"=",
"None",
")",
":",
"return",
"OverlayFS",
".",
"mount",
"(",
"directory",
",",
"lower_dir",
",",
"upper_dir",
",",
"mount_table",
"=",
"mount_table",
")"
] | 45.25 | 0.01087 |
def call_for_each_tower(self, tower_fn):
"""
Call the function `tower_fn` under :class:`TowerContext` for each tower.
Returns:
a list, contains the return values of `tower_fn` on each tower.
"""
ps_device = 'cpu' if len(self.towers) >= 4 else 'gpu'
raw_devices = ['/gpu:{}'.format(k) for k in self.towers]
if ps_device == 'gpu':
devices = [LeastLoadedDeviceSetter(d, raw_devices) for d in raw_devices]
else:
devices = [tf.train.replica_device_setter(
worker_device=d, ps_device='/cpu:0', ps_tasks=1) for d in raw_devices]
return DataParallelBuilder.build_on_towers(self.towers, tower_fn, devices) | [
"def",
"call_for_each_tower",
"(",
"self",
",",
"tower_fn",
")",
":",
"ps_device",
"=",
"'cpu'",
"if",
"len",
"(",
"self",
".",
"towers",
")",
">=",
"4",
"else",
"'gpu'",
"raw_devices",
"=",
"[",
"'/gpu:{}'",
".",
"format",
"(",
"k",
")",
"for",
"k",
... | 41.588235 | 0.008299 |
def Bond(rhol, rhog, sigma, L):
r'''Calculates Bond number, `Bo` also known as Eotvos number,
for a fluid with the given liquid and gas densities, surface tension,
and geometric parameter (usually length).
.. math::
Bo = \frac{g(\rho_l-\rho_g)L^2}{\sigma}
Parameters
----------
rhol : float
Density of liquid, [kg/m^3]
rhog : float
Density of gas, [kg/m^3]
sigma : float
Surface tension, [N/m]
L : float
Characteristic length, [m]
Returns
-------
Bo : float
Bond number []
Examples
--------
>>> Bond(1000., 1.2, .0589, 2)
665187.2339558573
References
----------
.. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
Eighth Edition. McGraw-Hill Professional, 2007.
'''
return (g*(rhol-rhog)*L**2/sigma) | [
"def",
"Bond",
"(",
"rhol",
",",
"rhog",
",",
"sigma",
",",
"L",
")",
":",
"return",
"(",
"g",
"*",
"(",
"rhol",
"-",
"rhog",
")",
"*",
"L",
"**",
"2",
"/",
"sigma",
")"
] | 23.885714 | 0.001149 |
def extract_metric_name(self, metric_name):
"""
Method to extract SAR metric names from the section given in the config. The SARMetric class assumes that
the section name will contain the SAR types listed in self.supported_sar_types tuple
:param str metric_name: Section name from the config
:return: str which identifies what kind of SAR metric the section represents
"""
for metric_type in self.supported_sar_types:
if metric_type in metric_name:
return metric_type
logger.error('Section [%s] does not contain a valid metric type, using type: "SAR-generic". Naarad works better '
'if it knows the metric type. Valid SAR metric names are: %s', metric_name, self.supported_sar_types)
return 'SAR-generic' | [
"def",
"extract_metric_name",
"(",
"self",
",",
"metric_name",
")",
":",
"for",
"metric_type",
"in",
"self",
".",
"supported_sar_types",
":",
"if",
"metric_type",
"in",
"metric_name",
":",
"return",
"metric_type",
"logger",
".",
"error",
"(",
"'Section [%s] does n... | 54.214286 | 0.009067 |
def nameop_set_collided( cls, nameop, history_id_key, history_id ):
"""
Mark a nameop as collided
"""
nameop['__collided__'] = True
nameop['__collided_history_id_key__'] = history_id_key
nameop['__collided_history_id__'] = history_id | [
"def",
"nameop_set_collided",
"(",
"cls",
",",
"nameop",
",",
"history_id_key",
",",
"history_id",
")",
":",
"nameop",
"[",
"'__collided__'",
"]",
"=",
"True",
"nameop",
"[",
"'__collided_history_id_key__'",
"]",
"=",
"history_id_key",
"nameop",
"[",
"'__collided_... | 39.428571 | 0.01773 |
def input_text_with_keyboard_emulation(self, text):
"""
Works around the problem of emulating user interactions with text inputs.
Emulates a key-down action on the first char of the input. This way, implementations which
require key-down event to trigger auto-suggest are testable.
Then the chains sends the rest of the text and releases the key.
"""
ActionChains(self.driver).key_down(text).key_up(Keys.CONTROL).perform() | [
"def",
"input_text_with_keyboard_emulation",
"(",
"self",
",",
"text",
")",
":",
"ActionChains",
"(",
"self",
".",
"driver",
")",
".",
"key_down",
"(",
"text",
")",
".",
"key_up",
"(",
"Keys",
".",
"CONTROL",
")",
".",
"perform",
"(",
")"
] | 60.875 | 0.008097 |
def launch_command(command, parameter=''):
    '''Run a cozy-monitor command and return its standard output.

    :param command: The cozy-monitor sub-command to launch
    :param parameter: A single parameter or a list of parameters to pass
                      to cozy-monitor; defaults to no parameter
    :returns: the combined stdout of every invocation, as text
    '''
    result = ''
    # Normalise a lone parameter into a list so we can iterate uniformly.
    if not isinstance(parameter, list):
        parameter = [parameter]
    # Run cozy-monitor once per parameter and accumulate its stdout.
    for name in parameter:
        # SECURITY: shell=True interpolates `command` and `name` into a
        # shell string -- callers must not pass untrusted input here.
        proc = subprocess.Popen('cozy-monitor {} {}'.format(command, name),
                                shell=True,
                                stdout=subprocess.PIPE)
        # BUG FIX: on Python 3, stdout.read() returns bytes, and
        # `str += bytes` raises TypeError; decode before concatenating.
        result += proc.stdout.read().decode('utf-8', errors='replace')
    return result
"def",
"launch_command",
"(",
"command",
",",
"parameter",
"=",
"''",
")",
":",
"result",
"=",
"''",
"# Transform into an array if it not one",
"if",
"not",
"isinstance",
"(",
"parameter",
",",
"list",
")",
":",
"parameter",
"=",
"[",
"parameter",
"]",
"# Iter... | 35.842105 | 0.001431 |
def record(self):
    # type: () -> bytes
    '''
    Generate the byte representation of this UDF NSR Volume Structure.

    Parameters:
     None.
    Returns:
     A string representing this UDF NSR Volume Structure.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF NSR Volume Structure not initialized')
    # Structure is: flags byte (0), the standard identifier, version (1),
    # then zero padding out to the full 2048-byte descriptor.
    padding = b'\x00' * 2041
    return struct.pack(self.FMT, 0, self.standard_ident, 1, padding)
"def",
"record",
"(",
"self",
")",
":",
"# type: () -> bytes",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'UDF NSR Volume Structure not initialized'",
")",
"return",
"struct",
".",
"pack",
"(",
"sel... | 34.071429 | 0.008163 |
def get_free_memory():
    """Return current free memory on the machine.

    Currently supported for Windows, Linux, MacOS; on any other platform
    the function falls through and returns None.

    :returns: Free memory in MB unit
    :rtype: int
    """
    platform = sys.platform
    if 'win32' in platform:
        return get_free_memory_win()
    if 'linux' in platform:
        return get_free_memory_linux()
    if 'darwin' in platform:
        return get_free_memory_osx()
"def",
"get_free_memory",
"(",
")",
":",
"if",
"'win32'",
"in",
"sys",
".",
"platform",
":",
"# windows",
"return",
"get_free_memory_win",
"(",
")",
"elif",
"'linux'",
"in",
"sys",
".",
"platform",
":",
"# linux",
"return",
"get_free_memory_linux",
"(",
")",
... | 25.411765 | 0.002232 |
def url_name_for_action(self, action):
    """
    Build the reverse URL name for *action*, namespaced by the
    lowercased module and model names of this object.
    """
    module_part = self.module_name.lower()
    model_part = self.model_name.lower()
    return "{0}.{1}_{2}".format(module_part, model_part, action)
"def",
"url_name_for_action",
"(",
"self",
",",
"action",
")",
":",
"return",
"\"%s.%s_%s\"",
"%",
"(",
"self",
".",
"module_name",
".",
"lower",
"(",
")",
",",
"self",
".",
"model_name",
".",
"lower",
"(",
")",
",",
"action",
")"
] | 39 | 0.015075 |
def main():
    """Command-line entry point: start the ontospy web server and report timing."""
    # boilerplate banner and home-repo setup
    print("Ontospy " + ontospy.VERSION)
    ontospy.get_or_create_home_repo()
    models_location = ontospy.get_home_location()
    opts, args = parse_options()
    started = time.time()
    # switch dir and serve until interrupted
    startServer(port=DEFAULT_PORT, location=models_location)
    # report how long the server ran
    elapsed = time.time() - started
    printDebug("-" * 10)
    printDebug("Time: %0.2fs" % elapsed)
"def",
"main",
"(",
")",
":",
"# boilerplate",
"print",
"(",
"\"Ontospy \"",
"+",
"ontospy",
".",
"VERSION",
")",
"ontospy",
".",
"get_or_create_home_repo",
"(",
")",
"ONTOSPY_LOCAL_MODELS",
"=",
"ontospy",
".",
"get_home_location",
"(",
")",
"opts",
",",
"arg... | 23.736842 | 0.036247 |
def is_import(self):
    """Return True when the stage file was created with `dvc import`.

    An import stage has no command and exactly one dependency and one
    output.
    """
    if self.cmd:
        return False
    return len(self.deps) == 1 and len(self.outs) == 1
"def",
"is_import",
"(",
"self",
")",
":",
"return",
"not",
"self",
".",
"cmd",
"and",
"len",
"(",
"self",
".",
"deps",
")",
"==",
"1",
"and",
"len",
"(",
"self",
".",
"outs",
")",
"==",
"1"
] | 54 | 0.012195 |
def get_person_usage(person, project, start, end):
    """Return a (cpu hours, job count) tuple for a person in a project.

    Looks up today's cached usage row for the given date range and falls
    back to (0, 0) when no cache entry exists.

    Keyword arguments:
    person -- the person whose usage is requested
    project -- The project the usage is from
    start -- start date
    end -- end date
    """
    today = datetime.date.today()
    try:
        cached = PersonCache.objects.get(
            person=person, project=project, date=today,
            start=start, end=end)
    except PersonCache.DoesNotExist:
        # No cached entry for this person/project/range today.
        return 0, 0
    return cached.cpu_time, cached.no_jobs
"def",
"get_person_usage",
"(",
"person",
",",
"project",
",",
"start",
",",
"end",
")",
":",
"try",
":",
"cache",
"=",
"PersonCache",
".",
"objects",
".",
"get",
"(",
"person",
"=",
"person",
",",
"project",
"=",
"project",
",",
"date",
"=",
"datetime... | 30.588235 | 0.001866 |
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None") | [
"def",
"_normalize_module",
"(",
"module",
",",
"depth",
"=",
"2",
")",
":",
"if",
"inspect",
".",
"ismodule",
"(",
"module",
")",
":",
"return",
"module",
"elif",
"isinstance",
"(",
"module",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"return",
... | 41.888889 | 0.001297 |
def execute_builtin_action(self, p_action_str, p_size=None):
    """
    Execute the built-in action named by p_action_str.

    Currently supported actions are: 'up', 'down', 'home', 'end',
    'first_column', 'last_column', 'prev_column', 'next_column',
    'append_column', 'insert_column', 'edit_column', 'delete_column',
    'copy_column', 'swap_right', 'swap_left', 'postpone', 'postpone_s',
    'pri', 'mark', 'mark_all', 'reset' and 'repeat'.
    """
    # Column-level actions are not handled here; they are forwarded to
    # whoever listens on the 'column_action' signal.
    column_actions = frozenset([
        'first_column', 'last_column', 'prev_column', 'next_column',
        'append_column', 'insert_column', 'edit_column', 'delete_column',
        'copy_column', 'swap_left', 'swap_right', 'reset',
    ])
    if p_action_str in column_actions:
        urwid.emit_signal(self, 'column_action', p_action_str)
    elif p_action_str in ('up', 'down'):
        # Plain cursor movement is delegated to the listbox widget.
        self.listbox.keypress(p_size, p_action_str)
    elif p_action_str == 'home':
        self._scroll_to_top(p_size)
    elif p_action_str == 'end':
        self._scroll_to_bottom(p_size)
    elif p_action_str in ('postpone', 'postpone_s', 'pri'):
        # Accepted but intentionally a no-op in this widget.
        pass
    elif p_action_str == 'mark':
        self._toggle_marked_status()
    elif p_action_str == 'mark_all':
        self._mark_all()
    elif p_action_str == 'repeat':
        self._repeat_cmd()
"def",
"execute_builtin_action",
"(",
"self",
",",
"p_action_str",
",",
"p_size",
"=",
"None",
")",
":",
"column_actions",
"=",
"[",
"'first_column'",
",",
"'last_column'",
",",
"'prev_column'",
",",
"'next_column'",
",",
"'append_column'",
",",
"'insert_column'",
... | 39.97619 | 0.001163 |
def set_value(ctx, key, value):
    """Assigns values to config file entries. If the value is omitted,
    you will be prompted, with the input hidden if it is sensitive.
    \b
    $ ddev config set github.user foo
    New setting:
    [github]
    user = "foo"
    """
    # NOTE: the docstring above doubles as the click help text shown to
    # users -- keep it user-facing, not developer-facing.
    scrubbing = False
    if value is None:
        # Prompt interactively; hide input when the key is a known secret.
        scrubbing = key in SECRET_KEYS
        value = click.prompt('Value for `{}`'.format(key), hide_input=scrubbing)
    if key in ('core', 'extras', 'agent') and not value.startswith('~'):
        # Repo paths are stored absolute (except ~-prefixed home paths).
        value = os.path.abspath(value)
    user_config = new_config = ctx.obj
    user_config.pop('repo_choice', None)
    # Split the dotted key path into a stack: [value, leaf, ..., root],
    # consumed from the end so `key`/`value` walk the path left to right.
    data = [value]
    data.extend(reversed(key.split('.')))
    key = data.pop()
    value = data.pop()
    # Use a separate mapping to show only what has changed in the end
    branch_config_root = branch_config = {}
    # Consider dots as keys
    while data:
        default_branch = {value: ''}
        branch_config[key] = default_branch
        branch_config = branch_config[key]
        # Reuse an existing mapping in the user config, or start a new
        # branch when the current value is not dict-like.
        new_value = new_config.get(key)
        if not hasattr(new_value, 'get'):
            new_value = default_branch
        new_config[key] = new_value
        new_config = new_config[key]
        key = value
        value = data.pop()
    # Coerce the string value into the proper TOML type before storing.
    value = string_to_toml_type(value)
    branch_config[key] = new_config[key] = value
    save_config(user_config)
    # Only echo the changed subtree, with secrets masked if we prompted.
    output_config = scrub_secrets(branch_config_root) if scrubbing else branch_config_root
    echo_success('New setting:')
    echo_info(toml.dumps(output_config).rstrip())
"def",
"set_value",
"(",
"ctx",
",",
"key",
",",
"value",
")",
":",
"scrubbing",
"=",
"False",
"if",
"value",
"is",
"None",
":",
"scrubbing",
"=",
"key",
"in",
"SECRET_KEYS",
"value",
"=",
"click",
".",
"prompt",
"(",
"'Value for `{}`'",
".",
"format",
... | 28.396226 | 0.001927 |
def _setup_metrics(self):
    """
    Start Prometheus metric exposition.

    Ensures ``self.metrics_dir`` exists, points the
    ``prometheus_multiproc_dir`` environment variable at it, removes any
    stale ``*.db`` files left by previous runs, and (when metrics are
    enabled) starts the HTTP exposition server.

    Raises:
        ConfigurationException: if the metrics directory cannot be created.
    """
    path = os.environ.get("prometheus_multiproc_dir")
    if not os.path.exists(self.metrics_dir):
        try:
            log.info("Creating metrics directory")
            os.makedirs(self.metrics_dir)
        except OSError:
            log.error("Failed to create metrics directory!")
            raise ConfigurationException("Failed to create metrics directory!")
        path = self.metrics_dir
    elif path != self.metrics_dir:
        # The environment disagrees with our configuration; our configured
        # directory wins in every branch.
        path = self.metrics_dir
    # Make the multiprocess collector write its files into our directory.
    os.environ['prometheus_multiproc_dir'] = path
    log.info("Cleaning metrics collection directory")
    log.debug("Metrics directory set to: {}".format(path))
    # Drop stale per-process metric databases from earlier runs.
    files = os.listdir(path)
    for f in files:
        if f.endswith(".db"):
            os.remove(os.path.join(path, f))
    log.debug("Starting metrics exposition")
    if self.metrics_enabled:
        registry = CollectorRegistry()
        multiprocess.MultiProcessCollector(registry)
        start_http_server(
            port=self.metrics_port,
            addr=self.metrics_address,
            registry=registry
        )
"def",
"_setup_metrics",
"(",
"self",
")",
":",
"path",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"prometheus_multiproc_dir\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"metrics_dir",
")",
":",
"try",
":",
"log",
".",
... | 39.580645 | 0.002387 |
def _update_secrets(self):
'''update secrets will take a secrets credential file
either located at .sregistry or the environment variable
SREGISTRY_CLIENT_SECRETS and update the current client
secrets as well as the associated API base. For the case of
using Docker Hub, if we find a .docker secrets file, we update
from there.
'''
# If the user has defined secrets, use them
credentials = self._get_setting('SREGISTRY_DOCKERHUB_SECRETS')
# First try for SINGULARITY exported, then try sregistry
username = self._get_setting('SINGULARITY_DOCKER_USERNAME')
password = self._get_setting('SINGULARITY_DOCKER_PASSWORD')
username = self._get_setting('SREGISTRY_DOCKERHUB_USERNAME', username)
password = self._get_setting('SREGISTRY_DOCKERHUB_PASSWORD', password)
# Option 1: the user exports username and password
auth = None
if username is not None and password is not None:
auth = basic_auth_header(username, password)
self.headers.update(auth)
# Option 2: look in .docker config file
if credentials is not None and auth is None:
if os.path.exists(credentials):
credentials = read_json(credentials)
# Find a matching auth in .docker config
if "auths" in credentials:
for auths, params in credentials['auths'].items():
if self._base in auths:
if 'auth' in params:
auth = "Basic %s" % params['auth']
self.headers['Authorization'] = auth
# Also update headers
if 'HttpHeaders' in credentials:
for key, value in credentials['HttpHeaders'].items():
self.headers[key] = value
else:
bot.warning('Credentials file set to %s, but does not exist.') | [
"def",
"_update_secrets",
"(",
"self",
")",
":",
"# If the user has defined secrets, use them",
"credentials",
"=",
"self",
".",
"_get_setting",
"(",
"'SREGISTRY_DOCKERHUB_SECRETS'",
")",
"# First try for SINGULARITY exported, then try sregistry",
"username",
"=",
"self",
".",
... | 44.422222 | 0.002447 |
def activate(self, uid=None):
    """
    Activate a managed object stored by a KMIP appliance.

    Args:
        uid (string): The unique ID of the managed object to activate.
            Optional, defaults to None.

    Returns:
        None

    Raises:
        ClientConnectionNotOpen: if the client connection is unusable
        KmipOperationFailure: if the operation result is a failure
        TypeError: if the input argument is invalid
    """
    # Validate the input before talking to the server.
    if uid is not None and not isinstance(uid, six.string_types):
        raise TypeError("uid must be a string")
    # Issue the Activate operation and inspect the server's verdict.
    result = self.proxy.activate(uid)
    status = result.result_status.value
    if status != enums.ResultStatus.SUCCESS:
        reason = result.result_reason.value
        message = result.result_message.value
        raise exceptions.KmipOperationFailure(status, reason, message)
"def",
"activate",
"(",
"self",
",",
"uid",
"=",
"None",
")",
":",
"# Check input",
"if",
"uid",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"uid",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"\"uid must be a strin... | 33.419355 | 0.001876 |
def parent():
    """Determine subshell matching the currently running shell

    The shell is determined by either a pre-defined BE_SHELL
    environment variable, or, if none is found, via psutil
    which looks at the parent process directly through
    system-level calls.

    For example, if `be` is run from cmd.exe, then the full
    path to cmd.exe is returned, and the same goes for bash.exe
    and bash (without suffix) for Unix environments.

    The point is to return an appropriate subshell for the
    running shell, as opposed to the currently running OS.
    """
    # NOTE(review): `self` is not a parameter here -- this function appears
    # to be extracted from a property/closure where `self` is captured from
    # the enclosing scope; confirm against the original class definition.
    if self._parent:
        # Cached result from a previous lookup.
        return self._parent
    if "BE_SHELL" in os.environ:
        # Explicit override always wins over process inspection.
        self._parent = os.environ["BE_SHELL"]
    else:
        # If a shell is not provided, rely on `psutil`
        # to look at the calling process name.
        try:
            import psutil
        except ImportError:
            raise ImportError(
                "No shell provided, see documentation for "
                "BE_SHELL for more information.\n"
                "https://github.com/mottosso/be/wiki"
                "/environment#read-environment-variables")
        parent = psutil.Process(os.getpid()).parent()
        # `pip install` creates an additional executable
        # that tricks the above mechanism to think of it
        # as the parent shell. See #34 for more.
        if parent.name() in ("be", "be.exe"):
            parent = parent.parent()
        self._parent = str(parent.exe())
    return self._parent
"def",
"parent",
"(",
")",
":",
"if",
"self",
".",
"_parent",
":",
"return",
"self",
".",
"_parent",
"if",
"\"BE_SHELL\"",
"in",
"os",
".",
"environ",
":",
"self",
".",
"_parent",
"=",
"os",
".",
"environ",
"[",
"\"BE_SHELL\"",
"]",
"else",
":",
"# I... | 32.688889 | 0.00066 |
def cluster_DBSCAN(data, eps=None, min_samples=None,
                   n_clusters=None, maxiter=200, **kwargs):
    """
    Identify clusters using DBSCAN algorithm.

    Parameters
    ----------
    data : array_like
        array of size [n_samples, n_features].
    eps : float
        The minimum 'distance' points must be apart for them to be in the
        same cluster. Defaults to 0.3. Note: If the data are normalised
        (they should be for DBSCAN) this is in terms of total sample
        variance. Normalised data have a mean of 0 and a variance of 1.
    min_samples : int
        The minimum number of samples within distance `eps` required
        to be considered as an independent cluster.
    n_clusters : int
        The number of clusters expected. If specified, `eps` will be
        incrementally reduced until the expected number of clusters is
        found.
    maxiter : int
        The maximum number of iterations DBSCAN will run.

    Returns
    -------
    labels : array
        Cluster label of each sample (-1 denotes noise).
    core_samples_mask : array
        Boolean mask marking which samples are core samples.
    """
    if n_clusters is None:
        # Single fit with a fixed (or default) eps.
        if eps is None:
            eps = 0.3
        db = cl.DBSCAN(eps=eps, min_samples=min_samples, **kwargs).fit(data)
    else:
        # Iteratively shrink eps by 5% per step until at least n_clusters
        # clusters are found, or backtrack/stop if clustering degrades.
        clusters = 0
        eps_temp = 1 / .95
        niter = 0
        while clusters < n_clusters:
            clusters_last = clusters
            eps_temp *= 0.95
            db = cl.DBSCAN(eps=eps_temp, min_samples=min_samples, **kwargs).fit(data)
            # Noise points carry label -1 and do not count as a cluster.
            clusters = (len(set(db.labels_)) -
                        (1 if -1 in db.labels_ else 0))
            if clusters < clusters_last:
                # Shrinking eps lost clusters -- undo the last step and
                # settle for the previous clustering.
                eps_temp *= 1 / 0.95
                db = cl.DBSCAN(eps=eps_temp, min_samples=min_samples, **kwargs).fit(data)
                clusters = (len(set(db.labels_)) -
                            (1 if -1 in db.labels_ else 0))
                warnings.warn(('\n\n***Unable to find {:.0f} clusters in '
                               'data. Found {:.0f} with an eps of {:.2e}'
                               '').format(n_clusters, clusters, eps_temp))
                break
            niter += 1
            if niter == maxiter:
                warnings.warn(('\n\n***Maximum iterations ({:.0f}) reached'
                               ', {:.0f} clusters not found.\nDeacrease '
                               'min_samples or n_clusters (or increase '
                               'maxiter).').format(maxiter, n_clusters))
                break
    labels = db.labels_
    core_samples_mask = np.zeros_like(labels)
    core_samples_mask[db.core_sample_indices_] = True
    return labels, core_samples_mask
"def",
"cluster_DBSCAN",
"(",
"data",
",",
"eps",
"=",
"None",
",",
"min_samples",
"=",
"None",
",",
"n_clusters",
"=",
"None",
",",
"maxiter",
"=",
"200",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"n_clusters",
"is",
"None",
":",
"if",
"eps",
"is",
... | 39.333333 | 0.001879 |
def list_exports(exports='/etc/exports'):
    '''
    List configured NFS exports, parsed from the exports file.

    Returns a dict mapping each exported path to a list of
    ``{'hosts': ..., 'options': [...]}`` entries.

    CLI Example:
    .. code-block:: bash
        salt '*' nfs.list_exports
    '''
    ret = {}
    with salt.utils.files.fopen(exports, 'r') as efl:
        for line in salt.utils.stringutils.to_unicode(efl.read()).splitlines():
            # Skip blank lines and comments.
            if not line:
                continue
            if line.startswith('#'):
                continue
            comps = line.split()
            # Handle the case where the same path is given twice
            if not comps[0] in ret:
                ret[comps[0]] = []
            # Additional '/'-prefixed tokens on the line are extra share
            # paths that mirror the first path's permissions.
            newshares = []
            for perm in comps[1:]:
                if perm.startswith('/'):
                    newshares.append(perm)
                    continue
                # Tokens look like 'host(opt1,opt2)'.
                permcomps = perm.split('(')
                permcomps[1] = permcomps[1].replace(')', '')
                hosts = permcomps[0]
                if not isinstance(hosts, six.string_types):
                    # Lists, etc would silently mangle /etc/exports
                    raise TypeError('hosts argument must be a string')
                options = permcomps[1].split(',')
                ret[comps[0]].append({'hosts': hosts, 'options': options})
            # NOTE: mirrored shares alias the same list object as the
            # primary path -- mutating one mutates both.
            for share in newshares:
                ret[share] = ret[comps[0]]
    return ret
"def",
"list_exports",
"(",
"exports",
"=",
"'/etc/exports'",
")",
":",
"ret",
"=",
"{",
"}",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"exports",
",",
"'r'",
")",
"as",
"efl",
":",
"for",
"line",
"in",
"salt",
".",
"utils",
".... | 33.102564 | 0.000752 |
def load_orthologs(fo: IO, metadata: dict):
    """Load orthologs into ArangoDB

    Batch-loads the ortholog documents, removes edges/nodes left over
    from previous versions of the same source, and records the load in
    the resource metadata collection.

    Args:
        fo: file obj - orthologs file
        metadata: dict containing the metadata for orthologs
    """
    version = metadata["metadata"]["version"]
    # LOAD ORTHOLOGS INTO ArangoDB
    with timy.Timer("Load Orthologs") as timer:
        arango_client = arangodb.get_client()
        belns_db = arangodb.get_belns_handle(arango_client)
        # Upsert semantics: existing docs with the same key are updated.
        arangodb.batch_load_docs(
            belns_db, orthologs_iterator(fo, version), on_duplicate="update"
        )
        log.info(
            "Load orthologs",
            elapsed=timer.elapsed,
            source=metadata["metadata"]["source"],
        )
    # Clean up old entries: anything from this source whose version is
    # not the one just loaded gets removed.
    remove_old_ortholog_edges = f"""
    FOR edge in ortholog_edges
        FILTER edge.source == "{metadata["metadata"]["source"]}"
        FILTER edge.version != "{version}"
        REMOVE edge IN ortholog_edges
    """
    remove_old_ortholog_nodes = f"""
    FOR node in ortholog_nodes
        FILTER node.source == "{metadata["metadata"]["source"]}"
        FILTER node.version != "{version}"
        REMOVE node IN ortholog_nodes
    """
    arangodb.aql_query(belns_db, remove_old_ortholog_edges)
    arangodb.aql_query(belns_db, remove_old_ortholog_nodes)
    # Add metadata to resource metadata collection; fall back to replace
    # when a record with this key already exists.
    metadata["_key"] = f"Orthologs_{metadata['metadata']['source']}"
    try:
        belns_db.collection(arangodb.belns_metadata_name).insert(metadata)
    except ArangoError as ae:
        belns_db.collection(arangodb.belns_metadata_name).replace(metadata)
"def",
"load_orthologs",
"(",
"fo",
":",
"IO",
",",
"metadata",
":",
"dict",
")",
":",
"version",
"=",
"metadata",
"[",
"\"metadata\"",
"]",
"[",
"\"version\"",
"]",
"# LOAD ORTHOLOGS INTO ArangoDB",
"with",
"timy",
".",
"Timer",
"(",
"\"Load Orthologs\"",
")"... | 35.869565 | 0.00059 |
def video_load_time(self):
    """
    Aggregate video load time across all pages.

    Returns the mean of the per-page video load times, rounded to
    ``self.decimal_precision`` decimal places.
    """
    times = self.get_load_times('video')
    average = mean(times)
    return round(average, self.decimal_precision)
"def",
"video_load_time",
"(",
"self",
")",
":",
"load_times",
"=",
"self",
".",
"get_load_times",
"(",
"'video'",
")",
"return",
"round",
"(",
"mean",
"(",
"load_times",
")",
",",
"self",
".",
"decimal_precision",
")"
] | 35.833333 | 0.009091 |
def copyDoc(self, recursive):
    """Do a copy of the document info. If recursive, the content
    tree will be copied too as well as DTD, namespaces and
    entities. """
    raw = libxml2mod.xmlCopyDoc(self._o, recursive)
    if raw is None:
        raise treeError('xmlCopyDoc() failed')
    # Wrap the raw C pointer in a Python-level xmlDoc object.
    return xmlDoc(_obj=raw)
"def",
"copyDoc",
"(",
"self",
",",
"recursive",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlCopyDoc",
"(",
"self",
".",
"_o",
",",
"recursive",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlCopyDoc() failed'",
")",
"__tmp",
"=",
... | 44.125 | 0.011111 |
async def _auth_cram_md5(self, username, password):
    """
    Perform an authentication attempt using the CRAM-MD5 mechanism.

    Protocol:

    1. Send 'AUTH CRAM-MD5' to server ;
    2. If the server replies with a 334 return code, we can go on:

        1) The challenge (sent by the server) is base64-decoded ;
        2) The decoded challenge is hashed using HMAC-MD5 and the user
           password as key (shared secret) ;
        3) The hashed challenge is converted to a string of lowercase
           hexadecimal digits ;
        4) The username and a space character are prepended to the hex
           digits ;
        5) The concatenation is base64-encoded and sent to the server.
        6) If the server replies with a return code of 235, user is
           authenticated.

    Args:
        username (str): Identifier of the user trying to authenticate.
        password (str): Password for the user.

    Raises:
        ConnectionResetError: If the connection with the server is
            unexpectedely lost.
        SMTPAuthenticationError: If the authentication attempt fails.

    Returns:
        (int, str): A (code, message) 2-tuple containing the server
            response.
    """
    mechanism = "CRAM-MD5"
    code, message = await self.do_cmd("AUTH", mechanism, success=(334,))
    # The 334 reply carries a base64-encoded challenge to hash.
    challenge = base64.b64decode(message)
    digest = hmac.new(
        key=password.encode("utf-8"), msg=challenge, digestmod="md5"
    )
    answer = "{} {}".format(username, digest.hexdigest())
    encoded_answer = SMTP.b64enc(answer)
    try:
        code, message = await self.do_cmd(encoded_answer, success=(235, 503))
    except SMTPCommandFailedError as err:
        raise SMTPAuthenticationError(err.code, err.message, mechanism)
    return code, message
"async",
"def",
"_auth_cram_md5",
"(",
"self",
",",
"username",
",",
"password",
")",
":",
"mechanism",
"=",
"\"CRAM-MD5\"",
"code",
",",
"message",
"=",
"await",
"self",
".",
"do_cmd",
"(",
"\"AUTH\"",
",",
"mechanism",
",",
"success",
"=",
"(",
"334",
... | 37.735849 | 0.001949 |
def zoom(image, factor, dimension, hdr = False, order = 3):
    """
    Zooms the provided image by the supplied factor in the supplied dimension.

    The factor is an integer determining how many slices should be put between
    each existing pair. If an image header (hdr) is supplied, its voxel spacing
    gets updated.

    :param image: the input image array
    :param factor: integer number of slices to insert between existing pairs
    :param dimension: the axis along which to zoom
    :param hdr: optional image header whose voxel spacing gets updated
    :param order: spline interpolation order passed to the resampler
    :returns: a (image, hdr) tuple; hdr is False when none was supplied
    :raises argparse.ArgumentError: if ``dimension`` is out of range
    """
    # check if supplied dimension is valid
    if dimension >= image.ndim:
        # BUG FIX: ArgumentError's signature is (argument, message); calling
        # it with a single positional raised TypeError instead of the
        # intended error. Pass None since no argparse action is available.
        raise argparse.ArgumentError(None, 'The supplied zoom-dimension {} exceeds the image dimensionality of 0 to {}.'.format(dimension, image.ndim - 1))
    # get logger
    logger = Logger.getInstance()
    logger.debug('Old shape = {}.'.format(image.shape))
    # perform the zoom; renamed from `zoom` to avoid shadowing this function
    zoom_factors = [1] * image.ndim
    zoom_factors[dimension] = (image.shape[dimension] + (image.shape[dimension] - 1) * factor) / float(image.shape[dimension])
    logger.debug('Reshaping with = {}.'.format(zoom_factors))
    image = interpolation.zoom(image, zoom_factors, order=order)
    logger.debug('New shape = {}.'.format(image.shape))
    if hdr:
        # Inserting `factor` slices per gap shrinks spacing by (factor + 1).
        new_spacing = list(header.get_pixel_spacing(hdr))
        new_spacing[dimension] = new_spacing[dimension] / float(factor + 1)
        logger.debug('Setting pixel spacing from {} to {}....'.format(header.get_pixel_spacing(hdr), new_spacing))
        header.set_pixel_spacing(hdr, tuple(new_spacing))
    return image, hdr
"def",
"zoom",
"(",
"image",
",",
"factor",
",",
"dimension",
",",
"hdr",
"=",
"False",
",",
"order",
"=",
"3",
")",
":",
"# check if supplied dimension is valid",
"if",
"dimension",
">=",
"image",
".",
"ndim",
":",
"raise",
"argparse",
".",
"ArgumentError",... | 43.46875 | 0.009142 |
def full_path_from_dirrecord(self, rec, rockridge=False):
    # type: (Union[dr.DirectoryRecord, udfmod.UDFFileEntry], bool) -> str
    '''
    A method to get the absolute path of a directory record.

    Walks the parent chain of an ISO9660 directory record or a UDF file
    entry, joining identifiers with '/' in the appropriate encoding.

    Parameters:
     rec - The directory record to get the full path for.
     rockridge - Whether to get the rock ridge full path.
    Returns:
     A string representing the absolute path to the file on the ISO.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
    ret = b''
    if isinstance(rec, dr.DirectoryRecord):
        # Joliet volumes store identifiers as UTF-16 big-endian.
        encoding = 'utf-8'
        if self.joliet_vd is not None and id(rec.vd) == id(self.joliet_vd):
            encoding = 'utf-16_be'
        slash = '/'.encode(encoding)
        # A root entry has no Rock Ridge entry, even on a Rock Ridge ISO. Just
        # always return / here.
        if rec.is_root:
            return '/'
        if rockridge and rec.rock_ridge is None:
            raise pycdlibexception.PyCdlibInvalidInput('Cannot generate a Rock Ridge path on a non-Rock Ridge ISO')
        # Walk up to the root, prepending each component's identifier.
        parent = rec # type: Optional[dr.DirectoryRecord]
        while parent is not None:
            if not parent.is_root:
                if rockridge and parent.rock_ridge is not None:
                    ret = slash + parent.rock_ridge.name() + ret
                else:
                    ret = slash + parent.file_identifier() + ret
            parent = parent.parent
    else:
        # UDF file entry: root is the entry with no parent.
        if rec.parent is None:
            return '/'
        if rec.file_ident is not None:
            encoding = rec.file_ident.encoding
        else:
            encoding = 'utf-8'
        slash = '/'.encode(encoding)
        # Walk up the UDF parent chain, skipping the root identifier.
        udfparent = rec # type: Optional[udfmod.UDFFileEntry]
        while udfparent is not None:
            ident = udfparent.file_identifier()
            if ident != b'/':
                ret = slash + ident + ret
            udfparent = udfparent.parent
    if sys.version_info >= (3, 0):
        # Python 3, just return the encoded version
        return ret.decode(encoding)
    # Python 2.
    return ret.decode(encoding).encode('utf-8')
"def",
"full_path_from_dirrecord",
"(",
"self",
",",
"rec",
",",
"rockridge",
"=",
"False",
")",
":",
"# type: (Union[dr.DirectoryRecord, udfmod.UDFFileEntry], bool) -> str",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidI... | 40.568966 | 0.00249 |
def search(self, conn, filters, order_by, offset=None, count=None, timeout=None):
    '''
    Search for model ids that match the provided filters.

    Arguments:

        * *filters* - A list of filters that apply to the search of one of
          the following two forms:

            1. ``'column:string'`` - a plain string will match a word in a
               text search on the column

              .. note:: Read the documentation about the ``Query`` object
                for what is actually passed during text search

            2. ``('column', min, max)`` - a numeric column range search,
               between min and max (inclusive by default)

              .. note:: Read the documentation about the ``Query`` object
                for information about open-ended ranges

            3. ``['column:string1', 'column:string2']`` - will match any
               of the provided words in a text search on the column

            4. ``Prefix('column', 'prefix')`` - will match prefixes of
               words in a text search on the column

            5. ``Suffix('column', 'suffix')`` - will match suffixes of
               words in a text search on the column

            6. ``Pattern('column', 'pattern')`` - will match patterns over
               words in a text search on the column

        * *order_by* - A string that names the numeric column by which to
          sort the results by. Prefixing with '-' will return results in
          descending order

          .. note:: While you can technically pass a non-numeric index as an
            *order_by* clause, the results will basically be to order the
            results by string comparison of the ids (10 will come before 2).

          .. note:: If you omit the ``order_by`` argument, results will be
            ordered by the last filter. If the last filter was a text
            filter, see the previous note. If the last filter was numeric,
            then results will be ordered by that result.

        * *offset* - A numeric starting offset for results
        * *count* - The maximum number of results to return from the query

    When *timeout* is given, the intermediate result key is kept alive for
    that many seconds and the key name itself is returned instead of ids.
    '''
    # prepare the filters: builds a pipeline, an intersect helper, and the
    # temporary result key holding the intersection of all filters
    pipe, intersect, temp_id = self._prepare(conn, filters)
    # handle ordering: re-intersect against the sort index with the
    # requested direction (-1 for descending)
    if order_by:
        reverse = order_by and order_by.startswith('-')
        order_clause = '%s:%s:idx'%(self.namespace, order_by.lstrip('-'))
        intersect(temp_id, {temp_id:0, order_clause: -1 if reverse else 1})
    # handle returning the temporary result key
    if timeout is not None:
        pipe.expire(temp_id, timeout)
        pipe.execute()
        return temp_id
    # fetch the requested window and clean up the temporary key in one
    # round trip; [-2] is the zrange result (delete's reply is last)
    offset = offset if offset is not None else 0
    end = (offset + count - 1) if count and count > 0 else -1
    pipe.zrange(temp_id, offset, end)
    pipe.delete(temp_id)
    return pipe.execute()[-2]
"def",
"search",
"(",
"self",
",",
"conn",
",",
"filters",
",",
"order_by",
",",
"offset",
"=",
"None",
",",
"count",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"# prepare the filters",
"pipe",
",",
"intersect",
",",
"temp_id",
"=",
"self",
".... | 42.710145 | 0.001658 |
def battery(self):
    """
    Current system battery status (:py:class:`Battery`), or ``None``
    when any of voltage, current or level is unknown.
    """
    readings = (self._voltage, self._current, self._level)
    if any(value is None for value in readings):
        return None
    return Battery(*readings)
"def",
"battery",
"(",
"self",
")",
":",
"if",
"self",
".",
"_voltage",
"is",
"None",
"or",
"self",
".",
"_current",
"is",
"None",
"or",
"self",
".",
"_level",
"is",
"None",
":",
"return",
"None",
"return",
"Battery",
"(",
"self",
".",
"_voltage",
",... | 38.285714 | 0.010949 |
def Size(self):
    """
    Get the total size in bytes of the object.

    Returns:
        int: size.
    """
    # Two fixed-size byte fields plus the variable-length sections.
    total = s.uint8 + s.uint8
    total += GetVarSize(self.Attributes)
    total += GetVarSize(self.inputs)
    total += GetVarSize(self.outputs)
    total += GetVarSize(self.Scripts)
    return total
"def",
"Size",
"(",
"self",
")",
":",
"return",
"s",
".",
"uint8",
"+",
"s",
".",
"uint8",
"+",
"GetVarSize",
"(",
"self",
".",
"Attributes",
")",
"+",
"GetVarSize",
"(",
"self",
".",
"inputs",
")",
"+",
"GetVarSize",
"(",
"self",
".",
"outputs",
"... | 33.375 | 0.010949 |
def prime_factors(n):
    """List prime factors of a given natural integer, from greatest to smallest.

    :param n: Natural integer
    :rtype : list of all prime factors of the given natural n
    """
    i = 2
    # `i * i <= n` is equivalent to `i <= sqrt(n)` but stays in exact
    # integer arithmetic.
    while i * i <= n:
        if n % i == 0:
            # BUG FIX: use floor division; `n / i` is a float on Python 3,
            # which made the recursion operate on (and return) floats.
            factors = prime_factors(n // i)
            factors.append(i)
            return factors
        i += 1
    # n itself is prime (or 1), so it is its own only factor.
    return [n]
"def",
"prime_factors",
"(",
"n",
")",
":",
"i",
"=",
"2",
"while",
"i",
"<=",
"sqrt",
"(",
"n",
")",
":",
"if",
"n",
"%",
"i",
"==",
"0",
":",
"l",
"=",
"prime_factors",
"(",
"n",
"/",
"i",
")",
"l",
".",
"append",
"(",
"i",
")",
"return",... | 27.461538 | 0.00813 |
def random_bits(bits_count):
    """Random generator (PyCrypto getrandbits wrapper). The result is a
    non-negative value.

    :param bits_count: random bits to generate
    :return: int
    """
    n_bytes = int(math.ceil(bits_count / 8))
    value = int.from_bytes(os.urandom(n_bytes), byteorder=sys.byteorder)
    # urandom works in whole bytes; shift off any excess bits so the
    # result uses exactly `bits_count` bits.
    excess_bits = n_bytes * 8 - bits_count
    if excess_bits > 0:
        value >>= excess_bits
    return value
"def",
"random_bits",
"(",
"bits_count",
")",
":",
"bytes_count",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"bits_count",
"/",
"8",
")",
")",
"random_value",
"=",
"int",
".",
"from_bytes",
"(",
"os",
".",
"urandom",
"(",
"bytes_count",
")",
",",
"byte... | 31.571429 | 0.028571 |
def sign(self, payload):
    """Sign *payload* with this client's authenticator, if one is set;
    otherwise return the payload unchanged."""
    auth = self.authenticator
    if auth:
        return auth.signed(payload)
    return payload
"def",
"sign",
"(",
"self",
",",
"payload",
")",
":",
"if",
"self",
".",
"authenticator",
":",
"return",
"self",
".",
"authenticator",
".",
"signed",
"(",
"payload",
")",
"return",
"payload"
] | 38 | 0.010309 |
def _load_key(key_object):
    """
    Common code to load public and private keys into PublicKey and PrivateKey
    objects

    :param key_object:
        An asn1crypto.keys.PublicKeyInfo or asn1crypto.keys.PrivateKeyInfo
        object

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        oscrypto.errors.AsymmetricKeyError - when the key is incompatible with the OS crypto library
        OSError - when an error is returned by the OS crypto library

    :return:
        A PublicKey or PrivateKey object
    """
    # Reject key parameters the OS X Security framework cannot handle before
    # touching any CoreFoundation objects.
    if key_object.algorithm == 'ec':
        curve_type, details = key_object.curve
        if curve_type != 'named':
            raise AsymmetricKeyError('OS X only supports EC keys using named curves')
        if details not in set(['secp256r1', 'secp384r1', 'secp521r1']):
            raise AsymmetricKeyError(pretty_message(
                '''
                OS X only supports EC keys using the named curves secp256r1,
                secp384r1 and secp521r1
                '''
            ))
    elif key_object.algorithm == 'dsa' and key_object.hash_algo == 'sha2':
        raise AsymmetricKeyError(pretty_message(
            '''
            OS X only supports DSA keys based on SHA1 (2048 bits or less) - this
            key is based on SHA2 and is %s bits
            ''',
            key_object.bit_size
        ))
    elif key_object.algorithm == 'dsa' and key_object.hash_algo is None:
        # Without p, q and g there is nothing usable to hand to the OS.
        raise IncompleteAsymmetricKeyError(pretty_message(
            '''
            The DSA key does not contain the necessary p, q and g parameters
            and can not be used
            '''
        ))
    # Serialize the key: private keys are unwrapped to the raw key structure
    # first; the key class tells Security how to interpret the bytes.
    if isinstance(key_object, keys.PublicKeyInfo):
        source = key_object.dump()
        key_class = Security.kSecAttrKeyClassPublic
    else:
        source = key_object.unwrap().dump()
        key_class = Security.kSecAttrKeyClassPrivate
    # CoreFoundation objects are manually reference-counted; pre-initialize
    # to None so the finally block can release whatever was created, even
    # when an intermediate step raises.
    cf_source = None
    cf_dict = None
    cf_output = None
    try:
        cf_source = CFHelpers.cf_data_from_bytes(source)
        key_type = {
            'dsa': Security.kSecAttrKeyTypeDSA,
            'ec': Security.kSecAttrKeyTypeECDSA,
            'rsa': Security.kSecAttrKeyTypeRSA,
        }[key_object.algorithm]
        cf_dict = CFHelpers.cf_dictionary_from_pairs([
            (Security.kSecAttrKeyType, key_type),
            (Security.kSecAttrKeyClass, key_class),
            (Security.kSecAttrCanSign, CoreFoundation.kCFBooleanTrue),
            (Security.kSecAttrCanVerify, CoreFoundation.kCFBooleanTrue),
        ])
        error_pointer = new(CoreFoundation, 'CFErrorRef *')
        sec_key_ref = Security.SecKeyCreateFromData(cf_dict, cf_source, error_pointer)
        # Raises OSError (via handle_cf_error) if SecKeyCreateFromData failed.
        handle_cf_error(error_pointer)
        if key_class == Security.kSecAttrKeyClassPublic:
            return PublicKey(sec_key_ref, key_object)
        if key_class == Security.kSecAttrKeyClassPrivate:
            return PrivateKey(sec_key_ref, key_object)
    finally:
        if cf_source:
            CoreFoundation.CFRelease(cf_source)
        if cf_dict:
            CoreFoundation.CFRelease(cf_dict)
        # NOTE(review): cf_output is never assigned after initialization, so
        # this branch looks unreachable — confirm before removing.
        if cf_output:
            CoreFoundation.CFRelease(cf_output)
"def",
"_load_key",
"(",
"key_object",
")",
":",
"if",
"key_object",
".",
"algorithm",
"==",
"'ec'",
":",
"curve_type",
",",
"details",
"=",
"key_object",
".",
"curve",
"if",
"curve_type",
"!=",
"'named'",
":",
"raise",
"AsymmetricKeyError",
"(",
"'OS X only s... | 35.651685 | 0.001533 |
def get_application_groups():
    """
    Return the applications of the system, organized in various groups.

    These groups are not connected with the application names,
    but rather with a pattern of applications.
    """
    result = []
    for title, group_config in appsettings.FLUENT_DASHBOARD_APP_GROUPS:
        # Work on a copy so every DashboardModule keyword argument passes
        # through intact without mutating the settings.
        kwargs = group_config.copy()
        if '*' in group_config['models']:
            # Catch-all group: show everything not claimed by a known app.
            fallback_module = appsettings.FLUENT_DASHBOARD_DEFAULT_MODULE
            kwargs['exclude'] = ALL_KNOWN_APPS + list(kwargs.get('exclude', []))
            del kwargs['models']
        else:
            fallback_module = 'CmsAppIconList'
        # Resolve well-known module aliases to their concrete names.
        module_name = group_config.get('module', fallback_module)
        kwargs['module'] = MODULE_ALIASES.get(module_name, module_name)
        result.append((title, kwargs))
    return result
"def",
"get_application_groups",
"(",
")",
":",
"groups",
"=",
"[",
"]",
"for",
"title",
",",
"groupdict",
"in",
"appsettings",
".",
"FLUENT_DASHBOARD_APP_GROUPS",
":",
"# Allow to pass all possible arguments to the DashboardModule class.",
"module_kwargs",
"=",
"groupdict"... | 38.172414 | 0.001762 |
def create(*context, **kwargs):
    """
    Build a ContextStack instance from a sequence of context-like items.

    This factory-style method is more general than the ContextStack class's
    constructor in that, unlike the constructor, the argument list
    can itself contain ContextStack instances.

    Here is an example illustrating various aspects of this method:

    >>> obj1 = {'animal': 'cat', 'vegetable': 'carrot', 'mineral': 'copper'}
    >>> obj2 = ContextStack({'vegetable': 'spinach', 'mineral': 'silver'})
    >>>
    >>> context = ContextStack.create(obj1, None, obj2, mineral='gold')
    >>>
    >>> context.get('animal')
    'cat'
    >>> context.get('vegetable')
    'spinach'
    >>> context.get('mineral')
    'gold'

    Arguments:

      *context: zero or more dictionaries, ContextStack instances, or objects
        with which to populate the initial context stack.  None
        arguments will be skipped.  Items in the *context list are
        added to the stack in order so that later items in the argument
        list take precedence over earlier items.

      **kwargs: additional key-value data to add to the context stack.
        As these arguments appear after all items in the *context list,
        in the case of key conflicts these values take precedence over
        all items in the *context list.

    """
    stack = ContextStack()
    for source in context:
        if source is None:
            continue
        if isinstance(source, ContextStack):
            # Merge another stack's layers in place, preserving their order.
            stack._stack.extend(source._stack)
        else:
            stack.push(source)
    if kwargs:
        stack.push(kwargs)
    return stack
"def",
"create",
"(",
"*",
"context",
",",
"*",
"*",
"kwargs",
")",
":",
"items",
"=",
"context",
"context",
"=",
"ContextStack",
"(",
")",
"for",
"item",
"in",
"items",
":",
"if",
"item",
"is",
"None",
":",
"continue",
"if",
"isinstance",
"(",
"item... | 35.222222 | 0.002046 |
def request(self, method, path, **options):
    """
    Dispatches a request to the Razorpay HTTP API

    Returns the decoded JSON body on 2xx; otherwise maps the API error
    code to a client exception (ServerError when the code is unknown).
    """
    options = self._update_user_agent_header(options)
    url = "{}{}".format(self.base_url, path)
    http_call = getattr(self.session, method)
    response = http_call(url, auth=self.auth,
                         verify=self.cert_path,
                         **options)
    success = (HTTP_STATUS_CODE.OK <= response.status_code <
               HTTP_STATUS_CODE.REDIRECT)
    if success:
        return response.json()
    # Pull message/code out of the error payload, tolerating missing keys.
    msg = ""
    code = ""
    payload = response.json()
    if 'error' in payload:
        error = payload['error']
        if 'description' in error:
            msg = error['description']
        if 'code' in error:
            code = str(error['code'])
    code = code.upper()
    if code == ERROR_CODE.BAD_REQUEST_ERROR:
        raise BadRequestError(msg)
    if code == ERROR_CODE.GATEWAY_ERROR:
        raise GatewayError(msg)
    raise ServerError(msg)
"def",
"request",
"(",
"self",
",",
"method",
",",
"path",
",",
"*",
"*",
"options",
")",
":",
"options",
"=",
"self",
".",
"_update_user_agent_header",
"(",
"options",
")",
"url",
"=",
"\"{}{}\"",
".",
"format",
"(",
"self",
".",
"base_url",
",",
"pat... | 41.125 | 0.001485 |
def build_process_isolation_temp_dir(self):
    '''
    Create a temporary directory for process isolation to use.

    The directory is owner-only (0700), created under
    ``self.process_isolation_path``, and scheduled for removal at
    interpreter exit.
    '''
    pi_temp_dir = tempfile.mkdtemp(prefix='ansible_runner_pi_',
                                   dir=self.process_isolation_path)
    # S_IRWXU == S_IRUSR | S_IWUSR | S_IXUSR: owner read/write/execute only.
    os.chmod(pi_temp_dir, stat.S_IRWXU)
    atexit.register(shutil.rmtree, pi_temp_dir)
    return pi_temp_dir
"def",
"build_process_isolation_temp_dir",
"(",
"self",
")",
":",
"path",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"prefix",
"=",
"'ansible_runner_pi_'",
",",
"dir",
"=",
"self",
".",
"process_isolation_path",
")",
"os",
".",
"chmod",
"(",
"path",
",",
"stat",
... | 44.125 | 0.008333 |
def compile(self):
    """
    This compiles the alias into a form that can be of most benefit to the
    en/decoder.
    """
    # Idempotence guard: compiling is expensive and only needs to run once.
    if self._compiled:
        return
    self.decodable_properties = set()
    self.encodable_properties = set()
    self.inherited_dynamic = None
    self.inherited_sealed = None
    self.bases = []
    # Normalize user-supplied attribute collections (may be None or lists).
    self.exclude_attrs = set(self.exclude_attrs or [])
    self.readonly_attrs = set(self.readonly_attrs or [])
    self.static_attrs = list(self.static_attrs or [])
    self.static_attrs_set = set(self.static_attrs)
    self.proxy_attrs = set(self.proxy_attrs or [])
    self.sealed = util.is_class_sealed(self.klass)
    if self.external:
        self._checkExternal()
        self._finalise_compile()
        # this class is external so no more compiling is necessary
        return
    # __slots__ names are both readable and writable attributes.
    if hasattr(self.klass, '__slots__'):
        self.decodable_properties.update(self.klass.__slots__)
        self.encodable_properties.update(self.klass.__slots__)
    # Classify Python properties: a getter makes the attribute encodable,
    # a setter makes it decodable, and a getter without a setter is
    # read-only.  NOTE: dict.iteritems() — this code targets Python 2.
    for k, v in self.klass.__dict__.iteritems():
        if not isinstance(v, property):
            continue
        if v.fget:
            self.encodable_properties.update([k])
        if v.fset:
            self.decodable_properties.update([k])
        else:
            self.readonly_attrs.update([k])
    # Walk the MRO (excluding klass itself) so base-class configuration is
    # folded into this alias.
    mro = inspect.getmro(self.klass)[1:]
    for c in mro:
        self._compile_base_class(c)
    self.getCustomProperties()
    self._finalise_compile()
"def",
"compile",
"(",
"self",
")",
":",
"if",
"self",
".",
"_compiled",
":",
"return",
"self",
".",
"decodable_properties",
"=",
"set",
"(",
")",
"self",
".",
"encodable_properties",
"=",
"set",
"(",
")",
"self",
".",
"inherited_dynamic",
"=",
"None",
"... | 29.377358 | 0.001243 |
def mget(self, ids, index=None, doc_type=None, **query_params):
    """
    Fetch multiple JSON documents in a single ``/_mget`` request.

    Each entry of *ids* may be either:

    * a ``(index, doc_type, id)`` or ``(index, doc_type, id, fields)``
      tuple, or
    * a bare id, in which case *index* and *doc_type* are required.
    """
    if not ids:
        return []
    docs = []
    for entry in ids:
        if isinstance(entry, tuple):
            if len(entry) == 3:
                idx, typ, ident = entry
                docs.append({"_index": idx,
                             "_type": typ,
                             "_id": ident})
            elif len(entry) == 4:
                idx, typ, ident, flds = entry
                docs.append({"_index": idx,
                             "_type": typ,
                             "_id": ident,
                             "fields": flds})
            # tuples of any other length are silently skipped, matching the
            # established behaviour of this API
        else:
            if index is None:
                raise InvalidQuery("index value is required for id")
            if doc_type is None:
                raise InvalidQuery("doc_type value is required for id")
            docs.append({"_index": index,
                         "_type": doc_type,
                         "_id": entry})
    results = self._send_request('GET', "/_mget", body={'docs': docs},
                                 params=query_params)
    if 'docs' not in results:
        return []
    model = self.model
    return [model(self, hit) for hit in results['docs']]
"def",
"mget",
"(",
"self",
",",
"ids",
",",
"index",
"=",
"None",
",",
"doc_type",
"=",
"None",
",",
"*",
"*",
"query_params",
")",
":",
"if",
"not",
"ids",
":",
"return",
"[",
"]",
"body",
"=",
"[",
"]",
"for",
"value",
"in",
"ids",
":",
"if"... | 35.463415 | 0.001339 |
def create(self, service_name, json, **kwargs):
    """Create a new AppNexus object by POSTing *json* to *service_name*."""
    http_post = requests.post
    return self._send(http_post, service_name, json, **kwargs)
"def",
"create",
"(",
"self",
",",
"service_name",
",",
"json",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_send",
"(",
"requests",
".",
"post",
",",
"service_name",
",",
"json",
",",
"*",
"*",
"kwargs",
")"
] | 53 | 0.012422 |
def parse(self, text, fn=None):
    """
    Parse Mapfile text and return the resulting parse tree.

    Logs and re-raises any parser error; *fn* is only used for logging
    and include expansion.
    """
    if PY2 and not isinstance(text, unicode):
        # Python 2.7: decode byte strings before handing them to the parser
        text = unicode(text, 'utf-8')
    if self.expand_includes:
        text = self.load_includes(text, fn=fn)
    try:
        # discard comments collected by any previous parse
        del self._comments[:]
        tree = self.lalr.parse(text)
        if self.include_comments:
            self.assign_comments(tree, self._comments)
        return tree
    except (ParseError, UnexpectedInput) as ex:
        if fn:
            log.error("Parsing of {} unsuccessful".format(fn))
        else:
            log.error("Parsing of Mapfile unsuccessful")
        log.info(ex)
        raise
"def",
"parse",
"(",
"self",
",",
"text",
",",
"fn",
"=",
"None",
")",
":",
"if",
"PY2",
"and",
"not",
"isinstance",
"(",
"text",
",",
"unicode",
")",
":",
"# specify Unicode for Python 2.7",
"text",
"=",
"unicode",
"(",
"text",
",",
"'utf-8'",
")",
"i... | 31.8 | 0.002442 |
def active_user_organisations_resource(doc):
    """Yield ``([user_id, org_id], membership)`` pairs for each active
    organisation of an active user document.

    Deactivated users and deactivated memberships are skipped.  Each yielded
    membership dict is annotated in place with ``id`` and ``user_id``.
    """
    if doc.get('type') != 'user' or doc.get('state') == 'deactivated':
        return
    user_id = doc['_id']
    for org_id, membership in doc.get('organisations', {}).items():
        if membership['state'] == 'deactivated':
            continue
        membership['id'] = org_id
        membership['user_id'] = user_id
        yield [user_id, org_id], membership
"def",
"active_user_organisations_resource",
"(",
"doc",
")",
":",
"if",
"doc",
".",
"get",
"(",
"'type'",
")",
"==",
"'user'",
"and",
"doc",
".",
"get",
"(",
"'state'",
")",
"!=",
"'deactivated'",
":",
"for",
"org_id",
",",
"resource",
"in",
"doc",
".",... | 52.125 | 0.002358 |
def POST(self, name):
    """
    /models/{name}/run

    Expects a JSON body of the form::

        {
            predictedFieldName: value
            timestamp: %m/%d/%y %H:%M
        }

    NOTE: predictedFieldName MUST be the same name specified when
    creating the model.

    Responds with::

        {
            "predictionNumber": <number of record>,
            "anomalyScore": anomalyScore
        }
    """
    global g_models
    record = json.loads(web.data())
    # The model expects a datetime object, not the raw string.
    record["timestamp"] = datetime.datetime.strptime(
        record["timestamp"], "%m/%d/%y %H:%M")
    if name not in g_models:
        raise web.notfound("Model with name <%s> does not exist." % name)
    result = g_models[name].run(record)
    return json.dumps({
        "predictionNumber": result.predictionNumber,
        "anomalyScore": result.inferences["anomalyScore"],
    })
"def",
"POST",
"(",
"self",
",",
"name",
")",
":",
"global",
"g_models",
"data",
"=",
"json",
".",
"loads",
"(",
"web",
".",
"data",
"(",
")",
")",
"data",
"[",
"\"timestamp\"",
"]",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"data",
"... | 26.090909 | 0.00224 |
def fetch_object(self, obj_name, include_meta=False, chunk_size=None):
    """
    Backwards-compatible alias that simply delegates to ``self.fetch()``.
    """
    return self.fetch(
        obj=obj_name,
        include_meta=include_meta,
        chunk_size=chunk_size,
    )
"def",
"fetch_object",
"(",
"self",
",",
"obj_name",
",",
"include_meta",
"=",
"False",
",",
"chunk_size",
"=",
"None",
")",
":",
"return",
"self",
".",
"fetch",
"(",
"obj",
"=",
"obj_name",
",",
"include_meta",
"=",
"include_meta",
",",
"chunk_size",
"=",... | 44 | 0.011152 |
def validate_filepath(file_path, platform=None, min_len=1, max_len=None):
    """Raise an exception if ``file_path`` is not a valid file path.

    Args:
        file_path (str):
            File path to validate.
        platform (str, optional):
            Target platform for the validation rules.
        min_len (int, optional):
            Minimum length of the ``file_path``; the value must be greater
            or equal to one.  Defaults to ``1``.
        max_len (int, optional):
            Maximum length of the ``file_path``.  When ``None`` (the
            default), it is determined by the ``platform``:

            - ``Linux``: 4096
            - ``macOS``: 1024
            - ``Windows``: 260

    Raises:
        NullNameError:
            If the ``file_path`` is empty.
        InvalidCharError:
            If the ``file_path`` includes invalid char(s);
            additional characters are invalid on Windows.
        InvalidLengthError:
            If the ``file_path`` is longer than ``max_len`` characters.

    See Also:
        `Naming Files, Paths, and Namespaces (Windows)
        <https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx>`__
    """
    sanitizer = FilePathSanitizer(platform=platform, min_len=min_len,
                                  max_len=max_len)
    sanitizer.validate(file_path)
"def",
"validate_filepath",
"(",
"file_path",
",",
"platform",
"=",
"None",
",",
"min_len",
"=",
"1",
",",
"max_len",
"=",
"None",
")",
":",
"FilePathSanitizer",
"(",
"platform",
"=",
"platform",
",",
"min_len",
"=",
"min_len",
",",
"max_len",
"=",
"max_le... | 36.948718 | 0.002028 |
def distant_level(reference_level, distance, reference_distance=1.0):
    """
    Return the sound pressure level at *distance*, assuming a perfect
    ball-shaped (point) source and spherical spreading.

    reference_level: Sound pressure level at the reference distance, in dB
    distance: Distance to calculate the sound pressure level for, in meters
    reference_distance: reference distance in meters (defaults to 1)
    """
    distance_ratio = float(reference_distance) / float(distance)
    # 20 * log10(ratio): the inverse-distance law for pressure levels.
    attenuation = 20.0 * (math.log(distance_ratio) / math.log(10))
    return float(reference_level) + attenuation
"def",
"distant_level",
"(",
"reference_level",
",",
"distance",
",",
"reference_distance",
"=",
"1.0",
")",
":",
"rel_dist",
"=",
"float",
"(",
"reference_distance",
")",
"/",
"float",
"(",
"distance",
")",
"level",
"=",
"float",
"(",
"reference_level",
")",
... | 44.230769 | 0.001704 |
def require_session(handler):
    """ Decorator to ensure a session is properly in the request """
    @functools.wraps(handler)
    async def decorated(request: web.Request) -> web.Response:
        request_session_token = request.match_info['session']
        session = session_from_request(request)
        # Only proceed when a session exists and its token matches the URL.
        if session and request_session_token == session.token:
            return await handler(request, session)
        LOG.warning(f"request for invalid session {request_session_token}")
        return web.json_response(
            data={'error': 'bad-token',
                  'message': f'No such session {request_session_token}'},
            status=404)
    return decorated
"def",
"require_session",
"(",
"handler",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"handler",
")",
"async",
"def",
"decorated",
"(",
"request",
":",
"web",
".",
"Request",
")",
"->",
"web",
".",
"Response",
":",
"request_session_token",
"=",
"request... | 49.285714 | 0.001422 |
def has_all_nonzero_section_lengths(neuron, threshold=0.0):
    '''Check that no neuron section has a length at or below threshold

    Arguments:
        neuron(Neuron): The neuron object to test
        threshold(float): value above which a section length is considered
        to be non-zero

    Returns:
        CheckResult with result including list of ids of bad sections
    '''
    bad_ids = []
    for section in _nf.iter_sections(neuron.neurites):
        if section_length(section.points) <= threshold:
            bad_ids.append(section.id)
    return CheckResult(not bad_ids, bad_ids)
"def",
"has_all_nonzero_section_lengths",
"(",
"neuron",
",",
"threshold",
"=",
"0.0",
")",
":",
"bad_ids",
"=",
"[",
"s",
".",
"id",
"for",
"s",
"in",
"_nf",
".",
"iter_sections",
"(",
"neuron",
".",
"neurites",
")",
"if",
"section_length",
"(",
"s",
".... | 36.466667 | 0.001783 |
def copy(src, trg, transform=None):
    ''' copy items with optional fields transformation
    '''
    # NOTE(review): ``open`` here shadows the builtin — it appears to be a
    # project-local storage factory returning objects with .get()/.put()/
    # .commit(); src and trg look like (factory, uri) pairs — confirm.
    source = open(src[0], src[1])
    # autocommit batches writes every 1000 puts (per the target API's
    # apparent contract — verify against the storage implementation).
    target = open(trg[0], trg[1], autocommit=1000)
    for item in source.get():
        item = dict(item)
        # Drop the source-assigned _id so the target can assign its own.
        if '_id' in item:
            del item['_id']
        if transform:
            item = transform(item)
        # NOTE(review): trg[0] is used both to open the target and to wrap
        # each item — presumably a record/document constructor; verify.
        target.put(trg[0](item))
    source.close()
    # Flush any writes still pending below the autocommit threshold.
    target.commit()
    target.close()
"def",
"copy",
"(",
"src",
",",
"trg",
",",
"transform",
"=",
"None",
")",
":",
"source",
"=",
"open",
"(",
"src",
"[",
"0",
"]",
",",
"src",
"[",
"1",
"]",
")",
"target",
"=",
"open",
"(",
"trg",
"[",
"0",
"]",
",",
"trg",
"[",
"1",
"]",
... | 23.722222 | 0.002252 |
def Range(start, limit, delta):
    """
    Range op.

    Returns a one-element tuple holding the int32 sequence
    [start, limit) stepped by delta, matching the op-output convention.
    """
    values = np.arange(start, limit, delta, dtype=np.int32)
    return (values,)
"def",
"Range",
"(",
"start",
",",
"limit",
",",
"delta",
")",
":",
"return",
"np",
".",
"arange",
"(",
"start",
",",
"limit",
",",
"delta",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
","
] | 23.2 | 0.008333 |
def _restart_on_unavailable(restart):
    """Restart iteration after :exc:`.ServiceUnavailable`.

    Buffers items until a resume token is seen, so a retried stream can
    resume from the last confirmed point without double-yielding.

    :type restart: callable
    :param restart: curried function returning iterator
    """
    resume_token = b""
    item_buffer = []
    iterator = restart()
    while True:
        try:
            # Accumulate items until the stream marks a resumable point;
            # only then is it safe to hand them to the caller.
            for item in iterator:
                item_buffer.append(item)
                if item.resume_token:
                    resume_token = item.resume_token
                    break
        except ServiceUnavailable:
            # Discard unconfirmed items and restart the stream from the
            # last token the server acknowledged.
            del item_buffer[:]
            iterator = restart(resume_token=resume_token)
            continue
        # The inner for-loop ended without buffering anything: the stream
        # is exhausted.
        if len(item_buffer) == 0:
            break
        # Flush the confirmed batch, then reuse the buffer for the next one.
        for item in item_buffer:
            yield item
        del item_buffer[:]
"def",
"_restart_on_unavailable",
"(",
"restart",
")",
":",
"resume_token",
"=",
"b\"\"",
"item_buffer",
"=",
"[",
"]",
"iterator",
"=",
"restart",
"(",
")",
"while",
"True",
":",
"try",
":",
"for",
"item",
"in",
"iterator",
":",
"item_buffer",
".",
"appen... | 26.25 | 0.001312 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.