text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def set_state(self, color_hex):
"""
:param color_hex: a hex string indicating the color of the porkfolio nose
:return: nothing
From the api...
"the color of the nose is not in the desired_state
but on the object itself."
"""
root_name = self.json_state.get('piggy_bank_id', self.name())
response = self.api_interface.set_device_state(self, {
"nose_color": color_hex
}, root_name)
self._update_state_from_response(response) | 0.005618 |
def is_delimiter(line):
""" True if a line consists only of a single punctuation character."""
return bool(line) and line[0] in punctuation and line[0]*len(line) == line | 0.00565 |
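A self-contained sketch of the helper above, assuming `punctuation` is the standard library's `string.punctuation` as the code implies:
from string import punctuation

def is_delimiter(line):
    # True only for a non-empty line made of one repeated punctuation character.
    return bool(line) and line[0] in punctuation and line[0] * len(line) == line

print(is_delimiter("-----"))   # True
print(is_delimiter("-- --"))   # False (interrupted by a space)
print(is_delimiter(""))        # False (empty line)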
def cover(self, minAcc, maxAcc, groupBy=None, new_reg_fields=None, cover_type="normal"):
"""
*Wrapper of* ``COVER``
COVER is a GMQL operator that takes as input a dataset (usually, but not
necessarily, made of multiple samples) and returns another dataset
(with a single sample, if no groupBy option is specified) by “collapsing”
the input samples and their regions according to the rules specified by
the COVER parameters. The attributes of the output regions are only the
region coordinates plus, when aggregate functions are specified, new
attributes holding aggregate values computed over the attribute values
of the contributing input regions; the output metadata are the union of
the input ones, plus the metadata attributes JaccardIntersect and
JaccardResult, which represent global Jaccard indexes for the considered
dataset, computed like the corresponding region Jaccard indexes but over
the whole sample regions.
:param cover_type: the kind of cover variant you want ['normal', 'flat', 'summit', 'histogram']
:param minAcc: minimum accumulation value, i.e. the minimum number
of overlapping regions to be considered during COVER execution. It can be any positive
number or the strings {'ALL', 'ANY'}.
:param maxAcc: maximum accumulation value, i.e. the maximum number
of overlapping regions to be considered during COVER execution. It can be any positive
number or the strings {'ALL', 'ANY'}.
:param groupBy: optional list of metadata attributes
:param new_reg_fields: dictionary of the type
{'new_region_attribute' : AGGREGATE_FUNCTION('field'), ...}
:return: a new GMQLDataset
An example of usage::
cell_tf = narrow_peak.cover("normal", minAcc=1, maxAcc="Any",
groupBy=['cell', 'antibody_target'])
"""
if isinstance(cover_type, str):
coverFlag = self.opmng.getCoverTypes(cover_type)
else:
raise TypeError("type must be a string. "
"{} was provided".format(type(cover_type)))
if isinstance(minAcc, str):
minAccParam = self.opmng.getCoverParam(minAcc.lower())
elif isinstance(minAcc, int):
minAccParam = self.opmng.getCoverParam(str(minAcc).lower())
else:
raise TypeError("minAcc must be a string or an integer. "
"{} was provided".format(type(minAcc)))
if isinstance(maxAcc, str):
maxAccParam = self.opmng.getCoverParam(maxAcc.lower())
elif isinstance(maxAcc, int):
maxAccParam = self.opmng.getCoverParam(str(maxAcc).lower())
else:
raise TypeError("maxAcc must be a string or an integer. "
"{} was provided".format(type(minAcc)))
if isinstance(groupBy, list) and \
all([isinstance(x, str) for x in groupBy]):
groupBy_result = Some(groupBy)
elif groupBy is None:
groupBy_result = none()
else:
raise TypeError("groupBy must be a list of string. "
"{} was provided".format(type(groupBy)))
aggregates = []
if isinstance(new_reg_fields, dict):
expBuild = self.pmg.getNewExpressionBuilder(self.__index)
for k in new_reg_fields.keys():
if isinstance(k, str):
item = new_reg_fields[k]
if isinstance(item, (SUM, MIN, MAX, AVG, BAG, BAGD,
MEDIAN, COUNT)):
op_name = item.get_aggregate_name()
op_argument = item.get_argument()
if op_argument is None:
op_argument = none()
else:
op_argument = Some(op_argument)
regsToReg = expBuild.getRegionsToRegion(op_name, k, op_argument)
aggregates.append(regsToReg)
else:
raise TypeError("The items in new_reg_fields must be Aggregates (SUM, MIN, MAX, AVG, BAG, "
"BAGD, MEDIAN, COUNT)"
" {} was provided".format(type(item)))
else:
raise TypeError("The key of new_reg_fields must be a string. "
"{} was provided".format(type(k)))
elif new_reg_fields is None:
pass
else:
raise TypeError("new_reg_fields must be a list of dictionary. "
"{} was provided".format(type(new_reg_fields)))
new_index = self.opmng.cover(self.__index, coverFlag, minAccParam, maxAccParam,
groupBy_result, aggregates)
return GMQLDataset(index=new_index, location=self.location,
local_sources=self._local_sources,
remote_sources=self._remote_sources, meta_profile=self.meta_profile) | 0.005721 |
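A minimal, library-free sketch of the accumulation-parameter handling used above; `_normalize_acc` is a hypothetical helper (the real method forwards the lowered string to `opmng.getCoverParam`):
def _normalize_acc(value):
    # Both integers and the strings 'ALL'/'ANY' end up as lower-case strings.
    if isinstance(value, str):
        return value.lower()
    if isinstance(value, int):
        return str(value).lower()
    raise TypeError("accumulation value must be a string or an integer. "
                    "{} was provided".format(type(value)))

print(_normalize_acc("ANY"))  # 'any'
print(_normalize_acc(3))      # '3'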
def changed(self, selection='all'):
'''
Returns the list of changed values.
The key is added to each item.
selection
Specifies the desired changes.
Supported values are
``all`` - all changed items are included in the output
``intersect`` - changed items present in both lists are included
'''
changed = []
if selection == 'all':
for recursive_item in self._get_recursive_difference(type='all'):
# We want the unset values as well
recursive_item.ignore_unset_values = False
key_val = six.text_type(recursive_item.past_dict[self._key]) \
if self._key in recursive_item.past_dict \
else six.text_type(recursive_item.current_dict[self._key])
for change in recursive_item.changed():
if change != self._key:
changed.append('.'.join([self._key, key_val, change]))
return changed
elif selection == 'intersect':
# We want the unset values as well
for recursive_item in self._get_recursive_difference(type='intersect'):
recursive_item.ignore_unset_values = False
key_val = six.text_type(recursive_item.past_dict[self._key]) \
if self._key in recursive_item.past_dict \
else six.text_type(recursive_item.current_dict[self._key])
for change in recursive_item.changed():
if change != self._key:
changed.append('.'.join([self._key, key_val, change]))
return changed | 0.006344 |
def teardown_handler(teardown_fixtures_fn, teardown_fn):
"""Returns a function that adds fixtures handling to the teardown method.
Calls the given teardown method first before calling the fixtures teardown.
"""
def handler(obj):
teardown_fn(obj)
teardown_fixtures_fn(obj)
return handler | 0.011396 |
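A small usage sketch with hypothetical teardown functions, showing the call order the returned handler enforces (custom teardown first, then fixtures teardown):
def fixtures_teardown(obj):
    print("tearing down fixtures for", obj)

def test_teardown(obj):
    print("custom teardown for", obj)

handler = teardown_handler(fixtures_teardown, test_teardown)
handler("MyTestCase")
# custom teardown for MyTestCase
# tearing down fixtures for MyTestCase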
def remove(self, workflow_id):
""" Removes a document specified by its id from the data store.
All associated GridFs documents are deleted as well.
Args:
workflow_id (str): The id of the document that represents a workflow run.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
"""
try:
db = self._client[self.database]
fs = GridFSProxy(GridFS(db.unproxied_object))
for grid_doc in fs.find({"workflow_id": workflow_id},
no_cursor_timeout=True):
fs.delete(grid_doc._id)
col = db[WORKFLOW_DATA_COLLECTION_NAME]
return col.delete_one({"_id": ObjectId(workflow_id)})
except ConnectionFailure:
raise DataStoreNotConnected() | 0.004657 |
def get_session(ec=None, create=True):
"""
ec - engine_name or connection
"""
ec = ec or __default_engine__
if isinstance(ec, (str, unicode)):
session = engine_manager[ec].session(create=create)
elif isinstance(ec, Session):
session = ec
else:
raise Error("Connection %r should be existed engine name or Session object" % ec)
return session | 0.007519 |
def _has_definition(self):
"""True if a footer is defined for this section."""
footerReference = self._sectPr.get_footerReference(self._hdrftr_index)
return footerReference is not None | 0.008969 |
def fit_circle_check(points,
scale,
prior=None,
final=False,
verbose=False):
"""
Fit a circle, and reject the fit if:
* the radius is larger than tol.radius_min*scale or tol.radius_max*scale
* any segment spans more than tol.seg_angle
* any segment is longer than tol.seg_frac*scale
* the fit deviates by more than tol.radius_frac*radius
* the segments on the ends deviate from tangent by more than tol.tangent
Parameters
---------
points: (n, d) set of points which represent a path
prior: (center, radius) tuple for best guess, or None if unknown
scale: float, what is the overall scale of the set of points
final: boolean, if True also require that at least three segments span
a non-trivial angle (stricter acceptance used on the final pass)
verbose: boolean, if True output log.debug messages for the reasons
for fit rejection. Potentially generates hundreds of thousands of
messages so only suggested in manual debugging.
Returns
---------
if fit is acceptable:
(center, radius) tuple
else:
None
"""
# an arc needs at least three points
if len(points) < 3:
return None
# do a least squares fit on the points
C, R, r_deviation = fit_nsphere(points, prior=prior)
# check to make sure radius is between min and max allowed
if not tol.radius_min < (R / scale) < tol.radius_max:
if verbose:
log.debug('circle fit error: R %f', R / scale)
return None
# check point radius error
r_error = r_deviation / R
if r_error > tol.radius_frac:
if verbose:
log.debug('circle fit error: fit %s', str(r_error))
return None
vectors = np.diff(points, axis=0)
segment = np.linalg.norm(vectors, axis=1)
# approximate angle in radians, segments are linear length
# not arc length but this is close and avoids a cosine
angle = segment / R
if (angle > tol.seg_angle).any():
if verbose:
log.debug('circle fit error: angle %s', str(angle))
return None
if final and (angle > tol.seg_angle_min).sum() < 3:
log.debug('final: angle %s', str(angle))
return None
# check segment length as a fraction of drawing scale
scaled = segment / scale
if (scaled > tol.seg_frac).any():
if verbose:
log.debug('circle fit error: segment %s', str(scaled))
return None
# check to make sure the line segments on the ends are actually
# tangent with the candidate circle fit
mid_pt = points[[0, -2]] + (vectors[[0, -1]] * .5)
radial = unitize(mid_pt - C)
ends = unitize(vectors[[0, -1]])
tangent = np.abs(np.arccos(diagonal_dot(radial, ends)))
tangent = np.abs(tangent - np.pi / 2).max()
if tangent > tol.tangent:
if verbose:
log.debug('circle fit error: tangent %f',
np.degrees(tangent))
return None
result = {'center': C,
'radius': R}
return result | 0.000333 |
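A numpy-only sketch of the end-tangency test performed above, using points sampled from a true circle so the tangent deviation is essentially zero; `np.einsum` stands in for `diagonal_dot` and explicit normalization for `unitize`:
import numpy as np

theta = np.linspace(0.0, np.pi / 2, 20)
points = np.column_stack((np.cos(theta), np.sin(theta)))  # unit circle, C=(0,0), R=1
C = np.zeros(2)

vectors = np.diff(points, axis=0)
mid_pt = points[[0, -2]] + vectors[[0, -1]] * 0.5            # midpoints of end segments
radial = mid_pt - C
radial /= np.linalg.norm(radial, axis=1, keepdims=True)       # unitize
ends = vectors[[0, -1]] / np.linalg.norm(vectors[[0, -1]], axis=1, keepdims=True)
tangent = np.abs(np.arccos(np.einsum('ij,ij->i', radial, ends)))
tangent = np.abs(tangent - np.pi / 2).max()
print(tangent < 1e-6)  # True: the end segments are tangent to the circle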
def ct2mu(im):
'''HU units to 511 keV PET mu-values.
C. Burger et al., "PET attenuation coefficients from CT images":
https://link.springer.com/content/pdf/10.1007%2Fs00259-002-0796-3.pdf
'''
# convert nans to -1024 for the HU values only
im[np.isnan(im)] = -1024
# constants
muwater = 0.096
mubone = 0.172
rhowater = 0.184
rhobone = 0.428
uim = np.zeros(im.shape, dtype=np.float32)
uim[im<=0] = muwater * ( 1+im[im<=0]*1e-3 )
uim[im> 0] = muwater+im[im>0]*(rhowater*(mubone-muwater)/(1e3*(rhobone-rhowater)))
# remove negative values
uim[uim<0] = 0
return uim | 0.020344 |
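A short usage sketch of the bilinear conversion above (assuming `numpy` is imported as `np` in the defining module); the printed values are approximate:
import numpy as np

hu = np.array([np.nan, -1000.0, 0.0, 1000.0], dtype=np.float32)
print(np.round(ct2mu(hu), 4))  # approximately [0.     0.     0.096  0.1533]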
def fromlineno(self):
"""The first line that this node appears on in the source code.
:type: int or None
"""
lineno = super(Arguments, self).fromlineno
return max(lineno, self.parent.fromlineno or 0) | 0.008333 |
def objects(self, protocol=None, purposes=None, model_ids=None, groups=None,
classes=None):
"""Returns a list of :py:class:`.File` for the specific query by the user.
Keyword Parameters:
protocol
One of the Biosecurid protocols ('A').
purposes
The purposes required to be retrieved ('enrol', 'probe') or a tuple
with several of them. If 'None' is given (this is the default), it is
considered the same as a tuple with all possible values. This field is
ignored for the data from the "world" group.
model_ids
Only retrieves the files for the provided list of model ids (claimed
client id). The model ids are strings. If 'None' is given (this is
the default), no filter over the model_ids is performed.
groups
One of the groups ('dev', 'eval', 'world') or a tuple with several of them.
If 'None' is given (this is the default), it is considered the same as a
tuple with all possible values.
classes
The classes (types of accesses) to be retrieved ('client', 'impostor')
or a tuple with several of them. If 'None' is given (this is the
default), it is considered the same as a tuple with all possible values.
Returns: A list of :py:class:`.File` objects.
"""
#groups = self.__group_replace_alias_clients__(groups)
protocol = self.check_parameters_for_validity(protocol, "protocol", self.protocol_names())
purposes = self.check_parameters_for_validity(purposes, "purpose", self.purposes())
groups = self.check_parameters_for_validity(groups, "group", self.groups())
classes = self.check_parameters_for_validity(classes, "class", ('client', 'impostor'))
import collections
if(model_ids is None):
model_ids = ()
elif(not isinstance(model_ids,collections.Iterable)):
model_ids = (model_ids,)
# Now query the database
retval = []
if ('eval' in groups):
if('enrol' in purposes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Genuine'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'enrol'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
if('probe' in purposes):
if('client' in classes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Genuine'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
if('impostor' in classes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Impostor'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
return list(set(retval)) | 0.007692 |
def _regex_to_static(src, regex):
'''
Expand regular expression to static match.
'''
if not src or not regex:
return None
try:
compiled = re.compile(regex, re.DOTALL)
src = [line for line in src if compiled.search(line) or line.count(regex)]
except Exception as ex:
raise CommandExecutionError("{0}: '{1}'".format(_get_error_message(ex), regex))
return src or [] | 0.006944 |
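A small usage sketch with hypothetical input lines, assuming `re` is imported in the module; on a bad pattern the function re-raises as `CommandExecutionError`:
src = ["root:x:0:0", "daemon:x:1:1", "nobody:x:65534:65534"]
print(_regex_to_static(src, r"^root"))     # ['root:x:0:0']
print(_regex_to_static(src, r"^missing"))  # []
print(_regex_to_static([], r"^root"))      # None (empty input short-circuits)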
def _waitForIP(cls, instance):
"""
Wait until the instances has a public IP address assigned to it.
:type instance: boto.ec2.instance.Instance
"""
logger.debug('Waiting for ip...')
while True:
time.sleep(a_short_time)
instance.update()
if instance.ip_address or instance.public_dns_name or instance.private_ip_address:
logger.debug('...got ip')
break | 0.006424 |
def multiprocess_mapping(func, iterable):
"""Multiprocess mapping the given function on the given iterable.
This only works on Linux and Mac systems, since Windows has no forking capability. On Windows we fall
back on single processing. Also, if we reach memory limits we fall back on single cpu processing.
Args:
func (func): the function to apply
iterable (iterable): the iterable with the elements we want to apply the function on
Returns:
list: the results of applying ``func`` to every element of ``iterable``
"""
if os.name == 'nt': # In Windows there is no fork.
return list(map(func, iterable))
try:
p = multiprocessing.Pool()
return_data = list(p.imap(func, iterable))
p.close()
p.join()
return return_data
except OSError:
return list(map(func, iterable)) | 0.00507 |
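A hedged usage sketch; the mapped function must be picklable (defined at module level), and the `__main__` guard matters because `multiprocessing.Pool` re-imports the module in child processes:
def square(x):
    return x * x

if __name__ == '__main__':
    print(multiprocess_mapping(square, range(5)))  # [0, 1, 4, 9, 16]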
def t_BIN(self, t):
r'(%[01]+)|([01]+[bB])' # A Binary integer
# Note 00B is a 0 binary, but
# 00Bh is a 12 in hex. So this pattern must come
# after HEXA
if t.value[0] == '%':
t.value = t.value[1:] # Remove initial %
else:
t.value = t.value[:-1] # Remove last 'b'
t.value = int(t.value, 2) # Convert to decimal
t.type = 'INTEGER'
return t | 0.004535 |
def _prepare_DF(self, n_T, scan_onsets=None):
""" Prepare the essential template matrices D and F for
pre-calculating some terms to be re-used.
The inverse covariance matrix of AR(1) noise is
sigma^-2 * (I - rho1*D + rho1**2 * F).
And we denote A = I - rho1*D + rho1**2 * F"""
run_TRs, n_run = self._run_TR_from_scan_onsets(n_T, scan_onsets)
D_ele = map(self._D_gen, run_TRs)
F_ele = map(self._F_gen, run_TRs)
D = scipy.linalg.block_diag(*D_ele)
F = scipy.linalg.block_diag(*F_ele)
# D and F above are templates for constructing
# the inverse of temporal covariance matrix of noise
return D, F, run_TRs, n_run | 0.002743 |
def markup_line(text, offset, marker='>>!<<'):
"""Insert `marker` at `offset` into `text`, and return the marked
line.
.. code-block:: python
>>> markup_line('0\\n1234\\n56', 3)
'1>>!<<234'
"""
begin = text.rfind('\n', 0, offset)
begin += 1
end = text.find('\n', offset)
if end == -1:
end = len(text)
return text[begin:offset] + marker + text[offset:end] | 0.002398 |
def _pfp__add_child(self, name, child, stream=None, overwrite=False):
"""Add a child to the Struct field. If multiple consecutive fields are
added with the same name, an implicit array will be created to store
all fields of that name.
:param str name: The name of the child
:param pfp.fields.Field child: The field to add
:param bool overwrite: Overwrite existing fields (False)
:param pfp.bitwrap.BitwrappedStream stream: unused, but here for compatibility with Union._pfp__add_child
:returns: The resulting field added
"""
if not overwrite and self._pfp__is_non_consecutive_duplicate(name, child):
return self._pfp__handle_non_consecutive_duplicate(name, child)
elif not overwrite and name in self._pfp__children_map:
return self._pfp__handle_implicit_array(name, child)
else:
child._pfp__parent = self
self._pfp__children.append(child)
child._pfp__name = name
self._pfp__children_map[name] = child
return child | 0.00366 |
def query(self, query_samples):
"""
Query docs with query_samples number of Gibbs
sampling iterations.
"""
self.sampled_topics = np.zeros((self.samples, self.N),
dtype=np.int)
for s in range(self.samples):
self.sampled_topics[s, :] = \
samplers_lda.sampler_query(self.docid, self.tokens,
self.topic_seed,
np.ascontiguousarray(
self.tt[:, :, s],
dtype=np.float),
self.N, self.K, self.D,
self.alpha, query_samples)
print("Sample %d queried" % s)
self.dt = np.zeros((self.D, self.K, self.samples))
for s in range(self.samples):
self.dt[:, :, s] = \
samplers_lda.dt_comp(self.docid, self.sampled_topics[s, :],
self.N, self.K, self.D, self.alpha) | 0.003497 |
def partition_query(
self,
session,
sql,
transaction=None,
params=None,
param_types=None,
partition_options=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a set of partition tokens that can be used to execute a query
operation in parallel. Each of the returned partition tokens can be used
by ``ExecuteStreamingSql`` to specify a subset of the query result to
read. The same session and read-only transaction must be used by the
PartitionQueryRequest used to create the partition tokens and the
ExecuteSqlRequests that use the partition tokens.
Partition tokens become invalid when the session used to create them is
deleted, is idle for too long, begins a new transaction, or becomes too
old. When any of these happen, it is not possible to resume the query,
and the whole operation must be restarted from the beginning.
Example:
>>> from google.cloud import spanner_v1
>>>
>>> client = spanner_v1.SpannerClient()
>>>
>>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]')
>>>
>>> # TODO: Initialize `sql`:
>>> sql = ''
>>>
>>> response = client.partition_query(session, sql)
Args:
session (str): Required. The session used to create the partitions.
sql (str): The query request to generate partitions for. The request will fail if
the query is not root partitionable. The query plan of a root
partitionable query has a single distributed union operator. A
distributed union operator conceptually divides one or more tables into
multiple splits, remotely evaluates a subquery independently on each
split, and then unions all results.
This must not contain DML commands, such as INSERT, UPDATE, or DELETE.
Use ``ExecuteStreamingSql`` with a PartitionedDml transaction for large,
partition-friendly DML operations.
transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): Read only snapshot transactions are supported, read/write and single use
transactions are not.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.TransactionSelector`
params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): The SQL query string can contain parameter placeholders. A parameter
placeholder consists of ``'@'`` followed by the parameter name.
Parameter names consist of any combination of letters, numbers, and
underscores.
Parameters can appear anywhere that a literal value is expected. The
same parameter name can be used more than once, for example:
``"WHERE id > @msg_id AND id < @msg_id + 100"``
It is an error to execute an SQL query with unbound parameters.
Parameter values are specified using ``params``, which is a JSON object
whose keys are parameter names, and whose values are the corresponding
parameter values.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.Struct`
param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]): It is not always possible for Cloud Spanner to infer the right SQL type
from a JSON value. For example, values of type ``BYTES`` and values of
type ``STRING`` both appear in ``params`` as JSON strings.
In these cases, ``param_types`` can be used to specify the exact SQL
type for some or all of the SQL query parameters. See the definition of
``Type`` for more information about SQL types.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.Type`
partition_options (Union[dict, ~google.cloud.spanner_v1.types.PartitionOptions]): Additional options that affect how many partitions are created.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.PartitionOptions`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.spanner_v1.types.PartitionResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "partition_query" not in self._inner_api_calls:
self._inner_api_calls[
"partition_query"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.partition_query,
default_retry=self._method_configs["PartitionQuery"].retry,
default_timeout=self._method_configs["PartitionQuery"].timeout,
client_info=self._client_info,
)
request = spanner_pb2.PartitionQueryRequest(
session=session,
sql=sql,
transaction=transaction,
params=params,
param_types=param_types,
partition_options=partition_options,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("session", session)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["partition_query"](
request, retry=retry, timeout=timeout, metadata=metadata
) | 0.004135 |
def unmajority(p, a, b, c):
"""Unmajority gate."""
p.ccx(a, b, c)
p.cx(c, a)
p.cx(a, b) | 0.009709 |
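A library-free sketch using a hypothetical recorder object that exposes the `ccx`/`cx` interface the function expects, just to show the emitted gate order:
class _Recorder:
    def __init__(self):
        self.ops = []
    def ccx(self, a, b, c):
        self.ops.append(("ccx", a, b, c))
    def cx(self, a, b):
        self.ops.append(("cx", a, b))

p = _Recorder()
unmajority(p, 0, 1, 2)
print(p.ops)  # [('ccx', 0, 1, 2), ('cx', 2, 0), ('cx', 0, 1)]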
def watch(path, handler):
"""Watch a directory for events.
- path should be the directory to watch
- handler should be a function which takes an event_type and src_path
and does something interesting. event_type will be one of 'created',
'deleted', 'modified', or 'moved'. src_path will be the absolute
path to the file that triggered the event.
"""
# let the user just deal with events
@functools.wraps(handler)
def wrapper(self, event):
if not event.is_directory:
return handler(event.event_type, event.src_path)
attrs = {'on_any_event': wrapper}
EventHandler = type("EventHandler", (FileSystemEventHandler,), attrs)
observer = Observer()
observer.schedule(EventHandler(), path=path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join() | 0.001064 |
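A minimal usage sketch with a hypothetical directory path, assuming `watchdog`'s `Observer`/`FileSystemEventHandler`, `functools` and `time` are imported in the module; note the call blocks until interrupted:
def on_change(event_type, src_path):
    print(event_type, src_path)

# watch('/tmp/watched_dir', on_change)   # blocks; press Ctrl-C to stop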
def addLocalHandlers (logger):
"""Adds logging handlers to logger to log to the following local
resources:
1. The terminal
2. localhost:514 (i.e. syslogd)
3. localhost:2514 (i.e. the AIT GUI syslog-like handler)
"""
termlog = logging.StreamHandler()
termlog.setFormatter( LogFormatter() )
logger.addHandler( termlog )
logger.addHandler( SysLogHandler() )
logger.addHandler( SysLogHandler(('localhost', 2514)) ) | 0.021186 |
def secure(self):
'''
secure the sockets for root-only access
'''
log.debug('ConCache securing sockets')
if os.path.exists(self.cache_sock):
os.chmod(self.cache_sock, 0o600)
if os.path.exists(self.update_sock):
os.chmod(self.update_sock, 0o600)
if os.path.exists(self.upd_t_sock):
os.chmod(self.upd_t_sock, 0o600) | 0.004938 |
def _scatter_ndarray(ar, axis=-1, destination=None, blocksize=None):
"""Turn a numpy ndarray into a DistArray or RemoteArray
Args:
ar (array_like)
axis (int, optional): specifies along which axis to split the array to
distribute it. The default is to split along the last axis. `None` means
do not distribute.
destination (int or list of int, optional): Optionally force the array to
go to a specific engine. If an array is to be scattered along an axis,
this should be a list of engine ids with the same length as that axis.
blocksize (int): Optionally control the size of intervals into which the
distributed axis is split (the default splits the distributed axis
evenly over all computing engines).
"""
from .arrays import DistArray, RemoteArray
shape = ar.shape
ndim = len(shape)
if axis is None:
return _directed_scatter([ar], destination=[destination],
blocksize=blocksize)[0]
if axis < -ndim or axis > ndim - 1:
raise DistobValueError('axis out of range')
if axis < 0:
axis = ndim + axis
n = shape[axis]
if n == 1:
return _directed_scatter([ar], destination=[destination])[0]
if isinstance(destination, collections.Sequence):
ne = len(destination) # number of engines to scatter array to
else:
if distob.engine is None:
setup_engines()
ne = distob.engine.nengines # by default scatter across all engines
if blocksize is None:
blocksize = ((n - 1) // ne) + 1
if blocksize > n:
blocksize = n
if isinstance(ar, DistArray):
if axis == ar._distaxis:
return ar
else:
raise DistobError('Currently can only scatter one axis of array')
# Currently, if requested to scatter an array that is already Remote and
# large, first get whole array locally, then scatter. Not really optimal.
if isinstance(ar, RemoteArray) and n > blocksize:
ar = ar._ob
s = slice(None)
subarrays = []
low = 0
for i in range(0, n // blocksize):
high = low + blocksize
index = (s,)*axis + (slice(low, high),) + (s,)*(ndim - axis - 1)
subarrays.append(ar[index])
low += blocksize
if n % blocksize != 0:
high = low + (n % blocksize)
index = (s,)*axis + (slice(low, high),) + (s,)*(ndim - axis - 1)
subarrays.append(ar[index])
subarrays = _directed_scatter(subarrays, destination=destination)
return DistArray(subarrays, axis) | 0.001936 |
def multihead_self_attention_memory_compressed(x,
mask_right,
compression_factor,
kv_channels,
heads,
dropout=0.0,
dropout_broadcast_dims=None,
master_dtype=tf.float32,
slice_dtype=tf.float32,
name="multihead_attention"):
"""Memory-compressed self-attention.
The memory is first average-pooled (strided) to make it shorter by
a factor of compression_factor.
Args:
x: a mtf.Tensor with shape
[<batch_dims>, query_length, io_channels]
mask_right: a boolean
compression_factor: an integer
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
dropout: a floating point value
dropout_broadcast_dims: an optional list of mtf.Dimension
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
A mtf.Tensor with shape [batch, query_length, io_channels]
Raises:
ValueError: if the dimensions do not match.
"""
batch_dims = x.shape.dims[:-2]
length, io_channels = x.shape.dims[-2:]
with tf.variable_scope(name,
default_name="compressed_attention",
values=[x]):
wq, wk, wv, wo = multihead_attention_vars(
x.mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, x.dtype)
memory_antecedent = compress_mean(x, length, compression_factor)
memory_antecedent = rename_length_to_memory_length(memory_antecedent)
memory_length = memory_antecedent.shape.dims[-2]
q = mtf.einsum(
[x, wq],
mtf.Shape(batch_dims + [heads, length, kv_channels]))
k = mtf.einsum(
[memory_antecedent, wk],
mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
v = mtf.einsum(
[memory_antecedent, wv],
mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
if mask_right:
query_pos = mtf.range(x.mesh, length, dtype=tf.int32)
memory_pos = (
mtf.range(x.mesh, memory_length, dtype=tf.int32) * compression_factor
+ (compression_factor - 1))
mask = mtf.cast(mtf.greater(memory_pos, query_pos), x.dtype) * -1e9
else:
mask = None
o = dot_product_attention(
q, k, v, mask, dropout, dropout_broadcast_dims, extra_logit=0.0)
return mtf.einsum(
[o, wo], mtf.Shape(batch_dims + [length, io_channels])) | 0.003239 |
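A numpy sketch of the `mask_right` logic above: each query position may only attend to compressed memory slots whose last covered input position does not exceed the query position (the additive mask is -1e9 where attention is forbidden):
import numpy as np

length, compression_factor = 8, 2
memory_length = length // compression_factor
query_pos = np.arange(length)
memory_pos = np.arange(memory_length) * compression_factor + (compression_factor - 1)
mask = (memory_pos[None, :] > query_pos[:, None]).astype(np.float32) * -1e9
print(mask.shape)  # (8, 4); row i masks memory slots ending after position i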
def publishPublicReport(self):
"""Activate public report for this check.
Returns status message"""
response = self.pingdom.request('PUT', 'reports.public/%s' % self.id)
return response.json()['message'] | 0.008475 |
def gitignore(opt):
"""Will check directories upwards from the Secretfile in order
to ensure the gitignore file is set properly"""
directory = os.path.dirname(abspath(opt.secretfile))
gitignore_file = find_file('.gitignore', directory)
if gitignore_file:
secrets_path = subdir_path(abspath(opt.secrets), gitignore_file)
if secrets_path:
if not in_file(secrets_path, gitignore_file):
e_msg = "The path %s was not found in %s" \
% (secrets_path, gitignore_file)
raise aomi.exceptions.AomiFile(e_msg)
else:
LOG.debug("Using a non-relative secret directory")
else:
raise aomi.exceptions.AomiFile("You should really have a .gitignore") | 0.001304 |
def dist_strcmp95(src, tar, long_strings=False):
"""Return the strcmp95 distance between two strings.
This is a wrapper for :py:meth:`Strcmp95.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
long_strings : bool
Set to True to increase the probability of a match when the number of
matched characters is large. This option allows for a little more
tolerance when the strings are large. It is not an appropriate test
when comparing fixed length fields such as phone and social security
numbers.
Returns
-------
float
Strcmp95 distance
Examples
--------
>>> round(dist_strcmp95('cat', 'hat'), 12)
0.222222222222
>>> round(dist_strcmp95('Niall', 'Neil'), 12)
0.1545
>>> round(dist_strcmp95('aluminum', 'Catalan'), 12)
0.345238095238
>>> round(dist_strcmp95('ATCG', 'TAGC'), 12)
0.166666666667
"""
return Strcmp95().dist(src, tar, long_strings) | 0.000948 |
def getBuffer(x):
"""
Copy @x into a (modifiable) ctypes byte array
"""
b = bytes(x)
return (c_ubyte * len(b)).from_buffer_copy(b) | 0.006369 |
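A short usage sketch, assuming `c_ubyte` is imported from `ctypes` in the defining module; the returned array is a modifiable copy of the input bytes:
buf = getBuffer(b"\x01\x02\x03")
buf[0] = 0xFF              # the copy is writable
print(bytes(buf))          # b'\xff\x02\x03'
print(len(buf))            # 3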
def get_env_dirs(self):
"""Return list of directories in env_root."""
repo_dirs = next(os.walk(self.env_root))[1]
if '.git' in repo_dirs:
repo_dirs.remove('.git') # not relevant for any repo operations
return repo_dirs | 0.007605 |
def cli(env, identifier):
"""Create credentials for an IBM Cloud Object Storage Account"""
mgr = SoftLayer.ObjectStorageManager(env.client)
credential = mgr.create_credential(identifier)
table = formatting.Table(['id', 'password', 'username', 'type_name'])
table.sortby = 'id'
table.add_row([
credential['id'],
credential['password'],
credential['username'],
credential['type']['name']
])
env.fout(table) | 0.002128 |
def bna_config_cmd_status_output_status_string(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
bna_config_cmd_status = ET.Element("bna_config_cmd_status")
config = bna_config_cmd_status
output = ET.SubElement(bna_config_cmd_status, "output")
status_string = ET.SubElement(output, "status-string")
status_string.text = kwargs.pop('status_string')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003802 |
def set_log_format(self, log_type, log_format):
'''Configures log format
Arguments:
log_type (:obj:`str`): log type (error, debug or stream)
log_format (:obj:`str`): log format (ex:"Log: %(message)s | Log level:%(levelname)s |
Date:%(asctime)s',datefmt='%m/%d/%Y %I:%M:%S")
'''
if log_type not in ('error', 'stream', 'debug'):
self.log.debug('Log type must be error, stream, or debug')
else:
self.default_formatter = logging.Formatter(log_format)
if log_type == 'error':
self.error_handler.setFormatter(self.default_formatter)
elif log_type == 'debug':
self.debug_handler.setFormatter(self.default_formatter)
elif log_type == 'stream':
self.stream_handler.setFormatter(self.default_formatter) | 0.004396 |
def get_config(self, request, **kwargs):
"""
Get the arguments given to the template tag element and complete these
with the ones from the settings.py if necessary.
"""
config = kwargs
config_from_settings = deepcopy(inplace_settings.DEFAULT_INPLACE_EDIT_OPTIONS)
config_one_by_one = inplace_settings.DEFAULT_INPLACE_EDIT_OPTIONS_ONE_BY_ONE
if not config_one_by_one:
# Solution 1: Using default config only if none specified.
if not config and config_from_settings:
config = config_from_settings
else:
# Solution 2: Updating the configured config with the default one.
config = dict(config_from_settings, **config)
return config | 0.005168 |
def list_message_files (package, suffix=".mo"):
"""Return list of all found message files and their installation paths."""
for fname in glob.glob("po/*" + suffix):
# basename (without extension) is a locale name
localename = os.path.splitext(os.path.basename(fname))[0]
domainname = "%s.mo" % package.lower()
yield (fname, os.path.join(
"share", "locale", localename, "LC_MESSAGES", domainname)) | 0.004474 |
def tags(self):
"""Returns a dictionary that lists all available tags that can be used
for further filtering
"""
ret = {}
for typ in _meta_fields_twig:
if typ in ['uniqueid', 'plugin', 'feedback', 'fitting', 'history', 'twig', 'uniquetwig']:
continue
k = '{}s'.format(typ)
ret[k] = getattr(self, k)
return ret | 0.007299 |
def parse_derived_variable(self, node):
"""
Parses <DerivedVariable>
@param node: Node containing the <DerivedVariable> element
@type node: xml.etree.Element
@raise ParseError: Raised when no name is specified for the derived variable.
"""
if 'name' in node.lattrib:
name = node.lattrib['name']
elif 'exposure' in node.lattrib:
name = node.lattrib['exposure']
else:
self.raise_error('<DerivedVariable> must specify a name')
params = dict()
for attr_name in ['dimension', 'exposure', 'select', 'value', 'reduce', 'required']:
if attr_name in node.lattrib:
params[attr_name] = node.lattrib[attr_name]
self.current_regime.add_derived_variable(DerivedVariable(name, **params)) | 0.005974 |
def nmap_fp(target, oport=80, cport=81):
"""nmap fingerprinting
nmap_fp(target, [oport=80,] [cport=81,]) -> list of best guesses with accuracy
"""
sigs = nmap_sig(target, oport, cport)
return nmap_search(sigs) | 0.004525 |
def check_conf_percentage_validity(conf_percentage):
"""
Ensures that `conf_percentage` is in (0, 100). Raises a helpful ValueError
if otherwise.
"""
msg = "conf_percentage MUST be a number between 0.0 and 100."
condition_1 = isinstance(conf_percentage, Number)
if not condition_1:
raise ValueError(msg)
else:
condition_2 = 0 < conf_percentage < 100
if not condition_2:
raise ValueError(msg)
return None | 0.002105 |
def del_unused_keyframes(self):
"""Scans through list of keyframes in the channel and removes those
which are not in self.key_frame_list."""
skl = self.key_frame_list.sorted_key_list()
unused_keys = [k for k in self.dct['keys']
if k not in skl]
for k in unused_keys:
del self.dct['keys'][k] | 0.005479 |
def get_templates(self, limit=100, offset=0):
"""
Get all account templates
"""
url = self.TEMPLATES_URL + "?limit=%s&offset=%s" % (limit, offset)
connection = Connection(self.token)
connection.set_url(self.production, url)
return connection.get_request() | 0.006369 |
def _set_shutdown_management_oper(self, v, load=False):
"""
Setter method for shutdown_management_oper, mapped from YANG variable /interface/management/shutdown_management_oper (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_shutdown_management_oper is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_shutdown_management_oper() directly.
YANG Description: Show the status of this management interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="shutdown_management_oper", rest_name="oper-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Show the status of this management interface.', u'alt-name': u'oper-status'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """shutdown_management_oper must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="shutdown_management_oper", rest_name="oper-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Show the status of this management interface.', u'alt-name': u'oper-status'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='string', is_config=False)""",
})
self.__shutdown_management_oper = t
if hasattr(self, '_set'):
self._set() | 0.005991 |
def run(cmd, cwd=None, env=None, timeout=None, stream=False, warn_only=False):
"""
:param cmd: command to run
:param cwd: change dir into before execute, default is current dir
:param env: environments to pass to subprocess
:param timeout: timeout
:param stream: stream output, default is False, block until finished
:param warn_only: default False, set to True to allow unsuccessful result
"""
proc = Proc(
expand_args(cmd),
os.path.realpath(cwd or os.getcwd()),
env=env or {},
timeout=timeout or DEFAULT_TIMEOUT
)
if not stream:
proc.block(warn_only=warn_only)
return proc | 0.001508 |
def parse_value(self, value):
"""Cast value to `bool`."""
parsed = super(BoolField, self).parse_value(value)
return bool(parsed) if parsed is not None else None | 0.01087 |
def read_cstring(self, terminator=b'\x00'):
"""Reads a single null termianted string
:return: string without bytes
:rtype: :class:`bytes`
"""
null_index = self.data.find(terminator, self.offset)
if null_index == -1:
raise RuntimeError("Reached end of buffer")
result = self.data[self.offset:null_index] # bytes without the terminator
self.offset = null_index + len(terminator) # advance offset past terminator
return result | 0.007843 |
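A self-contained sketch that borrows `read_cstring` as a method of a hypothetical buffer holder with the `data`/`offset` attributes it expects:
class _Buf:
    def __init__(self, data):
        self.data = data
        self.offset = 0

_Buf.read_cstring = read_cstring  # reuse the function above as a method

b = _Buf(b"hello\x00world\x00")
print(b.read_cstring())  # b'hello'
print(b.read_cstring())  # b'world'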
def config(name,
config,
write=True):
'''
Builds syslog-ng configuration. This function is intended to be used from
the state module, users should not use it directly!
name : the id of the Salt document or it is the format of <statement name>.id
config : the parsed YAML code
write : if True, it writes the config into the configuration file,
otherwise just returns it
CLI Example:
.. code-block:: bash
salt '*' syslog_ng.config name='s_local' config="[{'tcp':[{'ip':'127.0.0.1'},{'port':1233}]}]"
'''
_build_config_tree(name, config)
configs = _render_configuration()
if __opts__.get('test', False):
comment = 'State syslog_ng will write \'{0}\' into {1}'.format(
configs,
__SYSLOG_NG_CONFIG_FILE
)
return _format_state_result(name, result=None, comment=comment)
succ = write
if write:
succ = _write_config(config=configs)
return _format_state_result(name, result=succ,
changes={'new': configs, 'old': ''}) | 0.00273 |
def get_entity_loaded_propnames(entity):
""" Get entity property names that are loaded (e.g. won't produce new queries)
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set
"""
ins = inspect(entity)
keynames = get_entity_propnames(ins)
# If the entity is not transient -- exclude unloaded keys
# Transient entities won't load these anyway, so it's safe to include all columns and get defaults
if not ins.transient:
keynames -= ins.unloaded
# If the entity is expired -- reload expired attributes as well
# Expired attributes are usually unloaded as well!
if ins.expired:
keynames |= ins.expired_attributes
# Finish
return keynames | 0.003699 |
def export(target_folder, source_folders = None, class_type ='all', raise_errors = False):
"""
exports the existing scripts/instruments (future: probes) into folder as .b26 files
Args:
target_folder: target location of the created .b26 script files
source_folders: single path or list of paths containing the python script files; can also be just the name of a module
class_type: string, one of the 4 following options
-probes (exports probes) --not implemented yet--
-scripts (exports scripts)
-instruments (exports instruments)
-all (exports instruments, scripts and probes)
raise_errors: forwarded to the export helper functions
Returns:
"""
if class_type not in ('all', 'scripts', 'instruments', 'probes'):
print('unknown type to export')
return
if not os.path.isdir(target_folder):
try:
os.mkdir(target_folder)
except:
print((target_folder, ' is invalid target folder'))
target_folder = None
if target_folder is not None:
if source_folders is None:
module_list = [os.path.dirname(os.path.dirname(inspect.getfile(inspect.currentframe())))]
elif isinstance(source_folders, str):
module_list = [source_folders]
elif isinstance(source_folders, list):
module_list = source_folders
else:
raise TypeError('unknown type for source_folders')
for path_to_module in module_list:
if class_type in ('all', 'scripts'):
export_default_scripts(target_folder, source_folder=path_to_module, raise_errors=raise_errors)
if class_type in ('all', 'instruments'):
export_default_instruments(target_folder, path_to_module, raise_errors=raise_errors)
if class_type in ('all', 'probes'):
print('WARNING: probes currently not supported') | 0.006543 |
def fetch_logins(roles, repo):
"""Fetch logins for users with given roles.
"""
users = set()
if 'stargazer' in roles:
printmp('Fetching stargazers')
users |= set(repo.stargazers())
if 'collaborator' in roles:
printmp('Fetching collaborators')
users |= set(repo.collaborators())
if 'issue' in roles:
printmp('Fetching issues creators')
users |= set([i.user for i in repo.issues(state='all')])
return users | 0.002083 |
def register_blueprint(self, blueprint, register_with_babel=True, **options):
"""
Like :meth:`~flask.Flask.register_blueprint`, but if ``register_with_babel``
is True, then we also allow the Babel Bundle an opportunity to register language
code prefixed URLs.
"""
if self.unchained.babel_bundle and register_with_babel:
self.unchained.babel_bundle.register_blueprint(self, blueprint, **options)
return super().register_blueprint(blueprint, **options) | 0.009653 |
def add_model(self, *args, **kwargs):
# type: (*Any, **Any) -> Part
"""Add a new child model to this model.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:return: a :class:`Part` of category `MODEL`
"""
if self.category != Category.MODEL:
raise APIError("Part should be of category MODEL")
return self._client.create_model(self, *args, **kwargs) | 0.008584 |
def to_str(self):
'''Convert to string.'''
pairs = []
for name, value in self.get_all():
if value and self._wrap_width:
pairs.append('{0}:{1}'.format(
name,
'\r\n'.join(textwrap.wrap(
value, width=self._wrap_width,
drop_whitespace=False, initial_indent=' ',
subsequent_indent=' '
))
))
elif value:
pairs.append('{0}: {1}'.format(name, value))
else:
pairs.append('{0}:'.format(name))
pairs.append('')
return '\r\n'.join(pairs) | 0.002865 |
def get_ansible_by_id(self, ansible_id):
"""Return a ansible with that id or None."""
for elem in self.ansible_hosts:
if elem.id == ansible_id:
return elem
return None | 0.009091 |
def to_dict(self):
"""Pack the stats computed into a dictionary."""
return {
'high': self.high,
'low': self.low,
'mean': self.mean,
'count': self.count,
'deviation': self.deviation,
} | 0.007491 |
def append_to_keys(adict, preffix):
"""
Parameters
----------
adict:
preffix:
Returns
-------
"""
return {preffix + str(key): (value if isinstance(value, dict) else value)
for key, value in list(adict.items())} | 0.003846 |
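A one-line usage example of the helper above:
print(append_to_keys({'a': 1, 'b': {'c': 2}}, 'x_'))
# {'x_a': 1, 'x_b': {'c': 2}}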
def withValues(cls, *values):
"""Creates a subclass with discreet values constraint.
"""
class X(cls):
subtypeSpec = cls.subtypeSpec + constraint.SingleValueConstraint(
*values)
X.__name__ = cls.__name__
return X | 0.007092 |
def put(self, url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
:param **kwargs: Optional arguments that ``request`` takes.
"""
return self.request('put', url, data=data, **kwargs) | 0.007371 |
def unban_chat_member(self, user_id):
"""
Use this method to unban a previously kicked user in a supergroup.
The bot must be an administrator in the group for this to work.
:param int user_id: Unique identifier of the target user
"""
return self.bot.api_call("unbanChatMember", chat_id=self.id, user_id=user_id) | 0.008333 |
def _parse_flags(element):
"""Parse OSM XML element for generic data.
Args:
element (etree.Element): Element to parse
Returns:
tuple: Generic OSM data for object instantiation
"""
visible = bool(element.get('visible'))
user = element.get('user')
timestamp = element.get('timestamp')
if timestamp:
timestamp = utils.Timestamp.parse_isoformat(timestamp)
tags = {}
try:
for tag in element['tag']:
key = tag.get('k')
value = tag.get('v')
tags[key] = value
except AttributeError:
pass
return visible, user, timestamp, tags | 0.001522 |
def set_blend_func(self, srgb='one', drgb='zero',
salpha=None, dalpha=None):
"""Specify pixel arithmetic for RGB and alpha
Parameters
----------
srgb : str
Source RGB factor.
drgb : str
Destination RGB factor.
salpha : str | None
Source alpha factor. If None, ``srgb`` is used.
dalpha : str | None
Destination alpha factor. If None, ``drgb`` is used.
"""
salpha = srgb if salpha is None else salpha
dalpha = drgb if dalpha is None else dalpha
self.glir.command('FUNC', 'glBlendFuncSeparate',
srgb, drgb, salpha, dalpha) | 0.007112 |
def public_key_to_connection_id(self, public_key):
"""
Get stored connection id for a public key.
"""
with self._connections_lock:
for connection_id, connection_info in self._connections.items():
if connection_info.public_key == public_key:
return connection_id
return None | 0.005464 |
def getAtomLinesForResidueInRosettaStructure(self, resid):
'''We assume a Rosetta-generated structure where residues are uniquely identified by number.'''
lines = [line for line in self.lines if line[0:4] == "ATOM" and resid == int(line[22:27])]
if not lines:
#print('Failed searching for residue %d.' % resid)
#print("".join([line for line in self.lines if line[0:4] == "ATOM"]))
raise Exception("Could not find the ATOM/HETATM line corresponding to residue '%(resid)s'." % vars())
return lines | 0.01421 |
def _evolve(self, state, qargs=None):
"""Evolve a quantum state by the QuantumChannel.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
QuantumState: the output quantum state.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions.
"""
# If subsystem evolution we use the SuperOp representation
if qargs is not None:
return SuperOp(self)._evolve(state, qargs)
# Otherwise we compute full evolution directly
state = self._format_state(state)
if state.shape[0] != self._input_dim:
raise QiskitError(
"QuantumChannel input dimension is not equal to state dimension."
)
if state.ndim == 1 and self._data[1] is None and \
self._data[0].shape[0] // self._output_dim == 1:
# If the shape of the stinespring operator is equal to the output_dim
# evolution of a state vector psi -> stine.psi
return np.dot(self._data[0], state)
# Otherwise we always return a density matrix
state = self._format_state(state, density_matrix=True)
stine_l, stine_r = self._data
if stine_r is None:
stine_r = stine_l
din, dout = self.dim
dtr = stine_l.shape[0] // dout
shape = (dout, dtr, din)
return np.einsum('iAB,BC,jAC->ij', np.reshape(stine_l, shape), state,
np.reshape(np.conjugate(stine_r), shape)) | 0.002323 |
def get_events(self, *args, **kwargs):
"""
Returns a full EventDataWrapper object for this creator.
/creators/{creatorId}/events
:returns: EventDataWrapper -- A new request to API. Contains full results set.
"""
from .event import Event, EventDataWrapper
return self.get_related_resource(Event, EventDataWrapper, args, kwargs) | 0.007792 |
def competence(stochastic):
"""
The competence function for Binary One-At-A-Time Metropolis
"""
if stochastic.dtype in bool_dtypes:
return 2
elif isinstance(stochastic, distributions.Bernoulli):
return 2
elif (isinstance(stochastic, distributions.Categorical) and
(len(stochastic.parents['p'])==2)):
return 2
else:
return 0 | 0.015385 |
def close(self):
""" Shut down, closing any open connections in the pool.
"""
if not self._closed:
self._closed = True
if self._pool is not None:
self._pool.close()
self._pool = None | 0.007634 |
def batch_stats(self, funcs:Collection[Callable]=None, ds_type:DatasetType=DatasetType.Train)->Tensor:
"Grab a batch of data and call reduction function `func` per channel"
funcs = ifnone(funcs, [torch.mean,torch.std])
x = self.one_batch(ds_type=ds_type, denorm=False)[0].cpu()
return [func(channel_view(x), 1) for func in funcs] | 0.030471 |
def _double_prefix(self):
"""Grow the given deque by doubling, but don't split the second chunk just
because the first one is small.
"""
new_len = max(len(self._buf[0]) * 2, (len(self._buf[0]) + len(self._buf[1])))
self._merge_prefix(new_len) | 0.014184 |
def look(self):
"""Look at the next token."""
old_token = next(self)
result = self.current
self.push(result)
self.current = old_token
return result | 0.010256 |
def complete(self, flag_message="Complete", padding=None, force=False):
""" Log Level: :attr:COMPLETE
@flag_message: #str flags the message with the given text
using :func:flag
@padding: #str 'top', 'bottom' or 'all', adds a new line to the
specified area with :func:padd
(the flag is colorized with :func:colorize using the default complete color)
@force: #bool whether or not to force the message to log in spite
of the assigned log level
..
from vital.debug import Logg
logg = Logg(loglevel="v")
logg("World").complete("Hello")
# (Hello) World
logg("Hello world").complete()
# (Complete) Hello world
..
"""
if self.should_log(self.COMPLETE) or force:
self._print_message(
flag_message=flag_message, color=colors.complete_color,
padding=padding) | 0.001969 |
def widgets(self):
"""Gets all (first) child wigets"""
w = []
for i in range(self.count()):
w.append(self.widget(i))
return w | 0.011834 |
def setup_default_logger(logfile=None, level=logging.DEBUG, formatter=None, maxBytes=0, backupCount=0, disableStderrLogger=False):
"""
Deprecated. Use `logzero.loglevel(..)`, `logzero.logfile(..)`, etc.
Globally reconfigures the default `logzero.logger` instance.
Usage:
.. code-block:: python
from logzero import logger, setup_default_logger
setup_default_logger(level=logging.WARN)
logger.info("hello") # this will not be displayed anymore because minimum loglevel was set to WARN
:arg string logfile: If set, also write logs to the specified filename.
:arg int level: Minimum `logging-level <https://docs.python.org/2/library/logging.html#logging-levels>`_ to display (default: `logging.DEBUG`).
:arg Formatter formatter: `Python logging Formatter object <https://docs.python.org/2/library/logging.html#formatter-objects>`_ (by default uses the internal LogFormatter).
:arg int maxBytes: Size of the logfile when rollover should occur. Defaults to 0, rollover never occurs.
:arg int backupCount: Number of backups to keep. Defaults to 0, rollover never occurs.
:arg bool disableStderrLogger: Should the default stderr logger be disabled. Defaults to False.
"""
global logger
logger = setup_logger(name=LOGZERO_DEFAULT_LOGGER, logfile=logfile, level=level, formatter=formatter, disableStderrLogger=disableStderrLogger)
return logger | 0.00632 |
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if sock is False: # Platform-specific: AppEngine
return False
if sock is None: # Connection already closed (such as by httplib).
return True
if not HAS_SELECT:
return False
try:
return bool(wait_for_read(sock, timeout=0.0))
except SelectorError:
return True | 0.001401 |
def _load_from_ini_py2(ini):
"""
Load settings from a single ini configuration file under Python 2.
:param ini: path to the ini configuration file
:return: an OrderedDict of settings
"""
logger.debug('Python 2 does not support a custom default_section; the default is: %s' % _DEFAULT_SECTION)
cf = configparser.ConfigParser()
cf.read(ini)
settings = OrderedDict()
for k, v in cf.defaults().items():
settings[k.upper()] = convert_value(v)
cf._defaults = {}
for section in cf.sections():
section_dict = OrderedDict()
for option in cf.items(section):
section_dict[option[0]] = option[1]
settings[section] = section_dict
return settings | 0.001684 |
def adc_to_percentage(value, max_volts, clamp=True):
"""
Convert the ADC raw value to a percentage.
"""
percentage = (100.0 / const.ADC_MAX_VAL) * value
return max(min(100, percentage), 0) if clamp else percentage | 0.004292 |
def SetProjection(self, sref):
"""Sets the spatial reference.
Intercepts the gdal.Dataset call to ensure use as a property setter.
Arguments:
sref -- SpatialReference or any format supported by the constructor
"""
if not hasattr(sref, 'ExportToWkt'):
sref = SpatialReference(sref)
self._sref = sref
self.ds.SetProjection(sref.ExportToWkt()) | 0.004785 |
def listDatasets(self, dataset="", parent_dataset="", is_dataset_valid=1,
release_version="", pset_hash="", app_name="", output_module_label="", global_tag="",
processing_version=0, acquisition_era_name="", run_num=-1,
physics_group_name="", logical_file_name="", primary_ds_name="", primary_ds_type="",
processed_ds_name='', data_tier_name="", dataset_access_type="VALID", prep_id='', create_by="", last_modified_by="",
min_cdate='0', max_cdate='0', min_ldate='0', max_ldate='0', cdate='0',
ldate='0', detail=False, dataset_id=-1):
"""
API to list dataset(s) in DBS
* You can use ANY combination of these parameters in this API
* In the absence of parameters, all valid datasets known to the DBS instance will be returned
:param dataset: Full dataset (path) of the dataset.
:type dataset: str
:param parent_dataset: Full dataset (path) of the parent dataset
:type parent_dataset: str
:param release_version: cmssw version
:type release_version: str
:param pset_hash: pset hash
:type pset_hash: str
:param app_name: Application name (generally it is cmsRun)
:type app_name: str
:param output_module_label: output_module_label
:type output_module_label: str
:param global_tag: global_tag
:type global_tag: str
:param processing_version: Processing Version
:type processing_version: str
:param acquisition_era_name: Acquisition Era
:type acquisition_era_name: str
:param run_num: Specify a specific run number or range. Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. run_num=1 is not allowed.
:type run_num: int,list,str
:param physics_group_name: List only dataset having physics_group_name attribute
:type physics_group_name: str
:param logical_file_name: List dataset containing the logical_file_name
:type logical_file_name: str
:param primary_ds_name: Primary Dataset Name
:type primary_ds_name: str
:param primary_ds_type: Primary Dataset Type (Type of data, MC/DATA)
:type primary_ds_type: str
:param processed_ds_name: List datasets having this processed dataset name
:type processed_ds_name: str
:param data_tier_name: Data Tier
:type data_tier_name: str
:param dataset_access_type: Dataset Access Type ( PRODUCTION, DEPRECATED etc.)
:type dataset_access_type: str
:param prep_id: prep_id
:type prep_id: str
:param create_by: Creator of the dataset
:type create_by: str
:param last_modified_by: Last modifier of the dataset
:type last_modified_by: str
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: List all details of a dataset
:type detail: bool
:param dataset_id: dataset table primary key used by CMS Computing Analytics.
:type dataset_id: int, long, str
:returns: List of dictionaries containing the following keys (dataset). If the detail option is used. The dictionary contain the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
"""
dataset = dataset.replace("*", "%")
parent_dataset = parent_dataset.replace("*", "%")
release_version = release_version.replace("*", "%")
pset_hash = pset_hash.replace("*", "%")
app_name = app_name.replace("*", "%")
output_module_label = output_module_label.replace("*", "%")
global_tag = global_tag.replace("*", "%")
logical_file_name = logical_file_name.replace("*", "%")
physics_group_name = physics_group_name.replace("*", "%")
primary_ds_name = primary_ds_name.replace("*", "%")
primary_ds_type = primary_ds_type.replace("*", "%")
data_tier_name = data_tier_name.replace("*", "%")
dataset_access_type = dataset_access_type.replace("*", "%")
processed_ds_name = processed_ds_name.replace("*", "%")
acquisition_era_name = acquisition_era_name.replace("*", "%")
#processing_version = processing_version.replace("*", "%")
        #create_by and last_modified_by have to be fully spelled out; no wildcards are allowed.
        #We get them from the request head, so they can be either an HN account name or a DN.
        #This depends on how a user's account is set up.
        #
        # In the next release we will require that the dataset has no wildcard in it.
        # DBS will reject wildcard searches on the dataset name in the listDatasets call.
        # One should separate the dataset into primary, processed and datatier parts if any wildcard is used.
        # YG Oct 26, 2016
        # Some users were overwhelmed by the API change, so we split the wildcarded dataset on the server instead of on the client.
        # YG Dec. 9 2016
        #
        # run_num=1 caused a full table scan and CERN DBS reported that some of the queries ran for more than 50 hours.
        # We will disable all run_num=1 calls in DBS. run_num=1 is OK when logical_file_name is given.
        # YG Jan. 15 2019
#
if (run_num != -1 and logical_file_name ==''):
for r in parseRunRange(run_num):
if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
if r == 1 or r == '1':
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
elif isinstance(r, run_tuple):
if r[0] == r[1]:
dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.",
self.logger.exception)
elif r[0] <= 1 <= r[1]:
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
if( dataset and ( dataset == "/%/%/%" or dataset== "/%" or dataset == "/%/%" ) ):
dataset=''
elif( dataset and ( dataset.find('%') != -1 ) ) :
junk, primary_ds_name, processed_ds_name, data_tier_name = dataset.split('/')
dataset = ''
if ( primary_ds_name == '%' ):
primary_ds_name = ''
if( processed_ds_name == '%' ):
processed_ds_name = ''
if ( data_tier_name == '%' ):
data_tier_name = ''
try:
dataset_id = int(dataset_id)
except:
dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input for dataset_id that has to be an int.",
self.logger.exception, 'dataset_id has to be an int.')
if create_by.find('*')!=-1 or create_by.find('%')!=-1 or last_modified_by.find('*')!=-1\
or last_modified_by.find('%')!=-1:
dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input for create_by or last_modified_by.\
No wildcard allowed.", self.logger.exception, 'No wildcards allowed for create_by or last_modified_by')
try:
if isinstance(min_cdate, basestring) and ('*' in min_cdate or '%' in min_cdate):
min_cdate = 0
else:
try:
min_cdate = int(min_cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for min_cdate")
if isinstance(max_cdate, basestring) and ('*' in max_cdate or '%' in max_cdate):
max_cdate = 0
else:
try:
max_cdate = int(max_cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_cdate")
if isinstance(min_ldate, basestring) and ('*' in min_ldate or '%' in min_ldate):
min_ldate = 0
else:
try:
min_ldate = int(min_ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for min_ldate")
if isinstance(max_ldate, basestring) and ('*' in max_ldate or '%' in max_ldate):
max_ldate = 0
else:
try:
max_ldate = int(max_ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_ldate")
if isinstance(cdate, basestring) and ('*' in cdate or '%' in cdate):
cdate = 0
else:
try:
cdate = int(cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for cdate")
if isinstance(ldate, basestring) and ('*' in ldate or '%' in ldate):
ldate = 0
else:
try:
ldate = int(ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for ldate")
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listDatasets. %s \n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
detail = detail in (True, 1, "True", "1", 'true')
try:
return self.dbsDataset.listDatasets(dataset, parent_dataset, is_dataset_valid, release_version, pset_hash,
app_name, output_module_label, global_tag, processing_version, acquisition_era_name,
run_num, physics_group_name, logical_file_name, primary_ds_name, primary_ds_type, processed_ds_name,
data_tier_name, dataset_access_type, prep_id, create_by, last_modified_by,
min_cdate, max_cdate, min_ldate, max_ldate, cdate, ldate, detail, dataset_id)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listdatasets. %s.\n Exception trace: \n %s" % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) | 0.009979 |
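A hedged sketch of how this server endpoint is usually reached from the DBS3 Python client; the URL, dataset pattern and field values below are placeholders and assume the dbs3-client package.
# Illustrative client-side call; assumes the dbs3-client package and a reachable
# DBS reader instance (the URL below is a placeholder).
from dbs.apis.dbsClient import DbsApi

api = DbsApi(url='https://cmsweb.cern.ch/dbs/prod/global/DBSReader')
datasets = api.listDatasets(
    primary_ds_name='SingleMuon',      # '*' wildcards are converted to '%' server-side
    data_tier_name='MINIAOD',
    dataset_access_type='VALID',
    detail=True,
)
for d in datasets[:5]:
    print(d['dataset'], d.get('creation_date'))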
def update_permissions_for_group(apps, schema_editor):
'''
Update permissions for some users.
Give bulk-delete permissions to moderators.
Give edit permission to moderators and editors in order
    to display the 'Main' page in the explorer.
'''
db_alias = schema_editor.connection.alias
try:
# Django 1.9
emit_post_migrate_signal(2, False, db_alias)
except TypeError:
# Django < 1.9
try:
# Django 1.8
emit_post_migrate_signal(2, False, 'default', db_alias)
except TypeError: # Django < 1.8
emit_post_migrate_signal([], 2, False, 'default', db_alias)
Group = apps.get_model('auth.Group')
Permission = apps.get_model('auth.Permission')
GroupPagePermission = apps.get_model('wagtailcore.GroupPagePermission')
SectionIndexPage = apps.get_model('core.SectionIndexPage')
MainPage = apps.get_model('core.Main')
moderator_group = Group.objects.filter(name='Moderators').first()
editor_group = Group.objects.filter(name='Editors').first()
if moderator_group:
sections = SectionIndexPage.objects.first()
GroupPagePermission.objects.get_or_create(
group_id=moderator_group.id,
page_id=sections.id,
permission_type='bulk_delete'
)
main = MainPage.objects.first()
GroupPagePermission.objects.get_or_create(
group_id=moderator_group.id,
page_id=main.id,
permission_type='edit'
)
if editor_group:
main = MainPage.objects.first()
GroupPagePermission.objects.get_or_create(
group_id=editor_group.id,
page_id=main.id,
permission_type='edit'
) | 0.000571 |
def geweke(x, first=.1, last=.5, intervals=20, maxlag=20):
"""Return z-scores for convergence diagnostics.
Compare the mean of the first % of series with the mean of the last % of
series. x is divided into a number of segments for which this difference is
computed. If the series is converged, this score should oscillate between
-1 and 1.
Parameters
----------
x : array-like
The trace of some stochastic parameter.
first : float
The fraction of series at the beginning of the trace.
last : float
The fraction of series at the end to be compared with the section
at the beginning.
intervals : int
The number of segments.
maxlag : int
Maximum autocorrelation lag for estimation of spectral variance
Returns
-------
scores : list [[]]
Return a list of [i, score], where i is the starting index for each
interval and score the Geweke score on the interval.
Notes
-----
The Geweke score on some series x is computed by:
.. math:: \frac{E[x_s] - E[x_e]}{\sqrt{V[x_s] + V[x_e]}}
where :math:`E` stands for the mean, :math:`V` the variance,
:math:`x_s` a section at the start of the series and
:math:`x_e` a section at the end of the series.
References
----------
Geweke (1992)
"""
if not has_sm:
print("statsmodels not available. Geweke diagnostic cannot be calculated.")
return
if np.ndim(x) > 1:
return [geweke(y, first, last, intervals) for y in np.transpose(x)]
# Filter out invalid intervals
if first + last >= 1:
raise ValueError(
"Invalid intervals for Geweke convergence analysis",
(first, last))
# Initialize list of z-scores
zscores = [None] * intervals
# Starting points for calculations
starts = np.linspace(0, int(len(x)*(1.-last)), intervals).astype(int)
# Loop over start indices
for i,s in enumerate(starts):
# Size of remaining array
x_trunc = x[s:]
n = len(x_trunc)
# Calculate slices
first_slice = x_trunc[:int(first * n)]
last_slice = x_trunc[int(last * n):]
z = (first_slice.mean() - last_slice.mean())
z /= np.sqrt(spec(first_slice)/len(first_slice) +
spec(last_slice)/len(last_slice))
zscores[i] = len(x) - n, z
return zscores | 0.002072 |
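A self-contained illustration of the same idea that substitutes plain sample variances for the spectral-density estimate spec() used above, so the z-scores are only approximate; it still separates a stationary chain from a drifting one.
import numpy as np

def geweke_simple(x, first=0.1, last=0.5, intervals=20):
    # Simplified Geweke-style diagnostic: early vs. late segment means,
    # normalised by naive i.i.d. variance estimates instead of spectral densities.
    x = np.asarray(x, dtype=float)
    starts = np.linspace(0, int(len(x) * (1.0 - last)), intervals).astype(int)
    scores = []
    for s in starts:
        tail = x[s:]
        n = len(tail)
        a, b = tail[:int(first * n)], tail[int(last * n):]
        z = (a.mean() - b.mean()) / np.sqrt(a.var(ddof=1) / len(a) + b.var(ddof=1) / len(b))
        scores.append((len(x) - n, z))
    return scores

rng = np.random.default_rng(0)
stationary = rng.normal(size=5000)           # converged-looking chain: small |z| scores
drifting = np.cumsum(rng.normal(size=5000))  # non-stationary chain: very large |z| scores
print(max(abs(z) for _, z in geweke_simple(stationary)))
print(max(abs(z) for _, z in geweke_simple(drifting)))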
def pair_SAM_alignments_with_buffer(
alignments,
max_buffer_size=30000000,
primary_only=False):
    '''Iterate over SAM alignments with a buffer, position-sorted paired-end
    Args:
        alignments (iterator of SAM/BAM alignments): the alignments to wrap
        max_buffer_size (int): maximal number of alignments to keep in memory.
primary_only (bool): for each read, consider only the primary line
(SAM flag 0x900 = 0). The SAM specification requires one and only
one of those for each read.
Yields:
2-tuples with each pair of alignments.
'''
almnt_buffer = {}
ambiguous_pairing_counter = 0
for almnt in alignments:
if not almnt.paired_end:
raise ValueError(
"Sequence of paired-end alignments expected, but got single-end alignment.")
if almnt.pe_which == "unknown":
raise ValueError(
"Cannot process paired-end alignment found with 'unknown' 'pe_which' status.")
# FIXME: almnt.not_primary_alignment currently means secondary
if primary_only and (almnt.not_primary_alignment or almnt.supplementary):
continue
matekey = (
almnt.read.name,
"second" if almnt.pe_which == "first" else "first",
almnt.mate_start.chrom if almnt.mate_aligned else None,
almnt.mate_start.pos if almnt.mate_aligned else None,
almnt.iv.chrom if almnt.aligned else None,
almnt.iv.start if almnt.aligned else None,
-almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None)
if matekey in almnt_buffer:
if len(almnt_buffer[matekey]) == 1:
mate = almnt_buffer[matekey][0]
del almnt_buffer[matekey]
else:
mate = almnt_buffer[matekey].pop(0)
if ambiguous_pairing_counter == 0:
ambiguous_pairing_first_occurance = matekey
ambiguous_pairing_counter += 1
if almnt.pe_which == "first":
yield (almnt, mate)
else:
yield (mate, almnt)
else:
almntkey = (
almnt.read.name, almnt.pe_which,
almnt.iv.chrom if almnt.aligned else None,
almnt.iv.start if almnt.aligned else None,
almnt.mate_start.chrom if almnt.mate_aligned else None,
almnt.mate_start.pos if almnt.mate_aligned else None,
almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None)
if almntkey not in almnt_buffer:
almnt_buffer[almntkey] = [almnt]
else:
almnt_buffer[almntkey].append(almnt)
if len(almnt_buffer) > max_buffer_size:
raise ValueError(
"Maximum alignment buffer size exceeded while pairing SAM alignments.")
if len(almnt_buffer) > 0:
warnings.warn(
"Mate records missing for %d records; first such record: %s." %
(len(almnt_buffer), str(list(almnt_buffer.values())[0][0])))
for almnt_list in list(almnt_buffer.values()):
for almnt in almnt_list:
if almnt.pe_which == "first":
yield (almnt, None)
else:
yield (None, almnt)
if ambiguous_pairing_counter > 0:
warnings.warn(
"Mate pairing was ambiguous for %d records; mate key for first such record: %s." %
(ambiguous_pairing_counter, str(ambiguous_pairing_first_occurance))) | 0.002464 |
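Typical use through HTSeq, where this helper is exposed at package level; the BAM path is hypothetical, the file must be position-sorted and paired-end, and the primary_only flag assumes a recent HTSeq release.
# Illustrative only: needs HTSeq and a position-sorted, paired-end BAM file.
import HTSeq

bam = HTSeq.BAM_Reader("example_pos_sorted.bam")   # hypothetical path
paired = 0
for first, second in HTSeq.pair_SAM_alignments_with_buffer(bam, primary_only=True):
    if first is not None and second is not None:
        paired += 1
print("records yielded with both mates present:", paired)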
def traverse(self, edge):
"""
        Traverse the graph, selecting the destination
        nodes for a particular relation that the selected
        nodes are a source of, e.g. selecting the friends of
        my friends. You can traverse indefinitely.
:param edge: The edge query. If the edge's
destination node is specified then the source
nodes will be selected.
"""
query = self.statement
rel, dst = edge.rel, edge.dst
statement, params = (
SQL.compound_fwd_query(query, rel) if dst is None else
SQL.compound_inv_query(query, rel, dst)
)
return self.derived(statement, params, replace=True) | 0.002821 |
def get_func_cfg_with_tainted_args(self, definition):
"""Build a function cfg and return it, with all arguments tainted."""
log.debug("Getting CFG for %s", definition.name)
func_cfg = make_cfg(
definition.node,
self.project_modules,
self.local_modules,
definition.path,
definition.module_definitions
)
args = Arguments(definition.node.args)
if args:
function_entry_node = func_cfg.nodes[0]
function_entry_node.outgoing = list()
first_node_after_args = func_cfg.nodes[1]
first_node_after_args.ingoing = list()
# We are just going to give all the tainted args the lineno of the def
definition_lineno = definition.node.lineno
# Taint all the arguments
for i, arg in enumerate(args):
node_type = TaintedNode
if i == 0 and arg == 'self':
node_type = AssignmentNode
arg_node = node_type(
label=arg,
left_hand_side=arg,
ast_node=None,
right_hand_side_variables=[],
line_number=definition_lineno,
path=definition.path
)
function_entry_node.connect(arg_node)
# 1 and not 0 so that Entry Node remains first in the list
func_cfg.nodes.insert(1, arg_node)
arg_node.connect(first_node_after_args)
return func_cfg | 0.001894 |
def extract_suffix(self, name):
"""
Returns a tuple of (name, suffix), or (name, None) if no suffix could be found.
As the method name indicates, the name is returned without the suffix.
Suffixes deemed to be degrees are discarded.
"""
# don't extract suffixes if we can't reasonably suspect we have enough parts to the name for there to be one
if len(name.strip().split()) > 2:
name, suffix = self.extract_matching_portion(r'\b(?P<suffix>{})(?=\b|\s|\Z|\W)'.format(SUFFIX_RE), name)
suffix, degree = self.extract_matching_portion(DEGREE_RE, suffix or '')
return name, suffix or None
return name, None | 0.008535 |
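A standalone sketch of the same suffix-splitting idea; SUFFIX_RE and DEGREE_RE below are simplified, made-up stand-ins for the patterns defined elsewhere in this codebase.
import re

# Simplified, hypothetical stand-ins for the SUFFIX_RE / DEGREE_RE used above.
SUFFIX_RE = r'(?:JR|SR|II|III|IV)\.?'
DEGREE_RE = r'(?:MD|PHD|ESQ)\.?'

def extract_suffix_simple(name):
    # Only look for a suffix when the name plausibly has more than two parts.
    if len(name.strip().split()) > 2:
        m = re.search(r'\b(?P<suffix>%s)(?=\b|\s|\Z|\W)' % SUFFIX_RE, name, re.I)
        if m:
            suffix = m.group('suffix')
            name = (name[:m.start()] + name[m.end():]).strip(' ,')
            # Discard the match when it is actually a degree rather than a suffix.
            if re.fullmatch(DEGREE_RE, suffix, re.I):
                return name, None
            return name, suffix
    return name, None

print(extract_suffix_simple("John A. Smith Jr."))   # ('John A. Smith', 'Jr.')
print(extract_suffix_simple("Jane Doe"))            # ('Jane Doe', None)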
def insert(self, **data):
"""
Insert the passed +data+ into the table. Raises Invalid if a where
clause is present (i.e. no INSERT INTO table WHERE)
"""
if self.where_clause:
raise Invalid("Cannot insert with 'where' clause.")
# Ensure that order is preserved
data = data.items()
cmd = "insert into {table} ({attrs}) values ({values})".format(
table=self.table_name,
attrs=", ".join(entry[0] for entry in data),
values=", ".join(["?"] * len(data)),
)
handle = Repo.db.execute(cmd, [entry[1] for entry in data])
# Return the id of the added row
return handle.lastrowid | 0.002813 |
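The generated statement is ordinary parameterised SQL; a minimal sqlite3 equivalent of what Repo.db.execute ends up running, with an illustrative table and columns.
# Standalone illustration of the parameterised-insert pattern used above.
import sqlite3

db = sqlite3.connect(":memory:")
db.execute("create table users (id integer primary key, name text, age integer)")

data = {"name": "ada", "age": 36}
items = list(data.items())
cmd = "insert into {table} ({attrs}) values ({values})".format(
    table="users",
    attrs=", ".join(k for k, _ in items),
    values=", ".join(["?"] * len(items)),
)
cur = db.execute(cmd, [v for _, v in items])
print(cmd)            # insert into users (name, age) values (?, ?)
print(cur.lastrowid)  # 1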
def partitions_for(self, topic):
"""Returns set of all known partitions for the topic."""
max_wait = self.config['max_block_ms'] / 1000.0
return self._wait_on_metadata(topic, max_wait) | 0.009615 |
def network_simplex(self, display, pivot, root):
'''
API:
network_simplex(self, display, pivot, root)
Description:
Solves minimum cost feasible flow problem using network simplex
algorithm. It is recommended to use min_cost_flow(algo='simplex')
instead of using network_simplex() directly. Returns True when an
optimal solution is found, returns False otherwise. 'flow' attribute
values of arcs should be considered as junk when returned False.
Pre:
(1) check Pre section of min_cost_flow()
Input:
pivot: specifies pivot rule. Check min_cost_flow()
display: 'off' for no display, 'pygame' for live update of
spanning tree.
root: Root node for the underlying spanning trees that will be
            generated by the network simplex algorithm.
Post:
(1) Changes 'flow' attribute of edges.
Return:
Returns True when an optimal solution is found, returns
False otherwise.
'''
# ==== determine an initial tree structure (T,L,U)
# find a feasible flow
if not self.find_feasible_flow():
return False
t = self.simplex_find_tree()
self.set_display_mode(display)
# mark spanning tree arcs
self.simplex_mark_st_arcs(t)
# display initial spanning tree
t.simplex_redraw(display, root)
t.set_display_mode(display)
#t.display()
self.display()
# set predecessor, depth and thread indexes
t.simplex_search(root, 1)
# compute potentials
self.simplex_compute_potentials(t, root)
# while some nontree arc violates optimality conditions
while not self.simplex_optimal(t):
self.display()
# select an entering arc (k,l)
(k,l) = self.simplex_select_entering_arc(t, pivot)
self.simplex_mark_entering_arc(k, l)
self.display()
# determine leaving arc
((p,q), capacity, cycle)=self.simplex_determine_leaving_arc(t,k,l)
# mark leaving arc
self.simplex_mark_leaving_arc(p, q)
self.display()
self.simplex_remove_arc(t, p, q, capacity, cycle)
# display after arc removed
self.display()
self.simplex_mark_st_arcs(t)
self.display()
# set predecessor, depth and thread indexes
t.simplex_redraw(display, root)
#t.display()
t.simplex_search(root, 1)
# compute potentials
self.simplex_compute_potentials(t, root)
return True | 0.003664 |
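As a cross-check of results, the same minimum-cost-flow instance can be solved with networkx's independent network simplex implementation; this uses the networkx API, not the graph class above.
# Comparison sketch using networkx's network_simplex, not this class's API.
import networkx as nx

G = nx.DiGraph()
G.add_node("s", demand=-4)   # supplies 4 units
G.add_node("t", demand=4)    # demands 4 units
G.add_edge("s", "a", capacity=3, weight=1)
G.add_edge("s", "b", capacity=3, weight=4)
G.add_edge("a", "t", capacity=3, weight=2)
G.add_edge("b", "t", capacity=3, weight=1)

flow_cost, flow_dict = nx.network_simplex(G)
print(flow_cost)   # 14: three units via s-a-t, one via s-b-t
print(flow_dict)   # per-edge flows, e.g. {'s': {'a': 3, 'b': 1}, ...}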
def encode_str(s, mutable=False):
"""Encodes a SemaphoreStr"""
rv = ffi.new("SemaphoreStr *")
if isinstance(s, text_type):
s = s.encode("utf-8")
if mutable:
s = bytearray(s)
rv.data = ffi.from_buffer(s)
rv.len = len(s)
# we have to hold a weak reference here to ensure our string does not
# get collected before the string is used.
attached_refs[rv] = s
return rv | 0.002387 |
def warm(self, jittering_ratio=0.2):
"""Progressively load the previous snapshot during the day.
Loading all the snapshots at once can takes a substantial amount of time. This method, if called
periodically during the day will progressively load those snapshots one by one. Because many workers are
going to use this method at the same time, we add a jittering to the period between load to avoid
hammering the disk at the same time.
"""
        if self.snapshot_to_load is None:
last_period = self.current_period - dt.timedelta(days=self.expiration-1)
self.compute_refresh_period()
self.snapshot_to_load = []
base_filename = "%s/%s_%s_*.dat" % (self.snapshot_path, self.name, self.expiration)
availables_snapshots = glob.glob(base_filename)
for filename in availables_snapshots:
snapshot_period = dt.datetime.strptime(filename.split('_')[-1].strip('.dat'), "%Y-%m-%d")
if snapshot_period >= last_period:
self.snapshot_to_load.append(filename)
self.ready = False
if self.snapshot_to_load and self._should_warm():
filename = self.snapshot_to_load.pop()
self._union_bf_from_file(filename)
jittering = self.warm_period * (np.random.random()-0.5) * jittering_ratio
self.next_snapshot_load = time.time() + self.warm_period + jittering
if not self.snapshot_to_load:
self.ready = True | 0.007074 |
def ajax_kindcat_arr(self, kind_sig):
'''
        Get the sub-categories.
        Look up the categories matching the given kind value (kind_sig) and return them in JSON format.
'''
out_arr = {}
for catinfo in MCategory.query_kind_cat(kind_sig):
out_arr[catinfo.uid] = catinfo.name
json.dump(out_arr, self) | 0.006803 |
def __init_from_np2d(self, mat, params_str, ref_dataset):
"""Initialize data from a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
self.handle = ctypes.c_void_p()
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else:
# change non-float data to float data, need to copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
_safe_call(_LIB.LGBM_DatasetCreateFromMat(
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int(mat.shape[0]),
ctypes.c_int(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self | 0.002077 |
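From the public side this path is exercised whenever a 2-D NumPy array is handed to lightgbm.Dataset; a small sketch (exact method availability may vary slightly across LightGBM versions).
# Illustrative use of the public API that funnels into this initialiser.
import numpy as np
import lightgbm as lgb

X = np.random.rand(100, 5).astype(np.float32)   # float32/float64 avoid the extra copy
y = np.random.randint(0, 2, size=100)

ds = lgb.Dataset(X, label=y)
ds.construct()                            # triggers LGBM_DatasetCreateFromMat
print(ds.num_data(), ds.num_feature())    # 100 5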
def clear_display_label(self):
"""Clears the display label.
raise: NoAccess - ``display_label`` cannot be modified
*compliance: mandatory -- This method must be implemented.*
"""
if (self.get_display_label_metadata().is_read_only() or
self.get_display_label_metadata().is_required()):
raise errors.NoAccess()
self._my_map['displayLabel'] = self._display_label_metadata['default_string_values'][0] | 0.006316 |
def _add_most_severe_consequence(self, variant_obj):
"""Add the most severe consequence
Args:
variant_obj (puzzle.models.Variant)
"""
most_severe_consequence = None
most_severe_score = None
for consequence in variant_obj.consequences:
logger.debug("Checking severity score for consequence: {0}".format(
consequence))
severity_score = SEVERITY_DICT.get(consequence)
            if severity_score is not None:
if most_severe_score:
if severity_score < most_severe_score:
most_severe_consequence = consequence
most_severe_score = severity_score
else:
most_severe_consequence = consequence
most_severe_score = severity_score
variant_obj.most_severe_consequence = most_severe_consequence | 0.007172 |
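The selection rule is simply "lowest severity score wins"; a tiny standalone version with an invented SEVERITY_DICT.
# Standalone sketch; the SEVERITY_DICT scores here are invented for illustration
# (a lower score means more severe, matching the comparison used above).
SEVERITY_DICT = {"stop_gained": 1, "missense_variant": 5, "synonymous_variant": 20}

def most_severe(consequences):
    best, best_score = None, None
    for c in consequences:
        score = SEVERITY_DICT.get(c)
        if score is not None and (best_score is None or score < best_score):
            best, best_score = c, score
    return best

print(most_severe(["synonymous_variant", "missense_variant"]))  # missense_variant
print(most_severe(["unknown_term"]))                            # None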
def select_coords(self):
"""
generate the selection
"""
start, stop = self.start, self.stop
nrows = self.table.nrows
if start is None:
start = 0
elif start < 0:
start += nrows
if self.stop is None:
stop = nrows
elif stop < 0:
stop += nrows
if self.condition is not None:
return self.table.table.get_where_list(self.condition.format(),
start=start, stop=stop,
sort=True)
elif self.coordinates is not None:
return self.coordinates
return np.arange(start, stop) | 0.00274 |
def parse_images(self, markup, treshold=6):
""" Returns a list of images found in the markup.
An image has a pathname, a description in plain text
and a list of properties Wikipedia uses to size and place images.
# A Wikipedia image looks like:
# [[Image:Columbia Supercomputer - NASA Advanced Supercomputing Facility.jpg|right|thumb|
# The [[NASA]] [[Columbia (supercomputer)|Columbia Supercomputer]].]]
# Parts are separated by "|".
# The first part is the image file, the last part can be a description.
# In between are display properties, like "right" or "thumb".
"""
images = []
m = re.findall(self.re["image"], markup)
for p in m:
p = self.parse_balanced_image(p)
img = p.split("|")
path = img[0].replace("[[Image:", "").strip()
description = u""
links = {}
properties = []
if len(img) > 1:
img = "|".join(img[1:])
links = self.parse_links(img)
properties = self.plain(img).split("|")
description = u""
# Best guess: an image description is normally
# longer than six characters, properties like
# "thumb" and "right" are less than six characters.
if len(properties[-1]) > treshold:
description = properties[-1]
properties = properties[:-1]
img = WikipediaImage(path, description, links, properties)
images.append(img)
markup = markup.replace(p, "")
return images, markup.strip() | 0.004614 |
def add_space(self, line):
"""Add a Space object to the section
Used during initial parsing mainly
Args:
            line (str): one line that defines the space; may be only whitespace
"""
if not isinstance(self.last_item, Space):
space = Space(self._structure)
self._structure.append(space)
self.last_item.add_line(line)
return self | 0.004878 |
def add_spectra(self, spectra_dict, key_sort_func=None):
"""
        Add a dictionary of spectra, with an optional sorting function for the
        keys.
        Args:
            spectra_dict: dict of {label: Spectrum}
            key_sort_func: function used to sort the spectra_dict keys.
"""
if key_sort_func:
keys = sorted(spectra_dict.keys(), key=key_sort_func)
else:
keys = spectra_dict.keys()
for label in keys:
            # The original recursive call add_spectra(label, ...) does not match this
            # method's signature; assuming a companion add_spectrum(label, spectrum)
            # method on this class, each entry is added individually instead.
            self.add_spectrum(label, spectra_dict[label])
def get_item(self, **kwargs):
""" Get collection item taking into account generated queryset
of parent view.
        This method allows working with nested resources properly: an item
        returned by this method will belong to its parent view's queryset,
        filtering out objects that don't belong to the parent object.
Returns an object from the applicable ACL. If ACL wasn't applied, it is
applied explicitly.
"""
if six.callable(self.context):
self.reload_context(es_based=False, **kwargs)
objects = self._parent_queryset()
if objects is not None and self.context not in objects:
raise JHTTPNotFound('{}({}) not found'.format(
self.Model.__name__,
self._get_context_key(**kwargs)))
return self.context | 0.002339 |
def to_gbq(
dataframe,
destination_table,
project_id=None,
chunksize=None,
reauth=False,
if_exists="fail",
auth_local_webserver=False,
table_schema=None,
location=None,
progress_bar=True,
credentials=None,
verbose=None,
private_key=None,
):
"""Write a DataFrame to a Google BigQuery table.
The main method a user calls to export pandas DataFrame contents to
Google BigQuery table.
This method uses the Google Cloud client library to make requests to
Google BigQuery, documented `here
<https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.
See the :ref:`How to authenticate with Google BigQuery <authentication>`
guide for authentication instructions.
Parameters
----------
dataframe : pandas.DataFrame
DataFrame to be written to a Google BigQuery table.
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists, do nothing.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
.. versionadded:: 0.2.0
table_schema : list of dicts, optional
List of BigQuery table fields to which according DataFrame
columns conform to, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``.
- If ``table_schema`` is provided, it may contain all or a subset of
DataFrame columns. If a subset is provided, the rest will be
inferred from the DataFrame dtypes.
- If ``table_schema`` is **not** provided, it will be
generated according to dtypes of DataFrame columns. See
`Inferring the Table Schema
<https://pandas-gbq.readthedocs.io/en/latest/writing.html#writing-schema>`__.
for a description of the schema inference.
See `BigQuery API documentation on valid column names
        <https://cloud.google.com/bigquery/docs/schemas#column_names>`__.
.. versionadded:: 0.3.1
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
.. versionadded:: 0.5.0
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
.. versionadded:: 0.5.0
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service Account
:class:`google.oauth2.service_account.Credentials` directly.
.. versionadded:: 0.8.0
verbose : bool, deprecated
Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
"""
_test_google_api_imports()
from pandas_gbq import schema
if verbose is not None and SHOW_VERBOSE_DEPRECATION:
warnings.warn(
"verbose is deprecated and will be removed in "
"a future version. Set logging level in order to vary "
"verbosity",
FutureWarning,
stacklevel=1,
)
if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
warnings.warn(
PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
)
if if_exists not in ("fail", "replace", "append"):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
if "." not in destination_table:
raise NotFoundException(
"Invalid Table Name. Should be of the form 'datasetId.tableId' "
)
connector = GbqConnector(
project_id,
reauth=reauth,
auth_local_webserver=auth_local_webserver,
location=location,
credentials=credentials,
private_key=private_key,
)
dataset_id, table_id = destination_table.rsplit(".", 1)
table = _Table(
project_id,
dataset_id,
location=location,
credentials=connector.credentials,
)
default_schema = _generate_bq_schema(dataframe)
if not table_schema:
table_schema = default_schema
else:
table_schema = schema.update_schema(
default_schema, dict(fields=table_schema)
)
# If table exists, check if_exists parameter
if table.exists(table_id):
if if_exists == "fail":
raise TableCreationError(
"Could not create the table because it "
"already exists. "
"Change the if_exists parameter to "
"'append' or 'replace' data."
)
elif if_exists == "replace":
connector.delete_and_recreate_table(
dataset_id, table_id, table_schema
)
elif if_exists == "append":
if not connector.schema_is_subset(
dataset_id, table_id, table_schema
):
raise InvalidSchema(
"Please verify that the structure and "
"data types in the DataFrame match the "
"schema of the destination table."
)
else:
table.create(table_id, table_schema)
if dataframe.empty:
# Create the table (if needed), but don't try to run a load job with an
# empty file. See: https://github.com/pydata/pandas-gbq/issues/237
return
connector.load_data(
dataframe,
dataset_id,
table_id,
chunksize=chunksize,
schema=table_schema,
progress_bar=progress_bar,
) | 0.000131 |
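A typical invocation through the top-level package function; the project, dataset and table names below are placeholders and valid Google credentials are required for this to actually run.
# Illustrative only: needs a real GCP project, dataset and credentials.
import pandas as pd
import pandas_gbq

df = pd.DataFrame({"id": [1, 2, 3], "name": ["a", "b", "c"]})
pandas_gbq.to_gbq(
    df,
    "my_dataset.my_table",        # placeholder destination table
    project_id="my-gcp-project",  # placeholder project
    if_exists="append",
    chunksize=10000,
)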