| code | docstring |
|---|---|
def _send_request(self):
msg = Message()
msg.subject = "An RPC call!"
msg.address = self._to
msg.reply_to = self._reply_to
msg.body = self._method
msg.correlation_id = 5
print("sending RPC call request: %s" % str(self._method))
self._sender.send(msg, self) | Send a message containing the RPC method call |
def dense_convolutional_network(units: tf.Tensor,
n_hidden_list: List,
filter_width=3,
use_dilation=False,
use_batch_norm=False,
training_ph=None):
units_list = [units]
for n_layer, n_filters in enumerate(n_hidden_list):
total_units = tf.concat(units_list, axis=-1)
if use_dilation:
dilation_rate = 2 ** n_layer
else:
dilation_rate = 1
units = tf.layers.conv1d(total_units,
n_filters,
filter_width,
dilation_rate=dilation_rate,
padding='same',  # 'same' assumed; the original literal was stripped
kernel_initializer=INITIALIZER())
if use_batch_norm:
units = tf.layers.batch_normalization(units, training=training_ph)
units = tf.nn.relu(units)
units_list.append(units)
return units | Densely connected convolutional layers. Based on the paper:
[Gao 17] https://arxiv.org/abs/1608.06993
Args:
units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]
n_hidden_list: list with number of hidden units at the output of each layer
filter_width: width of the kernel in tokens
use_batch_norm: whether to use batch normalization between layers
use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ...
training_ph: boolean placeholder determining whether it is currently the training phase.
It is used only for batch normalization to determine whether to use
current batch average (std) or memory stored average (std)
Returns:
units: tensor at the output of the last convolutional layer
with dimensionality [None, n_tokens, n_hidden_list[-1]] |
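A minimal call sketch for the row above (hedged: TF1 graph mode assumed, matching the `tf.layers` API in the snippet; the placeholder shapes and the `INITIALIZER` stand-in are illustrative, not taken from the source):

```python
import tensorflow as tf

# Hypothetical stand-in for the module-level INITIALIZER the snippet relies on.
INITIALIZER = tf.glorot_uniform_initializer

tokens = tf.placeholder(tf.float32, [None, None, 100])   # [batch, n_tokens, n_features]
is_training = tf.placeholder(tf.bool, [])

units = dense_convolutional_network(tokens,
                                    n_hidden_list=[64, 64, 64],
                                    filter_width=3,
                                    use_dilation=True,       # dilation 1, 2, 4 for layers 1-3
                                    use_batch_norm=True,
                                    training_ph=is_training)
# units: [None, n_tokens, 64], i.e. n_hidden_list[-1] output channels
```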
def calc_std_mod_reduc(mod_reduc):
mod_reduc = np.asarray(mod_reduc).astype(float)
std = (np.exp(-4.23) + np.sqrt(0.25 / np.exp(3.62) - (mod_reduc - 0.5)
** 2 / np.exp(3.62)))
return std | Calculate the standard deviation as a function of G/G_max.
Equation 7.29 from Darendeli (2001).
Parameters
----------
mod_reduc : array_like
Modulus reduction values.
Returns
-------
std : :class:`numpy.ndarray`
Standard deviation. |
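A quick sanity check of Equation 7.29 as implemented above (values computed from the formula; numpy assumed):

```python
import numpy as np

# The standard deviation peaks at G/G_max = 0.5 and shrinks toward the extremes.
print(calc_std_mod_reduc(0.5))          # ~0.096
print(calc_std_mod_reduc([0.0, 1.0]))   # ~[0.0146, 0.0146]
```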
def create(self, ignore=None):
ignore = ignore or []
def _create(tree_or_filename, alias=None):
for name, value in tree_or_filename.items():
if isinstance(value, dict):
for result in _create(value, alias=name):
yield result
else:
with open(value, 'r') as body:
yield name, self.client.indices.create(
index=name,
body=json.load(body),
ignore=ignore,
)
if alias:
yield alias, self.client.indices.put_alias(
index=list(_get_indices(tree_or_filename)),
name=alias,
ignore=ignore,
)
for result in _create(self.active_aliases):
yield result | Yield tuple with created index name and responses from a client. |
def _restore_clipboard_text(self, backup: str):
time.sleep(0.2)
self.clipboard.text = backup if backup is not None else "" | Restore the clipboard content. |
def get_grade_entry_admin_session(self):
if not self.supports_grade_entry_admin():
raise errors.Unimplemented()
return sessions.GradeEntryAdminSession(runtime=self._runtime) | Gets the ``OsidSession`` associated with the grade entry administration service.
return: (osid.grading.GradeEntryAdminSession) - a
``GradeEntryAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_entry_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_admin()`` is ``true``.* |
def jdn_to_gdate(jdn):
# Standard JDN -> Gregorian decomposition (Fliegel & Van Flandern form); the
# opening steps were missing in the source.
l = jdn + 68569
n = (4 * l) // 146097
l = l - (146097 * n + 3) // 4
i = (4000 * (l + 1)) // 1461001
l = l - (1461 * i) // 4 + 31
j = (80 * l) // 2447
day = l - (2447 * j) // 80
l = j // 11
month = j + 2 - (12 * l)
year = 100 * (n - 49) + i + l
return datetime.date(year, month, day) | Convert from the Julian day to the Gregorian day.
Algorithm from 'Julian and Gregorian Day Numbers' by Peter Meyer.
Return: day, month, year |
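A quick check of the conversion against a well-known value (JDN 2451545 corresponds to 2000-01-01):

```python
import datetime

assert jdn_to_gdate(2451545) == datetime.date(2000, 1, 1)
```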
def merge(args):
from jcvi.formats.base import DictFile
p = OptionParser(merge.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
quartets, registry, lost = args
qq = DictFile(registry, keypos=1, valuepos=3)
lost = DictFile(lost, keypos=1, valuepos=0, delimiter=)
qq.update(lost)
fp = open(quartets)
cases = {
"AN,CN": 4,
"BO,AN,CN": 8,
"BO,CN": 2,
"BR,AN": 1,
"BR,AN,CN": 6,
"BR,BO": 3,
"BR,BO,AN": 5,
"BR,BO,AN,CN": 9,
"BR,BO,CN": 7,
}
ip = {
"syntenic_model": "Syntenic_model_excluded_by_OMG",
"complete": "Predictable",
"partial": "Truncated",
"pseudogene": "Pseudogene",
"random": "Match_random",
"real_ns": "Transposed",
"gmap_fail": "GMAP_fail",
"AN LOST": "AN_LOST",
"CN LOST": "CN_LOST",
"BR LOST": "BR_LOST",
"BO LOST": "BO_LOST",
"outside": "Outside_synteny_blocks",
"[NF]": "Not_found",
}
for row in fp:
atoms = row.strip().split("\t")
genes = atoms[:4]
tag = atoms[4]
a, b, c, d = [qq.get(x, ".").rsplit("-", 1)[-1] for x in genes]
qqs = [c, d, a, b]
for i, q in enumerate(qqs):
if atoms[i] != ".":
qqs[i] = "syntenic_model"
comment = "Case{0}".format(cases[tag])
dots = sum([1 for x in genes if x == "."])
if dots == 1:
idx = genes.index(".")
status = qqs[idx]
status = ip[status]
comment += "-" + status
print(row.strip() + "\t" + "\t".join(qqs + [comment])) | %prog merge protein-quartets registry LOST
Merge protein quartets table with dna quartets registry. This is specific
to the napus project. |
def shell(self, term='vt100'):  # default terminal type assumed; literal stripped in source
channel = self._ssh.invoke_shell(term)
self._bridge(channel)
channel.close() | Start an interactive shell session
This method invokes a shell on the remote SSH server and proxies
traffic to/from both peers.
You must connect to an SSH server using ssh_connect()
prior to starting the session. |
def lookup_controller(obj, remainder, request=None):
if request is None:
warnings.warn(
(
"The function signature for %s.lookup_controller is changing "
"in the next version of pecan.\nPlease update to: "
"`lookup_controller(self, obj, remainder, request)`." % (
__name__,
)
),
DeprecationWarning
)
notfound_handlers = []
while True:
try:
obj, remainder = find_object(obj, remainder, notfound_handlers,
request)
handle_security(obj)
return obj, remainder
except (exc.HTTPNotFound, exc.HTTPMethodNotAllowed,
PecanNotFound) as e:
if isinstance(e, PecanNotFound):
e = exc.HTTPNotFound()
while notfound_handlers:
name, obj, remainder = notfound_handlers.pop()
if name == '_default':
return obj, remainder
else:
result = handle_lookup_traversal(obj, remainder)
if result:
if (
remainder == [''] and
len(obj._pecan['argspec'].args) > 1
):
raise e
obj_, remainder_ = result
return lookup_controller(obj_, remainder_, request)
else:
raise e | Traverses the requested url path and returns the appropriate controller
object, including default routes.
Handles common errors gracefully. |
def add_event(self, event):
self.events.append(event)
self.event_notify(event) | Add a new event and notify subscribers.
event -- the event that occurred |
def __xinclude_lxml(target, source, env):
from lxml import etree
doc = etree.parse(str(source[0]))
doc.xinclude()
try:
doc.write(str(target[0]), xml_declaration=True,
encoding="UTF-8", pretty_print=True)
except:
pass
return None | Resolving XIncludes, using the lxml module. |
def _get_adj_list_directional(self, umis, counts):
adj_list = {umi: [] for umi in umis}
if self.fuzzy_match:
for umi1 in umis:
comp_regex_err = regex.compile("(%s){e<=1}" % str(umi1))
comp_regex_del = regex.compile("(%s){i<=1}" % str(umi1)[::-1])
for umi2 in umis:
if umi1 == umi2:
continue
if counts[umi1] >= (counts[umi2]*self.dir_threshold):
if (max(len(umi1), len(umi2)) -
min(len(umi1), len(umi2))) > 1:
continue
if (comp_regex_err.match(str(umi2)) or
comp_regex_del.match(str(umi2))):
adj_list[umi1].append(umi2)
else:
for umi1, umi2 in itertools.combinations(umis, 2):
if edit_distance(umi1, umi2) <= 1:
if counts[umi1] >= (counts[umi2]*2)-1:
adj_list[umi1].append(umi2)
if counts[umi2] >= (counts[umi1]*2)-1:
adj_list[umi2].append(umi1)
return adj_list | identify all umis within the hamming distance threshold
and where the counts of the first UMI are > (2 * second UMI counts) - 1 |
def __generate_location(self):
screen_width = world.get_backbuffer_size().X
self.movement_speed = random.randrange(10, 25)
self.coords = R.Vector2(screen_width + self.image.get_width(), random.randrange(0, 100)) | Reset the location of the cloud once it has left the viewable area of the screen. |
def write_rtt(jlink):
try:
while jlink.connected():
bytes = list(bytearray(input(), "utf-8") + b"\x0A\x00")
bytes_written = jlink.rtt_write(0, bytes)
except Exception:
print("IO write thread exception, exiting...")
thread.interrupt_main()
raise | Writes keyboard input to JLink RTT buffer #0.
This method is a loop that blocks waiting on stdin. When enter is pressed,
LF and NUL bytes are added to the input and transmitted as a byte list.
If the JLink is disconnected, it will exit gracefully. If any other
exceptions are raised, they will be caught and re-raised after interrupting
the main thread.
Args:
jlink (pylink.JLink): The JLink to write to.
Raises:
Exception on error. |
def __is_valid_value_for_arg(self, arg, value, check_extension=True):
if "values" not in arg and "extension_values" not in arg:
return True
if "values" in arg and value.lower() in arg["values"]:
return True
if "extension_values" in arg:
extension = arg["extension_values"].get(value.lower())
if extension:
condition = (
check_extension and
extension not in RequireCommand.loaded_extensions
)
if condition:
raise ExtensionNotLoaded(extension)
return True
return False | Check if value is allowed for arg
Some commands only allow a limited set of values. The method
always returns True for methods that do not provide such a
set.
:param arg: the argument's name
:param value: the value to check
:param check_extension: check if value requires an extension
:return: True on success, False otherwise |
def calc_point_distance_vary(self, chi_coords, point_fupper, mus):
chi1_bin, chi2_bin = self.find_point_bin(chi_coords)
min_dist = 1000000000
indexes = None
for chi1_bin_offset, chi2_bin_offset in self.bin_loop_order:
curr_chi1_bin = chi1_bin + chi1_bin_offset
curr_chi2_bin = chi2_bin + chi2_bin_offset
curr_bank = self.massbank[curr_chi1_bin][curr_chi2_bin]
if not curr_bank[].size:
continue
f_upper = numpy.minimum(point_fupper, curr_bank[])
f_other = numpy.maximum(point_fupper, curr_bank[])
freq_idxes = numpy.array([self.frequency_map[f] for f in f_upper])
vecs1 = mus[freq_idxes, :]
range_idxes = numpy.arange(len(freq_idxes))
vecs2 = curr_bank[][range_idxes, freq_idxes, :]
dists = (vecs1 - vecs2)*(vecs1 - vecs2)
dists = numpy.sum(dists, axis=1)
norm_upper = numpy.array([self.normalization_map[f] \
for f in f_upper])
norm_other = numpy.array([self.normalization_map[f] \
for f in f_other])
norm_fac = norm_upper / norm_other
renormed_dists = 1 - (1 - dists)*norm_fac
curr_min_dist = renormed_dists.min()
if curr_min_dist < min_dist:
min_dist = curr_min_dist
indexes = curr_chi1_bin, curr_chi2_bin, renormed_dists.argmin()
return min_dist, indexes | Calculate distance between point and the bank allowing the metric to
vary based on varying upper frequency cutoff. Slower than
calc_point_distance, but more reliable when upper frequency cutoff can
change a lot.
Parameters
-----------
chi_coords : numpy.array
The position of the point in the chi coordinates.
point_fupper : float
The upper frequency cutoff to use for this point. This value must
be one of the ones already calculated in the metric.
mus : numpy.array
A 2D array where idx 0 holds the upper frequency cutoff and idx 1
holds the coordinates in the [not covaried] mu parameter space for
each value of the upper frequency cutoff.
Returns
--------
min_dist : float
The smallest **SQUARED** metric distance between the test point and
the bank.
indexes : The chi1_bin, chi2_bin and position within that bin at which
the closest matching point lies. |
def initializable(self):
return bool(lib.EnvSlotInitableP(self._env, self._cls, self._name)) | True if the Slot is initializable. |
def transform_op(self, op, value):
if value is None:
if _EQ_RE.match(op):
return "is"
elif _NEQ_RE.match(op):
return "is not"
return op | For comparisons, if the value is None (null), the '=' operator must be replaced with ' is '
and the '!=' operator must be replaced with ' is not '. This function handles that conversion.
It's up to the caller to call this function only on comparisons and not on assignments. |
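Illustrative behaviour of the conversion described above (hypothetical: `qb` stands for an instance of the class defining `transform_op`, and `_EQ_RE`/`_NEQ_RE` are assumed to match the `=`/`==` and `!=` operator spellings):

```python
qb.transform_op("=", None)    # -> "is"
qb.transform_op("!=", None)   # -> "is not"
qb.transform_op("=", 42)      # -> "=" (unchanged when the value is not None)
```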
def get_unset_cache(self):
caches = []
if self._cached_api_global_response is None:
caches.append('global')  # endpoint name assumed; literal stripped
if self._cached_api_ticker_response is None:
caches.append('ticker')  # endpoint name assumed; literal stripped
return (len(caches), caches) | return : returns a tuple (num_of_not_None_caches, [list of unset caches endpoint]) |
def remove_out_of_image(self, fully=True, partly=False):
polys_clean = [
poly for poly in self.polygons
if not poly.is_out_of_image(self.shape, fully=fully, partly=partly)
]
return PolygonsOnImage(polys_clean, shape=self.shape) | Remove all polygons that are fully or partially outside of the image.
Parameters
----------
fully : bool, optional
Whether to remove polygons that are fully outside of the image.
partly : bool, optional
Whether to remove polygons that are partially outside of the image.
Returns
-------
imgaug.PolygonsOnImage
Reduced set of polygons, with those that were fully/partially
outside of the image removed. |
def reduce(coro, iterable, initializer=None, limit=1, right=False, loop=None):
assert_corofunction(coro=coro)
assert_iter(iterable=iterable)
acc = initializer
if len(iterable) == 0:
return initializer
pool = ConcurrentExecutor(limit=limit, loop=loop)
def reducer(element):
@asyncio.coroutine
def wrapper():
nonlocal acc
acc = yield from coro(acc, element)
return wrapper
if right:
iterable.reverse()
for element in iterable:
pool.add(reducer(element))
yield from pool.run(ignore_empty=True)
return acc | Apply function of two arguments cumulatively to the items of sequence,
from left to right, so as to reduce the sequence to a single value.
Reduction will be executed sequentially without concurrency,
so passed values would be in order.
This function is the asynchronous coroutine equivalent to Python standard
`functools.reduce()` function.
This function is a coroutine.
This function can be composed in a pipeline chain with ``|`` operator.
Arguments:
coro (coroutine function): reducer coroutine binary function.
iterable (iterable|asynchronousiterable): an iterable collection
yielding coroutines functions.
initializer (mixed): initial accumulator value used in
the first reduction call.
limit (int): max iteration concurrency limit. Use ``0`` for no limit.
right (bool): reduce iterable from right to left.
loop (asyncio.BaseEventLoop): optional event loop to use.
Raises:
TypeError: if input arguments are not valid.
Returns:
mixed: accumulated final reduced value.
Usage::
async def reducer(acc, num):
return acc + num
await paco.reduce(reducer, [1, 2, 3, 4, 5], initializer=0)
# => 15 |
def add_cli_summarize(main: click.Group) -> click.Group:
@main.command()
@click.pass_obj
def summarize(manager: AbstractManager):
if not manager.is_populated():
click.secho(f'{manager} is not populated', fg='red')  # message and color assumed; original f-string stripped
sys.exit(1)
for name, count in sorted(manager.summarize().items()):
click.echo(f'{name}: {count}')  # output format assumed; original f-string stripped
return main | Add a ``summarize`` command to main :mod:`click` function. |
def get_elements(self, tag_name, attribute, with_namespace=True):
for i in self.xml:
if self.xml[i] is None:
continue
for item in self.xml[i].findall('.//' + tag_name):
if with_namespace:
value = item.get(self._ns(attribute))
else:
value = item.get(attribute)
if value:
yield self._format_value(value) | Deprecated: use `get_all_attribute_value()` instead
Return elements in xml files which match with the tag name and the specific attribute
:param tag_name: a string which specify the tag name
:param attribute: a string which specify the attribute |
def ModuleHelp(self, module):
helplist = []
self.__RenderOurModuleKeyFlags(module, helplist)
return '\n'.join(helplist) | Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module. |
def spin(compound, theta, around):
around = np.asarray(around).reshape(3)
if np.array_equal(around, np.zeros(3)):
raise ValueError()
center_pos = compound.center
translate(compound, -center_pos)
rotate(compound, theta, around)
translate(compound, center_pos) | Rotate a compound in place around an arbitrary vector.
Parameters
----------
compound : mb.Compound
The compound being rotated.
theta : float
The angle by which to rotate the compound, in radians.
around : np.ndarray, shape=(3,), dtype=float
The axis about which to spin the compound. |
def format_metadata_to_key(key_metadata):
securesystemslib.formats.KEY_SCHEMA.check_match(key_metadata)
key_dict = {}
keytype = key_metadata['keytype']
scheme = key_metadata['scheme']
key_value = key_metadata['keyval']
default_keyid = _get_keyid(keytype, scheme, key_value)
keyids = set()
keyids.add(default_keyid)
for hash_algorithm in securesystemslib.settings.HASH_ALGORITHMS:
keyid = _get_keyid(keytype, scheme, key_value, hash_algorithm)
keyids.add(keyid)
key_dict['keytype'] = keytype
key_dict['scheme'] = scheme
key_dict['keyid'] = default_keyid
key_dict['keyid_hash_algorithms'] = securesystemslib.settings.HASH_ALGORITHMS
key_dict['keyval'] = key_value
return key_dict, keyids | <Purpose>
Construct a key dictionary (e.g., securesystemslib.formats.RSAKEY_SCHEMA)
according to the keytype of 'key_metadata'. The dict returned by this
function has the exact format as the dict returned by one of the key
generations functions, like generate_ed25519_key(). The dict returned
has the form:
{'keytype': keytype,
'scheme': scheme,
'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'keyval': {'public': '...',
'private': '...'}}
For example, RSA key dictionaries in RSAKEY_SCHEMA format should be used by
modules storing a collection of keys, such as with keydb.py. RSA keys as
stored in metadata files use a different format, so this function should be
called if an RSA key is extracted from one of these metadata files and need
converting. The key generation functions create an entirely new key and
return it in the format appropriate for 'keydb.py'.
>>> ed25519_key = generate_ed25519_key()
>>> key_val = ed25519_key['keyval']
>>> keytype = ed25519_key['keytype']
>>> scheme = ed25519_key['scheme']
>>> ed25519_metadata = \
format_keyval_to_metadata(keytype, scheme, key_val, private=True)
>>> ed25519_key_2, junk = format_metadata_to_key(ed25519_metadata)
>>> securesystemslib.formats.ED25519KEY_SCHEMA.matches(ed25519_key_2)
True
>>> ed25519_key == ed25519_key_2
True
<Arguments>
key_metadata:
The key dictionary as stored in Metadata files, conforming to
'securesystemslib.formats.KEY_SCHEMA'. It has the form:
{'keytype': '...',
'scheme': scheme,
'keyval': {'public': '...',
'private': '...'}}
<Exceptions>
securesystemslib.exceptions.FormatError, if 'key_metadata' does not conform
to 'securesystemslib.formats.KEY_SCHEMA'.
<Side Effects>
None.
<Returns>
In the case of an RSA key, a dictionary conformant to
'securesystemslib.formats.RSAKEY_SCHEMA'. |
def _request(request, request_fallback=None):
all_dicts = [
lambda: request.json,
lambda: request.forms,
lambda: request.query,
lambda: request.files,
lambda: request_fallback
]
request_dict = dict()
for req_dict_ in all_dicts:
try:
req_dict = req_dict_()
except KeyError:
continue
if req_dict is not None and hasattr(req_dict, 'items'):
for req_key, req_val in req_dict.items():
request_dict[req_key] = req_val
return request_dict | Extract request fields wherever they may come from: GET, POST, forms, fallback |
def resizeEvent(self, event):
if self.resizeMode() == self.Adjust:
self.scheduleDelayedItemsLayout()
return super(ListLevel, self).resizeEvent(event) | Schedules an item layout if resize mode is "adjust". Somehow this is
needed for correctly scaling down items.
The reason this was reimplemented was the CommentDelegate.
:param event: the resize event
:type event: QtCore.QEvent
:returns: None
:rtype: None
:raises: None |
def _getFromTime(self, atDate=None):
return getLocalTime(self.date_from, self.time_from, self.tz) | Time that the event starts (in the local time zone). |
def linkify_with_escalations(self, escalations):
for i in self:
if not hasattr(i, 'escalations'):
continue
links_list = strip_and_uniq(i.escalations)
new = []
for name in [e for e in links_list if e]:
escalation = escalations.find_by_name(name)
if escalation is not None and escalation.uuid not in new:
new.append(escalation.uuid)
else:
i.add_error("the escalation defined for is unknown"
% (name, i.get_name()))
i.escalations = new | Link with escalations
:param escalations: all escalations object
:type escalations: alignak.objects.escalation.Escalations
:return: None |
def get_tagged_albums(self, tag, limit=None, cacheable=True):
params = self._get_params()
params["tag"] = tag
params["taggingtype"] = "album"
if limit:
params["limit"] = limit
doc = self._request(self.ws_prefix + ".getpersonaltags", cacheable, params)
return _extract_albums(doc, self.network) | Returns the albums tagged by a user. |
def _find_best_in_population(population, values):
best_value = tf.math.reduce_min(input_tensor=values)
best_index = tf.where(tf.math.equal(values, best_value))[0, 0]
return ([population_part[best_index] for population_part in population],
best_value) | Finds the population member with the lowest value. |
def working_cycletime(start, end, workday_start=datetime.timedelta(hours=0), workday_end=datetime.timedelta(hours=24)):
def clamp(t, start, end):
"Return clamped to the range [, ]"
return max(start, min(end, t))
def day_part(t):
"Return timedelta between midnight and ."
return t - t.replace(hour=0, minute=0, second=0)
if not start:
return None
if not end:
end = datetime.datetime.now()
zero = datetime.timedelta(0)
assert(zero <= workday_start <= workday_end <= datetime.timedelta(1))
workday = workday_end - workday_start
days = (end - start).days + 1
weeks = days // 7
extra = (max(0, 5 - start.weekday()) + min(5, 1 + end.weekday())) % 5
weekdays = weeks * 5 + extra
total = workday * weekdays
if start.weekday() < 5:
total -= clamp(workday_end - day_part(end), zero, workday)
cycle_time = timedelta_total_seconds(total) / timedelta_total_seconds(workday)
return cycle_time | Get the working time between a beginning and an end point subtracting out non-office time |
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
from math import sqrt, pi
import matplotlib.pyplot as plt
def function(amplitudes):
def f(t):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
coeffs = np.delete(np.copy(amplitudes), 0)
coeffs.resize(int((coeffs.size + 1) / 2), 2)
harmonics = np.arange(0, coeffs.shape[0]) + 1
trig_args = np.outer(harmonics, t)
result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) +
coeffs[:, 1, np.newaxis] * np.cos(trig_args),
axis=0)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
t = np.linspace(-pi, pi, samples)
used_legends = set()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = f(t)
kls = class_col.iat[i]
label = pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(t, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(t, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax | Generate a matplotlib plot of Andrews curves, for visualising clusters of
multivariate data.
Andrews curves have the functional form:
f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
x_4 sin(2t) + x_5 cos(2t) + ...
Where x coefficients correspond to the values of each dimension and t is
linearly spaced between -pi and +pi. Each row of frame then corresponds to
a single curve.
Parameters
----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color : list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
:class:`matplotlib.axes.Axes` |
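A minimal usage sketch for the row above (pandas and matplotlib assumed; the tiny DataFrame is illustrative):

```python
import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({
    'sepal_length': [5.1, 4.9, 6.3, 5.8],
    'sepal_width':  [3.5, 3.0, 3.3, 2.7],
    'species':      ['setosa', 'setosa', 'virginica', 'virginica'],
})
ax = andrews_curves(df, class_column='species', samples=100)
plt.show()
```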
def check_psd(matrix, tolerance=1e-6):
hermitian = (matrix + matrix.T.conjugate()) / 2
eigenvalues = np.linalg.eigh(hermitian)[0]
return (eigenvalues > -tolerance).all() | A square matrix is PSD if all eigenvalues of its Hermitian part are
non- negative. The Hermitian part is given by (self + M*)/2, where M* is
the complex conjugate transpose of M |
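Two small checks of the PSD test above (numpy assumed):

```python
import numpy as np

check_psd(np.eye(3))                    # True: identity has eigenvalues 1, 1, 1
check_psd(np.array([[1.0, 2.0],
                    [2.0, 1.0]]))       # False: eigenvalues are 3 and -1
```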
def summarize(self, text, topics=4, length=5, binary_matrix=True, topic_sigma_threshold=0.5):
text = self._parse_input(text)
sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
length = self._parse_summary_length(length, len(sentences))
if length == len(sentences):
return unprocessed_sentences
topics = self._validate_num_topics(topics, sentences)
weighting = 'binary' if binary_matrix else 'frequency'  # weighting names assumed; literals stripped
sentence_matrix = self._compute_matrix(sentences, weighting=weighting)
sentence_matrix = sentence_matrix.transpose()
sentence_matrix = sentence_matrix.multiply(sentence_matrix > 0)
s, u, v = self._svd(sentence_matrix, num_concepts=topics)
if 1 <= topic_sigma_threshold < 0:
raise ValueError()
sigma_threshold = max(u) * topic_sigma_threshold
u[u < sigma_threshold] = 0
saliency_vec = np.dot(np.square(u), np.square(v))
top_sentences = saliency_vec.argsort()[-length:][::-1]
top_sentences.sort()
return [unprocessed_sentences[i] for i in top_sentences] | Implements the method of latent semantic analysis described by Steinberger and Jezek in the paper:
J. Steinberger and K. Jezek (2004). Using latent semantic analysis in text summarization and summary evaluation.
Proc. ISIM ’04, pp. 93–100.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param topics: the number of topics/concepts covered in the input text (defines the degree of
dimensionality reduction in the SVD step)
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param binary_matrix: boolean value indicating whether the matrix of word counts should be binary
(True by default)
:param topic_sigma_threshold: filters out topics/concepts with a singular value less than this
percentage of the largest singular value (must be between 0 and 1, 0.5 by default)
:return: list of sentences for the summary |
def grab_bulbs(host, token=None):
xml = grab_xml(host, token)
bulbs = {}
for room in xml:
for device in room[]:
bulbs[int(device[])] = device
return bulbs | Grab XML, then add all bulbs to a dict. Removes room functionality |
def main(args):
p = OptionParser(main.__doc__)
p.add_option("--outgroup", help="Outgroup for rerooting the tree. " +
"Use comma to separate multiple taxa.")
p.add_option("--noreroot", default=False, action="store_true",
help="Don't reroot the input tree [default: %default]")
p.add_option("--rmargin", default=.3, type="float",
help="Set blank rmargin to the right [default: %default]")
p.add_option("--gffdir", default=None,
help="The directory that contain GFF files [default: %default]")
p.add_option("--sizes", default=None,
help="The FASTA file or the sizes file [default: %default]")
p.add_option("--SH", default=None, type="string",
help="SH test p-value [default: %default]")
p.add_option("--scutoff", default=0, type="int",
help="cutoff for displaying node support, 0-100 [default: %default]")
p.add_option("--barcode", default=None,
help="path to seq names barcode mapping file: "
"barcode<tab>new_name [default: %default]")
p.add_option("--leafcolor", default="k",
help="Font color for the OTUs, or path to a file "
"containing color mappings: leafname<tab>color [default: %default]")
p.add_option("--leaffont", default=12, help="Font size for the OTUs")
p.add_option("--geoscale", default=False, action="store_true",
help="Plot geological scale")
opts, args, iopts = p.set_image_options(args, figsize="8x6")
if len(args) != 1:
sys.exit(not p.print_help())
datafile, = args
outgroup = None
reroot = not opts.noreroot
if opts.outgroup:
outgroup = opts.outgroup.split(",")
if datafile == "demo":
tx = ""  # the bundled demo Newick string was stripped from the source
else:
logging.debug("Load tree file `{0}`.".format(datafile))
tx = open(datafile).read()
pf = datafile.rsplit(".", 1)[0]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
if opts.geoscale:
draw_geoscale(root)
else:
if op.isfile(opts.leafcolor):
leafcolor = "k"
leafcolorfile = opts.leafcolor
else:
leafcolor = opts.leafcolor
leafcolorfile = None
draw_tree(root, tx, rmargin=opts.rmargin, leafcolor=leafcolor,
outgroup=outgroup, reroot=reroot, gffdir=opts.gffdir,
sizes=opts.sizes, SH=opts.SH, scutoff=opts.scutoff,
barcodefile=opts.barcode, leafcolorfile=leafcolorfile,
leaffont=opts.leaffont)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | %prog newicktree
Plot Newick formatted tree. The gene structure can be plotted along if
--gffdir is given. The gff file needs to be `genename.gff`. If --sizes is
on, also show the number of amino acids.
With --barcode a mapping file can be provided to convert seq names to
eg. species names, useful in unified tree display. This file should have
distinctive barcodes in column1 and new names in column2, tab delimited. |
def QA_indicator_BOLL(DataFrame, N=20, P=2):
C = DataFrame['close']
boll = MA(C, N)
UB = boll + P * STD(C, N)
LB = boll - P * STD(C, N)
DICT = {'BOLL': boll, 'UB': UB, 'LB': LB}
return pd.DataFrame(DICT) | Bollinger Bands (布林线) |
def get_git_repositories_activity_metrics(self, project, from_date, aggregation_type, skip, top):
route_values = {}
if project is not None:
route_values[] = self._serialize.url(, project, )
query_parameters = {}
if from_date is not None:
query_parameters[] = self._serialize.query(, from_date, )
if aggregation_type is not None:
query_parameters[] = self._serialize.query(, aggregation_type, )
if skip is not None:
query_parameters[] = self._serialize.query(, skip, )
if top is not None:
query_parameters[] = self._serialize.query(, top, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize(, self._unwrap_collection(response)) | GetGitRepositoriesActivityMetrics.
[Preview API] Retrieves git activity metrics for repositories matching a specified criteria.
:param str project: Project ID or project name
:param datetime from_date: Date from which, the trends are to be fetched.
:param str aggregation_type: Bucket size on which, trends are to be aggregated.
:param int skip: The number of repositories to ignore.
:param int top: The number of repositories for which activity metrics are to be retrieved.
:rtype: [RepositoryActivityMetrics] |
def clone(self, *args, **overrides):
link = overrides.pop('link', True)
settings = dict(self.get_param_values(), **overrides)
if 'id' not in settings:
settings['id'] = self.id
if not args and link:
settings['plot_id'] = self._plot_id
pos_args = getattr(self, '_' + type(self).__name__ + '__pos_params', [])
return self.__class__(*(settings[n] for n in pos_args),
**{k:v for k,v in settings.items()
if k not in pos_args}) | Returns a clone of the object with matching parameter values
containing the specified args and kwargs. |
def _get_popularity_baseline(self):
response = self.__proxy__.get_popularity_baseline()
from .popularity_recommender import PopularityRecommender
return PopularityRecommender(response) | Returns a new popularity model matching the data set this model was
trained with. Can be used for comparison purposes. |
def setCheckedItems(self, items):
if not self.isCheckable():
return
model = self.model()
for i in range(self.count()):
item_text = self.itemText(i)
if not item_text:
continue
if nativestring(item_text) in items:
state = Qt.Checked
else:
state = Qt.Unchecked
model.item(i).setCheckState(state) | Sets the checked items for this combobox.
:param items | [<str>, ..] |
def get_binary_dist(self, requirement):
cache_file = self.cache.get(requirement)
if cache_file:
if self.needs_invalidation(requirement, cache_file):
logger.info("Invalidating old %s binary (source has changed) ..", requirement)
cache_file = None
else:
logger.debug("%s hasnpip-accel-bdist-.tar.gzw:gzrbr:gz')
try:
for member in archive.getmembers():
yield member, archive.extractfile(member.name)
finally:
archive.close() | Get or create a cached binary distribution archive.
:param requirement: A :class:`.Requirement` object.
:returns: An iterable of tuples with two values each: A
:class:`tarfile.TarInfo` object and a file-like object.
Gets the cached binary distribution that was previously built for the
given requirement. If no binary distribution has been cached yet, a new
binary distribution is built and added to the cache.
Uses :func:`build_binary_dist()` to build binary distribution
archives. If this fails with a build error :func:`get_binary_dist()`
will use :class:`.SystemPackageManager` to check for and install
missing system packages and retry the build when missing system
packages were installed. |
def row_col_maker(app, fromdocname, all_needs, need_info, need_key, make_ref=False, ref_lookup=False, prefix=):
row_col = nodes.entry()
para_col = nodes.paragraph()
if need_key in need_info and need_info[need_key] is not None:
if not isinstance(need_info[need_key], (list, set)):
data = [need_info[need_key]]
else:
data = need_info[need_key]
for index, datum in enumerate(data):
link_id = datum
link_part = None
if need_key in ['links', 'links_back']:  # key and attribute names below restored from context; literals stripped in source
if '.' in datum:
link_id = datum.split('.')[0]
link_part = datum.split('.')[1]
datum_text = prefix + datum
text_col = nodes.Text(datum_text, datum_text)
if make_ref or ref_lookup:
try:
ref_col = nodes.reference("", "")
if not ref_lookup:
ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, need_info['docname'])
ref_col['refuri'] += "#" + datum
else:
temp_need = all_needs[link_id]
ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, temp_need['docname'])
ref_col['refuri'] += "#" + temp_need['id']
if link_part is not None:
ref_col['refuri'] += '.' + link_part
except KeyError:
para_col += text_col
else:
ref_col.append(text_col)
para_col += ref_col
else:
para_col += text_col
if index + 1 < len(data):
para_col += nodes.emphasis("; ", "; ")
row_col += para_col
return row_col | Creates and returns a column.
:param app: current sphinx app
:param fromdocname: current document
:param all_needs: Dictionary of all need objects
:param need_info: need_info object, which stores all related need data
:param need_key: The key to access the needed data from need_info
:param make_ref: If true, creates a reference for the given data in need_key
:param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference
:param prefix: string, which is used as prefix for the text output
:return: column object (nodes.entry) |
def at_time(self, time, nearest_sample=False):
if nearest_sample:
time += self.delta_t / 2.0
return self[int((time-self.start_time)*self.sample_rate)] | Return the value at the specified gps time |
def route_request(self, request_json, metadata=None):
request = Request(request_json)
request.metadata = metadata
handler_fn = self._handlers[self._default]
if not request.is_intent() and (request.request_type() in self._handlers):
handler_fn = self._handlers[request.request_type()]
elif request.is_intent() and request.intent_name() in self._handlers[]:
handler_fn = self._handlers[][request.intent_name()]
response = handler_fn(request)
response.set_session(request.session)
return response.to_json() | Route the request object to the right handler function |
def find_commands(command_dir: str) -> List[str]:
if not command_dir:
return []
return [name for _, name, is_pkg in pkgutil.iter_modules([command_dir])
if not is_pkg and not name.startswith('_')] | Get all command names in a folder
:return: List of command names |
def loadbinary(fname):
X = np.load(fname)
if isinstance(X, np.lib.npyio.NpzFile):
if 'coloring' in X.files:
coloring = X['coloring'].tolist()
else:
coloring = None
if 'data' in X.files:
return [X['data'], X['data'].dtype, coloring]
else:
return [None, None, coloring]
else:
return [X, X.dtype, None] | Load a numpy binary file or archive created by tabular.io.savebinary.
Load a numpy binary file (``.npy``) or archive (``.npz``) created by
:func:`tabular.io.savebinary`.
The data and associated data type (e.g. `dtype`, including if given, column
names) are loaded and reconstituted.
If `fname` is a numpy archive, it may contain additional data giving
hierarchical column-oriented structure (e.g. `coloring`). See
:func:`tabular.tab.tabarray.__new__` for more information about coloring.
The ``.npz`` file is a zipped archive created using :func:`numpy.savez` and
containing one or more ``.npy`` files, which are NumPy binary files created
by :func:`numpy.save`.
**Parameters**
**fname** : string or file-like object
File name or open numpy binary file (``.npy``) or archive
(``.npz``) created by :func:`tabular.io.savebinary`.
* When `fname` is a ``.npy`` binary file, it is reconstituted as a
flat ndarray of data, with structured dtype.
* When `fname` is a ``.npz`` archive, it contains at least one
``.npy`` binary file and optionally another:
* ``data.npy`` must be in the archive, and is reconstituted as `X`,
a flat ndarray of data, with structured dtype, `dtype`.
* ``coloring.npy``, if present is reconstitued as `coloring`, a
dictionary.
**Returns**
**X** : numpy ndarray with structured dtype
The data, where each column is named and is of a uniform NumPy data
type.
**dtype** : numpy dtype object
The data type of `X`, e.g. `X.dtype`.
**coloring** : dictionary, or None
Hierarchical structure on the columns given in the header of the
file; an attribute of tabarrays.
See :func:`tabular.tab.tabarray.__new__` for more information about
coloring.
**See Also:**
:func:`tabular.io.savebinary`, :func:`numpy.load`,
:func:`numpy.save`, :func:`numpy.savez` |
def add_attribute_model(self,
name,
attr,
writeable_func=None,
):
return self._field_registry.add_attribute_model(
name, attr, writeable_func, self._part) | Register a pre-existing AttributeModel to be added to the Block |
def queue_instances(instances):
for instance_id in instances:
node = _get_node(instance_id=instance_id)
__utils__[](node, __active_provider_name__, __opts__) | Queue a set of instances to be provisioned later. Expects a list.
Currently this only queries node data, and then places it in the cloud
cache (if configured). If the salt-cloud-reactor is being used, these
instances will be automatically provisioned using that.
For more information about the salt-cloud-reactor, see:
https://github.com/saltstack-formulas/salt-cloud-reactor |
def _load_entities(self):
if not self._chat_peer:
return True
self._chat, self._input_chat = self._get_entity_pair(self.chat_id)
return self._input_chat is not None | Must load all the entities it needs from cache, and
return ``False`` if it could not find all of them. |
def print_stats(correctness, confidence, name):
accuracy = correctness.mean()
wrongness = 1 - correctness
denom1 = np.maximum(1, wrongness.sum())
ave_prob_on_mistake = (wrongness * confidence).sum() / denom1
assert ave_prob_on_mistake <= 1., ave_prob_on_mistake
denom2 = np.maximum(1, correctness.sum())
ave_prob_on_correct = (correctness * confidence).sum() / denom2
covered = confidence > 0.5
cov_half = covered.mean()
acc_half = (correctness * covered).sum() / np.maximum(1, covered.sum())
print('Accuracy on %s examples: %0.4f' % (name, accuracy))
print("Average prob on mistakes: %0.4f" % ave_prob_on_mistake)
print("Average prob on correct: %0.4f" % ave_prob_on_correct)
print("Accuracy when prob thresholded at .5: %0.4f" % acc_half)
print("Coverage when prob thresholded at .5: %0.4f" % cov_half)
success_rate = acc_half * cov_half
print("Success rate at .5: %0.4f" % success_rate)
failure_rate = (1. - acc_half) * cov_half
print("Failure rate at .5: %0.4f" % failure_rate)
print() | Prints out accuracy, coverage, etc. statistics
:param correctness: ndarray
One bool per example specifying whether it was correctly classified
:param confidence: ndarray
The probability associated with each prediction
:param name: str
The name of this type of data (e.g. "clean", "MaxConfidence") |
def origin_central_asia(origin):
return origin_afghanistan(origin) or origin_kazakhstan(origin) \
or origin_kyrgyzstan(origin) or origin_tajikistan(origin) \
or origin_turkmenistan(origin) or origin_uzbekistan(origin) | \
Returns if the origin is located in Central Asia.
Holds true for the following countries:
* Afghanistan
* Kazakhstan
* Kyrgyzstan
* Tajikistan
* Turkmenistan
* Uzbekistan
`origin`
The origin to check. |
def derivatives(self, x, y, Rs, theta_Rs, e1, e2, center_x=0, center_y=0):
phi_G, q = param_util.ellipticity2phi_q(e1, e2)
x_shift = x - center_x
y_shift = y - center_y
cos_phi = np.cos(phi_G)
sin_phi = np.sin(phi_G)
e = min(abs(1. - q), 0.99)
xt1 = (cos_phi*x_shift+sin_phi*y_shift)*np.sqrt(1 - e)
xt2 = (-sin_phi*x_shift+cos_phi*y_shift)*np.sqrt(1 + e)
R_ = np.sqrt(xt1**2 + xt2**2)
rho0_input = self.nfw._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs)
if Rs < 0.0000001:
Rs = 0.0000001
f_x_prim, f_y_prim = self.nfw.nfwAlpha(R_, Rs, rho0_input, xt1, xt2)
f_x_prim *= np.sqrt(1 - e)
f_y_prim *= np.sqrt(1 + e)
f_x = cos_phi*f_x_prim-sin_phi*f_y_prim
f_y = sin_phi*f_x_prim+cos_phi*f_y_prim
return f_x, f_y | returns df/dx and df/dy of the function (integral of NFW) |
def exception(self, timeout=None):
if not self._completed.wait(timeout=timeout):
raise exceptions.TimeoutError("Timed out waiting for result.")
if self._result != self._SENTINEL:
return None
return self._exception | Return the exception raised by the call, if any.
This blocks until the message has successfully been published, and
returns the exception. If the call succeeded, return None.
Args:
timeout (Union[int, float]): The number of seconds before this call
times out and raises TimeoutError.
Raises:
TimeoutError: If the request times out.
Returns:
Exception: The exception raised by the call, if any. |
def add_host(host):
p = new_prefix()
p.prefix = str(host[])
p.type = "host"
p.description = host[]
p.node = host[]
p.avps = {}
if in host:
p.comment = host[]
if len(host[]) > 0:
p.avps[] = host[]
if len(host[]) > 0:
p.avps[] = host[]
if len(host[]) > 0:
p.avps[] = host[]
if len(host[]) > 0:
p.avps[] = host[]
return p | Put your host information in the prefix object. |
def build_vocab(self, *args, **kwargs):
counter = Counter()
sources = []
for arg in args:
if isinstance(arg, Dataset):
sources += [getattr(arg, name) for name, field in
arg.fields.items() if field is self]
else:
sources.append(arg)
for data in sources:
for x in data:
if not self.sequential:
x = [x]
try:
counter.update(x)
except TypeError:
counter.update(chain.from_iterable(x))
specials = list(OrderedDict.fromkeys(
tok for tok in [self.unk_token, self.pad_token, self.init_token,
self.eos_token] + kwargs.pop('specials', [])
if tok is not None))
self.vocab = self.vocab_cls(counter, specials=specials, **kwargs) | Construct the Vocab object for this field from one or more datasets.
Arguments:
Positional arguments: Dataset objects or other iterable data
sources from which to construct the Vocab object that
represents the set of possible values for this field. If
a Dataset object is provided, all columns corresponding
to this field are used; individual columns can also be
provided directly.
Remaining keyword arguments: Passed to the constructor of Vocab. |
def update_asset_ddo(self, did, ddo):
response = self.requests_session.put(f, data=ddo.as_text(),
headers=self._headers)
if response.status_code == 200 or response.status_code == 201:
return json.loads(response.content)
else:
raise Exception(f) | Update the ddo of a did already registered.
:param did: Asset DID string
:param ddo: DDO instance
:return: API response (depends on implementation) |
def create_group(self, name):
self.service.create_group(
name, self.url_prefix, self.auth, self.session,
self.session_send_opts) | Create a new group.
Args:
name (string): Name of the group to create.
Raises:
requests.HTTPError on failure. |
def handle_relative(self, event):
delta_x, delta_y = self._get_relative(event)
if delta_x:
self.events.append(
self.emulate_rel(0x00,
delta_x,
self.timeval))
if delta_y:
self.events.append(
self.emulate_rel(0x01,
delta_y,
self.timeval)) | Relative mouse movement. |
def pretty_str(self, indent=0):
spaces = ' ' * indent
pretty = '{}{}:\n'.format(spaces, self.name)  # format string assumed; literal stripped
pretty += '\n'.join(c.pretty_str(indent + 2) for c in self.children)
return pretty | Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation. |
def _parse_proc_pid_cgroup(content):
for ownCgroup in content:
ownCgroup = ownCgroup.strip().split(':')  # /proc/*/cgroup lines look like 'ID:subsys1,subsys2:/path'
try:
path = ownCgroup[2][1:]
except IndexError:
raise IndexError("index out of range for " + str(ownCgroup))
for subsystem in ownCgroup[1].split(','):
yield (subsystem, path) | Parse a /proc/*/cgroup file into tuples of (subsystem,cgroup).
@param content: An iterable over the lines of the file.
@return: a generator of tuples |
def find(file_node, dirs=ICON_DIRS, default_name=None, file_ext=):
names = []
for attr_name in (, , ):
attr = getattr(file_node, attr_name)
if attr:
names.append(attr)
if default_name:
names.append(default_name)
icon_path = StaticPathFinder.find(names, dirs, file_ext)
if icon_path:
return StaticIconFile(file_node, icon_path) | Iterating all icon dirs, try to find a file called like the node's
extension / mime subtype / mime type (in that order).
For instance, for an MP3 file ("audio/mpeg"), this would look for:
"mp3.png" / "audio/mpeg.png" / "audio.png" |
def lookup_field_class(self, field, obj=None, default=None):
css = ""
if field in self.field_config and 'class' in self.field_config[field]:
css = self.field_config[field]['class']
elif default:
css = default
return css | Looks up any additional class we should include when rendering this field |
def decode_timeseries_row(self, tsrow, tscols=None,
convert_timestamp=False):
row = []
for i, cell in enumerate(tsrow.cells):
col = None
if tscols is not None:
col = tscols[i]
if cell.HasField('varchar_value'):  # protobuf field and TsColumnType names restored from the values used in each branch
if col and not (col.type == TsColumnType.Value('VARCHAR') or
col.type == TsColumnType.Value('BLOB')):
raise TypeError()
else:
row.append(cell.varchar_value)
elif cell.HasField('sint64_value'):
if col and col.type != TsColumnType.Value('SINT64'):
raise TypeError()
else:
row.append(cell.sint64_value)
elif cell.HasField('double_value'):
if col and col.type != TsColumnType.Value('DOUBLE'):
raise TypeError()
else:
row.append(cell.double_value)
elif cell.HasField('timestamp_value'):
if col and col.type != TsColumnType.Value('TIMESTAMP'):
raise TypeError()
else:
dt = cell.timestamp_value
if convert_timestamp:
dt = datetime_from_unix_time_millis(
cell.timestamp_value)
row.append(dt)
elif cell.HasField('boolean_value'):
if col and col.type != TsColumnType.Value('BOOLEAN'):
raise TypeError()
else:
row.append(cell.boolean_value)
else:
row.append(None)
return row | Decodes a TsRow into a list
:param tsrow: the protobuf TsRow to decode.
:type tsrow: riak.pb.riak_ts_pb2.TsRow
:param tscols: the protobuf TsColumn data to help decode.
:type tscols: list
:rtype list |
def _parse_total_magnetization(line, lines):
toks = line.split()
res = {"number of electrons": float(toks[3])}
if len(toks) > 5:
res["total magnetization"] = float(toks[5])
return res | Parse the total magnetization, which is somewhat hidden |
def get_labels(self, depth=None):
labels = libCopy.deepcopy(self.labels)
if depth is None or depth > 0:
for element in self.elements:
if isinstance(element, CellReference):
labels.extend(
element.get_labels(None if depth is None else depth -
1))
elif isinstance(element, CellArray):
labels.extend(
element.get_labels(None if depth is None else depth -
1))
return labels | Returns a list with a copy of the labels in this cell.
Parameters
----------
depth : integer or ``None``
If not ``None``, defines from how many reference levels to
retrieve labels from.
Returns
-------
out : list of ``Label``
List containing the labels in this cell and its references. |
def compare(array, other, op, ty_str):
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
if isinstance(other, str) or isinstance(other, WeldObject):
other_var = weld_obj.update(other)
if isinstance(other, WeldObject):
other_var = other.obj_id
weld_obj.dependencies[other_var] = other
else:
other_var = "%s(%s)" % (ty_str, str(other))
weld_template = "map(%(array)s, |a: %(ty)s| a %(op)s %(other)s)"  # Weld template reconstructed from the substitution keys below; exact original may differ
weld_obj.weld_code = weld_template % {"array": array_var,
"other": other_var,
"op": op, "ty": ty_str}
return weld_obj | Performs passed-in comparison op between every element in the passed-in
array and other, and returns an array of booleans.
Args:
array (WeldObject / Numpy.ndarray): Input array
other (WeldObject / Numpy.ndarray): Second input array
op (str): Op string used for element-wise comparison (== >= <= !=)
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation |
def mean_length(infile, limit=None):
total = 0
count = 0
seq_reader = sequences.file_reader(infile)
for seq in seq_reader:
total += len(seq)
count += 1
if limit is not None and count >= limit:
break
assert count > 0
return total / count | Returns the mean length of the sequences in the input file. By default uses all sequences. To limit to the first N sequences, use limit=N |
def get_file_extension(file_path):
_ext = os.path.splitext(file_path)[-1]
if _ext:
return _ext[1:] if _ext.startswith('.') else _ext
return "" | >>> get_file_extension("/a/b/c")
''
>>> get_file_extension("/a/b.txt")
'txt'
>>> get_file_extension("/a/b/c.tar.xz")
'xz' |
def p_jsonpath_named_operator(self, p):
"jsonpath : NAMED_OPERATOR"
if p[1] == 'this':
p[0] = This()
elif p[1] == 'parent':
p[0] = Parent()
else:
raise Exception('Unknown named operator `%s` at %s:%s' % (p[1], p.lineno(1), p.lexpos(1))) | jsonpath : NAMED_OPERATOR |
def sim(self, src, tar):
if src == tar:
return 1.0
if not src or not tar:
return 0.0
min_word, max_word = (src, tar) if len(src) < len(tar) else (tar, src)
min_len = len(min_word)
for i in range(min_len, 0, -1):
if min_word[:i] == max_word[:i]:
return i / min_len
return 0.0 | Return the prefix similarity of two strings.
Prefix similarity is the ratio of the length of the shorter term that
exactly matches the longer term to the length of the shorter term,
beginning at the start of both terms.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Prefix similarity
Examples
--------
>>> cmp = Prefix()
>>> cmp.sim('cat', 'hat')
0.0
>>> cmp.sim('Niall', 'Neil')
0.25
>>> cmp.sim('aluminum', 'Catalan')
0.0
>>> cmp.sim('ATCG', 'TAGC')
0.0 |
def delete_firewall_rule(self, server_uuid, firewall_rule_position):
url = '/server/{0}/firewall_rule/{1}'.format(server_uuid, firewall_rule_position)  # path assumed from the UpCloud API
return self.request('DELETE', url) | Delete a firewall rule based on a server uuid and rule position. |
def executable_path(conn, executable):
executable_path = conn.remote_module.which(executable)
if not executable_path:
raise ExecutableNotFound(executable, conn.hostname)
return executable_path | Remote validator that accepts a connection object to ensure that a certain
executable is available returning its full path if so.
Otherwise an exception with thorough details will be raised, informing the
user that the executable was not found. |
def buffer_typechecks_and_display(self, call_id, payload):
self.buffer_typechecks(call_id, payload)
self.editor.display_notes(self.buffered_notes) | Adds typecheck events to the buffer, and displays them right away.
This is a workaround for this issue:
https://github.com/ensime/ensime-server/issues/1616 |
def setCurrentAction(self, action):
if action == self._currentAction:
return
self._currentAction = action
self.currentActionChanged.emit(action)
labels = self.actionLabels()
anim_grp = QParallelAnimationGroup(self)
max_size = self.maximumPixmapSize()
min_size = self.minimumPixmapSize()
if action:
label = self.labelForAction(action)
index = labels.index(label)
palette = self.palette()
effect = QGraphicsDropShadowEffect(label)
effect.setXOffset(0)
effect.setYOffset(0)
effect.setBlurRadius(20)
effect.setColor(QColor(40, 40, 40))
label.setGraphicsEffect(effect)
offset = self.padding()
if self.position() in (XDockToolbar.Position.East,
XDockToolbar.Position.West):
self.resize(max_size.width() + offset, self.height())
elif self.position() in (XDockToolbar.Position.North,
XDockToolbar.Position.South):
self.resize(self.width(), max_size.height() + offset)
w = max_size.width()
h = max_size.height()
dw = (max_size.width() - min_size.width()) / 3
dh = (max_size.height() - min_size.height()) / 3
for i in range(4):
before = index - i
after = index + i
if 0 <= before and before < len(labels):
anim = XObjectAnimation(labels[before],
'pixmapSize',
anim_grp)
anim.setEasingCurve(self.easingCurve())
anim.setStartValue(labels[before].pixmapSize())
anim.setEndValue(QSize(w, h))
anim.setDuration(self.duration())
anim_grp.addAnimation(anim)
if i:
labels[before].setGraphicsEffect(None)
if after != before and 0 <= after and after < len(labels):
anim = XObjectAnimation(labels[after],
'pixmapSize',
anim_grp)
anim.setEasingCurve(self.easingCurve())
anim.setStartValue(labels[after].pixmapSize())
anim.setEndValue(QSize(w, h))
anim.setDuration(self.duration())
anim_grp.addAnimation(anim)
if i:
labels[after].setGraphicsEffect(None)
w -= dw
h -= dh
else:
offset = self.padding()
for label in self.actionLabels():
label.setGraphicsEffect(None)
anim = XObjectAnimation(label, 'pixmapSize', self)
anim.setEasingCurve(self.easingCurve())
anim.setStartValue(label.pixmapSize())
anim.setEndValue(min_size)
anim.setDuration(self.duration())
anim_grp.addAnimation(anim)
anim_grp.finished.connect(self.resizeToMinimum)
anim_grp.start()
self._animating = True
anim_grp.finished.connect(anim_grp.deleteLater)
anim_grp.finished.connect(self.__markAnimatingFinished)
if self._currentAction:
self._hoverTimer.start()
else:
self._hoverTimer.stop() | Sets the current action for this widget that highlights the size
for this toolbar.
:param action | <QAction> |
def _get_all(self, *args, **kwargs):
headers = kwargs.get('headers', args[2] if len(args) > 2 else None) or dict()
if 'force' in headers:
keys = super(Bucket, self)._get_all(*args, **kwargs)
for key in keys:
mimicdb.backend.sadd(tpl.bucket % self.name, key.name)
mimicdb.backend.hmset(tpl.key % (self.name, key.name), dict(size=key.size, md5=key.etag.strip()))
key.name = key.name
return keys
prefix = kwargs.get('prefix', '')
return list(self.list(prefix=prefix)) | If 'force' is in the headers, retrieve the list of keys from S3.
Otherwise, use the list() function to retrieve the keys from MimicDB. |
def price_rounding(price, decimals=2):
try:
exponent = D('0.' + decimals * '0')  # assumed; yields e.g. Decimal('0.00') for the 2-decimal default
except InvalidOperation:
exponent = D()
return price.quantize(exponent, rounding=ROUND_UP) | Takes a decimal price and rounds to a number of decimal places |
def double_width(self, action):
# Control values below are assumed (ESC/P-style ESC W 1 / ESC W 0); the
# original literals were stripped from the source.
if action == 'on':
action = chr(1)
elif action == 'off':
action = chr(0)
else:
raise RuntimeError('Invalid action.')
self.send(chr(27) + 'W' + action) | Enable/cancel doublewidth printing
Args:
action: Enable or disable doublewidth printing. Options are 'on' and 'off'
Returns:
None
Raises:
RuntimeError: Invalid action. |
def main(input_filename, songname, format, counter):
song_data = AudioSegment.from_file(input_filename, format=format)
song_data = song_data.set_channels(1)
wav_tmp = song_data.export(format="wav")
wav_tmp.seek(0)
rate, wav_data = wavfile.read(wav_tmp)
peaks = resound.get_peaks(np.array(wav_data))
fingerprints = list(resound.hashes(peaks))
if not fingerprints:
raise RuntimeError("No fingerprints detected in source file - check your parameters passed to Resound.")
for fp, abs_offset in fingerprints:
counter[fp].append((abs_offset, songname))
print " Identified {} keypoints in .".format(len(counter), songname)
return counter | Calculate the fingerprint hashes of the referenced audio file and save
to disk as a pickle file |
def control(self, on=[], off=[]):
controls = {"light", "valve", "fan", "pump"}
def cast_arg(arg):
if type(arg) is str:
if arg == "all":
return controls
else:
return {arg} & controls
else:
return set(arg) & controls
for item in cast_arg(on):
self.manage(item, "on")
for item in cast_arg(off):
self.manage(item, "off")
sleep(.01)
return self.update() | This method serves as the primary interaction point
to the controls interface.
- The 'on' and 'off' arguments can either be a list or a single string.
This allows for both individual device control and batch controls.
Note:
Both the onlist and offlist are optional.
If only one item is being managed, it can be passed as a string.
Usage:
- Turning off all devices:
ctrlobj.control(off="all")
- Turning on all devices:
ctrlobj.control(on="all")
- Turning on the light and fan ONLY (for example)
ctrlobj.control(on=["light", "fan"])
- Turning on the light and turning off the fan (for example)
ctrlobj.control(on="light", off="fan") |
def build(
src, requirements=None, local_package=None,
config_file='config.yaml', profile_name=None,
):
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
return path_to_zip_file | Builds the file bundle.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str local_package:
The path to a local package which should be included in the deploy as
well (and/or is not available on PyPi) |
def index(self, key):
if isinstance(key, int):
if 0 <= key < len(self.__keys):
return key
raise IndexError(key)
elif isinstance(key, str):
try:
return self.__keys.index(key)
except ValueError:
raise KeyError(key)
else:
raise TypeError(key) | Return the index of the given item.
:param key:
:return: |
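A minimal sketch of the lookup contract, using a hypothetical container whose key list mirrors the logic above:

class Record:
    """Tiny stand-in container: integers are positional, strings are looked up by name."""

    def __init__(self, keys):
        self.__keys = list(keys)

    def index(self, key):
        if isinstance(key, int):
            if 0 <= key < len(self.__keys):
                return key
            raise IndexError(key)
        elif isinstance(key, str):
            try:
                return self.__keys.index(key)
            except ValueError:
                raise KeyError(key)
        raise TypeError(key)

r = Record(['alpha', 'beta'])
print(r.index(1), r.index('alpha'))   # 1 0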
def receive(args, reactor=reactor, _debug_stash_wormhole=None):
r = Receiver(args, reactor)
d = r.go()
if _debug_stash_wormhole is not None:
_debug_stash_wormhole.append(r._w)
return d | I implement 'wormhole receive'. I return a Deferred that fires with
None (for success), or signals one of the following errors:
* WrongPasswordError: the two sides didn't use matching passwords
* Timeout: something didn't happen fast enough for our tastes
* TransferError: the sender rejected the transfer: verifier mismatch
* any other error: something unexpected happened |
def _add_chrome_proxy_extension(
chrome_options, proxy_string, proxy_user, proxy_pass):
if not "".join(sys.argv) == "-c":
proxy_helper.create_proxy_zip(proxy_string, proxy_user, proxy_pass)
else:
lock = threading.Lock()
with lock:
time.sleep(random.uniform(0.02, 0.15))
if not os.path.exists(PROXY_ZIP_PATH):
proxy_helper.create_proxy_zip(
proxy_string, proxy_user, proxy_pass)
time.sleep(random.uniform(0.1, 0.2))
proxy_zip = PROXY_ZIP_PATH
if not os.path.exists(PROXY_ZIP_PATH):
proxy_zip = PROXY_ZIP_PATH_2
chrome_options.add_extension(proxy_zip)
return chrome_options | Implementation of https://stackoverflow.com/a/35293284 for
https://stackoverflow.com/questions/12848327/
(Run Selenium on a proxy server that requires authentication.) |
async def monitor_status(self, alarm_status_callback=None,
zone_changed_callback=None,
output_changed_callback=None):
self._alarm_status_callback = alarm_status_callback
self._zone_changed_callback = zone_changed_callback
self._output_changed_callback = output_changed_callback
_LOGGER.info("Starting monitor_status loop")
while not self.closed:
_LOGGER.debug("Iteration... ")
while not self.connected:
_LOGGER.info("Not connected, re-connecting... ")
await self.connect()
if not self.connected:
_LOGGER.warning("Not connected, sleeping for 10s... ")
await asyncio.sleep(self._reconnection_timeout)
continue
await self.start_monitoring()
if not self.connected:
_LOGGER.warning("Start monitoring failed, sleeping for 10s...")
await asyncio.sleep(self._reconnection_timeout)
continue
while True:
await self._update_status()
_LOGGER.debug("Got status!")
if not self.connected:
_LOGGER.info("Got connection broken, reconnecting!")
break
_LOGGER.info("Closed, quit monitoring.") | Start monitoring of the alarm status.
Send command to satel integra to start sending updates. Read in a
loop and call respective callbacks when received messages. |
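A hedged sketch of driving the monitoring loop with asyncio; the client construction and the callback bodies are hypothetical stand-ins, and only monitor_status itself comes from the row above:

import asyncio

def on_alarm(status):
    print("alarm status:", status)

def on_zone(zones):
    print("zones changed:", zones)

async def run(client):
    # client is assumed to be an already-constructed instance of the class above
    await client.monitor_status(alarm_status_callback=on_alarm,
                                zone_changed_callback=on_zone)

# asyncio.run(run(client))   # entry point once a client object exists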
def _set_avg_session_metrics(session_group):
assert session_group.sessions, 'SessionGroup cannot be empty.'
metric_stats = collections.defaultdict(_MetricStats)
for session in session_group.sessions:
for metric_value in session.metric_values:
metric_name = _MetricIdentifier(group=metric_value.name.group,
tag=metric_value.name.tag)
stats = metric_stats[metric_name]
stats.total += metric_value.value
stats.count += 1
stats.total_step += metric_value.training_step
stats.total_wall_time_secs += metric_value.wall_time_secs
del session_group.metric_values[:]
for (metric_name, stats) in six.iteritems(metric_stats):
session_group.metric_values.add(
name=api_pb2.MetricName(group=metric_name.group, tag=metric_name.tag),
value=float(stats.total)/float(stats.count),
training_step=stats.total_step // stats.count,
wall_time_secs=stats.total_wall_time_secs / stats.count) | Sets the metrics for the group to be the average of its sessions.
The resulting session group metrics consist of the union of metrics across
the group's sessions. The value of each session group metric is the average
of that metric values across the sessions in the group. The 'step' and
'wall_time_secs' fields of the resulting MetricValue field in the session
group are populated with the corresponding averages (truncated for 'step')
as well.
Args:
session_group: A SessionGroup protobuffer. |
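A worked sketch of the averaging rule with plain numbers standing in for two sessions of one metric; the protobuf types are not needed to see the arithmetic:

# session 1 reports value=0.30 at step 100, wall_time=10.0 s
# session 2 reports value=0.20 at step 200, wall_time=20.0 s
values = [(0.30, 100, 10.0), (0.20, 200, 20.0)]

count = len(values)
avg_value = sum(v for v, _, _ in values) / count    # 0.25
avg_step = sum(s for _, s, _ in values) // count    # 150 (truncated, as in the code)
avg_wall = sum(w for _, _, w in values) / count     # 15.0
print(avg_value, avg_step, avg_wall)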
def homogenize(series_dict):
index = None
need_reindex = False
for _, series in series_dict.items():
if not np.isnan(series.fill_value):
raise TypeError('this method is only valid with NaN fill values')
if index is None:
index = series.sp_index
elif not series.sp_index.equals(index):
need_reindex = True
index = index.intersect(series.sp_index)
if need_reindex:
output = {}
for name, series in series_dict.items():
if not series.sp_index.equals(index):
series = series.sparse_reindex(index)
output[name] = series
else:
output = series_dict
return output | Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
corresponding to the locations where they all have data
Parameters
----------
series_dict : dict or DataFrame
Notes
-----
Using the dumbest algorithm I could think of. Should put some more thought
into this
Returns
-------
homogenized : dict of SparseSeries |
def _CalculateHashDataStream(self, file_entry, data_stream_name):
hash_context = hashlib.sha256()
try:
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
except IOError as exception:
logging.warning((
'Unable to open path specification:\n{0:s}with error: {1!s}').format(  # message text reconstructed (assumed wording)
file_entry.path_spec.comparable, exception))
return None
if not file_object:
return None
try:
data = file_object.read(self._READ_BUFFER_SIZE)
while data:
hash_context.update(data)
data = file_object.read(self._READ_BUFFER_SIZE)
except IOError as exception:
logging.warning((
'Unable to read from path specification:\n{0:s}with error: {1!s}').format(  # message text reconstructed (assumed wording)
file_entry.path_spec.comparable, exception))
return None
finally:
file_object.close()
return hash_context.hexdigest() | Calculates a message digest hash of the data of the file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): name of the data stream.
Returns:
bytes: digest hash or None. |
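The same chunked-digest pattern using only the standard library, hashing an ordinary file rather than a dfVFS data stream; the path is a placeholder:

import hashlib

def sha256_of_file(path, buffer_size=32768):
    """Read in fixed-size chunks so large files never have to fit in memory."""
    ctx = hashlib.sha256()
    with open(path, 'rb') as fp:
        data = fp.read(buffer_size)
        while data:
            ctx.update(data)
            data = fp.read(buffer_size)
    return ctx.hexdigest()

# print(sha256_of_file('/path/to/some/file'))   # placeholder path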
def Fritzsche(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
Ps=101325., Zavg=1, E=1):
c5 = 93.50009798751128188757518688244137811221
c2 = 0.8587
c3 = 0.538
c4 = 2.69
if Q is None and (None not in [L, D, P1, P2]):
return c5*E*(Ts/Ps)*((P1**2 - P2**2)/(SG**c2*Tavg*L*Zavg))**c3*D**c4
elif D is None and (None not in [L, Q, P1, P2]):
return (Ps*Q*(SG**(-c2)*(P1**2 - P2**2)/(L*Tavg*Zavg))**(-c3)/(E*Ts*c5))**(1./c4)
elif P1 is None and (None not in [L, Q, D, P2]):
return (L*SG**c2*Tavg*Zavg*(D**(-c4)*Ps*Q/(E*Ts*c5))**(1./c3) + P2**2)**0.5
elif P2 is None and (None not in [L, Q, D, P1]):
return (-L*SG**c2*Tavg*Zavg*(D**(-c4)*Ps*Q/(E*Ts*c5))**(1./c3) + P1**2)**0.5
elif L is None and (None not in [P2, Q, D, P1]):
return SG**(-c2)*(D**(-c4)*Ps*Q/(E*Ts*c5))**(-1./c3)*(P1**2 - P2**2)/(Tavg*Zavg)
else:
raise Exception('This function solves for either flow, upstream pressure, downstream pressure, diameter, or length; all other inputs must be provided') | r'''Calculation function for dealing with flow of a compressible gas in a
pipeline with the Fritzsche formula. Can calculate any of the following,
given all other inputs:
* Flow rate
* Upstream pressure
* Downstream pressure
* Diameter of pipe
* Length of pipe
A variety of different constants and expressions have been presented
for the Fritzsche formula. Here, the form as in [1]_
is used but with all inputs in base SI units.
.. math::
Q = 93.500 \frac{T_s}{P_s}\left(\frac{P_1^2 - P_2^2}
{L\cdot {SG}^{0.8587} \cdot T_{avg}}\right)^{0.538}D^{2.69}
Parameters
----------
SG : float
Specific gravity of fluid with respect to air at the reference
temperature and pressure `Ts` and `Ps`, [-]
Tavg : float
Average temperature of the fluid in the pipeline, [K]
L : float, optional
Length of pipe, [m]
D : float, optional
Diameter of pipe, [m]
P1 : float, optional
Inlet pressure to pipe, [Pa]
P2 : float, optional
Outlet pressure from pipe, [Pa]
Q : float, optional
Flow rate of gas through pipe, [m^3/s]
Ts : float, optional
Reference temperature for the specific gravity of the gas, [K]
Ps : float, optional
Reference pressure for the specific gravity of the gas, [Pa]
Zavg : float, optional
Average compressibility factor for gas, [-]
E : float, optional
Pipeline efficiency, a correction factor between 0 and 1
Returns
-------
Q, P1, P2, D, or L : float
The missing input which was solved for [base SI]
Notes
-----
This model is also presented in [1]_ with a leading constant of 2.827,
the same exponents as used here, units of mm (diameter), kPa, km (length),
and flow in m^3/hour.
This model is shown in base SI units in [2]_, and with a leading constant
of 94.2565, a diameter power of 2.6911, main group power of 0.5382
and a specific gravity power of 0.858. The difference is very small.
Examples
--------
>>> Fritzsche(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, Tavg=277.15)
39.421535157535565
References
----------
.. [1] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton,
FL: CRC Press, 2005.
.. [2] Coelho, Paulo M., and Carlos Pinho. "Considerations about Equations
for Steady State Flow in Natural Gas Pipelines." Journal of the
Brazilian Society of Mechanical Sciences and Engineering 29, no. 3
(September 2007): 262-73. doi:10.1590/S1678-58782007000300005. |
def has_more_pages(self):
if self.has_next is not None:
return self.has_next
total_pages = self.get_total_pages()
if self.page_number is None or total_pages is None:
return None
else:
return self.page_number + 1 < total_pages | :return: ``True`` if there are more pages available on the server. |
def remove_library_from_file_system(self, library_path, library_name):
library_file_system_path = self.get_os_path_to_library(library_path, library_name)[0]
shutil.rmtree(library_file_system_path)
self.refresh_libraries() | Remove library from hard disk. |
def summary(self):
print("Type: %s" % self.__class__.__name__)
print("Batch Name: %r" % self.batch_name)
if self.tag:
print("Tag: %s" % self.tag)
print("Root directory: %r" % self.get_root_directory())
print("Maximum concurrency: %s" % self.max_concurrency)
if self.description:
print("Description: %s" % self.description) | A succinct summary of the Launcher configuration. Unlike the
repr, a summary does not have to be complete but must supply
key information relevant to the user. |
def get_authzd_permissions(self, identifier, perm_domain):
related_perms = []
keys = ['*', perm_domain]
def query_permissions(self):
msg = ("Could not obtain cached permissions for [{0}]. "
"Will try to acquire permissions from account store."
.format(identifier))
logger.debug(msg)
permissions = self.account_store.get_authz_permissions(identifier)
if not permissions:
msg = "Could not get permissions from account_store for {0}".\
format(identifier)
raise ValueError(msg)
return permissions
try:
msg2 = ("Attempting to get cached authz_info for [{0}]"
.format(identifier))
logger.debug(msg2)
domain = 'authz_info:' + self.name  # cache namespace prefix reconstructed (assumed)
related_perms = self.cache_handler.\
hmget_or_create(domain=domain,
identifier=identifier,
keys=keys,
creator_func=query_permissions,
creator=self)
except ValueError:
msg3 = ("No permissions found for identifiers [{0}]. "
"Returning None.".format(identifier))
logger.warning(msg3)
except AttributeError:
# no cache_handler is configured, so query the account store directly
queried_permissions = query_permissions(self)
related_perms = [queried_permissions.get('*'),
queried_permissions.get(perm_domain)]
return related_perms | :type identifier: str
:type domain: str
:returns: a list of relevant json blobs, each a list of permission dicts |
def future_import(feature, node):
root = find_root(node)
if does_tree_import(u"__future__", feature, node):
return
shebang_encoding_idx = None
for idx, node in enumerate(root.children):
if is_shebang_comment(node) or is_encoding_comment(node):
shebang_encoding_idx = idx
if is_docstring(node):
continue
names = check_future_import(node)
if not names:
break
if feature in names:
return
import_ = FromImport(u"__future__", [Leaf(token.NAME, feature, prefix=" ")])
if shebang_encoding_idx == 0 and idx == 0:
import_.prefix = root.children[0].prefix
root.children[0].prefix = u""
children = [import_, Newline()]
root.insert_child(idx, Node(syms.simple_stmt, children)) | This seems to work |
def explode(self, escalations):
for escalation in self:
properties = escalation.__class__.properties
name = getattr(escalation, 'host_name', getattr(escalation, 'hostgroup_name', ''))
creation_dict = {
'escalation_name':
'Generated-HostEscalation-%s-%s' % (name, escalation.uuid)
}
for prop in properties:
if hasattr(escalation, prop):
creation_dict[prop] = getattr(escalation, prop)
escalations.add_escalation(Escalation(creation_dict)) | Create instance of Escalation for each HostEscalation object
:param escalations: list of escalation, used to add new ones
:type escalations: alignak.objects.escalation.Escalations
:return: None |
def _init_idxs_strpat(self, usr_hdrs):
strpat = self.strpat_hdrs.keys()
self.idxs_strpat = [
Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in strpat] | List of indexes whose values will be strings. |
def save(self, path=None, debug=False):
p = self.copy()
plugin = p._plugin
p.path = path or self.path
if not p.path:
raise ValueError, "path is required"
if isinstance(p.path, basestring):
(folder, filename) = os.path.split(p.path)
(name, extension) = os.path.splitext(filename)
if path:
try:
plugin = kurt.plugin.Kurt.get_plugin(extension=extension)
except ValueError:
pass
if not name:
name = _clean_filename(self.name)
if not name:
raise ValueError, "name is required"
filename = name + plugin.extension
p.path = os.path.join(folder, filename)
fp = open(p.path, "wb")
else:
fp = p.path
path = None
if not plugin:
raise ValueError, "must convert project to a format before saving"
for m in p.convert(plugin):
print m
result = p._save(fp)
if path:
fp.close()
return result if debug else p.path | Save project to file.
:param path: Path or file pointer.
If you pass a file pointer, you're responsible for closing
it.
If path is not given, the :attr:`path` attribute is used,
usually the original path given to :attr:`load()`.
If `path` has the extension of an existing plugin, the
project will be converted using :attr:`convert`.
Otherwise, the extension will be replaced with the
extension of the current plugin.
(Note that log output for the conversion will be printed
to stdout. If you want to deal with the output, call
:attr:`convert` directly.)
If the path ends in a folder instead of a file, the
filename is based on the project's :attr:`name`.
:param debug: If true, return debugging information from the format
plugin instead of the path.
:raises: :py:class:`ValueError` if there's no path or name.
:returns: path to the saved file. |