content (stringlengths 35 to 762k) | sha1 (stringlengths 40) | id (int64 0 to 3.66M) |
---|---|---|
def quantile_compute(x, n_bins):
"""Quantile computation.
Parameters
----------
x: pd.DataFrame
the data variable we want to obtain its distribution.
n_bins: int
the number of bins we want to use to plot the distribution.
Returns
-------
quantiles: np.ndarray
the quantiles.
"""
# aux.quantile(np.linspace(0, 1, 11)) # version = 0.15
quantiles = [x.quantile(q) for q in np.linspace(0, 1, n_bins+1)]
quantiles = np.array(quantiles)
return quantiles | 4c23b417bb8c9e99709fca2371e476e4edadfeec | 600 |
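For illustration, a minimal usage sketch of the quantile logic above; numpy and pandas are imported here because the snippet omits them, and the toy data are hypothetical.
import numpy as np
import pandas as pd

x = pd.Series([3, 1, 4, 1, 5, 9, 2, 6])  # toy data standing in for a DataFrame column
n_bins = 4
quantiles = np.array([x.quantile(q) for q in np.linspace(0, 1, n_bins + 1)])
print(quantiles)  # five bin edges: minimum, 25th, 50th, 75th percentile, maximum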
def remove_separators(version):
"""Remove separator characters ('.', '_', and '-') from a version.
A version like 1.2.3 may be displayed as 1_2_3 in the URL.
Make sure 1.2.3, 1-2-3, 1_2_3, and 123 are considered equal.
Unfortunately, this also means that 1.23 and 12.3 are equal.
Args:
version (str or Version): A version
Returns:
str: The version with all separator characters removed
"""
version = str(version)
version = version.replace('.', '')
version = version.replace('_', '')
version = version.replace('-', '')
return version | c29b9e7ca84705a9f36123ff0d84e3beb24468bf | 601 |
def parse_coverage_status(status):
"""Parse a coverage status"""
return Status.HIT if status.upper() == 'SATISFIED' else Status.MISSED | 8a295b3df2d10b7813ce0efc96331607da5ba1b9 | 602 |
from functools import reduce
def max_index(lst):
    """Returns the index of the max value of lst."""
    split_list = zip(lst, range(len(lst)))
    # reduce keeps the (value, index) pair with the larger value
    (retval, retI) = reduce(lambda curr, new: curr if curr[0] > new[0] else new, split_list)
    return retI | 43e15edbc227b13db3d468b4ee8d030770d5f1a2 | 603 |
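As a side note, the same index can be obtained without reduce; a small sketch using only built-ins (the function name here is illustrative, not part of the original snippet):
def max_index_builtin(lst):
    """Index of the maximum value, using max with a key function."""
    return max(range(len(lst)), key=lambda i: lst[i])

print(max_index_builtin([3, 7, 2, 7]))  # 1 (first occurrence of the maximum)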
def mask_coverage(coverage: mx.sym.Symbol, source_length: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Masks all coverage scores that are outside the actual sequence.
:param coverage: Input coverage vector. Shape: (batch_size, seq_len, coverage_num_hidden).
:param source_length: Source length. Shape: (batch_size,).
:return: Masked coverage vector. Shape: (batch_size, seq_len, coverage_num_hidden).
"""
return mx.sym.SequenceMask(data=coverage, axis=1, use_sequence_length=True, sequence_length=source_length) | d2594a12a78d604c26588218f8b084bf5c18d2ac | 604 |
def DFS_complete(g):
"""Perform DFS for entire graph and return forest as a dictionary.
Result maps each vertex v to the edge that was used to discover it.
(Vertices that are roots of a DFS tree are mapped to None.)
"""
forest = {}
for u in g.vertices():
if u not in forest:
forest[u] = None # u will be the root of a tree
DFS(g, u, forest)
return forest | ec141354d41cf87381db841e6adf8e888a573494 | 605 |
def _transform_p_dict(p_value_dict):
"""
Utility function that transforms a dictionary of dicts into a dataframe representing the dicts as rows
(like tuples). Is needed to keep track of the feature names and corresponding values.
The underlying datastructures are confusing.
:param p_value_dict: dictionary of dictionaries storing the p_values
:return: dataframe where the keys are added to the p_values as columns
"""
# Turn dictionary of dictionaries into a collection of the key-value pairs represented as nested tuples
item_dict = dict()
for feat in p_value_dict:
item_dict[feat] = list(p_value_dict[feat].items())
# building a matrix (nested lists) by extracting and sorting data from nested tuples
# (items[0], (nested_items[0], nested_items[1]))
df_matrix = []
for items in item_dict.items():
for nested_items in items[1]:
df_matrix.append([nested_items[1], nested_items[0], items[0]])
return pd.DataFrame(df_matrix) | 154ce78ae03267ce69254ff583ca0b736d62d435 | 606 |
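A small input/output sketch mirroring the logic of _transform_p_dict (pandas is imported here because the snippet omits it); each output row is [p_value, inner_key, outer_key]:
import pandas as pd

p_value_dict = {"feat_a": {"feat_b": 0.01, "feat_c": 0.20},
                "feat_b": {"feat_a": 0.03}}
rows = []
for feat, nested in p_value_dict.items():
    for other, p in nested.items():
        rows.append([p, other, feat])
print(pd.DataFrame(rows))
# column 0 holds the p-value, column 1 the inner key, column 2 the outer key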
import torch
from typing import Optional
def iou(
predict: torch.Tensor,
target: torch.Tensor,
mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
This is a great loss because it emphasizes on the active
regions of the predict and targets
"""
dims = tuple(range(predict.dim())[1:])
if mask is not None:
predict = predict * mask
target = target * mask
intersect = (predict * target).sum(dims)
union = (predict + target - predict * target).sum(dims) + 1e-4
return (intersect / union).sum() / intersect.numel() | 7608189bde3b640a8f148e3628e5668a3b310655 | 607 |
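A tiny worked example of the soft-IoU reduction above, on a single 2x2 prediction/target pair with hand-picked values:
import torch

predict = torch.tensor([[0.0, 1.0], [1.0, 0.0]]).reshape(1, 2, 2)
target = torch.tensor([[0.0, 1.0], [0.0, 0.0]]).reshape(1, 2, 2)
dims = tuple(range(predict.dim())[1:])                 # reduce over all but the batch dim
intersect = (predict * target).sum(dims)               # tensor([1.])
union = (predict + target - predict * target).sum(dims) + 1e-4  # tensor([2.0001])
print((intersect / union).sum() / intersect.numel())   # ~0.5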
import re
def get_sequence(seq_id):
"""
TO DO:
1. redirection 303. (not tested in compliance_suite)
2. Note: compliance_suite ignores the range if it is out of bounds or if > SUBSEQUENCE_LIMIT
3. Ambiguous error code resolution in refget documentation:
range:
The server MUST respond with a Bad Request error if one or more ranges are out of bounds of the sequence.
If the server supports circular chromosomes and the chromosome is not circular
or the range is outside the bounds of the chromosome the server shall return Range Not Satisfiable.
start, end:
The server MUST respond with a Bad Request error if start is specified and is larger than the total sequence length.
If the server supports circular chromosomes and the chromosome is not circular
or the range is outside the bounds of the chromosome the server shall return Range Not Satisfiable.
4. Should we validate the response headers in the compliance suite?
5. check if start and end are 32 bit
"""
header_content = request.headers
accept_type = "text/vnd.ga4gh.refget.v1.0.0+plain"
# validate the accept header
if "accept" in header_content and header_content["accept"] not in [accept_type,"*/*"]:
# bad mock server: status = 200 when headers are incorrect
return Response(status=200)
# check if the sequence is present. If not, error = 404
sequence_obj = get_sequence_obj(seq_id, DATA, TRUNC512)
if not sequence_obj:
# bad mock server: status = 200 when sequence is not found
return Response(status=200)
# get start, end and range params
start = request.args.get('start', default = None)
end = request.args.get('end', default=None)
RANGE_HEADER_PATTERN = re.compile(r'bytes=[0-9]*-[0-9]*$')
if "range" in header_content:
seq_range = header_content["Range"]
# bad mock server: status = 404 when both start/end and range parameters are passed
if (start or end) and seq_range:
return Response(status=404)
# bad mock server: status = 404 when range pattern is not as expected
if not RANGE_HEADER_PATTERN.match(seq_range):
return Response(status=404)
fbs = int(header_content['Range'].split('=')[1].split('-')[0])
lbs = int(header_content['Range'].split('=')[1].split('-')[1])
if (fbs > sequence_obj.size-1) or (fbs > lbs):
"""
Note:
we are ignoring case where (lbs > sequence_obj.size-1), (lbs-fbs+1 > SUBSEQUENCE_LIMIT) to match the compliance_suite tests
filtering the case where (fbs > lbs) : Sub-sequences of circular chromosomes across the origin may not be requested via the Range header
"""
# since fbs and lbs <= SUBSEQUENCE_LIMIT-1, it is implied that the subsequence limit if any is satisfied
# bad mock server: incorrect status = 400
return Response(status=400)
elif (lbs > sequence_obj.size-1) :
lbs = sequence_obj.size
# bad mock server: status = 200 while returning subsequence
return Response(response=(sequence_obj.sequence[fbs:lbs+1]).encode("ascii"),status=200, mimetype=accept_type)
if start or end:
# if start or end params are present, return the filtered sequence
# if start or end params are not 32 bit unsigned int, error = 400
# if start > sequence length, error =400
# if start > end and circular not implemented, error = 501
if start:
# TO DO: check if start and end are 32 bit
if not start.isdigit(): #checks if start is unsigned int
# bad mock server: incorrect status 200
return Response(status=200)
start = int(start)
else: start = 0
if end:
if not end.isdigit(): #checks if end is unsigned int
# bad mock server: incorrect status 200
return Response(status=200)
end = int(end)
else: end = sequence_obj.size
if start >= sequence_obj.size or end > sequence_obj.size :
# bad mock server: incorrect status 400
return Response(status=400)
elif start > end:
if CIRCULAR_CHROMOSOME_SUPPORT == False:
# bad mock server: incorrect status 416
return Response(status=416)
else:
if sequence_obj.is_circular == False:
# bad mock server: incorrect status 500
return Response(status=500)
else:
if len(sequence_obj.sequence[start:sequence_obj.size]+sequence_obj.sequence[0:end])>SUBSEQUENCE_LIMIT:
# bad mock server: incorrect status 400
return Response(status=400)
else:
# bad mock server: incorrect status 404
return Response(response=(sequence_obj.sequence[start:sequence_obj.size]+sequence_obj.sequence[0:end]).encode("ascii"),status=404,mimetype=accept_type)
elif end-start >SUBSEQUENCE_LIMIT:
# bad mock server: incorrect status 200
return Response(status=200)
# bad mock server: incorrect status 404
return Response(response=(sequence_obj.sequence[start:end]).encode("ascii"),status=404,mimetype=accept_type)
# bad mock server: incorrect status 500
return Response(response=(sequence_obj.sequence).encode("ascii"), status=500,mimetype=accept_type) | d0d10c1c491d32ffc70c5579330163bee11d5a15 | 608 |
def getCondVisibility(condition):
"""
Returns ``True`` (``1``) or ``False`` (``0``) as a ``bool``.
:param condition: string - condition to check.
List of Conditions: http://wiki.xbmc.org/?title=List_of_Boolean_Conditions
.. note:: You can combine two (or more) of the above settings by using "+" as an ``AND`` operator,
"|" as an ``OR`` operator, "!" as a ``NOT`` operator, and "[" and "]" to bracket expressions.
example::
visible = xbmc.getCondVisibility('[Control.IsVisible(41) + !Control.IsVisible(12)]')
"""
return bool(1) | 761914696ac2050c6bf130e5b49221be043903bd | 609 |
def get_xy_strs(kpts):
""" strings debugging and output """
_xs, _ys = get_xys(kpts)
xy_strs = [('xy=(%.1f, %.1f)' % (x, y,)) for x, y, in zip(_xs, _ys)]
return xy_strs | 78117b991c851a67dccb3c40fa5ca18f469b53c8 | 610 |
def history_cumulative(request):
"""
This endpoints returns the number of cumulative infections for each area given a date in history.
"""
days = int(request.query_params.get("days"))
observed = Covid19DataPoint.objects.all()
historyDate = max([d.date for d in observed]) - timedelta(days=-days)
shownData = observed.filter(date=historyDate)
deathData = Covid19DeathDataPoint.objects.filter(date=historyDate)
#total_confirmed = sum([d.val for d in shownData])
#total_death = sum([d.val for d in deathData])
greatest_model = Covid19Model.objects.get(name="SI-kJalpha - 40x")
greatest_predictions = Covid19PredictionDataPoint.objects.filter(model=greatest_model)
greatest_predictions = greatest_predictions.filter(date=greatest_predictions.last().date, social_distancing=1)
greatest_vals = [d.val for d in greatest_predictions]
max_val = max(greatest_vals)
greatest_death_model = Covid19Model.objects.get(name="SI-kJalpha - 40x (death prediction)")
greatest_death_predictions = Covid19PredictionDataPoint.objects.filter(model=greatest_death_model)
greatest_death_predictions = greatest_death_predictions.filter(date=greatest_death_predictions.last().date,
social_distancing=1)
greatest_death_vals = [d.val for d in greatest_death_predictions]
max_death_val = max(greatest_death_vals)
response = [{
'area': {
'country': d.area.country,
'state': d.area.state,
'iso_2': d.area.iso_2,
},
'value': d.val,
#'value_percentage': 1e3*d.val/total_confirmed,
'max_val_percentage': 1e4*d.val/max_val,
'date': d.date,
'deathValue': deathData.filter(area=d.area, date=d.date).first().val,
'max_death_percentage':1e4*deathData.filter(area=d.area, date=d.date).first().val/max_death_val,
#'death_percentage': 1e3*deathData.filter(area=d.area, date=d.date).first().val/total_death,
} for d in shownData]
return Response(response) | 7ecdf3e41304d99b11e7695e2962dbe3b7f6c96a | 611 |
def check_partial(func, *args, **kwargs):
"""Create a partial to be used by goodtables."""
new_func = partial(func, *args, **kwargs)
new_func.check = func.check
return new_func | 55a723bc81e5666db9fdd97a4ea88d36635e3dc3 | 612 |
def mcas(mc, entries):
"""Multi-entry compare-and-set.
Synopsis:
>>> from memcache_collections import mcas
>>> mc = memcache.Client(['127.0.0.1:11211'], cache_cas=True)
>>> # initialize a doubly-linked list with two elements
>>> mc.set_multi({
... 'foo': {'next': 'bar'},
... 'bar': {'prev': 'foo'}})
[]
>>> # Always use mcas_get to access entries potentially in MCAS
>>> # operations. It returns an object representing a memcache entry
>>> # snapshot.
>>> foo_entry, bar_entry = mcas_get(mc, 'foo'), mcas_get(mc, 'bar')
>>> foo_entry.key, foo_entry.value
('foo', {'next': 'bar'})
>>> # atomically insert new node in our doubly linked list via MCAS
>>> mc.add('baz', {'prev': 'foo', 'next': 'bar'})
1
>>> mcas(mc, [
... (foo_entry, {'next': 'baz'}),
... (bar_entry, {'prev': 'baz'})])
True
Function is not thread safe due to implicit CAS ID handling of the
Python API.
Args:
mc: memcache client
entries: iterable of (Entry, new_value) tuples
Returns: True if MCAS completed successfully.
The aggregate size of current and new values for all entries must fit
within the memcache value limit (typically 1 MB).
Based on "Practical lock-freedom", Keir Fraser, 2004, pp. 30-34.
"""
dc = _DequeClient(mc)
mcas_record = _McasRecord(mc, entries)
dc.AddNode(mcas_record)
# very sad that we need to read this back just to get CAS ID
dc.mc.gets(mcas_record.uuid)
return _mcas_help(dc, mcas_record, is_originator=True) | 741cbc4d9962292fc544b945785a72cc25060d5b | 613 |
def figure_8s(N_cycles=2, duration=30, mag=0.75):
"""
Scenario: multiple figure-8s.
Parameters
----------
N_cycles : int
How many cycles of left+right braking.
duration : int [sec]
Seconds per half-cycle.
mag : float
Magnitude of braking applied.
"""
on = [(2.0, mag), (duration - 2.0, None)] # Braking on
off = [(1.0, 0), (duration - 1.0, None)] # Braking off
inputs = {
"delta_br": simulation.linear_control([(2, 0), *([*on, *off] * N_cycles)]),
"delta_bl": simulation.linear_control([(2, 0), *([*off, *on] * N_cycles)]),
}
T = N_cycles * duration * 2
return inputs, T | 3bb485b23ea337b038a52fa946b5080cb8ae79eb | 614 |
import inspect
def grad_ast(func, wrt, motion, mode, preserve_result, verbose):
"""Perform AD on a single function and return the AST.
Args:
See `grad`.
Returns:
node: The AST of a module containing the adjoint and primal function
definitions.
required: A list of non-built in functions that this function called, and
of which the primals and adjoints need to be made available in order
for the returned function to run.
"""
node = annotate.resolve_calls(func)
fence.validate(node, inspect.getsource(func))
node = anf_.anf(node)
if verbose >= 2:
print('ANF')
print(quoting.to_source(node))
if mode == 'reverse':
node, required, stack = reverse_ad.reverse_ad(node.body[0], wrt,
preserve_result)
if verbose >= 2:
print('RAW')
print(quoting.to_source(node))
if motion == 'split':
node = reverse_ad.split(node, stack)
else:
node = reverse_ad.joint(node)
if verbose >= 2:
print('MOTION')
print(quoting.to_source(node))
elif mode == 'forward':
node, required = forward_ad.forward_ad(node.body[0], wrt, preserve_result)
return node, required | 1b358f36fb73169fa31bfd3266ccfae172980178 | 615 |
import re
def sortRules(ruleList):
"""Return sorted list of rules.
Rules should be in a tab-delimited format: 'rule\t\t[four letter negation tag]'
Sorts list of rules descending based on length of the rule,
splits each rule into components, converts pattern to regular expression,
and appends it to the end of the rule. """
ruleList.sort(key = len, reverse = True)
sortedList = []
for rule in ruleList:
s = rule.strip().split('\t')
splitTrig = s[0].split()
trig = r'\s+'.join(splitTrig)
pattern = r'\b(' + trig + r')\b'
s.append(re.compile(pattern, re.IGNORECASE))
sortedList.append(s)
return sortedList | 5b98903fd48f562d22e0ce269aa55e52963fa4a9 | 616 |
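A hypothetical two-rule list in the tab-delimited format the docstring describes, showing the split components and the compiled pattern that sortRules appends:
import re

rules = ["no evidence of\t\t[PREN]", "denies\t\t[PREN]"]
rules.sort(key=len, reverse=True)
for rule in rules:
    s = rule.strip().split('\t')
    trig = r'\s+'.join(s[0].split())
    s.append(re.compile(r'\b(' + trig + r')\b', re.IGNORECASE))
    print(s[:3], s[-1].pattern)
# ['no evidence of', '', '[PREN]'] \b(no\s+evidence\s+of)\b
# ['denies', '', '[PREN]'] \b(denies)\b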
def v3_settings_response():
    """Define a fixture that returns a V3 settings response."""
    return load_fixture("v3_settings_response.json") | 909d538388dde838bc57d6084c98db40b24db4f8 | 617 |
def Geom2dLProp_Curve2dTool_FirstParameter(*args):
"""
* returns the first parameter bound of the curve.
:param C:
:type C: Handle_Geom2d_Curve &
:rtype: float
"""
return _Geom2dLProp.Geom2dLProp_Curve2dTool_FirstParameter(*args) | f9dea146d3e9002c17cddd3f60ff6fe5362a1268 | 618 |
def decode_regression_batch_image(x_batch, y_batch, x_post_fn = None, y_post_fn = None, **kwargs):
"""
x_batch: L or gray (batch_size, height, width, 1)
y_batch: ab channel (batch_size, height, width, 2)
x_post_fn: decode function of x_batch
y_post_fn: decode function of y_batch
"""
    assert len(y_batch.shape)==4 and y_batch.shape[3]==2, "Invalid y_batch shape (batchsize, height, width, 2)"
    assert len(x_batch.shape)==4 and x_batch.shape[3]==1, "Invalid x_batch shape (batchsize, height, width, 1)"
y_height, y_width = y_batch.shape[1:3]
x_height, x_width = x_batch.shape[1:3]
if x_height != y_height or x_width != y_width:
y_batch = sni.zoom(y_batch, [1, 1.*x_height/y_height, 1.*x_width/y_width, 1])
# if
x_batch = x_post_fn(x_batch) if x_post_fn is not None else x_batch
y_batch = y_post_fn(y_batch) if y_post_fn is not None else y_batch
    y_batch_Lab = np.concatenate([x_batch, y_batch], axis = 3)
y_batch_RGB = np.array([cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_Lab2RGB) for image in y_batch_Lab])
return y_batch_RGB | 270c0b60646c0d16f8f8a4f3f2d5933bef369e3e | 619 |
def get_mag_from_obs(h, e, d0=0):
"""gets the magnetic north components given the observatory components.
Parameters
__________
h: array_like
the h component from the observatory
e: array_like
the e component from the observatory
d0: float
the declination baseline angle in radians
Returns
_______
tuple of array_like
[0]: total h component as a float
[1]: total d declination as a float
"""
mag_h = get_mag_h_from_obs(h, e)
mag_d = get_mag_d_from_obs(h, e, d0)
return (mag_h, mag_d) | 27f4d538c3a9e13522f195bd9abb2683037ba72a | 620 |
import subprocess
def get_local_host(choice='IP'):
"""
choice: 'IP' or 'NAME'
"""
if choice == 'IP':
cmd = 'hostname -i'
else:
cmd = 'hostname'
    out = subprocess.check_output(cmd.split()).decode()
    if choice == 'NAME':
        return out.strip('\n')
    else:
        ip_tmp = out.strip('\n').strip()
if ip_tmp and address_can_be_linked(ip_tmp):
ip = ip_tmp
else:
ip = None
ip_list = get_all_ips(None)
for ip_tmp in ip_list:
if address_can_be_linked(ip_tmp):
ip = ip_tmp
break
return ip | ef7b21e6ae10c360759bdb11e9c80695f63ea31e | 621 |
def set_cell(client, instance, colid, value, file_=None):
"""Set the value of one cell of a family table.
Args:
client (obj):
creopyson Client.
instance (str):
Family Table instance name.
colid (str):
Column ID.
value (depends on data type):
Cell value.
`file_` (str, optional):
File name (usually an assembly).
Defaults is currently active model.
Returns:
None
"""
data = {
"instance": instance,
"colid": colid,
"value": value,
}
if file_ is not None:
data["file"] = file_
else:
active_file = client.file_get_active()
if active_file:
data["file"] = active_file["file"]
return client._creoson_post("familytable", "set_cell", data) | b1617484a3a710046cda5daaf6bbe9b376f83418 | 622 |
def find_bounds(particles):
"""
Find the maximum and minimum bounds describing a set of particles.
"""
min_bound = np.array(
[np.min(particles[:, 0]), np.min(particles[:, 1]), np.min(particles[:, 2])]
)
max_bound = np.array(
[np.max(particles[:, 0]), np.max(particles[:, 1]), np.max(particles[:, 2])]
)
return max_bound, min_bound | 2640ce6aa79cbe4392d2e044bddf46a94f6b76af | 623 |
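A quick usage sketch of find_bounds on three 3-D particles (numpy imported here); note that particles.min(axis=0) and particles.max(axis=0) give the same per-column extrema as the explicit np.min/np.max calls above:
import numpy as np

particles = np.array([[ 0.0, 2.0, -1.0],
                      [ 1.0, 0.5,  3.0],
                      [-2.0, 1.0,  0.0]])
min_bound = particles.min(axis=0)
max_bound = particles.max(axis=0)
print(max_bound, min_bound)  # [1. 2. 3.] [-2.  0.5 -1.]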
def get_tags_date(link, default_date=None):
"""Extract tags and date from the link."""
tags = ["links"]
date = ""
fltr = [
"Bookmarks Menu",
"Bookmark Bar",
"Personal Toolbar Folder",
"Importierte Lesezeichen",
"Bookmarks Toolbar",
"Kein Label vorhanden",
"Unsorted Bookmarks",
"Unsortierte Lesezeichen",
"Recently Bookmarked",
"Recent Tags",
]
for parent in link.parents:
if parent.name == "dl":
for sibling in parent.previous_siblings:
if sibling.name == "h3":
tags += sibling.get_text().split(">")
datestr = (
sibling.get("add_date", None)
or sibling.get("last_visit", None)
or sibling.get("last_modified", None)
or default_date
)
date = convert_date(datestr)
for sibling in parent.next_siblings:
if sibling.name == "h3":
tags += sibling.get_text().split(">")
datestr = (
sibling.get("add_date", None)
or sibling.get("last_visit", None)
or sibling.get("last_modified", None)
or default_date
)
date = convert_date(datestr)
break
return ([standardize_tag(i) for i in tags if i not in fltr], date) | b9f28a1f2f819cdfcc794ce3dc507ff4df288906 | 624 |
from datetime import datetime
def _is_future(time, time_ref=None):
"""
check if `time` is in future (w.r.t. `time_ref`, by default it is now)
Parameters
----------
time : int or datetime
the time to check (if int it's considered a
timestamp, see :py:meth:`datetime.timestamp`)
time_ref : int or datetime
the time reference (if int it's considered a timestamp, see
:py:meth:`datetime.timestamp`), if None use the present time
(default: None)
Returns
-------
bool
is in future or not
"""
time = _parse_time_from_input(time, "time")
if time_ref is None:
time_ref = datetime.now()
else:
time_ref = _parse_time_from_input(time_ref, "time_ref")
return time > time_ref | ece5121ee0e49c77a260b117cb0f251a35aa289b | 625 |
from datetime import datetime
def create_and_train_model(x_learn, y_learn, model, n_cores):
"""General method to create and train model"""
print(model.fit(x_learn, y_learn))
start_time = datetime.now()
c_val = cross_val_score(model, x_learn, y_learn, cv=10, n_jobs=n_cores)
end_time = datetime.now()
    print(type(model).__name__, "with n_jobs =", n_cores, "took:", (end_time - start_time).total_seconds(), "seconds")
print(type(model).__name__, "cross_val_score:", c_val.mean())
return model, c_val | babef20c893c39a09fe4a0e0f777b2ec326b694c | 626 |
def cardidolizedimageurl(context, card, idolized, english_version=False):
"""
Returns an image URL for a card in the context of School Idol Contest
"""
prefix = 'english_' if english_version else ''
if card.is_special or card.is_promo:
idolized = True
if idolized:
if getattr(card, prefix + 'round_card_idolized_image'):
return _imageurl(getattr(card, prefix + 'round_card_idolized_image'), context=context)
if getattr(card, prefix + 'card_idolized_image'):
return _imageurl(getattr(card, prefix + 'card_idolized_image'), context=context)
return _imageurl('static/default-' + card.attribute + '.png', context=context)
if getattr(card, prefix + 'round_card_image'):
return _imageurl(getattr(card, prefix + 'round_card_image'), context=context)
if getattr(card, prefix + 'card_image'):
return _imageurl(getattr(card, prefix + 'card_image'), context=context)
return _imageurl('static/default-' + card.attribute + '.png', context=context) | c69e1a4d998d632091fcf1d69a240d68386e0b21 | 627 |
def extract_simple_tip(e):
"""
"""
emin = e.min()
emax = e.max()
indices = [nearest_index(emin), nearest_index(emax)]
indices.sort()
imin,imax = indices
imax +=1 # for python style indexing
return imin, imax | c3d215ee34caa733a83b8be54cc92f9071839bef | 628 |
def parse_pipeline_config(pipeline_config_file):
"""Returns pipeline config and meta architecture name."""
with tf.gfile.GFile(pipeline_config_file, 'r') as config_file:
config_str = config_file.read()
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
text_format.Merge(config_str, pipeline_config)
meta_arch = pipeline_config.model.WhichOneof('model')
return pipeline_config, meta_arch | 4e7047c2e7b195bce8cadf83fa19e1b7c1cb16ca | 629 |
import math
def get_pwl(time_series, pwl_epsilon):
""" This is a wrapper function for getting a bounded piecewise linear approximation of the data """
if not isinstance(pwl_epsilon, (int, float)):
raise TypeError("pwl_epsilon must be a numeric type!")
if not (isinstance(time_series, pd.DataFrame) or isinstance(time_series, list)):
raise TypeError("The argument time_series must be a Pandas Dataframe, or a list!")
if isinstance(time_series, pd.DataFrame):
# just how hakimis algorithm wants the data
polyline_from_data = list(zip(time_series.index.tolist(),
time_series[construct_variable_name(1)].values.tolist()))
else:
polyline_from_data = time_series
if math.isclose(pwl_epsilon, 0.0):
return polyline_from_data
else:
approx_grap = create_approximation_graph(timeseries=polyline_from_data, epsilon=pwl_epsilon)
shortest_path_gen =\
nx.all_shortest_paths(approx_grap, tuple(polyline_from_data[0]), tuple(polyline_from_data[-1]))
# this avoids generating all paths, since we take just the first one (saves memory and time)
return next(shortest_path_gen) | 431d83e59e4f3faa3cbe7f21106690f030330529 | 630 |
def to_array(string):
"""Converts a string to an array relative to its spaces.
Args:
string (str): The string to convert into array
Returns:
        list: New array
"""
try:
new_array = string.split(" ") # Convert the string into array
while "" in new_array: # Check if the array contains empty strings
new_array.remove("")
return new_array
except:
print("The parameter string is not a str")
return string | 7ee87a2b245a71666939e9ce2e23dc07fcaa0153 | 631 |
def convert_atoms_to_pdb_molecules(atoms: t.List[Atom]) -> t.List[str]:
"""
This function converts the atom list into pdb blocks.
Parameters
----------
atoms : t.List[Atom]
List of atoms
Returns
-------
t.List[str]
pdb strings of that molecule
"""
# 1) GROUP ATOMS BT MOLECULES
molecules = defaultdict(list)
for a in atoms:
molecules[a.resi].append(a)
# 2) CONSTUCT PDB BLOCKS
#ref: https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html
pdb_format = "ATOM {:>5d} {:<2}{:1}{:>3} {:1}{:>3d}{:1} {:>7.3f}{:>7.3f}{:>7.3f}{:>5}{:>6}{:<3}{:>2} {:>2d}"
dummy_occupancy= dummy_bfactor= dummy_charge = 0.0
dummy_alt_location= dummy_chain= dummy_insertion_code= dummy_segment = ""
pdb_molecules: t.List[str] = []
for m_ID in sorted(molecules):
m = molecules[m_ID]
atoms_as_lines: t.List[str] = []
for a in sorted(m, key= lambda x: x.id):
atoms_as_lines.append(pdb_format.format(int(a.id), a.name, dummy_alt_location, a.resn, dummy_chain, int(a.resi), dummy_insertion_code, a.x, a.y, a.z, dummy_occupancy, dummy_bfactor, dummy_segment, a.elem, int(dummy_charge)))
# Sort by Id: => convert str up do first space to int
#atoms_as_lines = sorted(atoms_as_lines, key=lambda x: int(x[:x.index('\t')]))
molecule_as_str = "TITLE "+a.resn+"\n"+'\n'.join(atoms_as_lines) + '\nEND'
# molecule_as_str = molecule_as_str.replace('\t',' ')
pdb_molecules.append(molecule_as_str)
print(pdb_molecules[-1])
return pdb_molecules | 3f073818b92ce1db7b2e8c4aaf0724afb546beba | 632 |
def unvoiced_features(sig,fs,vcont,sil_cont):
"""
Unvoiced segment features.
Requires voiced and silence/pauses segment detection.
"""
#Unvoiced features
uv_seg,_,_ = unvoiced_seg(sig,fs,vcont,sil_cont)
lunvoiced = []
for uv in uv_seg:
lunvoiced.append(len(uv)/fs)#Length of unvoiced segment
uunvoiced = np.mean(lunvoiced)#Average length
# sunvoiced = np.std(lunvoiced)#variation of length
uvrate = (len(uv_seg)*fs)/len(sig)#Unvoiced segments per second
numuv = len(uv_seg)
rPVI,nPVI = get_pvi(lunvoiced)
pGPI,dGPI = get_gpi(lunvoiced,len(sig)/fs)
# feats_unvoiced = np.hstack([numuv,uvrate,uunvoiced,rPVI,nPVI,pGPI,dGPI])
feats_unvoiced = {'Unvoiced_counts':numuv,
'Unvoiced_rate':uvrate,
'Unvoiced_duration':uunvoiced,
'Unvoiced_rPVI':rPVI,
'Unvoiced_nPVI':nPVI,
'Unvoiced_dGPI':dGPI}
return feats_unvoiced | bb6f5fc0c939a7d838140b7a7eadda2ff32e5592 | 633 |
def civic_methods(method001, method002, method003):
"""Create test fixture for methods."""
return [method001, method002, method003] | 63913e2cfe866c65d9a1e7d5d3ba2e081b8e12f6 | 634 |
def _generate_tags(encoding_type, number_labels=4):
"""
    :param encoding_type: e.g. BIOES, BMES, BIO
    :param number_labels: number of labels, must be greater than 1
:return:
"""
vocab = {}
for i in range(number_labels):
label = str(i)
for tag in encoding_type:
if tag == 'O':
if tag not in vocab:
vocab['O'] = len(vocab) + 1
continue
            vocab['{}-{}'.format(tag, label)] = len(vocab) + 1  # the value is effectively the running count/index for this tag
return vocab | 36125c684be9dc1d0abc522d536276be7e3d7328 | 635 |
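For illustration, the vocabulary produced by the logic above for encoding_type='BIO' and number_labels=2 (a standalone sketch replicating the loop):
vocab = {}
for i in range(2):                      # labels "0" and "1"
    for tag in "BIO":
        if tag == 'O':
            vocab.setdefault('O', len(vocab) + 1)
            continue
        vocab['{}-{}'.format(tag, str(i))] = len(vocab) + 1
print(vocab)  # {'B-0': 1, 'I-0': 2, 'O': 3, 'B-1': 4, 'I-1': 5}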
def delete_link_tag(api_client, link_id, tag_key, **kwargs): # noqa: E501
"""delete_link_tag # noqa: E501
Delete link tag by key
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> response = await api.delete_link_tag(client, link_id, tag_key, async_req=True)
:param link_id str: str of link. e.g. lnk0
:param tag_key str: key of tag
:param async_req bool: execute request asynchronously
:param bool sorted: Sort resources
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: APIResponse or awaitable if async
"""
local_var_params = locals()
request_params = [] # noqa: E501
collection_formats = {}
path_params = {"link_id": link_id, "tag_key": tag_key}
query_params = []
for param in [p for p in request_params if local_var_params.get(p) is not None]:
query_params.append((param, local_var_params[param])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiTokenAuth", "basicAuth"] # noqa: E501
return api_client.call_api(
"/links/{link_id}/tags/{tag_key}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
) | 5e643eaa02b50a733030ac35da5821a494ee2517 | 636 |
def infect_graph(g, title):
"""
Function to infect the graph using SI model.
Parameters:
g: Graph
Returns:
G : Infected graph
t : Time of diffusion of each node
"""
G=g
# Model selection - diffusion time
model = ep.SIModel(G)
nos = 1/len(G)
# Model Configuration
config = mc.Configuration()
config.add_model_parameter('beta', 0.03)
config.add_model_parameter("fraction_infected", 0.05)
model.set_initial_status(config)
# Simulation execution
iterations = model.iteration_bunch(200)
diffusionTime={}
for i in range(1,len(G)):
diffusionTime[i]=-1
for i in iterations:
for j in i['status']:
if(i['status'][j]==1):
diffusionTime[j]=i['iteration']
nodeColor = []
source_nodes = []
for i in G.nodes():
if iterations[0]["status"][i]==1:
nodeColor.append('red')
source_nodes.append(i)
else:
nodeColor.append('blue')
sorted_values = sorted(diffusionTime.values()) # Sort the values
sorted_dict = {}
for i in sorted_values:
for k in diffusionTime.keys():
if diffusionTime[k] == i:
sorted_dict[k] = diffusionTime[k]
plt.clf()
nx.draw(G, node_color=nodeColor, with_labels=True)
plt.title('Intial Phase')
plt.savefig(f'./plots/{title}_Initial-infect.png')
plt.clf()
nx.draw(G, node_color=list(x for i,x in diffusionTime.items()),cmap=plt.cm.Reds, with_labels=True)
plt.title('Final Phase')
plt.savefig(f'./plots/{title}_Final-infect.png')
return (G, sorted_dict, source_nodes) | b0c3f5d2518083cabf4de4214acf65027fc623f5 | 637 |
def main_func_SHORT():
""" Func. called by the main T """
sleep(SHORT)
return True | de5cffd80a74a048f0016e5912dd15b93f0a4dd6 | 638 |
from typing import Tuple
import math
def split_train_test(X: pd.DataFrame, y: pd.Series, train_proportion: float = .75) \
-> Tuple[pd.DataFrame, pd.Series, pd.DataFrame, pd.Series]:
"""
Randomly split given sample to a training- and testing sample
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Data frame of samples and feature values.
y : Series of shape (n_samples, )
Responses corresponding samples in data frame.
train_proportion: Fraction of samples to be split as training set
Returns
-------
train_X : DataFrame of shape (ceil(train_proportion * n_samples), n_features)
Design matrix of train set
train_y : Series of shape (ceil(train_proportion * n_samples), )
Responses of training samples
test_X : DataFrame of shape (floor((1-train_proportion) * n_samples), n_features)
Design matrix of test set
test_y : Series of shape (floor((1-train_proportion) * n_samples), )
Responses of test samples
"""
no_of_train_rows = math.ceil(train_proportion * X.shape[0])
X : pd.DataFrame = pd.DataFrame.join(X,y)
train_data = X.sample(n=no_of_train_rows, axis=0)
test_data = X.loc[X.index.difference(train_data.index), ]
train_y = train_data[y.name]
test_y = test_data[y.name]
train_data.drop(columns=y.name,inplace=True)
test_data.drop(columns=y.name,inplace=True)
# print(train_data.shape)
# print(test_data.shape)
# print(train_y.shape)
# print(test_y.shape)
return train_data, train_y, test_data, test_y | 2261e72821ab55ac12f4f0b88511ce1f6d9d8d5f | 639 |
def s2sdd(s):
""" Converts a 4-port single-ended S-parameter matrix
to a 2-port differential mode representation.
Reference: https://www.aesa-cortaillod.com/fileadmin/documents/knowledge/AN_150421_E_Single_ended_S_Parameters.pdf
"""
sdd = np.zeros((2, 2), dtype=np.complex128)
sdd[0, 0] = 0.5*(s[0, 0] - s[0, 2] - s[2, 0] + s[2, 2])
sdd[0, 1] = 0.5*(s[0, 1] - s[0, 3] - s[2, 1] + s[2, 3])
sdd[1, 0] = 0.5*(s[1, 0] - s[1, 2] - s[3, 0] + s[3, 2])
sdd[1, 1] = 0.5*(s[1, 1] - s[1, 3] - s[3, 1] + s[3, 3])
return sdd | 0d29d2d248a49dd27bab202abd84014aab799907 | 640 |
def plot_gdf(gdf, map_f=None, maxitems=-1, style_func_args={}, popup_features=[],
tiles='cartodbpositron', zoom=6, geom_col='geometry', control_scale=True):
"""
:param gdf: GeoDataFrame
GeoDataFrame to visualize.
:param map_f: folium.Map
`folium.Map` object where the GeoDataFrame `gdf` will be plotted. If `None`, a new map will be created.
:param maxitems: int
maximum number of tiles to plot. If `-1`, all tiles will be plotted.
:param style_func_args: dict
dictionary to pass the following style parameters (keys) to the GeoJson style function of the polygons:
'weight', 'color', 'opacity', 'fillColor', 'fillOpacity', 'radius'
:param popup_features: list
when clicking on a tile polygon, a popup window displaying the information in the
columns of `gdf` listed in `popup_features` will appear.
:param tiles: str
folium's `tiles` parameter.
:param zoom: int
initial zoom.
:param geom_col: str
name of the geometry column of `gdf`.
:param control_scale: bool
if `True`, add scale information in the bottom left corner of the visualization. The default is `True`.
Returns
-------
`folium.Map` object with the plotted GeoDataFrame.
"""
if map_f is None:
# initialise map
lon, lat = np.mean(np.array(list(gdf[geom_col].apply(utils.get_geom_centroid).values)), axis=0)
map_f = folium.Map(location=[lat, lon], tiles=tiles, zoom_start=zoom, control_scale=control_scale)
count = 0
for k in gdf.index:
g = gdf.loc[k]
if type(g[geom_col]) == gpd.geoseries.GeoSeries:
for i in range(len(g[geom_col])):
map_f = add_to_map(g[geom_col].iloc[i], g.iloc[i], map_f,
popup_features=popup_features,
style_func_args=style_func_args)
else:
map_f = add_to_map(g[geom_col], g, map_f,
popup_features=popup_features,
style_func_args=style_func_args)
count += 1
if count == maxitems:
break
return map_f | 80c4002ca82e849a1701c00ead54671729009670 | 641 |
def set_catflap_cat_inside(request, catflap_uuid):
"""GET so it can be used as an email link."""
catflap = CatFlap.objects.get(uuid=catflap_uuid)
if not catflap.cat_inside:
catflap.cat_inside = True
catflap.save()
track_manual_intervention(catflap, cat_inside=True)
return redirect_to_status_page(request, catflap_uuid) | cb7feabe83ef69598aff9fe6ba9867996312c892 | 642 |
import ctypes
def feature_list():
"""Check the library for compile-time features. The list of features are maintained in libinfo.h and libinfo.cc
Returns
-------
list
List of :class:`.Feature` objects
"""
lib_features_c_array = ctypes.POINTER(Feature)()
lib_features_size = ctypes.c_size_t()
check_call(_LIB.MXLibInfoFeatures(ctypes.byref(lib_features_c_array), ctypes.byref(lib_features_size)))
features = [lib_features_c_array[i] for i in range(lib_features_size.value)]
return features | ec20748fb4aae07898949822b3a7ba9835f4ed57 | 643 |
def _symm_herm(C):
"""To get rid of NaNs produced by _scalar2array, symmetrize operators
where C_ijkl = C_jilk*"""
nans = np.isnan(C)
C[nans] = np.einsum('jilk', C)[nans].conj()
return C | bd2e46b3ed751eb380aedd8d280294177fb6b3fd | 644 |
def cat(self, dim=0):
"""Map of 'cat' pytorch method."""
x = self
dim = _dim_explicit(x[0].shape, dim)
return P.concat(x, dim) | 9eba4de4941ac437e82f98647e0c6dc014b1578f | 645 |
import re
def _name_xform(o):
"""transform names to lowercase, without symbols (except underscore)
Any chars other than alphanumeric are converted to an underscore
"""
    return re.sub(r"\W", "_", o.lower()) | 8ea563f805493d8885c143d9c2e2e54447ef19e8 | 646 |
def runner(app):
    """Create a test runner used to invoke the Click commands registered by the application."""
    return app.test_cli_runner() | f9ffb3040045e0789a5686eb9a80f3fdef126a9d | 647 |
def create_activation_cache(model):
"""Creates an activation cache for the tensors of a model."""
input_quantizer = quantized_relu(8, 0)
output_cache = {}
# If using a Sequential model, the input layer is hidden. Therefore, add the
# input quantization to the cache if the first layer is not an input layer
if not isinstance(model.layers[0], InputLayer):
output_cache[model.layers[0].input.experimental_ref()] = input_quantizer
# cache graph tensors' activations
for l in model.layers:
output_cache[l.output.experimental_ref()] = l
if isinstance(l, QActivation) or isinstance(l, QAdaptiveActivation) :
output_cache[l.output.experimental_ref()] = l.quantizer
elif isinstance(l, InputLayer):
# assume the input is 8-bit positive value
output_cache[l.output.experimental_ref()] = input_quantizer
elif l.__class__.__name__ in [
"QDense", "QConv2D", "QConv1D", "QDepthwiseConv2D"
]:
output_cache[l.output.experimental_ref()] = l.activation
else:
if isinstance(l.input, list):
# right now, we just get the first one - we assume this is the leading
# one.
all_q = [
output_cache.get(l.input[i].experimental_ref())
for i in range(len(l.input))
]
q = all_q[0]
else:
q = output_cache.get(l.input.experimental_ref(), None)
output_cache[l.output.experimental_ref()] = q
if q is None:
raise ValueError("Unknown operation in {}".format(l.name))
return output_cache | a35c11e95831e3aa51fadce75577e97bd150cc1e | 648 |
def feature_scatterplot(fset_path, features_to_plot):
"""Create scatter plot of feature set.
Parameters
----------
fset_path : str
Path to feature set to be plotted.
features_to_plot : list of str
List of feature names to be plotted.
Returns
-------
(str, str)
Returns (docs_json, render_items) json for the desired plot.
"""
fset, data = featurize.load_featureset(fset_path)
fset = fset[features_to_plot]
colors = cycle(palette[5])
plots = np.array([[figure(width=300, height=200)
for j in range(len(features_to_plot))]
for i in range(len(features_to_plot))])
for (j, i), p in np.ndenumerate(plots):
if (j == i == 0):
p.title.text = "Scatterplot matrix"
p.circle(fset.values[:,i], fset.values[:,j], color=next(colors))
p.xaxis.minor_tick_line_color = None
p.yaxis.minor_tick_line_color = None
p.ygrid[0].ticker.desired_num_ticks = 2
p.xgrid[0].ticker.desired_num_ticks = 4
p.outline_line_color = None
p.axis.visible = None
plot = gridplot(plots.tolist(), ncol=len(features_to_plot), mergetools=True, responsive=True, title="Test")
# Convert plot to json objects necessary for rendering with bokeh on the
# frontend
render_items = [{'docid': plot._id, 'elementid': make_id()}]
doc = Document()
doc.add_root(plot)
docs_json_inner = doc.to_json()
docs_json = {render_items[0]['docid']: docs_json_inner}
docs_json = serialize_json(docs_json)
render_items = serialize_json(render_items)
return docs_json, render_items | e8e0a545b992042eb334554042c9efa68a4f6a1f | 649 |
def model1(v, va, vb, ka, Wa, Wb, pa):
"""
A translation of the equation from Sandström's Dynamic NMR Spectroscopy,
p. 14, for the uncoupled 2-site exchange simulation.
v: frequency whose amplitude is to be calculated
va, vb: frequencies of a and b singlets (slow exchange limit) (va > vb)
ka: rate constant for state A--> state B
    pa: fraction of population in state A
    dv: frequency difference (va - vb) between a and b singlets (slow exchange)
    T2a, T2b: T2 (transverse relaxation time) for each nucleus
returns: amplitude at frequency v
"""
pi = np.pi
pb = 1 - pa
tau = pb / ka
dv = va - vb
Dv = (va + vb) / 2 - v
T2a = 1 / (pi * Wa)
T2b = 1 / (pi * Wb)
P = tau * ((1 / (T2a * T2b)) - 4 * (pi ** 2) * (Dv ** 2) +
(pi ** 2) * (dv ** 2))
P += ((pa / T2a) + (pb / T2b))
Q = tau * (2 * pi * Dv - pi * dv * (pa - pb))
R = 2 * pi * Dv * (1 + tau * ((1 / T2a) + (1 / T2b)))
R += pi * dv * tau * ((1 / T2b) - (1 / T2a)) + pi * dv * (pa - pb)
I = (P * (1 + tau * ((pb / T2a) + (pa / T2b))) + Q * R) / (P ** 2 + R ** 2)
return I | c1110cbd16bfcd942a086e8d878bfe3117bf4f99 | 650 |
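A hypothetical evaluation of the lineshape above across a few frequencies; it assumes model1 from the snippet is in scope, numpy is imported as np, and the parameter values are illustrative only:
import numpy as np

# model1 is assumed to be defined as in the snippet above
freqs = np.linspace(80.0, 120.0, 5)
spectrum = [model1(v, va=110.0, vb=90.0, ka=10.0, Wa=1.0, Wb=1.0, pa=0.5) for v in freqs]
print(spectrum)  # amplitudes; the two peaks broaden and merge as ka grows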
def calculate_laminar_flame_speed(
initial_temperature,
initial_pressure,
species_dict,
mechanism,
phase_specification="",
unit_registry=_U
):
"""
This function uses cantera to calculate the laminar flame speed of a given
gas mixture.
Parameters
----------
initial_temperature : pint.Quantity
Initial temperature of gas mixture
initial_pressure : pint.Quantity
Initial pressure of gas mixture
species_dict : dict
Dictionary with species names (all caps) as keys and moles as values
mechanism : str
String of mechanism to use (e.g. "gri30.cti")
phase_specification : str
Phase specification for cantera solution
unit_registry : pint.UnitRegistry
Unit registry for managing units to prevent conflicts with parent
unit registry
Returns
-------
pint.Quantity
Laminar flame speed in m/s as a pint quantity
"""
gas = ct.Solution(mechanism, phase_specification)
quant = unit_registry.Quantity
tools.check_pint_quantity(
initial_pressure,
"pressure",
ensure_positive=True
)
tools.check_pint_quantity(
initial_temperature,
"temperature",
ensure_positive=True
)
# ensure species dict isn't empty
if len(species_dict) == 0:
raise ValueError("Empty species dictionary")
# ensure all species are in the mechanism file
bad_species = ""
good_species = gas.species_names
for species in species_dict:
if species not in good_species:
bad_species += species + "\n"
if len(bad_species) > 0:
raise ValueError("Species not in mechanism:\n" + bad_species)
gas.TPX = (
initial_temperature.to("K").magnitude,
initial_pressure.to("Pa").magnitude,
species_dict
)
# find laminar flame speed
flame = ct.FreeFlame(gas)
flame.set_refine_criteria(ratio=3, slope=0.1, curve=0.1)
flame.solve(loglevel=0)
return quant(flame.u[0], "m/s") | cdceb35dd5313d32e5b7cdfd4af2b842d2019587 | 651 |
def extrapolate_coverage(lines_w_status):
"""
Given the following input:
>>> lines_w_status = [
(1, True),
(4, True),
(7, False),
(9, False),
]
Return expanded lines with their extrapolated line status.
>>> extrapolate_coverage(lines_w_status) == [
(1, True),
(2, True),
(3, True),
(4, True),
(5, None),
(6, None),
(7, False),
(8, False),
(9, False),
]
"""
lines = []
prev_lineno = 0
prev_status = True
for lineno, status in lines_w_status:
while (lineno - prev_lineno) > 1:
prev_lineno += 1
if prev_status is status:
lines.append((prev_lineno, status))
else:
lines.append((prev_lineno, None))
lines.append((lineno, status))
prev_lineno = lineno
prev_status = status
return lines | e7685359f570ae979f2421c3a64513409b9df352 | 652 |
def get_image_features(filename):
"""
Param:
Path to image
Returns:
Desired features of image in the form of a dictionary (key = feature_name, value = feature_value)
"""
array, metadata = nrrd.read(filename)
return {k: f(array, metadata, filename) for k, f in image_feature_functions.items()} | 0f991ebed175f0f41e30654cb4665dea09a1053d | 653 |
def get_DCT_transform_matrix(N):
"""
Return the normalised N-by-N discrete cosine transform (DCT) matrix.
Applying the returned transform matrix to a vector x: D.dot(x) yields the
DCT of x. Applying the returned transform matrix to a matrix A: D.dot(A)
applies the DCT to the columns of A. Taking D.dot(A.dot(D.T)) applies the
DCT to both columns and rows, i.e. a full 2D separable DCT transform. The
inverse transform (the 1D IDCT) is D.T.
Parameters
----------
N : int
The size of the DCT transform matrix to return.
Returns
-------
D : ndarray
The DCT transform matrix.
Notes
-----
    The returned DCT matrix is normalised such that it constitutes an orthonormal
    transform as given by equations (2.119) and (2.120) in [1]_.
References
----------
.. [1] A.N. Akansu, R.A. Haddad, and P.R. Haddad, *Multiresolution Signal
Decomposition: Transforms, Subbands, and Wavelets*, Academic Press,
2000.
Examples
--------
For example, get a 5-by-5 DCT matrix
>>> import numpy as np
>>> from magni.imaging.dictionaries import get_DCT_transform_matrix
>>> D = get_DCT_transform_matrix(5)
>>> np.round(np.abs(D), 4)
array([[ 0.4472, 0.4472, 0.4472, 0.4472, 0.4472],
[ 0.6015, 0.3717, 0. , 0.3717, 0.6015],
[ 0.5117, 0.1954, 0.6325, 0.1954, 0.5117],
[ 0.3717, 0.6015, 0. , 0.6015, 0.3717],
[ 0.1954, 0.5117, 0.6325, 0.5117, 0.1954]])
and apply the 2D DCT transform to a dummy image
>>> np.random.seed(6021)
>>> img = np.random.randn(5, 5)
>>> img_dct = D.dot(img.dot(D.T))
>>> np.round(img_dct, 4)
array([[-0.5247, -0.0225, 0.9098, 0.369 , -0.477 ],
[ 1.7309, -0.4142, 1.9455, -0.6726, -1.3676],
[ 0.6987, 0.5355, 0.7213, -0.8498, -0.1023],
[ 0.0078, -0.0545, 0.3649, -1.4694, 1.732 ],
[-1.5864, 0.156 , 0.8932, -0.8091, 0.5056]])
"""
@_decorate_validation
def validate_input():
_numeric('N', 'integer', range_='[1;inf)')
validate_input()
nn, rr = np.meshgrid(*map(np.arange, (N, N)))
D = np.cos((2 * nn + 1) * rr * np.pi / (2 * N))
D[0, :] /= np.sqrt(N)
D[1:, :] /= np.sqrt(N/2)
return D | 87314407c0836892a79747ea01aa9c369224198b | 654 |
def get_reduce_nodes(name, nodes):
"""
Get nodes that combine the reduction variable with a sentinel variable.
Recognizes the first node that combines the reduction variable with another
variable.
"""
reduce_nodes = None
for i, stmt in enumerate(nodes):
lhs = stmt.target.name
rhs = stmt.value
if isinstance(stmt.value, ir.Expr):
in_vars = set(v.name for v in stmt.value.list_vars())
if name in in_vars:
args = get_expr_args(stmt.value)
args.remove(name)
assert len(args) == 1
replace_vars_inner(stmt.value, {args[0]:
ir.Var(stmt.target.scope, name+"#init", stmt.target.loc)})
reduce_nodes = nodes[i:]
                break
assert reduce_nodes, "Invalid reduction format"
return reduce_nodes | 8438829236f9c45986d1e1394cd0c6864caec73f | 655 |
def extract_mesh_descriptor_id(descriptor_id_str: str) -> int:
""" Converts descriptor ID strings (e.g. 'D000016') into a number ID (e.g. 16). """
if len(descriptor_id_str) == 0:
raise Exception("Empty descriptor ID")
if descriptor_id_str[0] != "D":
raise Exception("Expected descriptor ID to start with 'D', {}".format(descriptor_id_str))
return int(descriptor_id_str[1:]) | 9f013eadee9a149b9617e4a1c058bbe67c6dd8ba | 656 |
def process_sources(sources_list):
"""
This function processes the sources result
:param sources_list: A list of dictionaries
:return: A list of source objects
"""
sources_results = []
for sources_item in sources_list:
id = sources_item.get('id')
name = sources_item.get('name')
description = sources_item.get('description')
url = sources_item.get('url')
category = sources_item.get('category')
language = sources_item.get('language')
country = sources_item.get('country')
print(sources_item)
sources_object = Sources(id, name, description, url)
sources_results.append(sources_object)
return sources_results | 7742a721802c66daf525520969f5831f9a497137 | 657 |
import math
def encrypt(message_text, key):
"""Method Defined for ENCRYPTION of a Simple \
String message into a Cipher Text Using \
2x2 Hill Cipher Technique
\nPARAMETERS\n
message_text: string to be encrypted
key: string key for encryption with length <= 4
\nRETURNS\n
cipher_text: encrypted Message string
"""
# for 2x2 Hill Cipher length of key must be <= 4
# print("Warning: All Spaces with be lost!")
cipher_text = ""
key_matrix = None
if len(key) <= 4:
key_matrix = string_to_Matrix_Z26(key, 2, 2)
else:
print("Key Length must be <= 4 in 2x2 Hill Cipher")
return
pairs = math.ceil((len(message_text)/2))
matrix = string_to_Matrix_Z26(message_text, 2, pairs)
key_inverse = matrix_inverse_Z26(key_matrix)
if type(key_inverse) == type(None):
print("NOTE: The provided Key is NOT Invertible,")
print("To avoid failure while decryption,")
print("Try again with an invertible Key")
return None
for i in range(pairs):
result_char = (key_matrix*matrix[:, i]) % 26
cipher_text += ENGLISH_ALPHABETS[
result_char[0, 0]
]
cipher_text += ENGLISH_ALPHABETS[
result_char[1, 0]
]
return cipher_text | 2b36ba888980021cc69bffb1316b4e352bd026e8 | 658 |
import torch
def resnet101(pretrained=False, num_groups=None, weight_std=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], num_groups=num_groups, weight_std=weight_std, **kwargs)
if pretrained:
model_dict = model.state_dict()
if num_groups and weight_std:
pretrained_dict = torch.load('data/R-101-GN-WS.pth.tar')
overlap_dict = {k[7:]: v for k, v in pretrained_dict.items() if k[7:] in model_dict}
assert len(overlap_dict) == 312
elif not num_groups and not weight_std:
pretrained_dict = model_zoo.load_url(model_urls['resnet101'])
overlap_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
else:
raise ValueError('Currently only support BN or GN+WS')
model_dict.update(overlap_dict)
model.load_state_dict(model_dict)
return model | da4213b280eeef56cf1bdce7bfd05cfc0b8cde7d | 659 |
def cron(cronline, venusian_category='irc3.plugins.cron'):
"""main decorator"""
def wrapper(func):
def callback(context, name, ob):
obj = context.context
crons = obj.get_plugin(Crons)
if info.scope == 'class':
callback = getattr(
obj.get_plugin(ob),
func.__name__)
else:
callback = irc3.utils.wraps_with_context(func, obj)
crons.add_cron(cronline, callback)
info = venusian.attach(func, callback, category=venusian_category)
return func
return wrapper | 9ffe20fd3e803b1260577ff38122b4912ab4d69a | 660 |
def oauth_type():
"""Check if Slack or another OAuth has been configured"""
if "OAUTH_TYPE" in current_app.config:
return current_app.config["OAUTH_TYPE"].lower()
else:
return None | d9fe8f77fd502890becd44cbaf802b2e94598a6f | 661 |
import re
import logging
import os
def _get_scenarios(rule_dir, scripts, scenarios_regex, benchmark_cpes):
""" Returns only valid scenario files, rest is ignored (is not meant
    to be executed directly).
"""
if scenarios_regex is not None:
scenarios_pattern = re.compile(scenarios_regex)
scenarios = []
for script in scripts:
if scenarios_regex is not None:
if scenarios_pattern.match(script) is None:
logging.debug("Skipping script %s - it did not match --scenarios regex" % script)
continue
script_context = _get_script_context(script)
if script_context is not None:
script_params = _parse_parameters(os.path.join(rule_dir, script))
if common.matches_platform(script_params["platform"], benchmark_cpes):
scenarios += [Scenario(script, script_context, script_params)]
else:
logging.info("Script %s is not applicable on given platform" % script)
return scenarios | 5f91798103461c98ea53cd9ddf1ac25b042f97ce | 662 |
import random
import string
def create_categories():
"""Create a group of random strings for each column in the table."""
return [
[
''.join(random.choices(string.ascii_lowercase, k=random.randint(STR_MIN, STR_MAX)))
for _i in range(CAT_COUNT)
]
for _j in range(COL_COUNT)
] | 8552be3fb45091f404d396f452dd37824a0cae23 | 663 |
import os
def get_asp_output_folder(project_name):
"""
:type project_name: string
"""
loc = PROJECT_RESULTS_LOC + project_name + '/' + PROJECT_ASP_OUTPUT_FOLDER
mkdir_p(loc)
return os.path.abspath(loc) | f9a1a7eec875bf77b0dc414bcda0205010f09c3b | 664 |
from typing import Union
from typing import Tuple
from typing import List
from typing import Any
def _compute_comm_classes(
A: Union[np.ndarray, spmatrix]
) -> Tuple[List[List[Any]], bool]:
"""Compute communication classes for a graph given by A."""
di_graph = (
nx.from_scipy_sparse_matrix(A, create_using=nx.DiGraph)
if issparse(A)
else nx.from_numpy_array(A, create_using=nx.DiGraph)
)
nx.strongly_connected_components(di_graph)
comm_classes = sorted(
nx.strongly_connected_components(di_graph), key=len, reverse=True
)
is_irreducible = len(comm_classes) == 1
return comm_classes, is_irreducible | 15a7025d10855a4644be60d52dba273c526c2b43 | 665 |
import typing
def parse_lines(lines: typing.List[str],
units: Units,
use_na: bool = True) -> typing.List[typing.Dict[str, typing.Any]]:
"""
Returns a list of parsed line dictionaries
"""
parsed_lines = []
prob = ''
while lines:
raw_line = lines[0].strip()
line = core.sanitize_line(raw_line)
# Remove prob from the beginning of a line
if line.startswith('PROB'):
# Add standalone prob to next line
if len(line) == 6:
prob = line
line = ''
# Add to current line
elif len(line) > 6:
prob = line[:6]
line = line[6:].strip()
if line:
parsed_line = (parse_na_line if use_na else parse_in_line)(line, units)
for key in ('start_time', 'end_time'):
parsed_line[key] = core.make_timestamp(parsed_line[key])
parsed_line['probability'] = core.make_number(prob[4:])
parsed_line['raw'] = raw_line
parsed_line['sanitized'] = prob + ' ' + line if prob else line
prob = ''
parsed_lines.append(parsed_line)
lines.pop(0)
return parsed_lines | 356b3c4d80a462643ab6c7747d4f1174202129be | 666 |
import random
def rand_cutout(np_img, pcts=(0.05, 0.4), depth=(1., 0.), max_k=1):
"""Cut out from image, and edges of rectangles are smooth.
Returns:
applied image, cut mask
"""
cut = np.ones(np_img.shape[:2])
k = random.randint(1, max_k)
for _ in range(k):
d = random.random() * depth[0] + depth[1]
hill = rand_solid_hill((np_img.shape[1], np_img.shape[0]), pcts=pcts)
cut = cut * (1 - d * hill)
return np_img * cut[..., np.newaxis], (cut < 0.9).astype(np.int8) | fd5b40138314827c3ff69bb571f30453049c073b | 667 |
from typing import List
def create_content_list(contents: List[str]) -> str:
"""Format list of string into markdown list
Args:
contents: (List[string]), list of string to be formatted
Returns:
String
"""
return '\n'.join(
[template.LIST_TEMPLATE.format(
level='',
content=item
) for item in contents]) | 4080d7540def0bd0199f380a787b49eafc512b6f | 668 |
from typing import Optional
import requests
import logging
def convert(key: str, content: str, output_format: OWLFormat=OWLFormat.func) -> Optional[str]:
"""
Convert content into output_format
:param key: Key of content for error reporting
:param content: OWL representation
:param output_format: target format
:return: Converted information if successful
"""
try:
resp = requests.post('https://www.ldf.fi/service/owl-converter/',
data=dict(onto=content, to=output_format.name))
except ConnectionError as e:
logging.getLogger().error(f"{key}: {str(e)}")
return None
if resp.ok:
return resp.text
logging.getLogger().error(f"{key}: {str(resp)}") | c350a00198489d6224693cf528fb952283c6fc27 | 669 |
def _post_single_image(client: Imgur, image_path, title, description=None):
"""
Limit to 1250 POST requests per hour and 12500 per day
"""
image = client.image_upload(image_path, title, description)
# album_id = client.album_get('Family Photos')['response']['data']['id']
# client.album_add(album_id, image['response']['data']['id'])
return image['response']['data']['link'] | 22b1c5996986616515e8edcfcd22d9c82df1d27b | 670 |
def load_data(path, start=0, end=99999, step=1, returnNames = False):
"""Load images into a list
#Arguments
paths: List of strings representing paths to folders containing
images that must be named as numbers
start,end,step: Refers to the number of name of images. Only loads
images with in this range.
"""
imgs = load_imgs(path,start,end,step,returnNames = returnNames)
return imgs | 7edaff4bc977a63c22f140b74a86bc9c6fdae604 | 671 |
def animated_1d_plot(probe_data_dnf: np.ndarray,
probe_data_input1: np.ndarray,
probe_data_input2: np.ndarray,
interval: ty.Optional[int] = 30) -> None:
"""Generates an animated plot for examples in the DNF regimes tutorial.
Parameters
----------
probe_data_dnf : numpy.ndarray
probe data of the DNF
probe_data_input1 : numpy.ndarray
probe data of the first spiking input
probe_data_input2 : numpy.ndarray
probe data of the second spiking input
interval : int
interval to use in matplotlib.animation.FuncAnimation
"""
probe_data_input = probe_data_input1 + probe_data_input2
    probe_data_input = probe_data_input.astype(float)
    probe_data_dnf = probe_data_dnf.astype(float)
probe_data_input = np.transpose(probe_data_input)
probe_data_dnf = np.transpose(probe_data_dnf)
num_neurons = np.size(probe_data_input, axis=1)
num_time_steps = np.size(probe_data_dnf, axis=0)
input_spike_rates = compute_spike_rates(probe_data_input)
dnf_spike_rates = compute_spike_rates(probe_data_dnf)
fig, ax = plt.subplots(2, 1, figsize=(10, 5))
line0, = ax[0].plot(np.zeros((num_neurons,)), 'bo-')
line1, = ax[1].plot(np.zeros((num_neurons,)), 'ro-')
im = [line0, line1]
ax[0].set_xlabel("")
ax[1].set_xlabel("Input neuron idx")
ax[0].set_ylabel("Input spike rate")
ax[1].set_ylabel("DNF spike rate")
ax[0].set_xticks([])
ax[1].set_xticks([0, num_neurons - 1])
ax[0].set_yticks([0, 1])
ax[1].set_yticks([0, 1])
ax[0].set_xlim(-1, num_neurons)
ax[1].set_xlim(-1, num_neurons)
offset = 0.1
ax[0].set_ylim(np.min(input_spike_rates) - offset,
np.max(input_spike_rates) + offset)
ax[1].set_ylim(np.min(dnf_spike_rates) - offset,
np.max(dnf_spike_rates) + offset)
plt.tight_layout()
def animate(i: int) -> ty.List:
x = range(num_neurons)
im[0].set_data(x, input_spike_rates[i, :])
im[1].set_data(x, dnf_spike_rates[i, :])
return im
anim = animation.FuncAnimation(fig,
animate,
frames=num_time_steps,
interval=interval,
blit=True)
html = display.HTML(anim.to_jshtml())
display.display(html)
plt.close() | 8ec34c6772b728daeb429a2ecf4af52ab673bc96 | 672 |
def create_tendencies(params, return_inner_products=False, return_qgtensor=False):
"""Function to handle the inner products and tendencies tensors construction.
Returns the tendencies function :math:`\\boldsymbol{f}` determining the model's ordinary differential
equations:
    .. math:: \\dot{\\boldsymbol{x}} = \\boldsymbol{f}(\\boldsymbol{x})
    which is used for the model's integration.
    It also returns the linearized tendencies
    :math:`\\boldsymbol{\\mathrm{J}} \\equiv \\boldsymbol{\\mathrm{D}f} = \\frac{\\partial \\boldsymbol{f}}{\\partial \\boldsymbol{x}}`
    (Jacobian matrix) which are used by the tangent linear model:
    .. math:: \\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(\\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}
Parameters
----------
params: ~params.params.QgParams
The parameters fully specifying the model configuration.
return_inner_products: bool
If True, return the inner products of the model. Default to False.
return_qgtensor: bool
If True, return the tendencies tensor of the model. Default to False.
Returns
-------
f: callable
The numba-jitted tendencies function.
Df: callable
The numba-jitted linearized tendencies function.
inner_products: (AtmosphericInnerProducts, OceanicInnerProducts)
If `return_inner_products` is True, the inner products of the system.
qgtensor: QgsTensor
If `return_qgtensor` is True, the tendencies tensor of the system.
"""
if params.ablocks is not None:
aip = AtmosphericInnerProducts(params)
else:
aip = None
if params.goblocks is not None and params.gotemperature_params._name == "Oceanic Temperature":
oip = OceanicInnerProducts(params)
else:
oip = None
if aip is not None and oip is not None:
aip.connect_to_ocean(oip)
agotensor = QgsTensor(aip, oip)
coo = agotensor.tensor.coords.T
val = agotensor.tensor.data
@njit
def f(t, x):
xx = np.concatenate((np.full((1,), 1.), x))
xr = sparse_mul3(coo, val, xx, xx)
return xr[1:]
jcoo = agotensor.jacobian_tensor.coords.T
jval = agotensor.jacobian_tensor.data
@njit
def Df(t, x):
xx = np.concatenate((np.full((1,), 1.), x))
mul_jac = sparse_mul2(jcoo, jval, xx)
return mul_jac[1:, 1:]
ret = list()
ret.append(f)
ret.append(Df)
if return_inner_products:
ret.append((aip, oip))
if return_qgtensor:
ret.append(agotensor)
return ret | 2ca9f8f6c52b72070c8d339a6ec23d7dd52ea66c | 673 |
import re
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords | ff70ee2690bc36aaf892653040996597750c52da | 674 |
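# Usage sketch for git_get_keywords above: it scrapes the git_refnames/git_full/git_date
# assignments that versioneer embeds in a generated _version.py file.
import tempfile
_sample = (
    'git_refnames = " (HEAD -> main, tag: v1.2.3)"\n'
    'git_full = "0123456789abcdef0123456789abcdef01234567"\n'
    'git_date = "2020-01-01 00:00:00 +0000"\n'
)
with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as _f:
    _f.write(_sample)
print(git_get_keywords(_f.name))
# -> {'refnames': ..., 'full': ..., 'date': ...} extracted from the quoted values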
from subprocess import Popen, PIPE
def script(text, interpreter="sh"):
"""Execute a shell script.
The script is passed to the interpreter via stdin and the return
code of the interpreter is returned."""
process = Popen(interpreter, stdin=PIPE)
    # communicate() expects bytes because the pipe is opened in binary mode
    process.communicate(input=text.encode() if isinstance(text, str) else text)
process.wait()
return process.returncode | c4dcbe40f2868099bb8986b82753ab6e00c9a1c5 | 675 |
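# Usage sketch for script above; assumes a POSIX `sh` interpreter is available on PATH.
rc = script("echo hello from the helper\nexit 3\n")
print(rc)  # -> 3, the interpreter's exit status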
import tempfile
from pathlib import Path
import subprocess
from lxml import etree  # assumed XML backend; xml.etree.ElementTree would also work for this parse
def generateImage(boardfilename, outputfilename, dpi, pcbdrawArgs, back):
"""
    Generate board image for the diagram. Returns the bounding box (top left,
    bottom right) of the image's active area in KiCAD native units.
"""
# For now, use PcbDraw as a process until we rewrite the tool so it can be
# used as a library. Also note that we always generate SVG first as we can
# easily read the active area from it. Then we manually convert it to PNG
with tempfile.TemporaryDirectory() as d:
tmpdir = Path(d)
svgfilename = tmpdir / "img.svg"
command = ["pcbdraw", "--shrink", "0"]
if back:
command.append("--back")
if pcbdrawArgs["style"] is not None:
command.extend(["--style", pcbdrawArgs["style"]])
if pcbdrawArgs["libs"] is not None:
command.extend(["--libs", pcbdrawArgs["libs"]])
if pcbdrawArgs["remap"] is not None:
command.extend(["--remap", pcbdrawArgs["remap"]])
if pcbdrawArgs["filter"] is not None:
command.extend(["--filter", pcbdrawArgs["filter"]])
command.append(boardfilename)
command.append(str(svgfilename))
subprocess.run(command, check=True)
svgToBitmap(svgfilename, outputfilename, dpi)
document = etree.parse(str(svgfilename))
tlx, tly, w, h = map(float, document.getroot().attrib["viewBox"].split())
return {
"tl": (ki2mm(svg2ki(tlx)), ki2mm(svg2ki(tly))),
"br": (ki2mm(svg2ki(tlx + w)), ki2mm(svg2ki(tly + h)))
} | 8f7e4f01583ad9c0dcdbf0783663e73923eda2a5 | 676 |
import warnings
from typing import Optional
import numpy as np
def mask_to_image(
mask: _T_input, batch_first: bool = False,
color: Optional[str] = None,
origin: str = 'lower'
) -> np.ndarray:
"""
Creates an image from a mask `Tensor` or `ndarray`.
For more details of the output shape, see the tensorboardx docs
Note:
Clips mask to range [0, 1]. Any values outside of this range will be
ignored.
Args:
mask: Mask to plot
batch_first: If `True`, `signal` is expected to have shape
`(batch [optional], frames, features)`. If `False`, the batch axis
is assumed to be in the second position, i.e.,
`(frames, batch [optional], features)`.
color: A color map name. The name is forwarded to
`matplotlib.pyplot.cm.get_cmap` to get the color map. If `None`,
grayscale is used.
origin: Origin of the plot. Can be `'upper'` or `'lower'`.
Returns:
Colorized image with shape (color (1 or 3), features, frames)
"""
mask = to_numpy(mask, detach=True)
clipped_values = np.sum((mask < 0) | (mask > 1))
if clipped_values:
warnings.warn(
f'Mask value passed to mask_to_image out of range ([0, 1])! '
f'{clipped_values} values are clipped!'
)
image = np.clip(mask * 255, 0, 255)
image = image.astype(np.uint8)
image = _remove_batch_axis(image, batch_first=batch_first)
return _colorize(_apply_origin(image.T, origin), color) | a4679d78fd9df003fe91742fb0eb0707ca3fd5f8 | 677 |
def lerp(x0, x1, t):
""" Linear interpolation """
return (1.0 - t) * x0 + t * x1 | 82d9ce36dd5879c7aab64dc5615a2fb298471383 | 678 |
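# Worked examples for lerp above: t=0 returns x0, t=1 returns x1, t=0.25 is a quarter of the way.
print(lerp(0.0, 10.0, 0.0))   # 0.0
print(lerp(0.0, 10.0, 1.0))   # 10.0
print(lerp(2.0, 4.0, 0.25))   # 2.5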
from struct import unpack as _unpack
def read_uint4(f):
"""
>>> import io
>>> read_uint4(io.BytesIO(b'\\xff\\x00\\x00\\x00'))
255
>>> read_uint4(io.BytesIO(b'\\x00\\x00\\x00\\x80')) == 2**31
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack('<I', data)[0]
raise ValueError('not enough data in stream to read uint4') | 4a2ffd3f58100d44f0e430847f5a7d8ef1f54a33 | 679 |
import numpy as np
# L (a lineshape helper) and _checkndim_copy_reshape are assumed to be defined elsewhere in this module.
def bound_contribution(w, g, G, nmax, squeeze=True):
    # TODO docstring
    # This method assumes that g, if multidimensional, varies along the first axis while w varies along the zeroth axis.
"""
"""
# helper methods
def f1(g):
return np.sqrt(g).astype(int)
# * L(w + (1 / ell - ell / g) ** 2, 0, G)
def f2(w, g, G, ell):
return (
np.pi
* L(w + (1 / ell - ell / g) ** 2, 0, G)
* 2
* (g - ell ** 2)
* (2 * ell ** 2 - g)
/ (ell ** 3 * g ** 2)
)
def f3(ell):
out = np.ones(ell.shape)
out[ell == 0] = 0
return out
def f4(g, ell, n):
# we aren't ensuring that we don't compute ell == n.
# TODO catch ell == n with a more robust method
out = (
n ** 2
* (n ** 2 * ell ** 2 - (g - ell ** 2) ** 2)
/ ((n ** 2 - ell ** 2) * (n ** 2 * ell ** 2 - g ** 2))
)
out[out == -np.inf] = np.nan
out[out == np.inf] = np.nan
return out
# check input arrays
ndim_org = w.ndim
w, g, G = _checkndim_copy_reshape([w, g, G], 2)
# create array to sum over
ellmax = int(np.max(np.sqrt(g)))
ell_base = np.arange(1, ellmax + 1, 1)
new_shape = (1,) * ndim_org + (ell_base.size,) + (1,)
ell_base = np.reshape(ell_base, new_shape)
ell = np.repeat(
ell_base, g.size, axis=1
) # TODO np.argmax g.shape | assumes g is varying along 1st axis
ell[f1(g) < ell] = 0
# create array to product over
n = np.arange(1, nmax + 1, 1)
new_shape = (1,) * (ndim_org + 1) + n.shape
n = np.reshape(n, new_shape)
# now actually create output arrays
out = f4(g, ell, n)
out = np.nanprod(out, axis=-1, keepdims=True)
out = out * f2(w, g, G, ell) * f3(ell)
out = np.nansum(
out, axis=-2, keepdims=True
) # TODO figure out why I need nansum here
if squeeze:
out = np.squeeze(out)
return out | 9011a4a13ef187982809640d944fc9db51160c38 | 680 |
import owlready2
def get_onto_class_by_node_type(ont: owlready2.namespace.Ontology, node_label: str):
"""Get an object corresponding to an ontology class given the node label.
`owlready2` doesn't make it easy to dynamically retrieve ontology classes.
This uses some (relatively unsafe) string manipulation to hack together a
solution.
Notes
-----
This should be refactored if/when a better solution is available!
"""
matches = [c for c in ont.classes() if str(c).split(".")[-1] == node_label]
if len(matches) == 1:
return matches[0]
elif len(matches) == 0:
return None
else:
raise ValueError(
"Error: Something is wrong with your ontology's class hierarchy! Check for duplicate classes with '{0}' in the name".format(
node_label
)
) | 0da35b5dc63b49cddece1cf0886d53315c77bf43 | 681 |
def validate_dtype_freq(dtype, freq):
"""
If both a dtype and a freq are available, ensure they match. If only
dtype is available, extract the implied freq.
Parameters
----------
dtype : dtype
freq : DateOffset or None
Returns
-------
freq : DateOffset
Raises
------
ValueError : non-period dtype
IncompatibleFrequency : mismatch between dtype and freq
"""
if freq is not None:
freq = frequencies.to_offset(freq)
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError("dtype must be PeriodDtype")
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
raise IncompatibleFrequency("specified freq and dtype are different")
return freq | 407f3c52781a1e6986feaa455f78224d64cb7ca8 | 682 |
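# Illustration of the dtype/freq consistency that validate_dtype_freq enforces, using the
# public pandas API (the helpers it calls are pandas internals not included in this snippet).
import pandas as pd
dtype = pd.PeriodDtype(freq="D")
print(dtype.freq)                                            # the freq implied by the period dtype
print(pd.tseries.frequencies.to_offset("D") == dtype.freq)   # True: a matching freq is accepted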
def extract_filtered_series(data_frame, column_list):
"""
    Returns a filtered selection of the targeted columns.
    Duplicate rows and rows that are entirely NaN or blank are dropped from the result,
    which is returned sorted (ascending) by the targeted columns.
    :param data_frame: Pandas DataFrame
    :param column_list: list of columns
    :return: Pandas DataFrame restricted to the targeted columns
return data_frame[column_list].drop_duplicates().dropna(axis=0, how='all').sort_values(
column_list)
# return data_frame[column_list].str.strip().drop_duplicates().dropna().sort_values() | 33120a2abedd5e8a7801bd0dfd89b107b1b593cb | 683 |
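# Usage sketch for extract_filtered_series above.
import pandas as pd
_df = pd.DataFrame({'city': ['Berlin', 'Berlin', None, 'Amsterdam'],
                    'zip': ['10115', '10115', None, '1011']})
print(extract_filtered_series(_df, ['city', 'zip']))
# The duplicate Berlin row and the all-NaN row are dropped; the result is sorted by ['city', 'zip'].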
# T (translator), settings, s3_rheader_tabs and the HTML helpers (DIV, TABLE, TR, TH) are
# provided by the surrounding web2py/Sahana Eden environment.
def event_rheader(r):
""" Resource headers for component views """
rheader = None
if r.representation == "html":
if r.name == "event":
# Event Controller
tabs = [(T("Event Details"), None)]
#if settings.has_module("req"):
# tabs.append((T("Requests"), "req"))
rheader_tabs = s3_rheader_tabs(r, tabs)
event = r.record
if event:
if event.exercise:
exercise = TH(T("EXERCISE"))
else:
exercise = TH()
if event.closed:
closed = TH(T("CLOSED"))
else:
closed = TH()
                rheader = DIV(TABLE(TR(exercise),
                                    TR(TH("%s: " % T("Name")),
                                       event.name),
                                    TR(TH("%s: " % T("Comments")),
                                       event.comments),
                                    TR(TH("%s: " % T("Zero Hour")),
                                       event.zero_hour),
                                    TR(closed),
                                    ), rheader_tabs)
if r.name == "incident":
# Incident Controller
tabs = [(T("Incident Details"), None)]
if settings.has_module("project"):
tabs.append((T("Tasks"), "task"))
if settings.has_module("hrm"):
tabs.append((T("Human Resources"), "human_resource"))
if settings.has_module("asset"):
tabs.append((T("Assets"), "asset"))
tabs.append((T("Facilities"), "site"))
tabs.append((T("Map Configuration"), "config"))
rheader_tabs = s3_rheader_tabs(r, tabs)
record = r.record
if record:
if record.exercise:
exercise = TH(T("EXERCISE"))
else:
exercise = TH()
if record.closed:
closed = TH(T("CLOSED"))
else:
closed = TH()
                rheader = DIV(TABLE(TR(exercise),
                                    TR(TH("%s: " % T("Name")),
                                       record.name),
                                    TR(TH("%s: " % T("Comments")),
                                       record.comments),
                                    TR(TH("%s: " % T("Zero Hour")),
                                       record.zero_hour),
                                    TR(closed),
                                    ), rheader_tabs)
return rheader | 8e63b275927f2ae1a98076185e56be4f5e565ce3 | 684 |
import argparse
import sys
import yaml
def parse_args():
"""Read arguments from config file and command line args."""
options_default = {
'permissions': True,
'checksum': True,
'interval': 60,
'pidfile': '~/.dropboxhandler.pid',
'daemon': False,
'umask': 0o077,
}
parser = argparse.ArgumentParser(
description="Listen for new files in " +
"dropboxdirs and move to ObenBis/storage",
)
parser.add_argument("-c", "--conf-file",
help="Specify config file", metavar="FILE",
default="~/.dropboxhandler.conf")
parser.add_argument("--print-example-config",
help="Print a example config file to stdout.",
action="store_true", default=False)
parser.add_argument('-d', '--daemon', action='store_true', default=None)
parser.add_argument('--pidfile', default=None)
parser.add_argument('--check-config', default=False, action='store_true',
help="Do not start the daemon, but check the " +
"config file")
args = parser.parse_args()
if args.print_example_config:
print_example_config()
sys.exit(0)
try:
with open(args.conf_file) as f:
            config = yaml.safe_load(f)
except dropboxhandler.FileNotFoundError:
error_exit("Could not find config file (default location: " +
"~/.dropboxhandler.conf")
except yaml.parser.ParserError as e:
error_exit("Could not parse config file. Error was %s" % e)
for key in ['incoming', 'outgoing', 'openbis', 'options']:
if key not in config:
error_exit("Config file must include section '%s'" % key)
options_default.update(config['options'])
config['options'] = options_default
if args.pidfile is not None:
config['options']['pidfile'] = args.pidfile
if args.daemon is not None:
config['options']['daemon'] = args.daemon
config['check_config'] = args.check_config
return config | c878a0a01a8e64f9f23cf56010f5b6aee146ea31 | 685 |
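# A sketch of the YAML config file that parse_args expects: the four top-level sections
# checked above must be present; the bodies of 'incoming', 'outgoing' and 'openbis' are
# project-specific placeholders, while the 'options' keys shown mirror options_default.
EXAMPLE_CONFIG = """\
incoming: {}    # placeholder: dropbox directory definitions
outgoing: {}    # placeholder: destination definitions
openbis: {}     # placeholder: OpenBis connection settings
options:
    interval: 60
    daemon: false
    pidfile: ~/.dropboxhandler.pid
"""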
def backend_is_up(backend):
"""Returns whether a server is receiving traffic in HAProxy.
:param backend: backend dict, like one of those returned by smartstack_tools.get_multiple_backends.
:returns is_up: Whether the backend is in a state that receives traffic.
"""
return str(backend['status']).startswith('UP') | 9cb729bc14821b97d21d3d864c3ca7a1d6d46085 | 686 |
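# Usage sketch for backend_is_up above, with minimal HAProxy-style backend dicts.
print(backend_is_up({'status': 'UP'}))        # True
print(backend_is_up({'status': 'UP 1/2'}))    # True -- any status string beginning with "UP" counts
print(backend_is_up({'status': 'DOWN'}))      # False
print(backend_is_up({'status': 'MAINT'}))     # False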
import glob
def get_bot_files_glob(**kwargs):
"""Returns a `list` with the matching file names using the format string for BOT data """
outdict = {}
kwcopy = kwargs.copy()
test_name = kwcopy.pop('testName').lower()
nfiles = kwcopy.get('nfiles', None)
rafts = get_raft_names_dc(kwcopy['run'], kwcopy.get('teststand', 'bot'))
for raft in rafts:
raftdict = {}
slots = getSlotList(raft)
for slot in slots:
glob_string = BOT_FORMATTER(raft=raft, slot=slot, testName=test_name, **kwcopy)
files = sorted(glob.glob(glob_string))
if nfiles is None:
raftdict[slot] = files
else:
raftdict[slot] = files[0:nfiles]
outdict[raft] = raftdict
return outdict | 818abf757b09866afba712c114d1a87938cad990 | 687 |
import numpy as np
def minutiae_selection(minutiae):
""" Selects the subset of most reliable minutiae.
"""
M = np.array([(m['x'], m['y'], m['direction'], m['reliability']) for m in minutiae])
M[:,2] = np.round(np.rad2deg(nbis_idx2angle(M[:,2], N=16)))
M[:,3] = np.round(M[:,3] * 100.0)
M = M.astype(int)
M = M[M[:,3] > np.percentile(M[:,3], 5), :]
return M | 2eff6c4f4f92b395da25aaa83a1de46fb07f4269 | 688 |
def alt_blend_value(data, i, j, k):
"""Computes the average value of the three vertices of a triangle in the
simplex triangulation, where two of the vertices are on the upper
horizontal."""
keys = alt_triangle_coordinates(i, j, k)
return blend_value(data, i, j, k, keys=keys) | 3ddf30c8bb983622d2df99eb76511270fcf62c1b | 689 |
from trax import shapes                    # assumed import paths for the trax helpers used below
from trax.fastmath import numpy as jnp
from trax.layers.base import Fn
def _BinaryCrossEntropy():
"""Returns a layer that computes prediction-target cross entropies."""
def f(model_output, target_category): # pylint: disable=invalid-name
shapes.assert_same_shape(model_output, target_category)
batch_size = model_output.shape[0]
j = jnp.dot(jnp.transpose(target_category), jnp.log(model_output))
j += jnp.dot(jnp.transpose(1 - target_category), jnp.log(1 - model_output))
j = -1.0/batch_size * jnp.squeeze(j)
return j
return Fn('_BinaryCrossEntropy', f) | 2def2ce8e8e32af94ac67de4d99e28e15ff07622 | 690 |
def normalize(subs, strict):
"""
Normalises subtitles.
:param subs: :py:class:`Subtitle` objects
:param bool strict: Whether to enable strict mode, see
:py:func:`Subtitle.to_srt` for more information
:returns: A single SRT formatted string, with each input
:py:class:`Subtitle` represented as an SRT block
:rtype: str
:raises SRTParseError: If parsing fails.
"""
return _cli.compose_suggest_on_fail(subs, strict) | e0e90be189fe77b123bfe960e4e9b9e63977bbe3 | 691 |
def Stern_Brocot(n):
"""
Another way to iterate over rationals
References:
https://stackoverflow.com/questions/24997970/iterating-over-parts-of-the-stern-brocot-tree-in-python
"""
states = [(0, 1, 1, 1)]
result = []
while len(states) != 0:
a, b, c, d = states.pop()
if a + b + c + d <= n:
result.append((a + c, b + d))
states.append((a, b, a + c, b + d))
states.append((a + c, b + d, c, d))
return result | cdaa919932668b33e8233c59964c3ca9bbc30119 | 692 |
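# Worked example for Stern_Brocot above: with n = 5 the traversal yields these
# (numerator, denominator) pairs, all fractions in lowest terms between 0 and 1.
print(Stern_Brocot(5))   # [(1, 2), (2, 3), (1, 3), (1, 4)]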
def compare_elements(prev_hash_dict, current_hash_dict):
"""Compare elements that have changed between prev_hash_dict and current_hash_dict.
Check if any elements have been added, removed or modified.
"""
changed = {}
for key in prev_hash_dict:
elem = current_hash_dict.get(key, '')
if elem == '':
changed[key] = 'deleted'
elif elem != prev_hash_dict[key]:
changed[key] = 'changed'
for key in current_hash_dict:
elem = prev_hash_dict.get(key, '')
if elem == '':
changed[key] = 'added'
return changed | 2f24863a16aca86ccd3a82a4148b34282349e640 | 693 |
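# Usage sketch for compare_elements above: keys only in the first dict are reported as
# 'deleted', keys only in the second as 'added', and differing values as 'changed'.
_prev = {'a.txt': 'hash1', 'b.txt': 'hash2', 'c.txt': 'hash3'}
_curr = {'a.txt': 'hash1', 'b.txt': 'OTHER', 'd.txt': 'hash4'}
print(compare_elements(_prev, _curr))
# -> {'b.txt': 'changed', 'c.txt': 'deleted', 'd.txt': 'added'}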
from random import choices
def generator_string(lang_uses: str = 'all', char_count: int = 1,
                     char_size: str = 'lower') -> str:
    """Generate a random string.
    :param lang_uses: character set to use
    :type lang_uses: str
    :param char_count: number of characters to return
    :type char_count: int
    :param char_size: character case of the symbols
    :type char_size: str
    :return: str
"""
random_string = ''.join(choices(get_alphabet(lang_uses=lang_uses, char_size=char_size), k=char_count))
return random_string | fc517613ce1df5f3d208b4d6b3e5a356ac2f7e13 | 694 |
import sys
import re
from subprocess import check_output, CalledProcessError
from netaddr import IPNetwork, IPAddress  # assumed: these objects expose .ip and .is_loopback()
# IPV4_RE, IPV6_RE, INTERFACE_SPLIT_RE and IFACE_RE are module-level compiled regexes defined elsewhere.
def get_host_ips(version=4, exclude=None):
"""
Gets all IP addresses assigned to this host.
Ignores Loopback Addresses
This function is fail-safe and will return an empty array instead of
raising any exceptions.
:param version: Desired IP address version. Can be 4 or 6. defaults to 4
:param exclude: list of interface name regular expressions to ignore
(ex. ["^lo$","docker0.*"])
:return: List of IPAddress objects.
"""
exclude = exclude or []
ip_addrs = []
# Select Regex for IPv6 or IPv4.
    ip_re = IPV4_RE if version == 4 else IPV6_RE
# Call `ip addr`.
try:
ip_addr_output = check_output(["ip", "-%d" % version, "addr"])
except (CalledProcessError, OSError):
print("Call to 'ip addr' Failed")
sys.exit(1)
# Separate interface blocks from ip addr output and iterate.
for iface_block in INTERFACE_SPLIT_RE.findall(ip_addr_output):
# Try to get the interface name from the block
        match = IFACE_RE.match(iface_block)
        iface = match.group(1) if match else None
# Ignore the interface if it is explicitly excluded
if match and not any(re.match(regex, iface) for regex in exclude):
# Iterate through Addresses on interface.
for address in ip_re.findall(iface_block):
# Append non-loopback addresses.
if not IPNetwork(address).ip.is_loopback():
ip_addrs.append(IPAddress(address))
return ip_addrs | 33ceabec2165021919322881e501745dd673a733 | 695 |
def rate_string(rate, work_unit, computer_prefix=False):
"""Return a human-friendly string representing a rate. 'rate' is given
in 'work_unit's per second. If the rate is less than 0.1 then the inverse
is shown.
Examples:
>>> rate_string(200000, "B", True)
'195KB/s'
>>> rate_string(0.01, "file")
'1m40s/file'
>>> rate_string(1.0 / 24 / 3600, "earthrot")
'1d0h/earthrot'
"""
if rate > 0 and rate < 0.1:
return "%s/%s" % (time_string(1.0 / rate), work_unit)
else:
return "%s/s" % (quantity_string(rate, work_unit, computer_prefix)) | 4e7f62684be770525812465014c35dae0ce1806b | 696 |
import logging
from botocore.exceptions import ClientError
logger = logging.getLogger(__name__)
def get_queue_arn(sqs_client, queue_url: str) -> str:
"""Encapsulates SQS::get_queue_attributes with special attribute QueueArn.
:param sqs_client: The Boto3 AWS SQS client object.
:param queue_url: URL of the queue
:return: The Amazon Resource Name (ARN) of the queue.
"""
try:
response = sqs_client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["QueueArn"])
queue_arn = response["Attributes"]["QueueArn"]
logger.info("Retrieved queue ARN: '%s' for URL: '%s'.", queue_arn, queue_url)
except ClientError:
logger.exception("Couldn't retrieve ARN for queue URL: %s.", queue_url)
raise
else:
return queue_arn | a8fbf9e8271e8809494da7634fff682769020ecd | 697 |
import psutil
def any_flexloggers_running() -> bool:
"""Returns whether any FlexLogger.exe processes are running."""
for proc in psutil.process_iter(["pid", "name"]):
if proc.info["name"].lower() == "flexlogger.exe":
return True
return False | 3dc7fb6c5120e41ff26fda41b111dec9c08560a3 | 698 |
def _get_non_white_runs(mask):
"""Returns those runs that are delimeted by white cells."""
res = []
in_a_block = False
last_idx = len(mask) - 1
for idx, cell in enumerate(mask):
if cell != WHITE and not in_a_block:
in_a_block = True
start = idx
if cell == WHITE and in_a_block:
in_a_block = False
end = idx - 1
res.append(Block(start, end, length=end - start + 1))
if idx == last_idx and in_a_block:
res.append(Block(start, last_idx, length=last_idx - start + 1))
return res | 0a1c4251b0a86dc95f1cea8962827b88f4945edb | 699 |
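# Usage sketch for _get_non_white_runs above; WHITE and Block are module-level names in the
# original nonogram code, so stand-ins with the assumed shapes are defined here.
from collections import namedtuple
WHITE = 0                                                 # assumed sentinel for an empty cell
Block = namedtuple('Block', ['start', 'end', 'length'])   # assumed block record
print(_get_non_white_runs([1, 1, WHITE, WHITE, 1, WHITE, 1, 1, 1]))
# -> [Block(start=0, end=1, length=2), Block(start=4, end=4, length=1), Block(start=6, end=8, length=3)]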