code stringlengths 57 46.3k | quality_prob float64 0.7 0.99 | learning_prob float64 0.5 1 |
|---|---|---|
def reduce_func(nodes):
    """Aggregate incoming messages into updated node representations.

    Parameters
    ----------
    nodes : NodeBatch
        A batch of nodes.

    Returns
    -------
    dict
        Maps 'hv_new' to a float32 tensor of shape (V, K * T). V is the
        number of nodes, K the number of radial filters and T the number
        of features (types of atomic number in the paper).
    """
    # Sum the mailbox messages over the neighbor axis.
    aggregated = nodes.mailbox['m'].sum(1)
    return {'hv_new': aggregated}
# Deletion table for lookup normalization: strips '_', '-' and ' '.
_CLEAN_DELETE_CHARS = str.maketrans('', '', '_- ')

def clean(string_value):
    """Standardize a string value for lookup.

    Lowercases the value, strips surrounding whitespace, and removes
    spaces, underscores and dashes.

    Args:
        string_value (str): The lookup key to be transformed.

    Returns:
        str: The original value, but lowercase and without spaces,
        underscores or dashes.
    """
    # str.translate removes all three characters in one C-level pass
    # instead of three chained .replace() calls.
    return string_value.lower().strip().translate(_CLEAN_DELETE_CHARS)
def metadata_with_prefix(prefix, **kw):
    """Build RPC metadata carrying a resource-prefix entry.

    Args:
        prefix (str): appropriate resource path.

    Returns:
        List[Tuple[str, str]]: RPC metadata with the supplied prefix.
    """
    prefix_entry = ("google-cloud-resource-prefix", prefix)
    return [prefix_entry]
def mean(iterator, length):
    """Return the arithmetic mean of the values in the given iterator.

    ``length`` is the number of values; a zero/falsy length is treated as
    1 so the function never divides by zero.
    """
    total = sum(iterator)
    divisor = float(length or 1)
    return total / divisor
def is_power_of_two(n):
    """Check whether ``n`` is a power of 2.

    Args:
        n: Non-negative integer.

    Returns:
        bool: True iff ``n`` equals 2**k for some integer k >= 0.

    Raises:
        ValueError: If ``n`` is negative.
    """
    if n < 0:
        raise ValueError('Input argument must be >= 0.')
    # A power of two has exactly one bit set. The original `n & (n-1) == 0`
    # alone wrongly accepted n == 0, which has no bits set.
    return n > 0 and n & (n - 1) == 0
def weighted_avg(x, weights):
    """Return a weighted average of x (a sequence of vectors).

    Args:
        x: float tensor, batch * len * hdim
        weights: float tensor, batch * len, sum(dim = 1) = 1

    Output:
        x_avg: batch * hdim
    """
    # (batch, 1, len) @ (batch, len, hdim) -> (batch, 1, hdim) -> (batch, hdim)
    expanded = weights.unsqueeze(1)
    averaged = expanded.bmm(x)
    return averaged.squeeze(1)
def get_model_outputs(model, batch_X, batch_mask, use_MLP, use_PSP, contexts, task_index):
    """
    Run a forward pass over a batch of input data and return the outputs.

    :param model: torch model instance
    :param batch_X: a batch of input data
    :param batch_mask: a batch of input data masks if used
    :param use_MLP: boolean - if True use MLP, else use Transformer
    :param use_PSP: boolean - if True, PSP method is used, meaning we need a set of contexts for each task (including the first)
    :param contexts: binary context vectors
    :param task_index: index of the current task, which is being learned
    :return: batch model output
    """
    # MLP models consume only the inputs; Transformers also take the masks.
    inputs = (batch_X,) if use_MLP else (batch_X, batch_mask)
    if use_PSP:
        return model.forward(*inputs, use_PSP, contexts, task_index)
    return model.forward(*inputs)
import numpy
def ufunc_group_by_idx(idx, values, ufunc, init, minlength=None):
    """
    Apply ``ufunc`` to ``values`` grouped by the indexes in array ``idx``.

    Returns an array where position ``i`` holds the result of ``ufunc``
    folded over ``values[idx == i]``
    (see available ufuncs `here <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_).

    Warning: ``init`` is not only a filling value for missing indexes.
    If index ``i`` is missing then ``out[i] = init``, but this value also
    serves as the initialization of ``ufunc`` on every group of ``values``.
    For example, with ``ufunc = numpy.add`` and ``init = -1``, the sum of
    each group is decreased by one.

    :param array idx: ``(n,) int array``
    :param array values: ``(n,) dtype array``
    :param numpy.ufunc ufunc: universal function applied to the groups of ``values``
    :param dtype init: initialization value
    :param int? minlength: ``(default: idx.max() + 1)``
    :returns: (min-length,) dtype array, such that ``out[i] = ufunc(values[idx==i])``

    Example
    _______
    >>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
    >>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
    >>> ufunc_group_by_idx(idx, values, numpy.maximum, -1)
    array([ 4,  6, -1,  6])
    >>> ufunc_group_by_idx(idx, values, numpy.add, 0)
    array([10, 12,  0, 10])
    """
    out_length = idx.max() + 1
    if minlength:
        out_length = max(out_length, minlength)
    out = numpy.full(out_length, init)
    # ufunc.at performs an unbuffered in-place reduction per target index.
    ufunc.at(out, idx, values)
    return out
import numpy
def get_value_by_idx(idx, values, default, check_unique=True, minlength=None):
    """
    Scatter ``values`` into an array indexed by ``idx``.

    Given index array ``idx`` and array ``values`` (unordered, not
    necessarily full), output an array such that ``out[i] = values[idx==i]``.
    When all indexes are unique, this is equivalent to sorting the values
    by their index and filling missing indexes with ``default``.

    If ``idx`` elements are not unique and you still want to proceed, set
    ``check_unique`` to ``False``; the value kept for a duplicated index
    is then arbitrary among the candidates.

    :param array idx: ``(n,) uint array`` with values < max_idx
    :param array values: ``(n,) dtype array``
    :param dtype default: filling value for ``output[i]`` if there is no ``idx == i``
    :param bool check_unique: if ``True``, assert that ``idx`` is unique
    :param int? minlength: minimum shape for the output array (``default: idx.max() + 1``)
    :returns array: (max_idx+1,), dtype array such that ``out[i] = values[idx==i]``

    Example
    _______
    >>> idx = numpy.array([8,2,4,7])
    >>> values = numpy.array([100, 200, 300, 400])
    >>> get_value_by_idx(idx, values, -1, check_unique=False, minlength=None)
    array([ -1,  -1, 200,  -1, 300,  -1,  -1, 400, 100])
    """
    assert idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all()), (
        'Can only use get_xx_by_idx with integer indexes in `idx`, where (idx >= 0).all()')
    if check_unique:
        assert numpy.unique(idx).shape == idx.shape, "indexes in `idx` should be unique"
    out_length = idx.max() + 1
    if minlength:
        out_length = max(out_length, minlength)
    out = numpy.full(out_length, default, dtype=values.dtype)
    # Fancy-index assignment scatters each value to its index slot.
    out[idx] = values
    return out
import numpy
def average_by_idx(idx, values, weights=None, minlength=None, fill=0, dtype='float64'):
    """
    Compute the (optionally weighted) average of ``values`` grouped by ``idx``.

    :param array idx: (n,) non-negative int array
    :param array values: (n,) float array
    :param array? weights: (n,) float array
    :param int? minlength: (default: idx.max() + 1)
    :param float? fill: filling value for missing idx (default: 0)
    :param str? dtype: output dtype (default: 'float64')
    :returns: (min-length,) float array, such that out[i] = mean(values[idx==i])

    Example
    _______
    >>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
    >>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
    >>> average_by_idx(idx, values, fill=0)
    array([ 2.        ,  3.        ,  0.        ,  3.33333333])
    >>> weights = numpy.array([0, 1, 0, 0, 0, 1, 2, 3, 4, 1, 1, 0])
    >>> average_by_idx(idx, values, weights=weights, fill=0)
    array([ 1.,  4.,  0.,  2.])
    """
    assert idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all()), (
        'Can only use get_xx_by_idx with integer idx, where (idx >= 0).all()')
    # FIXME: define dtype whitelist instead
    assert values.dtype.kind not in 'USOb', ('values dtype not supported')
    # Per-group normalizer: item count, or sum of weights when provided.
    norm_by_idx = numpy.bincount(
        idx, weights, minlength=minlength).astype(dtype)
    if weights is not None:
        values = values * weights
    sum_by_idx = numpy.bincount(idx, values, minlength=minlength).astype(dtype)
    # `numpy.warnings` was removed from recent NumPy releases; use errstate
    # to silence the divide-by-zero for empty groups, which are then
    # patched with `fill` by numpy.where.
    with numpy.errstate(divide='ignore', invalid='ignore'):
        return numpy.where(norm_by_idx > 0, sum_by_idx / norm_by_idx, fill)
def datetime_to_string(datetime):
    """
    Serialize a datetime to an ISO-8601 string.

    :param datetime: datetime value (a UTC offset of +00:00 is rendered as 'Z')
    :return: string containing the iso8601-format date string
    """
    iso_value = datetime.isoformat()
    return iso_value.replace('+00:00', 'Z')
def HighlightAlignedHtml(hyp, ref, err_type):
    """Generate a html element to highlight the difference between hyp and ref.

    Args:
        hyp: Hypothesis string.
        ref: Reference string.
        err_type: one of 'none', 'sub', 'del', 'ins'.

    Returns:
        a html string where disagreements are highlighted.

    Note `hyp` is highlighted in green, and marked with <del> </del>
    `ref` is highlighted in yellow. If you want html with other styles,
    consider writing your own function.

    Raises:
        ValueError: if err_type is not among ['none', 'sub', 'del', 'ins'],
        or if when err_type == 'none', hyp != ref.
    """
    highlighted_html = ''
    if err_type == 'none':
        # Agreement: the hypothesis token must equal the reference token.
        if hyp != ref:
            raise ValueError('hyp (%s) does not match ref (%s) for none error' %
                             (hyp, ref))
        highlighted_html += '%s ' % hyp
    elif err_type == 'sub':
        # Substitution: struck-through hypothesis next to the reference.
        highlighted_html += """<span style="background-color: yellow">
    <del>%s</del></span><span style="background-color: yellow">
    %s </span> """ % (hyp, ref)
    elif err_type == 'del':
        # Deletion: the reference token is missing from the hypothesis.
        highlighted_html += """<span style="background-color: red">
    %s </span> """ % (
        ref)
    elif err_type == 'ins':
        # Insertion: the hypothesis token has no reference counterpart.
        highlighted_html += """<span style="background-color: green">
    <del>%s</del> </span> """ % (
        hyp)
    else:
        raise ValueError('unknown err_type ' + err_type)
    return highlighted_html
def spec_sum(ph2):
    """Compute the total spectral sum of the real spectral quantity ``ph^2``.

    Every mode is doubled, then the first and last entries along the final
    axis are halved back, and the result is summed over the last two axes.

    Parameters
    ----------
    ph2 : real array
        The field on which to compute the sum.

    Returns
    -------
    var_dens : float
        The sum of `ph2`.
    """
    doubled = 2. * ph2
    doubled[..., 0] = doubled[..., 0] / 2.
    doubled[..., -1] = doubled[..., -1] / 2.
    return doubled.sum(axis=(-1, -2))
def Score(low, high, n):
    """Score whether the actual value falls in the range.

    Hitting the posts counts as 0.5, -1 is invalid.

    low: low end of range
    high: high end of range
    n: actual value

    Returns: -1, 0, 0.5 or 1
    """
    if n is None:
        return -1
    if n == low or n == high:
        return 0.5
    return 1 if low < n < high else 0
def shape(x):
    """Return the symbolic shape of a tensor or variable.

    # Arguments
        x: A tensor or variable.

    # Returns
        A symbolic shape (itself a tensor for symbolic backends).

    # Examples
    ```python
    >>> from keras import backend as K
    >>> inputs = K.placeholder(shape=(2, 4, 5))
    >>> K.shape(inputs)
    Shape.0
    # Use K.int_shape(x) instead for an integer tuple.
    ```
    """
    return x.shape
def int_shape(x):
    """Return the shape of a tensor or variable as a tuple of int/None entries.

    # Arguments
        x: Tensor or variable.

    # Returns
        A tuple of integers (or None entries), or None when the object
        carries no Keras shape metadata.

    # Examples
    ```python
    >>> from keras import backend as K
    >>> inputs = K.placeholder(shape=(2, 4, 5))
    >>> K.int_shape(inputs)
    (2, 4, 5)
    ```
    """
    # `_keras_shape` is attached by Keras layers; absent on plain tensors.
    return getattr(x, '_keras_shape', None)
def ndim(x):
    """Return the number of axes in a tensor, as an integer.

    # Arguments
        x: Tensor or variable.

    # Returns
        Integer (scalar), number of axes.

    # Examples
    ```python
    >>> from keras import backend as K
    >>> inputs = K.placeholder(shape=(2, 4, 5))
    >>> K.ndim(inputs)
    3
    ```
    """
    return x.ndim
def dtype(x):
    """Return the dtype of a Keras tensor or variable, as a string.

    # Arguments
        x: Tensor or variable.

    # Returns
        String, dtype of `x`.

    # Examples
    ```python
    >>> from keras import backend as K
    >>> K.dtype(K.placeholder(shape=(2,4,5)))
    'float32'
    >>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64'))
    'float64'
    ```
    """
    return x.dtype
def update(x, new_x):
    """Update the value of `x` to `new_x`.

    # Arguments
        x: A `Variable`.
        new_x: A tensor of same shape as `x`.

    # Returns
        The variable `x` updated (as an update pair for the backend).
    """
    update_pair = (x, new_x)
    return update_pair
def moving_average_update(variable, value, momentum):
    """Compute the moving average of a variable.

    # Arguments
        variable: A `Variable`.
        value: A tensor with the same shape as `variable`.
        momentum: The moving average momentum.

    # Returns
        An update pair (variable, new moving-average value).
    """
    # Exponential moving average: keep `momentum` of the old value.
    averaged = variable * momentum + value * (1. - momentum)
    return (variable, averaged)
def gather(reference, indices):
    """Retrieve the elements of indices `indices` in the tensor `reference`.

    # Arguments
        reference: A tensor.
        indices: An integer tensor of indices.

    # Returns
        A tensor of same type as `reference`.
    """
    result = reference[indices]
    # Propagate Keras shape metadata when both operands carry it.
    carries_shape = hasattr(reference, '_keras_shape') and hasattr(indices, '_keras_shape')
    if carries_shape:
        result._keras_shape = indices._keras_shape + reference._keras_shape[1:]
    return result
def get_variable_shape(x):
    """Return the shape of a variable.

    # Arguments
        x: A variable.

    # Returns
        A tuple of integers.
    """
    # borrow/return_internal_type avoid copying the underlying storage.
    internal_value = x.get_value(borrow=True, return_internal_type=True)
    return internal_value.shape
def _preprocess_conv2d_input(x, data_format):
"""Transpose and cast the input before the conv2d.
# Arguments
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
A tensor.
"""
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols)
# TF input shape: (samples, rows, cols, input_depth)
x = x.dimshuffle((0, 3, 1, 2))
return x | 0.845465 | 0.594875 |
def _preprocess_conv3d_input(x, data_format):
"""Transpose and cast the input before the conv3d.
# Arguments
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
A tensor.
"""
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols, slices)
# TF input shape: (samples, rows, cols, slices, input_depth)
x = x.dimshuffle((0, 4, 1, 2, 3))
return x | 0.865508 | 0.584627 |
def match_cases(original, replacement):
    """Return a copy of ``replacement`` with its casing matched to ``original``.

    Only supports upper case and capitalised (title) case — everything
    else returns the replacement unchanged (lower case by default).
    """
    if original.isupper():
        return replacement.upper()
    return replacement.title() if original.istitle() else replacement
def test_bit(value, offset):
"""Test a bit at offset position
:param value: value of integer to test
:type value: int
:param offset: bit offset (0 is lsb)
:type offset: int
:returns: value of bit at offset position
:rtype: bool
"""
mask = 1 << offset
return bool(value & mask) | 0.831451 | 0.964422 |
def set_bit(value, offset):
    """Set a bit at offset position.

    :param value: value of integer where the bit is set
    :type value: int
    :param offset: bit offset (0 is lsb)
    :type offset: int
    :returns: value of integer with bit set
    :rtype: int
    """
    return int(value | (1 << offset))
def reset_bit(value, offset):
    """Reset (clear) a bit at offset position.

    :param value: value of integer where the bit is reset
    :type value: int
    :param offset: bit offset (0 is lsb)
    :type offset: int
    :returns: value of integer with bit reset
    :rtype: int
    """
    return int(value & ~(1 << offset))
def toggle_bit(value, offset):
    """Return an integer with the bit at offset position inverted.

    :param value: value of integer where the bit is inverted
    :type value: int
    :param offset: bit offset (0 is lsb)
    :type offset: int
    :returns: value of integer with bit inverted
    :rtype: int
    """
    return int(value ^ (1 << offset))
def _combine_sup_unsup_datasets(sup_data, unsup_data):
"""Combines supervised and usupervised samples into single dictionary.
Args:
sup_data: dictionary with examples from supervised dataset.
unsup_data: dictionary with examples from unsupervised dataset.
Returns:
Dictionary with combined suvervised and unsupervised examples.
"""
# Copy all values from supervised data as is
output_dict = dict(sup_data)
# take only 'image' and 'aug_image' from unsupervised dataset and
# rename then into 'unsup_image' and 'unsup_aug_image'
if 'image' in unsup_data:
output_dict['unsup_image'] = unsup_data.pop('image')
if 'aug_image' in unsup_data:
output_dict['unsup_aug_image'] = unsup_data.pop('aug_image')
return output_dict | 0.842151 | 0.654826 |
def getVal(item):
    """
    Return an item's weight-to-value ratio, used for sorting.

    Args:
        item: A dictionary with entries "weight" and "value"

    Returns:
        The item's weight per value
    """
    weight, value = item["weight"], item["value"]
    return weight / value
def _compare_time(f_time, interval):
"""
Compares time with interval less than interval
Args:
f_time ([type]): [description]
interval ([type]): [description]
Returns:
[type]: [description]
"""
if f_time < interval:
f_time = interval
elif f_time % interval != 0:
f_time -= f_time % interval
return f_time | 0.861232 | 0.508361 |
def channel_shuffle(x, groups):
    """Channel shuffle operation.

    Interleaves channels across groups by reshaping to (batch, groups,
    channels/groups, H, W), swapping the two channel axes and flattening.

    Args:
        x: input tensor of shape (batch, channels, height, width)
        groups: input branch number; must divide the channel count
    """
    batch, channels, height, width = x.size()
    per_group = int(channels / groups)
    grouped = x.view(batch, groups, per_group, height, width)
    shuffled = grouped.transpose(1, 2).contiguous()
    return shuffled.view(batch, -1, height, width)
def expand_differentia(differentia_code):
    """
    Expand a differentia code; in most cases it remains unmodified.

    Codes containing a ``*`` wildcard are mapped to "No differentia".

    :param differentia_code: raw differentia code string
    :return: the expanded differentia string
    """
    if "*" in differentia_code:
        return "No differentia"
    return differentia_code
def get_m2_name(self):
    """Return the name of the current area unit.

    Parameters
    ----------
    self : Unit
        A Unit object.

    Returns
    -------
    unit_name : str
        Name of the current unit ("mm²" when unit_m2 == 1, else "m²").
    """
    return "mm²" if self.unit_m2 == 1 else "m²"
import torch
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False):
    """Calculate overlap between two set of bboxes.

    If ``is_aligned`` is ``False``, then calculate the ious between each bbox
    of bboxes1 and bboxes2, otherwise the ious between each aligned pair of
    bboxes1 and bboxes2.

    Args:
        bboxes1 (Tensor): shape (m, 4), boxes as (x1, y1, x2, y2); the
            +1 in the width/height math treats coordinates as inclusive
            integer pixel indices.
        bboxes2 (Tensor): shape (n, 4), if is_aligned is ``True``, then m and n
            must be equal.
        mode (str): "iou" (intersection over union) or "iof" (intersection over
            foreground, i.e. normalized by the area of bboxes1 only).
        is_aligned (bool): if True, only compare bboxes1[i] with bboxes2[i].

    Returns:
        ious(Tensor): shape (m, n) if is_aligned == False, else a 1-D tensor
        of per-pair overlaps (the empty-input fast path returns shape (m, 1)).
    """
    assert mode in ['iou', 'iof']
    rows = bboxes1.size(0)
    cols = bboxes2.size(0)
    if is_aligned:
        assert rows == cols
    # Fast path for empty inputs: return an empty tensor of the right shape.
    if rows * cols == 0:
        return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols)
    if is_aligned:
        # Elementwise intersection of each aligned pair.
        lt = torch.max(bboxes1[:, :2], bboxes2[:, :2])  # [rows, 2]
        rb = torch.min(bboxes1[:, 2:], bboxes2[:, 2:])  # [rows, 2]
        wh = (rb - lt + 1).clamp(min=0)  # [rows, 2]; clamp kills disjoint pairs
        overlap = wh[:, 0] * wh[:, 1]
        area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
            bboxes1[:, 3] - bboxes1[:, 1] + 1)
        if mode == 'iou':
            area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
                bboxes2[:, 3] - bboxes2[:, 1] + 1)
            ious = overlap / (area1 + area2 - overlap)
        else:
            # 'iof': normalize by the foreground (bboxes1) area only.
            ious = overlap / area1
    else:
        # Broadcast bboxes1 against bboxes2 for all pairwise intersections.
        lt = torch.max(bboxes1[:, None, :2], bboxes2[:, :2])  # [rows, cols, 2]
        rb = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:])  # [rows, cols, 2]
        wh = (rb - lt + 1).clamp(min=0)  # [rows, cols, 2]
        overlap = wh[:, :, 0] * wh[:, :, 1]
        area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
            bboxes1[:, 3] - bboxes1[:, 1] + 1)
        if mode == 'iou':
            area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
                bboxes2[:, 3] - bboxes2[:, 1] + 1)
            ious = overlap / (area1[:, None] + area2 - overlap)
        else:
            ious = overlap / (area1[:, None])
    return ious
def minimum(measurement_one, measurement_two, include_uncertainty=True):
    """
    Return the measurement with the smaller value.

    Ties on value are broken by uncertainty when ``include_uncertainty``
    is True (the measurement with the larger uncertainty wins); otherwise
    ``measurement_one`` is returned.

    :param measurement_one: first measurement (has .value / .uncertainty)
    :param measurement_two: second measurement
    :param include_uncertainty: break value ties by uncertainty
    :return: the selected measurement
    """
    if measurement_one.value != measurement_two.value:
        smaller_is_first = measurement_one.value < measurement_two.value
        return measurement_one if smaller_is_first else measurement_two
    if include_uncertainty and measurement_two.uncertainty > measurement_one.uncertainty:
        return measurement_two
    return measurement_one
def maximum(measurement_one, measurement_two, include_uncertainty=True):
    """
    Return the measurement with the larger value.

    Ties on value are broken by uncertainty when ``include_uncertainty``
    is True (the measurement with the larger uncertainty wins); otherwise
    ``measurement_one`` is returned.

    :param measurement_one: first measurement (has .value / .uncertainty)
    :param measurement_two: second measurement
    :param include_uncertainty: break value ties by uncertainty
    :return: the selected measurement
    """
    if measurement_one.value != measurement_two.value:
        larger_is_first = measurement_one.value > measurement_two.value
        return measurement_one if larger_is_first else measurement_two
    if include_uncertainty and measurement_two.uncertainty > measurement_one.uncertainty:
        return measurement_two
    return measurement_one
def draw_predictions(task, video_vis):
    """
    Draw predictions onto the frames of the given task.

    Args:
        task (TaskInfo object): task object that contains
            the necessary information for visualization. (e.g. frames, preds)
            All attributes must lie on CPU devices.
        video_vis (VideoVisualizer object): the video visualizer object.

    Returns:
        The task's buffer frames (untouched) followed by the clip frames
        with predictions drawn on them.
    """
    frames = task.frames
    preds = task.action_preds
    # Buffer frames are passed through untouched; only the remaining clip
    # frames get predictions drawn on them. (Removed the unused
    # keyframe_idx/draw_range computation and the stale commented-out
    # draw_clip_range call.)
    buffer = frames[: task.num_buffer_frames]
    frames = video_vis.draw_clip(frames[task.num_buffer_frames:], preds)
    del task
    return buffer + frames
def packet_read(idn, reg0, width):
    """Create an instruction packet to read data from the DXL control table.

    NOTE: The namesake function in dxl_commv1 serves a specific purpose;
    this one is just a filler kept to fit with the old code and help with
    backward compatibility.

    Args:
        idn: An integer representing the DXL ID number
        reg0: An integer representing the register index in the control table
        width: Width of the register in bytes

    Returns:
        A tuple - (ID of DXL device, address of register, width of the
        register in bytes)
    """
    packet = (idn, reg0, width)
    return packet
def init_parameters(parameter):
    """Fill in default values for the NMFdiag parameter dictionary.

    Parameters
    ----------
    parameter : dict or None
        See the NMFdiag function for further information. Missing keys
        are populated with defaults; existing entries are preserved.

    Returns
    -------
    parameter : dict
    """
    parameter = dict() if parameter is None else parameter
    parameter.setdefault('distMeas', 'divergence')
    # BUG FIX: the original guarded the 'numOfIter' default on the presence
    # of 'fixW', which raised KeyError whenever 'fixW' was supplied without
    # 'numOfIter'. setdefault applies each default independently.
    parameter.setdefault('numOfIter', 50)
    parameter.setdefault('fixW', False)
    parameter.setdefault('continuity', {'length': 10,
                                        'grid': 5,
                                        'sparsen': [1, 1],
                                        'polyphony': 5})
    parameter.setdefault('vis', False)
    return parameter
def gaia_dr2_conesearch_query(ra=165.86, dec=34.829694, radius=3., max=100000):
    """
    Generate a cone-search query string for the Gaia DR2 TAP servers.

    Parameters
    ----------
    ra, dec : float
        RA, Dec in decimal degrees
    radius : float
        Search radius, in arc-minutes.
    max : int
        Maximum number of rows to return.

    Returns
    -------
    query : str
        Query string
    """
    # The ADQL CIRCLE radius is in degrees; `radius` comes in arc-minutes.
    return (
        f"SELECT TOP {max} * FROM gaiadr2.gaia_source WHERE "
        f"CONTAINS(POINT('ICRS',gaiadr2.gaia_source.ra,gaiadr2.gaia_source.dec),"
        f"CIRCLE('ICRS',{ra},{dec},{radius / 60.:.2f}))=1"
    )
def neighbors(cell):
    """Return the eight (Moore) neighbors of the given cell."""
    x, y = cell
    offsets = [(1, 0), (-1, 0), (0, 1), (0, -1),
               (1, 1), (1, -1), (-1, 1), (-1, -1)]
    return [(x + dx, y + dy) for dx, dy in offsets]
def perpendicular_vector(v):
    """
    Compute a vector perpendicular to the 3-D input vector.

    Raises:
        ValueError: if ``v`` is the zero vector (x = y = z = 0 is not an
            acceptable solution).
    """
    if v[0] == 0 and v[1] == 0 and v[2] == 0:
        raise ValueError("zero-vector")
    # For vectors in the z=0 plane, rotate in-plane by 90 degrees.
    if v[2] == 0:
        return [-v[1], v[0], 0]
    # Otherwise solve v . (1, 1, z) = 0 for z.
    return [1.0, 1.0, -1.0 * (v[0] + v[1]) / v[2]]
def read_default(field):
    """
    Render a field's default value as '=<value>', or '' when absent.

    :param field: field to get the default value of, if it exists
    :type field: rosidl_parser.definition.Member
    :returns: '=' followed by the default value, or an empty string
    :rtype: str
    """
    if not field.has_annotations('default'):
        return ''
    default_value = field.get_annotation_values('default')[0]['value']
    return '=' + str(default_value)
import torch
def _norm_mpc(t, norm_factor):
    """
    Computation of a norm of a vector in MPC. The vector should be an
    AdditiveSharedTensor.

    It performs the norm calculation by masking the tensor with a
    multiplication by a big random number drawn from a uniform distribution,
    and computing the square root of the squared norm of the masked tensor,
    which is computed beforehand with a dot product in MPC.

    In order to maintain stability and avoid overflow, this function uses a
    norm_factor that scales down the tensor for MPC computations and rescales
    it at the end. For example in the case of the DASH algorithm, this
    norm_factor should be of the order of the square root of the number of
    entries in the original matrix used to perform the compression phase,
    assuming the entries are standardized.

    Args:
        t: 1-dim AdditiveSharedTensor, representing a vector.
        norm_factor: float. The normalization factor used to avoid overflow.

    Returns:
        the norm of the vector as an AdditiveSharedTensor
    """
    # Recover the MPC topology from the shared tensor: the workers holding
    # the shares, the crypto provider, and the fixed-precision field.
    workers = t.child.child.locations
    crypto_prov = t.child.child.crypto_provider
    prec_frac = t.child.precision_fractional
    field = t.child.child.field
    norm_factor = int(norm_factor)
    # Scale down so the squared norm stays representable in the field.
    t_normalized = t / norm_factor
    # Upper bound for the random mask — presumably chosen so r**2 * norm_sq
    # fits in the field at this fractional precision (TODO confirm bound).
    Q = int(field ** (1 / 2) / 10 ** (prec_frac / 2))
    norm_sq = (t_normalized ** 2).sum().squeeze()
    # Random big number
    r = (
        torch.LongTensor([0])
        .fix_precision(precision_fractional=prec_frac)
        .share(*workers, crypto_provider=crypto_prov)
        .random_(0, Q)
    )
    # Compute masked norm
    masked_norm_sq = r ** 2 * norm_sq
    # Get compute square root
    # (only the masked square is revealed to the crypto provider, which
    # never sees the unmasked value)
    masked_norm_sq = masked_norm_sq.send(crypto_prov).remote_get().float_precision()
    masked_norm = torch.sqrt(masked_norm_sq)
    # Secret share and compute unmasked norm in MPC
    masked_norm = (
        masked_norm.fix_precision(precision_fractional=prec_frac)
        .share(*workers, crypto_provider=crypto_prov)
        .get()
    )
    # Undo the random mask and the overflow-avoidance scaling.
    norm = masked_norm / r * norm_factor
    return norm.squeeze()
def nystroem_oos(dmap_object, Y):
    """
    Performs Nystroem out-of-sample extension to calculate the values of the diffusion coordinates at each given point.

    Parameters
    ----------
    dmap_object : DiffusionMap object
        Diffusion map upon which to perform the out-of-sample extension.
    Y : array-like, shape (n_query, n_features)
        Data for which to perform the out-of-sample extension.

    Returns
    -------
    phi : numpy array, shape (n_query, n_eigenvectors)
        Transformed value of the given values.
    """
    # check if Y is equal to data. If yes, no computation needed.
    # compute the values of the kernel matrix
    kernel_extended = dmap_object.local_kernel.compute(Y)
    # Recompute the kernel weights on the training data so the extended
    # kernel can be normalized consistently with the fitted map.
    weights = dmap_object._compute_weights(dmap_object.local_kernel.data)
    # Left/right normalization mirrors the transition-matrix construction
    # used when the diffusion map was fitted.
    P = dmap_object._left_normalize(dmap_object._right_normalize(kernel_extended, dmap_object.right_norm_vec, weights))
    # Project the query points onto the fitted diffusion-map coordinates.
    # NOTE(review): the eigenvalue rescaling below is disabled — presumably
    # P * dmap already yields the desired embedding; confirm before re-enabling.
    oos_evecs = P * dmap_object.dmap
    # evals_p = dmap_object.local_kernel.epsilon_fitted * dmap_object.evals + 1.
    # oos_dmap = np.dot(oos_evecs, np.diag(1. / evals_p))
    return oos_evecs
def take(a, indices, axis=None, out=None):
    """Take elements of an array at specified indices along an axis.

    This is an implementation of "fancy indexing" at a single axis; the
    ``mode`` option of :func:`numpy.take` is not supported.

    Args:
        a (cupy.ndarray): Array to extract elements from.
        indices (int or array-like): Indices of elements that this function
            takes.
        axis (int): The axis along which to select indices. The flattened
            input is used by default.
        out (cupy.ndarray): Output array. If provided, it should be of
            appropriate shape and dtype.

    Returns:
        cupy.ndarray: The result of fancy indexing.

    .. seealso:: :func:`numpy.take`
    """
    # TODO(okuta): check type
    return a.take(indices, axis=axis, out=out)
def compress(condition, a, axis=None, out=None):
    """Return selected slices of an array along the given axis.

    Args:
        condition (1-D array of bools): Selects which entries to return.
            If len(condition) is less than the size of ``a`` along the
            given axis, the output is truncated to the length of the
            condition array.
        a (cupy.ndarray): Array from which to extract a part.
        axis (int): Axis along which to take slices. If None (default),
            work on the flattened array.
        out (cupy.ndarray): Output array. If provided, it should be of
            appropriate shape and dtype.

    Returns:
        cupy.ndarray: A copy of ``a`` without the slices along ``axis``
        for which ``condition`` is false.

    .. warning::
        This function may synchronize the device.

    .. seealso:: :func:`numpy.compress`
    """
    return a.compress(condition, axis=axis, out=out)
def diagonal(a, offset=0, axis1=0, axis2=1):
    """Return specified diagonals.

    Extracts the diagonals along two specified axes; the other axes are
    not changed. A writable view is returned, as NumPy 1.10 does.

    Args:
        a (cupy.ndarray): Array from which the diagonals are taken.
        offset (int): Index of the diagonals. Zero indicates the main
            diagonals, a positive value upper diagonals, and a negative
            value lower diagonals.
        axis1 (int): The first axis to take diagonals from.
        axis2 (int): The second axis to take diagonals from.

    Returns:
        cupy.ndarray: A view of the diagonals of ``a``.

    .. seealso:: :func:`numpy.diagonal`
    """
    # TODO(okuta): check type
    return a.diagonal(offset=offset, axis1=axis1, axis2=axis2)
import torch
def one_hot_encoder(idx, n_cls):
    """
    One-hot encode a tensor of categorical indices.

    Args:
        idx: integer tensor of category indices, shape (N,) or (N, 1);
            all values must be < n_cls
        n_cls: number of classes

    Returns:
        one-hot encoded float tensor of shape (N, n_cls)
    """
    assert torch.max(idx).item() < n_cls
    # Normalize to a column vector so scatter_ can index along dim 1.
    if idx.dim() == 1:
        idx = idx.unsqueeze(1)
    onehot = torch.zeros(idx.size(0), n_cls, device=idx.device)
    onehot.scatter_(1, idx.long(), 1)
    return onehot
def runrate_column(df=None, column=None, window=5, win_type=None):
    """
    Append the run rate (moving average) of a column to a dataframe.

    Parameters
    ==========
    df : input dataframe
    column : column for which the run rate should be computed
    window : how many observations are used for the run rate
    win_type : window type, see https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.rolling.html
        'None' means equal weighting

    Returns
    =======
    df : dataframe with run rate appended as column + "_rr" + str(window)
    column_rr : name of newly added column
    """
    column_rr = "{}_rr{}".format(column, window)
    rolling = df[column].rolling(window=window, win_type=win_type)
    df[column_rr] = rolling.mean()
    return df, column_rr
def ewm_column(df=None, column=None, alpha=0.8, ignore_na=True, func="mean"):
    """
    Append an exponentially weighted moving statistic of a column.

    Parameters
    ==========
    df : input dataframe
    column : column for which the statistic should be computed
    alpha : smoothing factor [0 < alpha <= 1]
    ignore_na : ignore missing values
    func : one of "mean", "var" or "std" (anything else adds no column)

    Returns
    =======
    df : dataframe with the statistic appended as column + "_ewm" + str(alpha) + "_" + func
    column_ewm : name of newly added column
    """
    column_ewm = column + "_ewm" + str(alpha)
    ewm = df[column].ewm(alpha=alpha, ignore_na=ignore_na)
    if func in ("mean", "var", "std"):
        column_ewm = column_ewm + "_" + func
        # Dispatch to the matching pandas EWM accessor method.
        df[column_ewm] = getattr(ewm, func)()
    return df, column_ewm
def format_numeric(fig, plot, col):
    """Format a histogram plot for a numeric column of the dataset.

    Parameters:
        fig: figure whose suptitle is set
        plot: axes/plot object to title
        col (pandas Series): numeric column being plotted

    Returns:
        The titled plot object.
    """
    fig.suptitle('Numerical Histogram Plot(s):', weight='bold', size=30,
                 color='k', x=0.5, y=0.965)
    return plot.set(title='Count of {}'.format(col.name))
def format_categorical(fig, plot, col):
    """Format a count bar plot for a categorical column of the dataset.

    Parameters:
        fig: figure whose suptitle is set
        plot: axes/plot object to title
        col (pandas Series): categorical column being plotted

    Returns:
        The titled plot object.
    """
    fig.suptitle('Categorical Histogram Plot(s):', weight='bold', size=30,
                 color='k', x=0.5, y=0.935)
    return plot.set(title='Count of {}'.format(col.name))
def get_top_k(df, proba_col, true_label_col, k=5, decision_threshold=0.5):
"""
For binary classification problems
Returns k most correct and incorrect example for each class
Also returns k most unsure examples
:param df: DataFrame containing predictions, and true labels
:param proba_col: column name of predicted probabilities
:param true_label_col: column name of true labels
:param k: number of examples to show for each category
:param decision_threshold: classifier decision boundary to classify as positive
:return: correct_pos, correct_neg, incorrect_pos, incorrect_neg, unsure
"""
# Get correct and incorrect predictions
correct = df[
(df[proba_col] > decision_threshold) == df[true_label_col]
].copy()
incorrect = df[
(df[proba_col] > decision_threshold) != df[true_label_col]
].copy()
top_correct_positive = correct[correct[true_label_col]].nlargest(
k, proba_col
)
top_correct_negative = correct[~correct[true_label_col]].nsmallest(
k, proba_col
)
top_incorrect_positive = incorrect[incorrect[true_label_col]].nsmallest(
k, proba_col
)
top_incorrect_negative = incorrect[~incorrect[true_label_col]].nlargest(
k, proba_col
)
# Get closest examples to decision threshold
most_uncertain = df.iloc[
(df[proba_col] - decision_threshold).abs().argsort()[:k]
]
return (
top_correct_positive,
top_correct_negative,
top_incorrect_positive,
top_incorrect_negative,
most_uncertain,
) | 0.852874 | 0.60364 |
def get_cluster_num(model):
    """Return the number of clusters of a scikit-learn clustering model.

    Parameters
    ----------
    model :
        A fitted scikit-learn clustering estimator exposing `n_clusters`.

    Returns
    -------
    int
        The model's `n_clusters` attribute.
    """
    cluster_count = model.n_clusters
    return cluster_count
def get_threshold():
    """Return the threshold value.

    Returns
    -------
    str
        The threshold value (as a string).
    """
    threshold = '0.001'
    return threshold
def get_multiple_model_method(model):
    """Return the PMML multiple-model-method name for an ensemble model.

    Parameters
    ----------
    model :
        A scikit-learn ensemble model instance.

    Returns
    -------
    str or None
        The multiple model method for a mining model (None for
        unrecognized model classes, as before).
    """
    methods = {
        'GradientBoostingClassifier': 'modelChain',
        'GradientBoostingRegressor': 'sum',
        'RandomForestClassifier': 'majorityVote',
        'RandomForestRegressor': 'average',
        'IsolationForest': 'average',
    }
    return methods.get(model.__class__.__name__)
def get_classificationMethod(model):
    """Return the SVM classification method name for a model.

    Parameters
    ----------
    model :
        A scikit-learn SVM model instance.

    Returns
    -------
    str
        'OneAgainstOne' for SVC, 'OneAgainstAll' otherwise.
    """
    is_svc = model.__class__.__name__ == 'SVC'
    return 'OneAgainstOne' if is_svc else 'OneAgainstAll'
def get_funct(sk_model):
    """Return the model's activation function name in PMML vocabulary.

    Parameters
    ----------
    sk_model :
        A scikit-learn model instance exposing `activation`.

    Returns
    -------
    str
        The activation name; 'relu' is translated to 'rectifier'.
    """
    activation = sk_model.activation
    return 'rectifier' if activation == 'relu' else activation
def get_dtype(feat_value):
    """Return a coarse data-type label for a value.

    Parameters
    ----------
    feat_value :
        The value whose data type should be identified.

    Returns
    -------
    str or None
        One of 'float', 'integer', 'long', 'complex', 'string', matched
        by substring of the value's type name (None if nothing matches).
    """
    type_name = str(type(feat_value))
    # Order matters: e.g. 'float' must be tested before 'int'.
    for token, label in (('float', 'float'), ('int', 'integer'),
                         ('long', 'long'), ('complex', 'complex'),
                         ('str', 'string')):
        if token in type_name:
            return label
import torch
def pairwise_orthogonalization_torch(v1, v2, center: bool=False):
    """Orthogonalize the columns of v1 against the matching columns of v2.

    Column i of v2 is projected out of column i of v1 (pairwise, so no
    numerical-stability concerns), and the explained variance ratio of
    v2 on v1 is reported. Conventionally v1 is y_true and v2 is y_pred.
    RH 2021

    Args:
        v1 (tensor): y_true; a single vector or a 2-D array of column vectors.
        v2 (tensor): y_pred; same layout and shape as v1.
        center (bool): subtract the column means first
            (centering prevents negative EVR values).

    Returns:
        v1_orth (tensor): v1 with the projections onto v2 removed (same size).
        EVR (tensor): per-column explained variance ratio of v2 on v1.
        EVR_total_weighted (scalar): EVR averaged over columns, weighted
            by each column's variance in v1.
        EVR_total_unweighted (scalar): plain mean of EVR over columns.
    """
    assert v1.ndim == v2.ndim
    if v1.ndim == 1:
        v1 = v1[:, None]
        v2 = v2[:, None]
    assert v1.shape[1] == v2.shape[1]
    assert v1.shape[0] == v2.shape[0]

    if center:
        v1 = v1 - v1.mean(dim=0)
        v2 = v2 - v2.mean(dim=0)

    # Per-column projection coefficient <v1, v2> / <v2, v2>.
    coef = torch.sum(v1 * v2, dim=0) / torch.sum(v2 * v2, dim=0)
    v1_orth = v1 - coef * v2

    var_v1 = torch.var(v1, dim=0)
    EVR = 1 - torch.var(v1_orth, dim=0) / var_v1
    EVR_total_weighted = torch.sum(var_v1 * EVR) / torch.sum(var_v1)
    EVR_total_unweighted = torch.mean(EVR)
    return v1_orth.squeeze(), EVR, EVR_total_weighted, EVR_total_unweighted
def _reasonable_histogram_range(arr):
"""Return 'range' argument for np.histogram
Fixes problem with too small default ranges
which is roughly arr.max()-arr.min() < 1e-08.
We take 5*e-8 as threshold in order to be safe.
Parameters
----------
arr: array
array to calculate histogram for
Returns
-------
(float, float)
The lower and upper range of the bins.
"""
arr_min = arr.min()
arr_max = arr.max()
if arr_max - arr_min < 5e-8:
hist_range = (arr_min - 1e-3, arr_max + 1e-3)
else:
hist_range = (arr_min, arr_max)
return hist_range | 0.871721 | 0.600364 |
import torch
def distChamfer(a, b):
    """Chamfer nearest-neighbor squared distances between two point clouds.

    :param a: point clouds, Batch x num_points x dim
    :param b: point clouds, Batch x num_points x dim
    :return: (min squared dist from each a-point to b,
              min squared dist from each b-point to a,
              index of nearest b-point for each a-point,
              index of nearest a-point for each b-point)
    Works for point clouds of any dimension.
    """
    batch, n_a, _ = a.size()
    _, n_b, _ = b.size()
    # Squared norms and inner products; ||x - y||^2 = ||x||^2 + ||y||^2 - 2<x,y>.
    sq_a = torch.pow(a, 2).sum(2)
    sq_b = torch.pow(b, 2).sum(2)
    inner = torch.bmm(a, b.transpose(2, 1))
    dist = sq_a.unsqueeze(2).expand(batch, n_a, n_b) \
        + sq_b.unsqueeze(1).expand(batch, n_a, n_b) - 2 * inner
    min_ab = dist.min(2)
    min_ba = dist.min(1)
    return min_ab.values, min_ba.values, min_ab.indices.int(), min_ba.indices.int()
def f11(xx):
    """
    Example of an analytic expression replacing the external point number.
    :param xx: the distance between two bodies (or markers)
    """
    denom = 0.5 * xx * xx + 1.0
    return 20.0 / denom
def container_to_string(cont):
    """Join a container's elements into a space-separated string.

    Suitable for building a command line parameter. A plain string (or
    any non-iterable) is returned unmodified via str().

    Parameters
    ----------
    cont : container
        A container object like a list, tuple, dict, or set.

    Returns
    -------
    str
        Container elements joined into a string.
    """
    is_iterable = hasattr(cont, '__iter__')
    if is_iterable and not isinstance(cont, str):
        cont = ' '.join(cont)
    return str(cont)
def parameter_value(atmosphere_data_parameter):
    """Return the ECMWF parameter code for an atmosphere gas selected in the GUI.

    Parameters
    ----------
    atmosphere_data_parameter: string
        Atmosphere data parameter selected.

    Returns
    -------
    data_param: string
        The data parameter value used in the ECMWFAPI request.

    Raises
    ------
    ValueError
        For an unrecognized parameter name. (Previously an unknown name
        crashed with an UnboundLocalError on the return statement.)
    """
    # Note: "dioxyde" spellings are kept as-is; they must match the GUI labels.
    codes = {
        "Nitrogen dioxyde": "125.210",
        "Ozone": "206.210",
        "Sulfur dioxyde": "126.210",
        "Particulate Matter <2.5 um": "73.210",
        "Particulate Matter <10 um": "74.210",
    }
    try:
        return codes[atmosphere_data_parameter]
    except KeyError:
        raise ValueError(
            "Unknown atmosphere data parameter: {!r}".format(atmosphere_data_parameter))
def define_actions(action):
    """
    Given an action string, return a list of corresponding H3.6M actions.

    Args
        action: String; either "all"/"All" or one of the Human 3.6M actions.
    Returns
        actions: List of strings; actions to use.
    Raises
        ValueError: if the action is not a valid action in Human 3.6M.
        (Fixed: the original used the Python-2 form `raise (ValueError, msg)`,
        which raises a TypeError in Python 3 rather than a ValueError.)
    """
    actions = ["Directions",
               "Discussion",
               "Eating",
               "Greeting",
               "Phoning",
               "Photo",
               "Posing",
               "Purchases",
               "Sitting",
               "SittingDown",
               "Smoking",
               "Waiting",
               "WalkDog",
               "Walking",
               "WalkTogether"]
    if action in ("All", "all"):
        return actions
    if action not in actions:
        raise ValueError("Unrecognized action: %s" % action)
    return [action]
def _verify_variational_params(variational_params):
"""Verifies that the format of the input `variational_params`.
Checks that the input parameters is a 2-tuple of tensors of equal shape.
Args:
variational_params: The parameters to check.
Raises:
RuntimeError: If the input is not a 2-tuple of tensors with equal shape.
Returns:
The input `variational_parameters`.
"""
if len(variational_params) != 2:
raise RuntimeError("Incorrect number of variational parameters.")
if variational_params[0].shape != variational_params[1].shape:
raise RuntimeError("Variational parameters must be the same shape.")
return variational_params | 0.913478 | 0.668542 |
def linear_system(x, K):
    """A linear system: scale the signal `x` by the gain `K`."""
    scaled = K * x
    return scaled
def nonlinear_system(x, K, eta=.43, power=2):
    """A non-linear system: scale the signal by `K` and add a waveform
    distortion term `eta * x**power`."""
    distortion = eta * (x ** power)
    return K * (x + distortion)
def addzeros(number):
    """Convert a number into a string padded with leading zeros to 5 digits.

    Typically used to construct filenames with equal lengths.

    Fixes/generalization: the original if-chain raised UnboundLocalError
    for number >= 10000 (and produced garbage for negatives); str.zfill
    pads identically for 0-9999 and returns numbers >= 10000 unpadded.

    :param number: the number
    :type number: int
    :return: zerostring - string with leading zeros
    :rtype: str
    """
    return str(number).zfill(5)
def norm_columns(df, colname='Time [s]', mode='min'):
    """Normalize one column of a dataframe in place by shifting it.

    :param df: DataFrame
    :type df: pd.DataFrame
    :param colname: name of the column to normalize, defaults to 'Time [s]'
    :type colname: str, optional
    :param mode: 'min' subtracts the column minimum, 'max' the maximum
    :type mode: str, optional
    :return: dataframe with the normalized column
    :rtype: pd.DataFrame
    """
    if mode == 'min':
        df[colname] -= df[colname].min()
    if mode == 'max':
        df[colname] -= df[colname].max()
    return df
def energy_stats(energy_consumption_kwh, energy_tracker):
    """Compute energy metrics from a codecarbon Energy Tracker.

    IMPORTANT: call this right after stopping the tracker, so the
    tracker's internal timestamps reflect the measured interval.

    Returns a tuple (energy in joules, measurement duration in seconds).
    """
    # 1 kWh = 1000 W * 3600 s.
    joules = energy_consumption_kwh * 1000 * 3600
    duration = energy_tracker._last_measured_time - energy_tracker._start_time
    return joules, duration
import torch
def index_points(device, points, idx):
    """
    Gather points at the given indices along the point dimension.

    Input:
        device: torch device on which to create the batch indices
        points: input points data, [B, N, C]
        idx: sample index data, [B, S] (extra trailing index dims allowed)
    Return:
        new_points: indexed points data, [B, S, C]

    Fix: the batch-index tensor was hard-coded to `.cuda()`, ignoring the
    `device` argument and breaking CPU execution; it is now created
    directly on `device`.
    """
    B = points.shape[0]
    # Shape [B, 1, 1, ...] so arange broadcasts across the index dims of idx.
    view_shape = [idx.shape[0]] + [1] * (idx.dim() - 1)
    repeat_shape = [1] + list(idx.shape[1:])
    batch_indices = torch.arange(B, dtype=torch.long, device=device) \
        .view(view_shape).repeat(repeat_shape)
    return points[batch_indices, idx, :]
def scrap(consensus, end_of_field):
    """
    Consume the first line of `consensus` unless a criterion stops us.

    Returns (consensus-without-first-line, first-line) when
    end_of_field(first-line) is False; otherwise returns
    (consensus-unchanged, None). Input without a newline is never consumed.

    :param bytes consensus: input whose first line may be consumed
    :param function end_of_field: passed a line, returns True when no match
    :returns: a tuple (updated-consensus, next-field-or-None)
    """
    if b'\n' not in consensus:
        return consensus, None
    first_line, rest = consensus.split(b'\n', 1)
    if end_of_field(first_line):
        return consensus, None
    return rest, first_line
def f(p):
    """True value of the first state.

    Args:
        p (float): probability of the action 'right'.

    Returns:
        True value of the first state. The closed form comes from
        manually solving the small linear system of Bellman equations
        with known dynamics.
    """
    numerator = 2 * p - 4
    return numerator / (p * (1 - p))
def repeating_interval(interval):
    """
    Build a ``can_retry`` function for :py:func:`retry` that always
    returns the given interval.

    :return: a function that accepts a :class:`Failure` and returns ``interval``
    """
    def can_retry(_failure):
        return interval
    return can_retry
def test_args(args):
    """Ensure arguments are properly formatted.

    Coerces the numeric `sep_out` flag into a boolean (0 -> False,
    anything else -> True).

    Inspired by `automated_scripts/cal_uvis_make_darks/cal_uvis_make_darks.py`
    by <NAME>.

    Parameters
    ----------
    args : object
        Containing the image and destination arguments.

    Returns
    -------
    args : object
        The same object, with `sep_out` normalized to a bool.
    """
    args.sep_out = args.sep_out != 0
    return args
def add_symmetric_matrix(M, M_sym):
    """Add a symmetric matrix (packed form) onto a regular 3x3 matrix.

    :param M: A [3x3] matrix to add with the symmetric matrix (modified in place).
    :type M: :py:class:`numpy.ndarray`
    :param M_sym: A [6x1] array holding the upper triangle
        (xx, xy, xz, yy, yz, zz) to add onto M.
    :type M_sym: :py:class:`numpy.ndarray`
    :return: The sum of the two matrices (same object as M).
    :rtype: :py:class:`numpy.ndarray`
    """
    # (row, col, packed-index) for the upper triangle; off-diagonal
    # entries are mirrored to keep the result symmetric.
    upper = ((0, 0, 0), (0, 1, 1), (0, 2, 2), (1, 1, 3), (1, 2, 4), (2, 2, 5))
    for i, j, k in upper:
        M[i, j] += M_sym[k]
        if i != j:
            M[j, i] += M_sym[k]
    return M
def calculate_delta(df, kpi, period="annual"):
    """
    Calculate the growth rate of a KPI from a financial statement.

    Args:
        df (pandas.DataFrame): financial statement, most recent row first
        kpi (str): key performance indicator (column name)
        period (str): "annual" (compare to row 1) or "quarterly" (row 4)

    Returns:
        Growth rate in percent.

    Raises:
        ValueError: for an unrecognized `period`. (Previously any other
        value crashed with an UnboundLocalError on `previous`.)
    """
    if period == "annual":
        previous = 1
    elif period == "quarterly":
        previous = 4
    else:
        raise ValueError(
            "period must be 'annual' or 'quarterly', got {!r}".format(period))
    latest_value = df.iloc[0][kpi]
    previous_value = df.iloc[previous][kpi]
    return (latest_value - previous_value) / previous_value * 100.0
def mmd2(PPk, QQk, PQk):
    """Calculate the squared Maximum Mean Discrepancy distance.

    Args:
        PPk: None (treated as 0, e.g. for the generator side), a full
            pairwise kernel matrix, a 1-D tensor of kernel values, or a
            precomputed scalar mean.
        QQk: same accepted forms as PPk.
        PQk: cross-kernel statistic; must not be None.
    """
    assert(PQk is not None)

    def reduce_stat(k, label):
        # Reduce a kernel statistic to a scalar: off-diagonal mean for a
        # matrix, plain mean for a vector, pass-through for a scalar.
        if k is None:
            return 0
        if len(k.shape) == 2:
            m = k.size(0)
            return (k.sum() - k.trace()) / (m**2 - m) if m != 1 else 0
        if len(k.shape) == 1:
            return k.mean()
        if len(k.shape) == 0:
            return k
        raise ValueError("Not supported `{}`.".format(label))

    pp = reduce_stat(PPk, "PPk")
    qq = reduce_stat(QQk, "QQk")
    pq = PQk.mean() if PQk.size() else PQk
    return pp + qq - 2 * pq
import torch
def nosigmloss1d(a, b):
    """Per-row binary cross-entropy along dim 1, WITHOUT applying a
    sigmoid to the inputs first (so `a` must already lie in (0, 1)).

    Args:
        a (torch.Tensor): Predicted output (probabilities)
        b (torch.Tensor): True output (targets)

    Returns:
        torch.Tensor: Loss, one value per row
    """
    pred, target = a, b
    per_element = -(target * torch.log(pred) + (1 - target) * torch.log(1 - pred))
    return per_element.mean(dim=1)
def concatenate_transforms(pet_to_t1w_tranform, t1w_to_mni_tranform):
    """Combine two transformation files into the list order applied downstream.

    Args:
        pet_to_t1w_tranform (str): PET-to-T1w transformation file path
        t1w_to_mni_tranform (str): T1w-to-MNI transformation file path

    Returns:
        list of str: both transform file paths, T1w-to-MNI first.
    """
    transform_list = [t1w_to_mni_tranform, pet_to_t1w_tranform]
    return transform_list
def crop_nifti(input_img, ref_crop):
    """Crop an input image onto the grid of a reference template.

    Uses nilearn's `resample_to_img` and writes the result into the
    current working directory as "<input-stem>_cropped.nii.gz".

    Cleanup: removed the unused `nibabel` and `numpy` imports.

    Args:
        input_img (str): path of the image to be processed
        ref_crop (str): path of the template used to crop the image

    Returns:
        output_img (str): path of the cropped image written to disk.
    """
    import os
    from nilearn.image import resample_to_img

    # Resample the individual MRI onto the cropped template grid.
    crop_img = resample_to_img(input_img, ref_crop, force_resample=True)
    stem = os.path.basename(input_img).split(".nii")[0]
    output_img = os.path.join(os.getcwd(), stem + "_cropped.nii.gz")
    crop_img.to_filename(output_img)
    return output_img
import torch
def get_rot_matrix(angles=None, yaw=None, pitch=None, roll=None):
    """Build a 3x3 rotation matrix from (yaw, pitch, roll) torch scalars.

    Pass either `angles` as a (yaw, pitch, roll) sequence, or the three
    angles individually. Matrices are composed as yaw @ pitch @ roll and
    the result is cast to float32.
    """
    if(angles is None):
        assert yaw is not None and pitch is not None and roll is not None,\
            "If angles list is not given, angles (yaw, pitch, roll) must be specified"
        angles = (yaw, pitch, roll)
    if yaw is None and pitch is None and roll is None:
        assert angles is not None, "If angles (yaw, pitch, roll) not given, angle list must be specified"
    cz, sz = torch.cos(angles[0]), torch.sin(angles[0])
    cy, sy = torch.cos(angles[1]), torch.sin(angles[1])
    cx, sx = torch.cos(angles[2]), torch.sin(angles[2])
    yaw_mat = torch.tensor([
        [cz, -sz, 0],
        [sz, cz, 0],
        [0, 0, 1],
    ], dtype=torch.double)
    pitch_mat = torch.tensor([
        [cy, 0, sy],
        [0, 1, 0],
        [-sy, 0, cy],
    ], dtype=torch.double)
    roll_mat = torch.tensor([
        [1, 0, 0],
        [0, cx, -sx],
        [0, sx, cx],
    ], dtype=torch.double)
    return (yaw_mat @ pitch_mat @ roll_mat).float()
def c_edge(Eg):
    """Compute the Compton edge energy for a given gamma energy.

    Parameters
    ----------
    Eg : float
        Energy of the gamma that Compton scatters

    Returns
    -------
    Ec : float
        Energy of the Compton edge

    Notes
    -----
    .. math:: E_c(E_\\gamma) = \\frac{2.0 E_\\gamma^2}{511.0+2.0E_\\gamma}
    """
    numerator = 2.0 * Eg**2
    return numerator / (511.0 + 2.0 * Eg)
def frohner_cor(sig1, sig2, n1, n2):
    """
    Extrapolate a cross section from two sample thicknesses (Frohner).

    Parameters
    ----------
    sig1 : array_like
        Cross section [barns] of the thinner of the two samples.
    sig2 : array_like
        Cross section [barns] of the thicker of the two samples.
    n1 : float
        Atom density [atoms/barn] of the thinner sample
    n2 : float
        Atom density [atoms/barn] of the thicker sample

    Returns
    -------
    sig0 : array_like
        The extrapolated cross section from sig1 and sig2
    """
    numerator = n2*sig1 - n1*sig2
    return numerator / (n2 - n1)
def frohner_cor_3rd_order(sig1, sig2, sig3, n1, n2, n3):
    """
    Extrapolate a cross section from three sample thicknesses (Frohner).

    Parameters
    ----------
    sig1, sig2, sig3 : array_like
        Cross sections [barns] of the thinnest, middle, and thickest
        of the three samples, respectively.
    n1, n2, n3 : float
        Atom densities [atoms/barn] of the thinnest, middle, and
        thickest samples, respectively.

    Returns
    -------
    sig0 : array_like
        The extrapolated cross section from sig1, sig2, and sig3
    """
    # Common slope factor appearing in both numerator terms.
    slope = (n1-n3)/(n1-n2)
    numer1 = (n1*sig2-n2*sig1)*(n3**2-n1**2-slope*(n2**2-n1**2))
    numer2 = (n1*n2**2-n1**2*n2)*(sig3-sig2-slope*(sig2-sig1))
    denom = (n1-n2)*(n3**2-n1**2) - (n1-n3)*(n2**2-n1**2)
    return (numer1-numer2)/denom
def compare_data(plt_type, correct, given):
    """
    Check whether the plotted data matches the expected data.

    Handles plots of different types: if a histogram was plotted with
    the expected data for a line plot, comparison still proceeds.

    Args:
        plt_type (str): The expected type of this plot
        correct (List of Int or List of List of Int): The expected data.
        given (Dict): The actual plotted data and information

    Returns:
        bool: Whether the correct data was found in the given plot.
    """
    # Infer expected x/y series from the `correct` argument.
    if plt_type == 'hist':
        correct_xs, correct_ys = None, correct
    elif not correct:
        correct_xs, correct_ys = [], []
    elif isinstance(correct[0], (tuple, list)):
        # A pair of lists: (xs, ys).
        correct_xs, correct_ys = correct
    else:
        # A single list of y-values; xs default to 0..len-1.
        correct_xs, correct_ys = list(range(len(correct))), correct

    if given['type'] == 'hist':
        return correct_ys == given['values']
    if plt_type == 'hist':
        return correct_ys == given['y']
    return correct_xs == given['x'] and correct_ys == given['y']
def kmh_to_ms(kmh):
    """Convert a speed from kilometers/hour to meters/second."""
    meters_per_second = kmh / 3.6
    return meters_per_second
def lr_schedule(epoch):
    """Learning Rate Schedule.

    The learning rate is reduced after 80, 120, 160 and 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs

    # Returns
        lr (float32): learning rate
    """
    base = 1e-3
    # Highest boundary first; the first match wins.
    for boundary, factor in ((180, 0.5e-3), (160, 1e-3), (120, 1e-2), (80, 1e-1)):
        if epoch > boundary:
            lr = base * factor
            break
    else:
        lr = base
    print('Learning rate: ', lr)
    return lr
def date_to_millis(dt):
    """
    Convert a datetime object to the number of milliseconds since the
    Unix epoch.

    Fix: multiply before truncating so sub-second precision is kept
    (the timestamp was previously truncated to whole seconds first,
    silently discarding milliseconds).
    """
    return int(dt.timestamp() * 1000)
def mask(length, offset=0):
    """
    Generate a bitmask with the given parameters.

    :param length: The bit length of the mask.
    :param offset: The offset of the mask from the LSB bit. [default: 0]
    :return: An integer representing the bit mask.
    """
    ones = (1 << length) - 1
    return ones << offset
def ps_weight_timemean(field, ps):
    """Surface-pressure-weighted time mean of an atmospheric field.

    input:
        field   xr.DataArray or xr.Dataset
        ps      surface pressure field with the same dimensions as field
                (it does not need the vertical coordinates)
    return
        same structure as field, averaged over 'time'
    """
    weighted_mean = (field * ps).mean('time')
    return weighted_mean / ps.mean('time')
def get_sample_id(bundle):
    """Return the sample id from the given bundle.

    Args:
        bundle (humancellatlas.data.metadata.Bundle): bundle holding all
            the necessary metadata.

    Returns:
        str: the document id of the first sequencing input.
    """
    first_input = bundle.sequencing_input[0]
    return str(first_input.document_id)
def calcMean(data):
    """\
    Calculate the statistical (arithmetic) mean.

    :param data: List of values
    :returns: the mean of the list, as a float.
    """
    count = len(data)
    return sum(data) / float(count)
def secsToNearestMilli(value):
    """\
    Convert seconds to the nearest whole number of milliseconds.

    :param value: seconds as a floating point number
    :return: value expressed as an integer number of milliseconds (rounded)
    """
    milliseconds = value * 1000
    return int(round(milliseconds))
def gapBetweenRanges(rangeA, rangeB):
    """\
    Return the gap between two value ranges, or zero if they overlap.

    The sign of the returned value indicates which range is below the
    other. For example:

    * The gap between (0,10) and (15,25) is -5
    * The gap between (0,10) and (9,20) is 0
    * The gap between (20,30) and (10,18) is 2

    :param rangeA: a tuple (lo,hi) representing a range of values.
    :param rangeB: a tuple (lo,hi) representing a range of values.
    :returns: zero if the two ranges overlap; otherwise the gap separating
        them — negative if rangeA is below rangeB, positive if above.
    """
    lowA, highA = rangeA
    lowB, highB = rangeB
    if lowA > highB:
        # rangeA lies entirely above rangeB.
        return lowA - highB
    if highA < lowB:
        # rangeA lies entirely below rangeB.
        return highA - lowB
    return 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.