content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M)
---|---|---|
import numpy as np
def geocentric_rotation(sphi, cphi, slam, clam):
"""
This rotation matrix is given by the following quaternion operations
qrot(lam, [0,0,1]) * qrot(phi, [0,-1,0]) * [1,1,1,1]/2
or
qrot(pi/2 + lam, [0,0,1]) * qrot(-pi/2 + phi , [-1,0,0])
where
qrot(t,v) = [cos(t/2), sin(t/2)*v[1], sin(t/2)*v[2], sin(t/2)*v[3]]
"""
M = np.zeros(9)
# Local X axis (east) in geocentric coords
M[0] = -slam; M[3] = clam; M[6] = 0;
# Local Y axis (north) in geocentric coords
M[1] = -clam * sphi; M[4] = -slam * sphi; M[7] = cphi;
# Local Z axis (up) in geocentric coords
M[2] = clam * cphi; M[5] = slam * cphi; M[8] = sphi;
return M | 83d37e79e35cab2fc309a640751fb85a9cab0177 | 3,659,200 |
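A minimal usage sketch (not part of the dataset record above): assuming numpy and the geocentric_rotation function are in scope, it evaluates the flattened 3x3 matrix for a sample latitude/longitude and pulls out the local east/north/up axes by column.

import numpy as np

lat, lon = np.radians(45.0), np.radians(-75.0)   # illustrative geodetic latitude / longitude
M = geocentric_rotation(np.sin(lat), np.cos(lat), np.sin(lon), np.cos(lon))
east, north, up = M[0::3], M[1::3], M[2::3]      # columns 0/1/2 of the flattened 3x3 matrix
print(east, north, up)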
import pandas as pd
from pandas import DataFrame, Series
def get_price_sma(
    ohlcv: DataFrame,
    window: int = 50,
    price_col: str = "close",
) -> Series:
    """
    Ratio of price to its simple moving average (SMA) over the given window.
    """
return pd.Series(
ohlcv[price_col] / get_sma(ohlcv, window),
name="Price/SMA{}".format(window),
) | 7f356610462b9f0fbc13c02c6f093d5ec29d4e76 | 3,659,201 |
from copy import deepcopy
from math import inf
def map_to_closest(multitrack, target_programs, match_len=True, drums_first=True):
    """
    Keep the tracks closest to target_programs and map each of them to the
    corresponding program available in target_programs.
    multitrack (pypianoroll.Multitrack): Multitrack to normalize.
    target_programs (list): List of available programs.
    match_len (bool): If True, trim the multitrack to len(target_programs) tracks
        (return only the len(target_programs) closest tracks in multitrack).
    drums_first (bool): If True, sort the kept tracks so drum tracks come first.
    """
new_multitrack = deepcopy(multitrack)
for track in new_multitrack.tracks:
min_dist = inf
for target in target_programs:
dist = abs(track.program - target)
if dist < min_dist:
min_dist = dist
track.program = target
track.min_dist = min_dist
if match_len:
length = len(target_programs)
new_multitrack.tracks.sort(key=lambda x: x.min_dist)
new_multitrack.tracks = new_multitrack.tracks[:length]
if drums_first:
new_multitrack.tracks.sort(key=lambda x: not x.is_drum)
return new_multitrack | 7fd91726fbc66dd3a3f233be9056c00b1b793f46 | 3,659,202 |
import time
import numpy as np
def train_naive(args, X_train, y_train, X_test, y_test, rng, logger=None):
"""
Compute the time it takes to delete a specified number of
samples from a naive model sequentially.
"""
# initial naive training time
model = get_naive(args)
start = time.time()
model = model.fit(X_train, y_train)
before_train_time = time.time() - start
logger.info('\n[{}] before train time: {:.3f}s'.format('naive', before_train_time))
# predictive performance of the naive model
auc, acc, ap = exp_util.performance(model, X_test, y_test, logger=logger, name='naive')
# naive train after deleting data
delete_indices = rng.choice(np.arange(X_train.shape[0]), size=args.n_delete, replace=False)
new_X_train = np.delete(X_train, delete_indices, axis=0)
new_y_train = np.delete(y_train, delete_indices)
# after training time
model = get_naive(args)
start = time.time()
model = model.fit(new_X_train, new_y_train)
after_train_time = time.time() - start
logger.info('[{}] after train time: {:.3f}s'.format('naive', after_train_time))
# interpolate sequential updates
total_time = ((before_train_time + after_train_time) / 2) * args.n_delete
initial_utility = auc, acc, ap
return total_time, initial_utility | 0514df318219a9f69dbd49b65cad2664480e3031 | 3,659,203 |
def helperFunction():
"""A helper function created to return a value to the test."""
value = 10 > 0
return value | 2c4f2e5303aca2a50648860de419e8f94581fee7 | 3,659,204 |
def app_config(app_config):
"""Get app config."""
app_config['RECORDS_FILES_REST_ENDPOINTS'] = {
'RECORDS_REST_ENDPOINTS': {
'recid': '/files'
}
}
app_config['FILES_REST_PERMISSION_FACTORY'] = allow_all
app_config['CELERY_ALWAYS_EAGER'] = True
return app_config | 156f48cfd0937e5717de133fdfdf38c86e66ba71 | 3,659,205 |
from datetime import datetime
def ts(timestamp_string: str):
"""
Convert a DataFrame show output-style timestamp string into a datetime value
    which will marshal to a Hive/Spark TimestampType
:param timestamp_string: A timestamp string in "YYYY-MM-DD HH:MM:SS" format
:return: A datetime object
"""
return datetime.strptime(timestamp_string, '%Y-%m-%d %H:%M:%S') | 1902e75ab70c7869686e3a374b22fa80a6dfcf1a | 3,659,206 |
import numpy as np
def boxes(frame, data, f, parameters=None, call_num=None):
"""
Boxes places a rotated rectangle on the image that encloses the contours of specified particles.
Notes
-----
This method requires you to have used contours for the tracking and run boxes
in postprocessing.
Parameters
----------
cmap_type
Options are 'static' or 'dynamic'
cmap_column
Name of column containing data to specify colour in dynamic mode,
cmap_max
Specifies max data value for colour map in dynamic mode
cmap_scale
Scale factor for colour map
colour
Colour to be used for static cmap_type (B,G,R) values from 0-255
classifier_column
None selects all particles, column name of classifier values to specify subset of particles
classifier
The value in the classifier column which applies to subset (True or False)
thickness
Thickness of box. -1 fills the box in
Args
----
frame
This is the unmodified frame of the input movie
data
This is the dataframe that stores all the tracked data
f
frame index
parameters
Nested dictionary like object (same as .param files or output from general.param_file_creator.py)
call_num
Usually None but if multiple calls are made modifies method name with get_method_key
Returns
-----------
annotated frame : np.ndarray
"""
try:
method_key = get_method_key('boxes', call_num=call_num)
thickness = get_param_val(parameters[method_key]['thickness'])
subset_df = _get_class_subset(data, f, parameters, method=method_key)
box_pts = subset_df[['box_pts']].values
if np.shape(box_pts)[0] == 1:
df_empty = np.isnan(box_pts[0])
if np.all(df_empty):
#0 boxes
return frame
colours = colour_array(subset_df, f, parameters, method=method_key)
sz = np.shape(frame)
for index, box in enumerate(box_pts):
frame = _draw_contours(frame, box, col=colours[index],
thickness=int(get_param_val(parameters[method_key]['thickness'])))
return frame
except Exception as e:
raise BoxesError(e) | 813ef54a8c8b99d003b9ca74b28befc63da2c0b9 | 3,659,207 |
import numpy as np
def process_states(states):
"""
Separate list of states into lists of depths and hand states.
:param states: List of states.
:return: List of depths and list of hand states; each pair is from the same state.
"""
depths = []
hand_states = []
for state in states:
depths.append(state[0])
hand_states.append(state[1])
depths = np.array(depths, dtype=np.float32)
hand_states = np.array(hand_states, dtype=np.int32)
return depths, hand_states | 6f71d2471a50a93a3dac6a4148a6e3c6c2aa61e8 | 3,659,208 |
import torch
import math
def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
"""
Perform uniform spatial sampling on the images and corresponding boxes.
Args:
images (tensor): images to perform uniform crop. The dimension is
`num frames` x `channel` x `height` x `width`.
        size (int): size of height and width to crop the images.
spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
is larger than height. Or 0, 1, or 2 for top, center, and bottom
crop if height is larger than width.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
        scale_size (int): optional. If not None, resize the images to scale_size before
performing any crop.
Returns:
cropped (tensor): images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
assert spatial_idx in [0, 1, 2]
ndim = len(images.shape)
if ndim == 3:
images = images.unsqueeze(0)
height = images.shape[2]
width = images.shape[3]
if scale_size is not None:
if width <= height:
width, height = scale_size, int(height / width * scale_size)
else:
width, height = int(width / height * scale_size), scale_size
images = torch.nn.functional.interpolate(
images,
size=(height, width),
mode="bilinear",
align_corners=False,
)
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_idx == 0:
y_offset = 0
elif spatial_idx == 2:
y_offset = height - size
else:
if spatial_idx == 0:
x_offset = 0
elif spatial_idx == 2:
x_offset = width - size
cropped = images[
:, :, y_offset : y_offset + size, x_offset : x_offset + size
]
cropped_boxes = (
crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
)
if ndim == 3:
cropped = cropped.squeeze(0)
return cropped, cropped_boxes | c3e1d7eeb50b959fe0a075c742e23e5206730748 | 3,659,209 |
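A quick usage sketch, assuming torch is installed and the uniform_crop above is importable; it center-crops a random 240x320 clip to 224x224 and checks the output shape.

import torch

clip = torch.rand(8, 3, 240, 320)                      # `num frames` x `channel` x `height` x `width`
cropped, _ = uniform_crop(clip, size=224, spatial_idx=1)
print(cropped.shape)                                   # torch.Size([8, 3, 224, 224])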
from typing import Tuple
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.figure import Figure
def plot_xr_complex_on_plane(
var: xr.DataArray,
marker: str = "o",
label: str = "Data on imaginary plane",
cmap: str = "viridis",
c: np.ndarray = None,
xlabel: str = "Real{}{}{}",
ylabel: str = "Imag{}{}{}",
legend: bool = True,
ax: object = None,
**kwargs,
) -> Tuple[Figure, Axes]:
"""Plots complex data on the imaginary plane. Points are colored by default
according to their order in the array.
Parameters
----------
var
1D array of complex data.
marker
Marker used for the scatter plot.
label
Data label for the legend.
cmap
The colormap to use for coloring the points.
c
Color of the points. Defaults to an array of integers.
    xlabel
        Label of the x axis.
    ylabel
        Label of the y axis.
legend
Calls :meth:`~matplotlib.axes.Axes.legend` if ``True``.
ax
The matplotlib axes. If ``None`` a new axes (and figure) is created.
"""
if ax is None:
_, ax = plt.subplots()
if c is None:
c = np.arange(0, len(var))
ax.scatter(var.real, var.imag, marker=marker, label=label, c=c, cmap=cmap, **kwargs)
unit_str = get_unit_from_attrs(var)
ax.set_xlabel(xlabel.format(" ", var.name, unit_str))
ax.set_ylabel(ylabel.format(" ", var.name, unit_str))
if legend:
ax.legend()
return ax.get_figure(), ax | 8702f14fe0fbc508c3cb5893cc5a4a73bfdd0b85 | 3,659,210 |
import os
from glob import glob  # the body calls glob(...) directly, so importing the bare module would fail
from dask.base import tokenize  # dask's deterministic hashing helper, not the stdlib tokenize module
# sk_imread, add_leading_dimension and Array are assumed to come from skimage / dask.array internals
def imread(filename, imread=None, preprocess=None):
"""Read a stack of images into a dask array
Parameters
----------
filename: string
A globstring like 'myfile.*.png'
imread: function (optional)
Optionally provide custom imread function.
Function should expect a filename and produce a numpy array.
Defaults to ``skimage.io.imread``.
preprocess: function (optional)
Optionally provide custom function to preprocess the image.
Function should expect a numpy array for a single image.
Examples
--------
>>> from dask.array.image import imread
>>> im = imread('2015-*-*.png') # doctest: +SKIP
>>> im.shape # doctest: +SKIP
(365, 1000, 1000, 3)
Returns
-------
Dask array of all images stacked along the first dimension. All images
will be treated as individual chunks
"""
imread = imread or sk_imread
filenames = sorted(glob(filename))
if not filenames:
raise ValueError("No files found under name %s" % filename)
name = "imread-%s" % tokenize(filenames, map(os.path.getmtime, filenames))
sample = imread(filenames[0])
if preprocess:
sample = preprocess(sample)
keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))]
if preprocess:
values = [
(add_leading_dimension, (preprocess, (imread, fn))) for fn in filenames
]
else:
values = [(add_leading_dimension, (imread, fn)) for fn in filenames]
dsk = dict(zip(keys, values))
chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)
return Array(dsk, name, chunks, sample.dtype) | c9e830db9b0ebd0c7634bb2170bc80df3b2c5dcc | 3,659,211 |
import gzip
import sys
from collections import defaultdict
from tqdm import tqdm  # the loop calls tqdm(...) directly
def importPredictions_Tombo(infileName, chr_col=0, start_col=1, readid_col=3, strand_col=5, meth_col=4, baseFormat=1,
score_cutoff=(-1.5, 2.5), output_first=False, include_score=False, filterChr=HUMAN_CHR_SET,
save_unified_format=False, outfn=None):
"""
We checked input as 0-based start format.
    Return dict of key='chr1 123 +', and values=list of [1 1 0 0 1 1], in which 0-unmethylated, 1-methylated.
Note that the function requires per read stats, not frequencies of methylation.
### Example input format from Tombo
chr1 48020 48020 3526811b-6958-49f8-b78c-a205c1b5fc6e 1.185219591257949 + TATTACACCCG
chr1 48022 48022 3526811b-6958-49f8-b78c-a205c1b5fc6e 1.6267354150537658 + TTACACCCGTT
chr1 48023 48023 3526811b-6958-49f8-b78c-a205c1b5fc6e 2.6122662196889728 + TACACCCGTTA
chr1 48024 48024 3526811b-6958-49f8-b78c-a205c1b5fc6e 2.771131774766473 + ACACCCGTTAA
chr1 48041 48041 3526811b-6958-49f8-b78c-a205c1b5fc6e 6.524775544143312 + GATTTCTAAAT
chr1 48048 48048 3526811b-6958-49f8-b78c-a205c1b5fc6e 1.9142728191641216 + AAATGCATTGA
chr1 48054 48054 3526811b-6958-49f8-b78c-a205c1b5fc6e 1.8675210090110548 + ATTGACATTTG
......
chr1 8447736 8447736 c9339e26-1898-4483-a312-b78c3fafc6a9 8.073560995614967 - CTGTGCTGTGT
chr1 8447745 8447745 c9339e26-1898-4483-a312-b78c3fafc6a9 2.4467964154940858 - GTTGACCGTGT
chr1 8447746 8447746 c9339e26-1898-4483-a312-b78c3fafc6a9 1.966921521322515 - TTGACCGTGTA
chr1 8447754 8447754 c9339e26-1898-4483-a312-b78c3fafc6a9 5.387457000225035 - GTATGCAATGG
chr1 8447761 8447761 c9339e26-1898-4483-a312-b78c3fafc6a9 -0.8580941645036908 - ATGGACACAGA
============
"""
if score_cutoff is None:
score_cutoff = (-1.5, 2.5)
infile, lines = open_file_gz_or_txt(infileName)
if save_unified_format:
outf = gzip.open(outfn, 'wt')
outf.write(f"ID\tChr\tPos\tStrand\tScore\n")
cpgDict = defaultdict(list)
row_count = 0
meth_cnt = 0
unmeth_cnt = 0
for row in tqdm(infile, total=lines, desc="Import-Tombo"):
tmp = row.strip().split("\t")
if tmp[chr_col] not in filterChr:
continue
if output_first:
logger.debug(f'row = {list(enumerate(tmp))}')
output_first = False
if baseFormat == 1:
try:
start = int(tmp[start_col]) + 1
strand = tmp[strand_col]
if strand == '-':
start = start + 1
            except Exception:
logger.error(f" ####Tombo parse error at row={row}")
continue
elif baseFormat == 0:
try:
start = int(tmp[start_col])
strand = tmp[strand_col]
if strand == '-':
start = start + 1
except Exception as e:
logger.error(f" ####Tombo parse error at row={row}, exception={e}")
continue
else:
logger.error(
f"###\timportPredictions_Tombo InputValueError: baseCount value set to '{baseFormat}'. It should be equal to 0 or 1")
sys.exit(-1)
if strand not in ['-', '+']:
raise Exception(f'The file [{infileName}] can not recognized strand-info from row={row}, please check it')
try:
methCallTombo = float(tmp[meth_col])
except Exception as e:
logger.error(f" ####Tombo parse error at row={row}, exception={e}")
continue
meth_score = -methCallTombo
if save_unified_format:
# output to 1-based for meteore, ref: https://github.com/comprna/METEORE/blob/master/script_in_snakemake/extract_tombo_per_read_results.py
outf.write(f"{tmp[readid_col]}\t{tmp[chr_col]}\t{start}\t{tmp[strand_col]}\t{methCallTombo}\n")
key = (tmp[chr_col], start, strand)
if methCallTombo < score_cutoff[0]: # below -1.5 is methylated by default
meth_indicator = 1
meth_cnt += 1
        elif methCallTombo > score_cutoff[1]:  # above 2.5 is unmethylated by default
meth_indicator = 0
unmeth_cnt += 1
else:
continue
if include_score:
cpgDict[key].append((meth_indicator, meth_score))
else:
cpgDict[key].append(meth_indicator)
row_count += 1
infile.close()
if save_unified_format:
outf.close()
logger.debug(f'Save METEORE output format to {outfn}')
logger.debug(
f"###\timportPredictions_Tombo SUCCESS: {row_count:,} methylation calls (meth-calls={meth_cnt:,}, unmeth-call={unmeth_cnt:,}) mapped to {len(cpgDict):,} CpGs with score_cutoff={score_cutoff} from {infileName} file")
return cpgDict | 88c998d07f40855694f15d34c53359150dd6f64d | 3,659,212 |
def recommend_with_rating(user, train):
"""
    Predict user u's rating for each candidate item i.
    :param user: the user
    :param train: the training set
    :return: iterator over (item, predicted rating) pairs
"""
rank = {}
ru = train[user]
for item in _movie_set:
if item in ru:
continue
rank[item] = __predict(user, item)
    # Python 2's dict.iteritems() does not exist in Python 3; use items()
    return rank.items() | 372cd9f77d8123351f4b76eeea241aa8a3bcaf97 | 3,659,213 |
def nl_to_break( text ):
"""
Text may have newlines, which we want to convert to <br />
when formatting for HTML display
"""
text=text.replace("<", "<") # To avoid HTML insertion
text=text.replace("\r", "")
text=text.replace("\n", "<br />")
return text | d2baf1c19fae686ae2c4571416b4cad8be065474 | 3,659,214 |
import requests
import logging
def get_page_state(url):
"""
Checks page's current state by sending HTTP HEAD request
:param url: Request URL
:return: ("ok", return_code: int) if request successful,
("error", return_code: int) if error response code,
(None, error_message: str) if page fetching failed (timeout, invalid URL, ...)
"""
try:
response = requests.head(url, verify=False, timeout=10)
except requests.exceptions.RequestException as exception:
logging.error(exception)
return None, "Error fetching page"
if response.status_code >= 400:
return "error", response.status_code
return "ok", response.status_code | f7b7db656968bed5e5e7d332725e4d4707f2b14b | 3,659,215 |
def unique(list_, key=lambda x: x):
"""efficient function to uniquify a list preserving item order"""
seen = set()
result = []
for item in list_:
seenkey = key(item)
if seenkey in seen:
continue
seen.add(seenkey)
result.append(item)
return result | 57c82081d92db74a7cbad15262333053a2acd3a7 | 3,659,216 |
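A small illustrative call for the record above (the records here are made up): duplicates are dropped by key, the first occurrence wins, and order is preserved.

records = [{"id": 1, "v": "a"}, {"id": 2, "v": "b"}, {"id": 1, "v": "c"}]
print(unique(records, key=lambda r: r["id"]))   # [{'id': 1, 'v': 'a'}, {'id': 2, 'v': 'b'}]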
import dateutil.parser
import pytz
def date_is_older(date_str1, date_str2):
"""
Checks to see if the first date is older than the second date.
:param date_str1:
:param date_str2:
:return:
"""
date1 = dateutil.parser.parse(date_str1)
date2 = dateutil.parser.parse(date_str2)
# set or normalize the timezone
target_tz = pytz.timezone('UTC')
if date1.tzinfo is None:
date1 = target_tz.localize(date1)
else:
date1 = target_tz.normalize(date1)
if date2.tzinfo is None:
date2 = target_tz.localize(date2)
else:
date2 = target_tz.normalize(date2)
return date1 < date2 | 48fcf26cde4276e68daa07c1250de33a739bc5cb | 3,659,217 |
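An illustrative check of the record above, assuming python-dateutil and pytz are installed; it mixes a naive and a timezone-aware string to exercise both normalization branches.

print(date_is_older("2020-01-01 00:00:00", "2020-06-01T12:00:00+02:00"))   # True
print(date_is_older("2021-01-01", "2020-06-01"))                           # False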
def shorten_namespace(elements, nsmap):
"""
    Map a list of XML tag class names onto the internal classes (e.g. with shortened namespaces)
    :param elements: list of XML tags (a single tag is also accepted)
:param nsmap: XML nsmap
:return: List of mapped names
"""
names = []
_islist = True
if not isinstance(elements, (list, frozenset)):
elements = [elements]
_islist = False
for el in elements:
for key, value in nsmap.items():
if value in el:
if key == "cim":
name = el.split(value)[-1]
name = name[1:] if name.startswith("}") else name
elif "{"+value+"}" in el:
name = el.replace("{"+value+"}", key+"_")
else:
name = el.replace(value, key+"_")
names.append(name)
if el.startswith("#"):
names.append(el.split("#")[-1])
if not _islist and len(names) == 1:
names = names[0]
return names | 73dfc4f24a9b0a73cf7b6af7dae47b880faa3e27 | 3,659,218 |
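An illustrative call for the record above; the nsmap values are made up to show the special-cased "cim" prefix versus the generic prefix replacement.

nsmap = {"cim": "http://iec.ch/TC57/2013/CIM-schema-cim16#",
         "md": "http://iec.ch/TC57/61970-552/ModelDescription/1#"}
print(shorten_namespace("{http://iec.ch/TC57/2013/CIM-schema-cim16#}ACLineSegment", nsmap))   # ACLineSegment
print(shorten_namespace("{http://iec.ch/TC57/61970-552/ModelDescription/1#}Model", nsmap))    # md_Model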
def list_select_options_stream_points():
""" Return all data_points under data_stream """
product_uid = request.args.get('productID', type=str)
query = DataStream.query
if product_uid:
query = query.filter(DataStream.productID == product_uid)
streams_tree = []
data_streams = query.many()
for data_stream in data_streams:
data_points = []
for data_point in data_stream.dataPoints:
select_option = {
'label': data_point.dataPointName,
'value': data_point.dataPointID
}
data_points.append(select_option)
streams_tree.append({
'label': data_stream.streamName,
'value': data_stream.streamID,
'children': data_points
})
return jsonify(streams_tree) | 34ce7df0ecd241a009b1c8ef44bf33ec64e3d82d | 3,659,219 |
import math
def func2():
"""
:type: None
:rtype: List[float]
"""
return [math.pi, math.pi / 2, math.pi / 4, math.pi / 8] | 62984ba7d8c1efd55569449adbf507e73888a1b7 | 3,659,220 |
def get_playlist_name(pl_id):
"""returns the name of the playlist with the given id"""
sql = """SELECT * FROM playlists WHERE PlaylistId=?"""
cur.execute(sql, (pl_id,))
return cur.fetchone()[1] | 9488eb1c32db8b66f3239dcb454a08b8ea80b8b4 | 3,659,221 |
import random
def weight(collection):
"""Choose an element from a dict based on its weight and return its key.
Parameters:
- collection (dict): dict of elements with weights as values.
Returns:
string: key of the chosen element.
"""
# 1. Get sum of weights
weight_sum = sum([value for value in collection.values()])
# 2. Generate random number between 1 and sum of weights
random_value = random.randint(1, weight_sum)
# 3. Iterate through items
for key, value in collection.items():
# 4. Subtract weight of each item from random number
random_value -= value
# 5. Compare with 0, if <= 0, that item has been chosen
if random_value <= 0:
return key
# 6. Else continue subtracting
# Should not reach here.
raise ValueError("Invalid argument value.") | 383ddadd4a47fb9ac7be0292ecc079fcc59c4481 | 3,659,222 |
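A quick sanity check for the record above, assuming random is imported as in the snippet; over many draws the counts should track the weights roughly 70/25/5.

from collections import Counter
import random

random.seed(0)
loot = {"common": 70, "rare": 25, "legendary": 5}
print(Counter(weight(loot) for _ in range(1000)))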
import numpy as np
import numpy.matlib
from scipy import spatial
def knnsearch(y, x, k):
""" Finds k closest points in y to each point in x.
Parameters
----------
x : (n,3) float array
A point cloud.
y : (m,3) float array
Another point cloud.
k : int
Number of nearest neighbors one wishes to compute.
Returns
-------
ordered_neighbors : (n,k) int array
List of k nearest neighbors to each point in x.
    dist : (n,k) float array
List of distances between each nearest neighbor and the corresponding point in x.
"""
    x, y = map(np.asarray, (x, y))
    tree = spatial.cKDTree(y)
    ordered_neighbors = tree.query(x, k)[1]  # size (len(x), k)
    ID = np.transpose(np.matlib.repmat(np.arange(np.shape(x)[0]), k, 1))
    dist = np.sum((x[ID, :] - y[ordered_neighbors, :]) ** 2, axis=2) ** .5
    return ordered_neighbors, dist | 84cc1bf0f960e1fb44dd44ab95eccce1c424ec05 | 3,659,223 |
import keras
import tensorflow as tf
from keras import backend as K
def segmentation_gaussian_measurement(
y_true,
y_pred,
gaussian_sigma=3,
measurement=keras.losses.binary_crossentropy):
""" Apply metric or loss measurement incorporating a 2D gaussian.
Only works with batch size 1.
Loop and call this function repeatedly over each sample
to use a larger batch size.
# Arguments
y_true: is assumed to be [label, x_img_coord, y_image_coord]
y_pred: is expected to be a 2D array of labels
with shape [1, img_height, img_width, 1].
"""
with K.name_scope(name='grasp_segmentation_gaussian_loss') as scope:
if keras.backend.ndim(y_true) == 4:
# sometimes the dimensions are expanded from 2 to 4
# to meet Keras' expectations.
# In that case reduce them back to 2
y_true = K.squeeze(y_true, axis=-1)
y_true = K.squeeze(y_true, axis=-1)
print('y_pred: ', y_pred)
print('y_true: ', y_true)
# y_true should have shape [batch_size, 3] here,
# label, y_height_coordinate, x_width_coordinate become shape:
# [batch_size, 1]
label = K.expand_dims(y_true[:, 0])
print('label: ', label)
y_height_coordinate = K.expand_dims(y_true[:, 1])
x_width_coordinate = K.expand_dims(y_true[:, 2])
# label = K.reshape(label, [1, 1])
print('label: ', label)
image_shape = tf.Tensor.get_shape(y_pred)
y_true_img = tile_vector_as_image_channels(label, image_shape)
y_true_img = K.cast(y_true_img, 'float32')
loss_img = measurement(y_true_img, y_pred)
y_pred_shape = K.int_shape(y_pred)
if len(y_pred_shape) == 3:
y_pred_shape = y_pred_shape[:-1]
if len(y_pred_shape) == 4:
y_pred_shape = y_pred_shape[1:3]
def batch_gaussian(one_y_true):
# def batch_gaussian(y_height_coord, x_width_coord):
# weights = gaussian_kernel_2D(size=y_pred_shape, center=(y_height_coord, x_width_coord), sigma=gaussian_sigma)
# weights = gaussian_kernel_2D(size=y_pred_shape, center=(y_height_coordinate, x_width_coordinate), sigma=gaussian_sigma)
return gaussian_kernel_2D(size=y_pred_shape, center=(one_y_true[0], one_y_true[1]), sigma=gaussian_sigma)
weights = K.map_fn(batch_gaussian, y_true)
loss_img = K.flatten(loss_img)
weights = K.flatten(weights)
weighted_loss_img = tf.multiply(loss_img, weights)
loss_sum = K.sum(weighted_loss_img)
loss_sum = K.reshape(loss_sum, [1, 1])
return loss_sum | 377f2fa7706c166756efdb3047937b8db2047674 | 3,659,224 |
import h5py
import numpy as np
import sys
def load_AACHEN_PARAMS(AHCHEN_h5_file, log_file_indicator):
"""
    This module extracts parameters trained with the framework https://github.com/rwth-i6/returnn
    and the neural network proposed in the mdlstm demo structure.
Args:
AHCHEN_h5_file: file in format hdf5 generated with https://github.com/rwth-i6/returnn framework.
log_file_indicator: _io.TextIOWrapper of the file where logs will be written
Returns:
The parameters of each layer of the network.
"""
try:
print('Loading AACHEN params from h5 file: ' + str(AHCHEN_h5_file))
log_file_indicator.write('\nLoading AACHEN params from h5 file: ' + str(AHCHEN_h5_file) + '.\n')
fh5 = h5py.File(AHCHEN_h5_file, 'r')
except OSError:
print('File not found: ' + str(AHCHEN_h5_file))
log_file_indicator.write('\n Exception, file not found: ' + str(AHCHEN_h5_file) + '\n')
print('Closing')
log_file_indicator.close()
sys.exit(1)
else:
w_conv0 = fh5['conv0']['W_conv0'][:]
w_conv0 = w_conv0.transpose([2, 3, 1, 0]) # AACHEN shape (n_out, n_in, filter_size), tensorflow shape (filter_size, n_in, n_out)
b_conv0 = fh5['conv0']['b_conv0'][:]
w_conv1 = fh5['conv1']['W_conv1'][:]
w_conv1 = w_conv1.transpose([2, 3, 1, 0]) # AACHEN shape (n_out, n_in, filter_size), tensorflow shape (filter_size, n_in, n_out)
b_conv1 = fh5['conv1']['b_conv1'][:]
w_conv2 = fh5['conv2']['W_conv2'][:]
w_conv2 = w_conv2.transpose([2, 3, 1, 0]) # AACHEN shape (n_out, n_in, filter_size), tensorflow shape (filter_size, n_in, n_out)
b_conv2 = fh5['conv2']['b_conv2'][:]
w_conv3 = fh5['conv3']['W_conv3'][:]
w_conv3 = w_conv3.transpose([2, 3, 1, 0]) # AACHEN shape (n_out, n_in, filter_size), tensorflow shape (filter_size, n_in, n_out)
b_conv3 = fh5['conv3']['b_conv3'][:]
w_conv4 = fh5['conv4']['W_conv4'][:]
w_conv4 = w_conv4.transpose([2, 3, 1, 0]) # AACHEN shape (n_out, n_in, filter_size), tensorflow shape (filter_size, n_in, n_out)
b_conv4 = fh5['conv4']['b_conv4'][:]
# 1 AACHEN --> dwd-fwd
V_x1_mdlstm0, V_h1_mdlstm0, V_v1_mdlstm0 = fh5['mdlstm0']['W1_mdlstm0'][:], fh5['mdlstm0']['U1_mdlstm0'][:], fh5['mdlstm0']['V1_mdlstm0'][:]
W_df_mdlstm0 = np.concatenate((V_x1_mdlstm0, V_h1_mdlstm0, V_v1_mdlstm0), axis=0)
b_df_mdlstm0 = fh5['mdlstm0']['b1_mdlstm0']
# 2 AACHEN --> uwd-fwd
V_x2_mdlstm0, V_h2_mdlstm0, V_v2_mdlstm0 = fh5['mdlstm0']['W2_mdlstm0'][:], fh5['mdlstm0']['U2_mdlstm0'][:], fh5['mdlstm0']['V2_mdlstm0'][:]
W_uf_mdlstm0 = np.concatenate((V_x2_mdlstm0, V_h2_mdlstm0, V_v2_mdlstm0), axis=0)
b_uf_mdlstm0 = fh5['mdlstm0']['b2_mdlstm0']
# 2 AACHEN --> dwd-bwd
V_x3_mdlstm0, V_h3_mdlstm0, V_v3_mdlstm0 = fh5['mdlstm0']['W3_mdlstm0'][:], fh5['mdlstm0']['U3_mdlstm0'][:], fh5['mdlstm0']['V3_mdlstm0'][:]
W_db_mdlstm0 = np.concatenate((V_x3_mdlstm0, V_h3_mdlstm0, V_v3_mdlstm0), axis=0)
b_db_mdlstm0 = fh5['mdlstm0']['b3_mdlstm0']
# 2 AACHEN --> uwd-bwd
V_x4_mdlstm0, V_h4_mdlstm0, V_v4_mdlstm0 = fh5['mdlstm0']['W4_mdlstm0'][:], fh5['mdlstm0']['U4_mdlstm0'][:], fh5['mdlstm0']['V4_mdlstm0'][:]
W_ub_mdlstm0 = np.concatenate((V_x4_mdlstm0, V_h4_mdlstm0, V_v4_mdlstm0), axis=0)
b_ub_mdlstm0 = fh5['mdlstm0']['b4_mdlstm0']
# 1 AACHEN --> dwd-fwd
V_x1_mdlstm1, V_h1_mdlstm1, V_v1_mdlstm1 = fh5['mdlstm1']['W1_mdlstm1'][:], fh5['mdlstm1']['U1_mdlstm1'][:], fh5['mdlstm1']['V1_mdlstm1'][:]
W_df_mdlstm1 = np.concatenate((V_x1_mdlstm1, V_h1_mdlstm1, V_v1_mdlstm1), axis=0)
b_df_mdlstm1 = fh5['mdlstm1']['b1_mdlstm1']
# 2 AACHEN --> uwd-fwd
V_x2_mdlstm1, V_h2_mdlstm1, V_v2_mdlstm1 = fh5['mdlstm1']['W2_mdlstm1'][:], fh5['mdlstm1']['U2_mdlstm1'][:], fh5['mdlstm1']['V2_mdlstm1'][:]
W_uf_mdlstm1 = np.concatenate((V_x2_mdlstm1, V_h2_mdlstm1, V_v2_mdlstm1), axis=0)
b_uf_mdlstm1 = fh5['mdlstm1']['b2_mdlstm1']
# 2 AACHEN --> dwd-bwd
V_x3_mdlstm1, V_h3_mdlstm1, V_v3_mdlstm1 = fh5['mdlstm1']['W3_mdlstm1'][:], fh5['mdlstm1']['U3_mdlstm1'][:], fh5['mdlstm1']['V3_mdlstm1'][:]
W_db_mdlstm1 = np.concatenate((V_x3_mdlstm1, V_h3_mdlstm1, V_v3_mdlstm1), axis=0)
b_db_mdlstm1 = fh5['mdlstm1']['b3_mdlstm1']
# 2 AACHEN --> uwd-bwd
V_x4_mdlstm1, V_h4_mdlstm1, V_v4_mdlstm1 = fh5['mdlstm1']['W4_mdlstm1'][:], fh5['mdlstm1']['U4_mdlstm1'][:], fh5['mdlstm1']['V4_mdlstm1'][:]
W_ub_mdlstm1 = np.concatenate((V_x4_mdlstm1, V_h4_mdlstm1, V_v4_mdlstm1), axis=0)
b_ub_mdlstm1 = fh5['mdlstm1']['b4_mdlstm1']
# 1 AACHEN --> dwd-fwd
V_x1_mdlstm2, V_h1_mdlstm2, V_v1_mdlstm2 = fh5['mdlstm2']['W1_mdlstm2'][:], fh5['mdlstm2']['U1_mdlstm2'][:], fh5['mdlstm2']['V1_mdlstm2'][:]
W_df_mdlstm2 = np.concatenate((V_x1_mdlstm2, V_h1_mdlstm2, V_v1_mdlstm2), axis=0)
b_df_mdlstm2 = fh5['mdlstm2']['b1_mdlstm2']
# 2 AACHEN --> uwd-fwd
V_x2_mdlstm2, V_h2_mdlstm2, V_v2_mdlstm2 = fh5['mdlstm2']['W2_mdlstm2'][:], fh5['mdlstm2']['U2_mdlstm2'][:], fh5['mdlstm2']['V2_mdlstm2'][:]
W_uf_mdlstm2 = np.concatenate((V_x2_mdlstm2, V_h2_mdlstm2, V_v2_mdlstm2), axis=0)
b_uf_mdlstm2 = fh5['mdlstm2']['b2_mdlstm2']
# 2 AACHEN --> dwd-bwd
V_x3_mdlstm2, V_h3_mdlstm2, V_v3_mdlstm2 = fh5['mdlstm2']['W3_mdlstm2'][:], fh5['mdlstm2']['U3_mdlstm2'][:], fh5['mdlstm2']['V3_mdlstm2'][:]
W_db_mdlstm2 = np.concatenate((V_x3_mdlstm2, V_h3_mdlstm2, V_v3_mdlstm2), axis=0)
b_db_mdlstm2 = fh5['mdlstm2']['b3_mdlstm2']
# 2 AACHEN --> uwd-bwd
V_x4_mdlstm2, V_h4_mdlstm2, V_v4_mdlstm2 = fh5['mdlstm2']['W4_mdlstm2'][:], fh5['mdlstm2']['U4_mdlstm2'][:], fh5['mdlstm2']['V4_mdlstm2'][:]
W_ub_mdlstm2 = np.concatenate((V_x4_mdlstm2, V_h4_mdlstm2, V_v4_mdlstm2), axis=0)
b_ub_mdlstm2 = fh5['mdlstm2']['b4_mdlstm2']
# 1 AACHEN --> dwd-fwd
V_x1_mdlstm3, V_h1_mdlstm3, V_v1_mdlstm3 = fh5['mdlstm3']['W1_mdlstm3'][:], fh5['mdlstm3']['U1_mdlstm3'][:], fh5['mdlstm3']['V1_mdlstm3'][:]
W_df_mdlstm3 = np.concatenate((V_x1_mdlstm3, V_h1_mdlstm3, V_v1_mdlstm3), axis=0)
b_df_mdlstm3 = fh5['mdlstm3']['b1_mdlstm3']
# 2 AACHEN --> uwd-fwd
V_x2_mdlstm3, V_h2_mdlstm3, V_v2_mdlstm3 = fh5['mdlstm3']['W2_mdlstm3'][:], fh5['mdlstm3']['U2_mdlstm3'][:], fh5['mdlstm3']['V2_mdlstm3'][:]
W_uf_mdlstm3 = np.concatenate((V_x2_mdlstm3, V_h2_mdlstm3, V_v2_mdlstm3), axis=0)
b_uf_mdlstm3 = fh5['mdlstm3']['b2_mdlstm3']
# 2 AACHEN --> dwd-bwd
V_x3_mdlstm3, V_h3_mdlstm3, V_v3_mdlstm3 = fh5['mdlstm3']['W3_mdlstm3'][:], fh5['mdlstm3']['U3_mdlstm3'][:], fh5['mdlstm3']['V3_mdlstm3'][:]
W_db_mdlstm3 = np.concatenate((V_x3_mdlstm3, V_h3_mdlstm3, V_v3_mdlstm3), axis=0)
b_db_mdlstm3 = fh5['mdlstm3']['b3_mdlstm3']
# 2 AACHEN --> uwd-bwd
V_x4_mdlstm3, V_h4_mdlstm3, V_v4_mdlstm3 = fh5['mdlstm3']['W4_mdlstm3'][:], fh5['mdlstm3']['U4_mdlstm3'][:], fh5['mdlstm3']['V4_mdlstm3'][:]
W_ub_mdlstm3 = np.concatenate((V_x4_mdlstm3, V_h4_mdlstm3, V_v4_mdlstm3), axis=0)
b_ub_mdlstm3 = fh5['mdlstm3']['b4_mdlstm3']
# 1 AACHEN --> dwd-fwd
V_x1_mdlstm4, V_h1_mdlstm4, V_v1_mdlstm4 = fh5['mdlstm4']['W1_mdlstm4'][:], fh5['mdlstm4']['U1_mdlstm4'][:], fh5['mdlstm4']['V1_mdlstm4'][:]
W_df_mdlstm4 = np.concatenate((V_x1_mdlstm4, V_h1_mdlstm4, V_v1_mdlstm4), axis=0)
b_df_mdlstm4 = fh5['mdlstm4']['b1_mdlstm4']
# 2 AACHEN --> uwd-fwd
V_x2_mdlstm4, V_h2_mdlstm4, V_v2_mdlstm4 = fh5['mdlstm4']['W2_mdlstm4'][:], fh5['mdlstm4']['U2_mdlstm4'][:], fh5['mdlstm4']['V2_mdlstm4'][:]
W_uf_mdlstm4 = np.concatenate((V_x2_mdlstm4, V_h2_mdlstm4, V_v2_mdlstm4), axis=0)
b_uf_mdlstm4 = fh5['mdlstm4']['b2_mdlstm4']
# 2 AACHEN --> dwd-bwd
V_x3_mdlstm4, V_h3_mdlstm4, V_v3_mdlstm4 = fh5['mdlstm4']['W3_mdlstm4'][:], fh5['mdlstm4']['U3_mdlstm4'][:], fh5['mdlstm4']['V3_mdlstm4'][:]
W_db_mdlstm4 = np.concatenate((V_x3_mdlstm4, V_h3_mdlstm4, V_v3_mdlstm4), axis=0)
b_db_mdlstm4 = fh5['mdlstm4']['b3_mdlstm4']
# 2 AACHEN --> uwd-bwd
V_x4_mdlstm4, V_h4_mdlstm4, V_v4_mdlstm4 = fh5['mdlstm4']['W4_mdlstm4'][:], fh5['mdlstm4']['U4_mdlstm4'][:], fh5['mdlstm4']['V4_mdlstm4'][:]
W_ub_mdlstm4 = np.concatenate((V_x4_mdlstm4, V_h4_mdlstm4, V_v4_mdlstm4), axis=0)
b_ub_mdlstm4 = fh5['mdlstm4']['b4_mdlstm4']
W_dense = fh5['output']['W_in_mdlstm4_output']
b_dense = fh5['output']['b_output']
return [w_conv0, b_conv0,
w_conv1, b_conv1,
w_conv2, b_conv2,
w_conv3, b_conv3,
w_conv4, b_conv4,
W_df_mdlstm0, b_df_mdlstm0, W_uf_mdlstm0, b_uf_mdlstm0, W_db_mdlstm0, b_db_mdlstm0, W_ub_mdlstm0, b_ub_mdlstm0,
W_df_mdlstm1, b_df_mdlstm1, W_uf_mdlstm1, b_uf_mdlstm1, W_db_mdlstm1, b_db_mdlstm1, W_ub_mdlstm1, b_ub_mdlstm1,
W_df_mdlstm2, b_df_mdlstm2, W_uf_mdlstm2, b_uf_mdlstm2, W_db_mdlstm2, b_db_mdlstm2, W_ub_mdlstm2, b_ub_mdlstm2,
W_df_mdlstm3, b_df_mdlstm3, W_uf_mdlstm3, b_uf_mdlstm3, W_db_mdlstm3, b_db_mdlstm3, W_ub_mdlstm3, b_ub_mdlstm3,
W_df_mdlstm4, b_df_mdlstm4, W_uf_mdlstm4, b_uf_mdlstm4, W_db_mdlstm4, b_db_mdlstm4, W_ub_mdlstm4, b_ub_mdlstm4,
W_dense, b_dense] | 3c99757a1ff7f7351729d447b54d495241df46b7 | 3,659,225 |
import pprint
def prep_doc_id_context( doc_id: str, usr_first_name: str, usr_is_authenticated: bool ) -> dict:
""" Preps context for record_edit.html template when a doc_id (meaning a `Citation` id) is included.
Called by views.edit_record() """
log.debug( 'starting prep_doc_id_context()' )
log.debug( f'doc_id, ``{doc_id}``' )
context = { 'user_first_name': usr_first_name, 'user_is_authenticated': usr_is_authenticated }
session = make_session()
common_data: dict = prepare_common_data( session )
context.update( common_data ) # merges common_data key-vals into context
doc = session.query( models_alch.Citation ).get( doc_id )
log.debug( f'doc, ``{pprint.pformat(doc.__dict__)}``' )
context['rec_id'] = None
try:
context['doc_display'] = doc.display
    except Exception:
log.exception( 'doc.display not available; traceback follows; processing will continue' )
context['doc_display'] = 'display not available'
context['doc_id'] = doc.id
log.debug( f'context (first 1000 characters), ``{pprint.pformat(context)[0:1000]}``' )
return context | f5d61134aeb8e45d7f21205b00087a11788ac028 | 3,659,226 |
import json
def request_pull(repo, requestid, username=None, namespace=None):
"""View a pull request with the changes from the fork into the project."""
repo = flask.g.repo
_log.info("Viewing pull Request #%s repo: %s", requestid, repo.fullname)
if not repo.settings.get("pull_requests", True):
flask.abort(404, description="No pull-requests found for this project")
request = pagure.lib.query.search_pull_requests(
flask.g.session, project_id=repo.id, requestid=requestid
)
if not request:
flask.abort(404, description="Pull-request not found")
if request.remote:
repopath = pagure.utils.get_remote_repo_path(
request.remote_git, request.branch_from
)
parentpath = pagure.utils.get_repo_path(request.project)
else:
repo_from = request.project_from
parentpath = pagure.utils.get_repo_path(request.project)
repopath = parentpath
if repo_from:
repopath = pagure.utils.get_repo_path(repo_from)
repo_obj = pygit2.Repository(repopath)
orig_repo = pygit2.Repository(parentpath)
diff_commits = []
diff = None
# Closed pull-request
if request.status != "Open":
commitid = request.commit_stop
try:
for commit in repo_obj.walk(commitid, pygit2.GIT_SORT_NONE):
diff_commits.append(commit)
if commit.oid.hex == request.commit_start:
break
except KeyError:
# This happens when repo.walk() cannot find commitid
pass
if diff_commits:
# Ensure the first commit in the PR as a parent, otherwise
# point to it
start = diff_commits[-1].oid.hex
if diff_commits[-1].parents:
start = diff_commits[-1].parents[0].oid.hex
# If the start and the end commits are the same, it means we are,
# dealing with one commit that has no parent, so just diff that
# one commit
if start == diff_commits[0].oid.hex:
diff = diff_commits[0].tree.diff_to_tree(swap=True)
else:
diff = repo_obj.diff(
repo_obj.revparse_single(start),
repo_obj.revparse_single(diff_commits[0].oid.hex),
)
else:
try:
diff_commits, diff = pagure.lib.git.diff_pull_request(
flask.g.session, request, repo_obj, orig_repo
)
except pagure.exceptions.PagureException as err:
flask.flash("%s" % err, "error")
except SQLAlchemyError as err: # pragma: no cover
flask.g.session.rollback()
_log.exception(err)
flask.flash(
"Could not update this pull-request in the database", "error"
)
if diff:
diff.find_similar()
form = pagure.forms.MergePRForm()
trigger_ci_pr_form = pagure.forms.TriggerCIPRForm()
# we need to leave out all members of trigger_ci_conf that have
# "meta" set to False or meta["requires_project_hook_attr"] condition
# defined and it's not met
trigger_ci_conf = pagure_config["TRIGGER_CI"]
if not isinstance(trigger_ci_conf, dict):
trigger_ci_conf = {}
trigger_ci = {}
# make sure all the backrefs are set properly on repo
pagure.lib.plugins.get_enabled_plugins(repo)
for comment, meta in trigger_ci_conf.items():
if not meta:
continue
cond = meta.get("requires_project_hook_attr", ())
if cond and not pagure.utils.project_has_hook_attr_value(repo, *cond):
continue
trigger_ci[comment] = meta
committer = False
if request.project_from:
committer = pagure.utils.is_repo_committer(request.project_from)
else:
committer = pagure.utils.is_repo_committer(request.project)
can_rebase_branch = not request.remote_git and committer
can_delete_branch = (
pagure_config.get("ALLOW_DELETE_BRANCH", True) and can_rebase_branch
)
return flask.render_template(
"repo_pull_request.html",
select="requests",
requestid=requestid,
repo=repo,
username=username,
repo_obj=repo_obj,
pull_request=request,
diff_commits=diff_commits,
diff=diff,
mergeform=form,
subscribers=pagure.lib.query.get_watch_list(flask.g.session, request),
tag_list=pagure.lib.query.get_tags_of_project(flask.g.session, repo),
can_rebase_branch=can_rebase_branch,
can_delete_branch=can_delete_branch,
trigger_ci=trigger_ci,
trigger_ci_pr_form=trigger_ci_pr_form,
flag_statuses_labels=json.dumps(pagure_config["FLAG_STATUSES_LABELS"]),
) | 7b83c83a236ed840ed5b2eefbf87859d5e120aac | 3,659,227 |
from typing import List
def make_matrix(points: List[float], degree: int) -> List[List[float]]:
"""Return a nested list representation of a matrix consisting of the basis
elements of the polynomial of degree n, evaluated at each of the points.
In other words, each row consists of 1, x, x^2, ..., x^n, where n is the degree,
and x is a value in points.
Preconditions:
- degree < len(points)
>>> make_matrix([1, 2, 3], 2)
[[1, 1, 1], [1, 2, 4], [1, 3, 9]]
"""
matrix = []
for point in points:
row = [point ** index for index in range(degree + 1)]
matrix.append(row)
return matrix | d8fbea3a0f9536cb681b001a852b07ac7b17f6c2 | 3,659,228 |
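A short follow-up sketch (not from the original record) showing the usual reason to build this Vandermonde-style matrix: solving for polynomial coefficients by least squares with numpy.

import numpy as np

points = [0.0, 1.0, 2.0, 3.0]
values = [1.0, 3.0, 7.0, 13.0]                       # samples of 1 + x + x^2
A = np.array(make_matrix(points, degree=2))
coeffs, *_ = np.linalg.lstsq(A, np.array(values), rcond=None)
print(np.round(coeffs, 6))                           # approximately [1. 1. 1.]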
def verify(request, token, template_name='uaccounts/verified.html'):
"""Try to verify email address using given token."""
try:
verification = verify_token(token, VERIFICATION_EXPIRES)
except VerificationError:
return redirect('uaccounts:index')
if verification.email.profile != request.user.profile:
return redirect('uaccounts:index')
verification.email.verified = True
verification.email.save()
verification.delete()
return render(request, template_name) | 56971aca43d04d7909ea6015fd48b6f30fa5b0ab | 3,659,229 |
from typing import Tuple
import threading
# ModelFactory, GeneratorFactory and PreFetch are assumed to be project-local helpers;
# the get_model/get_generator registry calls below are not provided by pydantic_factories.
def run_train(params: dict) -> Tuple[threading.Thread, threading.Thread]:
"""Train a network on a data generator.
params -> dictionary.
Required fields:
* model_name
* generator_name
* dataset_dir
* tile_size
* clf_name
* checkpoints_dir
* summaries_dir
Returns prefetch thread & model.fit thread"""
assert 'model_name' in params
assert 'generator_name' in params
Model = ModelFactory.get_model(params['model_name'])
Generator = GeneratorFactory.get_generator(params['generator_name'])
model = Model(**params)
feed = Generator(**params)
pf = PreFetch(feed)
t1 = threading.Thread(target=pf.fetch)
t2 = threading.Thread(target=model.fit, args=(pf,))
t1.start()
t2.start()
return t1,t2 | c3a96996c3d34c18bfeab89b14836d13829d183e | 3,659,230 |
import six
import requests
def request(url, method='GET', headers=None, original_ip=None, debug=False,
logger=None, **kwargs):
"""Perform a http request with standard settings.
A wrapper around requests.request that adds standard headers like
User-Agent and provides optional debug logging of the request.
Arguments that are not handled are passed through to the requests library.
:param string url: The url to make the request of.
:param string method: The http method to use. (eg. 'GET', 'POST')
:param dict headers: Headers to be included in the request. (optional)
:param string original_ip: Mark this request as forwarded for this ip.
(optional)
:param bool debug: Enable debug logging. (Defaults to False)
:param logging.Logger logger: A logger to output to. (optional)
:raises exceptions.ClientException: For connection failure, or to indicate
an error response code.
:returns: The response to the request.
"""
if not headers:
headers = dict()
if not logger:
logger = _logger
headers.setdefault('User-Agent', USER_AGENT)
if original_ip:
headers['Forwarded'] = "for=%s;by=%s" % (original_ip, USER_AGENT)
if debug:
string_parts = ['curl -i']
if method:
string_parts.append(' -X %s' % method)
string_parts.append(' %s' % url)
if headers:
for header in six.iteritems(headers):
string_parts.append(' -H "%s: %s"' % header)
logger.debug("REQ: %s" % "".join(string_parts))
data = kwargs.get('data')
if data:
logger.debug("REQ BODY: %s\n" % data)
try:
resp = requests.request(
method,
url,
headers=headers,
**kwargs)
except requests.ConnectionError:
msg = 'Unable to establish connection to %s' % url
raise exceptions.ClientException(msg)
if debug:
logger.debug("RESP: [%s] %s\nRESP BODY: %s\n",
resp.status_code, resp.headers, resp.text)
if resp.status_code >= 400:
logger.debug("Request returned failure status: %s",
resp.status_code)
raise exceptions.from_response(resp, method, url)
return resp | b0a6bdc0ea4fc3c9abc03fed42ec8428f532ab92 | 3,659,231 |
def adjust_seconds_fr(samples_per_channel_in_frame,fs,seconds_fr,num_frame):
"""
Get the timestamp for the first sample in this frame.
Parameters
----------
samples_per_channel_in_frame : int
number of sample components per channel.
fs : int or float
sampling frequency.
seconds_fr : int or float
seconds for this frame (from frame header)
num_frame : int
frame number (from frame header).
Returns
-------
    time_first_sample : float
timestamp [s] corresponding to the first sample of this frame.
"""
seconds_per_frame=samples_per_channel_in_frame/float(fs)
time_first_sample=float(seconds_fr)+num_frame*seconds_per_frame
return(time_first_sample) | a19775db3ebcdbe66b50c30bc531e2980ca10082 | 3,659,232 |
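A worked numeric check of the formula above, with made-up VDIF-style numbers: 16000 samples per channel per frame at 64 MHz gives 0.25 ms per frame, so frame 4 starts 1 ms into second 10.

print(adjust_seconds_fr(16000, 64e6, seconds_fr=10, num_frame=4))   # 10 + 4 * (16000 / 64e6) = 10.001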
import argparse
def create_parser():
"""
Construct the program options
"""
parser = argparse.ArgumentParser(
prog="xge",
description="Extract, transform and load GDC data onto UCSC Xena",
)
parser.add_argument(
"--version",
action="version",
version="%(prog)s {v}".format(v=__version__),
)
subparsers = parser.add_subparsers(
help="Sub-parsers for xena-gdc-ETL", dest="subcomm"
)
# equal_matrices subparser
equality_parser = subparsers.add_parser(
"xena-eql", help="Test the equality of 2 Xena matrices."
)
equality_parser.add_argument(
"df1", type=str, help='Directory for the first matrix.'
)
equality_parser.add_argument(
"df2", type=str, help='Directory for the second matrix.'
)
# gdc_check_new subparser
gdc_check_new_parser = subparsers.add_parser(
"gdc-check-new",
description="Check GDC's list of updated files and summarize "
"impacted project(s), data_type(s) and "
"analysis.workflow_type(s).",
)
gdc_check_new_parser.add_argument(
'url',
type=str,
metavar='URL',
help='URL for GDC\'s list of updated files. It can be a compressed '
'file with a supported extension, which includes ".gz", ".bz2", '
'".zip", or "xz". New files should be listed under a column named'
' by "New File UUID".',
)
# merge-xena subparser
merge_xena_subparser = subparsers.add_parser(
"merge-xena",
        description='Pipeline for merging Xena matrices of the same data '
        'type.',
)
merge_xena_subparser.add_argument(
'-f',
'--files',
type=str,
nargs='+',
required=True,
help='A list of paths for Xena matrices files to be merged. All paths '
'in this list support UNIX style pathname pattern expansion with '
'"glob". Files will be read by pandas.read_csv with sep="\t".',
)
merge_xena_subparser.add_argument(
'-t',
'--datatype',
type=str,
required=True,
        help='One data type code indicating the data type in matrices to be '
'merged. Supported data type codes include: {}'.format(
str(valid_dtype)
),
)
merge_xena_subparser.add_argument(
'-o',
'--outdir',
type=str,
default='.',
help='A directory to put the merged matrix. Defaults to the current '
'working directory of python.',
)
merge_xena_subparser.add_argument(
'-n',
'--name',
type=str,
default=None,
help='Filename for the merged matrix. Defaults to None. If None, the '
'filename will be derived from the cohort name and the data type. '
'Check "-t" and "-c" options for details.',
)
merge_xena_subparser.add_argument(
'-c',
'--cohort',
type=str,
default=None,
help='A cohort name for the merged matrix. Defaults to None. If '
'None, it will be set to a format of "MergedCohort<date>" by default. '
'For example, "MergedCohort{}".'.format(
date.today().strftime('%m%d%Y')
),
)
# Subcommand for full ETL (download, transform, and metadata)
etlparser = subparsers.add_parser(
'etl',
help='Download and transform GDC data into Xena matrix, '
'and generate corresponding metadata.',
epilog='Supported data types are: {}'.format(str(valid_dtype)),
)
etlparser.add_argument(
'-r',
'--root',
type=str,
default='.',
help='Root directory for imported data.',
)
etlparser.add_argument(
'-D',
'--delete',
action='store_true',
help='Deletes raw data upon generation of Xena_matrix.',
)
projects_group = etlparser.add_mutually_exclusive_group()
projects_group.add_argument(
'-p',
'--projects',
type=str,
nargs='+',
        help='GDC project ID(s) to be imported; or "all" if all projects on '
        'GDC are going to be imported. Defaults to "all".',
default=['all'],
)
projects_group.add_argument(
'-P',
'--not-projects',
type=str,
nargs='+',
        help='Import all projects on GDC except projects specified by this '
        'option. This option and the "-p" option are mutually exclusive.',
default=[],
)
datatype_group = etlparser.add_mutually_exclusive_group()
datatype_group.add_argument(
'-t',
'--datatype',
type=str,
nargs='+',
        help='Data type code(s) to be imported; or "all" if all supported '
        'types are going to be imported. Defaults to "all".',
default=['all'],
)
datatype_group.add_argument(
'-T',
'--not-datatype',
type=str,
nargs='+',
        help='Import all supported types except types specified by this '
        'option. This option and the "-t" option are mutually exclusive.',
default=[],
)
# Subcommand for making metadata
metaparser = subparsers.add_parser(
'metadata',
help='Generate metadata for a Xena matrix',
epilog='Supported data types are: {}'.format(str(valid_dtype)),
)
metaparser.add_argument(
'-p',
'--project',
type=str,
required=True,
help='The project of the matrix.',
)
metaparser.add_argument(
'-t',
'--datatype',
type=str,
required=True,
help='One data type code for the matrix.',
)
metaparser.add_argument(
'-m', '--matrix', type=str, required=True, help='Path to a Xena matrix'
)
metaparser.add_argument(
'-r',
'--release',
type=float,
required=True,
help='GDC data release number.',
)
return parser | a26669161e8e768edf850cffa3f405f6b3ce8033 | 3,659,233 |
def add_header(unicode_csv_data, new_header):
"""
    Prepend new_header (joined with commas) to the CSV rows and return an iterator over all rows.
"""
final_iterator = [",".join(new_header)]
for row in unicode_csv_data:
final_iterator.append(row)
return iter(final_iterator) | 1fa50492d786aa28fba6062ac472f1c6470a6311 | 3,659,234 |
def get_most_energetic(particles):
"""Get most energetic particle. If no particle with a non-NaN energy is
found, returns a copy of `NULL_I3PARTICLE`.
Parameters
----------
    particles : ndarray of dtype I3PARTICLE_T
Returns
-------
most_energetic : shape () ndarray of dtype I3PARTICLE_T
"""
return get_best_filter(
particles=particles, filter_function=true_filter, cmp_function=more_energetic,
) | b43e275183b0c2992cfd28239a7e038965b40ccf | 3,659,235 |
import sys
import re
def parse_file(path):
"""Parses a file for ObjectFiles.
Args:
path: String path to the file.
Returns:
List of ObjectFile objects parsed from the given file.
"""
if sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# Assume Linux has GNU objdump. This has the options:
# -t (list symbols), -C (de-mangle symbol names)
objdump_args = ['objdump', '-t', '-C']
elif sys.platform.startswith('darwin'):
# Assume OSX has LLVM objdump. This has the options:
# -t (list symbols)
objdump_args = ['objdump', '-t']
objdump_args.append(path)
with StreamingProcess(objdump_args) as proc:
# Find the first non-blank line.
first_line = proc.peek()
while not first_line:
try:
proc.next()
first_line = proc.peek()
except StopIteration:
return []
# Is this an archive?
match = re.match(r'^.*[Aa]rchive\s+(.+):$', first_line)
if match:
# In this format we have to skip this descriptive line.
proc.next()
return parse_archive(match.group(1), proc)
# Some objdumps format archives differently.
match = re.match(r'^(.+)\((.+)\):\s+file format', first_line)
if match:
return parse_archive(match.group(1), proc)
# Otherwise maybe it's an object file?
match = re.match(r'^(.+):\s+file format', first_line)
if match:
return [parse_object_file(match.group(1), proc)]
# Otherwise it's not an archive or object file.
return [] | f0e32e9ab8bb624038ac2d277eb0b03838a44151 | 3,659,236 |
import cupy
def stack(tup, axis=0, out=None):
"""Stacks arrays along a new axis.
Args:
tup (sequence of arrays): Arrays to be stacked.
axis (int): Axis along which the arrays are stacked.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: Stacked array.
.. seealso:: :func:`numpy.stack`
"""
return concatenate([cupy.expand_dims(x, axis) for x in tup], axis, out) | 5f97bed62c77f28415ae82402cbb379372b4708c | 3,659,237 |
import numpy as np
import pandas as pd
from sklearn.cluster import OPTICS
from sklearn.compose import ColumnTransformer
from sklearn.experimental import enable_iterative_imputer  # noqa: F401  (enables IterativeImputer)
from sklearn.impute import IterativeImputer
from sklearn.metrics import explained_variance_score, mean_squared_error
from sklearn.preprocessing import OneHotEncoder, RobustScaler
def winning_pipeline(mydata, mytestdata, myfinalmodel, feature_selection_done=True, myfeatures=None, numerical_attributes=None):
    """
    Prepares the training and test data and evaluates the final model.
    Adds OPTICS-based location clusters from Latitude/Longitude, imputes missing values with
    IterativeImputer, scales the numerical attributes with RobustScaler, fits `myfinalmodel`
    on the prepared training data and scores its predictions on the prepared test data.
    Arguments
    @mydata: training data (may contain missing values, not scaled)
    @mytestdata: test data prepared in the same way as the training data
    @myfinalmodel: sklearn estimator to fit on the prepared training data
    @feature_selection_done: Boolean flag indicating if feature selection has been done to the data in `mydata`
    @myfeatures: list of informative features to keep when feature_selection_done is True
    @numerical_attributes: list of numerical columns to scale with RobustScaler
    """
# part 1 create location feature for training data using optics clustering
optics_df = mydata[['Latitude','Longitude']].copy()
clust = OPTICS(min_samples=50, xi=.05, min_cluster_size=.05)
clust.fit(optics_df)
#
optics_df['clust_label'] = clust.labels_
#
location_max = np.max(optics_df.clust_label.unique())
#optics labels noisy samples as -1 need to replace for successful onehotencoding
optics_df['clust_label'].replace([-1],location_max+1,inplace=True)
#one hot encoding and combining to mydata
enc = OneHotEncoder(categories='auto')
optics_df_1hot = enc.fit_transform(optics_df[['clust_label']])
location_labels = ['cluster' + str(l) for l in optics_df.clust_label.unique()]
optics_df_1hot = pd.DataFrame(optics_df_1hot.todense(),index = optics_df.index,columns= location_labels )
#part1done cluster columns added
mydata = pd.concat([mydata,optics_df_1hot],axis=1)
#part 2 drop unneccessary columns in our case
mydata_labels = mydata['med_rental_rate'].copy()
mydata = mydata.drop('med_rental_rate',axis =1)
if feature_selection_done:
mydata = mydata.loc[:,myfeatures].copy()
else:
mydata = mydata.drop(['city','Latitude','Longitude','change_hunits','studio_1000_1499', 'studio_1500_more',
'studio_750_999', 'onebed_1000_1499', 'onebed_1500_more',
'onebed_750_999', 'twobed_1000_1499', 'twobed_1500_more',
'twobed_750_999', 'threebed_1000_1499', 'threebed_1500_more',
'threebed_750_999'],axis=1)
imputer = IterativeImputer(max_iter = 10 ,random_state =22,min_value=0)
imputed_dat = imputer.fit_transform(mydata)
#scale only numerical attrbs which are everything but the columns which were appended earlier
imputed_dat = pd.DataFrame(imputed_dat,columns=mydata.columns)
ct = ColumnTransformer(
[('scale1',RobustScaler(),numerical_attributes)],
remainder = 'passthrough')
X_train_prepped = ct.fit_transform(imputed_dat)
#to pickle
processed_training_data = X_train_prepped.copy()
#nowfor the test data
# part 1 create location feature for test data using optics clustering
optics_df = mytestdata[['Latitude','Longitude']].copy()
clust = OPTICS(min_samples=50, xi=.05, min_cluster_size=.05)
clust.fit(optics_df)
#
optics_df['clust_label'] = clust.labels_
#
location_max = np.max(optics_df.clust_label.unique())
#optics labels noisy samples as -1 need to replace for successful onehotencoding
optics_df['clust_label'].replace([-1],location_max+1,inplace=True)
#one hot encoding and combining to mydata
enc = OneHotEncoder(categories='auto')
optics_df_1hot = enc.fit_transform(optics_df[['clust_label']])
location_labels = ['cluster' + str(l) for l in optics_df.clust_label.unique()]
optics_df_1hot = pd.DataFrame(optics_df_1hot.todense(),index = optics_df.index,columns= location_labels )
#part1done cluster columns added
mytestdata = pd.concat([mytestdata,optics_df_1hot],axis=1)
#part 2 drop unneccessary columns in our case
mytest_data_labels = mytestdata['med_rental_rate'].copy()
mytestdata = mytestdata.drop('med_rental_rate',axis =1)
if feature_selection_done:
mytestdata = mytestdata.loc[:,myfeatures].copy()
else:
        # drop the same unneeded columns from the test data (the original dropped from mydata here by mistake)
        mytestdata = mytestdata.drop(['city','Latitude','Longitude','change_hunits','studio_1000_1499', 'studio_1500_more',
                        'studio_750_999', 'onebed_1000_1499', 'onebed_1500_more',
                        'onebed_750_999', 'twobed_1000_1499', 'twobed_1500_more',
                        'twobed_750_999', 'threebed_1000_1499', 'threebed_1500_more',
                        'threebed_750_999'],axis=1)
#prepare testdata them
imputed_testdata = imputer.transform(mytestdata)
imputed_testdata = pd.DataFrame(imputed_testdata,columns=mytestdata.columns)
mytestdata_prepared = ct.transform(imputed_testdata)
#to pickle
processed_test_data = mytestdata_prepared.copy()
#make final predictions
myfinalmodel.fit(X_train_prepped,mydata_labels)
final_predictions = myfinalmodel.predict(mytestdata_prepared)
final_mse = mean_squared_error(mytest_data_labels,final_predictions)
final_rmse = np.sqrt(final_mse)
final_expvar = explained_variance_score(mytest_data_labels,final_predictions)
return {'final_rmse':final_rmse,'final_predictions':final_predictions,'final_expvar':final_expvar,'myfinalmodel':myfinalmodel,
'processed_training_data':processed_training_data,'processed_test_data':processed_test_data} | 636d922e405842ea338f774dd45b5ff78158bfdf | 3,659,238 |
import numpy as np
def calc_single_d(chi_widths, chis, zs, z_widths, z_SN, use_chi=True):
"""Uses single_m_convergence with index starting at 0 and going along the entire line of sight.
Inputs:
chi_widths -- the width of the comoving distance bins.
chis -- the mean comoving distances of each bin.
zs -- the mean redshift of each bin, for the scale factor.
    z_widths -- the width of the redshift bins.
    z_SN -- the redshift of the SN.
    use_chi -- boolean that determines whether equal comoving distance or redshift bins are used.
"""
comoving_to_SN = b_comoving(0, z_SN)
chi_SN = comoving_to_SN[-1]
convergence = np.linspace(0, 0, len(chis))
mass = MSOL * 10 ** 15
for i in range(0, len(chis)):
if use_chi:
convergence[i] = single_d_convergence(chi_widths, chis, zs, i, 1, chi_SN)
else:
convergence[i] = single_d_convergence_z(z_widths, chis, zs, i, 1, chi_SN)
return convergence | 6cafe9d8d1910f113fdcd8a3417e127f4f1cf5e6 | 3,659,239 |
import pandas as pd
import talib as t  # assumed: the `t` used below refers to TA-Lib, whose PPO signature matches this call
def ppo(
client, symbol, timeframe="6m", col="close", fastperiod=12, slowperiod=26, matype=0
):
"""This will return a dataframe of Percentage Price Oscillator for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
col (string): column to use to calculate
fastperiod (int): fast period to calculate across
slowperiod (int): slow period to calculate across
matype (int): moving average type (0-sma)
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
ppo = t.PPO(df[col].values, fastperiod, slowperiod, matype)
return pd.DataFrame({col: df[col].values, "ppo": ppo}) | 0b6c48408b810131370500921a7ab2addbccea8b | 3,659,240 |
import random
def getRandomCoin():
""" returns a randomly generated coin """
coinY = random.randrange(20, int(BASEY * 0.6))
coinX = SCREENWIDTH + 100
return [
{'x': coinX, 'y': coinY},
] | 44a5ea7baddc77f8d1b518c3d1adcccd28935108 | 3,659,241 |
def is_success(msg):
"""
    Whether the message reports success.
    :param msg: message dict with a 'status' field
    :return: True if status is 'success', otherwise False
"""
return msg['status'] == 'success' | 43ecbf3c7ac8d03ce92ab059e7ec902e51505d0a | 3,659,242 |
import httplib2
from bs4 import BeautifulSoup, SoupStrainer
def scrape_data(url):
"""
scrapes relevant data from given url
@param {string} url
@return {dict} {
url : link
links : list of external links
title : title of the page
description : sample text
}
"""
http = httplib2.Http()
try:
status, response = http.request(url)
except Exception as e:
return None
# get links
links = []
for link in BeautifulSoup(response, "html.parser", parse_only=SoupStrainer('a')):
if link.has_attr('href'):
links.append(link['href'])
# get description
soup = BeautifulSoup(response, "html.parser")
description = soup.find('meta', attrs={'name':'og:description'}) or soup.find('meta', attrs={'property':'description'}) or soup.find('meta', attrs={'name':'description'})
if description:
description = description.get('content')
# return dictionary
return {
"url" : url,
"links" : links,
"title" : BeautifulSoup(response, "html.parser"),
"description" : description
} | 4ab640aad73506e74e3a899467a90c2ddec34308 | 3,659,243 |
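# A minimal usage sketch; the URL below is only an illustrative public page, and
# network access is assumed (the function returns None if the request fails).
if __name__ == "__main__":
    data = scrape_data("https://www.example.com")
    if data is not None:
        print(data["title"], "-", len(data["links"]), "links found")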
def list_strip_comments(list_item: list, comment_denominator: str = '#') -> list:
"""
Strips all items which are comments from a list.
:param list_item: The list object to be stripped of comments.
:param comment_denominator: The character with which comment lines start with.
:return list: A cleaned list object.
"""
_output = list()
    for _item in list_item:
        # startswith() also copes with empty strings, which would crash _item[0]
        if not _item.startswith(comment_denominator):
            _output.append(_item)
return _output | e5dd6e0c34a1d91586e12e5c39a3a5413746f731 | 3,659,244 |
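# Usage sketch: strip comment lines from a small config-style list of strings.
if __name__ == "__main__":
    lines = ["# header comment", "key = value", "# trailing note", "other = 2"]
    print(list_strip_comments(lines))  # -> ['key = value', 'other = 2']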
from random import randint

def guess_number(name):
    """User-defined function that performs all the guessing operations and prints the result"""
guess_limit = 0
magic_number = randint(1, 20)
while guess_limit < 6: # perform the multiple guess operations and print output
user_guess = get_input("Take a guess: ")
if 0 < user_guess <= 20: # condition that allows the numbers only if in between 1 to 20
guess_limit += 1
if user_guess == magic_number:
print(f"Good job, {name}! You guessed my number in {guess_limit} guesses!")
break
elif user_guess < magic_number:
print("Your Guess is too low")
elif user_guess > magic_number:
print("Your Guess is too high")
else:
print("Try again, Your number must have be in the range of 1 to 20!!")
else:
print(f"The number I was thinking of was {magic_number}")
return 0 | 14c81f8adc18f59c29aa37ecec91808b275524e2 | 3,659,245 |
def writerformat(extension):
"""Returns the writer class associated with the given file extension."""
return writer_map[extension] | a2f981a993ba4be25304c0f41b0e6b51bef68d68 | 3,659,246 |
def index_like(index):
"""
    Return True if the index does NOT look like a trivial default range index
    (i.e. it carries information worth preserving).
"""
return not (isinstance(index, pd.RangeIndex) and
index._start == 0 and
index._stop == len(index) and
index._step == 1 and index.name is None) | 91a8e626547121768ee7708e5c7cdcf8265c3991 | 3,659,247 |
def zplsc_c_absorbtion(t, p, s, freq):
"""
Description:
        Calculate the seawater absorption coefficient from temperature, pressure, salinity and
        the transducer frequency. This code comes from the ASL MATLAB code LoadAZFP.m.
    Implemented by:
        2017-06-23: Rene Gelinas. Initial code.
    :param t: temperature
    :param p: pressure
    :param s: salinity
    :param freq: frequency in kHz
    :return: sea_abs -- absorption coefficient
"""
# Calculate relaxation frequencies
t_k = t + 273.0
f1 = 1320.0*t_k * np.exp(-1700/t_k)
f2 = 1.55e7*t_k * np.exp(-3052/t_k)
# Coefficients for absorption equations
k = 1 + p/10.0
a = 8.95e-8 * (1 + t*(2.29e-2 - 5.08e-4*t))
b = (s/35.0)*4.88e-7*(1+0.0134*t)*(1-0.00103*k + 3.7e-7*(k*k))
c = 4.86e-13*(1+t*((-0.042)+t*(8.53e-4-t*6.23e-6)))*(1+k*(-3.84e-4+k*7.57e-8))
freqk = freq*1000
sea_abs = (a*f1*(freqk**2))/((f1*f1)+(freqk**2))+(b*f2*(freqk**2))/((f2*f2)+(freqk**2))+c*(freqk**2)
return sea_abs | af5a7d1ea0ad4fbfacfd1b7142eaf0a31899cb4c | 3,659,248 |
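# Usage sketch with made-up (but plausible) oceanographic values; numpy is
# imported here because the function body relies on a module-level `np`.
if __name__ == "__main__":
    import numpy as np
    print(zplsc_c_absorbtion(t=10.0, p=100.0, s=35.0, freq=125))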
def estimate_quintic_poly(x, y):
"""Estimate degree 5 polynomial coefficients.
"""
return estimate_poly(x, y, deg=5) | be389d9f09208da14d0b5c9d48d3c6d2e6a86e8d | 3,659,249 |
def add(A, B):
"""
Return the sum of Mats A and B.
>>> A1 = Mat([[1,2,3],[1,2,3]])
>>> A2 = Mat([[1,1,1],[1,1,1]])
>>> B = Mat([[2,3,4],[2,3,4]])
>>> A1 + A2 == B
True
>>> A2 + A1 == B
True
>>> A1 == Mat([[1,2,3],[1,2,3]])
True
>>> zero = Mat([[0,0,0],[0,0,0]])
>>> B + zero == B
True
"""
assert A.size == B.size
    return Mat([[a + b for a, b in zip(Arow, Brow)] for Arow, Brow in zip(A.store, B.store)]) | 5b0054397a76a20194b3a34435074fc901a34f6b | 3,659,250
def hindu_lunar_event(l_month, tithi, tee, g_year):
"""Return the list of fixed dates of occurrences of Hindu lunar tithi
prior to sundial time, tee, in Hindu lunar month, l_month,
in Gregorian year, g_year."""
l_year = hindu_lunar_year(
hindu_lunar_from_fixed(gregorian_new_year(g_year)))
date1 = hindu_tithi_occur(l_month, tithi, tee, l_year)
date2 = hindu_tithi_occur(l_month, tithi, tee, l_year + 1)
return list_range([date1, date2],
gregorian_year_range(g_year)) | aca03e1a77ff6906d31a64ab50355642f848f9d9 | 3,659,251 |
async def statuslist(ctx, *, statuses: str):
"""Manually make a changing status with each entry being in the list."""
bot.x = 0
statuses = statuses.replace("\n", bot.split)
status_list = statuses.split(bot.split)
if len(status_list) <= 1:
return await bot.send_embed(ctx, f"You cannot have a list with only {len(status_list)} entry.", negative=True)
bot.statuses = status_list
bot.autostatus = True
await bot.send_embed(ctx, "Changed statuslist.") | 99c43ea464759356977bc35ffcd941655763d783 | 3,659,252 |
def kebab(string):
"""kebab-case"""
return "-".join(string.split()) | 24bc29e066508f6f916013fa056ff54408dcd46d | 3,659,253 |
def getUserID(person):
"""
Gets Humhub User ID using name information
:param person: Name of the person to get the Humhub User ID for
:type person: str.
"""
# search for person string in humhub db
# switch case for only one name (propably lastname) or
# two separate strings (firstname + lastname)
firstname = ''
lastname = ''
if len(person.split()) == 1:
# only lastname
lastname = person
else:
firstname = person.split()[0]
lastname = person.split()[1]
global offlinemode
if offlinemode:
return 8
    # search in humhub db; parameterized queries handle quoting and avoid SQL injection
    # (%s placeholders assume a MySQL-style DB-API connector)
    cnx = establishDBConnection(dbconfig)
    cursor = cnx.cursor()
    userid = None
    if firstname == '':
        cursor.execute("SELECT user_id FROM profile WHERE lastname = %s", (lastname,))
    else:
        cursor.execute("SELECT user_id FROM profile WHERE firstname = %s AND lastname = %s",
                       (firstname, lastname))
    for (user_id,) in cursor:
        userid = user_id
    cnx.close()
return userid | 31d40b6dd0aec8f6e8481aeaa3252d71c6935c39 | 3,659,254 |
def parse_atom(s, atom_index=-1, debug=False):
""" Parses an atom in a string s
:param s: The string to parse
:type s: str
    :param atom_index: the atom_index counter for continuous parsing. Default is -1.
:type atom_index: int
:return: a list of atoms, a list of bonds and an updated atom_index for the next atom
:rtype: list
"""
if len(s) == 0:
raise ValueError("parse_atom: argument 's' cannot have length 0.")
if debug:
print(" Smiles.parse_atom: '{}'".format(s))
Z = 0
if len(s) == 1:
try:
Z = LABEL2Z[s]
except KeyError:
raise IllegalAtomError("The atom '{}' is invalid.".format(s))
else:
# just return the atom
return [Atom(Z, idx=atom_index)], [], atom_index +1
idx_atom_end = -1 # atomic label from 0:idx_atom_end
# find indices for hydrogens + counts
n_hydrogens = 0
idx_hydrogen = s.find("H")
if idx_hydrogen > 0: # ignore atomic hydrogen (or proton)
idx_atom_end = idx_hydrogen
n_hydrogens = 1
idx_hydrogen_count = idx_hydrogen + 1
try:
n_hydrogens = int(s[idx_hydrogen_count])
except IndexError: # ran past the end of string
pass
except ValueError: # hit something other than a number
pass
idx_cat = s.find("+")
idx_ani = s.find("-")
idx_charge = max(idx_cat, idx_ani)
charge = 0
if idx_cat > 0:
charge = 1
elif idx_ani > 0:
charge = -1
if idx_charge > 0:
if idx_hydrogen > 0:
idx_atom_end = min(idx_charge, idx_hydrogen)
else:
idx_atom_end = idx_charge
try:
charge = int(s[idx_charge+1])
except IndexError: # ran past the end of string
pass
except ValueError: # hit another + or -
charge = charge * sum(count_items_exclusive(s, ["+", "-"]))
if idx_atom_end == -1:
idx_atom_end = len(s)
if debug:
print(" n_hydrogens :", n_hydrogens)
print(" n_charge :", charge)
print(" base atom : s[0:{}] = {}".format(idx_atom_end, s[0:idx_atom_end]))
try:
Z = LABEL2Z[s[0:idx_atom_end]]
except KeyError:
raise IllegalAtomError("The atom '{}' is invalid.".format(s[0:idx_atom_end]))
atoms = [Atom(Z, idx=atom_index, fcharge=charge)]
bonds = []
for i in range(n_hydrogens):
atoms.append(Atom(1, idx=atom_index+1+i))
bonds.append(Bond(atom_index, atom_index+1+i))
return atoms, bonds, atom_index+1+n_hydrogens | c269449d3a7d872687b75582264c2c94532016ba | 3,659,255 |
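# Illustrative usage sketch (not executed here): Atom, Bond and LABEL2Z are
# defined elsewhere in the surrounding SMILES module.
#
#   atoms, bonds, next_index = parse_atom("NH3+", atom_index=0)
#   # -> one nitrogen with formal charge +1, three explicit hydrogens, three N-H bonds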
def b58decode(v, length):
""" decode v into a string of len bytes
"""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = ''
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result | 4757e451106691de3d8805e9f7bdaeb24bd52816 | 3,659,256 |
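# Illustrative usage sketch (not executed here): relies on the module-level
# __b58chars / __b58base constants defined alongside this helper.
#
#   raw = b58decode("1BoatSLRHtKNngkdXEeobR76b53LETtpyT", 25)
#   # a valid Base58Check address decodes to 25 bytes (version + hash + checksum)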
def get_submission_by_id(request, submission_id):
"""
Returns a list of test results assigned to the submission with the given id
"""
submission = get_object_or_404(Submission, pk=submission_id)
data = submission.tests.all()
serializer = TestResultSerializer(data, many=True)
return Response(serializer.data, status.HTTP_200_OK) | 4504b46a03056cb289bb0b53dc01d58f0c5c986c | 3,659,257 |
import pkg_resources
def get_resource_path(resource_name):
"""Get the resource path.
Args:
resource_name (str): The resource name relative to the project root
directory.
Returns:
str: The true resource path on the system.
"""
package = pkg_resources.Requirement.parse(PACKAGE_NAME)
return pkg_resources.resource_filename(package, resource_name) | 0f95e5f26edc9f351323a93ddc75df920e65375d | 3,659,258 |
from heapq import heapify, heappop, heappush

def do_cluster(items, mergefun, distfun, distlim):
"""Pairwise nearest merging clusterer.
items -- list of dicts
mergefun -- merge two items
distfun -- distance function
distlim -- stop merging when distance above this limit
"""
def heapitem(d0, dests):
"""Find nearest neighbor for d0 as sortable [distance, nearest, d0]"""
dists = (
Sort0List([distfun(d0, d1), d1, d0])
for d1 in dests if d1 is not d0
)
return min(dists)
heap = [Sort0List([None, None, d]) for d in items]
d0 = d1 = merged = None
while len(heap) > 1:
for item in heap:
# rescan nearest where nearest was merged away, or not yet set
if item[1] in (None, d0, d1):
item[:] = heapitem(item[2], (x[2] for x in heap))
continue
# update others where merged now nearest
if item[2] is not merged:
distance = distfun(item[2], merged)
if item[0] > distance:
item[0:2] = distance, merged
# arrange heap, pop out one end of shortest edge
heapify(heap)
distance, d1, d0 = item = heappop(heap)
# if shortest edge is long enough, unpop and stop
if distance is None or distance > distlim:
heappush(heap, item) # unspill the milk
break
# replace other end with merged destination
merged = mergefun(d0, d1)
for i in range(len(heap)):
if heap[i][2] is d1:
heap[i] = Sort0List([None, None, merged])
break
return [x[2] for x in heap] | afcd32c390c5d9b57eb070d3f923b4abd6f9ac6b | 3,659,259 |
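# Illustrative usage sketch (not executed here, since Sort0List is assumed to be
# a list subclass defined elsewhere that sorts on its first element): cluster
# three 1-D points, merging by weighted average of position.
#
#   points = [{"x": 1.0, "n": 1}, {"x": 1.2, "n": 1}, {"x": 8.0, "n": 1}]
#   merge = lambda a, b: {"x": (a["x"] * a["n"] + b["x"] * b["n"]) / (a["n"] + b["n"]),
#                         "n": a["n"] + b["n"]}
#   dist = lambda a, b: abs(a["x"] - b["x"])
#   print(do_cluster(points, merge, dist, distlim=1.0))  # the two nearby points merge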
import csv
def read_csv(input_file, quotechar='"'):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f,quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines | 3b789904ae612b9b211a7dac5c49289659c415c5 | 3,659,260 |
def rotate_coordinates(local3d, angles):
"""
Rotate xyz coordinates from given view_angles.
local3d: numpy array. Unit LOCAL xyz vectors
angles: tuple of length 3. Rotation angles around each GLOBAL axis.
"""
cx, cy, cz = np.cos(angles)
sx, sy, sz = np.sin(angles)
mat33_x = np.array([
[1, 0, 0],
[0, cx, sx],
[0, -sx, cx]
], dtype='float')
mat33_y = np.array([
[cy, 0, sy],
[0, 1, 0],
[-sy, 0, cy]
], dtype='float')
mat33_z = np.array([
[cz, sz, 0],
[-sz, cz, 0],
[0, 0, 1]
], dtype='float')
local3d = local3d @ mat33_x @ mat33_y @ mat33_z
return local3d | 3243cc9d82dd08384995f62709d3fabc7b896dce | 3,659,261 |
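# Usage sketch: rotate the local unit basis 90 degrees about the global z axis;
# numpy is imported here because the function body relies on a module-level `np`.
if __name__ == "__main__":
    import numpy as np
    basis = np.eye(3)
    print(rotate_coordinates(basis, (0.0, 0.0, np.pi / 2)))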
import torch
def quantize_enumerate(x_real, min, max):
"""
Randomly quantize in a way that preserves probability mass.
We use a piecewise polynomial spline of order 3.
"""
assert min < max
lb = x_real.detach().floor()
# This cubic spline interpolates over the nearest four integers, ensuring
# piecewise quadratic gradients.
s = x_real - lb
ss = s * s
t = 1 - s
tt = t * t
probs = torch.stack([
t * tt,
4 + ss * (3 * s - 6),
4 + tt * (3 * t - 6),
s * ss,
], dim=-1) * (1/6)
logits = safe_log(probs)
q = torch.arange(-1., 3.)
x = lb.unsqueeze(-1) + q
x = torch.max(x, 2 * min - 1 - x)
x = torch.min(x, 2 * max + 1 - x)
return x, logits | d73083d6078c47456aeb64859d8361ad37f7d962 | 3,659,262 |
def counter_format(counter):
"""Pretty print a counter so that it appears as: "2:200,3:100,4:20" """
if not counter:
return "na"
return ",".join("{}:{}".format(*z) for z in sorted(counter.items())) | 992993a590eabb2966eb9de26625077f2597718c | 3,659,263 |
def drot(x, y, c, s):
"""
Apply the Givens rotation {(c,s)} to {x} and {y}
"""
# compute
gsl.blas_drot(x.data, y.data, c, s)
# and return
return x, y | 8554586f2069f04db0116dfee7868d5d0527999a | 3,659,264 |
def _update_dict_within_dict(items, config):
""" recursively update dict within dict, if any """
for key, value in items:
if isinstance(value, dict):
config[key] = _update_dict_within_dict(
value.items(), config.get(key, {})
)
else:
config[key] = value
return config | 75b840b8091568b80f713b2ca7725b1a1f917d3a | 3,659,265 |
def masterProductFieldUpdate(objectId: str):
"""
Submit handler for updating & removing field overrides.
:param objectId: The mongodb master product id.
"""
key = request.form.get("field-key")
value = request.form.get("field-value")
# Clean up and trim tags if being set.
if key == MASTER_PRODUCT_FIELD__TAGS:
tags = value.strip().split(",")
if len(tags):
tags = ", ".join([tag.strip() for tag in set(tags) if tag.strip()])
value = tags
if thk.products.overrideProductField(objectId, key, value):
# If the product is active, mark it for upserting.
product = thk.products.getOneProduct(objectId)
if product and THK_ACTIVE in product and product[THK_ACTIVE]:
thk.products.rebuildActiveProduct(objectId)
flash("Field modified successfully.", "success")
else:
flash("Field could not be modified.", "danger")
return redirect(url_for("products.masterProductEdit", objectId=objectId)) | 3d87cf2de42d5ee0ee9d43116c0bff4181f42da0 | 3,659,266 |
def recalc_Th(Pb, age):
"""Calculates the equivalent amount of ThO_2 that would be required to produce the
measured amount of PbO if there was no UO_2 in the monazite.
INPUTS:
Pb: the concentration of Pb in parts per million
age: the age in million years
"""
return (232. / 208.) * Pb / (np.exp(4.95e-11 * (age * 1e6)) - 1) | 79ba3cc8e9db8adba1d31ec6f9fe3588d3531b97 | 3,659,267 |
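# Usage sketch: equivalent Th concentration for 2000 ppm Pb at 100 Ma; numpy is
# imported here because the function body relies on a module-level `np`.
if __name__ == "__main__":
    import numpy as np
    print(recalc_Th(Pb=2000.0, age=100.0))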
def relative_periodic_trajectory_wrap(
reference_point: ParameterVector,
trajectory: ArrayOfParameterVectors,
period: float = 2 * np.pi,
) -> ArrayOfParameterVectors:
"""Function that returns a wrapped 'copy' of a parameter trajectory such that
the distance between the final point of the trajectory and the reference point
is minimal inside the specified period.
The rest of the trajectory is being transformed in the same manner.
NOTE:
It only works as intended if the period is larger than the distance
between the consecutive points in the trajectory.
Args:
reference_point: Reference point for periodic wrapping of the trajectory.
trajectory: Trajectory that is wrapped to a copy of itself such that
the distance between the final point in the trajectory
and the reference point is minimal.
period: Periodicity of each parameter in each point of the trajectory.
Defaults to 2*np.pi.
"""
if not np.all(np.linalg.norm(np.diff(trajectory, axis=0), axis=1) < period):
raise ValueError(
"Distances between consecutive points must be smaller than period."
)
wrapped_trajectory = np.copy(trajectory).astype(float)
wrapped_trajectory[-1] = relative_periodic_wrap(
reference_point, trajectory[-1], period=period
)
for ii in range(2, len(wrapped_trajectory) + 1):
wrapped_trajectory[-ii] = relative_periodic_wrap(
wrapped_trajectory[-ii + 1], trajectory[-ii], period=period
)
return wrapped_trajectory | ee41bdb547367186b82324e3e080b758984b7747 | 3,659,268 |
import warnings
def planToSet(world,robot,target,
edgeCheckResolution=1e-2,
extraConstraints=[],
equalityConstraints=[],
equalityTolerance=1e-3,
ignoreCollisions=[],
movingSubset=None,
**planOptions):
"""
Creates a MotionPlan object that can be called to solve a standard motion
planning problem for a robot in a world. The plan starts from the robot's
current configuration and ends in a target set.
Args:
world (WorldModel): the world in which the robot lives, including
obstacles
robot (RobotModel): the moving robot. The plan starts from
robot.getConfig()
target (function or CSpace): a function f(q) returning a bool which is
True if the configuration q is a goal, OR an instance of a CSpace
subclass where sample() generates a sample in the target set and
feasible(x) tests whether a sample is in the target set.
.. note::
The function should accept vectors of the same dimensionality
as the robot, not the moving subset. Similarly, the CSpace
should have the same dimensionality as the robot.
edgeCheckResolution (float, optional): the resolution at which edges in the path are
checked for feasibility
extraConstraints (list, optional): possible extra constraint functions, each
of which needs to return True if satisfied.
.. note::
Don't put cartesian constraints here! Instead place your function in equalityConstraints.
equalityConstraints (list, optional): a list of IKObjectives or equality
constraints f(x)=0 that must be satisfied during the motion. Equality
constraints may return a float or a list of floats. In the latter case, this
is interpreted as a vector function, in which all entries of the vector must be 0.
equalityTolerance (float, optional): a tolerance to which all the equality constraints
must be satisfied.
ignoreCollisions (list): a list of ignored collisions. Each element may be
a body in the world, or a pair (a,b) where a, b are bodies in the world.
movingSubset (optional): if 'auto', 'all', or None (default), all joints
will be allowed to move. If this is a list, then only these joint
indices will be allowed to move.
planOptions (keywords): keyword options that will be sent to the planner. See
the documentation for MotionPlan.setOptions for more details.
Returns:
MotionPlan: a planner instance that can be called to get a
kinematically-feasible plan. (see :meth:`MotionPlan.planMore` )
The underlying configuration space (a RobotCSpace, ClosedLoopRobotCSpace, or
EmbeddedRobotCSpace) can be retrieved using the "space" attribute of the
resulting MotionPlan object.
"""
q0 = robot.getConfig()
subset = []
if movingSubset == 'auto' or movingSubset == 'all' or movingSubset == None:
subset = list(range(len(q0)))
else:
subset = movingSubset
space = makeSpace(world=world,robot=robot,
edgeCheckResolution=edgeCheckResolution,
extraConstraints=extraConstraints,
equalityConstraints=equalityConstraints,
equalityTolerance=equalityTolerance,
ignoreCollisions=ignoreCollisions,
movingSubset=subset)
if hasattr(space,'lift'): #the planning takes place in a space of lower dimension than #links
plan = EmbeddedMotionPlan(space,q0,**planOptions)
else:
plan = MotionPlan(space,**planOptions)
#convert target to a (test,sample) pair if it's a cspace
if isinstance(target,CSpace):
goal = [(lambda x:target.feasible(x)),(lambda : target.sample())]
else:
if not callable(target):
if not isinstance(target,(tuple,list)) or len(target)!=2 or not callable(target[0]) or not callable(target[1]):
raise TypeError("target must be a predicate function or CSpace object")
goal = target
try:
plan.setEndpoints(q0,goal)
except RuntimeError:
#the start configuration is infeasible, print it out
if space.cspace==None: space.setup()
sfailures = space.cspace.feasibilityFailures(plan.space.project(q0))
warnings.warn("Start configuration fails {}".format(sfailures))
raise
return plan | d03ec2c6c1e00388d1271af1e17a94eda0f50122 | 3,659,269 |
def itkimage_to_json(itkimage, manager=None):
"""Serialize a Python itk.Image object.
Attributes of this dictionary are to be passed to the JavaScript itkimage
constructor.
"""
if itkimage is None:
return None
else:
direction = itkimage.GetDirection()
directionMatrix = direction.GetVnlMatrix()
directionList = []
dimension = itkimage.GetImageDimension()
pixelArr = itk.array_view_from_image(itkimage)
compressor = zstd.ZstdCompressor(level=3)
compressed = compressor.compress(pixelArr.data)
pixelArrCompressed = memoryview(compressed)
for col in range(dimension):
for row in range(dimension):
directionList.append(directionMatrix.get(row, col))
componentType, pixelType = _image_to_type(itkimage)
imageType = dict(
dimension=dimension,
componentType=componentType,
pixelType=pixelType,
components=itkimage.GetNumberOfComponentsPerPixel()
)
return dict(
imageType=imageType,
origin=tuple(itkimage.GetOrigin()),
spacing=tuple(itkimage.GetSpacing()),
size=tuple(itkimage.GetBufferedRegion().GetSize()),
direction={'data': directionList,
'rows': dimension,
'columns': dimension},
compressedData=pixelArrCompressed
) | e55f2da9792e4772de4b145375d1eec1e6ee6e06 | 3,659,270 |
import threading
import os
import time
def test_two_agents(tmp_path, empty_ensemble):
"""
:tmp_path: https://docs.pytest.org/en/stable/tmpdir.html
"""
@fdb.transactional
def get_started(tr):
return joshua_model._get_snap_counter(tr, ensemble_id, "started")
assert len(joshua_model.list_active_ensembles()) == 0
ensemble_id = joshua_model.create_ensemble(
"joshua", {"max_runs": 1, "timeout": 1}, open(empty_ensemble, "rb")
)
agents = []
for rank in range(2):
agent = threading.Thread(
target=joshua_agent.agent,
args=(),
kwargs={
"work_dir": os.path.join(tmp_path, str(rank)),
"agent_idle_timeout": 1,
},
)
        agent.daemon = True
agent.start()
agents.append(agent)
# before starting agent two, wait until agent one has started on this ensemble
while get_started(joshua_model.db) != 1:
time.sleep(0.001)
joshua.tail_ensemble(ensemble_id, username="joshua")
@fdb.transactional
def get_started(tr):
return joshua_model._get_snap_counter(tr, ensemble_id, "started")
# The second agent won't have started this ensemble (unless somehow > 10
# seconds passed without the first agent completing the ensemble)
assert get_started(joshua_model.db) == 1
for agent in agents:
agent.join() | 19e71ad5ddf0ebb351733cabfab493d6d694ce51 | 3,659,271 |
def project(pnt, norm):
"""Projects a point following a norm."""
t = -np.sum(pnt*norm)/np.sum(norm*norm)
ret = pnt+norm*t
return ret/np.linalg.norm(ret) | 865b658862ebc47eccc117f0daebc8afcc99a2ac | 3,659,272 |
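# Usage sketch: project a point onto the plane orthogonal to the z axis; numpy is
# imported here because the function body relies on a module-level `np`.
if __name__ == "__main__":
    import numpy as np
    print(project(np.array([1.0, 1.0, 1.0]), np.array([0.0, 0.0, 1.0])))
    # -> roughly [0.707, 0.707, 0.0]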
def fix_trajectory(traj):
"""Remove duplicate waypoints that are introduced during smoothing.
"""
cspec = openravepy.ConfigurationSpecification()
cspec.AddDeltaTimeGroup()
iwaypoint = 1
num_removed = 0
while iwaypoint < traj.GetNumWaypoints():
waypoint = traj.GetWaypoint(iwaypoint, cspec)
delta_time = cspec.ExtractDeltaTime(waypoint)
if delta_time == 0.0:
traj.Remove(iwaypoint, iwaypoint + 1)
num_removed += 1
else:
iwaypoint += 1
return num_removed | 30e3925c518dd4aff0f38ef7a02aaa9f7ab3680a | 3,659,273 |
def calculate_edt(im, outpath=''):
"""Calculate distance from mask."""
mask = im.ds[:].astype('bool')
abs_es = np.absolute(im.elsize)
dt = distance_transform_edt(~mask, sampling=abs_es)
# mask = im.ds[:].astype('uint32')
# dt = edt.edt(mask, anisotropy=im.elsize, black_border=True, order='F', parallel=1)
# TODO?: leverage parallel
mo = write_output(outpath, dt, im.get_props())
return mo, mask | 469dde8ff81a782125aa29706a82a1da15db965b | 3,659,274 |
def select_report_data(conn):
""" select report data to DB """
cur = conn.cursor()
cur.execute("SELECT * FROM report_analyze")
report = cur.fetchall()
cur.close()
return report | 9d0bf6d4f6758c873bd6643673784239f9bf4557 | 3,659,275 |
import numpy
def func_lorentz_by_h_pv(z, h_pv, flag_z: bool = False, flag_h_pv: bool = False):
"""Gauss function as function of h_pv
"""
inv_h_pv = 1./h_pv
inv_h_pv_sq = numpy.square(inv_h_pv)
z_deg = z * 180./numpy.pi
c_a = 2./numpy.pi
a_l = c_a * inv_h_pv
b_l = 4.*inv_h_pv_sq
z_deg_sq = numpy.square(z_deg)
res = numpy.expand_dims(a_l, axis=-1) /(1+ numpy.expand_dims(b_l, axis=-1) * z_deg_sq)
dder = {}
if flag_z:
dder["z"] = -2.*z_deg*numpy.expand_dims(b_l,axis=-1)*res/(1.+numpy.expand_dims(b_l, axis=-1)*z_deg_sq) * 180./numpy.pi
if flag_h_pv:
dder["h_pv"] = (c_a * (numpy.expand_dims(h_pv, axis=-1) + 4*z_deg_sq) - \
c_a * numpy.expand_dims(h_pv, axis=-1))/numpy.square(numpy.expand_dims(h_pv, axis=-1) + 4*z_deg_sq)
return res, dder | 802029e167439471e892fbfbfe4d6fdce8cb1a0e | 3,659,276 |
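# Usage sketch: evaluate the profile and its derivative w.r.t. z on a small grid
# of angles (radians) for a single peak width; numpy is already imported above.
if __name__ == "__main__":
    z = numpy.deg2rad(numpy.array([0.0, 0.05, 0.10]))
    res, dder = func_lorentz_by_h_pv(z, numpy.array([0.2]), flag_z=True)
    print(res)
    print(dder["z"])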
from typing import Tuple
from typing import Optional
from datetime import datetime
import os
import subprocess
def backup_postgres_db() -> Tuple[Optional[str], bytes]:
"""Backup postgres db to a file."""
try:
time_str = datetime.now().strftime("%d-%m-%YT%H:%M:%S")
filename = f"backup_restore/backups/{time_str}-{settings.POSTGRES_CUSTOM_DB}.dump"
backup_name = f"{time_str}-{settings.POSTGRES_CUSTOM_DB}.dump.gz"
        # create the dump with pg_dump and a terminal command
        # (assumption: POSTGRES_HOST / POSTGRES_PORT / POSTGRES_USER exist on `settings`,
        #  and authentication is provided by the environment, e.g. PGPASSWORD / .pgpass)
        command = (
            f"pg_dump -h {settings.POSTGRES_HOST} -p {settings.POSTGRES_PORT} "
            f"-U {settings.POSTGRES_USER} -F c -f {filename} {settings.POSTGRES_CUSTOM_DB}"
        )
        completed = subprocess.run(command, shell=True, capture_output=True)
        # check if command worked
        if completed.returncode != 0:
            logger.error(completed.stderr.decode())
            return "pg_dump failed", bytes()
# compress the dump
compress_file(filename)
# upload the zipped dump
error = upload_backup(backup_name)
if error is not None:
return error, bytes()
# remove created files
# with temporary file it would be the cleaner way
try:
            for file in os.listdir("backup_restore/backups"):
if file != "__init__.py":
os.remove(f"backup_restore/backups/{file}")
logger.info("Removed backup files")
except Exception:
logger.error("Could not remove backup files")
return None, bytes()
except Exception as e:
logger.error(e)
return "Backup failed", bytes() | b541371f83976d42f8f4c4a55752ac67b792346e | 3,659,277 |
def get_profile(aid):
"""
get profile image of author with the aid
"""
    if 'logged_in' in session and aid == session['logged_id']:
try:
re_aid = request.args.get("aid")
re = aController.getAuthorByAid(re_aid)
if re != None:
return re
return redirect(url_for('/'))
except KeyError:
return redirect(url_for('/'))
return redirect(url_for('/')) | daa759c1493a15d6a2e300a6ab552aae30f59706 | 3,659,278 |
def SystemSettings_GetMetric(*args, **kwargs):
"""SystemSettings_GetMetric(int index, Window win=None) -> int"""
return _misc_.SystemSettings_GetMetric(*args, **kwargs) | d9d6d00e6cf54f8e2ed8a06c616b17d6b2905526 | 3,659,279 |
from pathlib import Path
def all_files(dir, pattern):
"""Recursively finds every file in 'dir' whose name matches 'pattern'."""
    return [f.as_posix() for f in Path(dir).rglob(pattern)] | 45f12cda2e16cb745d99d2c8dfb454b32130e1c8 | 3,659,280
def get_identity_groups(ctx):
"""Load identity groups definitions."""
return render_template('identity-groups', ctx) | 820eb3ebf8d141f37a93485e4428e1cd79da6a44 | 3,659,281 |
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import AnonymousUser
from ..django_legacy.django2_0.utils.deprecation import CallableFalse, CallableTrue
def fix_behaviour_contrib_auth_user_is_anonymous_is_authenticated_callability(utils):
"""
Make user.is_anonymous and user.is_authenticated behave both as properties and methods,
by preserving their callability like in earlier Django version.
"""
utils.skip_if_app_not_installed("django.contrib.contenttypes") # BEFORE IMPORTS!
@property
def is_anonymous_for_AbstractBaseUser(self):
"""
Always return False. This is a way of comparing User objects to
anonymous users.
"""
return CallableFalse
@property
def is_authenticated_for_AbstractBaseUser(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return CallableTrue
utils.inject_attribute(AbstractBaseUser, "is_anonymous", is_anonymous_for_AbstractBaseUser)
utils.inject_attribute(AbstractBaseUser, "is_authenticated", is_authenticated_for_AbstractBaseUser)
@property
def is_anonymous_for_AnonymousUser(self):
return CallableTrue
@property
def is_authenticated_for_AnonymousUser(self):
return CallableFalse
utils.inject_attribute(AnonymousUser, "is_anonymous", is_anonymous_for_AnonymousUser)
utils.inject_attribute(AnonymousUser, "is_authenticated", is_authenticated_for_AnonymousUser) | b3f94992c0ada29b82e64d40cac190a567db9013 | 3,659,282 |
def BZPoly(pnts, poly, mag, openPoly=False):
"""TODO WRITEME.
Parameters
----------
pnts : list
Measurement points [[p1x, p1z], [p2x, p2z],...]
poly : list
Polygon [[p1x, p1z], [p2x, p2z],...]
mag : [M_x, M_y, M_z]
Magnetization = [M_x, M_y, M_z]
"""
dgz = calcPolyGz(pnts, poly, density=1.0, openPoly=openPoly)[1]
dgz[:,2] *= -1
return poissonEoetvoes(adot(mag, -dgz)) | 1ba775034c8728c854fc58f4b4f75ad691a7ecec | 3,659,283 |
def matches(spc, shape_):
"""
Return True if the shape adheres to the spc (spc has optional color/shape
restrictions)
"""
(c, s) = spc
matches_color = c is None or (shape_.color == c)
matches_shape = s is None or (shape_.name == s)
return matches_color and matches_shape | fa9c90ea2be17b0cff7e4e76e63cf2c6a70cc1ec | 3,659,284 |
from typing import Any
def jsonsafe(obj: Any) -> ResponseVal:
"""
Catch the TypeError which results from encoding non-encodable types
This uses the serialize function from my.core.serialize, which handles
serializing most types in HPI
"""
try:
return Response(dumps(obj), status=200, headers={"Content-Type": "application/json"})
except TypeError as encode_err:
return {
"error": "Could not encode response from HPI function as JSON",
"exception": str(encode_err),
}, 400 | 90aaaad3e890eeb09aaa683395a80f80394bba3e | 3,659,285 |
def get_appliances(self) -> list:
"""Get all appliances from Orchestrator
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - appliance
- GET
- /appliance
:return: Returns list of dictionaries of each appliance
:rtype: list
"""
return self._get("/appliance") | 2f1e48869f4494a995efd4adba80a235c4fb1486 | 3,659,286 |
def is_str(element):
"""True if string else False"""
check = isinstance(element, str)
return check | c46b80d109b382de761618c8c9a50d94600af876 | 3,659,287 |
import tempfile
import shutil
def anonymize_and_streamline(old_file, target_folder):
"""
This function loads the edfs of a folder and
1. removes their birthdate and patient name
2. renames the channels to standardized channel names
3. saves the files in another folder with a non-identifyable
4. verifies that the new files have the same content as the old
"""
# load the two csvs with the edfs that we dont process and where the ECG is upside down
pre_coding_discard = [line[0] for line in misc.read_csv(cfg.edfs_discard) if line[2]=='1']
to_invert = [line[0] for line in misc.read_csv(cfg.edfs_invert)]
# Here we read the list of controls and patients with their age and gender
mappings = misc.read_csv(cfg.controls_csv)
mappings.extend(misc.read_csv(cfg.patients_csv))
mappings = dict([[name, {'gender':gender, 'age':age}] for name, gender, age,*_ in mappings])
# old name is the personalized file without file extension, e.g. thomas_smith(1)
old_name = ospath.splitext(ospath.basename(old_file))[0]
# new name is the codified version without extension e.g '123_45678'
new_name = codify(old_name)
# use a temporary file to write and then move it,
# this avoids half-written files that cannot be read later
tmp_name = tempfile.TemporaryFile(prefix='anonymize').name
if old_name in pre_coding_discard:
print('EDF is marked as corrupt and will be discarded')
return
# this is where the anonymized file will be stored
new_file = ospath.join(target_folder, new_name + '.edf')
if ospath.exists(new_file):
        print ('New file exists already {}'.format(new_file))
else:
# anonymize
print ('Writing {} from {}'.format(new_file, old_name))
assert ospath.isfile(old_file), f'{old_file} does not exist'
signals, signal_headers, header = sleep_utils.read_edf(old_file,
digital=True,
verbose=False)
# remove patient info
header['birthdate'] = ''
header['patientname'] = new_name
header['patientcode'] = new_name
header['gender'] = mappings[old_name]['gender']
header['age'] = mappings[old_name]['age']
# rename channels to a unified notation, e.g. EKG becomes ECG I
for shead in signal_headers:
ch = shead['label']
if ch in ch_mapping:
ch = ch_mapping[ch]
shead['label'] = ch
# Invert the ECG channel if necessary
if old_name in to_invert:
for i,sig in enumerate(signals):
label = signal_headers[i]['label'].lower()
if label == cfg.ecg_channel.lower():
signals[i] = -sig
# we write to tmp to prevent that corrupted files are not left
print ('Writing tmp for {}'.format(new_file))
sleep_utils.write_edf(tmp_name, signals, signal_headers, header,
digital=True, correct=True)
# verify that contents for both files match exactly
print ('Verifying tmp for {}'.format(new_file))
    # embarrassing hack, as dmin/dmax don't match in these files after inverting
if not old_name=='B0036':
sleep_utils.compare_edf(old_file, tmp_name, verbose=False)
# now we move the tmp file to its new location.
shutil.move(tmp_name, new_file)
# also copy additional file information ie hypnograms and kubios files
old_dir = ospath.dirname(old_file)
pattern = old_name.replace('_m', '').replace('_w', '') # remove gender from weitere nt1 patients
add_files = ospath.list_files(old_dir, patterns=[f'{pattern}*txt', f'{pattern}*dat', f'{pattern}*mat'])
for add_file in add_files:
# e.g. .mat or .npy etc etc
new_add_file = ospath.join(target_folder,
ospath.basename(add_file.replace(pattern, new_name)))
if ospath.exists(new_add_file):continue
# hypnograms will be copied to .hypno
try:
new_add_file = new_add_file.replace('-Schlafprofil', '')
new_add_file = new_add_file.replace('_sl','')
new_add_file = new_add_file.replace('.txt', '.hypno').replace('.dat', '.hypno')
shutil.copy(add_file, new_add_file)
except Exception as e:
print(e)
return old_name, new_name | 2210c72891c3faec73a9d5ce4b83d56ee9adef38 | 3,659,288 |
def deal_text(text: str) -> str:
"""deal the text
Args:
text (str): text need to be deal
Returns:
str: dealed text
"""
text = " "+text
text = text.replace("。","。\n ")
text = text.replace("?","?\n ")
text = text.replace("!","!\n ")
text = text.replace(";",";\n ")
return text | 8f16e7cd2431dfc53503c877f9d4b5429f738323 | 3,659,289 |
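# Usage sketch: split full-width-punctuated sentences onto separate indented lines.
if __name__ == "__main__":
    print(deal_text("第一句。第二句?第三句!"))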
import zipfile
import os
def extract_zip(src, dest):
"""extract a zip file"""
bundle = zipfile.ZipFile(src)
namelist = bundle.namelist()
for name in namelist:
filename = os.path.realpath(os.path.join(dest, name))
if name.endswith('/'):
os.makedirs(filename)
else:
path = os.path.dirname(filename)
if not os.path.isdir(path):
os.makedirs(path)
_dest = open(filename, 'wb')
_dest.write(bundle.read(name))
_dest.close()
bundle.close()
return namelist | 5e8af22a446e52c26b99b71fefdd29d3b10e02ec | 3,659,290 |
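# Usage sketch: build a tiny zip in a temporary directory, then extract it with
# the helper above; prints the archive's name list.
if __name__ == "__main__":
    import tempfile
    tmpdir = tempfile.mkdtemp()
    src = os.path.join(tmpdir, "demo.zip")
    with zipfile.ZipFile(src, "w") as zf:
        zf.writestr("folder/hello.txt", "hi")
    print(extract_zip(src, os.path.join(tmpdir, "out")))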
def _find_timepoints_1D(single_stimulus_code):
"""
Find the indexes where the value of single_stimulus_code turn from zero to non_zero
single_stimulus_code : 1-D array
>>> _find_timepoints_1D([5,5,0,0,4,4,4,0,0,1,0,2,0])
array([ 0, 4, 9, 11])
>>> _find_timepoints_1D([0,0,1,2,3,0,1,0,0])
array([2, 6])
>>> _find_timepoints_1D([0,0,1,2,0,1])
array([2, 5])
>>> _find_timepoints_1D([5,0,0,1,2,5])
array([0, 3])
"""
flag = True # whether have seen 0 so far
timepoints = []
for index, timepoint in enumerate(single_stimulus_code):
if timepoint != 0 and flag:
timepoints.append(index)
flag = False
if timepoint == 0 and not flag:
flag = True
return np.array(timepoints) | b2c3d08f229b03f9b9f5278fea4e25c25274d213 | 3,659,291 |
def stiffness_tric(
components: np.ndarray = None,
components_d: dict = None
) -> np.ndarray:
"""Generate triclinic fourth-order stiffness tensor.
Parameters
----------
components : np.ndarray
21 components of triclinic tensor, see
stiffness_component_dict
components_d : dictionary
dictionary with 21 components
of triclinic tensor, see
stiffness_component_dict
Returns
-------
np.ndarray
Fourth-order triclinic tensor with minor
and major symmetries
"""
out = np.zeros(shape=[3, 3, 3, 3])
if not isinstance(components, type(None)):
components_d = stiffness_component_dict(components)
for k, v in components_d.items():
i = [int(s)-1 for s in k]
out[i[0], i[1], i[2], i[3]] = v
# tt_l
out[i[1], i[0], i[2], i[3]] = v
# tt_r
out[i[0], i[1], i[3], i[2]] = v
out[i[1], i[0], i[3], i[2]] = v # + tt_l
# tt_m
out[i[2], i[3], i[0], i[1]] = v
out[i[3], i[2], i[0], i[1]] = v # + tt_l
out[i[2], i[3], i[1], i[0]] = v # + tt_r
out[i[3], i[2], i[1], i[0]] = v # + tt_l + tt_r
return out | f96a2ffb4e0542f56a4329393b77e9a875dc6cd5 | 3,659,292 |
from typing import Optional
from pathlib import Path
def get_dataset(
dataset_name: str,
path: Optional[Path] = None,
regenerate: bool = False,
) -> TrainDatasets:
"""
Get the repository dataset.
Currently only [Retail Dataset](https://archive.ics.uci.edu/ml/datasets/online+retail) is available
Parameters:
dataset_name:
name of the dataset, for instance "retail"
regenerate:
whether to regenerate the dataset even if a local file is present.
If this flag is False and the file is present, the dataset will not
be downloaded again.
path:
where the dataset should be saved
Returns:
dataset obtained by either downloading or reloading from local file.
"""
if path is None:
path = default_dataset_path
dataset_path = materialize_dataset(dataset_name, path, regenerate)
return load_datasets(
metadata=dataset_path,
train=dataset_path / "train",
test=dataset_path / "test",
) | f913f613858c444ddac479d65a169b74a9b4db29 | 3,659,293 |
def get_info_safe(obj, attr, default=None):
"""safely retrieve @attr from @obj"""
try:
oval = obj.__getattribute__(attr)
except:
logthis("Attribute does not exist, using default", prefix=attr, suffix=default, loglevel=LL.WARNING)
oval = default
return oval | 24b4bace8a8cef16d7cddc44238a24dd636f6ca8 | 3,659,294 |
def mkviewcolbg(view=None, header=u'', colno=None, cb=None,
width=None, halign=None, calign=None,
expand=False, editcb=None, maxwidth=None):
"""Return a text view column."""
i = gtk.CellRendererText()
if cb is not None:
i.set_property(u'editable', True)
i.connect(u'edited', cb, colno)
if calign is not None:
i.set_property(u'xalign', calign)
j = gtk.TreeViewColumn(header, i, background=colno)
if halign is not None:
j.set_alignment(halign)
if expand:
if width is not None:
j.set_min_width(width)
j.set_expand(True)
else:
if width is not None:
j.set_min_width(width)
if maxwidth is not None:
j.set_max_width(maxwidth)
view.append_column(j)
if editcb is not None:
i.connect(u'editing-started', editcb)
return i | 7b49154d9d26cc93f5e42116967634eddc06a06e | 3,659,295 |
def list2str(lst, indent=0, brackets=True, quotes=True):
"""
Generate a Python syntax list string with an indention
:param lst: list
:param indent: indention as integer
:param brackets: surround the list expression by brackets as boolean
:param quotes: surround each item with quotes
:return: string
"""
if quotes:
lst_str = str(lst)
if not brackets:
lst_str = lst_str[1:-1]
else:
lst_str = ', '.join(lst)
if brackets:
lst_str = '[' + lst_str + ']'
lb = ',\n' + indent*' '
return lst_str.replace(', ', lb) | ef441632bf59714d3d44ede5e78835625b41f047 | 3,659,296 |
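# Usage sketch: render a list as Python source, with and without quoting/brackets.
if __name__ == "__main__":
    print(list2str(["alpha", "beta", "gamma"], indent=4))
    print(list2str(["alpha", "beta"], brackets=False, quotes=False))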
import os
def full_path(path):
"""
Get an absolute path.
"""
if path[0] == "/":
return path
return os.path.realpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", path)
) | 0ea845638d541521277fac3904cb1c1b243e88b1 | 3,659,297 |
def _roi_pool_shape(op):
"""Shape function for the RoiPool op.
"""
dims_data = op.inputs[0].get_shape().as_list()
channels = dims_data[3]
dims_rois = op.inputs[1].get_shape().as_list()
num_rois = dims_rois[0]
pooled_height = op.get_attr('pooled_height')
pooled_width = op.get_attr('pooled_width')
output_shape = tf.TensorShape([num_rois, pooled_height, pooled_width, channels])
return [output_shape, output_shape] | 9c84aa0054dacacefcdf2fd9066538239668ee66 | 3,659,298 |
from typing import Optional
def get_users(*, limit: int, order_by: str = "id", offset: Optional[str] = None) -> APIResponse:
"""Get users"""
appbuilder = current_app.appbuilder
session = appbuilder.get_session
total_entries = session.query(func.count(User.id)).scalar()
to_replace = {"user_id": "id"}
allowed_filter_attrs = [
"user_id",
'id',
"first_name",
"last_name",
"user_name",
"email",
"is_active",
"role",
]
query = session.query(User)
query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)
users = query.offset(offset).limit(limit).all()
return user_collection_schema.dump(UserCollection(users=users, total_entries=total_entries)) | 5ef71bfcc79314f0e9481dfa78a8e079dce14339 | 3,659,299 |