content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def c4x(c: Circuit, c0: int, c1: int, c2: int, c3: int, t: int) -> Circuit:
"""A macro of 4-controlled X gate"""
return c.h[t].c4z(c0, c1, c2, c3, t).h[t] | 89b5d790a70448a1d46452554ab234e113e63c59 | 500 |
def pad(data, pad_id):
""" Pad all lists in data to the same length. """
width = max(len(d) for d in data)
return [d + [pad_id] * (width - len(d)) for d in data] | a0951f4332879600d25c061cf1c553126d6df8d2 | 501 |
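A minimal usage sketch of the `pad` helper above; the data values are hypothetical and the function is assumed to be in scope.
```python
# Ragged input lists are right-padded with pad_id up to the longest length.
data = [[1, 2, 3], [4], [5, 6]]
assert pad(data, pad_id=0) == [[1, 2, 3], [4, 0, 0], [5, 6, 0]]
```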
import cv2
import numpy as np
from netharn import util
def draw_boxes_on_image(img, boxes, color='blue', thickness=1,
box_format=None):
"""
Example:
>>> from netharn import util
>>> img = np.zeros((10, 10, 3), dtype=np.uint8)
>>> color = 'blue'
>>> thickness = 1
>>> boxes = util.Boxes([[1, 1, 8, 8]], 'tlbr')
>>> img2 = draw_boxes_on_image(img, boxes, color, thickness)
>>> # xdoc: +REQUIRES(--show)
>>> from netharn.util import mplutil
>>> mplutil.autompl() # xdoc: +SKIP
>>> mplutil.figure(doclf=True, fnum=1)
>>> mplutil.imshow(img2)
"""
if not isinstance(boxes, util.Boxes):
if box_format is None:
raise ValueError('specify box_format')
boxes = util.Boxes(boxes, box_format)
color = tuple(util.Color(color).as255('bgr'))
tlbr = boxes.to_tlbr().data
img2 = img.copy()
for x1, y1, x2, y2 in tlbr:
# pt1 = (int(round(x1)), int(round(y1)))
# pt2 = (int(round(x2)), int(round(y2)))
pt1 = (int(x1), int(y1))
pt2 = (int(x2), int(y2))
img2 = cv2.rectangle(img2, pt1, pt2, color, thickness=thickness)
return img2 | 3c4a3b547d39bac940ea9f6999a98f2db62f938b | 502 |
from random import randint
def _select_random_features(feature_list, amount):
"""Selects a given amount of random features from the feature list"""
set_size = len(feature_list) -1
random_features = []
for i in range(amount):
while(True):
random_feature = feature_list[randint(0, set_size)]
if(random_feature in random_features):
continue
else:
random_features.append(random_feature)
break
return random_features | e281bfa75e153aa195119f84777f41db9d5e806c | 503 |
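The retry loop in `_select_random_features` draws distinct items; the same effect can be had with `random.sample`, which draws without replacement. A sketch of that alternative (not the original author's code):
```python
from random import sample

def select_random_features(feature_list, amount):
    """Select `amount` distinct random features using random.sample."""
    return sample(feature_list, amount)
```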
def matrixop_inp_matr():
    """
    Returns a matrix entered by the user from the keyboard.
    Returns
    -------
    a : [[float, float, ...],
         [float, float, ...],
         ...]
        The matrix entered by the user
    """
    while True:
        try:
            m = int(input('How many rows will the matrix have? '))
        except:
            print('That is not a number')
        else:
            if m > 0:
                break
            else:
                print('That is not a positive integer')
    while True:
        try:
            n = int(input('How many columns will the matrix have? '))
        except:
            print('That is not a number')
        else:
            if n > 0:
                break
            else:
                print('That is not a positive integer')
    print("Enter the matrix elements (filled in row by row)")
    a = []
    for i in range(m):
        a.append([])
        for j in range(n):
            while True:
                try:
                    print(f'Enter element a[{i+1}][{j+1}]')
                    elem = eval(input())
                except:
                    print('That is not a number')
                else:
                    break
            a[i].append(elem)
    return a | c373af0c7493ff32f919d903644b2031cc51162c | 504 |
import os
import numpy as np
def gen_info(run_event_files):
"""Generate subject_info structure from a list of event files
"""
info = []
for i, event_files in enumerate(run_event_files):
runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[])
for event_file in event_files:
_, name = os.path.split(event_file)
if '.run' in name:
name, _ = name.split('.run%03d'%(i+1))
elif '.txt' in name:
name, _ = name.split('.txt')
runinfo.conditions.append(name)
event_info = np.atleast_2d(np.loadtxt(event_file))
runinfo.onsets.append(event_info[:, 0].tolist())
if event_info.shape[1] > 1:
runinfo.durations.append(event_info[:, 1].tolist())
else:
runinfo.durations.append([0])
if event_info.shape[1] > 2:
runinfo.amplitudes.append(event_info[:, 2].tolist())
else:
delattr(runinfo, 'amplitudes')
info.append(runinfo)
return info | 0e347231c811c39602530e05f3f767d8b91ba243 | 505 |
def dropannotation(annotation_list):
"""
    Return the text preceding the first '#' annotation marker in annotation_list
"""
target = ""
for c in annotation_list:
if not c == "#":
target += c
else:
return target
return target | 9f4a695eaf80f79dce943f2f91926d9c823483b6 | 506 |
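A small usage sketch for `dropannotation` above, with hypothetical inputs; it returns everything before the first '#' character.
```python
assert dropannotation("ATG#promoter annotation") == "ATG"
assert dropannotation("no annotation here") == "no annotation here"
```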
def do_associate_latest_edit(parser, token):
"""
AssociateLatestEdit
"""
try:
tag, node = token.split_contents()
except ValueError:
        raise template.TemplateSyntaxError("%r tag requires one argument" % token.contents.split()[0])
return AssociateLatestEdit(node) | 75cf36f1cccd2191636f3cb603503c6655ae0c67 | 507 |
import sys
import MySQLdb
def open_monitoring_db(dbhost, dbuser, dbpass, database):
    """
    Open MySQL monitoring DB
    """
    try:
        conn = MySQLdb.connect(host=dbhost, user=dbuser,
                               passwd=dbpass, db=database)
    except MySQLdb.Error as err:
        print("Error %d: %s" % (err.args[0], err.args[1]))
        sys.exit(1)
    return conn | db33430d20c7c72c7428d85f161d1c186404dc05 | 508 |
import numpy as np
import matplotlib.pyplot as plt
def matdiff(matrix1, matrix2, figsize=None, cmap=None):
    """
    display the difference between two real matrices and, alongside that plot,
    show the same difference on a log colour scale (if the difference is nonzero)
    """
if not figsize:
figsize = defaults['figsize']
if not cmap:
cmap = defaults['cmap']
_matdiff = matrix1-matrix2
f, (ax1, ax2) = plt.subplots(1,2,figsize=(2*figsize[0],figsize[1]))
imreal = ax1.imshow(_matdiff,interpolation='nearest',cmap=cmap)
f.colorbar(imreal,ax=ax1)
# trying to plot the log-scale diff will fail if the difference is zero everywhere
if not np.all(_matdiff==np.zeros(_matdiff.shape)):
imimag = ax2.imshow(np.log10(np.abs(_matdiff)),interpolation='nearest',cmap=cmap)
f.colorbar(imimag,ax=ax2)
return f | fb11354f7388e461ac49bcac942a9b6a2b5528d4 | 509 |
from collections import defaultdict
def _tokens_by_class_of(tokens):
"""Generates lookup table of tokens in each class."""
out = defaultdict(set)
for token, token_classes in tokens.items():
for token_class in token_classes:
out[token_class].add(token)
return out | c335582785e4c8a5b82232849ccee579f5ab068f | 510 |
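A usage sketch for `_tokens_by_class_of`, assuming it is in scope; the token-to-classes mapping below is hypothetical.
```python
tokens = {"if": {"keyword"}, "else": {"keyword"}, "+": {"operator"}}
by_class = _tokens_by_class_of(tokens)
assert by_class["keyword"] == {"if", "else"}
assert by_class["operator"] == {"+"}
```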
def load_mnist_dataset(shape=(-1, 784), path='data'):
"""Load the original mnist.
Automatically download MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 digit images respectively.
Parameters
----------
shape : tuple
The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)).
path : str
The path that the data is downloaded to.
Returns
-------
X_train, y_train, X_val, y_val, X_test, y_test: tuple
        Return the split training/validation/test sets respectively.
Examples
--------
>>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1,784), path='datasets')
>>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
"""
return _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/') | 856a758864ef85b4a5bea742dd90a49e997ad9b7 | 511 |
import logging
import os
import scipy.stats
import json
def entropy_of_states(probabilities, output_path, n_clusters):
"""
Computes the entropy of probabilities of states
:param probabilities: array with states probabilities
:type probabilities: np.ndarray
:param output_path: path to output directory
:type output_path: str
:param n_clusters: number of clusters
:type: n_clusters: int
:return: entropy: calculated entropy
    :rtype: float
"""
logging.basicConfig(
filename=os.path.join(output_path, 'entropy_n_clusters_{}.log'.format(
n_clusters)),
level=logging.INFO)
entropy = scipy.stats.entropy(probabilities)
logging.info('State {} entropy is {}'.format(n_clusters, entropy))
    entropy_dict = {'State': n_clusters, 'entropy': entropy}
    with open(os.path.join(output_path, 'entropy.json'), 'w') as fp:
        json.dump(entropy_dict, fp)
return entropy | e67ae313b22a2bf335575b71a812eb7bd3614aec | 512 |
def EntryToSlaveName(entry):
"""Produces slave name from the slaves config dict."""
name = entry.get('slavename') or entry.get('hostname')
if 'subdir' in entry:
return '%s#%s' % (name, entry['subdir'])
return name | 258e68c683592c21ea8111f21ba3ab648ddb8c57 | 513 |
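A usage sketch for `EntryToSlaveName`, with hypothetical config entries, showing both the plain and the `subdir` cases.
```python
assert EntryToSlaveName({'slavename': 'slave1'}) == 'slave1'
assert EntryToSlaveName({'hostname': 'host2', 'subdir': '0'}) == 'host2#0'
```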
from flask import Flask
from logging.handlers import RotatingFileHandler
from app.main import bp as main_bp
from app.errors import bp as errors_bp
from app.api import bp as api_bp
import os
import logging
def create_app(config_class=Config):
"""
Constructs a Flask application instance.
Parameters
----------
config_class: class that stores the configuration variables.
Returns
-------
app : Flask application
"""
app = Flask(__name__)
app.config.from_object(config_class)
bootstrap.init_app(app)
app.register_blueprint(main_bp)
app.register_blueprint(errors_bp)
app.register_blueprint(api_bp, url_prefix='/api')
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/oslo-bysykkel-monitor.log',
maxBytes=10240, backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Oslo Bysykkel Monitor startup')
return app | fe84f5ef3239a7b0c6a2d9a948ca44d413ab4d81 | 514 |
def is_symmetric_re(root: TreeNode) -> bool:
"""Check if a binary tree is a mirror of itself (symmetric around its center)."""
if not root:
return False
def is_mirror(t1, t2):
if not t1 and not t2:
return True
if not t1 or not t2:
return False
return t1.val == t2.val and is_mirror(t1.left, t2.right) and is_mirror(t1.right, t2.left)
return is_mirror(root, root) | b2d0450a881e0a1748575baa8d7c6ae1224fb3c0 | 515 |
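A sketch exercising `is_symmetric_re` with a minimal `TreeNode` stand-in; the class shape is an assumption, not the original definition.
```python
class TreeNode:
    """Minimal binary-tree node assumed by is_symmetric_re."""
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

mirror = TreeNode(1, TreeNode(2, TreeNode(3), None), TreeNode(2, None, TreeNode(3)))
skewed = TreeNode(1, TreeNode(2, None, TreeNode(3)), TreeNode(2, None, TreeNode(3)))
assert is_symmetric_re(mirror) is True
assert is_symmetric_re(skewed) is False
```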
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up paperless from a config entry."""
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True | 8793312f379c510e3420e785eba6ce4db3f098c7 | 516 |
import numpy as np
def azimuthal_average(image, center=None, stddev=True, binsize=0.5, interpnan=False):
"""
Modified based on https://github.com/keflavich/image_tools/blob/master/image_tools/radialprofile.py
Calculate the azimuthally averaged radial profile.
Parameters:
        image (numpy ndarray): 2-D image
center (list): [x, y] pixel coordinates. If None, use image center.
Note that x is horizontal and y is vertical, y, x = image.shape.
        stddev (bool): if True, the stddev of profile will also be returned.
binsize (float): size of the averaging bin. Can lead to strange results if
non-binsize factors are used to specify the center and the binsize is
too large.
interpnan (bool): Interpolate over NAN values, i.e. bins where there is no data?
Returns:
        If `stddev == True`, it will return [radius, profile, stddev];
else, it will return [radius, profile].
"""
# Calculate the indices from the image
y, x = np.indices(image.shape)
if center is None:
center = np.array([(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0])
r = np.hypot(x - center[0], y - center[1])
# The 'bins' as initially defined are lower/upper bounds for each bin
# so that values will be in [lower,upper)
nbins = int(np.round(r.max() / binsize) + 1)
maxbin = nbins * binsize
bins = np.linspace(0, maxbin, nbins + 1)
# We're probably more interested in the bin centers than their left or right sides...
bin_centers = (bins[1:] + bins[:-1]) / 2.0
# There are never any in bin 0, because the lowest index returned by digitize is 1
nr = np.histogram(r, bins)[0] # nr is how many pixels are within each bin
# Radial profile itself
profile = np.histogram(r, bins, weights=image)[0] / nr
if interpnan:
profile = np.interp(bin_centers, bin_centers[~np.isnan(profile)],
profile[~np.isnan(profile)])
if stddev:
# Find out which radial bin each point in the map belongs to
# recall that bins are from 1 to nbins
whichbin = np.digitize(r.ravel(), bins)
profile_std = np.array([image.ravel()[whichbin == b].std() for b in range(1, nbins + 1)])
        profile_std /= np.sqrt(nr)  # standard error of the mean
return [bin_centers, profile, profile_std]
else:
return [bin_centers, profile] | 3ebadf5fa93cc93e6a5b14327392a2fdecb5d266 | 517 |
import re
def find_assign(data, varname):
"""Finds a substring that looks like an assignment.
:param data: Source to search in.
:param varname: Name of the variable for which an assignment should be
found.
"""
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
if len(ASSIGN_RE.findall(data)) > 1:
raise PluginError('Found multiple {}-strings.'.format(varname))
if len(ASSIGN_RE.findall(data)) < 1:
raise PluginError('No version assignment ("{}") found.'
.format(varname))
return ASSIGN_RE.search(data).group(2) | 99e1d1436307dd278fbef8b7e52c4d2eedd6d657 | 518 |
import requests
def remove(token: str, server: str="http://localhost:8080/remove", params: dict=None) -> int:
"""
Removes the data associated with the token.
    :param token: the token identifying the data to remove
    :type token: str
    :param server: the URL of the server to send the removal request to
    :type server: str
    :param params: the additional parameters to send to the server, eg login information (user/password)
    :type params: dict
    :return: the status code, None if the request failed
:rtype: int
"""
if params is None:
files = {}
else:
files = params.copy()
files['token'] = token
r = requests.post(server, files=files)
return r.status_code | a74f2c5f84ae064a909df717917f4589b59eacb5 | 519 |
import requests
def get_pending_surveys_batch_number(batch_no):
"""
Gets batch number for the shared survey
:param batch_no: Shared survey batch number
:type batch_no: str
:raises ApiError: Raised when party returns api error
    :return: list of shared surveys
"""
bound_logger = logger.bind(batch_no=batch_no)
bound_logger.info("Attempting to retrieve share surveys by batch number")
url = f"{app.config['PARTY_URL']}/party-api/v1/pending-surveys/{batch_no}"
response = requests.get(url, auth=app.config["BASIC_AUTH"])
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
bound_logger.error("Failed to retrieve share surveys by batch number")
raise ApiError(logger, response)
bound_logger.info("Successfully retrieved share surveys by batch number")
return response | ee31c28c393e29b6cd628aefc38d2ca948c7cdaf | 520 |
def before_after_to_box(element, pseudo_type, state, style_for,
get_image_from_uri, target_collector):
"""Return the boxes for ::before or ::after pseudo-element."""
style = style_for(element, pseudo_type)
if pseudo_type and style is None:
# Pseudo-elements with no style at all do not get a style dict.
# Their initial content property computes to 'none'.
return []
# TODO: should be the computed value. When does the used value for
    # `display` differ from the computed value? It's at least wrong for
# `content` where 'normal' computes as 'inhibit' for pseudo elements.
display = style['display']
content = style['content']
if 'none' in (display, content) or content in ('normal', 'inhibit'):
return []
box = make_box('%s::%s' % (element.tag, pseudo_type), style, [])
quote_depth, counter_values, _counter_scopes = state
update_counters(state, style)
children = []
outside_markers = []
if display == 'list-item':
marker_boxes = marker_to_box(
element, state, style, style_for, get_image_from_uri,
target_collector)
if marker_boxes:
if style['list_style_position'] == 'outside':
outside_markers.extend(marker_boxes)
else:
children.extend(marker_boxes)
children.extend(content_to_boxes(
style, box, quote_depth, counter_values, get_image_from_uri,
target_collector))
box.children = children
return outside_markers + [box] | 2c34523b7e4ba690d54fd3622e903976a7629b3c | 521 |
def make_sign_initializer(random_sign_init):
"""Random sign intitializer for HyperBatchEnsemble layers."""
if random_sign_init > 0:
return ed.initializers.RandomSign(random_sign_init)
else:
return tf.keras.initializers.RandomNormal(
mean=1.0, stddev=-random_sign_init) | e8ea2653ef9c7c5ba447921d7def990e29a7c9b2 | 522 |
def _parallel_predict_proba(ensemble, X, idx, results):
"""
Compute predictions of SCM estimators
"""
for k in idx:
res = ensemble.estimators[k].predict(X[:, ensemble.estim_features[k]])
results = results + res
return results | b0a2d5c59318506202c9331597ab2a11eacb7a32 | 523 |
def compute_FP_TP_Probs(Ycorr, Xcorr, Probs, is_tumor, evaluation_mask, Isolated_Tumor_Cells, level):
"""Generates true positive and false positive stats for the analyzed image
Args:
Probs: list of the Probabilities of the detected lesions
Xcorr: list of X-coordinates of the lesions
Ycorr: list of Y-coordinates of the lesions
        is_tumor: A boolean variable which is one when the case contains tumor
evaluation_mask: The evaluation mask
Isolated_Tumor_Cells: list of labels containing Isolated Tumor Cells
level: The level at which the evaluation mask was made
Returns:
FP_probs: A list containing the probabilities of the false positive detections
TP_probs: A list containing the probabilities of the True positive detections
        NumberOfTumors: Number of Tumors in the image (excluding Isolated Tumor Cells)
detection_summary: A python dictionary object with keys that are the labels
of the lesions that should be detected (non-ITC tumors) and values
that contain detection details [confidence score, X-coordinate, Y-coordinate].
Lesions that are missed by the algorithm have an empty value.
FP_summary: A python dictionary object with keys that represent the
false positive finding number and values that contain detection
details [confidence score, X-coordinate, Y-coordinate].
"""
max_label = np.amax(evaluation_mask)
FP_probs = []
TP_probs = np.zeros((max_label,), dtype=np.float32)
detection_summary = {}
FP_summary = {}
for i in range(1, max_label + 1):
if i not in Isolated_Tumor_Cells:
label = 'Label ' + str(i)
detection_summary[label] = []
FP_counter = 0
if (is_tumor):
for i in range(0, len(Xcorr)):
            # note: the x, y coordinates are switched, and they are cast to int
            # so that they can be used to index the evaluation_mask array
            #HittedLabel = evaluation_mask[int(Xcorr[i] / pow(2, level)), int(Ycorr[i] / pow(2, level))]
HittedLabel = evaluation_mask[int(
Ycorr[i]/pow(2, level)), int(Xcorr[i]/pow(2, level))]
print(HittedLabel)
# HittedLabel = evaluation_mask[int(Ycorr[i]/pow(2, level)), int(Xcorr[i]/pow(2, level))]
# HittedLabel = evaluation_mask[Ycorr[i]/pow(2, level), Xcorr[i]/pow(2, level)]
if HittedLabel == 0:
FP_probs.append(Probs[i])
key = 'FP ' + str(FP_counter)
FP_summary[key] = [Probs[i], Xcorr[i], Ycorr[i]]
FP_counter += 1
elif HittedLabel not in Isolated_Tumor_Cells:
if (Probs[i] > TP_probs[HittedLabel - 1]):
label = 'Label ' + str(HittedLabel)
detection_summary[label] = [Probs[i], Xcorr[i], Ycorr[i]]
TP_probs[HittedLabel - 1] = Probs[i]
else:
for i in range(0, len(Xcorr)):
FP_probs.append(Probs[i])
key = 'FP ' + str(FP_counter)
FP_summary[key] = [Probs[i], Xcorr[i], Ycorr[i]]
FP_counter += 1
print(FP_counter)
num_of_tumors = max_label - len(Isolated_Tumor_Cells)
    # just for diagnosis
print('number of isolated tumor cells =', len(Isolated_Tumor_Cells))
return FP_probs, TP_probs, num_of_tumors, detection_summary, FP_summary | 434abefeba56201d20799b1f00783bc77dcbf2c0 | 524 |
def make_sentences(text, src):
"""
Builds a list of dictionaries, one for each sentence resulting from
the sentence parser. The dictionary schema is
{"src": src, "label": 0, "sentence": sent}
Substitutions are made for the identified tokens.
Args:
text (str): text to process
src (str): identifier (file name) to include in the output
Returns:
List[Dict]
"""
no_sec = True
text = text.replace(USC_DOT, USC)
text = text.replace(PL, PL_SPACE)
text = text.replace(EO, EO_SPACE)
sents = [scrubber(sent, no_sec=no_sec) for sent in sent_tokenize(text)]
sent_list = list()
for sent in sents:
if not sent:
continue
sent_list.append({"src": src, "label": 0, "sentence": sent})
return sent_list | 5da57a55f76a3d4d29f1d0ed681b8597e958b9d0 | 525 |
def read_test_case(file_path):
"""
reads one test case from file.
returns contents of test case
Parameters
----------
file_path : str
the path of the test case file to read.
Returns
-------
list
a list of contents of the test case.
"""
file = open(file_path, "r")
number = int(file.readline().strip())
case = list()
for i in range(number):
case.append(file.readline().strip())
file.close()
return case | 6a87ff979d0b1ccf838ebef56401a48760711541 | 526 |
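A round-trip sketch for `read_test_case`, writing a temporary file in the assumed format (first line is the count, followed by that many content lines).
```python
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("2\nfirst case line\nsecond case line\n")
    path = f.name
assert read_test_case(path) == ["first case line", "second case line"]
os.remove(path)
```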
def add_checkbox_column(col_list, row_list, checkbox_pos=1):
"""Insert a new column into the list of column dictionaries so that it
is the second column dictionary found in the list. Also add the
checkbox column header to the list of row dictionaries and
subsequent checkbox value
'col_list'- a list of dictionaries that defines the column
structure for the table (required). The order of the
columns from left to right is depicted by the index
of the column dictionary in the list. Each dictionary
in the list has the following keys and values:
'name' - a string for the column name (required)
'total' - a boolean for whether the column should be
totaled (required)
'row_list' - a list of dictionaries that represent the rows. Each
dictionaries keys should match the column names found in
'col_list' (required) Example:
[{col_name_1: value, col_name_2: value, ...},
{col_name_1: value, col_name_2: value, ...},
...]
checkbox_pos - an integer for the position of the checkbox
column. Defaulted at 1 (optional)
returns - a tuple of the updated column and rows list of dictionaries
in that order"""
LOGGER.debug('Adding a checkbox column to the column structure')
# Insert a new column dictionary in the list in the second spot
col_list.insert(checkbox_pos, {'name':'Select', 'total':False,
'attr':{'class':'checkbox'}, 'td_class':'checkbox'})
# For each dictionary in the row list add a 'Select' key which
# refers to the new column and set the value as a checkbox
for val in row_list:
val['Select'] = '<input type=checkbox name=cb value=1>'
# Return a tuple of the updated / modified column and row list of
# dictionaries
return (col_list, row_list) | a259d933ecf91f9c5e9dc9d14122a034eaf5f61f | 527 |
import torch
def accuracy4batch(model, testloader, criterion):
"""save a model checkpoint
INPUT:
model: pytorch nn model.
testloader: DataLoader. test data set
criterion: criterion. loss criterion
device: torch.device. device on which model/data is based
OUTPUT:
accuracy: float in [0:1]. percenct proportion of correct classifications in testloader
test_loss: float. absolute error
"""
test_loss = 0
accuracy = 0
model.eval()
with torch.no_grad():
for inputs, labels in testloader:
inputs, labels = inputs.to(model.device), labels.to(model.device)
logps = model.forward(inputs)
batch_loss = criterion(logps, labels)
test_loss += batch_loss.item()
# Calculate accuracy
ps = torch.exp(logps)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
accuracy = accuracy/len(testloader)
return accuracy, test_loss | 2005984b94f17bf601034953bbea3dca6542143d | 528 |
def AmendmentLinks(amendment, users_by_id, project_name):
"""Produce a list of value/url pairs for an Amendment PB.
Args:
amendment: Amendment PB to display.
users_by_id: dict {user_id: user_view, ...} including all users
mentioned in amendment.
    project_name: Name of project the issue/comment/amendment is in.
Returns:
A list of dicts with 'value' and 'url' keys. 'url' may be None.
"""
# Display both old and new summary
if amendment.field == tracker_pb2.FieldID.SUMMARY:
result = amendment.newvalue
if amendment.oldvalue:
result += ' (was: %s)' % amendment.oldvalue
return [{'value': result, 'url': None}]
# Display new owner only
elif amendment.field == tracker_pb2.FieldID.OWNER:
if amendment.added_user_ids and amendment.added_user_ids[0] > 0:
uid = amendment.added_user_ids[0]
return [{'value': users_by_id[uid].display_name, 'url': None}]
else:
return [{'value': framework_constants.NO_USER_NAME, 'url': None}]
elif amendment.field in (tracker_pb2.FieldID.BLOCKEDON,
tracker_pb2.FieldID.BLOCKING,
tracker_pb2.FieldID.MERGEDINTO):
values = amendment.newvalue.split()
bug_refs = [_SafeParseIssueRef(v.strip()) for v in values]
issue_urls = [FormatIssueURL(ref, default_project_name=project_name)
for ref in bug_refs]
# TODO(jrobbins): Permission checks on referenced issues to allow
# showing summary on hover.
return [{'value': v, 'url': u} for (v, u) in zip(values, issue_urls)]
elif amendment.newvalue:
# Catchall for everything except user-valued fields.
return [{'value': v, 'url': None} for v in amendment.newvalue.split()]
else:
# Applies to field==CC or CUSTOM with user type.
values = _PlusMinusString(
[users_by_id[uid].display_name for uid in amendment.added_user_ids
if uid in users_by_id],
[users_by_id[uid].display_name for uid in amendment.removed_user_ids
if uid in users_by_id])
return [{'value': v.strip(), 'url': None} for v in values.split()] | 9518bc4082bcecc50af8906869426806172b8022 | 529 |
def pick_an_experiment(i):
"""
Input: {
(repo_uoa) - experiment repository name (defaults to hackathon_local_repo, but can be overridden by '*')
(extra_tags) - extra tags to filter
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
repo_uoa = i.get('repo_uoa', hackathon_local_repo)
extra_tags = i.get('extra_tags')
list_exp_adict = { 'action': 'list_experiments',
'module_uoa': work['self_module_uoa'],
'repo_uoa': repo_uoa,
'extra_tags': extra_tags,
}
r=ck.access( list_exp_adict )
if r['return']>0: return r
if len(r['lst'])==0:
return {'return':1, 'error':'No experiments to choose from - please relax your filters'}
all_experiment_names = [ '{repo_uoa}:{module_uoa}:{data_uoa}'.format(**entry_dict) for entry_dict in r['lst']]
number_of_experiments = len(all_experiment_names)
select_adict = {'action': 'select_string',
'module_uoa': 'misc',
'options': all_experiment_names,
'default': str(number_of_experiments-1),
'question': 'Please select the experiment entry',
}
r=ck.access( select_adict )
if r['return']>0:
return r
else:
cid = r['selected_value']
return {'return':0, 'cid': cid} | 97671967ab153173280a142143fb4b07892c92fa | 530 |
import uuid
import pathlib
def run(args, image: str) -> str:
"""
Run docker image and mount user-provided folder with C++ files.
Parameters
----------
args : dict-like
User provided arguments parsed by argparse.ArgumentParser instance.
image : str
Name of image from which container is run
Returns
-------
str:
Name of created container. Consist of torchlambda prefix and random string
"""
def _add_components(args):
return (
'"' + ";".join(args.aws_components) + '"'
if args.aws_components
else '"core"'
)
def _compilation(args):
return '"' + args.compilation + '"' if args.compilation else ""
container_name = "torchlambda-" + str(uuid.uuid4())
source_directory = pathlib.Path(args.source).absolute()
if source_directory.is_dir():
command = "docker {} run {} -v {}:/usr/local/user_code --name {} {} {} ".format(
*general.parse_none(
args.docker,
args.docker_run,
source_directory,
container_name,
image,
_add_components(args),
)
)
command += _compilation(args)
general.run(
command,
operation="building inference AWS Lambda package.",
silent=args.silent,
)
return container_name
print("torchlambda:: Provided source files are not directory, exiting.")
exit(1) | eba0ab0e93543410ae4a2375ec80a9015168d303 | 531 |
def get_top_design_list(oProject):
"""
Returns a list of the names of the top-level designs.
Parameters
----------
oProject : pywin32 COMObject
The HFSS project in which the operation will be performed.
Returns
-------
design_list : list of str
The top-level design list.
"""
design_list = list(oProject.GetTopDesignList())
    return list(map(str, design_list)) | 6610cd68a90e20fd916a2ec13b54f37a75c31050 | 532 |
from typing import Union
from typing import Dict
from typing import Any
def chi01(param_name: Union[str, None], yval: float, **kwargs) -> Dict[str, Any]:
"""Plot defaults for sweep_plotting.chi01"""
kwargs["xlabel"] = kwargs.get("xlabel") or recast_name(param_name)
kwargs["ylabel"] = kwargs.get("ylabel") or r"$\chi_{{01}}$ [{}]".format(
units.get_units()
)
kwargs["title"] = kwargs.get("title") or r"$\chi_{{01}}=${:.4f} {}".format(
yval, units.get_units()
)
return kwargs | 58e5d09152062a9307526bd953ff91832ef80321 | 533 |
def cstring(*args, **kwargs):
"""Return a colored string.
Parameters
----------
args : iterable of str
bold : bool
color : str, {'HEADER', 'LIGHTBLUE', 'LIGHTGREEN', 'WARNING', 'FAIL',
'ENDC', 'BOLD', 'UNDERLINE' 'BLACK', 'RED', 'GREEN',
'YELLOW', 'BLUE', 'MAGENTA', 'CYAN', 'WHITE'}
Terminal color to use.
"""
args, kwargs = _colorize(*args, **kwargs)
cstr = " ".join(args)
return cstr | 69bc81f0e9267743297bec25b2fccc8f2da2c89d | 534 |
from flask import Flask
from . import routes  # Import routes
def create_app():
"""Construct the core application."""
app = Flask(__name__, instance_relative_config=False)
app.config.from_object('config.Config')
db.init_app(app)
admin.init_app(app)
basic_auth.init_app(app)
with app.app_context():
db.create_all() # Create sql tables for our data models
admin.add_view(ArticleView(Articles, db.session))
return app | 28bfb75626a5be05aef21404956f6ef7adf8f80a | 535 |
def bytes_index(x: bytes, sub: bytes, start: int, end: int) -> int:
"""Where is the first location of a subsequence within a given slice of a bytes object?
Compiling bytes.index compiles this function, when sub is a bytes object.
This function is only intended to be executed in this compiled form.
Args:
x: The bytes object in which to search.
sub: The subsequence to look for.
start: Beginning of slice of x. Interpreted as slice notation.
end: End of slice of x. Interpreted as slice notation.
Returns:
Lowest index of match within slice of x.
Raises:
ValueError: If sub is not found.
"""
ret = bytes_find(x, sub, start, end)
if ret == -1:
raise ValueError("subsection not found")
return ret | 3a78e029a96d27fdf5b5cdea397b21f57ca939d9 | 536 |
import os
def pickup_path(start_path, filename, default=None):
"""pickupping the config file path
start path = "/foo/bar/boo", filename = "config.ini"
finding candidates are ["/foo/bar/boo/config.ini", "/foo/bar/config.ini", "/foo/config.ini", "/config.ini"]
"""
start_point = os.path.normpath(os.path.abspath(start_path))
current = start_point
candidates = []
while True:
candidates.append(os.path.join(current, filename))
if current == "/":
break
current, dropped = os.path.split(current)
for path in candidates:
if os.path.exists(path):
return path
return default | 2535871cbb1197dde07f41063114bd37b88149e7 | 537 |
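A POSIX-path sketch for `pickup_path`: the config file is placed two levels above the start path and the helper walks upwards until it finds it (directory names are hypothetical).
```python
import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "foo", "bar"))
open(os.path.join(root, "config.ini"), "w").close()
assert pickup_path(os.path.join(root, "foo", "bar"), "config.ini") == os.path.join(root, "config.ini")
```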
def uniform(minimum, maximum, shape=[]):
"""uniform(minimum, maximum, shape=[]) returns array of given shape of random reals
in given range"""
if shape == []:
shape = None
return mt.uniform(minimum, maximum, shape) | 882cd915cb7dfec0e1b6857f99ecefe876ae21b1 | 538 |
from urllib.parse import unquote
def unquote_to_bytes(urlencoded_string):
"""Replace %xx escapes by their single-character equivalent,
using the “iso-8859-1” encoding to decode all 8-bit values.
"""
return bytes(
unquote(urlencoded_string, encoding='iso-8859-1'),
encoding='iso-8859-1'
) | 60216d170381e356520c283f308add08754c987d | 539 |
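A quick check of `unquote_to_bytes` above: each %xx escape decodes to its single latin-1 byte (the example string is hypothetical). Note that the standard library also ships `urllib.parse.unquote_to_bytes`.
```python
assert unquote_to_bytes("a%20b%FF") == b"a b\xff"
```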
def qualifiedName(item):
"""Return the full name of an item, including any projects that it's in.
If the item does not have a name, return ``None``.
XXX: Doesn't include folders.
"""
names = []
# Note: assumes that the presence of a single null name in the parent tree
# means that the item is not properly named.
for i in iterParents(item):
name = i.name()
if name is None:
return None
names.append(name)
return " / ".join(reversed(names)) | 2a1cdc9e15897c104f63793041ed2d4fe91e383d | 540 |
def assign_columns_of_sector_levels(df_load):
"""
    Add additional columns capturing the sector length of the two sector columns
    :param df_load: df with at least one sector column
:return: df with new column for sector length
"""
df = replace_NoneType_with_empty_cells(df_load)
# load cw with column of sector levels
cw = load_sector_length_cw_melt()
# merge df assigning sector lengths
for s in ['Produced', 'Consumed']:
df = df.merge(cw, how='left', left_on=f'Sector{s}By',
right_on='Sector').drop(columns=['Sector']).rename(
columns={'SectorLength': f'Sector{s}ByLength'})
df[f'Sector{s}ByLength'] = df[f'Sector{s}ByLength'].fillna(0)
# There are cases where non-traditional sectors (non naics) have
# multiple naics assignments. If there is a non-zero value in the other
# sector length column, keep that row because sector lengths must always
# match.
# subset df into two dfs, one where one sector column length has a zero
# value and the second where both sector length columns have non-zero
# values
df1 = df[(df['SectorProducedByLength'] == 0) |
(df['SectorConsumedByLength'] == 0)]
df2 = df[(df['SectorProducedByLength'] != 0) &
(df['SectorConsumedByLength'] != 0)]
# only keep rows where the values are equal
df2e = df2[df2['SectorProducedByLength'] == df2['SectorConsumedByLength']]
# concat dfs
dfc = pd.concat([df1, df2e], ignore_index=True)
# check for duplicates. Rows might be duplicated if a sector is the same
# for multiple sector lengths
duplicate_cols = [e for e in dfc.columns if e not in [
'SectorProducedByLength', 'SectorConsumedByLength']]
duplicate_df = dfc[dfc.duplicated(subset=duplicate_cols,
keep=False)].reset_index(drop=True)
if len(duplicate_df) > 0:
log.warning('There are duplicate rows caused by ambiguous sectors.')
dfc = dfc.sort_values(['SectorProducedByLength',
'SectorConsumedByLength']).reset_index(drop=True)
return dfc | 4dd60267702f21e103cab18293975a5f62c934d2 | 541 |
def add_matrices(matrix_a, matrix_b):
"""Add two n x n matrices
"""
return [[x + y for x, y in zip(matrix_a[i], matrix_b[i])]
for i in range(len(matrix_a))] | a9f6a857892872fde584b6884e59a8b624220061 | 542 |
import numpy as np
import scipy.stats
import sklearn.utils
import tensorflow as tf
def no_pretrain_inner_speech(subject):
"""This function aims at training a model without pretraining by training
    only on the inner speech condition of a single subject
    :return: metric history for each of the n k-folds
    :rtype: list of dictionaries
"""
###### DATA
data, events = dp.load_data(subjects=[subject], filter_action=True)
# shuffle data and labels
data, events = sklearn.utils.shuffle(data, events)
# save memory by converting from 64bit to 32bit floats
data = data.astype(np.float32)
# filter out only the inner speech condition
data, events = dp.choose_condition(data, events, 'inner speech')
# select the column containing directions (up, down, left, right)
events = events[:, 1]
# one-hot event data
events = np_utils.to_categorical(events, 4)
# zscore normalize the data
data = scipy.stats.zscore(data, axis=2)
# reshape
data = data.reshape(*data.shape, 1)
print("Data Prepared.")
###### MODEL
gpus = tf.config.list_logical_devices('GPU')
mirrored_strategy = tf.distribute.MirroredStrategy(gpus)
with mirrored_strategy.scope():
# create EEGNet (source: https://github.com/vlawhern/arl-eegmodels)
model = EEGNet(nb_classes=4, Chans=data.shape[1],
Samples=data.shape[2], dropoutRate=DROPOUT,
kernLength=KERNEL_LENGTH, F1=8, D=2, F2=16,
dropoutType='Dropout')
# adam optimizer
optimizer = tf.keras.optimizers.Adam()
# compile model
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
model.build(input_shape=(BATCH_SIZE, *data.shape[1:]))
path = './models/saved_models/no_pretrain_inner_speech'
model.save(path)
del model
###### KFOLD TRAINING
history_accumulator = []
for _ in range(N_CHECKS):
history = kfold_training(data, events, path, BATCH_SIZE, EPOCHS)
history_accumulator += history
print(history_accumulator)
print("Subject", subject, " Mean Accuracy:", np.mean([h['val_accuracy'][-1] for h in history_accumulator]))
return history_accumulator | daa08a8ea88838b8e66c3fc23c2b6997bfdff490 | 543 |
from typing import Any
def build_put_big_decimal_negative_decimal_request(**kwargs: Any) -> HttpRequest:
"""Put big decimal value -99999999.99.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword json: The default value is -99999999.99. Note that overriding this default value may
result in unsupported behavior.
:paramtype json: float
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
"""
content_type = kwargs.pop("content_type", None) # type: Optional[str]
json = kwargs.pop("json", -99999999.99) # type: float
accept = "application/json"
# Construct URL
url = "/number/big/decimal/-99999999.99"
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=url, headers=header_parameters, json=json, **kwargs) | e41800c8bee6a9c4874b3625ed374c9514c81fe7 | 544 |
def make_subplots(
rows=1,
cols=1,
shared_xaxes=False,
shared_yaxes=False,
start_cell="top-left",
print_grid=False,
horizontal_spacing=None,
vertical_spacing=None,
subplot_titles=None,
column_widths=None,
row_heights=None,
specs=None,
insets=None,
column_titles=None,
row_titles=None,
x_title=None,
y_title=None,
figure=None,
**kwargs,
) -> go.Figure:
"""
Return an instance of plotly.graph_objs.Figure with predefined subplots
configured in 'layout'.
Parameters
----------
rows: int (default 1)
Number of rows in the subplot grid. Must be greater than zero.
cols: int (default 1)
Number of columns in the subplot grid. Must be greater than zero.
shared_xaxes: boolean or str (default False)
Assign shared (linked) x-axes for 2D cartesian subplots
- True or 'columns': Share axes among subplots in the same column
- 'rows': Share axes among subplots in the same row
- 'all': Share axes across all subplots in the grid.
shared_yaxes: boolean or str (default False)
Assign shared (linked) y-axes for 2D cartesian subplots
- 'columns': Share axes among subplots in the same column
- True or 'rows': Share axes among subplots in the same row
- 'all': Share axes across all subplots in the grid.
start_cell: 'bottom-left' or 'top-left' (default 'top-left')
Choose the starting cell in the subplot grid used to set the
domains_grid of the subplots.
- 'top-left': Subplots are numbered with (1, 1) in the top
left corner
        - 'bottom-left': Subplots are numbered with (1, 1) in the bottom
left corner
    print_grid: boolean (default False):
If True, prints a string representation of the plot grid. Grid may
also be printed using the `Figure.print_grid()` method on the
resulting figure.
horizontal_spacing: float (default 0.2 / cols)
Space between subplot columns in normalized plot coordinates. Must be
a float between 0 and 1.
        Applies to all columns (use 'specs' for subplot-dependent spacing)
vertical_spacing: float (default 0.3 / rows)
Space between subplot rows in normalized plot coordinates. Must be
a float between 0 and 1.
        Applies to all rows (use 'specs' for subplot-dependent spacing)
subplot_titles: list of str or None (default None)
Title of each subplot as a list in row-major ordering.
Empty strings ("") can be included in the list if no subplot title
is desired in that space so that the titles are properly indexed.
specs: list of lists of dict or None (default None)
Per subplot specifications of subplot type, row/column spanning, and
spacing.
ex1: specs=[[{}, {}], [{'colspan': 2}, None]]
ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]
- Indices of the outer list correspond to subplot grid rows
starting from the top, if start_cell='top-left',
or bottom, if start_cell='bottom-left'.
The number of rows in 'specs' must be equal to 'rows'.
- Indices of the inner lists correspond to subplot grid columns
starting from the left. The number of columns in 'specs'
must be equal to 'cols'.
- Each item in the 'specs' list corresponds to one subplot
in a subplot grid. (N.B. The subplot grid has exactly 'rows'
times 'cols' cells.)
- Use None for a blank a subplot cell (or to move past a col/row span).
- Note that specs[0][0] has the specs of the 'start_cell' subplot.
- Each item in 'specs' is a dictionary.
The available keys are:
* type (string, default 'xy'): Subplot type. One of
- 'xy': 2D Cartesian subplot type for scatter, bar, etc.
- 'scene': 3D Cartesian subplot for scatter3d, cone, etc.
- 'polar': Polar subplot for scatterpolar, barpolar, etc.
- 'ternary': Ternary subplot for scatterternary
- 'mapbox': Mapbox subplot for scattermapbox
- 'domain': Subplot type for traces that are individually
positioned. pie, parcoords, parcats, etc.
- trace type: A trace type which will be used to determine
the appropriate subplot type for that trace
* secondary_y (bool, default False): If True, create a secondary
y-axis positioned on the right side of the subplot. Only valid
if type='xy'.
* colspan (int, default 1): number of subplot columns
for this subplot to span.
* rowspan (int, default 1): number of subplot rows
for this subplot to span.
* l (float, default 0.0): padding left of cell
* r (float, default 0.0): padding right of cell
            * t (float, default 0.0): padding top of cell
* b (float, default 0.0): padding bottom of cell
- Note: Use 'horizontal_spacing' and 'vertical_spacing' to adjust
the spacing in between the subplots.
insets: list of dict or None (default None):
Inset specifications. Insets are subplots that overlay grid subplots
- Each item in 'insets' is a dictionary.
The available keys are:
* cell (tuple, default=(1,1)): (row, col) index of the
subplot cell to overlay inset axes onto.
* type (string, default 'xy'): Subplot type
* l (float, default=0.0): padding left of inset
in fraction of cell width
* w (float or 'to_end', default='to_end') inset width
in fraction of cell width ('to_end': to cell right edge)
* b (float, default=0.0): padding bottom of inset
in fraction of cell height
* h (float or 'to_end', default='to_end') inset height
in fraction of cell height ('to_end': to cell top edge)
column_widths: list of numbers or None (default None)
        list of length `cols` of the relative widths of each column of subplots.
Values are normalized internally and used to distribute overall width
of the figure (excluding padding) among the columns.
For backward compatibility, may also be specified using the
`column_width` keyword argument.
row_heights: list of numbers or None (default None)
list of length `rows` of the relative heights of each row of subplots.
If start_cell='top-left' then row heights are applied top to bottom.
Otherwise, if start_cell='bottom-left' then row heights are applied
bottom to top.
For backward compatibility, may also be specified using the
`row_width` kwarg. If specified as `row_width`, then the width values
are applied from bottom to top regardless of the value of start_cell.
This matches the legacy behavior of the `row_width` argument.
column_titles: list of str or None (default None)
list of length `cols` of titles to place above the top subplot in
each column.
row_titles: list of str or None (default None)
list of length `rows` of titles to place on the right side of each
row of subplots. If start_cell='top-left' then row titles are
applied top to bottom. Otherwise, if start_cell='bottom-left' then
row titles are applied bottom to top.
x_title: str or None (default None)
Title to place below the bottom row of subplots,
centered horizontally
y_title: str or None (default None)
Title to place to the left of the left column of subplots,
centered vertically
figure: go.Figure or None (default None)
If None, a new go.Figure instance will be created and its axes will be
populated with those corresponding to the requested subplot geometry and
this new figure will be returned.
If a go.Figure instance, the axes will be added to the
layout of this figure and this figure will be returned. If the figure
already contains axes, they will be overwritten.
Examples
--------
Example 1:
>>> # Stack two subplots vertically, and add a scatter trace to each
>>> from plotly.subplots import make_subplots
>>> import plotly.graph_objects as go
>>> fig = make_subplots(rows=2)
This is the format of your plot grid:
[ (1,1) xaxis1,yaxis1 ]
[ (2,1) xaxis2,yaxis2 ]
>>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS
Figure(...)
or see Figure.append_trace
Example 2:
>>> # Stack a scatter plot
>>> fig = make_subplots(rows=2, shared_xaxes=True)
This is the format of your plot grid:
[ (1,1) xaxis1,yaxis1 ]
[ (2,1) xaxis2,yaxis2 ]
>>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS
Figure(...)
Example 3:
>>> # irregular subplot layout (more examples below under 'specs')
>>> fig = make_subplots(rows=2, cols=2,
... specs=[[{}, {}],
... [{'colspan': 2}, None]])
This is the format of your plot grid:
[ (1,1) xaxis1,yaxis1 ] [ (1,2) xaxis2,yaxis2 ]
[ (2,1) xaxis3,yaxis3 - ]
>>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=1) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=2) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=2, col=1) # doctest: +ELLIPSIS
Figure(...)
Example 4:
>>> # insets
>>> fig = make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])
This is the format of your plot grid:
[ (1,1) xaxis1,yaxis1 ]
With insets:
[ xaxis2,yaxis2 ] over [ (1,1) xaxis1,yaxis1 ]
>>> fig.add_scatter(x=[1,2,3], y=[2,1,1]) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2') # doctest: +ELLIPSIS
Figure(...)
Example 5:
>>> # include subplot titles
>>> fig = make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))
This is the format of your plot grid:
[ (1,1) x1,y1 ]
[ (2,1) x2,y2 ]
>>> fig.add_scatter(x=[1,2,3], y=[2,1,2], row=1, col=1) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_bar(x=[1,2,3], y=[2,1,2], row=2, col=1) # doctest: +ELLIPSIS
Figure(...)
Example 6:
Subplot with mixed subplot types
>>> fig = make_subplots(rows=2, cols=2,
... specs=[[{'type': 'xy'}, {'type': 'polar'}],
... [{'type': 'scene'}, {'type': 'ternary'}]])
>>> fig.add_traces(
... [go.Scatter(y=[2, 3, 1]),
... go.Scatterpolar(r=[1, 3, 2], theta=[0, 45, 90]),
... go.Scatter3d(x=[1, 2, 1], y=[2, 3, 1], z=[0, 3, 5]),
... go.Scatterternary(a=[0.1, 0.2, 0.1],
... b=[0.2, 0.3, 0.1],
... c=[0.7, 0.5, 0.8])],
... rows=[1, 1, 2, 2],
... cols=[1, 2, 1, 2]) # doctest: +ELLIPSIS
Figure(...)
"""
return _sub.make_subplots(
rows,
cols,
shared_xaxes,
shared_yaxes,
start_cell,
print_grid,
horizontal_spacing,
vertical_spacing,
subplot_titles,
column_widths,
row_heights,
specs,
insets,
column_titles,
row_titles,
x_title,
y_title,
figure,
**kwargs,
) | 1119aba8e9b9f35b18959f765e4528e2d065a5b8 | 545 |
import gdb
def op_table(name):
    """Get the symbol `name' as an int8_t[]."""
    return gdb.parse_and_eval("&'" + name + "'").cast(gdb.lookup_type('int8_t').pointer()) | 8266128cb1bf59b9d71d7dabb7d002ff22e41192 | 546 |
from typing import Dict
from typing import Any
import numpy as np
def nf_masks_to_neurof_dict(binary_masks: np.ndarray, dataset_name: str) -> Dict[str, Any]:
"""
Take as input a tensor of binary mask and produces dict format for neurofinder
Args:
binary_masks: 3d ndarray (components x dimension 1 x dimension 2)
        dataset_name: name of the dataset
Returns:
dset: dict
dataset in neurofinder format to be saved in json
"""
regions = []
for m in binary_masks:
coords = [[int(x), int(y)] for x, y in zip(*np.where(m))]
regions.append({"coordinates": coords})
dset = {"regions": regions, "dataset": dataset_name}
return dset | daee76afc56e6e0da0939530a49f4a77b4c1d5f6 | 547 |
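A sketch exercising `nf_masks_to_neurof_dict` with two tiny hypothetical binary masks.
```python
import numpy as np

masks = np.zeros((2, 3, 3), dtype=bool)
masks[0, 0, 0] = True       # one pixel at (0, 0)
masks[1, 1:, 1] = True      # two pixels at (1, 1) and (2, 1)
dset = nf_masks_to_neurof_dict(masks, "demo")
assert dset["dataset"] == "demo"
assert dset["regions"][0]["coordinates"] == [[0, 0]]
assert dset["regions"][1]["coordinates"] == [[1, 1], [2, 1]]
```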
def get_domain_machine_command():
"""Retrieves a collection of Machines that have communicated to or from a given domain address.
Returns:
(str, dict, dict). Human readable, context, raw response
"""
headers = ['ID', 'ComputerDNSName', 'OSPlatform', 'LastIPAddress', 'LastExternalIPAddress', 'HealthStatus',
'RiskScore', 'ExposureLevel']
domain = demisto.args().get('domain')
response = get_domain_machines_request(domain)
machines_list = get_machines_list(response)
human_readable = tableToMarkdown(f'Machines that have communicated with {domain} domain:', machines_list,
headers=headers, removeNull=True)
context_output = {
'Domain': domain,
'Machines': machines_list
}
entry_context = {
'MicrosoftATP.DomainMachine(val.Domain === obj.Domain)': context_output
}
return human_readable, entry_context, response | 0e614d15abd3e99408d4a3d7a93ecc49dba694ad | 548 |
from copy import copy
from functools import reduce
def flatten_dict(source_dict, name_delimiter='_', inner_name=False):
"""
flatten nest dict
Parameters
----------
source_dict : nest dict
    name_delimiter : flatten name delimiter (not used when inner_name is True)
    inner_name : False, use innermost name as return dict key or not
Returns
-------
flatten dict
Examples
--------
>>> from tidyframe import flatten_dict
>>> nest_dict = {
... 'a': 1,
... 'b': [1, 2],
... 'c': {
... 'cc1': 3,
... 'cc2': 4
... },
... 'd': {
... 'd1': 5,
... 'd2': {
... 'dd1': 6,
... 'dd2': 7
... }
... }
... }
>>> flatten_dict(nest_dict)
{'a': 1, 'b': [1, 2], 'c_cc1': 3, 'c_cc2': 4, 'd_d1': 5, 'd_d2_dd1': 6, 'd_d2_dd2': 7}
>>> flatten_dict(nest_dict, inner_name=True)
{'a': 1, 'b': [1, 2], 'cc1': 3, 'cc2': 4, 'd1': 5, 'dd1': 6, 'dd2': 7}
"""
    assert isinstance(source_dict, dict), "input source_dict is not dict"
json_name = {}
for key in source_dict.keys():
if isinstance(get_in(source_dict, [key]), dict):
val = [True, [key]]
json_name.update({key: val})
else:
val = [False, [key]]
json_name.update({key: val})
while True:
key_inner = list(filter(lambda x: json_name.get(x)[0], json_name))
if key_inner:
for x in key_inner:
dict_to_update_json_name = {}
val = json_name.get(x)[1]
for key in get_in(source_dict, val).keys():
val_in = copy(val)
val_in.append(key)
if isinstance(get_in(source_dict, val_in), dict):
dict_to_update = {
reduce(lambda x, y: x + name_delimiter + y, val_in):
[True, val_in]
}
else:
dict_to_update = {
reduce(lambda x, y: x + name_delimiter + y, val_in):
[False, val_in]
}
dict_to_update_json_name.update(dict_to_update)
json_name.update(dict_to_update_json_name)
json_name.pop(x)
else:
break
if inner_name:
return {
json_name.get(x)[1][-1]: get_in(source_dict,
json_name.get(x)[1])
for x in json_name.keys()
}
else:
return {
x: get_in(source_dict,
json_name.get(x)[1])
for x in json_name.keys()
} | fbba7666c25f5eafd47642b8308a486e25cdc6f9 | 549 |
def matrix_multiply(A, B):
""" Multiply two matrices A and B.
    :param A: the left matrix
    :param B: the right matrix
:return: A * B
"""
# define m and n for the matrix as well as l, the connecting dimension between A and B
m, l, n = len(A), len(A[0]), len(B[0])
# initialize an all zeros matrix
C = [[0.0 for _ in range(len(B[0]))] for _ in range(len(A))]
# iterative over the rows of C
for i in range(m):
# iterative over the columns of C
for j in range(n):
# set C[i][j] to the dot product of ith row of A and the jth column of B
C[i][j] = sum(A[i][k] * B[k][j] for k in range(l))
# return the matrix C = A @ B
return C | 3cd551ea87d9f925654a4153106c2fe87e33fa8c | 550 |
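A usage sketch for `matrix_multiply` with two hypothetical 2 x 2 matrices; the expected product is computed by hand.
```python
A = [[1, 2], [3, 4]]
B = [[5, 6], [7, 8]]
assert matrix_multiply(A, B) == [[19, 22], [43, 50]]
```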
def hard_light(image1, image2):
"""
Superimposes two videos on top of each other using the Hard Light algorithm
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_hard_light(image2.im)) | dcfd25a83f142c2d42c38a787569001af6f6bfc9 | 551 |
def show(*actors, **options):
"""
Create on the fly an instance of class ``Plotter`` and show the object(s) provided.
Allowed input objects types are:
``str``, ``Mesh``, ``Volume``, ``Picture``, ``Assembly``
``vtkPolyData``, ``vtkActor``, ``vtkActor2D``, ``vtkImageActor``,
``vtkAssembly`` or ``vtkVolume``.
If filename is given, its type is guessed based on its extension.
Supported formats are:
`vtu, vts, vtp, ply, obj, stl, 3ds, xml, neutral, gmsh, pcd, xyz, txt, byu,
tif, slc, vti, mhd, png, jpg`.
:param int at: number of the renderer to plot to, if more than one exists
:param list shape: Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one renderer.
Can also accept a shape as string descriptor. E.g.:
- shape="3|1" means 3 plots on the left and 1 on the right,
- shape="4/2" means 4 plots on top of 2 at bottom.
:param int axes: set the type of axes to be shown
- 0, no axes
- 1, draw three gray grid walls
- 2, show cartesian axes from (0,0,0)
- 3, show positive range of cartesian axes from (0,0,0)
- 4, show a triad at bottom left
- 5, show a cube at bottom left
- 6, mark the corners of the bounding box
- 7, draw a 3D ruler at each side of the cartesian axes
- 8, show the ``vtkCubeAxesActor`` object
- 9, show the bounding box outLine
- 10, show three circles representing the maximum bounding box
- 11, show a large grid on the x-y plane
- 12, show polar axes
- 13, draw a simple ruler at the bottom of the window
Axis type-1 can be fully customized by passing a dictionary ``axes=dict()`` where:
Check ``addons.Axes()`` for the full list of options.
:param float azimuth/elevation/roll: move camera accordingly
:param str viewup: either ['x', 'y', 'z'] or a vector to set vertical direction
:param bool resetcam: re-adjust camera position to fit objects
:param dict camera: Camera parameters can further be specified with a dictionary
assigned to the ``camera`` keyword (E.g. `show(camera={'pos':(1,2,3), 'thickness':1000,})`)
- pos, `(list)`, the position of the camera in world coordinates
- focalPoint `(list)`, the focal point of the camera in world coordinates
- viewup `(list)`, the view up direction for the camera
- distance `(float)`, set the focal point to the specified distance from the camera position.
- clippingRange `(float)`, distance of the near and far clipping planes along the direction
of projection.
- parallelScale `(float)`,
scaling used for a parallel projection, i.e. the height of the viewport
in world-coordinate distances. The default is 1. Note that the "scale" parameter works as
an "inverse scale", larger numbers produce smaller images.
This method has no effect in perspective projection mode.
- thickness `(float)`,
set the distance between clipping planes. This method adjusts the far clipping
plane to be set a distance 'thickness' beyond the near clipping plane.
- viewAngle `(float)`,
the camera view angle, which is the angular height of the camera view
measured in degrees. The default angle is 30 degrees.
This method has no effect in parallel projection mode.
The formula for setting the angle up for perfect perspective viewing is:
angle = 2*atan((h/2)/d) where h is the height of the RenderWindow
(measured by holding a ruler up to your screen) and d is the distance
from your eyes to the screen.
:param bool interactive: pause and interact with window (True)
or continue execution (False)
:param float rate: maximum rate of `show()` in Hertz
:param int interactorStyle: set the type of interaction
- 0 = TrackballCamera [default]
- 1 = TrackballActor
- 2 = JoystickCamera
- 3 = JoystickActor
- 4 = Flight
- 5 = RubberBand2D
- 6 = RubberBand3D
- 7 = RubberBandZoom
- 8 = Context
- 9 = 3D
- 10 = Terrain
- 11 = Unicam
:param bool q: force program to quit after `show()` command returns.
:param bool new: if set to `True`, a call to ``show`` will instantiate
a new ``Plotter`` object (a new window) instead of reusing the first created.
:return: the current ``Plotter`` class instance.
.. note:: With multiple renderers, keyword ``at`` can become a `list`, e.g.
.. code-block:: python
from vedo import *
s = Sphere()
c = Cube()
p = Paraboloid()
show(s, c, at=[0, 1], shape=(3,1))
show(p, at=2, interactive=True)
#
# is equivalent to:
plt = Plotter(shape=(3,1))
s = Sphere()
c = Cube()
p = Paraboloid()
plt.show(s, at=0)
plt.show(p, at=1)
plt.show(c, at=2, interactive=True)
"""
at = options.pop("at", None)
shape = options.pop("shape", (1, 1))
N = options.pop("N", None)
pos = options.pop("pos", (0, 0))
size = options.pop("size", "auto")
screensize = options.pop("screensize", "auto")
title = options.pop("title", "")
bg = options.pop("bg", "white")
bg2 = options.pop("bg2", None)
axes = options.pop("axes", settings.defaultAxesType)
interactive = options.pop("interactive", None)
offscreen = options.pop("offscreen", False)
sharecam = options.pop("sharecam", True)
resetcam = options.pop("resetcam", True)
zoom = options.pop("zoom", None)
viewup = options.pop("viewup", "")
azimuth = options.pop("azimuth", 0)
elevation = options.pop("elevation", 0)
roll = options.pop("roll", 0)
camera = options.pop("camera", None)
interactorStyle = options.pop("interactorStyle", 0)
q = options.pop("q", False)
newPlotter = options.pop("new", False)
if len(options):
for op in options:
printc("Warning: unknown keyword in show():", op, c='y')
if len(actors) == 0:
actors = None
elif len(actors) == 1:
actors = actors[0]
else:
actors = utils.flatten(actors)
if settings.plotter_instance and not newPlotter: # Plotter exists
plt = settings.plotter_instance
else: # Plotter must be created
if utils.isSequence(at): # user passed a sequence for "at"
if not utils.isSequence(actors):
printc("show() Error: input must be a list.", c='r')
raise RuntimeError()
if len(at) != len(actors):
printc("show() Error: lists 'input' and 'at', must have equal lengths.", c='r')
raise RuntimeError()
if len(at) > 1 and (shape == (1, 1) and N is None):
N = max(at) + 1
elif at is None and (N or shape != (1, 1)):
if not utils.isSequence(actors):
printc('show() Error: N or shape is set, but input is not a sequence.', c='r')
printc(' you may need to specify e.g. at=0', c='r')
raise RuntimeError()
at = list(range(len(actors)))
plt = Plotter(
shape=shape,
N=N,
pos=pos,
size=size,
screensize=screensize,
title=title,
axes=axes,
sharecam=sharecam,
resetcam=resetcam,
interactive=interactive,
offscreen=offscreen,
bg=bg,
bg2=bg2,
)
# use _plt_to_return because plt.show() can return a k3d/panel plot
_plt_to_return = None
if utils.isSequence(at):
for i, a in enumerate(actors):
_plt_to_return = plt.show(
a,
at=i,
zoom=zoom,
resetcam=resetcam,
viewup=viewup,
azimuth=azimuth,
elevation=elevation,
roll=roll,
camera=camera,
interactive=False,
interactorStyle=interactorStyle,
bg=bg,
bg2=bg2,
axes=axes,
q=q,
)
plt.interactive = interactive
if interactive or len(at)==N \
or (isinstance(shape[0],int) and len(at)==shape[0]*shape[1]):
# note that shape can be a string
if not offscreen:
plt.interactor.Start()
else:
_plt_to_return = plt.show(
actors,
at=at,
zoom=zoom,
resetcam=resetcam,
viewup=viewup,
azimuth=azimuth,
elevation=elevation,
roll=roll,
camera=camera,
interactive=interactive,
interactorStyle=interactorStyle,
bg=bg,
bg2=bg2,
axes=axes,
q=q,
)
return _plt_to_return | e95c9bbd325e8e6da6ee4f33ca861496c3048bd3 | 552 |
import subprocess
def convert_book(book):
"""
Attempt to convert any books of type in `CONVERTIBLE_MIMETYPES` to .mobi,
in the same folder as the given temporary path.
"""
tmp_path = book.get_tmp_pathname(u'send_books')
mobi_tmp_path = convert_to_mobi_path(tmp_path)
if mobi_tmp_path is None:
return None
log.info(u"Converting book for user id {0}".format(book.user_id))
try:
subprocess.check_output(['ebook-convert', tmp_path, mobi_tmp_path],
timeout=CONVERSION_TIMEOUT)
except subprocess.CalledProcessError as e:
return e.output
except subprocess.TimeoutExpired as e:
return "Timed out converting book"
except Exception as e:
        return str(e)
from typing import List
def get_followup_question_list(intent: str) -> List[str]:
"""
Get all imported followup questions for this intent as a list
* `intent`: name-parameter of the yml-section with which the followup questions were imported
**Returns:** None if no followup questions are known for this intent, otherwise list of followup questions for this intent
"""
return None if not qa.get(intent) else qa.get(intent).followup_questions | 00581a497d4e09670edaa0d44870a2a0d7589ada | 554 |
from typing import Tuple
def event_train_test_split(
evs: np.ndarray, n_evs: int, train_split: float, random_seed: int=1
) -> Tuple[np.ndarray, np.ndarray]:
"""[summary]
Args:
n_evs (int): [description]
train_split (float): [description]
random_seed (int, optional): [description]. Defaults to 1.
Returns:
Tuple[np.ndarray, np.ndarray]: [description]
"""
# some basic checks
assert 0 < train_split < 1, "Variable train_split (ts) must be 0<ts<1."
assert n_evs > 1, "Need more than 1 event to split."
# set the random state locally
r = np.random.RandomState(random_seed)
# compute the number of test and train samples
train_samples = int(np.round(train_split * n_evs, 0))
test_samples = int(n_evs - train_samples)
# split the events
train_events = r.choice(evs, train_samples, replace=False)
test_events = evs[~np.isin(evs, train_events)]
# make sure they add up to the total number!
assert len(train_events) + len(test_events) == n_evs
return train_events, test_events | 69deb28d199fc8636ed45a90630a69753f4c1066 | 555 |
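# A minimal usage sketch (not from the original source; assumes numpy is available
# as ``np`` at module level, as the function itself requires): split 20 event ids
# into an 80/20 train/test partition with a fixed seed for reproducibility.
example_events = np.arange(20)
train_evs, test_evs = event_train_test_split(example_events, n_evs=20, train_split=0.8, random_seed=42)
assert len(train_evs) == 16 and len(test_evs) == 4
assert np.intersect1d(train_evs, test_evs).size == 0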
def get_cd(wcs, n=1):
"""
Get the value of the change in world coordinate per pixel across a linear axis.
Defaults to wcs.wcs.cd if present. Does not support rotated headers (e.g.,
with nonzero CDm_n where m!=n)
"""
if hasattr(wcs.wcs,'cd'):
if wcs.wcs.cd[n-1,n-1] != 0:
return wcs.wcs.cd[n-1,n-1]
else:
return wcs.wcs.get_cdelt()[n-1] | 9b31c81a1a5e87efeb201ffef7f8f65f846fe0b7 | 556 |
def mock_clear():
"""Clear MOCK_DATA_HEAP"""
MOCK_DATA_HEAP.clear()
return "" | 96212726db3f1e29ac4da54e62df70cb89ba1f2e | 557 |
from datetime import datetime
def cls_merge_type(classification):
""" classification type이 2가지일 때 합쳐주는 함수
Parameters
----------
classification: cls
classification 리스트
Returns
-------
list of cls
변환된 classification 리스트
"""
cls_type = {'instant' if cls.get('instant_datetime') else 'not_instant' for cls in classification }
if len(cls_type) == 2:
for cls in classification:
instant_datetime = cls.get('instant_datetime')
if instant_datetime:
year = instant_datetime.year
                start_datetime = datetime(year, 1, 1)  # set to January 1 of that year
end_datetime = instant_datetime
cls['instant_datetime'] = None
cls['start_datetime'] = start_datetime
cls['end_datetime'] = end_datetime
return classification | e6a59f45adecdc21acb81f6890ac79cfaa93b4d6 | 558 |
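# A minimal usage sketch (not from the original source): when one entry carries an
# instant date and another carries a start/end range, the instant entry is expanded
# into a range running from January 1 of its year up to the instant date.
example_cls = [
    {'instant_datetime': datetime(2020, 6, 30), 'start_datetime': None, 'end_datetime': None},
    {'instant_datetime': None, 'start_datetime': datetime(2020, 1, 1), 'end_datetime': datetime(2020, 12, 31)},
]
merged = cls_merge_type(example_cls)
assert merged[0]['instant_datetime'] is None
assert merged[0]['start_datetime'] == datetime(2020, 1, 1)
assert merged[0]['end_datetime'] == datetime(2020, 6, 30)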
def duplicate_detector(gate_orders: list[tuple[str]]) -> int:
"""Detects any schematics that have an identical combination of gates."""
difference = len(gate_orders) - len(list(set(gate_orders))) # List - list with no duplicates
return difference | e439a106abc0ff21bfe9773b3185d35b5bf05aa0 | 559 |
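# A minimal usage sketch (not from the original source): two of the three gate
# orderings below are identical, so exactly one duplicate is reported.
example_orders = [("AND", "OR"), ("AND", "OR"), ("XOR", "NOT")]
assert duplicate_detector(example_orders) == 1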
def permutations(x):
"""Return all permutations of x"""
def fn(i):
if i == len(x): ans.append(x.copy())
for k in range(i, len(x)):
x[i], x[k] = x[k], x[i]
fn(i+1)
x[i], x[k] = x[k], x[i]
ans = []
fn(0)
return ans | 691c701e1ac17da5dabb0fc3fe607ff68ac8fcdc | 560 |
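# A minimal usage sketch (not from the original source): all 3! = 6 orderings
# of a three-element list are produced.
perms = permutations([1, 2, 3])
assert len(perms) == 6
assert [1, 2, 3] in perms and [3, 2, 1] in perms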
import os
def _add_cobertura_package(packages, package_name, package_data): # type: (SubElement, str, t.Dict[str, t.Dict[str, int]]) -> t.Tuple[int, int]
"""Add a package element to the given packages element."""
elem_package = SubElement(packages, 'package')
elem_classes = SubElement(elem_package, 'classes')
total_lines_hit = 0
total_line_count = 0
for path, results in package_data.items():
lines_hit = len([True for hits in results.values() if hits])
line_count = len(results)
total_lines_hit += lines_hit
total_line_count += line_count
elem_class = SubElement(elem_classes, 'class')
class_name = os.path.splitext(os.path.basename(path))[0]
if class_name.startswith("Ansible.ModuleUtils"):
class_name = class_name[20:]
content_root = data_context().content.root
filename = path
if filename.startswith(content_root):
filename = filename[len(content_root) + 1:]
elem_class.attrib.update({
'branch-rate': '0',
'complexity': '0',
'filename': filename,
'line-rate': str(round(lines_hit / line_count, 4)) if line_count else "0",
'name': class_name,
})
SubElement(elem_class, 'methods')
elem_lines = SubElement(elem_class, 'lines')
for number, hits in results.items():
elem_line = SubElement(elem_lines, 'line')
elem_line.attrib.update(
hits=str(hits),
number=str(number),
)
elem_package.attrib.update({
'branch-rate': '0',
'complexity': '0',
'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
'name': package_name,
})
return total_lines_hit, total_line_count | 80db50ebf302ed5a590069ba08aa7d9de1b59cda | 561 |
def encode(model, text, out_file=None, topic_priors=None, prior_weight=1.0):
"""
Perform text-to-image encoding.
Parameters
----------
model : :obj:`gclda.model.Model`
Model object needed for decoding.
text : :obj:`str` or :obj:`list`
Text to encode into an image.
out_file : :obj:`str`, optional
If not None, writes the encoded image to a file.
topic_priors : :obj:`numpy.ndarray` of :obj:`float`, optional
A 1d array of size (n_topics) with values for topic weighting.
If None, no weighting is done. Default is None.
prior_weight : :obj:`float`, optional
The weight by which the prior will affect the encoding.
Default is 1.
Returns
-------
img : :obj:`nibabel.Nifti1Image`
The encoded image.
topic_weights : :obj:`numpy.ndarray` of :obj:`float`
The weights of the topics used in encoding.
Notes
-----
====================== ==============================================================
Notation Meaning
====================== ==============================================================
:math:`v` Voxel
:math:`t` Topic
:math:`w` Word type
:math:`h` Input text
:math:`p(v|t)` Probability of topic given voxel (``p_topic_g_voxel``)
:math:`\\tau_{t}` Topic weight vector (``topic_weights``)
:math:`p(w|t)` Probability of word type given topic (``p_word_g_topic``)
:math:`\omega` 1d array from input image (``input_values``)
====================== ==============================================================
1. Compute :math:`p(v|t)`
(``p_voxel_g_topic``).
- From :obj:`gclda.model.Model.get_spatial_probs()`
2. Compute :math:`p(t|w)`
(``p_topic_g_word``).
3. Vectorize input text according to model vocabulary.
4. Reduce :math:`p(t|w)` to only include word types in input text.
5. Compute :math:`p(t|h)` (``p_topic_g_text``) by multiplying :math:`p(t|w)`
by word counts for input text.
6. Sum topic weights (:math:`\\tau_{t}`) across
words.
- :math:`\\tau_{t} = \sum_{i}{p(t|h_{i})}`
7. Compute voxel
weights.
- :math:`p(v|h) \propto p(v|t) \cdot \\tau_{t}`
8. The resulting array (``voxel_weights``) reflects arbitrarily scaled
voxel weights for the input text.
9. Unmask and reshape ``voxel_weights`` into brain image.
"""
if isinstance(text, list):
text = " ".join(text)
# Assume that words in word_labels are underscore-separated.
# Convert to space-separation for vectorization of input string.
vocabulary = [term.replace("_", " ") for term in model.dataset.word_labels]
max_len = max([len(term.split(" ")) for term in vocabulary])
vectorizer = CountVectorizer(
vocabulary=model.dataset.word_labels, ngram_range=(1, max_len)
)
word_counts = np.squeeze(vectorizer.fit_transform([text]).toarray())
keep_idx = np.where(word_counts > 0)[0]
text_counts = word_counts[keep_idx]
n_topics_per_word_token = np.sum(model.n_word_tokens_word_by_topic, axis=1)
p_topic_g_word = (
model.n_word_tokens_word_by_topic / n_topics_per_word_token[:, None]
)
    p_topic_g_word = np.nan_to_num(p_topic_g_word, nan=0)
p_topic_g_text = p_topic_g_word[keep_idx] # p(T|W) for words in text only
prod = p_topic_g_text * text_counts[:, None] # Multiply p(T|W) by words in text
topic_weights = np.sum(prod, axis=0) # Sum across words
if topic_priors is not None:
weighted_priors = weight_priors(topic_priors, prior_weight)
topic_weights *= weighted_priors
_, p_voxel_g_topic = model.get_spatial_probs()
voxel_weights = np.dot(p_voxel_g_topic, topic_weights)
img = unmask(voxel_weights, model.dataset.mask_img)
if out_file is not None:
img.to_filename(out_file)
return img, topic_weights | 941745be7b84c3af9d7f7e62cb2d93fabc3b22c1 | 562 |
def clean_string(s: str) -> str:
"""Cleans and returns an input string
>>> clean_string(" xYz ")
'XYZ'
"""
return str(s).strip().upper() | c97281505492ded5b9167076312959c5eee41a6c | 563 |
import collections
def get_unique_region_cov_df(unique_region_dict, fuzzer_names):
"""Returns a DataFrame where the two columns are fuzzers and the number
of unique regions covered."""
fuzzers = collections.defaultdict(int)
for region in unique_region_dict:
for fuzzer in unique_region_dict[region]:
fuzzers[fuzzer] += 1
dict_to_transform = {'fuzzer': [], 'unique_regions_covered': []}
for fuzzer in fuzzer_names:
covered_num = fuzzers[fuzzer]
dict_to_transform['fuzzer'].append(fuzzer)
dict_to_transform['unique_regions_covered'].append(covered_num)
return pd.DataFrame(dict_to_transform) | 923227fb804549252bf51cd94e65180c3f8564e8 | 564 |
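# A minimal usage sketch (not from the original source; assumes pandas is available
# as ``pd`` at module level, as the function itself requires): each fuzzer is
# credited once per region that lists it.
unique_regions = {"r1": ["afl", "libfuzzer"], "r2": ["afl"]}
cov_df = get_unique_region_cov_df(unique_regions, ["afl", "libfuzzer"])
assert cov_df["unique_regions_covered"].tolist() == [2, 1]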
from typing import Optional
from typing import Union
from typing import Tuple
import os
from typing import Counter
import torch
def load_continuous_dataset(
root: str,
name: str,
raw: bool = False,
random_state: Optional[RandomState] = None
) -> Union[
Tuple[np.ndarray, np.ndarray, np.ndarray],
Tuple[UnsupervisedDataset, UnsupervisedDataset, UnsupervisedDataset]
]:
"""
Load a continuous dataset.
All the datasets are preprocessed as in the original MAF paper repository.
See https://github.com/gpapamak/maf/tree/master/datasets for details.
:param root: The datasets root directory.
:param name: The name of the dataset.
:param raw: Whether to return unpreprocessed Numpy arrays instead of Torch Datasets.
Torch Datasets will have standardization as data transformation.
:param random_state: The random state to use for shuffling and transforming the data.
It can be either None, a seed integer or a Numpy RandomState.
:return: The train, validation and test dataset splits.
:raise ValueError: If the continuous dataset name is not known.
"""
# Check the random state
random_state = check_random_state(random_state)
directory = os.path.join(root, name)
if name == 'power':
# Load the dataset
data = np.load(os.path.join(directory, 'data.npy'))
random_state.shuffle(data)
n_samples = len(data)
data = np.delete(data, [1, 3], axis=1)
# Add noise as in original datasets preprocessing (MAF paper)
voltage_noise = 0.01 * random_state.rand(n_samples, 1)
gap_noise = 0.001 * random_state.rand(n_samples, 1)
sm_noise = random_state.rand(n_samples, 3)
time_noise = np.zeros(shape=(n_samples, 1))
data = data + np.hstack([gap_noise, voltage_noise, sm_noise, time_noise])
# Split the dataset
n_test = int(0.1 * len(data))
data_test = data[-n_test:]
data = data[:-n_test]
n_valid = int(0.1 * len(data))
data_valid = data[-n_valid:]
data_train = data[:-n_valid]
elif name == 'gas':
# Load the dataset
data = pd.read_pickle(os.path.join(directory, 'ethylene_CO.pickle'))
data.drop(['Meth', 'Eth', 'Time'], axis=1, inplace=True)
# Remove uninformative features
uninformative_idx = (data.corr() > 0.98).to_numpy().sum(axis=1)
while np.any(uninformative_idx > 1):
col_to_remove = np.where(uninformative_idx > 1)[0][0]
data.drop(data.columns[col_to_remove], axis=1, inplace=True)
uninformative_idx = (data.corr() > 0.98).to_numpy().sum(axis=1)
data = data.to_numpy()
random_state.shuffle(data)
# Split the dataset
n_test = int(0.1 * len(data))
data_test = data[-n_test:]
data = data[:-n_test]
n_valid = int(0.1 * len(data))
data_valid = data[-n_valid:]
data_train = data[:-n_valid]
elif name == 'hepmass':
# Load the dataset
data_train = pd.read_csv(os.path.join(directory, "1000_train.csv"), index_col=False)
data_test = pd.read_csv(os.path.join(directory, "1000_test.csv"), index_col=False)
# Gets rid of any background noise examples i.e. class label 0.
data_train = data_train[data_train[data_train.columns[0]] == 1]
data_train = data_train.drop(data_train.columns[0], axis=1)
data_test = data_test[data_test[data_test.columns[0]] == 1]
data_test = data_test.drop(data_test.columns[0], axis=1)
data_test = data_test.drop(data_test.columns[-1], axis=1)
data_train, data_test = data_train.to_numpy(), data_test.to_numpy()
# Remove any features that have too many re-occurring real values.
features_to_remove = []
for i, feature in enumerate(data_train.T):
c = Counter(feature)
max_count = next(v for k, v in sorted(c.items()))
if max_count > 5:
features_to_remove.append(i)
features_to_keep = [i for i in range(data_train.shape[1]) if i not in features_to_remove]
data_train = data_train[:, features_to_keep]
data_test = data_test[:, features_to_keep]
random_state.shuffle(data_train)
# Split the train dataset
n_valid = int(len(data_train) * 0.1)
data_valid = data_train[-n_valid:]
data_train = data_train[:-n_valid]
elif name == 'miniboone':
# Load the dataset
data = np.load(os.path.join(directory, 'data.npy'))
random_state.shuffle(data)
# Split the dataset
n_test = int(0.1 * len(data))
data_test = data[-n_test:]
data = data[:-n_test]
n_valid = int(0.1 * len(data))
data_valid = data[-n_valid:]
data_train = data[:-n_valid]
elif name == 'BSDS300':
# Load the dataset
with h5py.File(os.path.join(directory, 'BSDS300.hdf5'), 'r') as file:
data_train = file['train'][:]
data_valid = file['validation'][:]
data_test = file['test'][:]
else:
raise ValueError("Unknown continuous dataset called {}".format(name))
# Return raw Numpy arrays, if specified
if raw:
return data_train, data_valid, data_test
# Instantiate the standardize transform
mean = torch.tensor(np.mean(data_train, axis=0), dtype=torch.float32)
std = torch.tensor(np.std(data_train, axis=0), dtype=torch.float32)
transform = Normalize(mean, std)
# Wrap and return the datasets
data_train = UnsupervisedDataset(data_train, transform)
data_valid = UnsupervisedDataset(data_valid, transform)
data_test = UnsupervisedDataset(data_test, transform)
return data_train, data_valid, data_test | d33c3e36fbf4c225a0be07e52f4e6b8845dc2187 | 565 |
def display_generation_hit_results(hit_info, hit_results):
"""Displays the results of a generation HIT
Parameters
----------
hit_info : GenerationHITInfo
HITInfo object storing information regarding the HIT
hit_results : GenerationResults
HIT results object storing the results of the relevant HIT
Returns
-------
bool
returns True
"""
dec_string = format_decomposition_string(hit_results.decomposition)
print(
'HIT ID: {hit_id}'
'\nAssignment ID: {assignment_id}'
'\nHIT Type: Generation'
'\n'
'\nResults'
'\n======='
'\nAnnotation ID: {annotation_id}'
'\nQuestion ID: {question_id}'
'\nQuestion Text: {question_text}'
'\nDecomposition: {decomposition}'.format(
hit_id=hit_results.hit_id,
assignment_id=hit_results.assignment_id,
annotation_id=hit_info.annotation_id,
question_id=hit_info.question_id,
question_text=hit_info.question_text,
decomposition=dec_string))
return True | 649cf05d0ee3a87032993fd05fc120162c7f8d2b | 566 |
import sys
def check_help_all_output(pkg, subcommand=None):
"""test that `python -m PKG --help-all` works"""
cmd = [sys.executable, '-m', pkg]
if subcommand:
cmd.extend(subcommand)
cmd.append('--help-all')
out, err, rc = get_output_error_code(cmd)
nt.assert_equal(rc, 0, err)
nt.assert_not_in("Traceback", err)
nt.assert_in("Options", out)
nt.assert_in("Class parameters", out)
return out, err | adeb80303c1a087a1294803cff298978f305f738 | 567 |
def XOR(v1, v2):
"""
XOR operation element by element from 2 lists
:param v1: [1, 0, 1, 0, 0, 1]
:param v2: [1, 1, 0, 0, 1, 1]
:return: [0, 1, 1, 0, 1, 0]
"""
return [a ^ b for a, b in zip(v1, v2)] | e3b94b35ccf4e1dd99cc51f32c70f96c5fe99795 | 568 |
def linearly_spaced_combinations(bounds, num_samples):
"""
    Return a 2-D array with all linearly spaced combinations within the bounds.
Parameters
----------
bounds : sequence of tuples
The bounds for the variables, [(x1_min, x1_max), (x2_min, x2_max), ...]
    num_samples : integer or array_like
Number of samples to use for every dimension. Can be a constant if
the same number should be used for all, or an array to fine-tune
precision. Total number of data points is num_samples ** len(bounds).
Returns
-------
combinations : 2-d array
        A 2-d array. If d = len(bounds) and l = prod(num_samples) then it
is of size l x d, that is, every row contains one combination of
inputs.
"""
bounds = np.atleast_2d(bounds)
num_vars = len(bounds)
num_samples = np.broadcast_to(num_samples, num_vars)
# Create linearly spaced test inputs
inputs = [np.linspace(b[0], b[1], n) for b, n in zip(bounds,
num_samples)]
# Convert to 2-D array
return combinations(inputs) | 4d493290ae5c8af91f2f0dce5041c12c22bb6aaa | 569 |
from typing import Optional
from typing import Tuple
def flatten_expressions_tree(
expression: Optional[Expression]) -> Tuple[Expression, ...]:
"""
Flatten expressions tree into a list.
"""
if not expression:
return tuple()
expressions = [expression]
for arg in expression.arguments:
if is_expression(arg):
expressions.extend(flatten_expressions_tree(arg))
return tuple(expressions) | dea87f37663e995a74b7709b3dbb62d5cc370f50 | 570 |
def policy_head(x, mode, params):
"""
The policy head attached after the residual blocks as described by DeepMind:
1. A convolution of 8 filters of kernel size 3 × 3 with stride 1
2. Batch normalisation
3. A rectifier non-linearity
4. A fully connected linear layer that outputs a vector of size 19²+1 = 362
corresponding to logit probabilities for all intersections and the pass
move
"""
num_channels = params['num_channels']
num_samples = params['num_samples']
def _forward(x, is_recomputing=False):
""" Returns the result of the forward inference pass on `x` """
y = batch_norm_conv2d(x, 'conv_1', (3, 3, num_channels, num_samples), mode, params, is_recomputing=is_recomputing)
y = tf.nn.relu(y)
y = tf.reshape(y, (-1, 361 * num_samples))
y = dense(y, 'linear_1', (361 * num_samples, 362), policy_offset_op, mode, params, is_recomputing=is_recomputing)
return tf.cast(y, tf.float32)
return recompute_grad(_forward)(x) | 8e8418ffd46857cf03921ea516729d15a0ef5ca9 | 571 |
from typing import Set
from typing import Sequence
def _get_doors_gt_nsrts() -> Set[NSRT]:
"""Create ground truth NSRTs for DoorsEnv."""
robot_type, door_type, room_type = _get_types_by_names(
CFG.env, ["robot", "door", "room"])
InRoom, InDoorway, InMainRoom, TouchingDoor, DoorIsOpen, DoorInRoom, \
DoorsShareRoom = _get_predicates_by_names(CFG.env, ["InRoom",
"InDoorway", "InMainRoom", "TouchingDoor", "DoorIsOpen",
"DoorInRoom", "DoorsShareRoom"])
MoveToDoor, OpenDoor, MoveThroughDoor = _get_options_by_names(
CFG.env, ["MoveToDoor", "OpenDoor", "MoveThroughDoor"])
nsrts = set()
# MoveToDoorFromMainRoom
# This operator should only be used on the first step of a plan.
robot = Variable("?robot", robot_type)
room = Variable("?room", room_type)
door = Variable("?door", door_type)
parameters = [robot, room, door]
option_vars = [robot, door]
option = MoveToDoor
preconditions = {
LiftedAtom(InRoom, [robot, room]),
LiftedAtom(InMainRoom, [robot, room]),
LiftedAtom(DoorInRoom, [door, room]),
}
add_effects = {
LiftedAtom(TouchingDoor, [robot, door]),
LiftedAtom(InDoorway, [robot, door])
}
delete_effects = {LiftedAtom(InMainRoom, [robot, room])}
side_predicates: Set[Predicate] = set()
move_to_door_nsrt = NSRT("MoveToDoorFromMainRoom", parameters,
preconditions, add_effects, delete_effects,
side_predicates, option, option_vars,
null_sampler)
nsrts.add(move_to_door_nsrt)
# MoveToDoorFromDoorWay
robot = Variable("?robot", robot_type)
start_door = Variable("?start_door", door_type)
end_door = Variable("?end_door", door_type)
parameters = [robot, start_door, end_door]
option_vars = [robot, end_door]
option = MoveToDoor
preconditions = {
LiftedAtom(InDoorway, [robot, start_door]),
LiftedAtom(DoorsShareRoom, [start_door, end_door]),
}
add_effects = {
LiftedAtom(TouchingDoor, [robot, end_door]),
LiftedAtom(InDoorway, [robot, end_door])
}
delete_effects = {LiftedAtom(InDoorway, [robot, start_door])}
side_predicates = set()
move_to_door_nsrt = NSRT("MoveToDoorFromDoorWay", parameters,
preconditions, add_effects, delete_effects,
side_predicates, option, option_vars,
null_sampler)
nsrts.add(move_to_door_nsrt)
# OpenDoor
robot = Variable("?robot", robot_type)
door = Variable("?door", door_type)
parameters = [door, robot]
option_vars = [door, robot]
option = OpenDoor
preconditions = {
LiftedAtom(TouchingDoor, [robot, door]),
LiftedAtom(InDoorway, [robot, door]),
}
add_effects = {LiftedAtom(DoorIsOpen, [door])}
delete_effects = {
LiftedAtom(TouchingDoor, [robot, door]),
}
side_predicates = set()
# Allow protected access because this is an oracle. Used in the sampler.
env = get_or_create_env(CFG.env)
assert isinstance(env, DoorsEnv)
get_open_door_target_value = env._get_open_door_target_value # pylint: disable=protected-access
# Even though this option does not need to be parameterized, we make it so,
# because we want to match the parameter space of the option that will
# get learned during option learning. This is useful for when we want
# to use sampler_learner = "oracle" too.
def open_door_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del rng, goal # unused
door, _ = objs
assert door.is_instance(door_type)
# Calculate the desired change in the doors "rotation" feature.
# Allow protected access because this is an oracle.
mass = state.get(door, "mass")
friction = state.get(door, "friction")
target_rot = state.get(door, "target_rot")
target_val = get_open_door_target_value(mass=mass,
friction=friction,
target_rot=target_rot)
current_val = state.get(door, "rot")
delta_rot = target_val - current_val
# The door always changes from closed to open.
delta_open = 1.0
return np.array([delta_rot, delta_open], dtype=np.float32)
open_door_nsrt = NSRT("OpenDoor", parameters, preconditions, add_effects,
delete_effects, side_predicates, option, option_vars,
open_door_sampler)
nsrts.add(open_door_nsrt)
# MoveThroughDoor
robot = Variable("?robot", robot_type)
start_room = Variable("?start_room", room_type)
end_room = Variable("?end_room", room_type)
door = Variable("?door", door_type)
parameters = [robot, start_room, door, end_room]
option_vars = [robot, door]
option = MoveThroughDoor
preconditions = {
LiftedAtom(InRoom, [robot, start_room]),
LiftedAtom(InDoorway, [robot, door]),
LiftedAtom(DoorIsOpen, [door]),
LiftedAtom(DoorInRoom, [door, start_room]),
LiftedAtom(DoorInRoom, [door, end_room]),
}
add_effects = {
LiftedAtom(InRoom, [robot, end_room]),
}
delete_effects = {
LiftedAtom(InRoom, [robot, start_room]),
}
side_predicates = set()
move_through_door_nsrt = NSRT("MoveThroughDoor", parameters, preconditions,
add_effects, delete_effects, side_predicates,
option, option_vars, null_sampler)
nsrts.add(move_through_door_nsrt)
return nsrts | c7bf01b092b6e7a76053a4d2905a6e4e154f72fd | 572 |
def _get_word_ngrams(n, sentences):
"""Calculates word n-grams for multiple sentences.
"""
assert len(sentences) > 0
assert n > 0
words = sum(sentences, [])
return _get_ngrams(n, words) | 73640beb269895d7c54d21825135908eba3f3bd4 | 573 |
from typing import List
def override_list(base_list: List, dynamic_key: str, val):
"""
Customize the base list by updating with the
dynamic_key and val.
Parameters
----------
base: dict
Dictionary or List to be customized with dynamic args
dynamic_key: str
        Key identifying the location where the value should be updated.
Nested with DOT like "custom.key_0.key_1.key_2.0.0.key_4"
val: str or float or int or dict or list
Value to be set
Returns
-------
dict
Updated base_list based on the key-value pairs in dynamic_args
Notes
-----
This will be called recursively with override_dict.
If dynamic_key is not a number, then we try to match on `name` field
in the list of dictionaries.
"""
def find_root_key_index(base_list, root_key):
if root_key.isdigit():
# If array index
root_key = int(root_key)
else:
# If string, then match on `name`
for root_key_i in range(len(base_list)):
if root_key == base_list[root_key_i][NAME]:
root_key = root_key_i
break
if not isinstance(root_key, int):
raise KeyError("{} not found in List".format(root_key))
return root_key
if DOT in dynamic_key:
# Compute root and subtree keys
root_key = find_root_key_index(base_list, dynamic_key.split(DOT)[0])
subtree_key = DOT.join(dynamic_key.split(DOT)[1:])
# Extract subtree
subtree = base_list[root_key]
if isinstance(subtree, dict):
root_val = override_dict(base_dict=subtree,
dynamic_key=subtree_key,
val=val)
elif isinstance(subtree, list):
root_val = override_list(base_list=subtree,
dynamic_key=subtree_key,
val=val)
else:
raise ValueError(
"Unsupported subtree type. Must be one of list or dict")
else:
# End of nested dynamic key
root_key = find_root_key_index(base_list, dynamic_key)
root_val = val
base_list[root_key] = root_val
return base_list | eb7e4a0462d2db82cb1900784b5d99e2865dcc00 | 574 |
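# A minimal usage sketch (not from the original source; it assumes the module-level
# constants used above are NAME == "name" and DOT == "."): replace the entry whose
# "name" field matches the key.
layers = [{"name": "encoder", "units": 128}, {"name": "decoder", "units": 64}]
layers = override_list(layers, "decoder", {"name": "decoder", "units": 256})
assert layers[1]["units"] == 256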
def c_components(DAG):
"""Return a list of the maximal c-component node sets in DAG."""
G = nx.Graph();
G.add_nodes_from(observable_nodes(DAG))
G.add_edges_from([(u,v) for u,v in observable_pairs(DAG) if
has_confounded_path(DAG, u, v)])
return list(nx.connected_components(G)) | 85c4ae4ee7b9572e31727f672864e4b079e3bde7 | 575 |
def wss_over_number_of_clusters(data, algorithm='kmeans',
max_iter=100, num_repeats = 5, max_num_clusters = 12,
plot_file = None):
"""
Calculates the within-sum-of-squares (WSS) for different numbers of clusters,
averaged over several iterations.
Parameters
----------
data : float array
Trajectory data [frames,frame_data]
algorithm : string
The algorithm to use for the clustering.
Options: kmeans, rspace.
Default: kmeans
max_iter : int, optional
Maximum number of iterations.
Default: 100.
num_repeats : int, optional
Number of times to run the clustering for each number of clusters.
Default: 5.
max_num_clusters : int, optional
Maximum number of clusters for k-means clustering.
Default: 12.
plot_file : str, optional
Name of the file to save the plot.
Returns
-------
all_wss : float array
WSS values for each number of clusters (starting at 2).
std_wss : float array
Standard deviations of the WSS.
"""
# Initialize lists
all_wss = []
std_wss = []
# Loop over the number of clusters
for nc in range(1,max_num_clusters):
rep_wss = []
# Run each clustering several times.
for repeat in range(num_repeats):
# Get clusters and WSS for this repetition.
cc = obtain_clusters(data, algorithm=algorithm, max_iter=max_iter,
num_clusters=nc, plot=False)
cidx, wss, centroids = cc
rep_wss.append(wss)
# Calculate mean and standard deviation for this number of clusters.
all_wss.append(np.mean(rep_wss))
std_wss.append(np.std(rep_wss))
# Plot the WSS over the number of clusters
fig, ax = plt.subplots(1,1, figsize=[4,3], dpi=300)
ax.errorbar(np.arange(len(all_wss))+2,np.array(all_wss),yerr=np.array(std_wss)/np.sqrt(num_repeats))
ax.set_xlabel('number of clusters')
ax.set_ylabel('total WSS')
fig.tight_layout()
# Save the plot to file.
if plot_file: fig.savefig(plot_file)
return all_wss, std_wss | fa00e00a5b0ba53b4b578fae04c43c69500d2d97 | 576 |
from typing import List
from typing import Tuple
def separate_classes(x: np.ndarray, y: np.ndarray) -> List[Tuple[int, np.ndarray]]:
"""Separate samples by classes into a list.
Args:
x (np.ndarray): Samples.
y (np.ndarray): Target labels (classes).
Returns:
List[Tuple[int, np.ndarray]]: List in the format [(class, samples),...]
"""
classes = np.unique(y)
l = []
for clss in classes:
l.append((clss, x[y==clss]))
return l | ee0a93b44785f6f017769da6fd6f1cc65f9cf121 | 577 |
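# A minimal usage sketch (not from the original source; assumes numpy is available
# as ``np`` at module level, as the function itself requires): three samples, two classes.
x_demo = np.array([[1.0], [2.0], [3.0]])
y_demo = np.array([0, 1, 0])
pairs = separate_classes(x_demo, y_demo)
assert pairs[0][0] == 0 and len(pairs[0][1]) == 2
assert pairs[1][0] == 1 and len(pairs[1][1]) == 1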
import time
from datetime import datetime, timedelta
def monitor_threads(threads, arguments):
"""
Monitor the threads.
Parameters
----------
threads: dict
The threads to monitor.
arguments: namespace
The parsed command line.
# --GT-- not used, kept to avoid to break the function call.
Returns
-------
int
0
"""
try:
# exit and let systemd restart the process to avoid issues with
# potential memory leaks
time.sleep(60 * 60 * 2)
except Exception:
# the sleep was interrupted
pass
for th in threads.keys():
threads[th].stop()
# give up to 30 seconds for threads to exit cleanly
timeout = datetime.now() + timedelta(seconds=30)
while timeout > datetime.now():
thread_running = False
for th in threads.keys():
if threads[th].is_alive():
thread_running = True
if not thread_running:
break
return 0 | fa0e1f329cb70eb1fd81c74c9ec555921cab9041 | 578 |
def fetch_cfr_parts(notice_xml):
""" Sometimes we need to read the CFR part numbers from the notice
XML itself. This would need to happen when we've broken up a
multiple-effective-date notice that has multiple CFR parts that
may not be included in each date. """
parts = []
for cfr_elm in notice_xml.xpath('//CFR'):
parts.extend(notice_cfr_p.parseString(cfr_elm.text).cfr_parts)
return list(sorted(set(parts))) | d4fc1be9004f5a670c58dc3badaef80d53f73fc4 | 579 |
def get_dayofweek(date):
"""
Returns day of week in string format from date parameter (in datetime format).
"""
return date.strftime("%A") | 4a0f728733870998331ea6f796b167b9dd3276ab | 580 |
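# A minimal usage sketch (not from the original source): 2 January 2023 fell on a Monday
# (assuming the default English locale for strftime).
from datetime import datetime
assert get_dayofweek(datetime(2023, 1, 2)) == "Monday"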
def add_model_output(modelIn, mode=None, num_add=None, activation=None):
""" This function modifies the last dense layer in the passed keras model. The modification includes adding units and optionally changing the activation function.
Parameters
----------
modelIn : keras model
Keras model to be modified.
mode : string
Mode to modify the layer. It could be:
'abstain' for adding an arbitrary number of units for the abstention optimization strategy.
'qtl' for quantile regression which needs the outputs to be tripled.
'het' for heteroscedastic regression which needs the outputs to be doubled. (current implicit default: 'het')
num_add : integer
Number of units to add. This only applies to the 'abstain' mode.
activation : string
        String with keras specification of activation function (e.g. 'relu', 'sigmoid', 'softmax', etc.)
Return
----------
modelOut : keras model
Keras model after last dense layer has been modified as specified. If there is no mode specified it returns the same model.
"""
if mode is None:
return modelIn
numlayers = len(modelIn.layers)
# Find last dense layer
i = -1
while 'dense' not in (modelIn.layers[i].name) and ((i+numlayers) > 0):
i -= 1
# Minimal verification about the validity of the layer found
assert ((i + numlayers) >= 0)
assert ('dense' in modelIn.layers[i].name)
# Compute new output size
    if mode == 'abstain':
assert num_add is not None
new_output_size = modelIn.layers[i].output_shape[-1] + num_add
    elif mode == 'qtl':  # for quantile UQ
new_output_size = 3 * modelIn.layers[i].output_shape[-1]
else: # for heteroscedastic UQ
new_output_size = 2 * modelIn.layers[i].output_shape[-1]
# Recover current layer options
config = modelIn.layers[i].get_config()
# Update number of units
config['units'] = new_output_size
# Update activation function if requested
if activation is not None:
config['activation'] = activation
# Create new Dense layer
reconstructed_layer = Dense.from_config(config)
# Connect new Dense last layer to previous one-before-last layer
additional = reconstructed_layer(modelIn.layers[i-1].output)
# If the layer to replace is not the last layer, add the remainder layers
if i < -1:
for j in range(i+1, 0):
config_j = modelIn.layers[j].get_config()
aux_j = layers.deserialize({'class_name': modelIn.layers[j].__class__.__name__,
'config': config_j})
reconstructed_layer = aux_j.from_config(config_j)
additional = reconstructed_layer(additional)
modelOut = Model(modelIn.input, additional)
return modelOut | 2b869477bb67f7349569d15e0ada229cc1400e39 | 581 |
def parse(f):
"""Parse ASDL from the given file and return a Module node describing it."""
parser = ASDLParser()
return parser.parse(f) | ca6b97c2181444dd7325877d67d74bb894c1bfef | 582 |
def distance(xyz, lattice, PBC=[1,2,3]):
"""
Returns the Euclidean distance from the origin for a fractional
displacement vector. Takes into account the lattice metric and periodic
boundary conditions, including up to one non-periodic axis.
Args:
xyz: a fractional 3d displacement vector. Can be obtained by
subtracting one fractional vector from another
lattice: a 3x3 matrix describing a unit cell's lattice vectors
PBC: the axes, if any, which are periodic. 1, 2, and 3 correspond
to x, y, and z respectively.
Returns:
a scalar for the distance of the point from the origin
"""
xyz = filtered_coords(xyz, PBC=PBC)
matrix = create_matrix(PBC=PBC)
matrix += xyz
matrix = np.dot(matrix, lattice)
return np.min(cdist(matrix,[[0,0,0]])) | e6a6a925773b35996cc5f71982b72aeaef25cc4f | 583 |
def dobro(n=0, formato=False):
"""
    Double a number
    :param n: number to be doubled
    :param formato: (optional) format the result as currency
    :return: the result
"""
n = float(n)
n += n
return moeda(n) if formato else n | f01267cf432d48ab263d90756fd920c7d6d987a1 | 584 |
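# A minimal usage sketch (not from the original source): without formatting, the plain
# doubled value is returned (the formatted branch calls the external ``moeda`` helper,
# so it is not exercised here).
assert dobro(21.5) == 43.0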
def f_q2d(n, m):
"""Lowercase f term for 2D-Q polynomials. oe-20-3-2483 Eq. (A.18b).
Parameters
----------
n : int
radial order
m : int
azimuthal order
Returns
-------
float
f
"""
if n == 0:
return np.sqrt(F_q2d(n=0, m=m))
else:
return np.sqrt(F_q2d(n, m) - g_q2d(n-1, m) ** 2) | 52919fab40e51dfa8ba89eb1d6f97ec733ce9de5 | 585 |
def binary_search(data, target, low, high):
"""Return position if target is found in indicated portion of a python list and -1 if target is not found.
"""
if low > high:
return -1
mid = (low + high) // 2
if target == data[mid]:
return mid
elif target < data[mid]:
# recur on the portion left of the middle
return binary_search(data, target, low, mid - 1)
else:
# recur on the portion right of the middle
return binary_search(data, target, mid + 1, high) | 8f85596e4ff8971f4002b2f108c9c276304924be | 586 |
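# A minimal usage sketch (not from the original source): search a sorted list
# for a present and an absent target.
data = [1, 3, 5, 7, 9, 11]
assert binary_search(data, 7, 0, len(data) - 1) == 3
assert binary_search(data, 4, 0, len(data) - 1) == -1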
def delete_position(id):
"""Delete a post.
Ensures that the post exists and that the logged in user is the
author of the post.
"""
db = get_db()
db.execute('DELETE FROM gatekeeping WHERE id = ?', (id,))
db.commit()
return jsonify(status='ok') | cf5d6580eea191a0d17d1a98e2f86a8610a5f58a | 587 |
import struct
def read_string(stream, length):
"""read data from the file and return as a text string
"""
text = struct.unpack('{}s'.format(length), stream.read(length))
try:
result = str(text[0], encoding='utf-8')
except UnicodeDecodeError:
result = str(text[0], encoding='latin-1')
return result.rstrip('\x00') | da8ddf04ea6c0232e59c4612105a243a5c1807d4 | 588 |
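# A minimal usage sketch (not from the original source): read a fixed-width,
# NUL-padded field from an in-memory byte stream.
import io
assert read_string(io.BytesIO(b"hello\x00\x00\x00"), 8) == "hello"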
import math
def generate_gate_y_hamiltonian_vec() -> np.ndarray:
"""Return the vector representation for the Hamiltonian of a Y gate with respect to the orthonormal Hermitian matrix basis with the normalized identity matrix as the 0th element.
The result is a real vector with size 4.
Parameters
----------
Returns
----------
np.ndarray
The real vector representation of the Hamiltonian of the gate.
"""
dim = 2
coeff = 0.5 * math.pi * np.sqrt(2)
vec = np.zeros(dim * dim, dtype=np.float64)
vec[0] = -coeff
vec[2] = coeff
return vec | 0a290c8f4a2c0acd81439caa4e5c4a7032a91d83 | 589 |
def _semi_implicit_midpoint(ode_fun, jac_fun, y_olds, t_old, f_old, dt, args,
solver_parameters, J00, I):
"""
Calculate solution at t_old+dt using the semi-implicit midpoint
formula. Based on equations IV.9.16a-b of Ref II.
"""
y_older, y_old = y_olds
je_tot=0
if(y_older is None): # Use Euler to get starting value
return _semi_implicit_euler(ode_fun, jac_fun, y_olds, t_old,
f_old, dt, args, solver_parameters,
J00, I)
if(f_old is None):
f_yj = ode_fun(*(y_old,t_old)+args)
fe_tot = 1
else: # We already computed it and can just reuse it
f_yj = f_old
fe_tot=0
b = np.dot(-(I+dt*J00),(y_old-y_older)) + 2*dt*f_yj
A = I-dt*J00
if(solver_parameters['initialGuess']): # Use Euler for initial guess
x0, f_yj, fe_tot_,je_tot=_explicit_euler(ode_fun, jac_fun, y_olds,
t_old, f_yj, dt,
args, solver_parameters)
fe_tot += fe_tot_
else:
x0=None
dy = linear_solve(A, b, iterative=solver_parameters['iterative'],
tol=solver_parameters['min_tol'], x0=x0)
y_new = y_old + dy
return (y_new, f_yj, fe_tot, je_tot) | 7807427e64d4348eb4deb4624d1db5e4df0226ca | 590 |
from tensorflow.python.framework.graph_util import (
convert_variables_to_constants,
remove_training_nodes,
)
def freeze_session(session,
keep_var_names=None,
output_names=None,
clear_devices=True):
"""
Freezes the state of a session into a pruned computation graph.
Creates a new computation graph where variable nodes are replaced by
constants taking their current value in the session. The new graph will be
pruned so subgraphs that are not necessary to compute the requested
outputs are removed.
@param session The TensorFlow session to be frozen.
@param keep_var_names A list of variable names that should not be frozen,
or None to freeze all the variables in the graph.
@param output_names Names of the relevant graph outputs.
@param clear_devices Remove the device directives from the graph for better portability.
@return The frozen graph definition.
"""
graph = session.graph
with graph.as_default():
freeze_var_names = list(
set(v.op.name for v in tf.global_variables()).difference(
keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
# Graph -> GraphDef ProtoBuf
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = convert_variables_to_constants(
session, input_graph_def, output_names, freeze_var_names)
frozen_graph = remove_training_nodes(frozen_graph)
return frozen_graph | 3617fc93a1727fcabf128622fcaf0e8dd5008b24 | 591 |
def get_system_cpu_times():
"""Return system CPU times as a namedtuple."""
user, nice, system, idle = _psutil_osx.get_system_cpu_times()
return _cputimes_ntuple(user, nice, system, idle) | c2edb6c0b87449c31aba1fe023736e30cdf41154 | 592 |
def Mce1(m, q, ξ, *, p=0):
"""
v = Mce1(m, q, ξ, *, p=0)
Compute the value of the even Radial Mathieu function of the first kind
Mce⁽¹⁾ₘ(q, ξ).
Parameters
----------
m : array_like
interger order of the Mathieu function
q : array_like
positive parameter in the Mathieu differential equation
ξ : array like
``radial'' coordinate in the Elliptic coordinates
p : 0 or 1 or 2 (default 0)
0 for the function,
1 for the first derivative
2 for the second derivative
Returns
-------
v : array like
value of Mce⁽¹⁾ₘ(q, ξ) or Mce⁽¹⁾ₘ′(q, ξ) or Mce⁽¹⁾ₘ′´(q, ξ)
"""
if p == 0:
return mathieu_modcem1(m, q, ξ)[0]
if p == 1:
return mathieu_modcem1(m, q, ξ)[1]
if p == 2:
return (mathieu_a(m, q) - (2 * q) * cosh(2 * ξ)) * mathieu_modcem1(m, q, ξ)[0]
raise ValueError("The value p must be 0, 1, or 2.") | 7a5c7fc6b9a9380c5d763f1773a26516345e5a3b | 593 |
def profileown():
"""Display user's profile"""
return render_template("profile.html", user=session, person=session, books=None) | 83ac3e78fb2f3b43596874c8208099d041536ebb | 594 |
def GetChildConfigListMetadata(child_configs, config_status_map):
"""Creates a list for the child configs metadata.
This creates a list of child config dictionaries from the given child
configs, optionally adding the final status if the success map is
specified.
Args:
child_configs: The list of child configs for this build.
config_status_map: The map of config name to final build status.
Returns:
List of child config dictionaries, with optional final status
"""
child_config_list = []
for c in child_configs:
pass_fail_status = None
if config_status_map:
if config_status_map[c['name']]:
pass_fail_status = constants.FINAL_STATUS_PASSED
else:
pass_fail_status = constants.FINAL_STATUS_FAILED
child_config_list.append({'name': c['name'],
'boards': c['boards'],
'status': pass_fail_status})
return child_config_list | 621fa06eb0055a2dec1941bb4fd84ecb8fd6847c | 595 |
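# A minimal usage sketch (not from the original source; assumes the chromite
# ``constants`` module referenced above is importable): one passing and one
# failing child config.
child_cfgs = [{'name': 'amd64-generic', 'boards': ['amd64-generic']},
              {'name': 'arm-generic', 'boards': ['arm-generic']}]
status_map = {'amd64-generic': True, 'arm-generic': False}
meta = GetChildConfigListMetadata(child_cfgs, status_map)
assert meta[0]['status'] == constants.FINAL_STATUS_PASSED
assert meta[1]['status'] == constants.FINAL_STATUS_FAILED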
def get_samples(df, selected_rows, no_of_samples, records_in_db):
""" get samples without shuffling columns """
df_fixed = None
df_random = None
generic_data_dict = []
#drop rows with 'ignore' set to 'yes'
if 'ignore' in df.columns:
df = df[df["ignore"] != "yes"]
df = df.drop(['ignore'], axis = 1)
print_info("================================================================================")
print_info("Total no. of samples found in variable xls file : {}".format(len(df.index)))
print_info("Total no. of samples already tested : {}".format(len(records_in_db)))
print_info("Total no. of samples remaining to test : {}".format(len(df.index) - len(records_in_db)))
print_info("Total no. of random samples selected in this test : {}".format(no_of_samples))
if selected_rows:
print_info("Selected rows to test : {}".format(selected_rows))
print_info("================================================================================")
#select user selected rows
if selected_rows:
selected_rows = [row-1 for row in selected_rows]
df_fixed = df.iloc[selected_rows]
df = df.drop(selected_rows, axis=0)
#select records in df which are not in db_df
db_df = pd.DataFrame(records_in_db)
if db_df.columns.tolist():
df = df.merge(db_df, how = 'outer' ,indicator=True).\
loc[lambda x : x['_merge']=='left_only']
df = df.drop(['_merge'], axis = 1)
if no_of_samples and len(df.index) == 0:
print_error("All the samples are tested. use --reset_execution to restart test again")
exit(1)
if no_of_samples and no_of_samples <= len(df.index):
#select random samples
df_random = df.sample(n=no_of_samples)
elif no_of_samples and no_of_samples > len(df.index):
print_error("Given no. of samples {} is greater than remaining samples to" \
" test {}. please reduce no. of samples".format(no_of_samples, len(df.index)))
exit(1)
df = pd.concat([df_fixed, df_random])
generic_data_dict = df.to_dict('records')
print_info("selected samples : {}".format(generic_data_dict))
print_info("================================================================================")
return generic_data_dict | 16b13d59e50ebcb16e65feea41bc801b1e8b87b8 | 596 |
def update_member_names(oldasndict, pydr_input):
"""
Update names in a member dictionary.
Given an association dictionary with rootnames and a list of full
file names, it will update the names in the member dictionary to
    contain the '_*' extension. For example, a rootname of 'u9600201m' will
    be replaced by 'u9600201m_c0h', making sure that a MEF file is passed
    as an input and not the corresponding GEIS file.
"""
omembers = oldasndict['members'].copy()
nmembers = {}
translated_names = [f.split('.fits')[0] for f in pydr_input]
newkeys = [fileutil.buildNewRootname(file) for file in pydr_input]
keys_map = list(zip(newkeys, pydr_input))
for okey, oval in list(omembers.items()):
if okey in newkeys:
nkey = pydr_input[newkeys.index(okey)]
nmembers[nkey.split('.fits')[0]] = oval
oldasndict.pop('members')
# replace should be always True to cover the case when flt files were removed
# and the case when names were translated
oldasndict.update(members=nmembers, replace=True)
oldasndict['order'] = translated_names
return oldasndict | 364a4bdd742fe03545e9bb4fb72ef996beea1550 | 597 |
def part1(entries: str) -> int:
"""part1 solver take a str and return an int"""
houses = {(0, 0): 1}
pos_x, pos_y = 0, 0
for direction in entries:
delta_x, delta_y = moves[direction]
pos_x += delta_x
pos_y += delta_y
houses[(pos_x, pos_y)] = houses.get((pos_x, pos_y), 0) + 1
return len(houses) | 104cf7b32ec9f395603a32f57b146129d30a417b | 598 |
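# Illustrative note (not from the original source): this relies on a module-level
# ``moves`` table; assuming it maps '^', 'v', '<', '>' to unit (dx, dy) steps as in
# Advent of Code 2015 day 3, ``part1('^v^v')`` bounces between two houses and returns 2.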
def beqs(
screen, asof=None, typ='PRIVATE', group='General', **kwargs
) -> pd.DataFrame:
"""
Bloomberg equity screening
Args:
screen: screen name
asof: as of date
typ: GLOBAL/B (Bloomberg) or PRIVATE/C (Custom, default)
group: group name if screen is organized into groups
Returns:
pd.DataFrame
"""
logger = logs.get_logger(beqs, **kwargs)
service = conn.bbg_service(service='//blp/refdata', **kwargs)
request = service.createRequest('BeqsRequest')
request.set('screenName', screen)
request.set('screenType', 'GLOBAL' if typ[0].upper() in ['G', 'B'] else 'PRIVATE')
request.set('Group', group)
if asof:
overrides = request.getElement('overrides')
ovrd = overrides.appendElement()
ovrd.setElement('fieldId', 'PiTDate')
ovrd.setElement('value', utils.fmt_dt(asof, '%Y%m%d'))
logger.debug(f'Sending request to Bloomberg ...\n{request}')
conn.send_request(request=request, **kwargs)
res = pd.DataFrame(process.rec_events(func=process.process_ref))
if res.empty:
if kwargs.get('trial', 0): return pd.DataFrame()
else: return beqs(
screen=screen, asof=asof, typ=typ, group=group, trial=1, **kwargs
)
if kwargs.get('raw', False): return res
cols = res.field.unique()
return (
res
.set_index(['ticker', 'field'])
.unstack(level=1)
.rename_axis(index=None, columns=[None, None])
.droplevel(axis=1, level=0)
.loc[:, cols]
.pipe(pipeline.standard_cols)
) | ba65d7b4913862b340bf8ab9033ce9fe8fad8535 | 599 |