content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---
def upload_plans_for_template(request):
"""
Allow user to upload a csv file to create plans based on a previously selected template
"""
ctxd = {}
context = RequestContext(request, ctxd)
return render_to_response("rundb/plan/modal_batch_planning_upload.html", context_instance=context) | 7ec7930bcaf755b8325ea691e8d8181aca4f0feb | 800 |
def calculate_total_correlativity(coefficient_array):
""" Returns the total correlativity of the coefficient_array. The total
correlativity is the sum of the absolute values and a measure of how
    correlated two timeseries are. The greater the value, the more correlated."""
return sum(map(abs, coefficient_array)) | 84acdd15a47cb95285eec30a3ab9dc33e3044493 | 801 |
from datetime import datetime
def hide_topic(topic_id: TopicID, moderator_id: UserID) -> BoardTopicHidden:
"""Hide the topic."""
topic = _get_topic(topic_id)
moderator = _get_user(moderator_id)
now = datetime.utcnow()
topic.hidden = True
topic.hidden_at = now
topic.hidden_by_id = moderator.id
db.session.commit()
aggregate_topic(topic)
topic_creator = _get_user(topic.creator_id)
return BoardTopicHidden(
occurred_at=now,
initiator_id=moderator.id,
initiator_screen_name=moderator.screen_name,
board_id=topic.category.board_id,
topic_id=topic.id,
topic_creator_id=topic_creator.id,
topic_creator_screen_name=topic_creator.screen_name,
topic_title=topic.title,
moderator_id=moderator.id,
moderator_screen_name=moderator.screen_name,
url=None,
) | ee1c4fff752c81b31f1adec06ed03090cd0c0a99 | 802 |
def argmax(a, axis=None, out=None):
""" Returns the indices of the maximum values along an axis.
Parameters
----------
a: array_like
axis: int, optional
By default, the index is into the flattened array, otherwise along the specified axis.
out: numpy.array, optional
If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype.
Returns
-------
numpy.ndarray[int]"""
a = a.data if isinstance(a, Tensor) else a
return np.argmax(a, axis, out) | 29e8e718e9d6c6455217fac614e53c642ff8cbb1 | 803 |
def FlowBalance_rule(model, node):
"""Ensures that flows into and out of a node are equal
"""
return model.Supply[node] \
+ sum(model.Flow[i, node] for i in model.NodesIn[node]) \
- model.Demand[node] \
- sum(model.Flow[node, j] for j in model.NodesOut[node]) \
== 0 | 628e8e2bb6967c9114dfcb8ea449d760180ab206 | 804 |
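A minimal usage sketch, assuming the sets and parameters the rule refers to (Nodes, NodesIn, NodesOut, Supply, Demand, Flow) exist on a Pyomo model; this shows how such a rule is typically attached as an indexed constraint, with a tiny two-node network used purely for illustration.

# Hedged sketch: wire up a two-node network so FlowBalance_rule can be attached.
import pyomo.environ as pyo

model = pyo.ConcreteModel()
model.Nodes = pyo.Set(initialize=["a", "b"])
model.Arcs = pyo.Set(initialize=[("a", "b")], dimen=2)
model.NodesIn = pyo.Set(model.Nodes, initialize={"a": [], "b": ["a"]})
model.NodesOut = pyo.Set(model.Nodes, initialize={"a": ["b"], "b": []})
model.Supply = pyo.Param(model.Nodes, initialize={"a": 10, "b": 0})
model.Demand = pyo.Param(model.Nodes, initialize={"a": 0, "b": 10})
model.Flow = pyo.Var(model.Arcs, domain=pyo.NonNegativeReals)

# One balance constraint per node, generated from the rule above.
model.FlowBalance = pyo.Constraint(model.Nodes, rule=FlowBalance_rule)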
def abcd(actual, predicted, distribution, as_percent=True):
"""
Confusion Matrix:
|`````````````|`````````````|
| TN[0][0] | FP[0][1] |
| | |
|`````````````|`````````````|
| FN[1][0] | TP[1][1] |
| | |
`````````````````````````````
"""
c_mtx = confusion_matrix(actual, predicted)
"Probablity of Detection: Pd"
try:
p_d = c_mtx[1][1] / (c_mtx[1][1] + c_mtx[1][0]) # TP/(TP+FN)
except ZeroDivisionError:
p_d = 0
"Probability of False Alarm: Pf"
try:
p_f = c_mtx[0][1] / (c_mtx[0][1] + c_mtx[0][0]) # FP/(FP+TN)
except ZeroDivisionError:
p_f = 0
"Precision"
try:
p_r = c_mtx[1][1] / (c_mtx[1][1] + c_mtx[0][1]) # TP/(TP+FP)
if not np.isfinite(p_r): p_r = 0
except ZeroDivisionError:
p_r = 0
"Recall (Same as Pd)"
r_c = p_d
"F1 measure"
try:
f1 = 2 * c_mtx[1][1] / (2 * c_mtx[1][1] + c_mtx[0][1] + 1 * c_mtx[1][0]) # F1 = 2*TP/(2*TP+FP+FN)
except ZeroDivisionError:
f1 = 0
e_d = 2 * p_d * (1 - p_f) / (1 + p_d - p_f)
    g = np.sqrt(p_d - p_d * p_f)  # Geometric mean of the true positive rate (Pd) and true negative rate (1 - Pf)
try:
auroc = round(roc_auc_score(actual, distribution), 2)
except ValueError:
auroc = 0
if as_percent is True:
return p_d * 100, p_f * 100, p_r * 100, r_c * 100, f1 * 100, e_d * 100, g * 100, auroc * 100
else:
return p_d, p_f, p_r, r_c, f1, e_d, g, auroc | 8e4923ef72b3cb74bc5563083d223d3e8a92e982 | 805 |
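As a worked illustration of the formulas above (not the function's own output), here is the same arithmetic on a hard-coded confusion matrix laid out as in the docstring, [[TN, FP], [FN, TP]].

# Worked example of the metric formulas, using an assumed 2x2 confusion matrix.
import numpy as np

c_mtx = np.array([[50, 10],   # TN, FP
                  [5, 35]])   # FN, TP
tn, fp, fn, tp = c_mtx[0][0], c_mtx[0][1], c_mtx[1][0], c_mtx[1][1]
p_d = tp / (tp + fn)                  # probability of detection (recall): 0.875
p_f = fp / (fp + tn)                  # probability of false alarm: ~0.167
p_r = tp / (tp + fp)                  # precision: ~0.778
f1 = 2 * tp / (2 * tp + fp + fn)      # F1: ~0.824
g = np.sqrt(p_d * (1 - p_f))          # geometric mean of TPR and TNR: ~0.854
print(p_d, p_f, p_r, f1, g)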
def get_gene_name(protein_id, web_fallback=True):
"""Return the gene name for the given UniProt ID.
This is an alternative to get_hgnc_name and is useful when
    the HGNC name is not available (for instance, when the organism
    is not Homo sapiens).
Parameters
----------
protein_id : str
UniProt ID to be mapped.
web_fallback : Optional[bool]
If True and the offline lookup fails, the UniProt web service
is used to do the query.
Returns
-------
gene_name : str
        The gene name corresponding to the given UniProt ID.
"""
try:
gene_name = uniprot_gene_name[protein_id]
# There are cases when the entry is in the resource
# table but the gene name is empty. Often this gene
# name is actually available in the web service RDF
# so here we return only if the gene name is not None
# and not empty string.
if gene_name:
return gene_name
except KeyError:
pass
if not web_fallback:
return None
g = query_protein(protein_id)
if g is None:
return None
query = rdf_prefixes + """
SELECT ?name
WHERE {
?gene a up:Gene .
?gene skos:prefLabel ?name .
}
"""
res = g.query(query)
if res:
gene_name = [r for r in res][0][0].toPython()
if not gene_name:
return None
return gene_name
return None | 3319bd94a2933e03459ec586f76e105e9e3c64ed | 806 |
def summarize():
""" Returns summary of articles """
if request.method == 'POST':
url = request.form['pageurl']
parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE))
stemmer = Stemmer(LANGUAGE)
summarizer = Summarizer(stemmer)
summarizer.stop_words = get_stop_words(LANGUAGE)
final = []
for sentence in summarizer(parser.document, SENTENCES_COUNT):
final.append(str(sentence))
return render_template('result.html', len=len(final), summary=final) | 4bfa483961609967657544049fa0495f173262e5 | 807 |
import os
def _env_constructor(loader, node):
"""
Replaces environment variables in YAML file
"""
value = loader.construct_scalar(node)
for group in env_pattern.findall(value):
try:
value = value.replace(f"${{{group}}}", os.environ.get(group))
except TypeError as error:
print(
f"An error occured while parsing YAML file:\n\n\tENV variable {group} not set\n"
)
raise Exception(f"ENV variable {group} not set") from error
return value | 160e0c3fff7c9b89fee4937349568f2de64aba2d | 808 |
def version(include_copyright=False):
"""Get the version number of ``ssocr``.
Equivalent of running: ``ssocr --version``
Parameters
----------
include_copyright : :class:`bool`, optional
Whether to include the copyright information.
Returns
-------
:class:`str`
The version number (and possibly copyright information).
"""
out = _run([ssocr_exe, '--version'])
if include_copyright:
return out
return out.splitlines()[0].split()[-1] | 189634e1a6fd00df5c71e8d671f28ad0858a1027 | 809 |
from typing import List
import itertools
def intersection(n: np.ndarray,
d: float,
A: np.ndarray,
b: np.ndarray) -> List[np.ndarray]:
"""Return the intersection of the plane and convex ployhedron.
Returns a list of points which define the intersection between the plane
    nx = d and the convex polyhedron defined by linear inequalities Ax <= b.
Args:
n (np.ndarray): Normal vector of the plane.
        d (float): Offset (or signed distance) of the plane.
        A (np.ndarray): LHS coefficients defining the linear inequalities.
b (np.ndarray): RHS constants defining the linear inequalities.
Returns:
List[np.ndarray]: List of vertices defining the intersection (if any).
Raises:
ValueError: Normal vector must be length 3.
ValueError: Matrix A must be of shape (n,3).
"""
if len(n) != 3:
raise ValueError('Normal vector must be length 3.')
if len(A[0]) != 3:
raise ValueError('Matrix A must be of shape (n,3).')
pts = []
n_d = np.hstack((n,d))
A_b = np.hstack((A,b))
for indices in itertools.combinations(range(len(A)),2):
R_c = np.vstack((n,A[list(indices)]))
R_d = np.vstack((n_d,A_b[list(indices)]))
if np.linalg.matrix_rank(R_c) == 3 and np.linalg.matrix_rank(R_d) == 3:
det = np.linalg.det(R_c)
if det != 0:
x_1 = np.linalg.det(R_d[:,[3,1,2]])/det
x_2 = np.linalg.det(R_d[:,[0,3,2]])/det
x_3 = np.linalg.det(R_d[:,[0,1,3]])/det
x = np.array([[x_1],[x_2],[x_3]])
if all(np.matmul(A,x) <= b + 1e-10):
pts.append(np.round(x, 10))
return pts | d3f653ae5f67dbe7be8b2f92de65031299e3b360 | 810 |
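A hedged usage sketch, slicing the unit cube with the plane z = 0.5; note that b must be a column vector of shape (n, 1) so the hstack inside the function works.

# Usage sketch (assumed inputs): intersect z = 0.5 with the unit cube 0 <= x, y, z <= 1.
import numpy as np

n = np.array([0.0, 0.0, 1.0])                        # plane normal
d = 0.5                                              # plane offset, n . x = d
A = np.array([[ 1.0, 0.0, 0.0], [-1.0, 0.0, 0.0],    # x <= 1, -x <= 0
              [ 0.0, 1.0, 0.0], [ 0.0, -1.0, 0.0],   # y <= 1, -y <= 0
              [ 0.0, 0.0, 1.0], [ 0.0, 0.0, -1.0]])  # z <= 1, -z <= 0
b = np.array([[1.0], [0.0], [1.0], [0.0], [1.0], [0.0]])
pts = intersection(n, d, A, b)
print(len(pts))   # 4: the corners of the square cross-section at z = 0.5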
def create_session(checkpoint_path, target_device):
"""Create ONNX runtime session"""
if target_device == 'GPU':
providers = ['CUDAExecutionProvider']
elif target_device == 'CPU':
providers = ['CPUExecutionProvider']
else:
raise ValueError(
f'Unsupported target device {target_device}, '
f'Expected one of: "CPU", "GPU"'
)
session = ort.InferenceSession(checkpoint_path, providers=providers)
input_names = [x.name for x in session.get_inputs()]
return session, input_names | bbd900a7e454e499010c8503f95c15aedd8f8a5c | 811 |
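A short usage sketch; the model path and input shape below are placeholders, and onnxruntime's session.run(None, feed) returns all model outputs.

# Hedged usage sketch: "model.onnx" and the (1, 3, 224, 224) shape are placeholders.
import numpy as np

session, input_names = create_session("model.onnx", "CPU")
dummy = np.zeros((1, 3, 224, 224), dtype=np.float32)
outputs = session.run(None, {input_names[0]: dummy})  # None -> return every output
print([o.shape for o in outputs])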
def V_eN_int(cgf_1, cgf_2, mol):
"""
Compute electron-nuclear integral between two contracted gaussian functions.
"""
v = 0
for i, _ in enumerate(cgf_1.alpha):
for j, _ in enumerate(cgf_2.alpha):
for k in range(mol.num_atoms):
v += cgf_1.co[i] * cgf_2.co[j] * potential((cgf_1.alpha[i], cgf_1.coordinate),
(cgf_2.alpha[j], cgf_2.coordinate),
mol.coordinates[k], mol.charges[k])
return v | b13b26048e5a419d507e03f29387f84422ff2035 | 812 |
def create_subword_vocab(input_data,
subword_size):
"""create subword vocab from input data"""
def generate_subword(word,
subword_size):
"""generate subword for word"""
subwords = []
chars = list(word)
char_length = len(chars)
for i in range(char_length-subword_size+1):
subword = ''.join(chars[i:i+subword_size])
subwords.append(subword)
return subwords
subword_vocab_lookup = {}
for sentence in input_data:
words = sentence.strip().split(' ')
for word in words:
word_vocabs = [word, word.lower(), word.capitalize(), word.upper()]
for word_vocab in word_vocabs:
subword_vocabs = generate_subword(word_vocab, subword_size)
for subword_vocab in subword_vocabs:
if subword_vocab not in subword_vocab_lookup:
subword_vocab_lookup[subword_vocab] = 1
else:
subword_vocab_lookup[subword_vocab] += 1
return subword_vocab_lookup | 651f9da1e1df0f8b78168870bbdec6b4aff65425 | 813 |
import requests
import json
def request_json(input_data):
"""Request JSON data from full node, given request data input.
More info: http://docs.python-requests.org/en/master/"""
    requested_data = None  # Ensure requested_data is defined even if every server fails
for full_node_url in full_node_list_http:
try:
requested_data = requests.get(full_node_url, data=input_data)
except requests.exceptions.ConnectionError as err:
print("...")
print("Error: {}".format(full_node_url))
print(err)
print("...")
continue
        if requested_data.status_code != 200:
# Fail! Move onto the next URL!
print("./\/\/\.")
print("Not online: {}".format(full_node_url))
print(".\/\/\/.")
continue
else:
print("---")
print("Online: {}".format(full_node_url))
print(requested_data)
num_workers = json.loads(json.dumps(requested_data.text))
print(num_workers)
print("===")
continue
return requested_data | b26d022d28c186d6ad6d793d18c3eebb8056d9e8 | 814 |
def rrange(x, y = 0):
""" Creates a reversed range (from x - 1 down to y).
Example:
        >>> list(rrange(10, 0))  # => [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
"""
return range(x - 1, y - 1, -1) | 37c41673dab3fca797f4f6f0ab2f8160e7650248 | 815 |
def einsum_outshape(subscripts, *operands):
"""Compute the shape of output from `numpy.einsum`.
Does not support ellipses.
"""
if "." in subscripts:
raise ValueError(f"Ellipses are not supported: {subscripts}")
insubs, outsubs = subscripts.replace(",", "").split("->")
if outsubs == "":
return ()
insubs = np.array(list(insubs))
    innumber = np.concatenate([op.shape for op in operands])
outshape = []
for o in outsubs:
indices, = np.where(insubs == o)
try:
outshape.append(innumber[indices].max())
except ValueError:
raise ValueError(f"Invalid subscripts: {subscripts}")
return tuple(outshape) | fb67dd428bda133548c8eb84293faf2013fe0ef1 | 816 |
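A quick usage sketch checking the helper against numpy.einsum itself.

# Usage sketch: the predicted shape matches what np.einsum actually produces.
import numpy as np

A = np.ones((2, 3))
B = np.ones((3, 4))
print(einsum_outshape("ij,jk->ik", A, B))        # (2, 4)
print(np.einsum("ij,jk->ik", A, B).shape)        # (2, 4)
print(einsum_outshape("ij->", A))                # () for a full reduction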
def random_points(region, color="00FFFF", points=100, seed=0):
"""Generates a specified number of random points inside a given area.
Args: region(feature): region to generate points
        color (str): hex color code; defaults to "00FFFF" (cyan)
        points (int): number of random points to generate; defaults to 100
        seed (int): random seed; defaults to 0
Returns: a feature collection of locations
"""
if not isinstance(region, ee.Geometry):
err_str = "\n\nThe region of interest must be an ee.Geometry."
raise AttributeError(err_str)
color = "000000"
if color is None:
color = "00FFFF"
if points is None:
points = 100
points_rand = ee.FeatureCollection.randomPoints(
region=region, points=points, seed=seed
)
return points_rand | 9fc82d74a7d65bd7869f79b0e6050199f4368b3e | 817 |
from tqdm import tqdm
def GridSearch(X_train_df, Y_train_df, hps_NaN_dict, features_l,
hps_models_dict, cv=5, n_jobs=-1, randomise=True):
"""Launch a grid search over different value of the hps."""
# Compute all the possible combinations of hps
tuples_hp = BuildHpTuples(X_train_df, hps_NaN_dict,
features_l, hps_models_dict)
# Creates dataframe in which all results will be stored
# (allows early stopping of grid search)
pd_res_df = pd.DataFrame()
# Executes a Cross Validation for all possible tuples
scores_param = []
# Randomisation of the tuples
if randomise:
np.random.shuffle(tuples_hp)
for tuple_i in tqdm(tuples_hp):
[best_score, best_params_n,
best_params_v, pd_res_df] = subGridSearch(X_train_df, Y_train_df,
tuple_i,
pd_res_df, cv=cv,
n_jobs=n_jobs)
results = (best_score, best_params_n, best_params_v)
scores_param.append(results)
# Extract best scores and parameters
maxi = 0
best_params_names = 0
best_params_values = 0
for sublist in scores_param:
if sublist[0] > maxi:
maxi = sublist[0]
best_params_names = sublist[1]
best_params_values = sublist[2]
# Return result
return maxi, best_params_names, best_params_values | 8aa5a89e011e47c46ba809792787eb5793f6b0ec | 818 |
def atm():
"""Fixture to compute atmospheric properties just once before tests.
Returns:
Atmosphere object
"""
return Atmosphere(h_geom_truth_arr) | eea29ad064920d71e00fc5f8dd3f4bef75776b87 | 819 |
def get_key(my_dict: dict, val):
"""Get key value form dictionary using key value.
Args:
collection_name: dict: collection in dictionary format
val: Value in dictionary
Returns:
Key from dictionary.
"""
for key, value in my_dict.items():
if val == value:
return key
return "key doesn't exist" | 99bb74468b4dd5bb02c6f642a86c345365d8d616 | 820 |
def closest(point, points):
"""works, but @@DEPRECATED!!
return index into points of closest-to-point
"""
da = dist_array(point, points)
return N.argmin(da) | 70f30f98dfb2aed9d6f485d17c113469005946ea | 821 |
def generate_provenance_json(script="unknown", params={}):
"""Generate the provenance in a format which can later be output as valid json.
Inputs:
string: The name of the script used to trigger the data generation/deidentification/synthesis process
dict: The parameters used to tune the data generation etc. process; should include random seeds and other
options as appropriate for the method
Returns:
dict: Details of the script called by the user and any relevant parameters
"""
commit = get_git_commit_hash()
local_mods = get_local_changes()
provenance = {"script": script,
"commit": commit,
"local_modifications": local_mods,
"parameters": params}
return provenance | f18712b8652da832f3271bcfb8098b7e0b029dd0 | 822 |
def dot(v, w):
"""v_1 * w_1 + ... + v_n * w_n"""
return sum(v_i * w_i for v_i, w_i in zip(v, w)) | 29f6551f44459ad6c7856500019a1c551aaa3cbe | 823 |
def get_users():
"""Query for user accounts."""
return JsonResponse(queryFor(UserAccount)) | 47cf0dcf2874ce89d1e394aa5e80c026e036cc0a | 824 |
from typing import Dict
def get_added_and_removed_pitches(
chord_root_tpc: int,
chord_type: ChordType,
changes: str,
key_tonic_tpc: int,
key_mode: KeyMode,
) -> Dict[str, str]:
"""
Get a mapping of pitch alterations from the given chord. Pitches are given
and returned with PitchType TPC because accidental-specific math is required
to correctly apply accidentals.
Parameters
----------
chord_root_tpc : int
The root pitch of the given chord, in TPC notation.
chord_type : ChordType
The type of the given chord.
changes : str
A string of the changes or alterations of a given chord, like "64" or "+b2".
key_tonic_tpc : int
The tonic pitch of the current key, including any relative root, in TPC notation.
key_mode : KeyMode
The mode of the current key, including any relative root.
Returns
-------
changed_pitches : Dict[str, str]
A dictionary representing pitch alterations to the given chord. Each entry represents
a mapping of original_pitch -> new_pitch, represented as a string of their TPC integer.
If original_pitch is empty, then the new_pitch is simply added. If new_pitch begins
with "+", then it is added in an upper octave.
"""
added_pitches = []
removed_pitches = []
# First, we have to find the chord numeral degree, since changes are notated numerically
# relative to the chord's tonal pitch class.
# i.e., any "2" change to a IV chord will have some V in it, regardless of any accidentals.
chord_root_str = get_pitch_string(chord_root_tpc, PitchType.TPC)
for degree in range(1, 8):
interval = get_interval_from_scale_degree(str(degree), True, key_mode, PitchType.TPC)
pitch_str = get_pitch_string(interval + key_tonic_tpc, PitchType.TPC)
if pitch_str[0] == chord_root_str[0]:
break
changes_list = split_changes_into_list(changes)
# Calculate added pitches first
for change in changes_list:
while change[0] in "v^+":
change = change[1:]
# Find the scale degree for this change
accidental_count, new_change = get_accidental_adjustment(change, in_front=True)
accidental_count = abs(accidental_count)
octave = "+" if int(new_change) >= 8 else ""
# Convert change to be relative to the key tonic, including accidentals
change_degree = (int(new_change) + degree - 2) % 7 # -2 since both are 1-indexed
        change_degree += 1  # Convert back to 1-indexing
change_degree_str = change[:accidental_count] + str(change_degree)
# Calculate interval above scale degree, including additional octaves
interval = get_interval_from_scale_degree(change_degree_str, True, key_mode, PitchType.TPC)
# Store added pitch, including "+" if the pitch is an octave up
added_pitches.append(octave + str(interval + key_tonic_tpc))
# Calculate chord vector in ascending pitch order
chord_vector = get_vector_from_chord_type(chord_type, PitchType.TPC, chord_root_tpc)
chord_vector = np.where(chord_vector == 1)[0]
ascending_chord_vector = []
for degree in range(1, 8):
interval = get_interval_from_scale_degree(str(degree), True, key_mode, PitchType.TPC)
pitch_str = get_pitch_string(interval + chord_root_tpc, PitchType.TPC)
for pitch in chord_vector:
if get_pitch_string(pitch, PitchType.TPC)[0] == pitch_str[0]:
ascending_chord_vector.append(pitch)
# Calculate removed pitches
for change in changes_list:
if change[0] == "+":
# Added pitch only - no deletion
removed_pitches.append("")
_, new_change = get_accidental_adjustment(change, in_front=True)
if change[0] == "^" or (new_change in "246" and change[0] == "#"):
# Replaces the above pitch
if change == "#6" and len(ascending_chord_vector) == 3:
# Special case: If #6 occurs for a triad, it is an addition,
# since it cannot be a lower replacement to a non-existent 7
removed_pitches.append("")
continue
# 2 replaces the 2nd chord pitch, 4 replaces the 3rd, etc.
removed_pitches.append(str(ascending_chord_vector[int(change[-1]) // 2]))
elif change[0] == "v" or (new_change in "246" and change[0] != "#"):
# Replaces the below pitch
# 2 replaces the 1st chord pitch, 4 replaces the 2nd, etc.
removed_pitches.append(str(ascending_chord_vector[int(change[-1]) // 2 - 1]))
else:
# No removed pitch
removed_pitches.append("")
return {removed: added for removed, added in zip(removed_pitches, added_pitches)} | 857c581af61457450e89574ee94839131da94382 | 825 |
def get_n_random_points_in_region(region_mesh, N, s=None):
"""
    Gets N random points inside (or on the surface) of a mesh.
"""
region_bounds = region_mesh.bounds()
if s is None:
s = int(N * 2)
X = np.random.randint(region_bounds[0], region_bounds[1], size=s)
Y = np.random.randint(region_bounds[2], region_bounds[3], size=s)
Z = np.random.randint(region_bounds[4], region_bounds[5], size=s)
pts = [[x, y, z] for x, y, z in zip(X, Y, Z)]
ipts = region_mesh.insidePoints(pts).points()
if N <= ipts.shape[0]:
return ipts[np.random.choice(ipts.shape[0], N, replace=False), :]
else:
return get_n_random_points_in_region(region_mesh, N, s=int(N * 4)) | ead32d58612f6ceddd495218efd8aac3d4cb3ca3 | 826 |
import numpy
def make_octad_tables(basis):
"""Return tables for encoding an decoding octads.
Octads are numbered in the lexicographical order as given by the
numbers of the corresponding Golay codewords in 'gcode'
representation.
The function returns a triple
(oct_enc_table, oct_dec_table, oct_enc_offset)
    Given octad o, the corresponding Golay code word in gcode
    representation is oct_dec_table[o], for 0 <= o < 759.
Given an octad (or a complement of an octad) v in 'gcode'
representation, the number of the corresponding octad is:
(oct_enc_table[v1] >> 1) + 3 * (v1 >> 3) - oct_enc_offset ,
where v1 = v & 0x7ff.
    The vector v is a (possibly complemented) octad if the following
condition holds:
oct_enc_table[v1] < 255 .
It is not a complemented octad if in addition we have:
(v >> 12) & oct_enc_table[v1] & 1 == 0
"""
codewords = lin_table(basis[:11])
    oct_dec_table = numpy.zeros(759, dtype=numpy.uint16)
octad = 0
d = {}
w = {}
for gcode, vector in enumerate(codewords[:2048]):
weight = bw24(vector)
if weight in [8, 16]:
oct_dec_table[octad] = gcode + ((weight & 16) << 7)
d[gcode] = octad - 3 * (gcode >> 3)
w[gcode] = weight >> 4
octad += 1
assert octad == 759
d_min, d_max = min(d.values()), max(d.values())
assert d_min <= 0
assert d_max - d_min < 127
    oct_enc_table = numpy.full(2048, 255, dtype=numpy.uint8)
for gcode, dict_value in d.items():
new_value = dict_value - d_min
oct_enc_table[gcode] = w[gcode] + 2 * new_value
return oct_enc_table, oct_dec_table, -d_min | 8db61ea0e3a2aa4792aefe0eb0cee9b77aa76d91 | 827 |
def fetch_profile(request):
""" attaches the user.profile object into the request object"""
context = {}
if request.user.is_authenticated():
profile_module_name = get_my_profile_module_name()
profile = getattr(request, profile_module_name, None)
        if profile is not None:
context[profile_module_name] = profile
return context | ac86e9aa47d316b0a66aadafc2f55fad24a150fe | 828 |
import os
def storeLabledImagesInFile():
"""Consolidates the images in the emotion directories and stores them in data.npy and lables.npy
file. Does virtual sampling for classes which do not have sufficient samples"""
data = []
labels = []
if os.path.exists(OUTPUT_DIRECTORY):
images = []
noOfImages = []
for dir in EMOTION_DIRECTORIES:
if os.path.exists(OUTPUT_DIRECTORY + dir):
images.append(os.listdir(OUTPUT_DIRECTORY + dir))
noOfImages.append(len(images[-1]))
targetCount = max(noOfImages)
for i in range(0, len(EMOTION_DIRECTORIES)):
if os.path.exists(OUTPUT_DIRECTORY + EMOTION_DIRECTORIES[i]):
mask = np.zeros((100, 100))
for j in range(0, targetCount):
if (j != 0 and j % noOfImages[i] == 0):
mask = np.random.randint(0, 3, (100, 100))
face = cv2.imread(OUTPUT_DIRECTORY + EMOTION_DIRECTORIES[i] + "/" + images[i][j % noOfImages[i]])[:,
:, 1]
face = face + mask
face[np.where(face >= 256)] = 255
data.append(face)
labels.append(i)
np.save(OUTPUT_DIRECTORY + "/data", np.array(data))
np.save(OUTPUT_DIRECTORY + "/labels", np.array(labels))
else:
print("Invalid path " + OUTPUT_DIRECTORY)
return False | 156ed4dfa18e319d8c5abbba3c99140bd7bcfb9f | 829 |
def to_local_op(input):
"""Returns the local tensor of a consistent tensor.
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> np_arr = np.array([0.5, 0.6, 0.7]).astype(np.float32)
>>> input = flow.Tensor(np_arr)
>>> placement = flow.placement("cpu", {0:range(1)})
>>> consistent_tensor = input.to_consistent(placement, [flow.sbp.split(0)])
>>> consistent_tensor.to_local()
tensor([0.5, 0.6, 0.7], dtype=oneflow.float32)
"""
assert input.is_consistent, "input must be a consistent tensor!"
return flow.F.to_local(input) | 3aa9797e21c8eaa566825df7e099f14064e64b73 | 830 |
import math
def col_round(x):
"""
    As Python 3 rounds a 0.5 fraction to the closest even number,
    floor and ceil are used here to round 0.5 up to the next integer
    and anything below 0.5 down to the previous one.
"""
frac = x - math.floor(x)
if frac < 0.5:
return math.floor(x)
return math.ceil(x) | 3f21a6dcc525daebf78c9adfd6afee9ba865399b | 831 |
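A few sample calls contrasting this with Python 3's banker's rounding.

# Usage sketch: built-in round() uses banker's rounding; col_round always rounds .5 up.
print(round(2.5), col_round(2.5))   # 2 vs 3
print(round(3.5), col_round(3.5))   # 4 vs 4
print(round(2.4), col_round(2.4))   # 2 vs 2
print(col_round(-0.5))              # 0, since the fractional part of -0.5 is exactly 0.5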
def _create_notebook(client, headers: dict[str, str]) -> str:
"""Create a notebook.
:param client: Test API client.
:param headers: Headers with the access token.
:return: Notebook ID.
"""
n = {"name": "Test Notebook"}
r = client.post("/notebooks/notebook", headers=headers, json=n)
return r.json["result"]["id"] | c112fdb16e858a83265518e50257888d587f59be | 832 |
def plot_range_range_rate(x_sat_orbdyn_stm:np.ndarray, x_obs_multiple:np.ndarray, t_sec: np.array):
""" Plots range and range relative to the station
Args:
x_sat_orbdyn_stm (np.ndarray): satellite trajectory array.
x_obs_multiple (np.ndarray): observer positions.
t_sec (np.ndarray): array of timesteps.
"""
if len(x_obs_multiple.shape) == 2:
        x_obs_multiple = np.expand_dims(x_obs_multiple, axis=2)  # add a station axis
fig = plt.figure(figsize=(14,14))
n_obs = x_obs_multiple.shape[2]
for i in range(n_obs):
r, rr = range_range_rate(x_sat_orbdyn_stm, x_obs_multiple[:,:,i])
ax1 = fig.add_subplot(n_obs, 2, i*2+1)
ax1.plot(t_sec, r)
ax1.set_xlabel('Time (s)')
ax1.set_ylabel('Range (m)')
ax1.grid(':')
        ax1.title.set_text(f'Station {i+1} - Range')
ax2 = fig.add_subplot(n_obs, 2, i*2+2)
ax2.plot(t_sec, rr)
ax2.set_xlabel('Time (s)')
ax2.set_ylabel('Range rate (m/s)')
ax2.grid(':')
        ax2.title.set_text(f'Station {i+1} - Range Rate')
fig.subplots_adjust(hspace=0.3)
return fig | dafa57d5bf8347c19d02216c5cff6aa77392dfed | 833 |
import argparse
import shlex
def parse_autostep(rule):
"""
Parse the autostep line
"""
parser = argparse.ArgumentParser()
rules = shlex.split(rule)
rules.pop(0)
parser.add_argument("--autoscreenshot", dest="autoscreenshot", action="store")
args = clean_args(vars(parser.parse_args(rules)))
parser = None
return args | 443c46623a485b241015cac5c655ecba15982e11 | 834 |
def _make_tag(tagname: str, content: str = "", **attrs) -> str:
"""Creates a tag."""
tag = f"<{tagname} "
for key, value in attrs.items():
if key == "raw":
tag += " " + value
continue
if key == "cls":
key = "class"
if isinstance(value, float):
value = round(value, 2)
tag += f"{_slugify(key)}='{value}' "
tag += f">{content}</{tagname}>"
return tag | cf6dba281a63c90e0ff3ae8bf64c6adb3f936d4c | 835 |
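A hedged usage sketch; `_slugify` is not shown above, so a minimal stand-in (lowercase, underscores to hyphens) is assumed here purely for illustration.

# Hedged sketch: _slugify below is an assumed stand-in, not the real helper.
def _slugify(name: str) -> str:
    return name.lower().replace("_", "-")

html = _make_tag("div", "hello", cls="note", font_size=12.5, raw="data-x='1'")
print(html)   # roughly: <div class='note' font-size='12.5'  data-x='1'>hello</div>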
from typing import Optional
import os
import gzip
import tarfile
import zipfile
def download(url: str, dest: Optional[str] = None, extract: bool=True, ignore_if_exists: bool = False,
compression: Optional[str] = None):
"""
Download a file from the internet.
Args:
url: the url to download
        dest: destination file if extract=False, or destination dir if extract=True. If None, it will be the last part of the URL.
extract: extract a tar.gz or zip file?
ignore_if_exists: don't do anything if file exists
Returns:
the destination filename.
"""
base_url = url.split("?")[0]
if dest is None:
dest = [f for f in base_url.split("/") if f][-1]
if os.path.exists(dest) and ignore_if_exists:
return dest
stream = UrlStream(url)
extension = base_url.split(".")[-1].lower()
if extract and extension in ['gz', 'bz2', 'zip', 'tgz', 'tar']:
os.makedirs(dest, exist_ok=True)
if extension == "gz" and not base_url.endswith(".tar.gz"):
decompressed_file = gzip.GzipFile(fileobj=stream)
with open(os.path.join(dest, url.split("/")[-1][:-3]), 'wb') as f:
while True:
d = decompressed_file.read(1024 * 1024)
if not d:
break
f.write(d)
else:
if extension in ['gz', 'bz2', "tgz", "tar"]:
decompressed_file = tarfile.open(fileobj=stream, mode='r|' +
(compression or (
"gz" if extension == "tgz" else extension)))
elif extension == 'zip':
decompressed_file = zipfile.ZipFile(stream, mode='r')
else:
assert False, "Invalid extension: %s" % extension
decompressed_file.extractall(dest)
else:
try:
with open(dest, 'wb') as f:
for d in stream.iter_content(1024 * 1024):
f.write(d)
except:
os.remove(dest)
raise
return dest | fa335590c82984eb25e9f097e3111eb55b22f630 | 836 |
def action_functions(action_id: str):
"""Determines which function needs to be run."""
action_mappings = {
NewIncidentSubmission.form_slack_view: [report_incident_from_submitted_form],
UpdateParticipantCallbacks.submit_form: [update_participant_from_submitted_form],
UpdateParticipantCallbacks.update_view: [update_update_participant_modal],
}
# this allows for unique action blocks e.g. invite-user or invite-user-1, etc
for key in action_mappings.keys():
if key in action_id:
return action_mappings[key]
return [] | 132e847d63606db0e4d234befe6c313113f6bcaf | 837 |
import os
import pwd
import socket
def get_default_sender():
"""
Determines the sender / to address for outgoing emails.
"""
try:
return os.environ["EMAIL"]
    except KeyError:
# Guess.
# Not sure if euid is the right one to use here.
user = pwd.getpwuid(os.geteuid()).pw_name
host = socket.getfqdn()
return f"{user}@{host}" | 987e85327aea314c11122a926804b6d090b2d1a1 | 838 |
def setup_function(function):
"""
Make sure there are no adapters defined before start of test
"""
clear_adapters()
# Setup a basic int adapter for all tests
@adapter((str, float, int), (int, str))
def to_int(obj, to_cls):
return to_cls(obj) | a72d7a4fc31fe88f155df9f01a79a4c08a38188a | 839 |
def get_best_response_actions_as_string(best_response_actions):
"""Turns a dict<bytes, int> into a bytestring compatible with C++.
i.e. the bytestring can be copy-pasted as the brace initialization for a
{std::unordered_,std::,absl::flat_hash_}map<std::string, int>.
Args:
best_response_actions: A dict mapping bytes to ints.
Returns:
A bytestring that can be copy-pasted to brace-initialize a C++
std::map<std::string, T>.
"""
best_response_keys = sorted(best_response_actions.keys())
best_response_strings = [
"%s: %i" % (k, best_response_actions[k]) for k in best_response_keys
]
return "{%s}" % (", ".join(best_response_strings)) | cf2b475d6bb76d262c17dc7753f1624e38cc69f4 | 840 |
def rank_velocity_genes(adata, vkey="velocity_S", prefix_store="rank", **kwargs):
"""Rank genes based on their raw and absolute velocities for each cell group.
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object that contains the gene-wise velocities.
vkey: str (default: 'velocity_S')
The velocity key.
prefix_store: str (default: 'rank')
The prefix added to the key for storing the returned in adata.
kwargs:
Keyword arguments passed to `vf.rank_genes`.
Returns
-------
adata: :class:`~anndata.AnnData`
AnnData object which has the rank dictionary for velocities in `.uns`.
"""
rdict = rank_genes(adata, vkey, **kwargs)
rdict_abs = rank_genes(adata, vkey, abs=True, **kwargs)
adata.uns[prefix_store + "_" + vkey] = rdict
adata.uns[prefix_store + "_abs_" + vkey] = rdict_abs
return adata | 214a788813782f4f9706357e7139758f78b04dfd | 841 |
def build_genome(tree, genome):
"""
Goes through a tree and builds a genome from all codons in the subtree.
:param tree: An individual's derivation tree.
:param genome: The list of all codons in a subtree.
:return: The fully built genome of a subtree.
"""
if tree.codon:
# If the current node has a codon, append it to the genome.
genome.append(tree.codon)
for child in tree.children:
# Recurse on all children.
genome = child.build_genome(genome)
return genome | 67fd7a23a9ca812717bde5d3e35affc5cc7474f4 | 842 |
from typing import Tuple
def fixture_yaml_formatting_fixtures(fixture_filename: str) -> Tuple[str, str, str]:
"""Get the contents for the formatting fixture files.
To regenerate these fixtures, please run ``test/fixtures/test_regenerate_formatting_fixtures.py``.
Ideally, prettier should not have to change any ``formatting-after`` fixtures.
"""
before_path = formatting_before_fixtures_dir / fixture_filename
prettier_path = formatting_prettier_fixtures_dir / fixture_filename
after_path = formatting_after_fixtures_dir / fixture_filename
before_content = before_path.read_text()
prettier_content = prettier_path.read_text()
formatted_content = after_path.read_text()
return before_content, prettier_content, formatted_content | 0eb4b1d6716b4b925cec0201354893cadd704945 | 843 |
import argparse
def get_base_parser(*args, **kwargs):
"""
Main parser
"""
parser=argparse.ArgumentParser(*args, **kwargs)
# formatter_class=argparse.ArgumentDefaultsHelpFormatter,
# )
# parser.add_argument('--help_all', '--help_model', '--help_dataset', '--help_strategy', '--help_task', '--help_ptracker',
# action=PrintHelpAction, nargs=0,
# help="Print help for given model, dataset, task, strategy args")
parser.add_argument('--task', type=str, default='fsl', choices=TASKS.keys(),
help='Task name')
parser.add_argument('--dataset', type=str, default='mini', choices=DATASETS.keys(),
help='Dataset name')
parser.add_argument('--model', type=str, default='protonet', choices=MODELS.keys(),
help='FSL method name')
parser.add_argument('--backbone', type=str, default='Conv4', choices=BACKBONES.keys(),
help='Backbone neural network name')
parser.add_argument('--strategy', type=str, default=None, choices=STRATEGIES.keys(),
help='Imbalance strategy. If None, no imbalance strategy is used')
parser.add_argument('--gpu', default='0',
help='gpu number or "cpu"')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--deterministic', type=str2bool, nargs='?', const=True, default=True,
help='If true, the tasks are generated deteministically based on the given seed')
parser.add_argument('--results_folder', type=str, default="../experiments/dummy/", # default="../../experiments/",
help='parent folder where all experiments are saved')
parser.add_argument('--experiment_name', type=str, default="default")
parser.add_argument('--experiment_folder', type=str, default=None,
help='experiment folder used to save checkpoints and results')
parser.add_argument('--clean_folder', type=str2bool, nargs='?', const=True, default=False,
help='Clears the experiment folder if it exisits')
parser.add_argument('--storage_friendly', type=str2bool, nargs='?', const=True, default=True,
help='Deletes previously saved models systematically, only keeps best, latest model')
parser.add_argument('--data_path', type=str, default="data/",
help='Data folder with datasets in named subdirectories.')
parser.add_argument('--continue_from', type=str, default=None,
help="Continue from a checkpoint file, epoch, or 'latest', 'best', or 'from_scratch'/None.")
parser.add_argument('--load_backbone_only', type=str2bool, nargs='?', const=True, default=False,
help="Loads the backbone only from 'continue_from'")
parser.add_argument('--dummy_run', type=str2bool, nargs='?', const=True, default=False,
help='A dry run of the settings with a 1 epoch and validation, a reduced number of tasks, no saving')
parser.add_argument('--conventional_split', type=str2bool, nargs='?', const=True, default=None,
help='Joins classes in meta-training and meta-validation datests. '
'Then conventional 80%%-20%% split for train-val datasets. '
'If None, will be split automatically based on model.')
parser.add_argument('--conventional_split_from_train_only', type=str2bool, nargs='?', const=True, default=False,
help='Performs conventional 80%%-20%% data split from the train dataset only,'
' without joining with the validation split. Working only when meta-dataset reduced, see'
' data.dataset_utils.prep_datasets() for details.')
parser.add_argument('--backbone_channel_dim', type=int, default=64,
help='Number of channels of the backbone model.')
parser.add_argument('--tqdm', type=str2bool, nargs='?', const=True, default=False,
help="Enable/Disable tqdm, especially useful when running experiment and redirecting to files")
group = parser.add_argument_group('TASK SAMPLING OPTIONS')
group.add_argument('--num_epochs', type=int, default=100,
help="If none, then will stop training after achieving a stopping criterion, see ExperimentBuilder")
group.add_argument('--num_tasks_per_epoch', type=int, default=500)
group.add_argument('--num_tasks_per_validation', type=int, default=200,
help="Number of tasks to evaluate on after every epoch.")
group.add_argument('--num_tasks_per_testing', type=int, default=600,
help="Number of tasks to evaluate on after meta-training.")
group.add_argument('--evaluate_on_test_set_only', '--test', type=str2bool, nargs='?', const=True, default=False,
help="If present, no (further) training is performed and only the test dataset is evaluated.")
group.add_argument('--val_or_test', type=str, choices=["test","val"], default="val",
help="Dataset to perform validation on. Default val")
group.add_argument('--no_val_loop', type=str2bool, nargs='?', const=True, default=False,
help="No validation loop. Default=False, meaning assume there is a validation loop.")
group.add_argument('--test_performance_tag', type=str, default="test",
help='The tag name for the performance file evaluated on test set, eg "test" in epoch-###_test.json')
group = parser.add_argument_group('VISUALISATION OPTIONS')
group.add_argument('--fix_class_distribution', type=str2bool, nargs='?', const=True, default=False,
help='If present, will fix the class distribution such that the model will be evaluated and tested '
'on the same set of classes between tasks.')
group.add_argument('--count_samples_stats', type=str2bool, nargs='?', const=True, default=False,
help='If true, counts the images and stores the distribution stats of images shown during the run')
return parser | 50892de17e2455ddb0f14365284085c83e8af4cf | 844 |
def conv_bboxinfo_bboxXYHW_to_centerscale(bbox_xyhw, bLooseBox = False):
"""
from (bbox_xyhw) -> (center, scale)
Args:
bbox_xyhw: [minX,minY,W,H]
bLooseBox: if true, draw less tight box with sufficient margin (SPIN's default setting)
Output:
center: bbox center
scale: scaling images before cropping. reference size is 200 pix (why??). >1.0 means size up, <1.0 means size down. See get_transform()
h = 200 * scale
t = np.zeros((3, 3))
t[0, 0] = float(res[1]) / h
t[1, 1] = float(res[0]) / h
t[0, 2] = res[1] * (-float(center[0]) / h + .5)
t[1, 2] = res[0] * (-float(center[1]) / h + .5)
t[2, 2] = 1
"""
center = [bbox_xyhw[0] + bbox_xyhw[2]/2, bbox_xyhw[1] + bbox_xyhw[3]/2]
if bLooseBox:
scaleFactor =1.2
scale = scaleFactor*max(bbox_xyhw[2], bbox_xyhw[3])/200 #This is the one used in SPIN's pre-processing. See preprocessdb/coco.py
else:
scale = max(bbox_xyhw[2], bbox_xyhw[3])/200
return center, scale | 1d85f9ee0ee6db00877eeb091729d2748fec08cf | 845 |
def add_video(db: Session, video: schemas.Video):
"""
Adds video to table video_library
model used is Video.
Attributes:
- video_user_id: int, non-nullable, foreign_key
- video_link: string, non-nullable, unique
- video_name: string, non-nullable,
- video_height: int, non-nullable,
- video_width: int, non-nullable,
- file_format: string, non-nullable,
- ts_upload: datetime, non-nullable,
- categories: string, nullable,
- description: string, nullable,
- length: int, non-nullable,
- views: int, non-nullable
- no_likes: int, non-nullable,
- no_dislikes: int, non-nullable,
Args:
- db: Session
- video: schemas.Video
Returns:
- db_video object
"""
db_video = models.Video(
video_username=video.video_username,
video_link=video.video_link,
video_name=video.video_name,
video_height=video.video_height,
video_width=video.video_width,
file_format=video.file_format,
ts_upload=get_timestamp_now(),
categories=video.categories,
description=video.description,
length=video.length,
views=video.views,
no_likes=video.no_likes,
no_dislikes=video.no_dislikes,
)
# add to database
db.add(db_video)
db.commit()
db.refresh(db_video)
return db_video | ff192b8b286ae5e143ff3a9671e74be630a1a557 | 846 |
def compute_principal_axes(xyz_centered, weights=None, twodim=True):
"""
:param xyz_centered: [list_of_xs, lst_of_ys, list_of_zs]
:param weights: weights of each pixel
:param twodim: whether to compute two main axes in xy plane, or three axes in 3D image.
:return: ax1, ax2, (ax3 if not twodim else None)
"""
if twodim:
xyz_centered = xyz_centered[:2]
cov = np.cov(xyz_centered, aweights=weights)#covariance between the variables x,y,z. pixels are the observations
evals, evecs = np.linalg.eig(cov)#MB: it seems to be for finding the main axis of the worm
# sort eigenvalues in decreasing order
sort_indices = np.argsort(evals)[::-1]
ax1 = evecs[:, sort_indices[0]]
ax2 = evecs[:, sort_indices[1]]
if twodim:
ax3 = None
else:
ax3 = evecs[:, sort_indices[2]]
return ax1, ax2, ax3 | fcee28353676dd2955e12ff134f31a353f8849cb | 847 |
import time
def stream_http_get(S, dest):
"""Get contents of http://dest/ via HTTP/1.0 and
samclasses.StreamSession S."""
C = S.connect(dest)
C.send('GET / HTTP/1.0\r\n\r\n')
while True:
line = stream_readline(C).strip()
if line.find('Content-Length: ') == 0:
clen = int(line.split()[1])
if line == '': break
s = C.recv(clen, timeout=None)
time.sleep(2.0)
C.close()
return s | e13e0ec701985b9fdabb33842e1233a8fee04fd8 | 848 |
import re
def get_definition(division_id, path=None):
"""
Returns the expected contents of a definition file.
"""
config = {}
division = Division.get(division_id, from_csv=ocd_division_csv)
# Determine slug, domain and authority.
name = division.name
if not name:
print('%-60s unknown name: check slug and domain manually' % division.id)
if division._type == 'country':
slug = 'Federal electoral districts'
config['domain'] = name
config['authority'] = ['Her Majesty the Queen in Right of Canada']
elif division._type in ('province', 'territory'):
slug = '%s electoral districts' % name
config['domain'] = name
config['authority'] = ['Her Majesty the Queen in Right of %s' % name]
elif division._type in ('cd', 'csd'):
province_or_territory_sgc_code = type_id(division.id)[:2]
if province_or_territory_sgc_code == '24' and division.id in divisions_with_boroughs():
slug = re.compile(r'\A%s (boroughs|districts)\Z' % name)
elif province_or_territory_sgc_code == '12' and division.attrs['classification'] != 'T':
slug = '%s districts' % name
elif province_or_territory_sgc_code == '47' and division.attrs['classification'] != 'CY':
slug = '%s divisions' % name
elif province_or_territory_sgc_code == '48' and division.attrs['classification'] == 'MD':
slug = '%s divisions' % name
elif province_or_territory_sgc_code == '24':
if division.id in quartiers:
slug = '%s quartiers' % name
else:
slug = '%s districts' % name
else:
slug = '%s wards' % name
config['domain'] = '%s, %s' % (name, province_or_territory_abbreviation(division.id))
if province_or_territory_sgc_code == '12' and 'boundaries/ca_ns_districts/' in path:
config['authority'] = ['Her Majesty the Queen in Right of Nova Scotia']
elif province_or_territory_sgc_code == '13' and 'boundaries/ca_nb_wards/' in path:
config['authority'] = ['Her Majesty the Queen in Right of New Brunswick']
elif province_or_territory_sgc_code == '24' and 'boundaries/ca_qc_' in path:
config['authority'] = ['Directeur général des élections du Québec']
elif province_or_territory_sgc_code == '47' and division.attrs['classification'] != 'CY':
config['authority'] = ['MuniSoft']
elif division._type == 'csd':
config['authority'] = authorities + [division.attrs['organization_name']]
else:
config['authority'] = [''] # We have no expectation for the authority of a Census division
elif division._type == 'borough':
province_or_territory_sgc_code = type_id(division.parent.id)[:2]
if name:
slug = '%s districts' % name
config['domain'] = '%s, %s, %s' % (name, division.parent.name, province_or_territory_abbreviation(division.parent.id))
else:
slug = None
config['domain'] = None
if province_or_territory_sgc_code == '24':
config['authority'] = ['Directeur général des élections du Québec']
else:
config['authority'] = [division.parent.attrs['organization_name']]
else:
raise Exception('%s: Unrecognized OCD type %s' % (division.id, division._type))
return (slug, config) | 5dd1b67729c7ddc6ca6287e3a5931a751751da2c | 849 |
def api_last_blog_update(request):
"""Return the date of the last blog update.
This is a PRIVATE API.
Format: __lastblogupdate.json
JSON return:
{'lastupdate': '2019-01-31'}
or if none available:
{'lastupdate': None}
"""
api_code = enter_api_call('api_last_blog_update', request)
if not request or request.GET is None:
ret = Http404(HTTP404_NO_REQUEST('/__lastblogupdate.json'))
exit_api_call(api_code, ret)
raise ret
lastupdate = None
try:
with open(settings.OPUS_LAST_BLOG_UPDATE_FILE, 'r') as fp:
lastupdate = fp.read().strip()
except:
try:
log.error('api_last_blog_update: Failed to read file "%s"',
settings.OPUS_LAST_BLOG_UPDATE_FILE)
except:
log.error('api_last_blog_update: Failed to read file UNKNOWN')
ret = json_response({'lastupdate': lastupdate})
exit_api_call(api_code, ret)
return ret | f22413dc6a202fb15cdb0f94ede5eefc4477c21b | 850 |
def pearson(arr1, arr2):
"""
calculate pearson correlation between two numpy arrays.
:param arr1: one array, the feature is a column. the shape is `m * n`
:param arr2: the other array, the feature is a column. the shape is `m * k`
:return: a pearson score np.array , the shape is `k * n`
"""
assert arr1.shape[0] == arr2.shape[0]
n = arr1.shape[0]
sums = np.multiply.outer(arr2.sum(0), arr1.sum(0))
stds = np.multiply.outer(arr2.std(0), arr1.std(0))
return (arr2.T.dot(arr1) - sums / n) / stds / n | 5de328cec314c7d320928e12cb634d13a60fa504 | 851 |
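A hedged sanity check of the formula against numpy's own corrcoef for one feature pair; features are columns, as the docstring states.

# Sanity-check sketch: compare one entry of the score matrix with np.corrcoef.
import numpy as np

rng = np.random.default_rng(0)
arr1 = rng.normal(size=(100, 3))      # m=100 samples, n=3 features
arr2 = rng.normal(size=(100, 2))      # m=100 samples, k=2 features
scores = pearson(arr1, arr2)          # shape (2, 3)
ref = np.corrcoef(arr2[:, 0], arr1[:, 0])[0, 1]
print(np.isclose(scores[0, 0], ref))  # True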
def exact_match_filter(query_set, field, values):
"""Check if a field exactly matches a value."""
return field_filter(lambda x, y: Q(**{x: y}), query_set, field, values) | 8b25770c74140921d3ef61283b37c67ab98e1e01 | 852 |
def get_move_descriptions(get_moved_ids, initial_state, current_state, obj_stuff, sort_attributes, obj_attributes):
"""
Get all 'move' descriptions from the current state (if any).
Parameters
----------
get_moved_ids: function
Function that extracts the id of objects that are being moved.
initial_state: nd.array
Initial state of the environment.
current_state: nd.array
Current state of the environment.
obj_stuff: list of objects and their sizes
List of initial objects {type, color, category} and their sizes.
sort_attributes: function
Function that separates adjective and name attributes.
obj_attributes: list of list
List of the list of object attributes for each object.
Returns
-------
descr: list of str
List of 'move' descriptions satisfied by the current state.
"""
obj_moved = get_moved_ids(initial_state, current_state)
verb = 'Move'
move_descriptions = []
for i_obj in obj_moved:
att = obj_attributes[i_obj]
adj_att, name_att = sort_attributes(att)
for adj in adj_att:
quantifier = 'any'
for name in name_att:
move_descriptions.append('{} {} {}'.format(verb, adj, name))
move_descriptions.append('{} {} {} object'.format(verb, quantifier, adj))
for name in name_att:
move_descriptions.append('{} any {}'.format(verb, name))
return move_descriptions.copy() | 0b423d41b24a60611ae33048afe4ddf60d15a558 | 853 |
def reliability_diagram(labels,
probs,
class_conditional=False,
y_axis='accuracy',
img=False):
"""Reliability Diagram plotting confidence against accuracy.
Note that this reliability diagram is created by looking at the calibration of
the set of datapoints that surround each datapoint, not through mutually
exclusive bins.
Args:
labels: label vector.
probs: probability matrix out of a softmax.
class_conditional: whether to visualize every class independently, or
conflate classes.
y_axis: takes 'accuracy or 'error'. Set y_axis to 'error' to graph the
calibration error (confidence - accuracy) against the accuracy instead.
img: return as image rather than as a plot.
Returns:
fig: matplotlib.pyplot figure.
"""
probs = np.array(probs)
labels = np.array(labels)
probs, _ = verify_probability_shapes(probs)
labels_matrix = one_hot_encode(labels, probs.shape[1])
if class_conditional:
for class_index in range(probs.shape[1]):
if img:
return to_image(plot_diagram(
probs[:, class_index], labels_matrix[:, class_index], y_axis))
else:
return plot_diagram(
probs[:, class_index], labels_matrix[:, class_index], y_axis)
else:
if img:
return to_image(
plot_diagram(probs.flatten(), labels_matrix.flatten(), y_axis))
else:
return plot_diagram(probs.flatten(), labels_matrix.flatten(), y_axis) | 2653321e0199ea4b067d4e2ea2d169b2c1d872bf | 854 |
def GuessSlugFromPath(path):
"""Returns the slug."""
if path.endswith('index.md'):
# If it ends with index, get the second last path component.
return path.split('/')[-2]
else:
# Otherwise, just get the filename.
return path.split('/')[-1].split('.')[0] | ff0c8f4f12fdc1ddf684393408a725a0d4c3ce0e | 855 |
import os
from urllib import request
from urllib.error import HTTPError, URLError
def get_file(fname, origin, cache_subdir='datasets', file_hash=None):
"""Downloads a file from a URL if not already in the cache.
ref: https://github.com/keras-team/keras/blob/7a39b6c62d43c25472b2c2476bd2a8983ae4f682/keras/utils/data_utils.py#L123
By default the file at the url `origin` is downloaded to the
CACHE_DIR `~/.kerasy`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.kerasy/datasets/example.txt`.
You have to make a directory `~/.kerasy` and `~./kerasy/datasets`,
and check whether you can access these directories using `os.access("DIRECOTRY", os.W_OK)`
If this method returns False, you have to change the ownership of them like
```
$ sudo chown iwasakioshuto: ~/.kerasy
$ sudo chown iwasakioshuto: ~/.kerasy/datasets
```
"""
# /Users/<username>/.kerasy/`cache_subdir`
DATADIR = os.path.join(DATADIR_BASE, cache_subdir)
if not os.path.exists(DATADIR):
os.makedirs(DATADIR)
fpath = os.path.join(DATADIR, fname)
if not os.path.exists(fpath):
print('Downloading data from', origin)
error_msg = 'URL fetch failure on {} : {} -- {}'
try:
try:
request.urlretrieve(origin, fpath)
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
return fpath | e3d46ee375c2f0e45e62a5adb6f999f90e5d4af4 | 856 |
def simplified_fit(train_loader, val_loader, model, loss_fn, optimizer, n_epochs, is_cuda_available, metrics=[],
start_epoch=0, scheduler = None, log_interval=1):
"""
TODO
"""
train_list = []
valid_list = []
log_interval = len(train_loader)//2
if scheduler != None:
for epoch in range(0, start_epoch):
scheduler.step()
for epoch in range(start_epoch, n_epochs):
if scheduler != None:
scheduler.step()
# Train stage
train_loss, _metrics = train_epoch(train_loader, model, loss_fn, optimizer, is_cuda_available, log_interval, metrics)
message = 'Epoch: {}/{}. Train set: Average loss: {:.4f}'.format(epoch + 1, n_epochs, train_loss)
for metric in _metrics:
message += '\t{}: {}'.format(metric.name(), metric.value())
train_list.append(metric.value())
# Validation stage
if val_loader != None:
val_loss, _metrics = test_epoch(val_loader, model, loss_fn, is_cuda_available, metrics)
val_loss /= len(val_loader)
message += '\nEpoch: {}/{}. Validation set: Avg loss: {:.4f}'.format(epoch + 1, n_epochs,val_loss)
for metric in _metrics:
message += '\t{}: {}'.format(metric.name(), metric.value())
valid_list.append(metric.value())
print(message)
return (train_list, valid_list) | 949690e761d474b7678d3f7448c2e464e954adc6 | 857 |
import torch
def _onenorm_matrix_power_nnm(A, p):
"""
Compute the 1-norm of a non-negative integer power of a non-negative matrix.
Parameters
----------
A : a square ndarray or matrix or sparse matrix
Input matrix with non-negative entries.
p : non-negative integer
The power to which the matrix is to be raised.
Returns
-------
out : float
The 1-norm of the matrix power p of A.
"""
# check input
if int(p) != p or p < 0:
raise ValueError('expected non-negative integer p')
p = int(p)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
# Explicitly make a column vector so that this works when A is a
# numpy matrix (in addition to ndarray and sparse matrix).
v = torch.ones((A.shape[0], 1), dtype=A.dtype, device=A.device)
M = A.t()
for _ in range(p):
v = M.mm(v)
return torch.max(v).item() | 55ae02857418ca6b70a6532a27621e81e3ac3373 | 858 |
def _stiff_terms_null(states, *args, **kwargs):
"""Dummy function"""
return states | a77a23a8b6d480ab0bf92313cbb8a04ebd9770a8 | 859 |
import tqdm
import scipy
from os import path
def add_dataset(dset_fp, dset_fromroot, list_ids, up3d_fp, # pylint: disable=too-many-locals, too-many-arguments, too-many-statements, too-many-branches
train_list_f, val_list_f, train_val_list_f, test_list_f, scale_f,
train_spec, val_spec, test_spec,
target_person_size, landmarks, train_crop, test_crop, running_idx,
only_missing=False, with_rlswap=True, write_gtjoints_as_lm=False,
human_annotations=False):
"""Add a dataset to the collection."""
test_ids = [int(id_[1:6]) for id_ in test_spec]
train_ids = [int(id_[1:6]) for id_ in train_spec]
val_ids = [int(id_[1:6]) for id_ in val_spec]
LOGGER.info("Split: %d train, %d val, %d test.",
len(train_ids), len(val_ids), len(test_ids))
LOGGER.info("Writing dataset...")
for im_idx in tqdm.tqdm(train_ids + val_ids + test_ids):
image = scipy.misc.imread(path.join(up3d_fp, '%05d_image.png' % (im_idx)))
with open(path.join(up3d_fp, '%05d_fit_crop_info.txt' % (im_idx)), 'r') as inf:
cropinfo = [int(val) for val in inf.readline().strip().split()]
assert image.ndim == 3
out_exists = (path.exists(path.join(dset_fp, '%05d_image.png' % (running_idx))) and
path.exists(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx))))
if with_rlswap and im_idx not in test_ids:
out_exists = out_exists and (
path.exists(path.join(dset_fp, '%05d_image.png' % (running_idx + 1))) and
path.exists(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx + 1))))
if not (only_missing and out_exists or write_gtjoints_as_lm):
if human_annotations:
landmark_pos = np.load(path.join(up3d_fp, '%05d_joints.npy' % (im_idx)))
else:
landmark_pos = get_landmark_positions(path.join(up3d_fp, '%05d_body.pkl' % (im_idx)),
(cropinfo[1], cropinfo[0]),
landmarks)
fac_y = cropinfo[0] / float(cropinfo[3] - cropinfo[2])
fac_x = cropinfo[1] / float(cropinfo[5] - cropinfo[4])
landmark_pos[:2, :] /= np.mean([fac_x, fac_y])
landmark_pos[0, :] += cropinfo[4]
landmark_pos[1, :] += cropinfo[2]
joints = np.load(path.join(up3d_fp, '%05d_joints.npy' % (im_idx)))
joints = np.vstack((joints, np.all(joints > 0, axis=0)[None, :]))
person_size = robust_person_size(joints)
norm_factor = float(target_person_size) / person_size
joints[:2, :] *= norm_factor
if not (only_missing and out_exists or write_gtjoints_as_lm):
landmark_pos[:2, :] *= norm_factor
if write_gtjoints_as_lm:
landmark_pos = joints.copy()
image = scipy.misc.imresize(image, norm_factor, interp='bilinear')
if im_idx in test_ids:
crop = test_crop
else:
crop = train_crop
if image.shape[0] > crop or image.shape[1] > crop:
LOGGER.debug("Image (original %d, here %d) too large (%s)! Cropping...",
im_idx, running_idx, str(image.shape[:2]))
person_center = np.mean(joints[:2, joints[2, :] == 1], axis=1)
crop_y, crop_x = get_crop(image, person_center, crop)
image = image[crop_y[0]:crop_y[1],
crop_x[0]:crop_x[1], :]
landmark_pos[0, :] -= crop_x[0]
landmark_pos[1, :] -= crop_y[0]
assert image.shape[0] == crop or image.shape[1] == crop, (
"Error cropping image (original %d, here %d)!" % (im_idx,
running_idx))
assert image.shape[0] <= crop and image.shape[1] <= crop and image.shape[2] == 3, (
"Wrong image shape (original %d, here %d)!" % (im_idx, running_idx))
vis_im = vs.visualize_pose(image, landmark_pos, scale=1.)
if not (only_missing and out_exists):
scipy.misc.imsave(path.join(dset_fp, '%05d_image.png' % (running_idx)), image)
scipy.misc.imsave(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx)), vis_im)
if with_rlswap and im_idx not in test_ids:
if landmark_pos.shape[1] == 14:
landmark_pos_swapped = landmark_pos[:, rlswap_lsp]
else:
landmark_pos_swapped = landmark_pos[:, rlswap_landmarks_91]
landmark_pos_swapped[0, :] = image.shape[1] - landmark_pos_swapped[0, :]
image_swapped = image[:, ::-1, :]
# Use core visualization for 14 joints.
vis_im_swapped = vs.visualize_pose(image_swapped,
landmark_pos_swapped,
scale=1)
if not (only_missing and out_exists):
scipy.misc.imsave(path.join(dset_fp, '%05d_image.png' % (running_idx + 1)),
image_swapped)
scipy.misc.imsave(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx + 1)),
vis_im_swapped)
list_fs = []
list_id_ids = []
if im_idx in train_ids:
list_fs.append(train_val_list_f)
list_id_ids.append(2)
list_fs.append(train_list_f)
list_id_ids.append(0)
elif im_idx in val_ids:
list_fs.append(train_val_list_f)
list_id_ids.append(2)
list_fs.append(val_list_f)
list_id_ids.append(1)
elif im_idx in test_ids:
list_fs.append(test_list_f)
list_id_ids.append(3)
for list_f, list_id_idx in zip(list_fs, list_id_ids):
# pylint: disable=bad-continuation
list_f.write(
"""# %d
%s
3
%d
%d
%d
""" % (
list_ids[list_id_idx],
path.join('/' + dset_fromroot, '%05d_image.png' % (running_idx)),
image.shape[0],
image.shape[1],
landmark_pos.shape[1]))
for landmark_idx, landmark_point in enumerate(landmark_pos.T):
list_f.write("%d %d %d\n" % (landmark_idx + 1,
int(landmark_point[0]),
int(landmark_point[1])))
list_f.flush()
list_ids[list_id_idx] += 1
scale_f.write("%05d_image.png %f\n" % (running_idx, norm_factor))
scale_f.flush()
running_idx += 1
if with_rlswap and im_idx not in test_ids:
for list_f, list_id_idx in zip(list_fs, list_id_ids):
# pylint: disable=bad-continuation
list_f.write(
"""# %d
%s
3
%d
%d
%d
""" % (
list_ids[list_id_idx],
path.join('/' + dset_fromroot, '%05d_image.png' % (running_idx)),
image.shape[0],
image.shape[1],
landmark_pos.shape[1]))
for landmark_idx, landmark_point in enumerate(landmark_pos_swapped.T):
list_f.write("%d %d %d\n" % (landmark_idx + 1,
int(landmark_point[0]),
int(landmark_point[1])))
list_f.flush()
list_ids[list_id_idx] += 1
scale_f.write("%05d_image.png %f\n" % (running_idx, norm_factor))
scale_f.flush()
running_idx += 1
return running_idx | 23bb0e4bb86794dbacf6777bfb75c280fb37f4ed | 860 |
import numpy as np
def _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b):
"""Non-batch KL for N(mu_a, sigma_a), N(mu_b, sigma_b)."""
# Check using numpy operations
# This mostly repeats the tensorflow code _kl_mvn_mvn(), but in numpy.
# So it is important to also check that KL(mvn, mvn) = 0.
sigma_b_inv = np.linalg.inv(sigma_b)
t = np.trace(sigma_b_inv.dot(sigma_a))
q = (mu_b - mu_a).dot(sigma_b_inv).dot(mu_b - mu_a)
k = mu_a.shape[0]
l = np.log(np.linalg.det(sigma_b) / np.linalg.det(sigma_a))
return 0.5 * (t + q - k + l) | af1fef041ef10f72365ec5e2d7335a6c930ea54a | 861 |
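# Minimal usage sketch for _compute_non_batch_kl (needs only numpy): the KL
# divergence of a Gaussian against itself is 0, and against a wider Gaussian
# it is strictly positive. Values below are illustrative.
import numpy as np

mu = np.array([0.0, 1.0])
sigma = np.array([[2.0, 0.3], [0.3, 1.0]])
assert abs(_compute_non_batch_kl(mu, sigma, mu, sigma)) < 1e-12
print(_compute_non_batch_kl(mu, sigma, mu, 4.0 * sigma) > 0.0)  # True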
from time import time, sleep
def wait_until(fn, timeout, period, message):
    """
    :param fn: callable returning a truthy value once the awaited condition holds
    :param timeout: maximum number of seconds to keep polling
    :param period: number of seconds to sleep between polls
    :param message: text of the TimeoutError raised if the timeout expires
    :return: bool - True as soon as fn() returns a truthy value
    """
mustend = time() + timeout
while time() < mustend:
if fn():
return True
sleep(period)
raise TimeoutError(message) | b73d2711f877754257fe65b607f3f2d261fa7f47 | 862 |
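# Usage sketch for wait_until: poll a condition until it becomes truthy or the
# timeout expires (timings below are illustrative).
from time import time

deadline = time() + 0.05
ok = wait_until(lambda: time() >= deadline,
                timeout=1, period=0.01,
                message="condition never became true")
print(ok)  # True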
import torch
def single_test(model, testdata, max_seq_len=20):
"""Get accuracy for a single model and dataloader.
Args:
model (nn.Module): MCTN2 Model
testdata (torch.utils.data.DataLoader): Test Dataloader
max_seq_len (int, optional): Maximum sequence length. Defaults to 20.
Returns:
        dict: test accuracy with neutral (zero) labels excluded, keyed as 'Acc:'.
"""
model.eval()
print('Start Testing ---------->>')
pred = []
true = []
with torch.no_grad():
for i, inputs in enumerate(testdata):
# process input
src, _, _, labels, _ = _process_input_L2(inputs, max_seq_len)
# We only need the source text as input! No need for target!
_, _, _, head_out = model(src)
pred.append(head_out)
true.append(labels)
eval_results_include = eval_mosei_senti_return(
torch.cat(pred, 0), torch.cat(true, 0), exclude_zero=False)
eval_results_exclude = eval_mosei_senti_return(
torch.cat(pred, 0), torch.cat(true, 0), exclude_zero=True)
mae = eval_results_include[0]
Acc1 = eval_results_include[-1]
Acc2 = eval_results_exclude[-1]
print('Test: MAE: {}, Acc1: {}, Acc2: {}'.format(mae, Acc1, Acc2))
return {'Acc:': Acc2} | 39c7fc2aae7512565abbf1483bb9d93661307654 | 863 |
import itertools
def allocate_memory_addresses(instructions: list[Instruction], reserved_memory_names: set[str]):
"""
    Allocate memory addresses for SharedName and MemorySlice operands; replace AddressOf with the allocated address (a PrimitiveValue) and MemoryOf with the backing memory name.
"""
allocated_before: tuple[int, int] | None = None # (bank_id, address)
def malloc(size: int) -> tuple[str, int]:
"""
Allocates consecutive memory addresses of size `size` and returns the starting address.
"""
nonlocal allocated_before
assert 1 <= size <= 512
if allocated_before is not None and size <= 512 - allocated_before[1]:
start = allocated_before[1]
allocated_before = (allocated_before[0], allocated_before[1] + size)
return f'bank{allocated_before[0]}', start
for i in itertools.count((0 if allocated_before is None else allocated_before[0]) + 1):
if f'bank{i}' not in reserved_memory_names:
allocated_before = (i, size)
return f'bank{i}', 0
raise LogicError
def process_argument(arg: OperandValue) -> OperandValue:
match arg:
case AddressOf(name=(SharedName() | MemorySlice()) as n) | MemoryOf(name=(SharedName() | MemorySlice()) as n):
assert (n.memory is None and n.index is None) or (n.memory is not None and n.index is not None)
if n.memory is None:
memory_name, memory_address = malloc(n.size if isinstance(n, MemorySlice) else 1)
n.memory = LocalName(memory_name, processor=arg.processor)
n.index = memory_address
if isinstance(arg, AddressOf):
return PrimitiveValue(arg.ctx, n.index)
else:
return n.memory
case _:
return arg
for ins in instructions:
match ins:
case Call(): list(map(process_argument, ins.args))
case Jump(): list(map(process_argument, ins.args))
case LongJump(): process_argument(ins.arg)
case Label(): pass
case _: raise LogicError(ins)
for ins in instructions:
match ins:
case Call(): ins.args = list(map(process_argument, ins.args))
case Jump(): ins.args = list(map(process_argument, ins.args))
case LongJump(): pass
case Label(): pass
case _: raise LogicError(ins) | 6dfd65caca014398e116e30c9c15970d48afe3a2 | 864 |
import numpy as np
def find_blobs(B):
"""find and return all blobs in the image, using
eight-connectivity. returns a labeled image, the
bounding boxes of the blobs, and the blob masks cropped
to those bounding boxes"""
B = np.array(B).astype(bool)
labeled, objects = label_blobs(B)
blobs = [labeled[obj] == ix + 1 for ix, obj in zip(range(len(objects)), objects)]
return labeled, objects, blobs | 9cb7b66353e9534bc43aa7a84e30b62d4411d06f | 865 |
def star_dist(a, n_rays=32, mode='cpp'):
"""'a' assumbed to be a label image with integer values that encode object ids. id 0 denotes background."""
n_rays >= 3 or _raise(ValueError("need 'n_rays' >= 3"))
if mode == 'python':
return _py_star_dist(a, n_rays)
elif mode == 'cpp':
return _cpp_star_dist(a, n_rays)
elif mode == 'opencl':
return _ocl_star_dist(a, n_rays)
else:
_raise(ValueError("Unknown mode %s" % mode)) | d19b031bfb6e9ad07537a0ff36037792f78a257b | 866 |
def get_bond_enthalpy(element1: str, element2: str, bond='single bond') -> int:
"""Utility function that retrieves the bond enthalpy between element1 and element2 (regardless or order)
An optional argument, bond, describing the bond (single, double, triple) could be specified
If not specified, bond defaults to 'single'
The optional argument exception is used to distinguish the double bond between carbon atoms in benzene"""
enthalpies_dict = enthalpies[bond]
if element1 in enthalpies_dict and element2 in enthalpies_dict[element1]:
return enthalpies_dict[element1][element2]
elif element2 in enthalpies_dict and element1 in enthalpies_dict[element2]:
return enthalpies_dict[element2][element1]
else:
return 0 | 5349b3b752b25d0e844d6bb8d46fbb1ad8cec085 | 867 |
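# Usage sketch for get_bond_enthalpy. The module-level `enthalpies` table is
# keyed as enthalpies[bond][element1][element2]; the tiny table and the
# 414 kJ/mol figure below are illustrative stand-ins, not the original data.
enthalpies = {'single bond': {'C': {'H': 414}}}
print(get_bond_enthalpy('C', 'H'))  # 414
print(get_bond_enthalpy('H', 'C'))  # 414 (order does not matter)
print(get_bond_enthalpy('H', 'O'))  # 0 (pair not in the table)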
import numpy as np
def approx_sample(num_items: int, num_samples: int) -> np.ndarray:
    """Fast approximate downsampling: boolean mask keeping roughly num_samples of num_items items."""
    if num_items <= num_samples:
        return np.ones(num_items, dtype=np.bool_)
    np.random.seed(125)
    # Select each item with probability (num_samples / num_items) to yield
    # approximately num_samples selections.
fraction_kept = float(num_samples) / num_items
return np.random.sample(size=num_items) < fraction_kept | 051447c311ed392f288c4eb0491df6f0b981883b | 868 |
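# Usage sketch for approx_sample: the returned boolean mask keeps roughly
# num_samples of the num_items entries.
import numpy as np

mask = approx_sample(num_items=100000, num_samples=1000)
print(mask.shape, mask.dtype, int(mask.sum()))  # (100000,) bool, ~1000 kept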
def extract_out_cos(transmat, cos, state):
""" Helper function for building HMMs from matrices: Used for
transition matrices with 'cos' transition classes.
Extract outgoing transitions for 'state' from the complete list
of transition matrices
Allocates: .out_id vector and .out_a array (of size cos x N)
"""
lis = []
    # collect indices belonging to positive probabilities
for j in range(cos):
for i in range(len(transmat[j][state])):
if transmat[j][state][i] != 0.0 and i not in lis:
lis.append(i)
#lis.sort()
#print "lis: ", lis
trans_id = ghmmwrapper.int_array_alloc(len(lis))
probsarray = ghmmwrapper.double_matrix_alloc(cos, len(lis)) # C-function
# creating list with positive probabilities
for k in range(cos):
for j in range(len(lis)):
ghmmwrapper.double_matrix_setitem(probsarray, k, j, transmat[k][state][lis[j]])
# initializing C state index array
for i in range(len(lis)):
ghmmwrapper.int_array_setitem(trans_id, i, lis[i])
return [len(lis),trans_id,probsarray] | 4f60290ca960fa1b714313b8bdfc0cc35f54ac90 | 869 |
import csv
def import_statistics(sourcefile,starttime):
"""
    Forms a dictionary for MarkovModel from the source csv file.
    input
    --------
    sourcefile: Source csv file for the Markov Model
    starttime : Hour at which the optimization run starts
    Returns a dictionary statistics2 whose keys are (time, iniState, finState),
    where time is the timestep relative to starttime.
"""
statistics1={}
statistics2={}
with open(sourcefile, newline='') as myFile:
reader = csv.reader(myFile)
rw_nb=0
for row in reader:
ts=rw_nb//4
statistics1[ts,int(row[1]),int(row[2])]=float(row[3])
rw_nb+=1
if row[0]==starttime:
listTop=ts
for tS,ini,fin in sorted(statistics1.keys()):
if tS-listTop>=0:
statistics2[tS-listTop,ini,fin]=statistics1[tS,ini,fin]
else:
statistics2[tS-listTop+int(len(statistics1.keys())/4),ini,fin]=statistics1[tS,ini,fin]
return statistics2 | d349559e389f08f77cda46a1d40c1cb7914ef0ce | 870 |
import click
def image_repository_validation(func):
"""
Wrapper Validation function that will run last after the all cli parmaters have been loaded
to check for conditions surrounding `--image-repository`, `--image-repositories`, and `--resolve-image-repos`. The
reason they are done last instead of in callback functions, is because the options depend
on each other, and this breaks cyclic dependencies.
:param func: Click command function
:return: Click command function after validation
"""
def wrapped(*args, **kwargs):
ctx = click.get_current_context()
guided = ctx.params.get("guided", False) or ctx.params.get("g", False)
image_repository = ctx.params.get("image_repository", False)
image_repositories = ctx.params.get("image_repositories", False) or {}
resolve_image_repos = ctx.params.get("resolve_image_repos", False)
parameters_overrides = ctx.params.get("parameters_overrides", {})
template_file = (
ctx.params.get("t", False) or ctx.params.get("template_file", False) or ctx.params.get("template", False)
)
# Check if `--image-repository`, `--image-repositories`, or `--resolve-image-repos` are required by
# looking for resources that have an IMAGE based packagetype.
required = any(
[
_template_artifact == IMAGE
for _template_artifact in get_template_artifacts_format(template_file=template_file)
]
)
validators = [
Validator(
validation_function=lambda: bool(image_repository)
+ bool(image_repositories)
+ bool(resolve_image_repos)
> 1,
exception=click.BadOptionUsage(
option_name="--image-repositories",
ctx=ctx,
message="Only one of the following can be provided: '--image-repositories', "
"'--image-repository', or '--resolve-image-repos'. "
"Do you have multiple specified in the command or in a configuration file?",
),
),
Validator(
validation_function=lambda: not guided
and not (image_repository or image_repositories or resolve_image_repos)
and required,
exception=click.BadOptionUsage(
option_name="--image-repositories",
ctx=ctx,
message="Missing option '--image-repository', '--image-repositories', or '--resolve-image-repos'",
),
),
Validator(
validation_function=lambda: not guided
and (
image_repositories
and not resolve_image_repos
and not _is_all_image_funcs_provided(template_file, image_repositories, parameters_overrides)
),
exception=click.BadOptionUsage(
option_name="--image-repositories",
ctx=ctx,
message="Incomplete list of function logical ids specified for '--image-repositories'. "
"You can also add --resolve-image-repos to automatically create missing repositories.",
),
),
]
for validator in validators:
validator.validate()
# Call Original function after validation.
return func(*args, **kwargs)
return wrapped | ab72b2233353f50833e80f662d945702dd20b173 | 871 |
import six
import sys
def safe_encode(text, incoming=None,
encoding='utf-8', errors='strict'):
"""Encodes incoming str/unicode using `encoding`.
If incoming is not specified, text is expected to be encoded with
current python's default encoding. (`sys.getdefaultencoding`)
:param incoming: Text's current encoding
:param encoding: Expected encoding for text (Default UTF-8)
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a bytestring `encoding` encoded
representation of it.
    :raises TypeError: If text is not an instance of str
"""
if not isinstance(text, six.string_types):
raise TypeError("%s can't be encoded" % type(text))
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
if isinstance(text, six.text_type):
return text.encode(encoding, errors)
elif text and encoding != incoming:
# Decode text before encoding it with `encoding`
text = safe_decode(text, incoming, errors)
return text.encode(encoding, errors)
return text | f1f98042d1ab18b81337f8d4df77d31b8678e3e2 | 872 |
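# Usage sketch for safe_encode: unicode text in, encoded bytes out.
print(safe_encode(u'caf\u00e9'))                      # b'caf\xc3\xa9'
print(safe_encode(u'caf\u00e9', encoding='latin-1'))  # b'caf\xe9'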
import inspect
def create_source_location_str(frame, key):
"""Return string to use as source location key
Keyword arguments:
frame -- List of frame records
key -- Key of the frame with method call
Takes frame and key (usually 1, since 0 is the frame in which getcurrentframe() was called)
Extracts line number and code context, i. e. when TrackingNode's loc-function was called
Turns them into a string to use as key for the source location number
Returns the string
"""
o_frames = inspect.getouterframes(frame)
sl_frame = o_frames.pop(key)
sl_lineno = sl_frame.lineno
# list with 1 element
sl_cc = sl_frame.code_context.pop(0)
# strip it off leading whitespaces
sl_cc = sl_cc.lstrip()
source_location = f"{sl_lineno} {sl_cc}"
return source_location | 4bf3a7d68e5b5c667261710dfcb3e2f1cb613533 | 873 |
def uv_lines(reglist, uv='uv', sty={}, npoints=1001, inf=50., eps=1e-24):
"""
"""
for reg in reglist:
for b in reg.blocks:
smin, smax, ds = -5., 25., 1.
vals = np.arange(smin, smax, ds)
cm = plt.cm.gist_rainbow
cv = np.linspace(0,1,len(vals))
for i in range(len(vals)):
style1 = dict(c=cm(cv[i]), lw=.8, ls='-', zorder=6000)
style1.update(sty)
b.add_curves_uv(xh.cm.uvlines([vals[i]], uv=uv, uvbounds=b.uvbounds, sty=style1, c=0., inf=inf, npoints=npoints))
return reglist | f80f4551707564cf979377bb17908c4151ea61f3 | 874 |
import numpy as np
def rand_initialisation(X, n_clusters, seed, cste):
""" Initialize vector centers from X randomly """
index = [];
repeat = n_clusters;
# Take one index
if seed is None:
idx = np.random.RandomState().randint(X.shape[0]);
else:
idx = np.random.RandomState(seed+cste).randint(X.shape[0]);
    draw = 0;
    while repeat != 0:
        # Let's check that we haven't taken this index yet
        if idx not in index:
            index.append(idx);
            repeat = repeat - 1;
        # Draw a new candidate index so a duplicate cannot stall the loop
        draw = draw + 1;
        if seed is None:
            idx = np.random.RandomState().randint(X.shape[0]);
        else:
            idx = np.random.RandomState(seed+cste+repeat+draw).randint(X.shape[0]);
return X[index]; | e1d4d58018596c2ed3955b8d8d82d040def7b3e1 | 875 |
import numpy as np
from numpy.linalg import norm
def l2_hinge_loss(X, Y, W, C, N):
"""
Computes the L2 regularized Hinge Loss function, and its gradient over a mini-batch of data.
:param X: The feature matrix of size (F+1, N).
:param Y: The label vector of size (N, 1).
:param W: The weight vector of size (F+1, 1).
:param C: A hyperparameter of SVM soft margin that determines the number of observations allowed in the margin.
:param N: The number of samples in the mini-batch.
:return: Loss (a scalar) and gradient (a vector of size (F+1, 1)).
"""
l = 1 / N * C
loss = (l / 2) * norm(W) ** 2 + 1 / N * np.sum(np.maximum(np.zeros((1, N)), 1 - np.multiply(Y.T, np.matmul(W.T, X))))
grad = l * W + 1 / N * sum([-Y[i] * X[:, i:i + 1] if Y[i] * np.matmul(W.T, X[:, i:i + 1]) < 1 else 0 for i in range(N)])
return loss, grad | 1de1cf7881466a039e6882161bbea3836dc4bf2f | 876 |
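# Usage sketch for l2_hinge_loss on a tiny random mini-batch (3 features plus
# a bias row, N = 5 samples, labels in {-1, +1}); with W = 0 the loss is 1.0.
import numpy as np

rng = np.random.RandomState(0)
N = 5
X = np.vstack([rng.randn(3, N), np.ones((1, N))])  # shape (F+1, N)
Y = rng.choice([-1, 1], size=(N, 1))
W = np.zeros((4, 1))
loss, grad = l2_hinge_loss(X, Y, W, C=1.0, N=N)
print(loss, grad.shape)  # 1.0 (4, 1)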
import argparse
def args():
"""Setup argument Parsing."""
parser = argparse.ArgumentParser(
usage='%(prog)s',
description='OpenStack Inventory Generator',
epilog='Inventory Generator Licensed "Apache 2.0"')
parser.add_argument(
'-f',
'--file',
help='Inventory file.',
required=False,
default='openstack_inventory.json'
)
parser.add_argument(
'-s',
'--sort',
help='Sort items based on given key i.e. physical_host',
required=False,
default='component'
)
exclusive_action = parser.add_mutually_exclusive_group(required=True)
exclusive_action.add_argument(
'-r',
'--remove-item',
help='host name to remove from inventory, this can be used multiple'
' times.',
action='append',
default=[]
)
exclusive_action.add_argument(
'-d',
'--remove-group',
help='group name to remove from inventory, this can be used multiple'
' times.',
action='append',
default=[]
)
exclusive_action.add_argument(
'-l',
'--list-host',
        help='List all hosts in the inventory.',
action='store_true',
default=False
)
exclusive_action.add_argument(
'-g',
'--list-groups',
help='List groups and containers in each group',
action='store_true',
default=False
)
exclusive_action.add_argument(
'-G',
'--list-containers',
help='List containers and their groups',
action='store_true',
default=False
)
exclusive_action.add_argument(
'-e',
'--export',
help='Export group and variable information per host in JSON.',
action='store_true',
default=False
)
exclusive_action.add_argument(
'--clear-ips',
help='''Clears IPs from the existing inventory, but leaves
all other information intact. LXC interface files and
load balancers will *not* be modified.''',
action='store_true',
default=False
)
return vars(parser.parse_args()) | 9a92d138d6b73f423b3a98d5fa046be061fe06ff | 877 |
from numpy.core import around, number, float_
from numpy.core.numerictypes import issubdtype
from numpy.core.fromnumeric import any as npany
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
"""
Raise an assertion if two objects are not equal up to desired precision.
The test verifies identical shapes and verifies values with
abs(desired-actual) < 0.5 * 10**(-decimal)
Given two array_like objects, check that the shape is equal and all
elements of these objects are almost equal. An exception is raised at
shape mismatch or conflicting values. In contrast to the standard usage
in numpy, NaNs are compared like numbers, no assertion is raised if
both objects have NaNs in the same positions.
Parameters
----------
x : array_like
The actual object to check.
y : array_like
The desired, expected object.
decimal : integer (decimal=6)
desired precision
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_almost_equal: simple version for comparing numbers
assert_array_equal: tests objects for equality
Examples
--------
the first assert does not raise an exception
>>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
[1.0,2.333,np.nan])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
\t\t\t[1.0,2.33339,np.nan], decimal=5)
...
<type 'exceptions.AssertionError'>:
AssertionError:
Arrays are not almost equal
<BLANKLINE>
(mismatch 50.0%)
x: array([ 1. , 2.33333, NaN])
y: array([ 1. , 2.33339, NaN])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
\t\t\t[1.0,2.33333, 5], decimal=5)
<type 'exceptions.ValueError'>:
ValueError:
Arrays are not almost equal
x: array([ 1. , 2.33333, NaN])
y: array([ 1. , 2.33333, 5. ])
"""
def compare(x, y):
try:
if npany(gisinf(x)) or npany( gisinf(y)):
xinfid = gisinf(x)
yinfid = gisinf(y)
                if not (xinfid == yinfid).all():
return False
# if one item, x and y is +- inf
if x.size == y.size == 1:
return x == y
x = x[~xinfid]
y = y[~yinfid]
except TypeError:
pass
z = abs(x-y)
if not issubdtype(z.dtype, number):
z = z.astype(float_) # handle object arrays
return around(z, decimal) <= 10.0**(-decimal)
assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
header='Arrays are not almost equal') | 95005d6f8782d5b5c4221343be035a905db2c255 | 878 |
def train_val_test_split(dataframe, val_ratio=.2, test_ratio=.2):
"""
Takes a dataframe and returns a random train/validate/test split
param test_ratio: the percentage of data to put into the test portion
- must be between 0.0 and 1.0
param val_ratio: the percentage of data to put into the validation portion
- must be between 0.0 and 1.0
test_ratio + val_ratio must also be <= 1.0
- if test_ratio + val_ratio == 1, train will be empty
returns: train, validate, test
"""
# ensure test_ratio is [0,1]
if (test_ratio > 1.0) | (test_ratio < 0):
raise ValueError('test_ratio must be between 0.0 and 1.0, found',
test_ratio)
# ensure val_ratio is [0,1]
if (val_ratio > 1.0) | (val_ratio < 0):
        raise ValueError('val_ratio must be between 0.0 and 1.0, found',
                         val_ratio)
# ensure test + val <= 1
if (test_ratio + val_ratio > 1.0):
raise ValueError('test_ratio + val_ratio must be <= 1.0, found',
test_ratio + val_ratio)
# split once to get test
train, test = train_test_split(dataframe, test_ratio)
# recalculate ratio and split again to get val
train_ratio = 1 - (val_ratio + test_ratio)
sub_ratio = val_ratio / (val_ratio + train_ratio)
train, val = train_test_split(train, sub_ratio)
# return the results
return train, val, test | 2b3cf8a656c0d66a8481ad50ea6123bab31aaf9d | 879 |
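# Usage sketch for train_val_test_split. It relies on a module-level
# train_test_split(dataframe, ratio) helper that is not shown here; the
# shuffle-based stand-in below is only an assumption for illustration.
import numpy as np
import pandas as pd

def train_test_split(dataframe, ratio):
    # hypothetical helper: random split into (1 - ratio, ratio) parts
    shuffled = dataframe.sample(frac=1, random_state=0)
    cut = int(round(len(shuffled) * (1 - ratio)))
    return shuffled.iloc[:cut], shuffled.iloc[cut:]

df = pd.DataFrame({'x': np.arange(100)})
train, val, test = train_val_test_split(df, val_ratio=0.2, test_ratio=0.2)
print(len(train), len(val), len(test))  # 60 20 20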
def diff_pf_potential(phi):
""" Derivative of the phase field potential. """
return phi**3-phi | c22af096d27cf817ffee683453ecafb4e5c61cdc | 880 |
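# diff_pf_potential matches the derivative of the usual double-well potential
# F(phi) = (phi**2 - 1)**2 / 4, so it vanishes at phi = -1, 0, +1. A quick
# finite-difference check (illustrative only):
import numpy as np

phi = np.linspace(-1.5, 1.5, 7)
h = 1e-6
fd = (((phi + h)**2 - 1)**2 - ((phi - h)**2 - 1)**2) / (4 * 2 * h)
print(np.allclose(diff_pf_potential(phi), fd, atol=1e-5))  # True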
import tensorflow as tf
def process_image(image, label, height=224, width=224):
    """ Resize the image to a fixed input size,
    and rescale the input channels to the range [0, 1].
    Args:
        image: "tensor, float32", image input.
        label: "tensor, int64", image label.
        height: "int64", target image height.
        width: "int64", target image width.
    Returns:
        image input, image label.
    """
image = tf.cast(image, tf.float32)
image = image / 255.
image = tf.image.resize(image, (height, width))
return image, label | 98fa8332603c7fdcc973f38711b982648b1f13f0 | 881 |
def astz(data):
"""
[X] ASTZ - Request actual status
STBY (dyno stopping) or
SSIM (road load) or
SMTR (constant speed) or
SRPM (constant RPM) or
SKZK (constant motor force)
"""
responds = data.split(" ")
if len(responds) > 2:
if responds[1] == 'STBY':
state = 0
elif responds[1] in ["SSIM","SMTR","SRPM","SKZK"]:
state = 1
else:
            print(responds[1])
state = 2
else:
state = 3
return state | da36efc3d6b477f8e7de96148c9890aa1cf9c560 | 882 |
def get_peer_snappi_chassis(conn_data, dut_hostname):
"""
Get the Snappi chassis connected to the DUT
Note that a DUT can only be connected to a Snappi chassis
Args:
conn_data (dict): the dictionary returned by conn_graph_fact.
Example format of the conn_data is given below:
{u'device_conn': {u'sonic-s6100-dut':
{u'Ethernet64': {u'peerdevice': u'snappi-sonic',
u'peerport': u'Card4/Port1',
u'speed': u'100000'},
u'Ethernet68': {u'peerdevice': u'snappi-sonic',
u'peerport': u'Card4/Port2',
u'speed': u'100000'},
u'Ethernet72': {u'peerdevice': u'snappi-sonic',
u'peerport': u'Card4/Port3',
u'speed': u'100000'},
u'Ethernet76': {u'peerdevice': u'snappi-sonic',
u'peerport': u'Card4/Port4',
u'speed': u'100000'}}},
u'device_console_info': {u'sonic-s6100-dut': {}},
u'device_console_link': {u'sonic-s6100-dut': {}},
u'device_info': {u'sonic-s6100-dut':
{u'HwSku': u'Arista-7060CX-32S-C32',
u'Type': u'DevSonic'}},
u'device_pdu_info': {u'sonic-s6100-dut': {}},
u'device_pdu_links': {u'sonic-s6100-dut': {}},
u'device_port_vlans': {u'sonic-s6100-dut':
{u'Ethernet64': {u'mode': u'Access',
u'vlanids': u'2',
u'vlanlist': [2]},
u'Ethernet68': {u'mode': u'Access',
u'vlanids': u'2',
u'vlanlist': [2]},
u'Ethernet72': {u'mode': u'Access',
u'vlanids': u'2',
u'vlanlist': [2]},
u'Ethernet76': {u'mode': u'Access',
u'vlanids': u'2',
u'vlanlist': [2]}}},
u'device_vlan_list': {u'sonic-s6100-dut': [2, 2, 2, 2]},
u'device_vlan_map_list': {u'sonic-s6100-dut': {u'19': 2}},
u'device_vlan_range': {u'sonic-s6100-dut': [u'2']}}
dut_hostname (str): hostname of the DUT
Returns:
The name of the peer Snappi chassis or None
"""
device_conn = conn_data['device_conn']
if dut_hostname not in device_conn:
return None
dut_device_conn = device_conn[dut_hostname]
peer_devices = [dut_device_conn[port]['peerdevice'] for port in dut_device_conn]
peer_devices = list(set(peer_devices))
if len(peer_devices) == 1:
return peer_devices[0]
else:
return None | 96bfa8d2189b2dea041754fd4daa2a39a96dd687 | 883 |
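# Usage sketch for get_peer_snappi_chassis with a minimal, made-up
# conn_graph_facts dictionary (hostnames and ports are illustrative).
conn_data = {
    'device_conn': {
        'sonic-dut': {
            'Ethernet0': {'peerdevice': 'snappi-chassis', 'peerport': 'Card1/Port1'},
            'Ethernet4': {'peerdevice': 'snappi-chassis', 'peerport': 'Card1/Port2'},
        }
    }
}
print(get_peer_snappi_chassis(conn_data, 'sonic-dut'))    # 'snappi-chassis'
print(get_peer_snappi_chassis(conn_data, 'missing-dut'))  # None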
def gen_hwpc_report():
"""
    Return a well-formatted HWPCReport
"""
cpua = create_core_report('1', 'e0', '0')
cpub = create_core_report('2', 'e0', '1')
cpuc = create_core_report('1', 'e0', '2')
cpud = create_core_report('2', 'e0', '3')
cpue = create_core_report('1', 'e1', '0')
cpuf = create_core_report('2', 'e1', '1')
cpug = create_core_report('1', 'e1', '2')
cpuh = create_core_report('2', 'e1', '3')
socketa = create_socket_report('1', [cpua, cpub])
socketb = create_socket_report('2', [cpuc, cpud])
socketc = create_socket_report('1', [cpue, cpuf])
socketd = create_socket_report('2', [cpug, cpuh])
groupa = create_group_report('1', [socketa, socketb])
groupb = create_group_report('2', [socketc, socketd])
return create_report_root([groupa, groupb]) | 1380ef25281f9dde7626f0df9c4bb59c7a0eff29 | 884 |
def special_crossentropy(y_true, y_pred):
"""特殊的交叉熵
"""
task = K.cast(y_true < 1.5, K.floatx())
mask = K.constant([[0, 0, 1, 1, 1]])
y_pred_1 = y_pred - mask * 1e12
y_pred_2 = y_pred - (1 - mask) * 1e12
y_pred = task * y_pred_1 + (1 - task) * y_pred_2
y_true = K.cast(y_true, 'int32')
loss = K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
return K.mean(loss) | 7cd14e5bc788175ed7022ead942853260a4b2679 | 885 |
def nonensembled_map_fns(data_config):
"""Input pipeline functions which are not ensembled."""
common_cfg = data_config.common
map_fns = [
data_transforms.correct_msa_restypes,
data_transforms.add_distillation_flag(False),
data_transforms.cast_64bit_ints,
data_transforms.squeeze_features,
data_transforms.randomly_replace_msa_with_unknown(0.0),
data_transforms.make_seq_mask,
data_transforms.make_msa_mask,
data_transforms.make_hhblits_profile,
data_transforms.make_random_crop_to_size_seed,
]
if common_cfg.use_templates:
map_fns.extend([data_transforms.fix_templates_aatype, data_transforms.make_pseudo_beta('template_')])
map_fns.extend([data_transforms.make_atom14_masks,])
return map_fns | 223f85312503941a6e90bdc027f3f522e70aaacc | 886 |
import re
def _getMark(text):
"""
Return the mark or text entry on a line. Praat escapes double-quotes
by doubling them, so doubled double-quotes are read as single
double-quotes. Newlines within an entry are allowed.
"""
line = text.readline()
# check that the line begins with a valid entry type
if not re.match(r'^\s*(text|mark) = "', line):
raise ValueError('Bad entry: ' + line)
# read until the number of double-quotes is even
while line.count('"') % 2:
next_line = text.readline()
if not next_line:
raise EOFError('Bad entry: ' + line[:20] + '...')
line += next_line
entry = re.match(r'^\s*(text|mark) = "(.*?)"\s*$', line, re.DOTALL)
return entry.groups()[1].replace('""', '"') | 3f6de6246069a9f1d9cdb127fdcde40de16106d6 | 887 |
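# Usage sketch for _getMark, reading one entry from an in-memory TextGrid
# fragment; doubled double-quotes are unescaped to a single quote.
import io

fragment = io.StringIO('    text = "he said ""hi"" twice"\n')
print(_getMark(fragment))  # he said "hi" twice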
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
def get_screen_resolution_str():
    """
    Get a regexp-like string with your current screen resolution.
    :return: String with your current screen resolution.
    """
sizes = [
[800, [600]],
[1024, [768]],
[1280, [720, 768]],
[1366, [768]],
[1920, [1080, 1200]],
]
sizes_mobile = [[768, [1024]], [720, [1280]], [768, [1280, 1366]], [1080, [1920]]]
default_w = 1920
default_h = 1080
default_mobile_w = 1080
default_mobile_h = 1920
is_mobile = False
window = Gtk.Window()
screen = window.get_screen()
nmons = screen.get_n_monitors()
maxw = 0
maxh = 0
sizew = 0
sizeh = 0
if nmons == 1:
maxw = screen.get_width()
maxh = screen.get_height()
else:
for m in range(nmons):
mg = screen.get_monitor_geometry(m)
if mg.width > maxw or mg.height > maxw:
maxw = mg.width
maxh = mg.height
if maxw > maxh:
v_array = sizes
else:
v_array = sizes_mobile
is_mobile = True
for m in v_array:
if maxw <= m[0]:
sizew = m[0]
sizeh = m[1][len(m[1]) - 1]
for e in m[1]:
if maxh <= e:
sizeh = e
break
break
if sizew == 0:
if is_mobile:
sizew = default_mobile_w
sizeh = default_mobile_h
else:
sizew = default_w
sizeh = default_h
return r"%sx%s" % (sizew, sizeh) | 7de1c15d92dd3582598740a7062d9ef1c5c2adba | 888 |
def page_body_id(context):
"""
Get the CSS class for a given page.
"""
path = slugify_url(context.request.path)
if not path:
path = "home"
return "page-{}".format(path) | 2d4c709111a510c29e5bfa48016f59f65d45f2c4 | 889 |
import numpy as np
def load_centers(network, name, eps):
"""Load values of centers from the specified network by name.
:param network: Network to load center values
:param name: Name of parameter with centers
:return: Normalized centers
"""
assert name in network.params.keys(), 'Cannot find name: {} in params'.format(name)
params = network.params[name]
assert len(params) == 1
centers = params[0].data
norms = np.sqrt(np.sum(np.square(centers), axis=1, keepdims=True) + eps)
normalized_centers = centers / norms
return normalized_centers | cf677d4ca387f5095500077f9d704ae3a884c0c9 | 890 |
import numpy as np
def naive_sort_w_matrix(array):
"""
:param array: array to be sorted
:return: a sorted version of the array, greatest to least, with the appropriate permutation matrix
"""
size = len(array)
def make_transposition(i, j):
mat = np.identity(size)
mat[i, i] = 0
mat[j, j] = 0
mat[i, j] = 1
mat[j, i] = 1
return mat
    sort_array = np.zeros(size)
    permutation = np.identity(size)
    # Work on a copy and swap placed elements out of the search range, as in
    # selection sort, so each step only considers elements not yet placed.
    work = np.array(array, dtype=float).copy()
    for i in range(size):
        big = -float("inf")
        ix = i
        for j in range(i, size):
            if work[j] > big:
                big = work[j]
                ix = j
        sort_array[i] = big
        work[i], work[ix] = work[ix], work[i]
        permutation = make_transposition(i, ix) @ permutation
return sort_array, permutation | aedcf7c39c633b1e958997038d905bed0bc4958d | 891 |
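# Usage sketch for naive_sort_w_matrix: the returned permutation matrix,
# applied to the input, reproduces the descending sort.
import numpy as np

a = np.array([5.0, 1.0, 9.0, 2.0])
sorted_a, P = naive_sort_w_matrix(a)
print(sorted_a)                      # [9. 5. 2. 1.]
print(np.allclose(P @ a, sorted_a))  # True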
import matplotlib as mpl
import matplotlib.pyplot as plt
def plot_route(cities, route, name='diagram.png', ax=None):
    """Draw the route through the cities (either to a saved figure or onto a given axis)."""
mpl.rcParams['agg.path.chunksize'] = 10000
if not ax:
fig = plt.figure(figsize=(5, 5), frameon=False)
axis = fig.add_axes([0, 0, 1, 1])
axis.set_aspect('equal', adjustable='datalim')
plt.axis('off')
axis.scatter(cities['x'], cities['y'], color='red', s=4)
route = cities.reindex(route)
route.loc[route.shape[0]] = route.iloc[0]
axis.plot(route['x'], route['y'], color='purple', linewidth=1)
plt.savefig(name, bbox_inches='tight', pad_inches=0, dpi=200)
plt.close()
else:
ax.scatter(cities['x'], cities['y'], color='red', s=4)
route = cities.reindex(route)
route.loc[route.shape[0]] = route.iloc[0]
ax.plot(route['x'], route['y'], color='purple', linewidth=1)
return ax | 748fff61c00c3a186eefa577b3785a400acf0eff | 892 |
def aes_cbc_mac(
key: bytes, b: bytes, iv: bytes=None,
pad=False
) -> bytes:
"""
AES CBC-MAC.
:param key: The verification key.
:param b: The buffer to be authenticated.
:param iv: The initial vector.
:param pad: Whether to apply PKCS-7 padding to the buffer.
:return: A valid MAC for b, with given key and IV.
"""
if pad:
b = matasano.blocks.pkcs_7(b, 16)
return matasano.blocks.aes_cbc(
key=key, b=b, iv=iv,
decrypt=False, random_iv=False
)[0][-16:] | 0b15c5270a29c814b02fcc9502c080e0f507eaad | 893 |
import json
def seamus():
"""
Preview for Seamus page
"""
context = make_context()
# Read the books JSON into the page.
with open('www/static-data/books.json', 'rb') as readfile:
books_data = json.load(readfile)
books = sorted(books_data, key=lambda k: k['title'])
# Harvest long tag names
for book in books:
tag_list = []
for tag in book['tags']:
tag_list.append(context['COPY']['tags'][tag]['value'])
book['tag_list'] = tag_list
context['books'] = books
return render_template('seamus-preview.html', **context) | 4252f5abd7c2d8927395e5f036bf4899b12d555e | 894 |
import os
import json
def get_model_opt(model_path):
""" Get the options to initialize a model evaluator
"""
opt_path = os.path.dirname(model_path.rstrip('/')) + '/opt.json'
# load the options used while training the model
opt = json.load(open(opt_path))
opt = dotdict(opt)
opt.load_weights_folder = model_path
return opt | c1e7afe0fc4dab0ef51998b73c35f1ce01f6c625 | 895 |
def resolve_alias(term: str) -> str:
"""
Resolves search term aliases (e.g., 'loc' for 'locations').
"""
if term in ("loc", "location"):
return "locations"
elif term == "kw":
return "keywords"
elif term == "setting":
return "setting"
elif term == "character":
return "characters"
else:
return term | 8080d6ffb73457fd61aeca610b30b18695ec01bd | 896 |
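# Usage sketch for resolve_alias: known aliases are expanded, anything else
# passes through unchanged.
print(resolve_alias('loc'))         # 'locations'
print(resolve_alias('kw'))          # 'keywords'
print(resolve_alias('characters'))  # 'characters' (no alias, returned as-is)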
from typing import Union
from typing import Any
from typing import Iterable
from typing import List
import math
def to_number(value: Union[Any, Iterable[Any]], default: Any = math.nan) -> Union[NumberType, List[NumberType]]:
""" Attempts to convert the passed object to a number.
Returns
-------
value: Scalar
* list,tuple,set -> list of Number
* int,float -> int, float
* str -> int, float
* generic -> float if float() works, else math.nan
"""
if isinstance(value, str):
return _convert_string_to_number(value, default)
if isinstance(value, (list, tuple, set)):
return [to_number(i, default) for i in value]
try:
converted_number = float(value)
except (ValueError, TypeError):
converted_number = default
if not _is_null(converted_number) and math.floor(converted_number) == converted_number:
converted_number = int(converted_number)
return converted_number | d7e4e85fb3ea056a3188b9dce98f5e806beb0a5b | 897 |
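# Usage sketch for to_number; note it leans on the module's private
# _convert_string_to_number/_is_null helpers, which are not shown here, so the
# string case is only what the docstring promises.
print(to_number(3.0))         # 3  (whole floats collapse to int)
print(to_number((1, "2.5")))  # expected [1, 2.5] per the docstring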
def secretValue(value=None, bits=64):
"""
A single secret value
bits: how many bits long the value is
value: if not None, then a specific (concrete or symbolic) value which this value takes on
"""
return AbstractNonPointer(bits=bits, value=value, secret=True) | 55672dbc78a8393a2cb3c39c3165c89180df3bfe | 898 |
def year_frac(d_end, d_start=0,
dates_string=False, trading_calendar=True):
""" :returns year fraction between 2 (business) dates
:params dates are datetimes, if string then insert dates_string=True
"""
delta_days = days_between(d_end, d_start, dates_string, trading_calendar)
year = 252. if trading_calendar else 365.25
return delta_days / year | cd5916e54b969476c335adce3aa9138ce4be68b2 | 899 |
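# Usage sketch for year_frac. days_between() lives elsewhere in the module;
# the plain calendar-day stand-in below is an assumption for illustration only.
from datetime import date

def days_between(d_end, d_start=0, dates_string=False, trading_calendar=True):
    # hypothetical helper: calendar-day difference between two ISO date strings
    return (date.fromisoformat(d_end) - date.fromisoformat(d_start)).days

print(round(year_frac('2021-01-01', '2020-01-01',
                      dates_string=True, trading_calendar=False), 3))  # 1.002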