code stringlengths 101 5.91M |
|---|
def read_image_npy(img_path):
img = np.load(img_path)
img = binary_fill_holes(img)
mask = img.copy()
img = img.astype(np.uint8)
img[mask] = 255
(ret, thresh) = cv2.threshold(img, 127, 255, 0)
(_, contours, hierarchy) = cv2.findContours(thresh, 1, 2)
cnt = contours[0]
return (thresh, ... |
class docParamNameList(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, parametername=None):
if (parametername is None):
self.parametername = []
else:
self.parametername = parametername
def factory(*args_, **kwargs_):
if docParamNameL... |
_module()
class ResNet(nn.Module):
arch_settings = {18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}
def __init__(self, depth, in_channels=3, stem_channels=64, base_channels=64, num_stages=4, strid... |
def check_tangent_matrix(conf, vec_x0, fun, fun_grad):
vec_x = vec_x0.copy()
delta = conf.delta
vec_r = fun(vec_x)
mtx_a0 = fun_grad(vec_x)
mtx_a = mtx_a0.tocsc()
mtx_d = mtx_a.copy()
mtx_d.data[:] = 0.0
vec_dx = nm.zeros_like(vec_r)
for ic in range(vec_dx.shape[0]):
vec_dx[i... |
class PeakLocalMaxSuite():
def setup(self):
mask = np.zeros([500, 500], dtype=bool)
(x, y) = np.indices((500, 500))
x_c = (((x // 20) * 20) + 10)
y_c = (((y // 20) * 20) + 10)
mask[((((x - x_c) ** 2) + ((y - y_c) ** 2)) < (8 ** 2))] = True
(self.labels, num_objs) = nd... |
class FiniteWordPath_2d_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_2d, FiniteWord_class):
    # Concrete combination class: a finite 2d word path whose data is backed
    # by a cached iterator datatype.  All behavior is inherited from the three
    # bases; nothing is overridden here.
    pass
def convert_conll03_file(filename, short_name):
assert ('en_conll03.' in filename)
if (not os.path.exists(filename)):
raise FileNotFoundError(('Cannot convert missing file %s' % filename))
new_filename = filename.replace('en_conll03.', (short_name + '.conll03.'))
with open(filename) as fin:
... |
def check_constituents(train_constituents, trees, treebank_name):
constituents = parse_tree.Tree.get_unique_constituent_labels(trees)
for con in constituents:
if (con not in train_constituents):
raise RuntimeError("Found label {} in the {} set which don't exist in the train set".format(con, ... |
def load_detectron_weight(net, detectron_weight_file):
(name_mapping, orphan_in_detectron) = net.detectron_weight_mapping
with open(detectron_weight_file, 'rb') as fp:
src_blobs = pickle.load(fp, encoding='latin1')
if ('blobs' in src_blobs):
src_blobs = src_blobs['blobs']
params = net.st... |
def test_geterr():
    """Every (code, action) pair reported by sc.geterr() must be known."""
    for code, action in sc.geterr().items():
        assert_(code in _sf_error_code_map)
        assert_(action in _sf_error_actions)
def update_config(config, *, impossible_strategy, class_loss_weight):
class IdentificationClassificationConfig(type(config)):
def __init__(self, impossible_strategy='ignore', class_loss_weight=1.0, **kwargs):
super().__init__(**kwargs)
self.impossible_strategy = impossible_strategy
... |
class SchemeMorphism_point(SchemeMorphism):
def _repr_(self):
return self._codomain.ambient_space()._repr_generic_point(self._coords)
def _latex_(self):
return self._codomain.ambient_space()._latex_generic_point(self._coords)
def __getitem__(self, n):
return self._coords[n]
def _... |
def is_whole(x):
    """Return True if *x* represents an integer-valued number.

    Anything that cannot be coerced to a float (unparsable strings, None,
    arbitrary objects) yields False rather than raising: previously only
    ValueError was caught, so e.g. ``is_whole(None)`` raised TypeError.
    """
    try:
        x = numpy.float64(x)
    except (ValueError, TypeError):
        # ValueError: string that does not parse; TypeError: non-numeric object.
        return False
    return x.is_integer()
def hfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, plan=None):
    """Compute the 2-D FFT of a Hermitian-symmetric array.

    Thin convenience wrapper delegating to ``hfftn`` over the last two axes
    by default.  A precomputed ``plan`` is rejected, matching the other
    scipy.fft entry points.
    """
    if plan is None:
        return hfftn(x, s, axes, norm, overwrite_x, workers)
    raise NotImplementedError('Passing a precomputed plan is not yet supported by scipy.fft functions')
_start_docstrings('The bare MobileViT model outputting raw hidden-states without any specific head on top.', MOBILEVIT_START_DOCSTRING)
class MobileViTModel(MobileViTPreTrainedModel):
def __init__(self, config: MobileViTConfig, expand_output: bool=True):
super().__init__(config)
self.config = config... |
def is_even(self, allow_rescaling_flag=True) -> bool:
    """Return True exactly when the parity of this object is 'even'."""
    parity = self.parity(allow_rescaling_flag)
    return parity == 'even'
def write_syscall_consts(f, arch, mode):
f.write('// This file has been autogenerated. DO NOT MODIFY!\n')
undefined_syscall = (- 1)
valid_syscalls = 0
invalid_syscalls = 0
for (name, obj) in sorted(syscalls.all(), key=(lambda x: arch_syscall_number(arch, x))):
syscall_number = getattr(obj, a... |
class LempDecisionRuleStats(IndexDecisionRuleStats):
def __init__(self, sample_time_col, dec_rule_df, lemp_truth_df, index_truth_df, blocked_mm_truth_df):
super(LempDecisionRuleStats, self).__init__(sample_time_col, dec_rule_df, lemp_truth_df, index_truth_df, blocked_mm_truth_df)
def correct(self, row):... |
class EagerBatcher():
def __init__(self, args, rank=0, nranks=1):
(self.rank, self.nranks) = (rank, nranks)
(self.bsize, self.accumsteps) = (args.bsize, args.accumsteps)
self.query_tokenizer = QueryTokenizer(args.query_maxlen)
self.doc_tokenizer = DocTokenizer(args.doc_maxlen)
... |
class RefCosine(object):
    """Cosine-annealing learning-rate reference schedule.

    The rate decays smoothly from ``init_lr`` at iteration 0 to 0 at
    iteration ``max_iter`` following half a cosine period.
    """

    def __init__(self, init_lr, max_iter):
        self.init_lr = init_lr
        self.max_iter = max_iter

    def get_learning_rate(self, iter):
        # lr = init_lr * 0.5 * (1 + cos(pi * t)) with t = iter / max_iter.
        progress = (iter * 1.0) / self.max_iter
        return self.init_lr * ((math.cos(progress * math.pi) + 1.0) * 0.5)
def test_state_adjoint_problems(CG1, geometry, rng, u, y_d, ocp, bcs, y, p):
trial = TrialFunction(CG1)
test = TestFunction(CG1)
state = Function(CG1)
adjoint = Function(CG1)
a = (inner(grad(trial), grad(test)) * geometry.dx)
L_state = ((u * test) * geometry.dx)
L_adjoint = (((- (state - y_d... |
class GTN(nn.Module):
def __init__(self, num_edge, num_channels, w_in, w_out, num_class, num_nodes, num_layers):
super(GTN, self).__init__()
self.num_edge = num_edge
self.num_channels = num_channels
self.num_nodes = num_nodes
self.w_in = w_in
self.w_out = w_out
... |
_handler
def value_analysis(term, smt, name, exact, restrict):
arg = term._args[0]
try:
return smt.get_analysis(name, arg)
except KeyError:
pass
ty = smt.type(term)
with smt.local_defined(), smt.local_nonpoison() as nx:
x = smt.eval(arg)
z = exact(x, ty)
if isinstance... |
def gaussian_beam_z_axis_x_pol(x_grid, y_grid, z_grid, w0, center, R, omega, polarity, eps_val) -> complex:
x = (((R[(0, 0)] * (x_grid - center[0])) + (R[(0, 1)] * (y_grid - center[1]))) + (R[(0, 2)] * (z_grid - center[2])))
y = (((R[(1, 0)] * (x_grid - center[0])) + (R[(1, 1)] * (y_grid - center[1]))) + (R[(1,... |
def write_dataset_best(documents, test_documents, output_dir, dataset_name):
random.shuffle(documents)
num_train = int((len(documents) * 0.85))
num_dev = int((len(documents) * 0.15))
os.makedirs(output_dir, exist_ok=True)
write_section(output_dir, dataset_name, 'train', documents[:num_train])
wr... |
class FastaDataset(torch.utils.data.Dataset):
def __init__(self, path: str, cache_indices=False):
self.fn = fasta_file_path(path)
self.threadlocal = threading.local()
self.cache = Path(f'{path}.fasta.idx.npy')
if cache_indices:
if self.cache.exists():
(sel... |
def get_init_sup_samples(args, sampler, COMMON, train_samples, OUTD):
previous_pairs = dict()
previous_errors = False
cnd_drop_n = (args.dataset == constants.CAM16)
cnd_drop_n &= (args.al_type != constants.AL_WSL)
cnd = (args.al_type not in [constants.AL_FULL_SUP, constants.AL_WSL])
cnd &= (args... |
class TestClarksonWoodruffTransform():
rng = np.random.RandomState(seed=)
n_rows = 2000
n_cols = 100
density = 0.1
n_sketch_rows = 200
seeds = [, , , , , , , , , ]
A_dense = rng.randn(n_rows, n_cols)
A_csc = rand(n_rows, n_cols, density=density, format='csc', random_state=rng)
A_csr ... |
def _group_params(agg, df):
    """Collapse indexed columns like ``name[0]``, ``name[1]``, ... into one
    ``name`` column by applying *agg* across the per-index columns.

    Indexed parameters are detected by stripping one trailing ``[<digits>]``
    from every key ending in ``]``; ``Counter`` then gives how many indexed
    columns exist per base name.  The aggregate is written back into *df*
    in place, and *df* is also returned.
    """
    params = [re.sub('\\[\\d+\\]$', '', x, 1) for x in df.keys() if x.endswith(']')]
    param_counts = Counter(params)
    for (param_name, count) in param_counts.items():
        # NOTE(review): range(1, count) covers indexes 1..count-1, i.e. one
        # fewer column than the `count` detected -- this looks like an
        # off-by-one (either index 0 or index `count` is skipped) unless the
        # producer deliberately emits 1-based names with the last dropped.
        # TODO confirm against the producer's column-naming convention.
        df[param_name] = agg([df['{}[{}]'.format(param_name, i)] for i in range(1, count)])
    return df
def test_set_action_space(as_custom):
as_custom.set_action_space(custom_action_lower_bound, custom_action_upper_bound)
assert (as_custom.get_action_space().low == custom_action_lower_bound).all()
assert (as_custom.get_action_space().high == custom_action_upper_bound).all()
assert (as_custom.normalize_ac... |
class FlaxXLMRobertaForMaskedLM(metaclass=DummyObject):
    # Placeholder class used when the optional `flax` backend is not
    # installed: any attempt to instantiate it routes through
    # requires_backends, which raises a helpful ImportError instead of an
    # opaque NameError.
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
class Virus(Cell):
def __init__(self, gameServer, owner, position, radius):
Cell.__init__(self, gameServer, owner, position, radius)
self.cellType = 2
self.isSpiked = True
self.isMotherCell = False
self.color = Color(42, 255, 42)
def canEat(self, cell):
if (len(se... |
def export_meta_graph(export_dir, worker_id):
    """Serialize the current TF graph of one worker as a text MetaGraphDef."""
    target = os.path.join(export_dir, ('worker-%d_metagraph' % worker_id))
    parallax_log.debug(('Exporting graph of worker %d to %s' % (worker_id, target)))
    tf.train.export_meta_graph(target, as_text=True)
def register_Ns3SimpleRefCount__Ns3PbbAddressBlock_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbAddressBlock__gt___methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter< ns3::PbbAddressBlock > > const &', 'o')])
ret... |
def test_interpretation():
interp = pyhf.compat.interpret_rootname('gamma_foo_0')
assert (interp['constrained'] == 'n/a')
assert (not interp['is_scalar'])
assert (interp['name'] == 'foo')
assert (interp['element'] == 0)
interp = pyhf.compat.interpret_rootname('alpha_foo')
assert interp['cons... |
def ground_formulas(match_parse, formulas, references={}):
core_parse = match_parse.graph_parse.core_parse
singular_variables = set(itertools.chain(*[_get_singular_variables(formula) for formula in formulas]))
grounded_variable_sets = []
for variable in singular_variables:
grounded_variable = _g... |
def simGetDoubleSignal(signalName):
    """Read a double-valued signal from the simulator C API.

    Returns a ``(ret, value)`` tuple: ``ret`` is the raw return code of the
    C call and ``value`` the signal's current value.  ``_check_return``
    raises first if the call reported an error.
    """
    # Out-parameter buffer for the C call.
    val = ffi.new('double*')
    ret = lib.simGetDoubleSignal(signalName.encode('ascii'), val)
    _check_return(ret)
    return (ret, val[0])
def eval_np(module, *args, **kwargs):
    """Run *module* on numpy inputs and return numpy outputs.

    All positional and keyword arguments are converted to torch tensors via
    torch_ify, the module is invoked, and the result is converted back with
    elem_or_tuple_to_numpy.
    """
    tensor_args = tuple(torch_ify(a) for a in args)
    tensor_kwargs = dict((name, torch_ify(value)) for name, value in kwargs.items())
    result = module(*tensor_args, **tensor_kwargs)
    return elem_or_tuple_to_numpy(result)
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstru... |
def _get_uid(name):
if ((getpwnam is None) or (name is None)):
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if (result is not None):
return result[2]
return None |
class BernoulliMLPRegressor(StochasticRegressor):
def __init__(self, input_shape, output_dim, name='BernoulliMLPRegressor', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), output_nonli... |
def boost_get_version(self, dir):
re_but = re.compile('^#define\\s+BOOST_LIB_VERSION\\s+"(.*)"$', re.M)
try:
val = re_but.search(self.__boost_get_version_file(dir).read()).group(1)
except:
val = self.check_cxx(fragment=BOOST_VERSION_CODE, includes=[dir], execute=True, define_ret=True)
re... |
_utils.test(require=ti.extension.adstack)
def test_mixed_inner_loops():
x = ti.field(dtype=ti.f32, shape=(), needs_grad=True)
arr = ti.field(dtype=ti.f32, shape=5)
loss = ti.field(dtype=ti.f32, shape=(), needs_grad=True)
def mixed_inner_loops():
for i in arr:
loss[None] += ti.sin(x[N... |
.parametrize('estimator', [LinearRegression(), DumbEstimator()])
.parametrize('sample_weight', [None, np.ones_like(y_toy)])
def test_fit_estimator(estimator: Any, sample_weight: Optional[NDArray]) -> None:
estimator = fit_estimator(estimator, X_toy, y_toy, sample_weight)
check_is_fitted(estimator) |
def test_capacitor_error_massages():
    """A capacitor built with a non-capacitance unit must raise ValueError."""
    # NOTE(review): "massages" in the test name is presumably a typo for
    # "messages"; renaming is safe only if nothing selects tests by name.
    error = 'The input unit for the capacitor is not correct. Look at the documentation for the correct input format.'
    with pytest.raises(ValueError, match=error):
        # 'H' (henry) is an inductance unit, not a capacitance unit.
        Capacitor(10, 'H')
def test_tokenizer():
char_tokenizer = CharacterTokenizer()
phone_tokenizer = default_phoneme_tokenizer()
char_text = 'HELLO WORLD'
char_text_enc = char_tokenizer.encode(char_text)
char_text_dec = char_tokenizer.decode(char_text_enc)
assert isinstance(char_text_enc, list)
assert (char_text =... |
def cast_to_iterable(value):
    """Wrap *value* in a single-element list unless it is already a list or tuple."""
    return value if isinstance(value, (list, tuple)) else [value]
def simRMLPos(dofs, smallestTimeStep, flags, currentPosVelAccel, maxVelAccelJerk, selection, targetPosVel):
smallestTimeStep = ffi.cast('double', smallestTimeStep)
handle = lib.simRMLPos(dofs, smallestTimeStep, flags, currentPosVelAccel, maxVelAccelJerk, selection, targetPosVel, ffi.NULL)
_check_return(hand... |
def get_evaluation_metrics(fs, fp, engine):
assert (len(fs) == len(fp))
for (ls, lp) in tqdm(zip(fs, fp), total=len(fs)):
(correct, match) = eval_fun(ls, lp, engine)
grades.append(correct)
exact_match.append(match)
lf_acc = (sum(exact_match) / len(exact_match))
ex_acc = (sum(grad... |
def get_loader(data_args, transform_args, split, task_sequence, su_frac, nih_frac, batch_size, is_training=False, shuffle=False, study_level=False, frontal_lateral=False, return_info_dict=False):
if is_training:
study_level = data_args.train_on_studies
datasets = []
if (su_frac != 0):
datase... |
class VectorFieldDualFreeModule(DiffFormFreeModule):
    """Free module of 1-forms, the dual of a vector-field free module.

    Implemented as a DiffFormFreeModule of degree 1 over the given
    vector-field module.
    """

    def __init__(self, vector_field_module):
        DiffFormFreeModule.__init__(self, vector_field_module, 1)

    def tensor_type(self):
        # 1-forms are tensors of type (0, 1): fully covariant, rank 1.
        return (0, 1)
def test_getSubscription10():
url = (brokerIp + '/ngsi10/updateContext')
headers = {'Content-Type': 'application/json'}
r = requests.post(url, data=json.dumps(data_ngsi10.subdata17), headers=headers)
resp_content = r.content
resInJson = resp_content.decode('utf8').replace("'", '"')
resp = json.l... |
class Evaluator(abc.ABC):
def __init__(self, metrics: typing.List[pymia_metric.Metric]):
self.metrics = metrics
self.results = []
def evaluate(self, prediction: typing.Union[(sitk.Image, np.ndarray)], reference: typing.Union[(sitk.Image, np.ndarray)], id_: str, **kwargs):
raise NotImplem... |
_torch
((not is_torch_greater_or_equal_than_1_10), 'BridgeTower is only available in torch v1.10+')
class BridgeTowerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = ((BridgeTowerModel, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerForContrastiveLear... |
def onehot_from_logits(logits, eps=0.0):
argmax_acs = (logits == logits.max(1, keepdim=True)[0]).float()
if (eps == 0.0):
return argmax_acs
rand_acs = Variable(torch.eye(logits.shape[1])[[np.random.choice(range(logits.shape[1]), size=logits.shape[0])]], requires_grad=False)
return torch.stack([(... |
def minimal_grid(x, y, tol=1e-06, rel=False):
import numpy as np
from scipy.interpolate import CubicSpline as spline
deg = 3
if callable(tol):
next_sample = tol
else:
if rel:
ymax = np.max(np.abs(y))
if (ymax == 0.0):
raise ValueError('All inpu... |
def generate_keypair(pubkey_path: PathLike, pem_path: PathLike):
key = rsa.generate_private_key(backend=crypto_default_backend(), public_exponent=65537, key_size=4096)
private_key = key.private_bytes(crypto_serialization.Encoding.PEM, crypto_serialization.PrivateFormat.TraditionalOpenSSL, crypto_serialization.N... |
class CNNModel(Model):
def __init__(self, filter_dims, num_filters, strides, padding, name=None, hidden_nonlinearity=tf.nn.relu, hidden_w_init=tf.glorot_uniform_initializer(), hidden_b_init=tf.zeros_initializer()):
super().__init__(name)
self._filter_dims = filter_dims
self._num_filters = nu... |
def val_epoch(model, data_loader, epoch, args, summary, device):
model.eval()
iterator = tqdm(data_loader)
(mIoU_ac, mAP_ac, mF1_ac) = ([], [], [])
for (i, (image, mask)) in enumerate(iterator):
image = image.to(device)
mask = mask.to(device)
pred_mask_ac = model(image, args.iter... |
def update_datasplits(cfg):
assert isinstance(cfg.data.sources, (tuple, list))
assert isinstance(cfg.data.sources, (tuple, list))
if isinstance(cfg.data.sources[0], (tuple, list)):
assert (len(cfg.data.sources) == 1)
cfg.data.sources = cfg.data.sources[0]
if isinstance(cfg.data.targets[0... |
class FixedGridODESolver(metaclass=abc.ABCMeta):
def __init__(self, func, y0, grid_constructor=None, transforms=None):
self.func = func
self.y0 = y0
if (grid_constructor is None):
grid_constructor = (lambda f, y0, t: t)
self.grid_constructor = grid_constructor
if ... |
def process_file(bert_model, bert_tokenizer, fasttext_model, batch_size, language, ud_file, output_file):
logging.info('Processing file {}'.format(ud_file))
logging.info('PHASE ONE: reading file and tokenizing')
(all_target_tokens, all_bert_tokens, all_bert2target_map, all_ud) = tokenize_ud(ud_file, bert_to... |
def test_warm_start_equivalence():
(X, y) = make_hastie_10_2(n_samples=20, random_state=1)
(X_train, X_test, y_train, y_test) = train_test_split(X, y, random_state=43)
clf_ws = EasyEnsembleClassifier(n_estimators=5, warm_start=True, random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n... |
def test_workspace_poiless(datadir):
    """A workspace whose measurement declares no POI yields a POI-less model."""
    with open(datadir.joinpath('poiless.json'), encoding='utf-8') as spec_file:
        workspace_spec = json.load(spec_file)
    workspace = pyhf.Workspace(workspace_spec)
    model = workspace.model()
    assert model.config.poi_name is None
    assert model.config.poi_index is None
def test_calculate_indexes_when_indexes_supplied(msa_sampler):
    """Explicitly supplied indexes come back untouched, with last_i == -1."""
    supplied = [2, 3, 4, 5]
    # args: indexes, leader_length=1, max_len=5, rollover=False
    out_indexes, last_i = msa_sampler.calculate_indexes(supplied, 1, 5, False)
    assert out_indexes == [2, 3, 4, 5]
    assert last_i == -1
_optimizer('adam', dataclass=FairseqAdamConfig)
class FairseqAdam(FairseqOptimizer):
def __init__(self, cfg: FairseqAdamConfig, params):
super().__init__(cfg)
fused_adam_cls = get_fused_adam_class()
use_fused_adam = ((not getattr(cfg, 'use_old_adam', False)) and (fused_adam_cls is not None) ... |
def main():
args = parse_args()
benchmark_type = []
if args.basic_arch:
benchmark_type += basic_arch_root
if args.datasets:
benchmark_type += datasets_root
if args.data_pipeline:
benchmark_type += data_pipeline_root
if args.nn_module:
benchmark_type += nn_module_r... |
def subexpressions_list(f, pars=None):
from sage.functions.trig import sin, cos, arcsin, arctan, arccos
variables = f[0].arguments()
if (not pars):
parameters = []
else:
parameters = pars
varpar = (list(parameters) + list(variables))
F = symbolic_expression([i(*variables) for i i... |
def _get_env(environment, name):
    """Fetch *name* from the marker-evaluation environment.

    Raises UndefinedEnvironmentName when the name is absent (or maps to an
    Undefined sentinel).
    """
    value = environment.get(name, _undefined)
    if not isinstance(value, Undefined):
        return value
    raise UndefinedEnvironmentName('{0!r} does not exist in evaluation environment.'.format(name))
class DDFFNet(nn.Module):
def __init__(self, focal_stack_size, output_dims=1, cc1_enabled=False, cc2_enabled=False, cc3_enabled=True, cc4_enabled=False, cc5_enabled=False, bias=False, pretrained='no_bn'):
super(DDFFNet, self).__init__()
self.autoencoder = DDFFAutoEncoder(output_dims, cc1_enabled, cc... |
class ConstructorSlot(InternalMethodSlot):
def __init__(self, slot_name, method, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
self.method = method
def slot_code(self, scope):
entry = scope.lookup_here(self.method)
if ((self.slot_name != 'tp_new') and scope.pare... |
def test_data_frame_integers(tmp_path):
filename = os.path.join(tmp_path, 'test-integers.root')
ak_array_x = ak.Array([1, 2, 3, 4, 5])
ak_array_y = ak.Array([1.1, 2.2, 3.3, 4.4, 5.5])
data_frame = ak.to_rdataframe({'x': ak_array_x, 'y': ak_array_y})
assert (data_frame.GetColumnType('x') == 'int64_t'... |
def build_shared_mlp(mlp_spec: List[int], bn: bool=True):
layers = []
for i in range(1, len(mlp_spec)):
layers.append(nn.Conv2d(mlp_spec[(i - 1)], mlp_spec[i], kernel_size=1, bias=(not bn)))
if bn:
layers.append(nn.BatchNorm2d(mlp_spec[i]))
layers.append(nn.ReLU(True))
re... |
class ModelTraining(ABC):
    """Abstract base for model-training strategies.

    Subclasses are expected to populate ``self.model`` and override
    ``train_model``.
    """

    def __init__(self, *args, **kwargs):
        # Concrete subclasses assign the trained/compiled model here.
        self.model = None

    def train_model(self, x_train, y_train, x_val, y_val, force_device):
        # Deliberately a plain raise rather than @abstractmethod: subclasses
        # remain instantiable and only fail if training is actually invoked.
        raise NotImplementedError()
class Agent():
def __init__(self, state_size, is_eval=False, model_name=''):
self.state_size = state_size
self.action_size = 5
self.memory = deque(maxlen=2000)
self.inventory1 = []
self.inventory2 = []
self.model_name = model_name
self.is_eval = is_eval
... |
def test_loop_inlining_regular_for():
sdfg = dace.SDFG('inlining')
state0 = sdfg.add_state('state0', is_start_block=True)
loop1 = LoopRegion(label='loop1', condition_expr='i < 10', loop_var='i', initialize_expr='i = 0', update_expr='i = i + 1', inverted=False)
sdfg.add_node(loop1)
state1 = loop1.add... |
def predict(path):
img = Image.open(path).resize((224, 224))
x = np.asarray(img.convert('RGB'))
x = ((x - x.mean()) / x.std())
x = np.expand_dims(x, axis=0)
preds = model.predict(x)
np.sort(preds)
print("Model's top 3 predicted:")
top3 = np.argsort((- preds[0]))[:3]
return [classes[i... |
def test_indexedoption_of_union_of_option_1():
with pytest.raises(TypeError, match=' must either be comprised of entirely optional contents'):
ak.contents.UnionArray(ak.index.Index8(np.array([0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1], dtype=np.int8)), ak.index.Index64(np.array([0, 1, 0, 2, 1, 2, 3, 3, 4, 5, 4], dtyp... |
def _download_hook(t):
last_b = [0]
def inner(b=1, bsize=1, tsize=None):
if (tsize is not None):
t.total = tsize
t.update(((b - last_b[0]) * bsize))
last_b[0] = b
return inner |
class TestKerasTPModel(unittest.TestCase):
def test_keras_layers_with_params(self):
conv_with_params = LayerFilterParams(Conv2D, Greater('filters', 2), Smaller('filters', 4), activation='softmax', kernel_size=(3, 4), filters=3)
conv = Conv2D(filters=3, kernel_size=(3, 4), activation='softmax')
... |
def convert_bytes(num):
    """Format a byte count as a human-readable string, e.g. 1536 -> '1.5 KB'.

    Walks the unit ladder, dividing by 1024 until the value fits.
    Previously anything >= 1024 TB fell off the end of the loop and the
    function silently returned None; petabytes are now the terminal unit.
    """
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.0:
            return ('%3.1f %s' % (num, unit))
        num /= 1024.0
    # >= 1024 TB: report in petabytes instead of returning None.
    return ('%3.1f %s' % (num, 'PB'))
def compute_partial_slices(n_samples, partial_utterance_n_frames=partials_n_frames, min_pad_coverage=0.75, overlap=0.5):
assert (0 <= overlap < 1)
assert (0 < min_pad_coverage <= 1)
samples_per_frame = int(((sampling_rate * mel_window_step) / 1000))
n_frames = int(np.ceil(((n_samples + 1) / samples_per_... |
def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo, phi, derphi, phi0, derphi0, c1, c2, extra_condition):
maxiter = 10
i = 0
delta1 = 0.2
delta2 = 0.1
phi_rec = phi0
a_rec = 0
while True:
dalpha = (a_hi - a_lo)
if (dalpha < 0):
(a, b) = (a_hi, a_lo)
else:
... |
class lomax_gen(rv_continuous):
def _shape_info(self):
return [_ShapeInfo('c', False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
return ((c * 1.0) / ((1.0 + x) ** (c + 1.0)))
def _logpdf(self, x, c):
return (np.log(c) - ((c + 1) * sc.log1p(x)))
def _cdf(self, x, c):
... |
def instantiate(cfg):
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={'allow_objects': True})
if isinstance(cfg, list):
return [instantiate(x) for x in cfg]
if (isinstance(cfg, abc.Mapping) and ('_... |
def test_from_wsgi(testdir, graphql_path):
testdir.make_test(f'''
from test.apps._graphql._flask.app import app
schema = schemathesis.graphql.from_wsgi("{graphql_path}", app=app)
()
(max_examples=10, deadline=None, suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much])
def test_(request, case):
... |
def test_batch_tile():
v = torch.FloatTensor([[1, 2, 3], [4, 5, 6]])
tiled = batch_tile(v, 3)
assert_tensor_equal(tiled, [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
v = torch.LongTensor([1, 2, 3])
assert_tensor_equal(batch_tile(v, 4), [[1, 2, 3], [1, 2, 3], [1, 2, 3], [... |
_model_architecture('convtransformer', 'convtransformer_espnet')
def convtransformer_espnet(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
args.encoder_layers = getattr(args, 'encoder_layers', 12)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.de... |
class CollectorIterator(object):
def __init__(self, collector):
self._collector = collector
self._idx = 0
def __iter__(self):
return self
def next(self, timeout=None):
try:
apply_result = self._collector._get_result(self._idx, timeout)
except IndexError:
... |
def get_data_from_features_or_inputs(tokenizer: BertTokenizer, label_list: List[str], feature: Optional[InputFeatures]=None, inputs: Optional[Dict[(str, torch.Tensor)]]=None) -> Tuple[(str, str, str)]:
if ((feature is not None) and (inputs is None)):
inputs = default_data_collator([feature])
elif ((feat... |
class TestAssertAllclose(object):
def test_simple(self):
x = 0.001
y = 1e-09
assert_allclose(x, y, atol=1)
assert_raises(AssertionError, assert_allclose, x, y)
a = np.array([x, y, x, y])
b = np.array([x, y, x, x])
assert_allclose(a, b, atol=1)
assert_r... |
def train_one_epoch(model, optimizer, train_loader, lr_scheduler, lr_warmup_scheduler, accumulated_iter, train_epoch, optim_cfg, rank, logger, log_buffer, log_interval):
for (i, data_batch) in enumerate(train_loader):
if ((lr_warmup_scheduler is not None) and (accumulated_iter <= lr_warmup_scheduler.T_max))... |
def CppExtension(name, sources, *args, **kwargs):
include_dirs = kwargs.get('include_dirs', [])
include_dirs += include_paths()
kwargs['include_dirs'] = include_dirs
library_dirs = kwargs.get('library_dirs', [])
library_dirs += library_paths()
kwargs['library_dirs'] = library_dirs
libraries ... |
def _read_string(f):
    """Read a length-prefixed latin-1 string from stream *f*.

    A non-positive length means the empty string; otherwise the payload is
    read and the stream is advanced to the next 32-bit boundary.
    """
    length = _read_long(f)
    if length <= 0:
        return ''
    chars = _read_bytes(f, length).decode('latin1')
    _align_32(f)
    return chars
def maybe_filter_categories_cocoapi(dataset_name, coco_api):
meta = MetadataCatalog.get(dataset_name)
cont_id_2_cat_id = get_contiguous_id_to_category_id_map(meta)
cat_id_2_cont_id = meta.thing_dataset_id_to_contiguous_id
cats = []
for cat in coco_api.dataset['categories']:
cat_id = cat['id'... |
def create_test_bash_info(commands, model_test_dict, port, script_name, partition):
config = model_test_dict['config']
job_name = model_test_dict['job_name']
checkpoint = model_test_dict['checkpoint']
work_dir = model_test_dict['work_dir']
eval = model_test_dict['eval']
echo_info = f'''
echo '{... |
def _filter_layers(layers, include_tags):
if (include_tags is None):
return layers
include_tags = set(include_tags)
return [l for l in layers if (not include_tags.isdisjoint(l.tags))] |
def register_node_type(node_meta_type: str=NodeMetaType.OPTPLAN_NODE, context_stack: OptplanContextStack=GLOBAL_CONTEXT_STACK):
def decorator(cls):
assert (len(cls._schema.fields['type'].choices) == 1)
node_type = cls._schema.fields['type'].choices[0]
def not_implemented(unused_params, unuse... |
class RootBlock(nn.Module):
    # Network stem ("root"): a 7x7 stride-2 convolution followed by a
    # 3x3 stride-2 max-pool, each preceded by explicit fixed padding and
    # using padding='VALID'.  `width` is the number of output channels of
    # the root convolution.
    def apply(self, x, width):
        x = fixed_padding(x, 7)
        # StdConv is a project helper; bias is off -- presumably a later
        # normalization layer handles the shift (TODO confirm).
        x = StdConv(x, width, (7, 7), (2, 2), padding='VALID', bias=False, name='conv_root')
        x = fixed_padding(x, 3)
        x = nn.max_pool(x, (3, 3), strides=(2, 2), padding='VALID')
        return x
class MKException(Exception):
    """Exception carrying an arbitrary payload in its ``value`` attribute."""

    def __init__(self, value):
        # The payload; also what __str__ reports (via repr).
        self.value = value

    def __str__(self):
        return repr(self.value)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.