code stringlengths 101 5.91M |
|---|
def Xor(a, b, ctx=None):
    """Return a Z3 Boolean expression representing ``a xor b``.

    The context is resolved from the operands (or the explicit ``ctx``),
    and both operands are coerced to that context's Boolean sort before
    the underlying Z3 xor node is constructed.
    """
    resolved_ctx = _get_ctx(_ctx_from_ast_arg_list([a, b], ctx))
    bool_sort = BoolSort(resolved_ctx)
    lhs, rhs = bool_sort.cast(a), bool_sort.cast(b)
    return BoolRef(Z3_mk_xor(resolved_ctx.ref(), lhs.as_ast(), rhs.as_ast()), resolved_ctx)
def evaluate(j, e, solver, scores1, scores2, data_loader, logdir, reference_point, split, result_dict):
assert (split in ['train', 'val', 'test'])
mode = 'pf'
if (mode == 'pf'):
assert (len(scores1) == len(scores2) <= 3), 'Cannot generate cirlce points for more than 3 dimensions.'
n_test_ray... |
def _make_pretrained_resnext101_wsl(use_pretrained):
    """Build a ResNeXt101-32x8d backbone from the WSL-Images hub repo.

    NOTE(review): ``use_pretrained`` is accepted but never consulted here —
    ``torch.hub.load`` always fetches the WSL weights; confirm this is
    intentional.
    """
    hub_model = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x8d_wsl')
    return _make_resnet_backbone(hub_model)
class BenchmarkingZeroShotDataDataset(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version('1.1.0')
BUILDER_CONFIGS = [datasets.BuilderConfig(name='topic', version=VERSION, description='Topic classifcation dataset based on Yahoo news groups.'), datasets.BuilderConfig(name='emotion', version=VERSION, desc... |
_criterion('masked_lm')
class MaskedLmLoss(FairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
def forward(self, model, sample, reduce=True):
masked_tokens = sample['target'].ne(self.padding_idx)
sample_size = masked_tokens.int().sum().item()
if (sampl... |
_utils.test(arch=supported_archs_taichi_ndarray)
def test_compiled_functions():
    # Thin test entry point: the actual assertions live in the
    # module-level helper _test_compiled_functions (defined elsewhere).
    _test_compiled_functions()
def main(N, bc):
SD = FunctionSpace(N, 'Laguerre', bc=bcs[bc])
u = TrialFunction(SD)
v = TestFunction(SD)
fj = Array(SD, buffer=fe)
f_hat = Function(SD)
f_hat = inner(v, fj, output_array=f_hat)
A = inner(v, (- div(grad(u))))
sol = la.Solver(A)
u_hat = Function(SD)
u_hat = sol(f_h... |
class PhotometricAug(object):
def __init__(self, transform=None):
self.transform = transform
def __call__(self, img):
n = random.randint(0, 2)
if (n == (- 1)):
transformed_image = TF.invert(img.copy())
elif (n == 0):
transformed_image = img.copy().convert(... |
def write_ranking(corpus_indices, corpus_scores, q_lookup, ranking_save_file):
with open(ranking_save_file, 'w') as f:
for (qid, q_doc_scores, q_doc_indices) in zip(q_lookup, corpus_scores, corpus_indices):
score_list = [(s, idx) for (s, idx) in zip(q_doc_scores, q_doc_indices)]
scor... |
class ResNet101(nn.Module):
def __init__(self, block, layers, num_classes, BatchNorm, bn_clr=False):
super(ResNet101, self).__init__()
self.inplanes = 64
self.bn_clr = bn_clr
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
if BatchNorm:
... |
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout, out_dim=None, search=False):
super().__init__()
self.fc1 = nn.Linear(dim, hidden_dim)
self.act = nn.GELU()
if (out_dim is None):
out_dim = dim
self.fc2 = nn.Linear(hidden_dim, out_dim)
... |
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 16, 3)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(16, 32, 3)
self.fc1 = nn.Linear(((32 * 5) * 5), 32)
self.fc2 = nn.Linear(32, 84)
self.fc3 = nn.Linear(8... |
class SelectFromModel(MetaEstimatorMixin, SelectorMixin, BaseEstimator):
_parameter_constraints: dict = {'estimator': [HasMethods('fit')], 'threshold': [Interval(Real, None, None, closed='both'), str, None], 'prefit': ['boolean'], 'norm_order': [Interval(Integral, None, (- 1), closed='right'), Interval(Integral, 1,... |
def RenderRegion2(points, points2, lines, region, filename):
dwg = svgwrite.Drawing(filename, profile='tiny')
for line in lines:
x1 = (1000 - int((((line[0] - region[0]) / (region[2] - region[0])) * 1000)))
y1 = int((((line[1] - region[1]) / (region[3] - region[1])) * 1000))
x2 = (1000 -... |
class ResNet(object):
def __init__(self, hps, images, labels, mode):
self.hps = hps
self._images = images
self.labels = labels
self.mode = mode
self._extra_train_ops = []
def build_graph(self):
self.global_step = tf.Variable(0, name='global_step', trainable=False)... |
def test_string_operations_unary_with_arg_slice():
pyarrow = pytest.importorskip('pyarrow')
if (packaging.version.Version(pyarrow.__version__) < packaging.version.Version('13')):
pytest.xfail('pyarrow<13 fails to perform this slice')
assert (ak.str.slice([['hello', 'world!'], [], ["it's a beautiful ... |
def train():
if (not os.path.isdir(args.outputpath[0])):
os.mkdir(args.outputpath[0])
output_file_name = os.path.join(args.outputpath[0], args.outputprefix[0])
fname = ((output_file_name + '_{}_'.format(args.actf[0])) + 'x'.join([str(x) for x in args.layers]))
x = Variable('x', dtype=args.dtype[... |
def check_tree(tree, layer):
if (len(tree.children_nodes) > 0):
now_str = ('%snon_leaf: %s:%s, %s:%s\n' % (('\t' * layer), tree.tag, tree.token, tree.node_index, tree.parent_index))
s = ''.join([check_tree(node, (layer + 1)) for node in tree.children_nodes])
return (now_str + s)
else:
... |
class SBDSegmentation(data.Dataset):
URL = '
FILE = 'benchmark.tgz'
MD5 = '82b4d87ceb2ed10f6038a1cba92111cb'
def __init__(self, root=Path.db_root_dir('sbd'), split='val', transform=None, download=False, preprocess=False, area_thres=0, retname=True):
self.root = root
self.transform = tran... |
def test_error_handling():
class NotConvertible(SDFGConvertible):
def __call__(self, a):
import numpy as np
print('A very pythonic method', a)
def __sdfg__(self, *args, **kwargs):
raise NotADirectoryError('I am not really convertible')
def __sdfg_signature... |
class TransformsConfig(object):
    """Placeholder configuration object for data transforms.

    Both methods are intentional no-ops; subclasses are expected to
    provide real construction and transform pipelines.
    """

    def __init__(self):
        """No state to initialize in the base stub."""

    def get_transforms(self):
        """Return the transform pipeline; the stub yields ``None``."""
        return None
class RationalCuspidalSubgroup(CuspidalSubgroup_generic):
def _repr_(self):
return ('Rational cuspidal subgroup %sover QQ of %s' % (self._invariants_repr(), self.abelian_variety()))
def lattice(self):
try:
return self.__lattice
except AttributeError:
lattice = sel... |
_model
def SReT_T_wo_slice(pretrained=False, **kwargs):
model = RecursiveTransformer(image_size=224, patch_size=16, stride=8, base_dims=[32, 32, 32], depth=[4, 10, 6], recursive_num=[2, 5, 3], heads=[2, 4, 8], mlp_ratio=3.6, **kwargs)
if pretrained:
state_dict = torch.load('SReT_T_wo_slice.pth', map_loc... |
class JointExtractionDecoderConfig(Config, JointExtractionDecoderMixin):
def __init__(self, ck_decoder: Union[(SingleDecoderConfigBase, str)]='span_classification', attr_decoder: Union[(SingleDecoderConfigBase, str)]=None, rel_decoder: Union[(SingleDecoderConfigBase, str)]='span_rel_classification', **kwargs):
... |
def test_net_on_dataset(args, dataset_name, proposal_file, output_dir, multi_gpu=False, gpu_id=0):
dataset = JsonDatasetRel(dataset_name)
test_timer = Timer()
test_timer.tic()
if multi_gpu:
num_images = len(dataset.get_roidb(gt=args.do_val))
all_results = multi_gpu_test_net_on_dataset(ar... |
def concepts2adj(node_ids):
global id2relation
cids = np.array(node_ids, dtype=np.int32)
n_rel = len(id2relation)
n_node = cids.shape[0]
adj = np.zeros((n_rel, n_node, n_node), dtype=np.uint8)
for s in range(n_node):
for t in range(n_node):
(s_c, t_c) = (cids[s], cids[t])
... |
.parametrize('media_type, content, definition', (('application/json', b'{"random": "text"}', {'responses': {'200': {'description': 'text', 'content': {'application/json': {'schema': SUCCESS_SCHEMA}}}}}), ('application/json', b'{"random": "text"}', {'responses': {'default': {'description': 'text', 'content': {'applicati... |
def U_15(params, wires):
    """Two-qubit variational layer: RY rotations with alternating CNOTs.

    Consumes ``params[0:4]`` as rotation angles; entangles first from
    wire 1 onto wire 0, then from wire 0 onto wire 1.
    """
    q0, q1 = wires[0], wires[1]
    qml.RY(params[0], wires=q0)
    qml.RY(params[1], wires=q1)
    qml.CNOT(wires=[q1, q0])
    qml.RY(params[2], wires=q0)
    qml.RY(params[3], wires=q1)
    qml.CNOT(wires=[q0, q1])
class SympyAdam(SympyPredictingOptimizer):
collect_order = ['v', 'm', 'theta']
def __init__(self):
self.theta = Symbol('theta')
self.grad = Symbol('g')
self.weight_decay = 0
(self.exp_avg, self.exp_avg_sq) = (Symbol('m'), Symbol('v'))
(self.beta1, self.beta2) = (Symbol('\... |
_numpy_output(non_zero=True, check_dtype=True)
def test_ufunc_degrees_u(A: dace.uint32[10]):
return np.degrees(A) |
def conv1d(inputs, num_output_channels, kernel_size, scope, stride=1, padding='SAME', use_xavier=True, stddev=0.001, weight_decay=0.0, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None):
with tf.variable_scope(scope) as sc:
num_in_channels = inputs.get_shape()[(- 1)].value
kernel_s... |
()
('configfile')
('reads', nargs=1)
('--queries', nargs=(- 1))
('-f', '--force', is_flag=True)
def init(configfile, force, reads, __queries):
stubname = os.path.basename(configfile)
if configfile.endswith('.conf'):
stubname = stubname[:(- 5)]
else:
configfile += '.conf'
if (os.path.exis... |
def _add_conv(out, channels=1, kernel=1, stride=1, pad=0, num_group=1, active=True, relu6=False, num_sync_bn_devices=(- 1)):
out.add(nn.Conv2D(channels, kernel, stride, pad, groups=num_group, use_bias=False))
if (num_sync_bn_devices == (- 1)):
out.add(nn.BatchNorm(scale=True))
else:
out.add(... |
class TestOpTreeEvaluation():
(scope='module')
def _create_random_circuits(self) -> OpTreeList:
circuit1 = random_circuit(2, 2, seed=2).decompose(reps=1)
circuit2 = random_circuit(2, 2, seed=0).decompose(reps=1)
return OpTreeList([circuit1, circuit2])
(scope='module')
def _create... |
def create_or_update_issue(body=''):
link = f'[{args.ci_name}]({args.link_to_ci_run})'
issue = get_issue()
max_body_length = 60000
original_body_length = len(body)
if (original_body_length > max_body_length):
body = f'''{body[:max_body_length]}
...
Body was too long ({original_body_length} c... |
def info_arrow(source, target, data, keys):
if (data['direction'] == 'fwd'):
m = f'{source.id}->{target.id}'
else:
m = f'{target.id}<-{source.id}'
for key in keys:
if (key == 'a'):
m += f" a={data['a']:.3f}"
elif (key == 'b_size'):
b_size = get_size(da... |
def applyrules(rules, d, var={}):
ret = {}
if isinstance(rules, list):
for r in rules:
rr = applyrules(r, d, var)
ret = dictappend(ret, rr)
if ('_break' in rr):
break
return ret
if (('_check' in rules) and (not rules['_check'](var))):
... |
class Tokenizer():
def __init__(self, vocab_fname=None, bpe_fname=None, lang=None, pad=1, separator=''):
self.separator = separator
self.lang = lang
if bpe_fname:
with open(bpe_fname, 'r') as bpe_codes:
self.bpe = subword_nmt.apply_bpe.BPE(bpe_codes)
if vo... |
def benchmark(clf, custom_name=False):
print(('_' * 80))
print('Training: ')
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = (time() - t0)
print(f'train time: {train_time:.3}s')
t0 = time()
pred = clf.predict(X_test)
test_time = (time() - t0)
print(f'test time: ... |
class InceptionResNetV2(nn.Module):
def __init__(self, num_classes=1001):
super(InceptionResNetV2, self).__init__()
self.input_space = None
self.input_size = (299, 299, 3)
self.mean = None
self.std = None
self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)
... |
class TestDistBackend(MultiProcessTestCase):
def setUpClass(cls):
os.environ['MASTER_ADDR'] = str(MASTER_ADDR)
os.environ['MASTER_PORT'] = str(MASTER_PORT)
os.environ['NCCL_ASYNC_ERROR_HANDLING'] = '1'
super().setUpClass()
def setUp(self):
super().setUp()
initiali... |
_spec_function('ice')
def get_ice_spec(**kwargs) -> RunSpec:
scenario_spec = ScenarioSpec(class_name='helm.benchmark.scenarios.ice_scenario.ICEScenario', args=kwargs)
return RunSpec(name=(('ice' + (':' if (len(kwargs) > 0) else '')) + ','.join((f'{k}={v}' for (k, v) in sorted(kwargs.items())))), scenario_spec=s... |
def register_Ns3Icmpv4L4Protocol_methods(root_module, cls):
cls.add_constructor([param('ns3::Icmpv4L4Protocol const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetDownTarget', 'ns3::IpL4Protocol::DownTargetCallback', [], is_const=True, is_virtual=True)
cls.add_method('GetDownTarget6', 'ns3::Ip... |
class MobileNetV2(nn.Module):
def __init__(self, opt, width_mult=1.0, round_nearest=8, block=None):
super().__init__()
if (block is None):
block = InvertedResidual
input_channel = 32
last_channel = 1280
inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6... |
class FlaxWav2Vec2ForPreTraining(metaclass=DummyObject):
    """Placeholder class used when the ``flax`` backend is unavailable."""

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # Delegates to the project-level availability check; presumably
        # raises with installation guidance when flax is missing.
        requires_backends(self, ['flax'])
.parametrize('input_meters, expected_resolution', [(5000, 6), (50000, 3)])
def test__get_resolution(h3_tess, input_meters, expected_resolution):
assert (h3_tess._get_resolution(base_shape=bbox, meters=input_meters) == expected_resolution) |
def mad(values, n):
if (len(values) < n):
values += ([0] * int((n - len(values))))
values.sort()
if (n == 2):
return (values[0], values[0])
values_m = ((n // 2) if (n % 2) else ((n // 2) - 1))
m = values[values_m]
sd = (sum([abs((m - lv)) for lv in values]) / float(n))
return... |
def mobilenet_v2(pretrained=False, progress=True, filter_size=1, **kwargs):
    # Factory for a MobileNetV2 with a configurable filter size.
    # NOTE(review): `pretrained` and `progress` are accepted but never used —
    # no weights are loaded here; confirm whether weight loading was intended.
    model = MobileNetV2(filter_size=filter_size, **kwargs)
    return model
def register_Ns3BoxValue_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::Box const &', 'value')])
cls.add_constructor([param('ns3::BoxValue const &', 'arg0')])
cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
cls.add_me... |
def fully_connected(x, units, use_bias=True, scope='fully_connected'):
    """Flatten ``x`` and apply a dense layer of ``units`` outputs.

    The kernel uses the module-level ``weight_init`` and
    ``weight_regularizer`` settings; everything runs inside ``scope``.
    """
    with tf.variable_scope(scope):
        flat = flatten(x)
        return tf.layers.dense(
            flat,
            units=units,
            kernel_initializer=weight_init,
            kernel_regularizer=weight_regularizer,
            use_bias=use_bias,
        )
def tmp_git_index() -> T.Iterator[str]:
    """Yield the path of a temporary file usable as a scratch git index.

    The file is created eagerly and closed immediately so other
    processes can open it by name; it is removed when the generator is
    cleaned up.

    Fix: the original created the file *inside* the ``try``, so a
    failing ``NamedTemporaryFile`` call made the ``finally`` clause hit
    an UnboundLocalError on ``tmp``, masking the real exception.
    Creation now happens before the ``try``.
    """
    tmp = tempfile.NamedTemporaryFile(prefix='gitindex', delete=False)
    tmp.close()
    try:
        yield tmp.name
    finally:
        try:
            os.remove(tmp.name)
        except OSError:
            pass  # already removed — best-effort cleanup only
class Validator():
def __init__(self, translator, source, reference, batch_size=3, beam_size=0):
self.translator = translator
self.source = source
self.reference = reference
self.sentence_count = len(source)
self.reference_word_count = sum([(len(data.tokenize(sentence)) + 1) ... |
def test_forward_beam_seq_lens():
from returnn.tensor import Dim, batch_dim
def _get_model(**_kwargs):
return torch.nn.Module()
def _forward_step(*, extern_data: TensorDict, **_kwargs):
data = extern_data['data']
assert (data.dims[0] == batch_dim)
time_dim = data.dims[1]
... |
def test_creat_from_J(spectrum):
    # Rebuild a TARDISSpectrum from the fixture's frequency grid and its
    # luminosity converted to J/s, then verify it matches the original.
    # (Name keeps the upstream "creat" typo so test discovery is unchanged.)
    actual = TARDISSpectrum(spectrum._frequency, spectrum.luminosity.to('J / s'))
    compare_spectra(actual, spectrum)
.parametrize(['energy', 'theta_C'], [(511000.0, 1.0), (255500.0, np.pi), (0.0, (2.0 * np.pi)), (.0, (np.pi / 2.0))])
def test_klein_nishina(energy, theta_C):
actual = util.klein_nishina(energy, theta_C)
kappa = util.kappa_calculation(energy)
expected = (((R_ELECTRON_SQUARED / 2) * ((1.0 + (kappa * (1.0 - np... |
class TestGenerateIndices(TestCase):
def test_make_range_if_int(self):
ind = generate_indices(6, [])
self.assertEqual(ind.all(), np.arange(6).all())
def test_pass_through_index_array(self):
ind = generate_indices(np.arange(6), [])
self.assertEqual(ind.all(), np.arange(6).all())
... |
def boost_get_toolset(self, cc):
toolset = cc
if (not cc):
build_platform = Utils.unversioned_sys_platform()
if (build_platform in BOOST_TOOLSETS):
cc = build_platform
else:
cc = self.env.CXX_NAME
if (cc in BOOST_TOOLSETS):
toolset = BOOST_TOOLSETS[cc]... |
def make_weights(distribution: str, adjacency: sparse.csr_matrix) -> np.ndarray:
n = adjacency.shape[0]
distribution = distribution.lower()
if (distribution == 'degree'):
node_weights_vec = adjacency.dot(np.ones(adjacency.shape[1]))
elif (distribution == 'uniform'):
node_weights_vec = np... |
(scope='module')
def fake_embeddings(tmp_path_factory):
words = sorted(set([x.lower() for y in SENTENCES for x in y]))
words = words[:(- 1)]
embedding_dir = tmp_path_factory.mktemp('data')
embedding_txt = (embedding_dir / 'embedding.txt')
embedding_pt = (embedding_dir / 'embedding.pt')
embedding... |
class GelfandTsetlinPattern(ClonableArray, metaclass=InheritComparisonClasscallMetaclass):
def __classcall_private__(self, gt):
return GelfandTsetlinPatterns()(gt)
def check(self):
assert all(((self[(i - 1)][j] >= self[i][j] >= self[(i - 1)][(j + 1)]) for i in range(1, len(self)) for j in range(... |
class lapack_atlas_threads_info(atlas_threads_info):
    # Same probe as atlas_threads_info, but additionally requires the
    # 'lapack_atlas' library, searched before the inherited library names.
    _lib_names = (['lapack_atlas'] + atlas_threads_info._lib_names)
def dirichlet_coefficients(redshift, alpha0, alpha1, z1=1.0, weight=None):
if ((np.ndim(alpha0) != 1) or (np.ndim(alpha1) != 1)):
raise ValueError('alpha0, alpha1 must be 1D arrays')
if (len(alpha0) != len(alpha1)):
raise ValueError('alpha0 and alpha1 must have the same length')
if ((weight ... |
def test(epoch):
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for (batch_idx, (inputs, targets)) in enumerate(testloader):
(inputs, targets) = (inputs.to(device), targets.to(device))
outputs = net(inputs)
loss = ... |
class TestDistances(unittest.TestCase):
def test_input(self):
adjacency = test_graph()
with self.assertRaises(ValueError):
get_distances(adjacency)
with self.assertRaises(ValueError):
get_distances(adjacency, source=0, source_row=5)
def test_algo(self):
ad... |
def resnest200(pretrained=False, root='~/.encoding/models', **kwargs):
model = ResNet(Bottleneck, [3, 24, 36, 3], radix=2, groups=1, bottleneck_width=64, deep_stem=True, stem_width=64, avg_down=True, avd=True, avd_first=False, **kwargs)
if pretrained:
model.load_state_dict(torch.hub.load_state_dict_from... |
class IDS(object):
def __init__(self, p=50, stationary_p=True, inital_seed=None):
np.random.seed(inital_seed)
self.maxRequiredStep = np.sin(((15.0 / 180.0) * np.pi))
self.gsBound = 1.5
self.gsSetPointDependency = 0.02
self.gsScale = ((2.0 * self.gsBound) + (100.0 * self.gsSet... |
class _BaseCurveDisplay():
def _plot_curve(self, x_data, *, ax=None, negate_score=False, score_name=None, score_type='test', log_scale='deprecated', std_display_style='fill_between', line_kw=None, fill_between_kw=None, errorbar_kw=None):
check_matplotlib_support(f'{self.__class__.__name__}.plot')
im... |
class STVQAAccuracyEvaluator():
def __init__(self):
self.answer_processor = EvalAIAnswerProcessor()
def eval_pred_list(self, pred_list):
pred_scores = []
for entry in pred_list:
pred_answer = self.answer_processor(entry['pred_answer'])
gts = [self.answer_processor... |
def run_coco_eval(anno_json, pred_json, name):
    """Run COCO bbox evaluation of ``pred_json`` against ``anno_json``.

    Summary metrics are printed by COCOeval, persisted via
    ``save_eval_results`` under ``name``, and returned.
    """
    ground_truth = COCO(anno_json)
    detections = ground_truth.loadRes(pred_json)
    evaluator = COCOeval(ground_truth, detections, 'bbox')
    for step in (evaluator.evaluate, evaluator.accumulate, evaluator.summarize):
        step()
    save_eval_results(evaluator.stats, name)
    return evaluator.stats
def get_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--name', default='GMM')
parser.add_argument('--gpu_ids', default='')
parser.add_argument('-j', '--workers', type=int, default=1)
parser.add_argument('-b', '--batch-size', type=int, default=4)
parser.add_argument('--dataroot',... |
def test():
mode = int(sys.argv[1])
clusters = int(sys.argv[2])
beta = float(sys.argv[3])
inputName = sys.argv[4]
old_assignmentsName = sys.argv[5]
outputName = sys.argv[6]
if (mode == 1):
runHyperParameterTests(inputName, outputName, clusters, beta, old_assignmentsName)
else:
... |
class TBool(object):
thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
__repr__ = _swig_repr
Val = _swig_property(_snap.TBool_Val_get, _snap.TBool_Val_set)
Rnd = _swig_property(_snap.TBool_Rnd_get, _snap.TBool_Rnd_set)
def __nonzero__(self):
... |
def intmod_gap_to_sage(x):
from sage.rings.finite_rings.finite_field_constructor import FiniteField
from sage.rings.finite_rings.integer_mod import Mod
from sage.rings.integer import Integer
s = str(x)
m = re.search('Z\\(([0-9]*)\\)', s)
if m:
return gfq_gap_to_sage(x, FiniteField(Intege... |
def digraph_logistic_regression():
digraph = LocalClassifierPerLevel(local_classifier=LogisticRegression())
digraph.hierarchy_ = nx.DiGraph([('a', 'b'), ('a', 'c')])
digraph.y_ = np.array([['a', 'b'], ['a', 'c']])
digraph.X_ = np.array([[1, 2], [3, 4]])
digraph.logger_ = logging.getLogger('LCPL')
... |
def b(tableau, star=0, base_ring=QQ):
t = Tableau(tableau)
if star:
t = t.restrict((t.size() - star))
cs = t.column_stabilizer().list()
n = t.size()
sgalg = SymmetricGroupAlgebra(base_ring, n)
one = base_ring.one()
P = Permutation
if (len(tableau) == 0):
return sgalg.one(... |
class TestComputeAverageFeaturesFromImages():
cases_grid = [(DataLoader(DummyDataset(torch.tensor([[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]], dtype=torch.float64), ['class_1', 'class_2', 'class_2']), batch_size=3), torch.tensor([[1.0, 1.0, 1.0], [1.0... |
class RANSACRegressor(_RoutingNotSupportedMixin, MetaEstimatorMixin, RegressorMixin, MultiOutputMixin, BaseEstimator):
_parameter_constraints: dict = {'estimator': [HasMethods(['fit', 'score', 'predict']), None], 'min_samples': [Interval(Integral, 1, None, closed='left'), Interval(RealNotInt, 0, 1, closed='both'), ... |
class GiraffeLayer(nn.Module):
def __init__(self, in_channels, strides, fpn_config, inner_fpn_channels, outer_fpn_channels, separable_conv=False, merge_type='conv'):
super(GiraffeLayer, self).__init__()
self.in_channels = in_channels
self.strides = strides
self.num_levels = len(in_ch... |
def test_array_num():
    # N is presumably a module-level symbolic size (e.g. a dace symbol) —
    # confirm against the surrounding module. The assert pins array_num's
    # contract: elementwise add of 5.
    A = np.random.randint(10, size=(N.get(),), dtype=np.int64)
    B = array_num(A)
    assert np.array_equal((A + 5), B)
def read_and_decode1(filename_queue):
reader = tf.TFRecordReader()
(_, serialized_example) = reader.read(filename_queue)
features = tf.parse_single_example(serialized_example, features={'file_bytes': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([NUM_TAGS], tf.float32)})
image = tf.imag... |
class geom_gen(rv_discrete):
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return ((p <= 1) & (p >= 0))
def _pmf(self, k, p):
return (np.power((1 - p), (k - 1)) * p)
def _logpmf(self, k, p):
return (special.xlog1py((k -... |
def analyze_corpus_in_numbers(lengths, dict_paragraphs, labels_train, output_dir):
print('number of files in corpus {}'.format(len(lengths.keys())))
avg_length = []
for (key, value) in lengths.items():
if value.get('intro'):
intro_len = value.get('intro')
else:
intro_... |
class BNFoldingNet(nn.Module):
def __init__(self, test_layer, functional, fold_applied):
super(BNFoldingNet, self).__init__()
self.conv1 = test_layer
self.fold_applied = fold_applied
self.bn = nn.BatchNorm2d(test_layer.out_channels)
self.functional = functional
def forwar... |
def _construct_sparse_coder(Estimator):
dictionary = np.array([[0, 1, 0], [(- 1), (- 1), 2], [1, 1, 1], [0, 1, 1], [0, 2, 1]], dtype=np.float64)
return Estimator(dictionary=dictionary) |
def loadGloveModel(gloveFile):
print('Loading pretrained word vectors...')
with open(gloveFile, 'r') as f:
model = {}
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word... |
class DoxyClass(DoxyCompound):
__module__ = 'gnuradio.utils.doxyxml'
kind = 'class'
def _parse(self):
if self._parsed:
return
super(DoxyClass, self)._parse()
self.retrieve_data()
if self._error:
return
self.set_descriptions(self._retrieved_data... |
class DictGatherDataParallel(nn.DataParallel):
    # DataParallel variant whose gather step merges per-replica dict
    # outputs via the project-level `dict_gather` helper instead of the
    # default tensor gather.
    def gather(self, outputs, output_device):
        # `self.dim` is the concatenation dimension inherited from DataParallel.
        return dict_gather(outputs, output_device, dim=self.dim)
def aggregate_metrics(questions):
total = len(questions)
exact_match = np.zeros(2)
f1_scores = np.zeros(2)
for mc in range(2):
exact_match[mc] = ((100 * np.sum(np.array([questions[x].em[mc] for x in questions]))) / total)
f1_scores[mc] = ((100 * np.sum(np.array([questions[x].f1[mc] for x... |
def CVFT(x_sat, x_grd, keep_prob, trainable):
def conv_layer(x, kernel_dim, input_dim, output_dim, stride, trainable, activated, name='ot_conv', activation_function=tf.nn.relu):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
weight = tf.get_variable(name='weights', shape=[kernel_dim, kernel_... |
class NAS_FPN():
    """Stub for a NAS-FPN neck; construction and forward are no-ops."""

    def __init__(self):
        # Chains to object.__init__, matching the original's explicit
        # super() call despite having no declared base class.
        super(NAS_FPN, self).__init__()

    def forward(self, x):
        """Placeholder forward pass; always returns ``None``."""
        return None
class Decoder():
def __init__(self, labels, lm_path=None, alpha=1, beta=1.5, cutoff_top_n=40, cutoff_prob=0.99, beam_width=200, num_processes=24, blank_id=0):
self.vocab_list = (['_'] + labels)
self._decoder = CTCBeamDecoder((['_'] + labels[1:]), lm_path, alpha, beta, cutoff_top_n, cutoff_prob, beam... |
def Empty(s):
    """Return the empty value of a sequence or regular-expression sort.

    Dispatches on the sort kind; any other sort raises Z3Exception.
    """
    dispatch = (
        (SeqSortRef, SeqRef, Z3_mk_seq_empty),
        (ReSortRef, ReRef, Z3_mk_re_empty),
    )
    for sort_cls, ref_cls, mk_empty in dispatch:
        if isinstance(s, sort_cls):
            return ref_cls(mk_empty(s.ctx_ref(), s.ast), s.ctx)
    raise Z3Exception('Non-sequence, non-regular expression sort passed to Empty')
class SawyerBoxCloseEnv(SawyerXYZEnv):
def __init__(self):
liftThresh = 0.12
goal_low = ((- 0.1), 0.85, 0.1329)
goal_high = (0.1, 0.95, 0.1331)
hand_low = ((- 0.5), 0.4, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = ((- 0.05), 0.55, 0.02)
obj_high = (0.05, 0.6, 0.... |
class MyTestCase(unittest.TestCase):
def test_simple(self):
def eyetest():
return np.eye(N)
self.assertTrue(np.allclose(eyetest(N=5), np.eye(5)))
def test_rect(self):
def eyetest():
return np.eye(N, (N + 1))
self.assertTrue(np.allclose(eyetest(N=5), np.eye... |
def torch_recovery(obj, path, end_of_epoch, device=None):
    """Restore ``obj``'s state from the checkpoint file at ``path``.

    ``end_of_epoch`` is part of the recovery-callback signature but is
    unused here. ``device`` is forwarded as ``map_location``.

    Fix: the original called ``torch.load`` again inside the fallback
    branch, deserializing the (possibly large) checkpoint a second time;
    the state dict is now loaded once and reused.
    """
    del end_of_epoch
    state = torch.load(path, map_location=device)
    try:
        obj.load_state_dict(state, strict=True)
    except TypeError:
        # Some objects implement load_state_dict without a `strict`
        # keyword; retry with the plain call.
        obj.load_state_dict(state)
def manual_seed(args_or_seed: Union[(int, argparse.Namespace)], fix_cudnn=False):
if hasattr(args_or_seed, 'seed'):
args_or_seed = args_or_seed.seed
random.seed(args_or_seed)
np.random.seed(args_or_seed)
torch.manual_seed(args_or_seed)
torch.cuda.manual_seed_all(args_or_seed)
os.environ[... |
def revert_sync_batchnorm(module):
module_output = module
module_checklist = [torch.nn.modules.batchnorm.SyncBatchNorm]
if hasattr(mmcv, 'ops'):
module_checklist.append(mmcv.ops.SyncBatchNorm)
if isinstance(module, tuple(module_checklist)):
module_output = _BatchNormXd(module.num_feature... |
def test_multi_objective_max_loss_negative():
    # Constructing MultiObjectiveCDV with negative max empirical losses
    # must be rejected; one negative bound is supplied per objective.
    with pytest.raises(ValueError):
        MultiObjectiveCDV(analytical, max_empirical_losses=[max_empirical_loss_neg, max_empirical_loss_neg])
class LPPool1d(_LPPoolNd):
    # Complex-capable 1D power-average (Lp) pooling: routes
    # torch.nn.functional.lp_pool1d through the project's complex_fcaller
    # wrapper (cF), which presumably handles complex inputs — confirm.
    kernel_size: _size_1_t
    stride: _size_1_t
    def forward(self, input: Tensor) -> Tensor:
        # norm_type / ceil_mode come from the _LPPoolNd base; norm_type
        # is coerced to float before the functional call.
        return cF.complex_fcaller(F.lp_pool1d, input, float(self.norm_type), self.kernel_size, self.stride, self.ceil_mode)
def count_paren_parity(tree):
    """Return the count of '(' minus the count of ')' in ``tree``.

    Zero means the parentheses are balanced in count (though not
    necessarily well-nested). Non-parenthesis characters are ignored.
    """
    deltas = {'(': 1, ')': -1}
    return sum(deltas.get(ch, 0) for ch in tree)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.