code stringlengths 101 5.91M |
|---|
class Adafactor(torch.optim.Optimizer):
def __init__(self, params, lr=None, eps=1e-30, eps_scale=0.001, clip_threshold=1.0, decay_rate=(- 0.8), betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False):
relative_step = (lr is None)
if (warmup_init and (not relative_step)):
r... |
def main():
    """Entry point: load the evaluation data, build the model, then
    evaluate and print the proposal recall area."""
    print('loading dataset')
    loader, text_proc = get_dataset(args)
    print('building model')
    net = get_model(text_proc, args)
    area = validate(net, loader, args)
    print('proposal recall area: {:.6f}'.format(area))
class CamVid(data.Dataset):
train_folder = 'train'
train_lbl_folder = 'trainannot'
val_folder = 'val'
val_lbl_folder = 'valannot'
test_folder = 'test'
test_lbl_folder = 'testannot'
img_extension = '.png'
color_encoding = OrderedDict([('sky', (128, 128, 128)), ('building', (128, 0, 0)), (... |
class StarTransEnc(nn.Module):
def __init__(self, embed, hidden_size, num_layers, num_head, head_dim, max_len, emb_dropout, dropout):
super(StarTransEnc, self).__init__()
self.embedding = get_embeddings(embed)
emb_dim = self.embedding.embedding_dim
self.emb_fc = nn.Linear(emb_dim, hi... |
def precision_recall_f1_report(list_tuples_gold: List[List[tuple]], list_tuples_pred: List[List[tuple]], macro_over='types', **kwargs):
assert (len(list_tuples_gold) == len(list_tuples_pred))
if (macro_over == 'types'):
scores = _prf_scores_over_types(list_tuples_gold, list_tuples_pred, **kwargs)
el... |
def linear_algebra_heuristic(d):
d = copy(d)
I = d['I']
def want_la():
if (not I):
return False
n_used_vars = None
bound = None
if next(iter(I)).ring().has_degree_order():
new_bound = 200
n_used_vars = used_vars_set(I, bound=new_bound).deg(... |
def redirect(location, code=302, Response=None):
if (Response is None):
from .wrappers import Response
display_location = escape(location)
if isinstance(location, text_type):
from .urls import iri_to_uri
location = iri_to_uri(location, safe_conversion=True)
response = Response(('... |
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
val = str(val)
result: Any = []
if (val in NULL_VALUES):
return [np.nan]
if (not validate_mx_rfc(val)):
if (errors == 'raise'):
raise ValueError(f'Unable to parse value {val}')
error_re... |
def get_logger():
    """Configure and return the root logger.

    Any handlers previously attached to the root logger are discarded,
    a single stderr StreamHandler with a timestamped format is attached,
    and the level is set to DEBUG.
    """
    root = logging.getLogger()
    # Drop anything attached by earlier calls or other libraries.
    root.handlers = []
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter('[%(levelname)s %(asctime)s] %(name)s %(message)s',
                          '%H:%M:%S'))
    root.addHandler(handler)
    root.setLevel('DEBUG')
    return root
class TestAnalyseDeclarationsTransform(unittest.TestCase):
    def test_calculate_pickle_checksums(self):
        # Checksums for a fixed member list; the accepted count range
        # (2 or 3) presumably depends on which hash algorithms are
        # available in the running interpreter -- verify against
        # _calculate_pickle_checksums, which is not visible here.
        checksums = _calculate_pickle_checksums(['member1', 'member2', 'member3'])
        assert (2 <= len(checksums) <= 3), checksums
def format_baseline(retrievals, kg_type='atomic'):
saved_rels = {}
if (kg_type == 'atomic'):
for i in range(len(retrievals)):
relations = [ast.literal_eval(r) for r in retrievals[i][1][0][0][1:(- 1)]]
saved_rels[i] = {}
for d in range(len(dimensions_of_interest)):
... |
class ReformerTokenizerFast(metaclass=DummyObject):
    """Placeholder used when the 'tokenizers' backend is not installed;
    instantiating it raises via requires_backends instead of failing with
    an opaque ImportError."""
    _backends = ['tokenizers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tokenizers'])
class BroadcastParameterRule(str, enum.Enum):
    """Named rules for broadcasting/matching parameters.

    The str mixin makes members compare equal to their string values,
    which is convenient for config files and serialization.
    NOTE(review): the exact matching semantics of each rule are defined
    by the consumer of this enum, not visible here.
    """
    INTERSECT = 'intersect'
    ONE_TO_ONE = 'one_to_one'
    ALL_OR_NOTHING = 'all_or_nothing'
    NONE = 'none'
class JoinAcceptPayload(Payload):
_OFFSET_APPNONCE = 0
_LEN_APPNONCE = 3
_OFFSET_NETID = (_OFFSET_APPNONCE + _LEN_APPNONCE)
_LEN_NETID = 3
_OFFSET_DEVADDR = (_OFFSET_NETID + _LEN_NETID)
_LEN_DEVADDR = 4
_OFFSET_DLSETTINGS = (_OFFSET_DEVADDR + _LEN_DEVADDR)
_LEN_DLSETTINGS = 1
_MASK_D... |
def module_has_exports(mod):
    """Return True if any attribute of *mod* is a callable marked with
    the TorchScript EXPORT modifier.

    ``dir`` can list names whose lookup raises AttributeError, so the
    getattr default guards against that -- same effect as the original
    hasattr-then-getattr pair, without the double lookup.
    """
    for name in dir(mod):
        item = getattr(mod, name, None)
        if callable(item) and get_torchscript_modifier(item) is FunctionModifiers.EXPORT:
            return True
    return False
def set_values(params, values):
    """Generator that temporarily overwrites each param's value.

    On first resume the new *values* are installed; after the yield the
    original values (captured up front) are restored. Each element of
    *params* must expose ``value()`` and ``set_value(v)``.
    """
    previous = [param.value() for param in params]
    for param, new_val in zip(params, values):
        param.set_value(new_val)
    yield
    # Restore the snapshot taken before mutation.
    for param, old_val in zip(params, previous):
        param.set_value(old_val)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output', default='output', type=str)
parser.add_argument('--data', default='val2017', type=str)
parser.add_argument('--annotations', default='annotations', type=str)
parser.add_argument('--inres', default='512,512', type=str)
... |
def module_init():
    """Build and return the root binding module for ns3's csma_layout
    (cpp namespace ``::ns3``)."""
    return Module('ns.csma_layout', cpp_namespace='::ns3')
class Partition(object):
def __init__(self, pid=0):
self.pid = pid
self.meta = []
self.density = None
self.data = None
self.maxdiff = None
def __str__(self):
if (self.density is None):
return f'''{self.pid}: # : {len(self.data)}
MaxDiff: {self.maxdiff}... |
class QuotientOfSimplicialSet(PushoutOfSimplicialSets):
def __init__(self, inclusion, vertex_name='*'):
subcomplex = inclusion.domain()
PushoutOfSimplicialSets.__init__(self, [inclusion, subcomplex.constant_map()], vertex_name=vertex_name)
ambient = inclusion.codomain()
if (ambient.i... |
.parametrize('interval', [Interval(0, 1, False, False), Interval(0, 1, False, True), Interval(0, 1, True, False), Interval(0, 1, True, True), Interval((- np.inf), np.inf, False, False), Interval((- np.inf), np.inf, False, True), Interval((- np.inf), np.inf, True, False), Interval((- np.inf), np.inf, True, True), Interv... |
def mask_v2(val, m, multi_head=False, high_dim=False, name=None):
    """Multiply *val* element-wise by mask *m* cast to val's dtype.

    Args:
        val: tensor to be masked.
        m: mask tensor (typically 0/1).
        multi_head: prepend a leading axis to the mask (broadcast over heads).
        high_dim: append a trailing axis to the mask (broadcast over features).
        name: optional name scope; defaults to 'new_exp_mask'.
    """
    with tf.name_scope(name or 'new_exp_mask'):
        mask = m
        if multi_head:
            mask = tf.expand_dims(mask, 0)
        if high_dim:
            mask = tf.expand_dims(mask, -1)
        return val * tf.cast(mask, val.dtype)
def hexists(file_path: str) -> bool:
    """Return True if *file_path* exists.

    Paths starting with 'hdfs' are probed via ``hadoop dfs -test -e``;
    anything else is checked on the local filesystem.
    """
    if not file_path.startswith('hdfs'):
        return os.path.exists(file_path)
    # NOTE(review): shell command built from file_path -- injection risk
    # if the path can come from untrusted input.
    command = '{} dfs -test -e {}'.format(HADOOP_BIN, file_path)
    return os.system(command) == 0
def get_ft_output_directory(params, makedirs=True):
path = get_output_directory(params, makedirs=makedirs)
if (not params.ut):
path = os.path.join(path, params.target_dataset)
ft_basename = '{:02d}way_{:03d}shot_{}_{}'.format(params.n_way, params.n_shot, params.ft_parts, params.ft_tag)
path = os... |
class Softmin(Module):
    """Module wrapper around ``F.softmin``.

    Args:
        dim: dimension along which softmin is computed; ``None`` lets
            ``F.softmin`` infer a default.
    """

    def __init__(self, dim=None):
        super(Softmin, self).__init__()
        self.dim = dim

    def forward(self, input):
        # softmin(x) == softmax(-x); outputs along ``dim`` sum to 1.
        return F.softmin(input, self.dim, _stacklevel=5)
def resnet50(num_classes=1000, pretrained=None):
    """Construct a ResNet-50 (Bottleneck blocks, layout [3, 4, 6, 3]).

    Args:
        num_classes: size of the classification head.
        pretrained: optional checkpoint path; when given, the state dict
            is loaded with torch.load and applied to the model.
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3], num_classes)
    if pretrained is not None:
        load_pretrained_model(net, torch.load(pretrained))
    return net
def store_recommendation(recommendations, path=''):
    """Write recommendations to *path* as tab-separated lines.

    Each output line is ``user<TAB>item<TAB>value``.

    Args:
        recommendations: mapping user -> iterable of (item, value) pairs.
        path: output file path (overwritten).
    """
    with open(path, 'w') as out:
        for user, recs in recommendations.items():
            for item, value in recs:
                # f-string replaces the original unreadable chain of
                # str() + '\t' concatenations; output is byte-identical.
                out.write(f'{user}\t{item}\t{value}\n')
def indent(str, indent=4):
    """Prefix every line of *str* with *indent* spaces.

    Returns just the padding when *str* is None.
    NOTE(review): the parameter ``str`` shadows the builtin of the same
    name; kept as-is to preserve the public signature for keyword callers.
    """
    pad = ' ' * indent
    if str is None:
        return pad
    return '\n'.join(pad + line for line in str.split('\n'))
('/settings')
def settings():
    # Flask-style view (its route decorator was lost in extraction --
    # see the stray "('/settings')" line above). Only authenticated users
    # may proceed: g.user is presumably populated by a before-request
    # hook elsewhere in the app -- verify against the login flow.
    if g.user:
        return redirect('/confidential')
    else:
        return 'You are not logged in'
def calculate_scores(gold_annotations, system_annotations):
scores = {}
for (example_id, gold_annotation) in gold_annotations.iteritems():
system_annotation = system_annotations[example_id]
name_a_annotations = [gold_annotation.name_a_coref, system_annotation.name_a_coref]
name_b_annotat... |
class OmniSourceDistSamplerSeedHook(Hook):
def before_epoch(self, runner):
for data_loader in runner.data_loaders:
if hasattr(data_loader.sampler, 'set_epoch'):
data_loader.sampler.set_epoch(runner.epoch)
elif hasattr(data_loader.batch_sampler.sampler, 'set_epoch'):
... |
def isogenies_2(E, minimal_models=True):
f2 = E.division_polynomial(2)
x2 = sorted(f2.roots(multiplicities=False))
x = f2.parent().gen()
ff = [(x - x2i) for x2i in x2]
from sage.rings.number_field.number_field_base import NumberField
model = ('minimal' if (minimal_models and isinstance(E.base_fi... |
class BaselineTrain(nn.Module):
def __init__(self, model_func, num_class, loss_type='softmax'):
super(BaselineTrain, self).__init__()
self.feature = model_func
if (loss_type == 'softmax'):
self.classifier = nn.Linear(self.feature.final_feat_dim, num_class)
self.classi... |
def text_clean_phi(text_cleaned, alphabet):
text_cleaned = re.sub('^(IG|SEG|BCH|Agora|vacat) .*\\n?', '', text_cleaned, flags=re.MULTILINE)
text_cleaned = text_cleaned.replace('', '[').replace('', ']')
text_cleaned = re.sub('vacat .*\\n?', '\n', text_cleaned, flags=re.MULTILINE)
text_cleaned = re.sub(' ... |
def load_caviar(data_path, val_split=0.5, canonical_split=True, verbose=0):
((xtr, ytr_deg, *info_tr), (xvalte, yvalte_deg, *info_valte)) = pickle.load(gzip.open(data_path, 'rb'))
def _parse_info(info):
parsed_info = {}
parsed_info['x_coord'] = info[0]
parsed_info['y_coord'] = info[1]
... |
class VigenereCryptosystem(SymmetricKeyCryptosystem):
def __init__(self, S, n):
if (not isinstance(S, StringMonoid_class)):
raise TypeError(('S (= %s) must be a string monoid.' % S))
SymmetricKeyCryptosystem.__init__(self, S, S, S, block_length=1, period=n)
def __call__(self, K):
... |
class Entity(object):
def __init__(self, type_id: List[int]=None, type_prob: List[float]=None, qid: List[int]=None):
self.type_id = type_id
self.type_prob = type_prob
self.qid = qid
def __eq__(self, other):
return (self.__dict__ == other.__dict__)
def flatten(self):
r... |
def annotate_fps(image: Image.Image, fps: int) -> None:
    """Draw an FPS counter in the top-left corner of *image*, in place.

    NOTE(review): assumes 'fonts/arial.ttf' exists relative to the current
    working directory -- ImageFont.truetype raises OSError otherwise.
    """
    draw = ImageDraw.Draw(image)
    font = ImageFont.truetype('fonts/arial.ttf', 25)
    # Blue text at the image origin; the hint about 'q' matches a display
    # loop elsewhere that presumably polls for that key.
    draw.text((0, 0), f'FPS: {fps} (Press q to exit.)', fill=(0, 0, 255), font=font)
def GenerateSM90_TensorOp_tf32_WGMMA_gemm(manifest, cuda_version):
if (not CudaToolkitVersionSatisfies(cuda_version, 12, 0)):
return
layouts_tf32 = [[[LayoutType.ColumnMajor, 1], [LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 1]], [[LayoutType.ColumnMajor, 1], [LayoutType.RowMajor, 1], [LayoutTyp... |
def eval_policy(policy, eval_env, seed, eval_episodes=10):
eval_env.seed((seed + 100))
avg_reward = 0.0
gt = []
pred = []
for _ in range(eval_episodes):
(state, done) = (eval_env.reset(), False)
while (not done):
gt.append(state.copy())
state[0] = 0
... |
class SEResNetBottleneck(Bottleneck):
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None):
super(SEResNetBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride)
self.bn1 = nn.BatchNorm2d(... |
def test_context_window(device):
from speechbrain.processing.features import ContextWindow
inp = torch.tensor([1, 2, 3], device=device).unsqueeze(0).unsqueeze((- 1)).float()
compute_cw = ContextWindow(left_frames=1, right_frames=1).to(device)
out = torch.tensor([[0, 1, 2], [1, 2, 3], [2, 3, 0]], device=... |
class PdfArray(list):
    """A list subclass that serializes as a PDF array object."""

    def __bytes__(self):
        # Render each element via pdf_repr and wrap in PDF array brackets.
        rendered = [pdf_repr(item) for item in self]
        return b'[ ' + b' '.join(rendered) + b' ]'
class SniffTest(AllenNlpTestCase):
def test_config(self):
assert (set(DEFAULT_MODELS.keys()) == {'machine-comprehension', 'semantic-role-labeling', 'textual-entailment', 'coreference-resolution', 'named-entity-recognition'})
def test_machine_comprehension(self):
predictor = DEFAULT_MODELS['machi... |
_start_docstrings('CamemBERT Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. ', CAMEMBERT_START_DOCSTRING)
class CamembertForTokenClassification(RobertaForTokenClassification):
config_class = CamembertConfig
pr... |
def get_args():
def exclusive_group(group, name, default, help):
destname = name.replace('-', '_')
subgroup = group.add_mutually_exclusive_group(required=False)
subgroup.add_argument(f'--{name}', dest=f'{destname}', action='store_true', help=f"{help} (use '--no-{name}' to disable)")
... |
def test_predict_proba():
    # ConstantClassifier should report probability 1 (single column) for
    # every input sample, regardless of the feature values.
    X = np.array([1, 2, 3])
    classifier = ConstantClassifier()
    predict_proba = classifier.predict_proba(X)
    ground_truth = np.array([[1], [1], [1]])
    assert_array_equal(ground_truth, predict_proba)
def ShuffleV1(**kwargs):
    """ShuffleNet v1 with the standard 3-group configuration.

    Extra keyword arguments are forwarded to the ShuffleNet constructor.
    """
    config = {
        'out_planes': [240, 480, 960],
        'num_blocks': [4, 8, 4],
        'groups': 3,
    }
    return ShuffleNet(config, **kwargs)
class CorefResult():
def __init__(self, text, clusters, char_map, reverse_char_map, coref_logit, text_idx):
self.text = text
self.clusters = clusters
self.char_map = char_map
self.reverse_char_map = reverse_char_map
self.coref_logit = coref_logit
self.text_idx = text_... |
def override_options(opt, opt_over, key_stack=None, safe_check=False):
for (key, value) in opt_over.items():
if isinstance(value, dict):
opt[key] = override_options(opt.get(key, dict()), value, key_stack=(key_stack + [key]), safe_check=safe_check)
else:
if (safe_check and (ke... |
def assert_allclose(a, b, rtol=1e-05, atol=1e-08):
    """Thin wrapper over numpy.testing.assert_allclose with explicit
    default tolerances; raises AssertionError on mismatch."""
    return np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
class LessThanInfinity(_uniq, RingElement):
def __init__(self, parent=UnsignedInfinityRing):
RingElement.__init__(self, parent)
def _repr_(self):
return 'A number less than infinity'
def _latex_(self):
return '(<\\infty)'
def _add_(self, other):
if isinstance(other, Unsig... |
class List(Type):
def __init__(self, elem_type):
self.elem_type = elem_type
def __eq__(self, other):
return ((self.__class__ == other.__class__) and (self.elem_type == other.elem_type))
def from_str(self, s):
if (';' in s):
segments = s.split(';')
elif (',' in s):... |
.verilator
def test_multi_tasklet():
sdfg = dace.SDFG('rtl_multi_tasklet')
state = sdfg.add_state()
sdfg.add_array('A', [1], dtype=dace.int32)
sdfg.add_array('B', [1], dtype=dace.int32)
sdfg.add_array('C', [1], dtype=dace.int32)
tasklet0 = state.add_tasklet(name='rtl_tasklet0', inputs={'a'}, out... |
_utils.test()
def test_check_grad_struct_field_not_placed():
d = ti.Struct.field({'pos': ti.types.vector(3, float), 'vel': ti.types.vector(3, float), 'acc': ti.types.vector(3, float), 'mass': ti.f32}, needs_grad=True)
ti.root.dense(ti.i, 1).place(d)
def foo():
pass
with pytest.raises(RuntimeErro... |
def build_scheduler(optimizer, warmup_epoches, start_epoches, end_epoches, scale=0.1):
def scheduler(epoch):
epoch0 = (epoch + 1.0)
decay_rate = 0.1
decay_steps = (250 * 1000)
new_lrate = (decay_rate ** (epoch0 / decay_steps))
return new_lrate
return torch.optim.lr_schedu... |
class CTRLTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
control_codes = CONTROL_CODES
def __init__(self, vocab_file, merges_file, unk_token='<unk>', **kwargs... |
def cam_loss(source, non_source):
identity_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(source), logits=source))
non_identity_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(non_source), logits=non_source))
loss = (identity_loss + non_i... |
class ConjugateGradientOptimizer(Serializable):
def __init__(self, cg_iters=10, reg_coeff=1e-05, subsample_factor=1.0, backtrack_ratio=0.8, max_backtracks=15, debug_nan=False, accept_violation=False, hvp_approach=None, num_slices=1):
Serializable.quick_init(self, locals())
self._cg_iters = cg_iters
... |
def y_scatter(file=None, query=None, y=None, save=False, title='', label=None):
try:
df = (pd.read_csv(file).query(query) if query else pd.read_csv(file))
rows = np.arange(df.shape[0])
plt.rcParams['figure.figsize'] = [8, 8]
(fig, ax1) = plt.subplots(1, 1)
ax1.scatter(rows, d... |
class DryRunMetric(Metric):
def __init__(self):
self.token_cost_estimator = AutoTokenCostEstimator()
def __repr__(self):
return 'DryRunMetric'
def evaluate(self, scenario_state: ScenarioState, metric_service: MetricService, eval_cache_path: str, parallelism: int) -> MetricResult:
pro... |
class TestGaussianMLPEncoder(TfGraphTestCase):
.parametrize('obs_dim, embedding_dim', [((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1), (2, 2)), ((2, 2), (2, 2))])
def test_get_embedding(self, obs_dim, embedding_dim):
env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=embedding_... |
class TrainingConfig(object):
def __init__(self):
self.num_examples_per_epoch = 586363
self.optimizer = 'SGD'
self.initial_learning_rate = 2.0
self.learning_rate_decay_factor = 0.5
self.num_epochs_per_decay = 8.0
self.train_inception_learning_rate = 0.0005
sel... |
def save_train_history(args, train_loss, train_acc, val_loss, val_acc, test_loss, test_acc):
dict_save_path = os.path.join(args.out_dir, 'dicts', 'train_hist_{}.json'.format(args.experiment_id))
os.makedirs(os.path.dirname(dict_save_path), exist_ok=True)
with open(dict_save_path, 'w') as f:
json.dum... |
class WithinVisitLabeler(Labeler):
def __init__(self, ontology: extension_datasets.Ontology, visit_start_adjust_func: Callable=identity, visit_end_adjust_func: Callable=identity):
self.ontology: extension_datasets.Ontology = ontology
self.visit_start_adjust_func: Callable = visit_start_adjust_func
... |
class TestBuiltinEntityParser(SnipsTest):
def setUp(self):
_BUILTIN_ENTITY_PARSERS.clear()
def test_should_parse_grammar_entities(self):
text = "we'll be 2 at the meeting"
language = 'en'
parser = BuiltinEntityParser.build(language=language)
parse = parser.parse(text)
... |
class SPADEResBlock(nn.Module):
def __init__(self, opt, input_nc, output_nc, use_mask_norm=True):
super(SPADEResBlock, self).__init__()
self.param_opt = opt
self.learned_shortcut = (input_nc != output_nc)
middle_nc = min(input_nc, output_nc)
self.conv_0 = nn.Conv2d(input_nc, ... |
def classify(images, model, adversarial_attack):
    """Run a TensorFlow graph classifier on a batch of torch images.

    Args:
        images: torch tensor, presumably NCHW -- converted to NHWC numpy
            before being fed to the graph (TODO confirm layout).
        model: a tf.Graph exposing 'import/Placeholder:0' (input) and
            'import/logits/output:0' (output).
        adversarial_attack: unused in this body; presumably kept for a
            shared call signature -- verify against callers.

    Returns:
        CUDA torch tensor of the logits.
    """
    # NCHW -> NHWC for the TF graph.
    images = images.cpu().numpy().transpose(0, 2, 3, 1)
    with TFHider.tf.Session(graph=model) as sess:
        logits = sess.run('import/logits/output:0', feed_dict={'import/Placeholder:0': images})
    outputs = torch.from_numpy(logits).cuda()
    return outputs
class TrajectoryTransformerPreTrainedModel(metaclass=DummyObject):
    """Placeholder used when torch is not installed; instantiating it
    raises via requires_backends instead of an opaque ImportError."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_data_cache_path(args):
    """Return ``(cache_dir, dataset_name)`` for *args*.

    The name is the final component of ``args.data.path``; the directory
    is taken verbatim from ``args.data.output.path``.
    """
    dataset_name = Path(args.data.path).name
    return args.data.output.path, dataset_name
def parse_schema_kind(schema: str, app: (str | None)) -> SchemaInputKind:
try:
netloc = urlparse(schema).netloc
except ValueError as exc:
raise click.UsageError(INVALID_SCHEMA_MESSAGE) from exc
if (('\x00' in schema) or (not schema)):
raise click.UsageError(INVALID_SCHEMA_MESSAGE)
... |
class Camera():
def GetNumParams(type_):
if ((type_ == 0) or (type_ == 'SIMPLE_PINHOLE')):
return 3
if ((type_ == 1) or (type_ == 'PINHOLE')):
return 4
if ((type_ == 2) or (type_ == 'SIMPLE_RADIAL')):
return 4
if ((type_ == 3) or (type_ == 'RADIAL'... |
def encode_dataset2(*splits, encoder):
encoded_splits = []
for split in splits:
fields = []
field_t = 0
for field in split:
if isinstance(field[0], str):
if (field_t == 0):
special = [[encoder.encoder[('<|' + x.split('<|')[1].replace(' ', '... |
class Test__ExoDataEqn(TestCase):
    def test__repr__(self):
        # A default-constructed _ExoDataEqn should repr as a bare
        # constructor call with no arguments.
        eqn = _ExoDataEqn()
        self.assertEqual(eqn.__repr__(), '_ExoDataEqn()')
def test_Detector_get():
efficiency = 0.5
(detector, parent, tl) = create_detector(efficiency=efficiency)
tl.init()
for i in range(1000):
tl.time = (i * .0)
detector.get()
assert (((len(parent.log) / 1000) - efficiency) < 0.1)
dark_count = 100
stop_time = .0
(detector, pa... |
class Resnet18Triplet(nn.Module):
def __init__(self, embedding_dimension=512, pretrained=False):
super(Resnet18Triplet, self).__init__()
self.model = resnet18(pretrained=pretrained)
input_features_fc_layer = self.model.fc.in_features
self.model.fc = nn.Linear(input_features_fc_layer,... |
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
import math
torch = kwargs.get('torch', globals()['torch'])
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
data = dict([((i, i), (float((i + 1)) / matrix_size)) for i in range(matrix_size)])
def mul... |
class RecordQueue(object):
def __init__(self, fields, name=None, capacity=1, enforce_unique_name=False, num_threads=1):
assert (isinstance(fields, list) or isinstance(fields, Struct)), 'fields must be either a Struct or a list of raw field names.'
if isinstance(fields, list):
fields = fr... |
def get_BertAdam_optimizer(cfg, model):
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (n not in no_decay)], 'weight_decay_rate': 0.01}, {'params': [p for (n, p) in param_optimizer if (n in ... |
def tokenize_corpus(filename, np_filename, print_interval=10000):
print(' > tokenizing {}'.format(filename))
tokenizer = Tokenizer(cache_dir='./cache')
tokenized_docs = []
num_docs = 0
num_tokens = 0
start_time = time.time()
with open(filename, 'r') as f:
for line in f:
t... |
def AUNP_calc(classes, P, POP, AUC_dict):
    """AUNP: AUC of each class weighted by its prevalence (P/POP),
    summed over *classes*.

    Returns the string 'None' when any lookup or division fails
    (e.g. missing class key or zero population), preserving the
    library's error-reporting convention.
    """
    try:
        return sum((P[c] / POP[c]) * AUC_dict[c] for c in classes)
    except Exception:
        return 'None'
def erase_3D_path(path, base_pos=5, item=AIR, offset=(0, 0, 0)):
    """Overwrite every block along *path* with *item* (AIR by default).

    Args:
        path: sequence of (x, z, y) coordinate triples.
        base_pos: vertical base added to each block's height. The original
            code ignored this parameter and hard-coded 5; it is now used,
            with the default preserving the old behavior.
        item: block type to place.
        offset: (x, y, z) translation applied to every position.
    """
    if len(path) == 0:
        return
    blocks = []
    for pos in path:
        blocks.append(Block(
            position=Point(
                x=pos[0] + offset[0],
                # was: (pos[2] + 5) + offset[2] -- base_pos was unused
                y=(pos[2] + base_pos) + offset[2],
                z=pos[1] + offset[1]),
            type=item))
    CLIENT.spawnBlocks(Blocks(blocks=blocks))
def get_real(input, input_type='linear', channels_axis=1):
if (input_type == 'linear'):
nb_hidden = input.size()[(- 1)]
if (input.dim() == 2):
return input.narrow(1, 0, (nb_hidden // 2))
elif (input.dim() == 3):
return input.narrow(2, 0, (nb_hidden // 2))
else:
... |
def _random_dataset(n_samples=1000, n_features=1000, representation='dense', dtype=np.float32):
if (representation == 'dense'):
X = np.random.RandomState(0).random_sample((n_samples, n_features))
X = X.astype(dtype, copy=False)
else:
X = sp.random(n_samples, n_features, density=0.05, for... |
class GardensPointDataset(Dataset):
def __init__(self, destination: str='images/GardensPoint/'):
self.destination = destination
def load(self) -> Tuple[(List[np.ndarray], List[np.ndarray], np.ndarray, np.ndarray)]:
print('===== Load dataset GardensPoint day_right--night_right')
if (not o... |
def random_fgp_morphism_0(*args, **kwds):
    """Return a random endomorphism of a random fgp module.

    All arguments are forwarded to random_fgp_module; the morphism sends
    each Smith-form generator to a random integer multiple of itself.
    """
    A = random_fgp_module(*args, **kwds)
    return A.hom([(ZZ.random_element() * g) for g in A.smith_form_gens()])
def waterfall_legacy(expected_value, shap_values=None, features=None, feature_names=None, max_display=10, show=True):
if (show is False):
plt.ioff()
upper_bounds = None
lower_bounds = None
if str(type(expected_value)).endswith("Explanation'>"):
shap_exp = expected_value
expected_... |
def prepare_bounds(bounds, n):
    """Normalize a (lb, ub) bounds pair to two float arrays of length *n*.

    Args:
        bounds: two-element sequence of lower and upper bounds; each may
            be a scalar or an array-like.
        n: number of variables.

    Returns:
        ``(lb, ub)`` as float ndarrays; scalar inputs are tiled to length n.
    """
    lb, ub = (np.asarray(bound, dtype=float) for bound in bounds)
    if lb.ndim == 0:
        lb = np.resize(lb, n)
    if ub.ndim == 0:
        ub = np.resize(ub, n)
    return lb, ub
class BertAdam(Optimizer):
def __init__(self, params, lr=required, warmup=(- 1), t_total=(- 1), schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-06, weight_decay=0.01, max_grad_norm=1.0):
if ((lr is not required) and (lr < 0.0)):
raise ValueError('Invalid learning rate: {} - should be >= 0.0'.fo... |
class TestBroadcast(object):
def setup(self):
self.seed =
def test_uniform(self):
random = Generator(MT19937(self.seed))
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0., 0., 0.])
random = Generator(MT19937(self.seed))
actual =... |
class REO(BaseMetric):
def __init__(self, recommendations, config, params, eval_objects, additional_data):
super().__init__(recommendations, config, params, eval_objects, additional_data)
self._cutoff = self._evaluation_objects.cutoff
self._relevance = self._evaluation_objects.relevance.bina... |
def test_wrap_scalar_function_with_validation():
def func_(x):
return x
(fcalls, func) = optimize._optimize._wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5)
for i in range(5):
func(np.asarray(i))
assert (fcalls[0] == (i + 1))
msg = 'Too many function calls'
wi... |
def phi_on_basis(L):
    """Image of a basis element indexed by *L*: the product in
    F_algebra(QQ) of phi applied to each component of L.

    NOTE(review): the semantics of phi_on_multiplicative_basis and the
    structure of the components in L are defined elsewhere -- verify there.
    """
    F = F_algebra(QQ)
    return F.prod((phi_on_multiplicative_basis(compo) for compo in L))
def get_top5_vertices(hgraph):
nodes = hgraph['nodes']
v_list = [node['id'] for node in nodes if (node['bipartite'] == 0)]
v_list.sort(key=natural_keys)
v2he_sorted = collections.OrderedDict()
for v in v_list:
v2he_sorted[v] = []
for link in hgraph['links']:
if (link['source'] no... |
def AFMEstimator(linear_feature_columns, dnn_feature_columns, use_attention=True, attention_factor=8, l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_att=1e-05, afm_dropout=0, seed=1024, task='binary', model_dir=None, config=None, linear_optimizer='Ftrl', dnn_optimizer='Adagrad', training_chief_hooks=None):
def... |
def _subsample_by_classes(all_examples, labels, num_per_class=None):
if (num_per_class is None):
return all_examples
examples = {label: [] for label in labels}
for example in all_examples:
if (example.label in labels):
examples[example.label].append(example)
picked_examples =... |
def mock_database():
    """Build a Mock standing in for SingleDatabase.

    Both schema and table getters return Mocks spec'd as DataFrames so
    attribute access stays DataFrame-shaped in tests.
    """
    db = Mock(spec=SingleDatabase)
    db.get_schema_given.return_value = Mock(name='schema', spec=pd.DataFrame)
    db.get_table_given.return_value = Mock(name='table', spec=pd.DataFrame)
    return db
def getintegrator(rhs, u0, solver, context):
params = solver.params
u1 = u0.copy()
if (params.integrator == 'RK4'):
a = np.array([(1.0 / 6.0), (1.0 / 3.0), (1.0 / 3.0), (1.0 / 6.0)], dtype=context.float)
b = np.array([0.5, 0.5, 1.0], dtype=context.float)
u2 = u0.copy()
(RK4)
... |
def check_gradient_numerical(channels=4, grad_value=True, grad_sampling_loc=True, grad_attn_weight=True):
value = (torch.rand(N, S, M, channels).cuda() * 0.01)
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = (torch.rand(N, Lq, M, L, P).cuda() + 1e-05)
attention_weights /= a... |
class XLMRobertaTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['attention_mask']
def __init__(self, vocab_file, bos_token='<s>', eos_toke... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.