code stringlengths 101 5.91M |
|---|
def test_regular_grid_2d_8():
    """regular_grid on a 20x40 array with ~8 requested samples."""
    arr = np.zeros((20, 40))
    grid = regular_grid(arr.shape, 8)
    # Both axes are sampled every 10 cells, starting at offset 5.
    assert_equal(grid, [slice(5.0, None, 10.0), slice(5.0, None, 10.0)])
    arr[grid] = 1
    assert_equal(arr.sum(), 8)
class Partition2(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/T5Block[4]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[5]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:2'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYE... |
def _conditional_combinations() -> list[tuple[(int, int, bool)]]:
args = [0, 1]
conditional_opcodes = range(90, 166)
combinations: list[tuple[(int, int, bool)]] = []
for op in conditional_opcodes:
if (op is opcodes.SETUP_ASYNC_WITH):
continue
for arg in args:
comb... |
_properties
class Map(object):
label = Property(dtype=str, desc='Label of the map')
params = ListProperty(element_type=str, desc='Mapped parameters')
range = RangeProperty(desc='Ranges of map parameters', default=sbs.Range([]))
schedule = EnumProperty(dtype=dtypes.ScheduleType, desc='Map schedule', defa... |
def make_batches(lines, args, task, max_positions, encode_fn):
tokens = [task.source_dictionary.encode_line(encode_fn(src_str), add_if_not_exist=False).long() for src_str in lines]
lengths = torch.LongTensor([t.numel() for t in tokens])
itr = task.get_batch_iterator(dataset=task.build_dataset_for_inference(... |
def add_simple_state_to_sdfg(state: SDFGState, top_sdfg: SDFG, state_name: str):
if (state.last_sdfg_states.get(top_sdfg) is not None):
substate = top_sdfg.add_state(state_name)
else:
substate = top_sdfg.add_state(state_name, is_start_state=True)
finish_add_state_to_sdfg(state, top_sdfg, sub... |
def sequence2frame(accompany_pianoroll, chord_groundtruth):
print('augment chord into frame base...')
accompany_pianoroll_frame = []
chord_groundtruth_frame = []
for (acc_song, truth_song) in zip(accompany_pianoroll, chord_groundtruth):
acc_pianoroll = []
truth_pianoroll = []
for... |
def generate_tp_model(default_config: OpQuantizationConfig, base_config: OpQuantizationConfig, mixed_precision_cfg_list: List[OpQuantizationConfig], name: str) -> TargetPlatformModel:
default_configuration_options = tp.QuantizationConfigOptions([default_config])
generated_tpc = tp.TargetPlatformModel(default_co... |
class WarmUpAndCosine(callbacks.Callback):
def __init__(self, warmup_steps, max_lr, min_lr=0.0, total_steps=None, layer_name='global_step_tracker', **kwargs):
super().__init__(**kwargs)
self.max_lr = max_lr
self.min_lr = min_lr
self.warmup_steps = warmup_steps
self.total_step... |
def _split_persona_and_context(text, eval_type='convai2'):
if ('your persona:' not in text):
return (None, text)
elif (eval_type == 'convai2'):
texts = text.split('\n')
return ('\n'.join(texts[:(- 1)]), texts[(- 1)])
elif (eval_type == 'dnli'):
texts = text.split('\n')
... |
_module('numpy')
def require(a, dtype=None, requirements=None):
possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C', 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F', 'A': 'A', 'ALIGNED': 'A', 'W': 'W', 'WRITEABLE': 'W', 'O': 'O', 'OWNDATA': 'O', 'E': 'E', 'ENSUREARRAY': 'E'}
if (not requirements):
... |
def PreActResNet164Basic(num_classes=10):
    """Build a 164-layer pre-activation ResNet: 3 stages of 27 basic blocks."""
    stage_depths = [27] * 3
    stage_filters = [16, 32, 64]
    return ResNet(PreActBlock, layers=stage_depths, filters=stage_filters,
                  num_classes=num_classes)
def test_reparameterize_size():
    """A reparameterized sample keeps model1's latent dimensionality."""
    mean, logvar = model1.encode(x1.float())
    sample = model1.reparameterize(mean, logvar)
    assert len(sample[0]) == model1.no_latent_features
def vector_to_word(vector):
    """Decode a sequence of integer codes into a string.

    Each element of ``vector`` is mapped through ``int2char`` and the
    resulting characters are concatenated.

    Args:
        vector: iterable of integer character codes.

    Returns:
        The decoded word as a single string.
    """
    # str.join avoids the quadratic cost of repeated string concatenation.
    return ''.join(int2char(vec) for vec in vector)
class unit_gcn(nn.Module):
def __init__(self, in_channels, out_channels, A, adaptive=True):
super(unit_gcn, self).__init__()
self.out_c = out_channels
self.in_c = in_channels
self.num_subset = A.shape[0]
self.adaptive = adaptive
if adaptive:
self.PA = nn.P... |
class bcolors():
    """ANSI terminal escape sequences for colored / styled console output."""

    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKCYAN = '\033[96m'     # bright cyan
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def update_config(cfg, filename, ensure_dir=True):
cfg.defrost()
cfg.merge_from_file(filename)
if ensure_dir:
cfg.working_dir = osp.dirname(osp.abspath(__file__))
cfg.root_dir = osp.dirname(_C.working_dir)
cfg.exp_name = '_'.join(_C.modules)
cfg.output_dir = osp.join(_C.root_... |
def index(g, self, index):
if (sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK):
return g.op('ATen', self, index, operator_s='index')
if sym_help._is_packed_list(index):
indices = sym_help._unpack_list(index)
else:
indices = [index]
def try_mas... |
class BinaryMorphology3D():
param_names = ['shape', 'footprint', 'radius', 'decomposition']
params = [((128, 128, 128),), ('ball', 'cube', 'octahedron'), (1, 3, 5, 10), (None, 'sequence', 'separable')]
def setup(self, shape, footprint, radius, decomposition):
rng = np.random.default_rng(123)
... |
def load_pr_tags():
    """Load PR tag descriptions from ``prtags.json`` located next to this module.

    Returns:
        dict of tag name -> description, with an extra empty ``'release'``
        entry appended.
    """
    module_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(module_dir, 'prtags.json')) as handle:
        details = json.load(handle)
    details['release'] = ''
    return details
def check_one_program(helper, script, precond, graph_dict, w_graph_list, modify_graph=True, place_other_objects=True, id_mapping={}, **info):
helper.initialize(graph_dict)
(script, precond) = modify_objects_unity2script(helper, script, precond)
if modify_graph:
helper.set_to_default_state(graph_dict... |
def test_after_test_case_execution():
observer = FooObserver()
result = MagicMock()
with mock.patch.object(observer._assertion_local_state, 'trace') as trace_mock:
clone = object()
trace_mock.clone.return_value = clone
observer.after_test_case_execution_inside_thread(MagicMock(), res... |
class Decanomial(Benchmark):
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip(([(- 10.0)] * self.N), ([10.0] * self.N)))
self.custom_bounds = [(0, 2.5), ((- 2), (- 4))]
self.global_optimum = [[2.0, (- 3.0)]]
self.fglob = 0.0
d... |
def run_remote(params_path, gpu=False, instance_type='m5.large', ami='ami-00b8b0b2dff90dcab', spot_price=0.5):
command = (COMMAND % params_path)
if gpu:
ami = 'ami-03fd6608775f924b8'
instance_type = 'g3.4xlarge'
spot_price = 0.5
command = (GPU_COMMAND % params_path)
instance ... |
def main(args):
parser = get_config()
all_args = parse_args(args, parser)
assert (all_args.algorithm_name in ['mep', 'adaptive'])
if (all_args.cuda and torch.cuda.is_available()):
print('choose to use gpu...')
device = torch.device('cuda:0')
torch.set_num_threads(all_args.n_train... |
def configuration(parent_package='', top_path=None):
    """numpy.distutils build configuration for the ``polynomial`` subpackage."""
    from numpy.distutils.misc_util import Configuration
    cfg = Configuration('polynomial', parent_package, top_path)
    # Ship the test suite alongside the package data.
    cfg.add_data_dir('tests')
    return cfg
class AgentEvaluator(object):
def __init__(self, env_params, mdp_fn, force_compute=False, mlam_params=NO_COUNTERS_PARAMS, debug=False):
assert callable(mdp_fn), 'mdp generating function must be a callable function'
env_params['mlam_params'] = mlam_params
self.mdp_fn = mdp_fn
self.env... |
_model
def seresnext50_32x4d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
default_cfg = default_cfgs['seresnext50_32x4d']
model = SENet(SEResNeXtBottleneck, [3, 4, 6, 3], groups=32, reduction=16, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=num_classes,... |
def register_cv_hardcoded_model(name, *args, **kw):
    """Register an auto-generated CV model under the given generated-file name."""
    handler = ParamDictCVMOdelHandler(*args, **kw)
    handler.register_autogenerated(generated_file_name_or_path=name)
def test_phi_minus_phi_plus():
    """correct_order must recover the phi_minus state for 200 scenarios."""
    for trial in range(200):
        k1, k2, k3, k4, a3 = create_scenario(phi_minus, phi_plus, trial)
        ordered = correct_order(k1.state, k1.keys)
        assert numpy.array_equal(ordered, phi_minus)
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
if ('optimizer' in checkpoint):
del checkpoint['optimizer']
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
if out_file.endswith('.pth'):
o... |
def getColsPermutations(cols, num):
    """Comma-joined permutations of ``cols`` of every length from ``num`` down to 1.

    Returns an empty list when ``num`` is 0.
    """
    joined = []
    for length in range(num, 0, -1):
        joined.extend(', '.join(p) for p in permutations(cols, length))
    return joined
def get_transformation(args):
    """Build the standard ToTensor + per-channel [-1, 1] normalization transform.

    Note: ``args`` is accepted for interface uniformity but is not used here.
    """
    normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    return transforms.Compose([transforms.ToTensor(), normalize])
def pred_fn_wrapper(pred_rng, params, batch, pred_fn, under_pmap):
idxes = batch.pop('__idx__')
preds = pred_fn(pred_rng=pred_rng, params=params, batch=batch)
preds = {'raw_preds': preds, '__idx__': idxes}
if under_pmap:
return jax.lax.all_gather(preds, axis_name='batch')
else:
retur... |
def default_loader(path):
    """Load an image with whichever backend torchvision is configured for."""
    from torchvision import get_image_backend
    backend = get_image_backend()
    return accimage_loader(path) if backend == 'accimage' else pil_loader(path)
_start_docstrings('\n CamemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n ', CAMEMBERT_START_DOCSTRING)
class CamembertForMultipleChoice(RobertaForMultipleChoice):
    """CamemBERT multiple-choice model.

    Reuses the RoBERTa multiple-choice implementation unchanged, swapping in
    the CamemBERT configuration class.
    """
    config_class = CamembertConfig
def run(target, method, thread, round):
exe = ((target + '/') + target)
print(exe, '-m', method, '-t', thread, '-r', round)
with open(tmp_file, 'w') as ofs:
subprocess.call([exe, '-m', method, '-t', str(thread), '-r', str(round)], stdout=ofs)
X = []
Y = []
with open(tmp_file, 'r') as ifs... |
def create_cookie(name, value, **kwargs):
result = {'version': 0, 'name': name, 'value': value, 'port': None, 'domain': '', 'path': '/', 'secure': False, 'expires': None, 'discard': True, 'comment': None, 'comment_url': None, 'rest': {'HttpOnly': None}, 'rfc2109': False}
badargs = (set(kwargs) - set(result))
... |
class ImageReferenceLabelDataset(Dataset):
def __init__(self, image_path: str, reference_path: str, resolution: int, category: str, transform=None):
super().__init__()
self.resolution = resolution
self.transform = transform
self.image_paths = image_path
self.reference_paths =... |
def load_colbert(args, do_print=True):
(colbert, checkpoint) = load_model(args, do_print)
for k in ['query_maxlen', 'doc_maxlen', 'dim', 'similarity', 'amp']:
if (('arguments' in checkpoint) and hasattr(args, k)):
if ((k in checkpoint['arguments']) and (checkpoint['arguments'][k] != getattr(... |
class _CallbacksManager(Copyable):
callbacks: MutableMapping[(str, MutableSequence[Callable])]
def __init__(self):
self.callbacks = {}
pass
def __getstate__(self):
odict = self.__dict__.copy()
del odict['callbacks']
return odict
def get(self, event: str) -> Sequen... |
def img_tensorize(im: str):
    """Return an RGB image array, loading ``im`` as a local path or fetching it as a URL."""
    assert isinstance(im, str)
    img = (np.array(Image.open(im).convert('RGB'))
           if os.path.isfile(im)
           else get_image_from_url(im))
    assert img is not None, f'could not connect to: {im}'
    return img
class ShuffleProduct(ShuffleProduct_abstract):
def __init__(self, l1, l2, element_constructor=None):
assert (isinstance(l1, Iterable) and isinstance(l2, Iterable))
if (element_constructor is None):
try:
element_constructor = l1.parent()._element_constructor_
e... |
class PDB2Fmap():
def __init__(self, embd_grain='CA', fmap_shape=None):
self.embd_grain = embd_grain
self.fmap_shape = fmap_shape
def fit(self, pdb_file, embd_chain=None):
self.pdb_file = pdb_file
self.pdb = PandasPdb().read_pdb(self.pdb_file)
self.embd_chain = embd_chain... |
class CongruenceSubgroupBase(ArithmeticSubgroup):
def __init__(self, level):
level = ZZ(level)
if (level <= 0):
raise ArithmeticError('Congruence groups only defined for positive levels.')
self.__level = level
ArithmeticSubgroup.__init__(self)
def _an_element_(self):
... |
def main():
parser = ArgumentParser()
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--name', default=None, type=str)
parser.add_argument('--weighted', action='store_true', help='model trains with weighted loss when flag is set')
parser = pl.Trainer.add_argparse_args... |
_function
def does_backend_handle_base_ring(base_ring, backend):
    """Return True iff the given polyhedra backend accepts this base ring.

    Probes by constructing a 0-dimensional Polyhedra parent; the backend
    raises ValueError when it cannot handle the ring.
    """
    try:
        Polyhedra(base_ring, 0, backend)
        return True
    except ValueError:
        return False
class Getitem(Expr):
fields = ('node', 'arg', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if (self.ctx != 'load'):
raise Impossible()
try:
return self.environment.getitem(self.node.as_const(eval_ctx), self.arg.as_const(eva... |
_params({'data_home': [None, str], 'filter_data': [None, tuple], 'download_if_missing': ['boolean'], 'random_state': ['random_state'], 'shuffle': ['boolean'], 'verbose': ['boolean']}, prefer_skip_nested_validation=True)
def fetch_datasets(*, data_home=None, filter_data=None, download_if_missing=True, random_state=None,... |
class VOCSegmentation(Dataset):
def __init__(self, base_dir=Path.db_root_dir('pascal'), split='train', transform=None):
super().__init__()
self._base_dir = base_dir
self._image_dir = os.path.join(self._base_dir, 'JPEGImages')
self._cat_dir = os.path.join(self._base_dir, 'Segmentation... |
class DataTrainingArguments():
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}... |
def compute_pointer_with_align(model, node_type, prev_state, prev_action_emb, parent_h, parent_action_emb, desc_enc):
(new_state, attention_weights) = model._update_state(node_type, prev_state, prev_action_emb, parent_h, parent_action_emb, desc_enc)
output = new_state[0]
memory_pointer_logits = model.pointe... |
class L2Norm(nn.Module):
def __init__(self, n_channels, scale=1.0):
super(L2Norm, self).__init__()
self.n_channels = n_channels
self.scale = scale
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.weight.data *= 0.0
self.weight.da... |
def test_validate_times():
annotations.validate_times(None)
with pytest.raises(ValueError):
annotations.validate_times(np.array([[0, 1], [0, 2]]))
with pytest.raises(ValueError):
annotations.validate_times(np.array([2, 0]))
with pytest.raises(ValueError):
annotations.validate_tim... |
def get_children(graph, p):
    """Return the keys of ``graph`` whose value collection contains ``p``.

    Interprets ``graph`` as a mapping node -> container of nodes and collects
    every key that lists ``p``.

    Args:
        graph: mapping of node -> container of nodes.
        p: the node to search for.

    Returns:
        list of matching keys, in the mapping's iteration order.
    """
    # Iterating items() avoids a second lookup per key; the comprehension
    # replaces the manual append loop.
    return [node for node, members in graph.items() if p in members]
def get_params(argv='1'):
params = dict(quick_test=True, finetune_mode=False, pretrained_model_weights='models/1_1_foa_dev_split6_model.h5', dataset_dir='/scratch/asignal/partha/DCASE2022_SELD_dataset', feat_label_dir='/scratch/asignal/partha/DCASE2022_SELD_dataset/seld_feat_label', model_dir='models/', dcase_outpu... |
def quat_to_rotmat(quaternions: Union[(torch.Tensor, numpy.ndarray)]) -> Union[(torch.Tensor, numpy.ndarray)]:
    """Convert quaternions (trailing dimension of size 4) to rotation matrices.

    Args:
        quaternions: tensor/array whose last dimension holds the 4 quaternion
            components (component order assumed to match quaternion_to_matrix
            — verify against that helper).

    Returns:
        Rotation matrices produced by the quaternion_to_matrix transform.

    Raises:
        ValueError: if the last dimension is not 4.
    """
    if quaternions.shape[-1] != 4:
        # Fixed: the original message contained a stray 'f' ("shape f{...}"),
        # a leftover from a misplaced f-string prefix.
        raise ValueError(f'Invalid input quaternions shape {quaternions.shape}.')
    transform = Compose([quaternion_to_matrix])
    return transform(quaternions)
class LinearClassifier(nn.Module):
def __init__(self, in_dim, output_size, num_layers=1):
super().__init__()
self.num_layers = num_layers
self.in_dim = in_dim
self.linear1 = nn.Linear(in_dim, in_dim)
self.linear2 = nn.Linear(in_dim, in_dim)
self.linear3 = nn.Linear(in... |
class ASM1684NameBreakpoint(Breakpoint):
    """Breakpoint matching 1684 assembly opcode names."""

    # Identifier used to select this breakpoint type.
    type = '1684-asm'
    # Matches a leading run of word characters (the opcode mnemonic).
    pattern = re.compile('^\\w+')
    # NOTE(review): first parameter is named ``cls`` but no @classmethod
    # decorator is visible here — presumably the Breakpoint machinery calls
    # this on instances or applies the decorator elsewhere; confirm against
    # the base class.
    def match_break(cls, text, tdb: TdbCmdBackend) -> bool:
        # Local import avoids a module-level import cycle with the
        # target_1684 package.
        from ..target_1684.regdef import op_class_dic
        # True iff the text is a known 1684 opcode name.
        if (text in op_class_dic):
            return True
        return False
def changeCyclicTriangleC1(G, A, i):
    """Count directed triangles through node ``i``: arcs i->u->v->i with v != i.

    ``A`` is accepted for interface uniformity but is not used here.
    """
    count = 0
    for mid in G.outIterator(i):
        for end in G.outIterator(mid):
            if end != i and G.isArc(end, i):
                count += 1
    return count
def test_pipeline_ducktyping():
pipeline = make_pipeline(Mult(5))
pipeline.predict
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf())
assert (not hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline('passth... |
class ProxyAnchorLoss(WeightRegularizerMixin, BaseMetricLossFunction):
def __init__(self, num_classes, embedding_size, margin=0.1, alpha=32, **kwargs):
super().__init__(**kwargs)
self.proxies = torch.nn.Parameter(torch.Tensor(num_classes, embedding_size))
self.weight_init_func(self.proxies)
... |
class EnSpellCorrector():
def __init__(self, word_freq_dict: dict=None, custom_confusion_dict: dict=None, en_dict_path: str=None):
if (word_freq_dict and en_dict_path):
raise ValueError('word_freq_dict and en_dict_path can not be set at the same time.')
if (word_freq_dict is None):
... |
def test_unary():
    """Unary negation must propagate both attrs and behavior by identity."""
    arr = ak.Array([1, 2, 3], behavior={'foo': 'BAR'}, attrs={'hello': 'world'})
    negated = (- arr)
    assert negated.attrs is arr.attrs
    assert arr.behavior is negated.behavior
def get_gpu_count():
    """Return the number of visible GPUs, preferring torch over TF; 0 if neither."""
    if is_torch_available():
        import torch
        return torch.cuda.device_count()
    if is_tf_available():
        import tensorflow as tf
        return len(tf.config.list_physical_devices('GPU'))
    return 0
class EntryPoint(object):
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if (not MODULE(module_name)):
raise ValueError('Invalid module name', module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extra... |
def evaluate(ref_file, trans_file, metric, subword_option=None):
if (metric.lower() == 'bleu'):
evaluation_score = _bleu(ref_file, trans_file, subword_option=subword_option)
elif (metric.lower() == 'rouge'):
evaluation_score = _rouge(ref_file, trans_file, subword_option=subword_option)
elif ... |
def advect(vf: ti.types.ndarray(ndim=2), qf: ti.types.ndarray(ndim=2), new_qf: ti.types.ndarray(ndim=2)):
    """Advect the quantity field ``qf`` along the velocity field ``vf`` into ``new_qf``.

    Each cell is traced backwards through the velocity field (via the
    ``backtrace`` helper — presumably a semi-Lagrangian step; confirm there)
    and the quantity is resampled with bilinear interpolation, then scaled
    by the module-level ``dye_decay`` factor. ``dt`` is also a module-level
    global.
    """
    # Taichi struct-for over the 2-D field; iterations run in parallel.
    for (i, j) in vf:
        # Sample at the cell center (offset by 0.5).
        p = (ti.Vector([i, j]) + 0.5)
        p = backtrace(vf, p, dt)
        new_qf[(i, j)] = (bilerp(qf, p) * dye_decay)
def ClientStateToString(state):
    """Return the display name for a ClientState value, 'UNKNOWN' otherwise."""
    labelled = (
        (ClientState.idle, 'IDLE'),
        (ClientState.training, 'TRAINING'),
        (ClientState.validating, 'VALIDATING'),
    )
    for known, label in labelled:
        if state == known:
            return label
    return 'UNKNOWN'
def create_dir(dir_path, cover=False):
    """Create ``dir_path`` (with parents) if needed.

    Args:
        dir_path: directory path to create.
        cover: when True, an existing directory at ``dir_path`` is removed
            (including its contents) and recreated empty.
    """
    import shutil  # local import keeps this fix self-contained

    if cover and os.path.exists(dir_path):
        # Fixed: os.removedirs only removes *empty* directories (raising
        # OSError on a non-empty one) and also climbs up deleting empty
        # parents; shutil.rmtree removes exactly this tree regardless of
        # its contents.
        shutil.rmtree(dir_path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
def _create_luke_config(bert_config, entity_vocab_size, entity_emb_size):
    """Assemble a LukeConfig from a BERT config plus entity-vocabulary settings."""
    base_kwargs = bert_config.to_dict()
    return LukeConfig(entity_vocab_size=entity_vocab_size,
                      bert_model_name=BERT_MODEL_NAME,
                      entity_emb_size=entity_emb_size,
                      **base_kwargs)
class SubsetRandomSampler(Sampler):
    """Sampler that yields the given indices in a fresh random order per epoch."""

    def __init__(self, indices):
        # Stored as-is; the permutation is drawn anew on every __iter__ call.
        self.indices = indices

    def __iter__(self):
        permutation = torch.randperm(len(self.indices))
        return (self.indices[j] for j in permutation)

    def __len__(self):
        return len(self.indices)
class GraphSearches(base_graph_filter.BaseGraphFilter, ABC):
def _node_filter(self, node_matcher: node_matcher.BaseNodeMatcher) -> list:
return [n for n in self.nodes if node_matcher.apply(n)]
def _edge_filter(self, edge_matcher: edge_matcher.BaseEdgeMatcher) -> list:
edge_list = []
for ... |
class RandomCrop(object):
    """Callable transform: crop a random region of the configured size."""

    def __init__(self, size):
        # Crop size as expected by T.RandomCrop.get_params.
        self.size = size

    def __call__(self, img):
        params = T.RandomCrop.get_params(img, self.size)
        return crop(img, params)
def register_Ns3MmWaveBeamforming_methods(root_module, cls):
cls.add_constructor([param('ns3::MmWaveBeamforming const &', 'arg0')])
cls.add_constructor([param('uint32_t', 'enbAntenna'), param('uint32_t', 'ueAntenna')])
cls.add_method('CalcRxPowerSpectralDensity', 'ns3::Ptr< ns3::SpectrumValue >', [param('ns... |
def _create_dataset(uri, batch_size, shuffle, no_image_normalization, cache_dir, overwrite_cache, create_cache_explicitly, prepare_data_iterator, dataset_index):
class Dataset():
pass
dataset = Dataset()
dataset.uri = uri
dataset.cache_dir = cache_dir
dataset.normalize = (not no_image_normal... |
def compile_cuda_module(host_args):
libname = ('_cext_gpu.lib' if (sys.platform == 'win32') else 'lib_cext_gpu.a')
lib_out = ('build/' + libname)
if (not os.path.exists('build/')):
os.makedirs('build/')
(_, nvcc) = get_cuda_path()
print('NVCC ==> ', nvcc)
arch_flags = '-arch=sm_37 -genco... |
def visualize_views(views, highlight_silhouette=False, show=True, save_path: Path=None):
if highlight_silhouette:
images = [((v.mask + 1.0) * v.color).clamp_(min=0.0, max=1.0).cpu() for v in views]
else:
images = [v.color.cpu() for v in views]
(fig, axs) = create_mosaic_figure(images)
if... |
def cast_with_native_amp(func: Callable, mixed_precision: Optional[str]=None) -> Callable:
if (mixed_precision not in ('fp16', 'bf16')):
logger.warning(f'Unknown mixed precision mode: {mixed_precision}, falling back to fp32.')
return func
if ((mixed_precision == 'fp16') and is_torch_version('>='... |
class SelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_... |
class VmfSQVAETrainer(TrainerBase):
def __init__(self, cfgs, flgs, train_loader, val_loader, test_loader):
super(VmfSQVAETrainer, self).__init__(cfgs, flgs, train_loader, val_loader, test_loader)
self.metric_semseg = SegmentationMetric(cfgs.network.num_class)
self.plots = {'loss_train': [], ... |
def fgsd(graph):
    """Compute the FGSD embedding of a single graph."""
    model = FGSD()
    model._set_seed()
    model._check_graphs([graph])
    return model._calculate_fgsd(graph)
def parse_search_arg(search):
groups = search.split()
entries = {k: vs for (k, vs) in (g.split('=') for g in groups)}
entry_names = list(entries.keys())
sets = [list((f'--{k} {v}' for v in vs.split(':'))) for (k, vs) in entries.items()]
matrix = [list(x) for x in itertools.product(*sets)]
return... |
def test_mortality(tmp_path: pathlib.Path):
ontology = DummyMortalityOntology()
labeler = InpatientMortalityLabeler(ontology)
for outcome_code in ['SNOMED/', 'DEATH_CHILD']:
events_with_labels: EventsWithLabels = [(event((2000, 1, 1), 'Visit/IP', end=datetime.datetime(2000, 1, 1), omop_table='visit_... |
def test_totalvi_auto_transfer_mudata():
adata = synthetic_iid()
protein_adata = synthetic_iid(n_genes=50)
mdata = MuData({'rna': adata, 'protein': protein_adata})
TOTALVI.setup_mudata(mdata, batch_key='batch', modalities={'rna_layer': 'rna', 'batch_key': 'rna', 'protein_layer': 'protein'})
model = ... |
def transcribe(model, device, wav):
inputs = model['tokenizer'](wav, sampling_rate=16000, return_tensors='pt', padding='longest')
input_values = inputs.input_values.to(device)
attention_mask = inputs.attention_mask.to(device)
logits = model['model'](input_values, attention_mask=attention_mask).logits
... |
_config
def task_finetune_nlvr2_randaug():
exp_name = 'finetune_nlvr2_randaug'
datasets = ['nlvr2']
train_transform_keys = ['pixelbert_randaug']
loss_names = _loss_names({'nlvr2': 1})
batch_size = 128
max_epoch = 10
max_steps = None
warmup_steps = 0.1
draw_false_image = 0
learnin... |
def show_seg_result_meshlab(data, result, out_dir, palette, show=False, snapshot=False):
points = data['points'][0][0].cpu().numpy()
pts_filename = data['img_metas'][0][0]['pts_filename']
file_name = osp.split(pts_filename)[(- 1)].split('.')[0]
pred_seg = result[0]['semantic_mask'].numpy()
if (palet... |
def fallback_cmd_s3_sync(src_path, dest_path):
    """Build an ``aws s3 sync`` command line for ``src_path`` -> ``dest_path``.

    Paths are shell-quoted so that paths containing spaces or shell
    metacharacters cannot break (or inject into) the command. shlex.quote
    leaves plain paths and ``s3://`` URIs unchanged, so existing callers see
    identical output.
    """
    import shlex  # local import: only needed here

    return f'aws s3 sync --no-follow-symlinks {shlex.quote(src_path)} {shlex.quote(dest_path)}'
class CosWarmupAdamW(torch.optim.AdamW):
def __init__(self, params, lr, weight_decay, betas, warmup_iter=None, max_iter=None, warmup_ratio=None, power=None, **kwargs):
super().__init__(params, lr=lr, betas=betas, weight_decay=weight_decay, eps=1e-08)
self.global_step = 0
self.warmup_iter = n... |
class BufferDict(dict):
def capacity(self) -> int:
capacities = []
for (_, _, v) in iterate_recursively(self):
capacities.append(v.shape[0])
return max(capacities)
def index(self, indices):
return self.index_func(self, indices)
def index_func(self, x, indices):
... |
def test_packages(packages, only_failures=False):
rows = [['Status', 'Package', 'GAP Output']]
for pkgdir in packages:
pkg = pkgdir.split('-')[0]
orig_warning_level = libgap.InfoLevel(libgap.InfoWarning)
libgap.SetInfoLevel(libgap.InfoWarning, 0)
try:
output = libgap.... |
def _output_csv(file, results):
file.write('benchmark,device,num_threads,numel,shape,contiguous,dim,mean (us),median (us),iqr (us)\n')
for measurement in results:
metadata = measurement.metadata
(device, dim, shape, name, numel, contiguous) = (metadata['device'], metadata['dim'], metadata['shape... |
def _ffc(content, equality=False):
e = list(content)
a = ([(len(e) - 1)] * sum(e))
r = ([0] * sum(e))
a[0] = 0
e[0] -= 1
k = len(e)
rng_k = list(range(k))
rng_k.reverse()
dll = DoublyLinkedList(rng_k)
if (not e[0]):
dll.hide(0)
(yield from _fast_fixed_content(a, e, 2,... |
def encode_sequence(sequence, rnns, embedder, dropout_amount=0.0):
batch_size = 1
layer_states = []
for rnn in rnns:
hidden_size = rnn.weight_hh.size()[1]
if rnn.weight_hh.is_cuda:
h_0 = torch.cuda.FloatTensor(batch_size, hidden_size).fill_(0)
c_0 = torch.cuda.FloatTe... |
def get_data_parallel_group():
    """Return Megatron's data-parallel process group when enabled, else the global group."""
    if not _USE_MEGATRON:
        return get_global_group()
    # Imported lazily: fairseq's megatron shim is only present when enabled.
    from fairseq.model_parallel.megatron import mpu
    return mpu.get_data_parallel_group()
class PadCollate():
def __init__(self, dim=0):
self.dim = dim
def pad_collate(self, batch):
study_lens = list(map((lambda x: x[0].shape[self.dim]), batch))
max_len = max(study_lens)
num_components = max((len(x) for x in batch))
batch = [((pad_tensor(x[0], pad=max_len, dim... |
class Node(object):
    """Tree node holding a label, its covered phrase span, and parent/child links."""

    def __init__(self):
        # Structural links.
        self.parent = None       # parent Node, None when unset
        self.children = []       # child Node objects
        self.parent_idx = None   # index of the parent, None when unset
        # Node content.
        self.label = None        # category label for this node
        self.phrase = ''         # text covered by this node
        # Span boundaries within the source sequence.
        self.start_idx = 0
        self.end_idx = 0
        # Flags.
        self.root = False        # True only for the tree root
        self.terminal = False    # True for leaf nodes
def register_Ns3MinMaxAvgTotalCalculator__Unsigned_int_methods(root_module, cls):
cls.add_constructor([param('ns3::MinMaxAvgTotalCalculator< unsigned int > const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('Output', 'void', [param('... |
_properties
class BufferTiling(transformation.SingleStateTransformation):
map1_exit = transformation.PatternNode(nodes.MapExit)
array = transformation.PatternNode(nodes.AccessNode)
map2_entry = transformation.PatternNode(nodes.MapEntry)
tile_sizes = ShapeProperty(dtype=tuple, default=(128, 128, 128), de... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.