code stringlengths 101 5.91M |
|---|
class AutoModelForSequenceClassification(object):
def __init__(self):
raise EnvironmentError('AutoModelWithLMHead is designed to be instantiated using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` method.')
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, *... |
def execute_shifts(v):
    """Apply the accumulated sibling shifts to every child of *v* in place.

    Walks the children right-to-left, adding the running shift to each
    child's `x` position and `mod` value while accumulating the per-child
    `change`/`shift` contributions — this matches the shift-execution step
    of linear-time tree-layout algorithms (Buchheim et al.) — presumably;
    confirm against the surrounding layout code.
    """
    running_shift = 0
    running_change = 0
    for child in reversed(v.children):
        child.x += running_shift
        child.mod += running_shift
        running_change += child.change
        running_shift += child.shift + running_change
class hyperparams(object):
def __init__(self):
self.train_epoch = 300
self.test_freq = 1
self.exp_name = 'Correct_Roll2MidiNet'
self.channels = 1
self.h = 51
self.w = 100
self.iter_train_g_loss = []
self.iter_train_d_loss = []
self.iter_test_g_... |
def GetTriadParticip(tspec, *args):
if (type(tspec) == PUNGraph):
return GetTriadParticip_PUNGraph(tspec, *args)
if (type(tspec) == PUndirNet):
return GetTriadParticip_PUndirNet(tspec, *args)
if (type(tspec) == PDirNet):
return GetTriadParticip_PDirNet(tspec, *args)
if (type(tspe... |
def memory_usage_hooks() -> HookedMemoryUsage:
usage = HookedMemoryUsage()
def pack(ten: T.Tensor) -> Any:
acc = (usage.forward if usage.forward else 0)
usage.forward = (acc + (ten.numel() * ten.element_size()))
return ten
def unpack(ten: T.Tensor) -> T.Tensor:
acc = (usage.b... |
def test_na_writable_attributes_deletion():
    """Deleting the writable attributes of an NA object must raise AttributeError."""
    na_obj = np.NA(2)
    for attr_name in ('payload', 'dtype'):
        assert_raises(AttributeError, delattr, na_obj, attr_name)
def env_loader(env_name: str, dataset_dir: str, data_percentage: int=100, batch_size: int=8, trajectory_length: int=1, **_: Any) -> Tuple[(dm_env.Environment, tf.data.Dataset)]:
data_name = env_name
if (env_name not in _ENV_FACTORY):
_env_setting = env_name.split('_')
if (len(_env_setting) > 1):... |
class OzaBaggingClassifier(BaseSKMObject, ClassifierMixin, MetaEstimatorMixin):
def __init__(self, base_estimator=KNNADWINClassifier(), n_estimators=10, random_state=None):
super().__init__()
self.ensemble = None
self.actual_n_estimators = None
self.classes = None
self._rando... |
def register_Ns3MmWaveNetDevice_methods(root_module, cls):
cls.add_constructor([param('ns3::MmWaveNetDevice const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::em... |
class SawyerShelfPlaceEnvV2(SawyerXYZEnv):
def __init__(self):
liftThresh = 0.04
goal_low = ((- 0.1), 0.8, 0.299)
goal_high = (0.1, 0.9, 0.301)
hand_low = ((- 0.5), 0.4, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = ((- 0.1), 0.5, 0.019)
obj_high = (0.1, 0.6, 0.02... |
def norm(input, p='fro', dim=None, keepdim=False, out=None, dtype=None):
if (not torch.jit.is_scripting()):
if ((type(input) is not Tensor) and has_torch_function((input,))):
return handle_torch_function(norm, (input,), input, p=p, dim=dim, keepdim=keepdim, out=out, dtype=dtype)
ndim = input... |
def partition_list(vertices, workers):
batch_size = (((len(vertices) - 1) // workers) + 1)
part_list = []
part = []
count = 0
for (v1, nbs) in enumerate(vertices):
part.append((v1, nbs))
count += 1
if ((count % batch_size) == 0):
part_list.append(part)
... |
def validate_control_flow_region(sdfg: 'dace.sdfg.SDFG', region: 'dace.sdfg.state.ControlFlowRegion', initialized_transients: Set[str], symbols: dict, references: Set[int]=None, **context: bool):
from dace.sdfg import SDFGState
from dace.sdfg.scope import is_in_scope
if ((len(region.source_nodes()) > 1) and... |
class BertForMultipleChoice():
    """Import-time placeholder for the real BertForMultipleChoice.

    Dummy-object pattern: every entry point just calls requires_pytorch,
    which presumably raises an informative error when PyTorch is not
    installed — confirm requires_pytorch's behavior in its definition.
    """
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
    def from_pretrained(self, *args, **kwargs):
        # Same guard as the constructor; arguments are intentionally ignored.
        requires_pytorch(self)
def main():
args = parser.parse_args()
if (args.device is None):
device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
else:
device = torch.device(args.device)
fid_value = calculate_fid_given_paths(args.path, args.batch_size, device, args.dims)
print('FID: ', fid_va... |
def test_get_reuse_parameters(default_test_case):
float0 = stmt.FloatPrimitiveStatement(default_test_case, 5.0)
float1 = stmt.FloatPrimitiveStatement(default_test_case, 5.0)
default_test_case.add_statement(float0)
default_test_case.add_statement(float1)
sign_mock = MagicMock(inspect.Signature)
p... |
class BatchLogs():
def __init__(self):
self.metric_dict = {}
def append(self, metrics, data):
if (not isinstance(metrics, list)):
sys.exit('Please specify a list of metrics to log')
for (i, metric) in enumerate(metrics):
data[i] = np.array(data[i])
if ... |
def set_log_level(verbose, is_estimator):
    """Map a 0-3 verbosity level onto TensorFlow's logging configuration.

    Uses the TF2-style logger when applicable, otherwise falls back to the
    TF1 `tf.logging` API for verbose >= 2.
    """
    assert 0 <= verbose <= 3
    # `or` short-circuits exactly like the original condition, so
    # tf_is_version2() is only consulted when the first clause is False.
    wants_v2_logger = (not is_estimator and verbose == 1) or tf_is_version2()
    if wants_v2_logger:
        # Higher verbosity -> lower (more permissive) logging level.
        tf.get_logger().setLevel((4 - verbose) * 10)
    elif verbose >= 2:
        tf.logging.set_verbosity(tf.logging.INFO)
def log_current_datetime():
    """Emit a separator line followed by the current wall-clock time at DEBUG level."""
    now = datetime.datetime.now()
    LOGGER.debug(SEP_STR)
    LOGGER.debug(f'Time of execution: {now}')
_without_pywt
def test_calibrate_denoiser_extra_output():
parameter_ranges = {'sigma': (np.linspace(0.1, 1, 5) / 2)}
(_, (parameters_tested, losses)) = calibrate_denoiser(noisy_img, _denoise_wavelet, denoise_parameters=parameter_ranges, extra_output=True)
all_denoised = [denoise_invariant(noisy_img, _denois... |
class TextBiLSTM(nn.Module):
def __init__(self, config):
super(TextBiLSTM, self).__init__()
self.num_classes = config['num_classes']
self.learning_rate = config['learning_rate']
self.dropout = config['dropout']
self.hidden_dims = config['hidden_dims']
self.rnn_layers ... |
def parse(exit_code, log, output):
(findings, infos) = ([], set())
(errors, fails) = sb.parse_utils.errors_fails(exit_code, log)
errors.discard('EXIT_CODE_1')
analysis_complete = set()
for line in log:
if (DEPRECATED in line):
infos.add(DEPRECATED)
continue
if... |
def parse_line_ecir(line, query, user):
line = line.strip().split()
if (len(line) == 5):
sub = line[2]
rel = line[3]
obj = line[4]
val = [1]
rank = int(line[1].split('-')[1])
return (sub, rel, obj, val, rank, 1)
elif (len(line) == 3):
rank = int(line[1... |
def test_linfit():
x = N.array([(- 1.7237128), 1.8712276, (- 0.), (- 0.), 1.3416969, 1.3757038, (- 1.3703436), 0., (- 0.), 0.])
y = N.array([0., 6.5807428, 1.4582725, 2.7270851, 5.5969253, 5.624928, 0.787615, 3.2599759, 2.9771762, 4.5936475])
ey = (0.07 * N.ones(y.shape, dtype='float64'))
p0 = N.array([... |
def WebDataset(urls, shardshuffle=True, cache_dir=default_cache_dir, cache_size=default_cache_size, cache_name=default_cache_name, cache_verbose=default_cache_verbose, splitter=split_by_worker, nodesplitter=True, handler=reraise_exception, length=None):
result = ShardList(urls, shuffle=shardshuffle, splitter=splitt... |
def ensure_2d_arguments(f, squeeze_ret=True):
(f)
def wrapped(*args, **kwargs):
new_args = []
for arg in args:
if isinstance(arg, T.TensorVariable):
if (arg.ndim == 1):
arg = arg.dimshuffle('x', 0)
elif (arg.ndim > 2):
... |
def main():
trajs = DataLoader.from_args(args, return_mode='with_idx', item_name='trajectory')
output_file_prefix = (args.output_file_prefix or trajs.base_path)
output_path = f'{output_file_prefix}_eval{args.eval_results_out_suffix}_{args.eval_type}.jsonl'
if (args.critique_rounds > 0):
raise Va... |
class ConvertLmConfig():
checkpoint_path: str
output_dir: str
upload_to_hf: Optional[RepoRef] = None
model: LmConfig = Gpt2Config()
save_tokenizer: bool = True
tokenizer: str = 'gpt2'
override_vocab_size: Optional[int] = None
config_overrides: Optional[dict] = None
_property
def ... |
def getTrainMetricPerEpoch(train_metric, updates_per_epoch):
train_metric_per_epoch = []
temp_sum = 0.0
for i in range(len(train_metric)):
temp_sum += train_metric[i]
if ((i % updates_per_epoch) == (updates_per_epoch - 1)):
train_metric_per_epoch.append((temp_sum / updates_per_ep... |
def test_ufunc_add_outer_simple():
    """The kernel must reproduce np.add.outer on two small random int32 vectors."""
    lhs = np.random.randint(1, 10, size=(3,), dtype=np.int32)
    rhs = np.random.randint(1, 10, size=(3,), dtype=np.int32)
    produced = ufunc_add_outer_simple(lhs, rhs)
    expected = np.add.outer(lhs, rhs)
    assert np.array_equal(expected, produced)
def normal_quantile(p, mean=0, std=1):
    """Return the p-quantile (inverse CDF) of a Normal(mean, std) distribution.

    Parameters:
        p: cumulative probability, must lie strictly in (0, 1).
        mean: distribution mean (default 0).
        std: distribution standard deviation, must be > 0 (default 1).

    Returns:
        The quantile as a float, or the string 'None' when it cannot be
        computed (p outside (0, 1), std <= 0, non-numeric input).  The
        string return value is odd but kept for backward compatibility
        with existing callers.
    """
    # stdlib (3.8+) inverse normal CDF; mathematically equivalent to the
    # original mean + std * sqrt(2) * inv_erf(2p - 1) but removes the
    # dependency on the hand-rolled inv_erf helper.
    from statistics import NormalDist
    try:
        return NormalDist(mean, std).inv_cdf(p)
    except Exception:
        # Preserve the original contract: any failure yields the string 'None'.
        return 'None'
class ErrorRateStats(MetricStats):
def __init__(self, merge_tokens=False, split_tokens=False, space_token='_', keep_values=True, extract_concepts_values=False, tag_in='', tag_out=''):
self.clear()
self.merge_tokens = merge_tokens
self.split_tokens = split_tokens
self.space_token = sp... |
def mock_library_log_means_and_vars(mock_contrastive_adata_manager, mock_n_batch):
    """Fixture helper: per-batch library-size statistics for the mock manager.

    Thin wrapper delegating to _init_library_size — presumably returning
    (log means, log variances); confirm against that helper.
    """
    stats = _init_library_size(mock_contrastive_adata_manager, n_batch=mock_n_batch)
    return stats
def register_Ns3AttributeConstructionList_methods(root_module, cls):
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
cls.add_constructor([])
cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Pt... |
class DepthConv(nn.Module):
def __init__(self, fmiddle, opt, kw=3, padding=1, stride=1):
super().__init__()
self.kw = kw
self.stride = stride
self.unfold = nn.Unfold(kernel_size=(self.kw, self.kw), dilation=1, padding=1, stride=stride)
if opt.mpdist:
BNFunc = nn.S... |
def compute_aspect_term(model, input, label, tokenizer, args):
break_tokens = tokenizer.encode(tokenizer._eos_token.content)
MAX_LEN = args.block_size
batch_pred = []
batch_ground = []
for (inp, ground) in zip(input, label):
inp_text = tokenizer.decode(inp).split('<|term|>')[0].strip()
... |
def test_forbid_value_and_auth():
    """Combining an exact filter (method=) with its regex twin (method_regex=) must raise UsageError."""
    filter_set = filters.FilterSet()
    # include() is expected to reject the conflicting pair with a message
    # matching ERROR_EXPECTED_AND_REGEX — confirm against the filters module.
    with pytest.raises(UsageError, match=filters.ERROR_EXPECTED_AND_REGEX):
        filter_set.include(method='POST', method_regex='GET')
class BaseDataLoader():
    """No-op base class for data loaders; subclasses override these hooks."""

    def __init__(self):
        # Nothing to set up in the base class.
        pass

    def initialize(self, opt):
        """Remember the option object for later use by subclasses."""
        self.opt = opt

    def load_data(self):
        """Base implementation loads nothing."""
        return None
def test_list_numpy_1():
    """Parsing 'var * float64' yields a ListType that round-trips through str()."""
    source = 'var * float64'
    parsed = deduce_type(source)
    assert isinstance(parsed, ak.types.ListType)
    assert str(parsed) == source
def can_change_cost_type(args):
    """Return True if any argument string mentions S_COST_TYPE or H_COST_TRANSFORM.

    `args` is an iterable of strings (e.g. planner command-line parts).
    """
    for part in args:
        if 'S_COST_TYPE' in part or 'H_COST_TRANSFORM' in part:
            return True
    return False
class Problem3D(Problem):
    """Three-dimensional problem variant: caches the task map's dimensions."""

    def __init__(self, cfg: Config):
        super().__init__(cfg)
        # map_shape is assumed to be a 3-sequence (height, width, length)
        # — TODO confirm against the Config schema.
        self._height, self._width, self._length = cfg.task.map_shape
def load_candidate(path_to_candidate):
    """Read a candidate-ranking file and return the qid -> ranked documents mapping.

    All parsing is delegated to load_candidate_from_stream; this wrapper only
    manages the file handle.
    """
    with open(path_to_candidate, 'r') as stream:
        ranked_candidates = load_candidate_from_stream(stream)
    return ranked_candidates
def RunInitNet(model):
    """Execute each data-parallel init net once, then create the model's net."""
    for net in model._data_parallel_model_init_nets:
        workspace.RunNetOnce(net)
    CreateNet(model)
class BaseDataFrameField(BaseAnnDataField):
def __init__(self, registry_key: str, attr_key: Optional[str], field_type: Literal[('obs', 'var')]=None, required: bool=True) -> None:
super().__init__()
if (required and (attr_key is None)):
raise ValueError('`attr_key` cannot be `None` if `re... |
class DDPGradientStatsHook():
def __init__(self, ddp_module):
try:
ddp_module.register_comm_hook(self, self._hook_fn)
except AttributeError:
raise ValueError('DDPGradientStatsHook does not support non-DDP wrapped modules')
self._clear_state()
def _clear_state(self... |
class ProductionCollecotr(Visitor_Recursive):
_type_spec: TypeSpec
_prod_spec: ProductionSpec
def __init__(self, type_spec):
self._type_spec = type_spec
self._prod_spec = ProductionSpec()
def _process_opt_arg(opt_arg):
return str(opt_arg.children[0])
def _create_index_map(opt... |
class ListCommand(Command):
name = 'list'
usage = '\n %prog [options]'
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option('-o', '--outdated', action='store_true', d... |
def get_logger(model_dir, filename='train.log'):
global logger
logger = logging.getLogger(os.path.basename(model_dir))
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s')
if (not os.path.exists(model_dir)):
os.makedirs(model_dir)
... |
def get_evaluation(name):
    """Import evaluations.<name> and return the evaluation class it defines.

    The class name is derived from the module name via _module_to_class.
    """
    # fromlist=[''] forces __import__ to return the submodule itself.
    module = __import__(f'evaluations.{name}', fromlist=[''])
    class_name = _module_to_class(name)
    return getattr(module, class_name)
def all_reduce(inputs, outputs=None, op=SUM, streams=None, comms=None):
    """NCCL all-reduce over `inputs`, writing into `outputs` (in place when omitted).

    Both sequences are validated before handing off to the C extension.
    """
    _check_sequence_type(inputs)
    outputs = inputs if outputs is None else outputs
    _check_sequence_type(outputs)
    torch._C._nccl_all_reduce(inputs, outputs, op, streams, comms)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output-dir', required=True)
parser.add_argument('--scaling-value', type=int, help='maximum value for scaling in FEXIPRO')
parser.add_argument('--sigma', type=float, help='percentage of SIGMA for SVD incremental prune')
parser.add_... |
_grad()
def calculate_metrics(nets, args, step, mode):
print('Calculating evaluation metrics...')
assert (mode in ['latent', 'reference'])
device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
domains = os.listdir(args.val_img_dir)
domains.sort()
num_domains = len(domains)
... |
def filter_type_14_4_22(moves, rival_move):
rival = collections.Counter(rival_move)
rival_rank = my_rank = 0
for (k, v) in rival.items():
if (v == 4):
rival_rank = k
new_moves = list()
for move in moves:
mymove = collections.Counter(move)
for (k, v) in mymove.item... |
def asd(result, reference, voxelspacing=None, connectivity=1):
    """Average surface distance: mean of the surface distances between result and reference."""
    return __surface_distances(result, reference, voxelspacing, connectivity).mean()
def create_kb(path):
print('Loading from items_wikidata_n.json')
entity_items = json.load(open(os.path.join(path, 'items_wikidata_n.json'), 'r'))
max_id = 0
for idx in entity_items:
max_id = max(max_id, get_id(idx))
graph = [{} for i in range((max_id + 1))]
cont = 0
for idx in entity... |
class HitBallWithQueue(Task):
def init_task(self) -> None:
queue = Shape('queue')
success_sensor = ProximitySensor('success')
ball = Shape('ball')
self.register_graspable_objects([queue])
cond_set = ConditionSet([GraspedCondition(self.robot.gripper, queue), DetectedCondition(... |
# NOTE(review): the decorator prefixes were garbled in this dump
# ('.experimental', '.parametrize', '.usefixtures' at top level is invalid
# Python); restored as pytest marks — confirm against the upstream source.
@pytest.mark.experimental
@pytest.mark.parametrize('pad_columns', ['user_id'])
@pytest.mark.usefixtures('dataframe_pandas')
def test_invalid_column_dtype_pandas(pad_columns, dataframe_pandas):
    """Padder.transform must reject a pandas frame whose pad column has an invalid dtype."""
    with pytest.raises(ValueError):
        Padder(pad_columns=pad_columns).transform(dataframe_pandas)
def _sympysage_real_interval(self):
    """Convert this real interval to a Sage RealIntervalField(1024) element.

    The endpoints `self.a` / `self.b` are mapped through the domain's Sage
    fraction field, then their interval hull is taken via union().
    """
    from sage.rings.real_mpfi import RealIntervalField
    field = RealIntervalField(1024)
    frac_field = self.dom._sage_().fraction_field()
    lower = field(frac_field(self.a))
    upper = field(frac_field(self.b))
    return lower.union(upper)
def register_Ns3ParfWifiManager_methods(root_module, cls):
cls.add_constructor([param('ns3::ParfWifiManager const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('SetHeSupported', 'void', [param('bool', 'enable')], is_virtual=True)
... |
.expansion
class ExpandBatchedMatMulCuBLAS(ExpandTransformation):
environments = [environments.cublas.cuBLAS]
def expansion(node, state, sdfg):
node.validate(sdfg, state)
(adesc, bdesc, cdesc) = (None, None, None)
for e in state.in_edges(node):
if (e.dst_conn == '_a'):
... |
def histogram(data, axis=0, r=None):
    """Count occurrences of the unique values in `data`.

    When `r` is given, instead returns a generator of histograms over all
    size-r combinations of `data`'s elements.
    """
    # Normalize plain arrays into the project's DataArray wrapper.
    if (not isinstance(data, DataArray)):
        data = DataArray(data, axis=axis)
    if (r is not None):
        # NOTE(review): the recursive call drops `axis` (each combination is
        # re-wrapped with the default axis=0) — confirm this is intended.
        return (histogram(d) for d in combinations(data, r=r))
    # NOTE(review): counting always runs along axis=1 regardless of the
    # `axis` argument, which only affects DataArray construction — verify.
    (_, counts) = numpy.unique(data, return_counts=True, axis=1)
    return counts
def load_state(model_dir, model, optimizer=None):
if (not os.path.exists((model_dir + '/checkpoint'))):
print("=> no checkpoint found at '{}', train from scratch".format(model_dir))
return (0, 0)
else:
ckpt = open((model_dir + '/checkpoint'))
model_path = ckpt.readlines()[0].spli... |
(start=cython.Py_ssize_t, end=cython.Py_ssize_t)
def line_iter(source):
if isinstance(source, basestring):
start = 0
while True:
end = source.find('\n', start)
if (end == (- 1)):
(yield source[start:])
return
(yield source[start:end... |
('/get_signers/<lastN>', methods=('GET',))
def get_signers(lastN):
web3 = connect_to_geth(app.web3_url, app.consensus)
latest = web3.eth.getBlock('latest').number
start = ((latest - int(lastN)) + 1)
if (start <= 0):
start = 1
signers = {}
for bk in range(start, (latest + 1)):
bkh... |
def knn_score(train_set, test_set, n_neighbours=2):
    """Per-test-point sum of distances to its n_neighbours nearest training points.

    Builds a flat FAISS L2 index over train_set (rows = samples) and sums the
    returned neighbour distances for each row of test_set.
    """
    index = faiss.IndexFlatL2(train_set.shape[1])
    index.add(train_set)
    distances, _ = index.search(test_set, n_neighbours)
    return np.sum(distances, axis=1)
def get_losses():
    """Return TensorFlow's losses module, preferring the TF2 compat.v1 path.

    Falls back to the legacy `tf.losses` when `tf.compat.v1.losses` does not
    exist (older TF1 installs).
    """
    try:
        losses_module = tf.compat.v1.losses
    except AttributeError:
        losses_module = tf.losses
    return losses_module
class GenSampledIndividuals(GenIndividuals):
    """Generator variant that yields a fresh SampledIndividual on every step."""
    def __next__(self):
        # Never raises StopIteration here, so iteration does not terminate
        # from this class itself.
        return SampledIndividual()
def RunEpoch(args, epoch, train_model, test_model, total_batch_size, num_shards, expname, explog):
log.info('Starting epoch {}/{}'.format(epoch, args.num_epochs))
epoch_iters = int(((args.epoch_size / total_batch_size) / num_shards))
test_epoch_iters = int(((args.test_epoch_size / total_batch_size) / num_sh... |
def calc_boomerang_tip(location, orientation):
    """Return the boomerang tip position: the first of the 15 r-vectors
    computed for the given location and orientation."""
    vectors = bm.get_boomerang_r_vectors_15(location, orientation)
    return vectors[0]
def get_data(data_subdir):
data_dir = os.path.join('..', 'data', data_subdir)
pro_dir = os.path.join(data_dir, 'pro_sg')
n_items = get_num_items(pro_dir)
train_data = load_train_data(os.path.join(pro_dir, 'train.csv'), n_items)
(vad_data_tr, vad_data_te) = load_tr_te_data(os.path.join(pro_dir, 'vali... |
def test_generalized_iterators():
assert (list(m.IntPairs([(1, 2), (3, 4), (0, 5)]).nonzero()) == [(1, 2), (3, 4)])
assert (list(m.IntPairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero()) == [(1, 2)])
assert (list(m.IntPairs([(0, 3), (1, 2), (3, 4)]).nonzero()) == [])
assert (list(m.IntPairs([(1, 2), (3, 4... |
class ScriptFile(object):
    """Wrapper around an installed-file record that fixes script headers on save."""

    def __init__(self, file):
        self._file = file
        # Mirror the record's paths for convenient attribute access.
        self.src_record_path = file.src_record_path
        self.dest_path = file.dest_path
        # Set to True by save() once fix_script reports a rewrite.
        self.changed = False

    def save(self):
        """Persist the underlying file, then repair its script header if needed."""
        self._file.save()
        self.changed = fix_script(self.dest_path)
class Mixed_4b(nn.Module):
def __init__(self):
super(Mixed_4b, self).__init__()
self.branch0 = nn.Sequential(BasicConv3d(480, 192, kernel_size=1, stride=1))
self.branch1 = nn.Sequential(BasicConv3d(480, 96, kernel_size=1, stride=1), SepConv3d(96, 208, kernel_size=3, stride=1, padding=1))
... |
def silent_net():
    """Build a dummy two-blob Caffe net whose outputs are both silenced, and
    return its protobuf description."""
    spec = caffe.NetSpec()
    spec.data, spec.data2 = L.DummyData(shape=[dict(dim=[3]), dict(dim=[4, 2])], ntop=2)
    spec.silence_data = L.Silence(spec.data, ntop=0)
    spec.silence_data2 = L.Silence(spec.data2, ntop=0)
    return spec.to_proto()
class MemoryChunkPythonArguments(MemoryChunk):
def declare_class_members(self):
return (' cdef int _n_%s\n' % self.name)
def init_class_members(self):
return je(ri(8, "\n count = args['{{ myself.name }}']\n self._n_args = count\n "), myself=self)
def setup... |
def setup(opt):
if (opt.caption_model == 'show_tell'):
model = ShowTellModel(opt)
elif (opt.caption_model == 'show_attend_tell'):
model = ShowAttendTellModel(opt)
elif (opt.caption_model == 'all_img'):
model = AllImgModel(opt)
elif (opt.caption_model == 'fc'):
model = FCM... |
class Dropout3d(_DropoutNd):
    """3D dropout module: forward delegates to F.dropout3d using the
    p/training/inplace settings — presumably stored by the _DropoutNd base;
    confirm against that class."""
    def forward(self, input: Tensor) -> Tensor:
        return F.dropout3d(input, self.p, self.training, self.inplace)
def quit_with_gc(func_or_gen):
generation = 2
def _quit_with_gc(f):
def decorated(*args, **kw):
import gc
ret = f(*args, **kw)
gc.collect(generation)
return ret
return decorated
if isinstance(func_or_gen, int):
generation = func_or_gen
... |
def make_registry(cls: Type):
def _register(cls: Type, subclass: Type, kwargs: Dict):
cls._registry_[subclass] = kwargs
def _unregister(cls: Type, subclass: Type):
del cls._registry_[subclass]
cls._registry_ = {}
cls.register = (lambda subclass, **kwargs: _register(cls, subclass, kwargs)... |
def build_backbone(args):
position_embedding = build_position_embedding(args)
train_backbone = (args.lr_backbone_ratio > 0)
if ('resnet' in args.backbone):
backbone = ResNet(name=args.backbone, train_backbone=train_backbone, return_interm_layers=False, dilation=False, freeze_bn=args.freeze_bn)
e... |
_properties
class GPUGridStridedTiling(transformation.SingleStateTransformation):
outer_map_entry = transformation.PatternNode(nodes.MapEntry)
inner_map_entry = transformation.PatternNode(nodes.MapEntry)
new_dim_prefix = Property(dtype=str, default='tile', desc='Prefix for new dimension name')
max_grid_... |
def test_count_binary_occurrences():
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names_out())
assert_array_equal([[3, 1, 1, 0, 0], [1, 2, 0, 1, 1]], X)
... |
class Test_Metropolis():
def setup_method(self):
self.T = 2.0
self.met = Metropolis(self.T)
self.res_new = OptimizeResult(success=True, fun=0.0)
self.res_old = OptimizeResult(success=True, fun=1.0)
def test_boolean_return(self):
ret = self.met(res_new=self.res_new, res_ol... |
def get_schema(query_column: str='query_id', item_column: str='item_id', timestamp_column: str='timestamp', rating_column: str='rating', has_timestamp: bool=True, has_rating: bool=True):
base = [StructField(query_column, IntegerType()), StructField(item_column, IntegerType())]
if has_timestamp:
base += ... |
def verification_performance(scores_plda):
ids = []
labels = []
positive_scores = []
negative_scores = []
for line in open(veri_file_path):
lab = int(line.split(' ')[0].rstrip().split('.')[0].strip())
enrol_id = line.split(' ')[1].rstrip().split('.')[0].strip()
test_id = line... |
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
prepare_file_system()
model_info = create_model_info(FLAGS.architecture)
if (not model_info):
tf.logging.error('Did not recognize architecture flag')
return (- 1)
maybe_download_and_extract(model_info['data_url'])
(graph, bot... |
class HearScene(Problem, Trainer):
_cfg(workspace=field('???', "\nWill put the following keys into this workspace:\n 'train_dataset', 'train_sampler', 'valid_dataset', 'valid_sampler', and 'task'", 'str or Path or Workspace'), corpus=dict(CLS=field('???', '\nThe corpus class. You can add the **kwargs right below t... |
class GPU():
def __init__(self, ignore_warnings=False):
self._consumption = 0
self._ignore_warnings = ignore_warnings
self.is_gpu_available = is_gpu_available()
if ((not self.is_gpu_available) and (not self._ignore_warnings)):
warnings.warn(message='\n\nThere is no any av... |
def test_ListArray_RecordArray_NumpyArray():
v2a = ak.contents.listarray.ListArray(ak.index.Index(np.array([4, 100, 1], np.int64)), ak.index.Index(np.array([7, 100, 3, 200], np.int64)), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([6.6, 4.4, 5.5, 7.7, 1.1, 2.2, 3.3, 8.8]))], ['nes... |
def read_scp_info(filename, limit=numpy.inf):
res = []
with open(filename, 'r') as f:
for line in f:
(uttid, pointer) = line.strip().split()
p = pointer.rfind(':')
(arkfile, offset) = (pointer[:p], int(pointer[(p + 1):]))
with open(arkfile, 'rb') as g:
... |
class Compose(transforms.Compose):
def __init__(self, fns, additional_targets=None):
super().__init__(fns)
self.additional_targets = (additional_targets or {})
self.ignore_fns = {'mask': ['Normalize']}
def _call_fn_given_type(self, fn, k, v):
t = self.additional_targets.get(k)
... |
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, radix=1, cardinality=1, bottleneck_width=64, avd=False, avd_first=False, dilation=1, is_first=False, rectified_conv=False, rectify_avg=False, norm_layer=None, dropblock_prob=0.0, last_gamma=False):
... |
def r_cond1(t):
    """Wrap the first condition in *t* with a call-budget guard.

    Returns a single-entry list [('cond', guarded)] where `guarded(k, n)`
    short-circuits to (k, n, False, False) once n exceeds MAX_FUNC_CALL,
    and otherwise defers to the original condition.
    """
    base_cond = t[0]

    def guarded(k, n):
        if n > MAX_FUNC_CALL:
            return (k, n, False, False)
        return base_cond(k, n)

    return [('cond', guarded)]
def create_evaluator(model):
    """Reset the model and wrap it in a ForecastEvaluator.

    Fixed evaluation schedule: hourly cadence, 6-hour horizon, retraining
    every 12 hours over a 14-day training window.
    """
    model.reset()
    config = ForecastEvaluatorConfig(cadence='1h', horizon='6h', retrain_freq='12h', train_window='14d')
    return ForecastEvaluator(model=model, config=config)
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
val = str(val)
if (val in NULL_VALUES):
return [np.nan]
if (not validate_bg_egn(val)):
if (errors == 'raise'):
raise ValueError(f'Unable to parse value {val}')
error_result = (val if (error... |
def test_pytest_parametrize_fixture(testdir):
testdir.make_test('\nfrom hypothesis import settings, HealthCheck\n\n\ndef pytest_generate_tests(metafunc):\n metafunc.parametrize("inner", ("A", "B"))\n\()\ndef param(inner):\n return inner * 2\n\()\(suppress_health_check=[HealthCheck.function_scoped_fixture])\nd... |
def GetMxDegNId(tspec, *args):
if (type(tspec) == PUNGraph):
return GetMxDegNId_PUNGraph(tspec, *args)
if (type(tspec) == PUndirNet):
return GetMxDegNId_PUndirNet(tspec, *args)
if (type(tspec) == PDirNet):
return GetMxDegNId_PDirNet(tspec, *args)
if (type(tspec) == PNGraph):
... |
def main(args):
    """Seed every RNG source from args.seed, then build a Solver and evaluate."""
    print(args)
    cudnn.benchmark = True
    # Seed all randomness sources for reproducibility.  Order preserved:
    # torch.manual_seed comes last so it determines the final CUDA RNG state.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.manual_seed(args.seed)
    Solver(args).evaluate()
def attention_pytorch(qkv, dropout_p=0.0, causal=True):
(batch_size, seqlen, _, nheads, d) = qkv.shape
(q, k, v) = qkv.unbind(dim=2)
q = rearrange(q, 'b t h d -> (b h) t d')
k = rearrange(k, 'b s h d -> (b h) d s')
softmax_scale = (1.0 / math.sqrt(d))
scores = torch.empty((batch_size * nheads), ... |
class OneHot(object):
def __init__(self, n_classes):
self.n_classes = n_classes
def __call__(self, x):
import theano.tensor.extra_ops as extra_ops
y = extra_ops.to_one_hot(x.flatten(), self.n_classes)
if (x.ndim == 1):
return y
return y.reshape((x.shape[0], x.... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.