code stringlengths 101 5.91M |
|---|
class OnnxOpOptionalAttrGetter(object):
def __init__(self):
self._optional_attrs = {'ArgMax': {'axis': 0, 'keepdims': 1, 'select_last_index': 0}, 'ArgMin': {'axis': 0, 'keepdims': 1, 'select_last_index': 0}, 'AveragePool': {'auto_pad': 'NOTSET', 'ceil_mode': 0, 'count_include_pad': 0}, 'BatchNormalization':... |
def build_generic_retinanet_model(model, add_conv_body_func, freeze_conv_body=False):
def _single_gpu_build_func(model):
(blobs, dim, spatial_scales) = add_conv_body_func(model)
if (not model.train):
model.conv_body_net = model.net.Clone('conv_body_net')
retinanet_heads.add_fpn_r... |
def _conv2d(channel, kernel, padding, stride, num_sync_bn_devices=(- 1)):
cell = nn.HybridSequential(prefix='')
cell.add(nn.Conv2D(channel, kernel_size=kernel, strides=stride, padding=padding, use_bias=False))
if (num_sync_bn_devices < 1):
cell.add(nn.BatchNorm(epsilon=1e-05, momentum=0.9))
else... |
def pr_curve_raw(tag, tp, fp, tn, fn, precision, recall, num_thresholds=127, weights=None):
if (num_thresholds > 127):
num_thresholds = 127
data = np.stack((tp, fp, tn, fn, precision, recall))
pr_curve_plugin_data = PrCurvePluginData(version=0, num_thresholds=num_thresholds).SerializeToString()
... |
def get_model_list_for_rtl(net):
if (type(net) is not list):
net = [net]
def flatten_list(in_list, out_list):
for model in in_list:
if issubclass(type(model), Switcher):
model = model.get_current_model()
if ((type(model) != Convolution2d) and hasattr(model... |
def evaluation(test_file='../saved/CAR+P_Normal_predict_exa_4_1.txt', maxid=0, display=1):
file = open(test_file, 'r')
batch_in = []
batch_out = []
batch_pred = []
cntline = 0
for line in file.readlines():
cntline += 1
if (cntline <= display):
print(line)
in_ ... |
def submit_alisa_train(datasource, estimator_string, select, validation_select, model_params, model_name, pre_trained_model, **train_params):
params = dict(locals())
del params['train_params']
params.update(train_params)
if estimator_string.lower().startswith('xgboost'):
params['entry_type'] = '... |
def print_assert_equal(test_string, actual, desired):
__tracebackhide__ = True
import pprint
if (not (actual == desired)):
msg = StringIO()
msg.write(test_string)
msg.write(' failed\nACTUAL: \n')
pprint.pprint(actual, msg)
msg.write('DESIRED: \n')
pprint.pprin... |
def get_updated_ranges(ranges, max_live=None):
def _get_max_live(ranges):
max_live = (max((x[1].used for x in ranges if x[1].used)) + 1)
return max_live
def _update_range(x, max_live, size):
cx = x
if (x[1].defined is None):
cx = (cx[0], cx[1]._replace(defined=(- 1)))... |
class SeparationDataset(Dataset):
def __init__(self, data_dir, rate=16000, src=['mix_clean'], tgt=['s1', 's2'], n_fft=512, hop_length=320, win_length=512, window='hann', center=True):
super(SeparationDataset, self).__init__()
self.data_dir = data_dir
self.rate = rate
self.src = src
... |
class DictMetadata(object):
def __init__(self, metadata):
self._metadata = metadata
def has_metadata(self, name):
return (name in self._metadata)
def get_metadata(self, name):
try:
return ensure_str(self._metadata[name])
except UnicodeDecodeError as e:
... |
_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
def test_autoregressive_prediction(self):
NUM_STEPS = 2
TARGET_RETURN = 10
model = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert')
model = model.to(torch_device)
... |
class FP16Compressor(Compressor):
def compress(tensor):
tensor_compressed = tensor
if tensor.dtype.is_floating_point:
tensor_compressed = tensor.type(torch.float16)
return (tensor_compressed, tensor.dtype)
def decompress(tensor, ctx):
tensor_decompressed = tensor
... |
class SpatialGradientFeatures(nn.Module):
def __init__(self, C_inout, with_gradient_rotations=True):
super(SpatialGradientFeatures, self).__init__()
self.C_inout = C_inout
self.with_gradient_rotations = with_gradient_rotations
if self.with_gradient_rotations:
self.A_re = ... |
def _data_dimensions(features: _Features) -> Tuple[(int, int)]:
    """Return the first two data dimensions of a node- or edge-located input.

    Scans `features.inputs` and returns `data.shape[:2]` of the first input
    whose location is NODE or EDGE.

    Raises:
      AssertionError: if no node- or edge-located input is present.
    """
    for inp in features.inputs:
        if (inp.location in [_Location.NODE, _Location.EDGE]):
            return inp.data.shape[:2]
    # Raise explicitly instead of `assert False`: asserts are stripped under
    # `python -O`, which would make this silently fall through and return None.
    raise AssertionError('No node- or edge-located input found in features.')
def boost_get_includes(self, *k, **kw):
includes = ((k and k[0]) or kw.get('includes', None))
if (includes and self.__boost_get_version_file(includes)):
return includes
for dir in BOOST_INCLUDES:
if self.__boost_get_version_file(dir):
return dir
if includes:
self.fata... |
def test_stl_ownership():
    """Objects returned inside an STL container must be owned by Python:
    the live-instance count returns to zero once the container is dropped."""
    stats = ConstructorStats.get(m.Placeholder)
    assert stats.alive() == 0
    container = m.test_stl_ownership()
    assert len(container) == 1
    del container
    assert stats.alive() == 0
def test_ids2var():
    """ids2var with addEOS=True must yield a tensor of the requested
    (1, 2, 3) shape from a flat list of token ids."""
    token_ids = [1] * 5
    ids_var = ids2var(token_ids, 1, 2, 3, addEOS=True)
    assert ids_var.data.size() == torch.Size([1, 2, 3])
    print('Test (ids2var): passed')
def keep_file(x):
    """Return True exactly once per distinct sha256 digest of *x*.

    Side effect: records the digest in the enclosing `hashes` set, so later
    calls with duplicate content return False (de-duplication filter).
    """
    digest = sha256str(x)
    if digest in hashes:
        return False
    hashes.add(digest)
    return True
class DistributedDataParallel(Module):
def __init__(self, module):
super(DistributedDataParallel, self).__init__()
if (not hasattr(dist, '_backend')):
self.warn_on_half = True
else:
self.warn_on_half = (True if (dist._backend == dist.dist_backend.GLOO) else False)
... |
class GenericVisitor(ABC):
def __init__(self):
pass
def visit(self, node):
method_name = self._visit_method_name(node)
visitor = getattr(self, method_name, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
raise Exception('{}: No {} method'.forma... |
def _check_directly_compile_overloaded(obj):
qual_name = _qualified_name(obj)
if (_jit_internal._get_fn_overloads(qual_name) or _try_get_jit_cached_overloads(obj)):
raise RuntimeError('Function {} cannot be directly compiled because it is overloaded. It must be used in a context of a function where its ... |
def TestGE_KL():
    # NOTE(review): this looks like it was meant to be a *class* of test
    # cases — the nested functions take `self` and follow the test_* naming
    # convention — but as written it is a plain function whose inner defs
    # are created and discarded, so no test runner will ever collect them.
    # Confirm intent before converting to a class.
    def test_gekl_init(self):
        # Placeholder: GE-KL initialisation test not implemented yet.
        pass
    def test_gekl_step(self):
        # Placeholder: GE-KL step test not implemented yet.
        pass
def elsa_architecture(nb_classes, nb_tokens, maxlen, feature_output=False, embed_dropout_rate=0, final_dropout_rate=0, embed_dim=300, embed_l2=1e-06, return_attention=False, load_embedding=False, pre_embedding=None, high=False, LSTM_hidden=512, LSTM_drop=0.5):
class NonMasking(Layer):
def __init__(self, **k... |
class TensorFieldFreeModule(TensorFreeModule):
Element = TensorFieldParal
def __init__(self, vector_field_module, tensor_type):
domain = vector_field_module._domain
dest_map = vector_field_module._dest_map
kcon = tensor_type[0]
lcov = tensor_type[1]
name = 'T^({},{})({}'.... |
class SageTimeitResult():
def __init__(self, stats, series=None):
self.stats = stats
self.series = (series if (not None) else [])
def __repr__(self):
if (self.stats[0] > 1):
s = ('%d loops, best of %d: %.*g %s per loop' % self.stats)
else:
s = ('%d loop, b... |
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, atrous=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride, atrous)
self.bn1 = SynchronizedBatchNorm2d(planes)
self.relu = nn.ReLU(inplace=T... |
def test_nrand(m=10000, u=3, v=4):
    """Compare diffsptk.nrand statistics against the SPTK `nrand | vstat` tool.

    torch.var_mean returns (variance, mean), hence the crossed indices when
    matching against the reference output.
    """
    stats = torch.var_mean(diffsptk.nrand(m, mean=u, var=v), unbiased=False)
    reference = U.call(f'nrand -m {m} -u {u} -v {v} | vstat')
    assert U.allclose(stats[1], reference[0], rtol=0.1)
    assert U.allclose(stats[0], reference[1], rtol=0.1)
class TestRmsProp(OptimizerTestBase, LRModificationTestBase, TestCase):
def build_optimizer(self, model, **kwargs):
self._skip_gpu = False
return build_rms_prop(model, base_learning_rate=0.1, epsilon=0.1, **kwargs)
def check_optimizer(self, optimizer):
self.assertFalse(optimizer.get_auxi... |
def shd(true_matrix: pd.DataFrame, estimated_matrix: pd.DataFrame):
    """Compute the Structural Hamming Distance between two adjacency matrices.

    Args:
        true_matrix: adjacency matrix of the ground-truth graph.
        estimated_matrix: adjacency matrix of the estimated graph.

    Returns:
        The distance reported by ``SHD(...).get_shd()``.
    """
    true_graph = adjmatrix_to_graph(true_matrix)
    estimated_graph = adjmatrix_to_graph(estimated_matrix)
    # Return directly instead of binding to a local named `shd`,
    # which shadowed this function's own name.
    return SHD(true_graph, estimated_graph).get_shd()
def cal_pxl_roc(gt_mask, scores):
    """Compute the per-pixel ROC curve and ROC-AUC.

    Args:
        gt_mask: ground-truth mask array (any shape; flattened internally).
        scores: anomaly-score array, same shape as `gt_mask`.

    Returns:
        Tuple of (fpr, tpr, per_pixel_rocauc).
    """
    # Flatten each array once and reuse; the original flattened both twice.
    gt_flat = gt_mask.flatten()
    score_flat = scores.flatten()
    (fpr, tpr, _) = roc_curve(gt_flat, score_flat)
    per_pixel_rocauc = roc_auc_pxl(gt_flat, score_flat)
    return (fpr, tpr, per_pixel_rocauc)
def remove_malformed_prompt_logs(is_malformed_function):
    """Drop malformed entries from the global prompt log.

    Args:
        is_malformed_function: predicate returning True for entries to discard.

    Side effect: rebinds `global_variables.prompt_logs` to a new list keeping
    only well-formed entries (the old list object is not mutated in place).
    """
    # Comprehension replaces the manual append loop.
    global_variables.prompt_logs = [
        item for item in global_variables.prompt_logs
        if not is_malformed_function(item)
    ]
class OuterNode(object):
def __init__(self, is_tensor=False, tensor_value=None, attr_name=None):
self.output = []
self.is_tensor = is_tensor
self.tensor_value = tensor_value
self.attr_name = attr_name
self.attr_value = None
if (is_tensor == False):
if (ten... |
def run_train_test_cycle(X, Y, L, LS, S, P, model_class, output_root_dir, data_name, target_name, training_programme=None, do_this_if_model_exists='skip', save_data_in_output_dir=True, force_device_for_training=None, force_device_for_evaluation=None):
assert (Y.shape[0] == X.shape[0] == LS.shape[0]), 'Number of sam... |
def test_avgpool1d_stride1_padding_same():
time_dim = Dim(10, name='time')
extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim], dtype='float32')})
class _Net(rf.Module):
def __call__(self, x: rf.Tensor, *, in_spatial_dim: Dim) -> Tuple[(Tensor, Dim)]:
return rf.pool1d(... |
class BertAdam(Optimizer):
def __init__(self, params=None, lr='required', warmup=(- 1), t_total=(- 1), schedule='warmup_linear', betas=(0.9, 0.999), e=1e-06, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
if ((lr == 'required') or (lr < 0.0)):
raise ValueError('Invalid learning rate: {} - shou... |
class HyperInfomax(nn.Module):
def __init__(self, args, n_features, device, writer):
super(HyperInfomax, self).__init__()
self.feature_emb = nn.Embedding(n_features, args.dim)
self.feature_emb_edge = nn.Embedding(n_features, args.dim)
self.hgnn = HGNN(args.dim, args.hid_units, device... |
class Composed(Representation):
kind = 'composed'
def __init__(self, *reps, context={}):
super().__init__(context=context)
self.reps = [from_config(rep, context=self.context) for rep in reps]
def _from_config(cls, config, context={}):
return cls(*config['reps'], context=context)
... |
def test():
aa = ak.contents.NumpyArray(np.frombuffer(b'hellothere', 'u1'))
b = aa.to_backend_array(allow_missing=False)
assert (b.tolist() == [104, 101, 108, 108, 111, 116, 104, 101, 114, 101])
assert (b.dtype == np.dtype(np.uint8))
c = ak.contents.NumpyArray(np.array([0, ], dtype='datetime64[s]'))... |
class IndexedRowTableLinearize(TableLinearize):
def process_table(self, table_content: Dict):
assert (('header' in table_content) and ('rows' in table_content)), self.PROMPT_MESSAGE
_table_str = (self.process_header(table_content['header']) + ' ')
for (i, row_example) in enumerate(table_cont... |
def make_embeddings(opt, word_dict, for_encoder=True, embed_type=None):
embedding_dim = (opt.kb_embed_size if (embed_type == 'kb') else opt.word_vec_size)
word_padding_idx = word_dict.to_ind(markers.PAD)
num_word_embeddings = len(word_dict)
return Embeddings(word_vec_size=embedding_dim, position_encodin... |
.parametrize('batch_size', [1])
def test_examples_cpp_mnist_runtime(tmpdir, nnabla_examples_root, batch_size):
pytest.skip('Temporarily skip due to mnist training data server trouble.')
nn.clear_parameters()
if (not nnabla_examples_root.available):
pytest.skip('`nnabla-examples` can not be found.')
... |
_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
class EncoderDecoderModel(PreTrainedModel):
config_class = EncoderDecoderConfig
base_model_prefix = 'encoder_decoder'
def __init__(self, config: Optional[PretrainedConfig]=None, encoder: Optional[PreTrainedModel]=None, decoder: Optional[PreTrainedModel]=Non... |
class BM25L(BM25):
def __init__(self, corpus, tokenizer=None, k1=1.5, b=0.75, delta=0.5):
self.k1 = k1
self.b = b
self.delta = delta
super().__init__(corpus, tokenizer)
def _calc_idf(self, nd):
for (word, freq) in nd.items():
idf = (math.log((self.corpus_size ... |
class GatewayWriteObjectStore(GatewayOperator):
def __init__(self, bucket_name: str, bucket_region: str, num_connections: int=32, key_prefix: Optional[str]=''):
super().__init__('write_object_store')
self.bucket_name = bucket_name
self.bucket_region = bucket_region
self.num_connectio... |
class ReraiseStatNode(StatNode):
child_attrs = []
is_terminator = True
def analyse_expressions(self, env):
return self
nogil_check = Node.gil_error
gil_message = 'Raising exception'
def generate_execution_code(self, code):
code.mark_pos(self.pos)
vars = code.funcstate.exc... |
def read_config(key=None, default=None):
    """Read the sxs JSON config file, returning the whole config or one key.

    Args:
        key: when given, return ``config.get(key, default)`` instead of the
            full config dict.
        default: fallback value when `key` is missing.

    Returns:
        The parsed config dict, or the value for `key` (or `default`).
        Missing config file yields an empty dict.
    """
    import json
    config_path = (sxs_directory('config') / 'config.json')
    if config_path.exists():
        # Use a context manager so the handle is closed promptly; the
        # original passed `config_path.open('r')` in and never closed it.
        with config_path.open('r') as f:
            config = json.load(f)
    else:
        config = {}
    if key is None:
        return config
    return config.get(key, default)
def gesummv_distr2(alpha: dc.float64, beta: dc.float64, A: dc.float64[(lM, lN)], B: dc.float64[(lM, lN)], x: dc.float64[lN], y: dc.float64[lMy]):
tmp1 = distr.MatMult(A, x, ((Px * lM), (Py * lN)), c_block_sizes=(lMy, 1))
tmp2 = distr.MatMult(B, x, (M, N), c_block_sizes=(lMy, 1))
y[:] = ((alpha * tmp1) + (be... |
def test_crosstab_basic_3d():
a = 'a'
b = 'b'
x = [0, 0, 9, 9, 0, 0, 9, 9]
y = [a, a, a, a, b, b, b, a]
z = [1, 2, 3, 1, 2, 3, 3, 1]
expected_xvals = [0, 9]
expected_yvals = [a, b]
expected_zvals = [1, 2, 3]
expected_count = np.array([[[1, 1, 0], [0, 1, 1]], [[2, 0, 1], [0, 0, 1]]])
... |
class Examples(SegmentationBase):
def __init__(self, size=256, random_crop=False, interpolation='bicubic'):
super().__init__(data_csv='data/ade20k_examples.txt', data_root='data/ade20k_images', segmentation_root='data/ade20k_segmentations', size=size, random_crop=random_crop, interpolation=interpolation, n_... |
def register_Ns3ComponentCarrierEnb_methods(root_module, cls):
cls.add_constructor([param('ns3::ComponentCarrierEnb const &', 'arg0')])
cls.add_constructor([])
cls.add_method('DoDispose', 'void', [], is_virtual=True)
cls.add_method('GetFfMacScheduler', 'ns3::Ptr< ns3::FfMacScheduler >', [])
cls.add_... |
class TrainArgParser(BaseArgParser):
def __init__(self):
super(TrainArgParser, self).__init__()
self.parser.add_argument('--num_epochs', type=int, default=20, help='Number of epochs to train.')
self.parser.add_argument('--lr', type=float, default=0.001, help='Learning rate.')
self.pa... |
class TestDB(TestCase):
create_statement = 'create table test_db (features text, label int)'
hive_create_statement = 'create table test_db (features string, label int) ROW FORMAT DELIMITED FIELDS TERMINATED BY "\x01"'
select_statement = 'select * from test_db'
drop_statement = 'drop table if exists test... |
def test_to_jams():
    """The JAMS annotation produced for a known dev clip must validate."""
    clip_id = 'foa_dev/split1_ir0_ov1_1'
    dataset = tau2019sse.Dataset(TEST_DATA_HOME)
    jam = dataset.clip(clip_id).to_jams()
    assert jam.validate()
def _logit(p):
p = torch.max((torch.ones(1) * 0.1), torch.min((torch.ones(1) * 0.9), p))
return (torch.log((p + 1e-10)) + torch.log(((1 - p) + 1e-10))) |
def process_variant(variant):
if args.debug:
variant['vae_variant']['num_epochs'] = 10
variant['vae_variant']['vis_kwargs']['save_period'] = 2
variant['vae_variant']['vis_kwargs']['num_samples_for_video'] = 2
variant['rl_variant']['vae_wrapped_env_kwargs']['num_samples_for_latent_his... |
class REFERDB(BASE):
def __init__(self, db_config):
super(REFERDB, self).__init__()
self._configs['data_aug'] = True
self._configs['random_flip'] = True
self._configs['random_affine'] = True
self._configs['random_color'] = True
self._configs['random_lighting'] = True
... |
class GlueDataset(Dataset):
args: GlueDataTrainingArguments
output_mode: str
features: List[InputFeatures]
def __init__(self, args: GlueDataTrainingArguments, tokenizer: PreTrainedTokenizerBase, limit_length: Optional[int]=None, mode: Union[(str, Split)]=Split.train, cache_dir: Optional[str]=None):
... |
def run_one_epoch(epoch, loader, model, criterion, optimizer, meters, phase='train', scheduler=None):
t_start = time.time()
assert (phase in ['train', 'val', 'test', 'calib']), 'phase not be in train/val/test/calib.'
train = (phase == 'train')
if train:
model.train()
else:
model.eval... |
def quotient(x, y, *args, **kwds):
    """Return ``x.quotient(y, ...)`` when supported, else plain ``x / y``.

    EAFP: the whole call sits inside the ``try`` so the division fallback
    also triggers if ``x.quotient`` raises AttributeError internally.
    """
    try:
        return x.quotient(y, *args, **kwds)
    except AttributeError:
        return (x / y)
_node_type(optplan.NodeMetaType.TRANSFORMATION)
class ContToDiscThresholding(optplan.TransformationBase):
    """Schema node for a continuous-to-discrete thresholding transformation.

    Declarative field definitions only; presumably the actual thresholding
    logic lives in the transformation implementation registered elsewhere.
    """
    # Polymorphic discriminator used when (de)serializing the plan schema.
    type = schema_utils.polymorphic_model_type('cont_to_disc_thresholding')
    # Reference to the continuous parametrization to be thresholded.
    continuous_parametrization = optplan.ReferenceType(optplan.Parametrization)
    # Threshold value; exact mapping to discrete levels is not visible here.
    threshold = types.FloatType()
class BasicUnit(nn.Module):
def __init__(self, unit, input_dim, increase_rate, droprate):
super(BasicUnit, self).__init__()
rnnunit_map = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU}
self.unit = unit
self.layer = rnnunit_map[unit](input_dim, increase_rate, 1)
if ('lstm' ==... |
class SymmetricIdeal(Ideal_generic):
def __init__(self, ring, gens, coerce=True):
Ideal_generic.__init__(self, ring, gens, coerce=coerce)
def __repr__(self):
return ('Symmetric Ideal %s of %s' % (self._repr_short(), self.ring()))
def _latex_(self):
from sage.misc.latex import latex
... |
def main(args):
scene_manager = SceneManager(args.input_folder)
scene_manager.load()
with open(args.output_file, 'w') as fid:
fid.write('NVM_V3\n \n{:d}\n'.format(len(scene_manager.images)))
image_fmt_str = (' {:.3f} ' + (7 * '{:.7f} '))
for (image_id, image) in scene_manager.images.... |
def CheckForCopyright(filename, lines, error):
    """Check that a copyright notice appears near the top of the file.

    Args:
        filename: name of the file being checked (for error reporting).
        lines: file contents; only indices 1..10 are scanned (index 0 is
            skipped, matching the original loop bounds).
        error: callback error(filename, linenum, category, confidence, message)
            invoked once when no notice is found.
    """
    # `range` instead of `xrange`: identical behavior on Python 2 and also
    # works on Python 3.
    for line in range(1, min(len(lines), 11)):
        if re.search('Copyright', lines[line], re.I):
            break
    else:  # no `break`: no copyright notice in the first 10 lines
        error(filename, 0, 'legal/copyright', 5,
              'No copyright message found. You should have a line: '
              '"Copyright [year] <Copyright Owner>"')
def _wandb_log(_dict):
    """Forward metrics to Weights & Biases when a run is active; otherwise
    fall back to logging the dict's repr at INFO level."""
    if wandb.run is None:
        log.info(repr(_dict))
    else:
        wandb.log(_dict)
class BPRMF(RecMixin, BaseRecommenderModel):
_charger
def __init__(self, data, config, params, *args, **kwargs):
self._random = np.random
self._params_list = [('_factors', 'factors', 'f', 10, int, None), ('_learning_rate', 'lr', 'lr', 0.05, None, None), ('_bias_regularization', 'bias_regularizat... |
class TeacherController(object):
def __init__(self, teacher, nb_test_episodes, param_env_bounds, seed=None, teacher_params={}):
self.teacher = teacher
self.nb_test_episodes = nb_test_episodes
self.test_ep_counter = 0
self.eps = 0.001
self.param_env_bounds = copy.deepcopy(para... |
def pre_process_sent(sent, do_filter, lower_case, res_wrds):
if do_filter:
sent = re.sub('-', ' ', sent)
sent = re.sub('', ' ', sent)
if (len(res_wrds) > 0):
wrds = sent.split()
wrds = [((('SPLIT_ME ' + w) + ' SPLIT_ME') if (w in res_wrds) else w) for w in wrds]
sents = [... |
class SparseKernelTests(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.random_state = np.random.RandomState(0)
def test_sample_weights(self):
X = self.random_state.uniform((- 1), 1, size=(4, 5))
X_sparse = self.random_state.uniform... |
def accum_graph_fts(encoders, dp: _DataPoint, graph_fts: _Array) -> _Array:
    """Encode a graph-located, non-pointer data point and add it into the
    running graph feature accumulator; return the (possibly updated) array."""
    is_graph_scalar = (dp.location == _Location.GRAPH) and (dp.type_ != _Type.POINTER)
    if is_graph_scalar:
        graph_fts += _encode_inputs(encoders, dp)
    return graph_fts
class PilBackend(ImageUtilsBackend):
_interpolations_map = {'nearest': Image.Resampling.NEAREST, 'bilinear': Image.Resampling.BILINEAR, 'bicubic': Image.Resampling.BICUBIC}
def __init__(self):
ImageUtilsBackend.__init__(self)
if hasattr(Image.Resampling, 'HAMMING'):
self._interpolati... |
def test_get_subclasses():
    """get_subclasses must return every transitive subclass, keyed by name."""
    class Parent():
        pass
    class Child(Parent):
        pass
    class GrandChild(Child):
        pass
    found = get_subclasses(Parent)
    assert found == {'Child': Child, 'GrandChild': GrandChild}
('analyze_code', 'Analyze Code', '"code": "<full_code_string>"')
def analyze_code(code: str, agent: Agent) -> list[str]:
function_string = 'def analyze_code(code: str) -> list[str]:'
args = [code]
description_string = 'Analyzes the given code and returns a list of suggestions for improvements.'
return c... |
def make_env(args):
if (',' in args.per_dim_threshold):
per_dim_threshold = np.array([float(t) for t in args.per_dim_threshold.split(',')])
else:
per_dim_threshold = float(args.per_dim_threshold)
if (gym.envs.registry.env_specs.get(args.env) is not None):
env_fn = (lambda : gym.make(... |
class RegNetSELayer(nn.Module):
def __init__(self, in_channels: int, reduced_channels: int):
super().__init__()
self.pooler = nn.AdaptiveAvgPool2d((1, 1))
self.attention = nn.Sequential(nn.Conv2d(in_channels, reduced_channels, kernel_size=1), nn.ReLU(), nn.Conv2d(reduced_channels, in_channel... |
def multiple_outputs_activation_model():
    """Build a small Keras model with two parallel Conv2D branches, each
    followed by a ReLU, whose activations are summed into a single output.

    Layer construction order matches the original so auto-generated layer
    names are unchanged.
    """
    inputs = Input(shape=INPUT_SHAPE)
    left = Conv2D(2, 3)(inputs)
    right = Conv2D(2, 3)(inputs)
    left_act = ReLU()(left)
    right_act = ReLU()(right)
    merged = Add()([left_act, right_act])
    return keras.Model(inputs=inputs, outputs=merged)
def get_Babi_3(args=None):
    """Return the bAbI task-3 ('three supporting facts') dataset preset,
    configured with the hyper-parameters used for this task."""
    dataset = Dataset(name='babi_3', path='preprocess/Babi/vec_babi_qa3_three-supporting-facts_.p', args=args)
    dataset.vec.word_dim = 50
    dataset.bsize = 50
    dataset.n_iters = 100
    dataset.hidden_size = 32
    return dataset
def measure_net_latency(net, l_type='gpu8', fast=True, input_shape=(3, 224, 224), clean=False):
if isinstance(net, nn.DataParallel):
net = net.module
rm_bn_from_net(net)
if ('gpu' in l_type):
(l_type, batch_size) = (l_type[:3], int(l_type[3:]))
else:
batch_size = 1
data_shape... |
class Stackimg():
def __init__(self, opt):
self.stack_n = opt.stack_n
self.img_size = opt.img_size
self.env = opt.env
self.trans_stack = transforms.Compose([transforms.Resize((self.img_size, self.img_size)), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]... |
class AttentionMask(eqx.Module):
is_causal: bool = eqx.static_field()
explicit_mask: Optional[NamedArray] = None
def materialize(self, QPos: Axis, KPos: Axis, q_slice: Optional[haliax.dslice]=None, k_slice: Optional[haliax.dslice]=None) -> Optional[NamedArray]:
if (q_slice is None):
q_sl... |
def util_grid_cost(src: str, dest: str, src_tier: str='PREMIUM', dest_tier: str='PREMIUM'):
    """Print the path cost between two regions computed from the bundled
    throughput grid (skyplane.data/throughput.csv)."""
    with path('skyplane.data', 'throughput.csv') as grid_csv:
        cost = ThroughputSolver(grid_csv).get_path_cost(src, dest, src_tier, dest_tier)
        print(cost)
def get_output_filename(source_filename, cwd, options):
if options.cplus:
c_suffix = '.cpp'
else:
c_suffix = '.c'
suggested_file_name = Utils.replace_suffix(source_filename, c_suffix)
if options.output_file:
out_path = os.path.join(cwd, options.output_file)
if os.path.isd... |
def get_pixel_counts_from_label(src):
label_values = src.read()
print(label_values.shape)
(unique, counts) = np.unique(label_values, return_counts=True)
print(unique, counts)
nochange_pixels_count = counts[0]
change_pixels_count = counts[1]
cloud_pixels_count = 0
if (2 in unique):
... |
def code_to_exprs(code: str, inputs: Set[str], outputs: Set[str]) -> Dict[(str, sp.Expr)]:
inputs = list(inputs)
outputs = list(outputs)
code_fn = '\ndef symbolic_execution({}):\n # define functions from cmath.h\n from sympy import exp, log\n def log2(x):\n return log(x, 2)\n def log10(x)... |
class NumericAttributeBinaryTest(InstanceConditionalTest):
def __init__(self, att_idx, att_value, equal_passes_test):
super().__init__()
self._att_idx = att_idx
self._att_value = att_value
self._equals_passes_test = equal_passes_test
def branch_for_instance(self, X):
if (... |
def test_get_transitive_successors(graph, node, second_node, third_node, fourth_node):
graph.add_node(fourth_node)
graph.add_node(node)
graph.add_node(second_node)
graph.add_node(third_node)
graph.add_edge(node, second_node)
graph.add_edge(second_node, third_node)
graph.add_edge(third_node, ... |
def register_Ns3SsidChecker_methods(root_module, cls):
    # Register the SsidChecker constructors on the pybindgen class wrapper:
    # default constructor first, then the copy constructor. `root_module`
    # is accepted for signature uniformity with the sibling register_* helpers
    # but is unused here.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SsidChecker const &', 'arg0')])
    return
def get_number_of_params_summary(model, name='', print_on=True, include_routers=True):
total_num = get_total_number_of_params(model)
paths_list = model.paths_list
num_list = []
for (nodes, _) in paths_list:
num = get_number_of_params_path(model, nodes, include_routers=include_routers)
nu... |
class CustomConfig(PretrainedConfig):
    """Minimal PretrainedConfig subclass adding a single extra `attribute`
    field (default 1)."""
    # Identifier for this config type.
    model_type = 'custom'
    def __init__(self, attribute=1, **kwargs):
        # Set the custom field before delegating remaining kwargs to the
        # PretrainedConfig initializer.
        self.attribute = attribute
        super().__init__(**kwargs)
class BenchmarkThread(Thread):
def __init__(self, par_scheduler, num):
Thread.__init__(self, name=('BenchmarkThread %d' % num))
self._par_scheduler = par_scheduler
self._id = num
self.exception = None
def run(self):
try:
scheduler = self._par_scheduler.get_loc... |
def test_before_search_start(stopping_condition):
    # Bump the condition via a statement execution, then signal a new search:
    # before_search_start must reset the counter back to zero.
    stopping_condition.before_statement_execution(None, None, None)
    stopping_condition.before_search_start(None)
    assert (stopping_condition.current_value() == 0)
def main(_):
print(FLAGS.lamb_max, FLAGS.delta, FLAGS.epsilon, FLAGS.top_bn, FLAGS.sigma)
numpy.random.seed(seed=FLAGS.seed)
tf.set_random_seed(numpy.random.randint(1234))
with tf.Graph().as_default() as g:
with tf.device('/cpu:0'):
(images_1, images_2, labels) = two_transformed_inpu... |
.parametrize('observation_shape', [(100,), ((100,), (200,))])
.parametrize('action_size', [2])
.parametrize('latent_size', [32])
.parametrize('batch_size', [32])
.parametrize('n', [100])
.parametrize('beta', [0.5])
def test_conditional_vae(observation_shape: Shape, action_size: int, latent_size: int, batch_size: int, n... |
def get_args_and_hdf5_file(cfg):
common_parameters = ['--train:mode', 'world', '--train:samples', '256**3', '--train:batchsize', '64*64*128', '--train:sampler_importance', '0.01', '--val:copy_and_split', '--outputmode', 'density:direct', '--lossmode', 'density', '--activation', BEST_ACTIVATION, '-l1', '1', '--lr_st... |
class EllipticCurve_finite_field(EllipticCurve_field, HyperellipticCurve_finite_field):
_point = ell_point.EllipticCurvePoint_finite_field
def plot(self, *args, **kwds):
R = self.base_ring()
if (not R.is_prime_field()):
raise NotImplementedError
from sage.plot.point import po... |
def write_log_task(filename='Changelog'):
st = subprocess.Popen(['git', 'log', f'{LOG_START}..{LOG_END}'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(output, error) = st.communicate()
if (not (st.returncode == 0)):
raise RuntimeError(('%s failed' % str(error)))
out = st.communicate()[0].de... |
class TicTacToeNNet():
def __init__(self, game, args):
(self.board_z, self.board_x, self.board_y) = game.getBoardSize()
self.action_size = game.getActionSize()
self.args = args
self.input_boards = Input(shape=(self.board_z, self.board_x, self.board_y))
x_image = Reshape((self... |
def build_optimizers(model, cfgs):
optimizers = {}
if hasattr(model, 'module'):
model = model.module
is_dict_of_dict = True
for (key, cfg) in cfgs.items():
if (not isinstance(cfg, dict)):
is_dict_of_dict = False
if is_dict_of_dict:
for (key, cfg) in cfgs.items():
... |
class UnetSplit(nn.Module):
def __init__(self, in_channels=3, depth=5, shared_depth=0, blocks=1, out_channels_image=3, out_channels_mask=1, start_filters=32, residual=True, batch_norm=nn.BatchNorm2d, transpose=True, concat=True, transfer_data=True, long_skip=False):
super(UnetSplit, self).__init__()
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.