code stringlengths 101 5.91M |
|---|
def is_even_matrix(A):
    """Check that every diagonal entry of the matrix *A* is even.

    Returns a pair ``(ok, idx)``: ``(True, -1)`` when all diagonal
    entries are even, otherwise ``(False, i)`` where ``i`` is the first
    row index whose diagonal entry is odd.
    """
    for idx in range(A.nrows()):
        diag_entry = A[(idx, idx)]
        if diag_entry % 2 != 0:
            return (False, idx)
    return (True, -1)
def showOrigDec(orig, dec, num=10):
import matplotlib.pyplot as plt
n = num
plt.figure(figsize=(20, 4))
for i in range(n):
ax = plt.subplot(2, n, (i + 1))
plt.imshow(orig[i].reshape(32, 32, 3))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax... |
def list_of_subfunctions(root, only_local_functions=True):
if inspect.ismodule(root):
ismodule = True
elif inspect.isclass(root):
ismodule = False
superclasses = inspect.getmro(root)[1:]
else:
raise ValueError("'root' must be a module or a class.")
def local_filter(f, nam... |
def register_Ns3MmWaveMacCschedSapUserCschedLcConfigCnfParameters_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::MmWaveMacCschedSapUser::CschedLcConfigCnfParameters const &', 'arg0')])
cls.add_instance_attribute('m_logicalChannelIdentity', 'std::vector< unsigned char >',... |
class Encoder(abc.ABC):
def spec(self):
def input_dim(self):
def output_dim(self):
def reset(self, do_resets=None): |
def getDistanceByHaversine(loc1, loc2):
(lat1, lon1) = loc1
(lat2, lon2) = loc2
lon1 = ((lon1 * pi) / 180.0)
lon2 = ((lon2 * pi) / 180.0)
lat1 = ((lat1 * pi) / 180.0)
lat2 = ((lat2 * pi) / 180.0)
dlon = (lon2 - lon1)
dlat = (lat2 - lat1)
a = ((sin((dlat / 2)) ** 2) + ((cos(lat1) * co... |
class Exif(MutableMapping):
endian = '<'
def __init__(self):
self._data = {}
self._ifds = {}
self._info = None
self._loaded_exif = None
def _fixup(self, value):
try:
if ((len(value) == 1) and (not isinstance(value, dict))):
return value[0]
... |
def test_interval_raises():
    """An Interval with low > high must be rejected with ValueError."""
    expected = 'One must have low <= high; got low=1, high=0.'
    with pytest.raises(ValueError, match=expected):
        Interval(1, 0, False, False)
def test_mrmr_classif_without_scores():
selected_features = mrmr.polars.mrmr_classif(df=df_polars, K=4, target_column=target_column_classif, features=features, denominator='mean', only_same_domain=False, return_scores=False, show_progress=True)
assert (set(selected_features) == set(['some_null', 'feature_a', 'f... |
def getenv(name, default):
    """Return environment variable *name*, stripped of surrounding
    spaces and quote characters, or *default* when it is unset.

    The original used a bare ``except:``, which would also swallow
    unrelated errors (KeyboardInterrupt, bugs in ``strip``); only the
    missing-variable case is handled now.
    """
    try:
        return os.environ[name].strip(' "\'')
    except KeyError:
        return default
def get_observed_stats_from_network_attr(edgelist_filename, param_func_list, labels, outcome_bin_filename, binattr_filename=None, contattr_filename=None, catattr_filename=None, directed=False, bipartite=False):
assert (len(param_func_list) == len(labels))
if directed:
if bipartite:
raise Exc... |
class BayesLSTM(nn.Module):
def __init__(self, input_size, hidden_size, num_layers=1, bias=True, batch_first=False, dropout=0.0, bidirectional=False, prior=None, mu_lower=(- 0.05), mu_upper=0.05, rho_lower=math.log((math.exp((1.0 / 4.0)) - 1.0)), rho_upper=math.log((math.exp((1.0 / 2.0)) - 1.0))):
super()._... |
class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
def __init__(self, embedding_dim: int=768, ffn_embedding_dim: int=3072, num_attention_heads: int=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, activation_fn: str='relu', add_bias_kv: bool=False, add_z... |
class SimpleTokenizer(object):
def __init__(self, bpe_path: str=default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
merges = merges[1:(((49152 - 256) ... |
def conv2d2(inputs, num_outputs, kernel_size, sn, stride=1, rate=1, data_format='NCHW', activation_fn=tf.nn.relu, normalizer_fn=None, normalizer_params=None, weights_regularizer=None, weights_initializer=ly.xavier_initializer(), biases_initializer=init_ops.zeros_initializer(), biases_regularizer=None, reuse=None, scope... |
def get_preprocessing(name, is_training=False):
preprocessing_fn_map = {'cifarnet': cifarnet_preprocessing, 'inception': inception_preprocessing, 'inception_v1': inception_preprocessing, 'inception_v2': inception_preprocessing, 'inception_v3': inception_preprocessing, 'inception_v3_bap': inception_preprocessing, 'i... |
class CLI(LightningCLI):
def __init__(self, model_class, run=True, **kwargs):
trainer_defaults = {'default_config_files': [os.path.join('perceiver', 'trainer.yaml')]}
super().__init__(model_class, run=run, save_config_overwrite=True, parser_kwargs={'fit': trainer_defaults, 'test': trainer_defaults, ... |
def get_all_E_gt_func(Js, Trange):
    """Evaluate ``E_gt_func`` at every index into *Js* over *Trange*."""
    num_entries = len(Js)
    return [E_gt_func(idx, Js, Trange) for idx in range(num_entries)]
class exponweib_gen(rv_continuous):
def _shape_info(self):
ia = _ShapeInfo('a', False, (0, np.inf), (False, False))
ic = _ShapeInfo('c', False, (0, np.inf), (False, False))
return [ia, ic]
def _pdf(self, x, a, c):
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c... |
class AdjustedRandScore(EfficientMI):
def _calc_score(self, *args, **kwargs):
return self.calc_ARand(*args, **kwargs)
def calc_ARand(self, last):
N = last['N']
a = last['a']
b = last['b']
n = last['n']
Nc = tensor_calc_combination(N, 2).sum(dim=[(- 1), (- 2)])
... |
class miniImageNetGenerator(object):
def __init__(self, data_file, nb_classes=5, nb_samples_per_class=15, max_iter=None, xp=np):
super(miniImageNetGenerator, self).__init__()
self.data_file = data_file
self.nb_classes = nb_classes
self.nb_samples_per_class = nb_samples_per_class
... |
class TestMultipleFields(object):
def setup(self):
self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
def _bad_call(self):
return self.ary[('f0', 'f1')]
def test_no_tuple(self):
assert_raises(IndexError, self._bad_call)
def test_return(self):
res = sel... |
class DataTrainingArguments():
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}... |
class Protocol(object):
_SERIALIZER = ':'
def __init__(self, protocol):
self._name = protocol['name']
self._mode = protocol['mode']
try:
from minicps import __file__
index = __file__.rfind('minicps')
self._minicps_path = (__file__[:(index + 7)] + '/')
... |
class DialogTracker():
def __init__(self, bot_url):
self._bot = convai_api.ConvApiBot(bot_url)
self._bot_url = bot_url
self._chat_fsm = {}
self._users = {}
self._text = 'God'
self._factoid_qas = []
def start(self):
while True:
try:
... |
class Convolution2d(Sequential):
def __init__(self, sub_layer, filter_size=(1, 1), stride=(1, 1), *, input_shape=None, padding='valid', border_mode='reflect_101', border_value=0.0, name=None, fw_dtype=bb.DType.FP32, bw_dtype=bb.DType.FP32):
self.fw_dtype = fw_dtype
self.bw_dtype = bw_dtype
s... |
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, drop=0.0, attn_drop=0.0, drop_path=0.0, act_args={'act': 'gelu'}, norm_args={'norm': 'ln'}):
super().__init__()
self.norm1 = create_norm(norm_args, dim)
self.attn = Attention(dim, num_heads=num_heads, q... |
def register_Ns3EpcS11SapMmeModifyBearerResponseMessage_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::EpcS11SapMme::ModifyBearerResponseMessage const &', 'arg0')])
cls.add_instance_attribute('cause', 'ns3::EpcS11SapMme::ModifyBearerResponseMessage::Cause', is_const=Fals... |
def conv_init(conv):
    """Initialise a convolution layer in place.

    Weights get Kaiming-normal initialisation (fan-out mode) and the
    bias is zeroed; either tensor is skipped when it is None.
    """
    weight = conv.weight
    if weight is not None:
        nn.init.kaiming_normal_(weight, mode='fan_out')
    bias = conv.bias
    if bias is not None:
        nn.init.constant_(bias, 0)
def test_RecordArray_NumpyArray():
v2a = ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0, 1, 2, 3, 4], np.int64)), ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5]))], ['x', 'y'])
roundtrip(v2a)
array = ak.highlevel.Array(v2a)
memoryleak(array,... |
def load_labelmap(path):
with tf.gfile.GFile(path, 'r') as fid:
label_map_string = fid.read()
label_map = string_int_label_map_pb2.StringIntLabelMap()
try:
text_format.Merge(label_map_string, label_map)
except text_format.ParseError:
label_map.ParseFromString(... |
def from_dc_to_ip_survey(dc_survey, dim='2.5D'):
    """Build an IP survey that reuses the source list of *dc_survey*.

    ``dim`` is accepted for API compatibility but is not used by this
    implementation.
    """
    return Survey(dc_survey.source_list)
def eval_distinct_detail(hyps_resp):
if (len(hyps_resp) == 0):
print('ERROR, eval_distinct get empty input')
return
if (type(hyps_resp[0]) != list):
print("ERROR, eval_distinct takes in a list of <class 'list'>, get a list of {} instead".format(type(hyps_resp[0])))
return
hyp... |
def run(args, kwargs):
args.model_signature = str(datetime.datetime.now())[0:19].replace(' ', '_')
args.model_signature = args.model_signature.replace(':', '_')
snapshots_path = os.path.join(args.out_dir, (('vae_' + args.dataset) + '_'))
snap_dir = (snapshots_path + args.flow)
if (args.flow != 'no_f... |
def apply_template_plan(prefix, template):
    """Copy a workspace template's plan config to *prefix*/plan/plan.yaml."""
    from openfl.federated.plan import Plan
    from openfl.interface.cli_helper import WORKSPACE

    source_plan = Plan.parse(WORKSPACE / template / 'plan' / 'plan.yaml')
    Plan.dump(prefix / 'plan' / 'plan.yaml', source_plan.config)
def get_cmd_reg(wb, names, cmd_reg):
    """Populate *cmd_reg* with the parsed contents of each named sheet.

    For every sheet name in *names*, the sheet is looked up in workbook
    *wb* and stored under a normalised key: spaces removed and any
    "(...)" suffix dropped.

    Note: the original called ``.split('(')[0]`` twice in a row; the
    second call was a no-op (the first already removed every '(') and
    has been deleted.
    """
    for name in names:
        sheet = wb[name]
        key = name.replace(' ', '').split('(')[0]
        cmd_reg[key] = read_sheet(sheet)
class MultiWozDB(object):
domains = ['restaurant', 'hotel', 'attraction', 'train', 'taxi', 'hospital']
dbs = {}
CUR_DIR = os.path.dirname(__file__)
for domain in domains:
db = os.path.join('utils/multiwoz/db/{}-dbase.db'.format(domain))
conn = sqlite3.connect(db)
c = conn.cursor(... |
class Kernelf(Component):
def __init__(self, ls, context={}):
super().__init__(context=context)
self.ls = ls
def __call__(self, x, z=None, diagonal=False, distance=False):
qmmlpack = import_qmmlpack('use cmlkit.regression.qmml')
kernelf = getattr(qmmlpack, kernelfs[self.kind])
... |
def main():
args = get_arg()
random.seed(RAND_SEED)
np.random.seed(RAND_SEED)
torch.manual_seed(RAND_SEED)
data = load_stage2_data(datatrack=args.datatrack, feat_type=args.feat_type, i_cv=args.i_cv)
method = args.method
if (method == 'autogp'):
if (args.datatrack == 'phase1-main'):
... |
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
message = ''
allowed = True
candidate_set = set(qids_to_ranked_candidate_passages.keys())
ref_set = set(qids_to_relevant_passageids.keys())
for qid in qids_to_ranked_candidate_passages:
duplicate_pids = ... |
.parametrize('n_neighbors, expected_risk', [(1, 0.25), (2, (5 / 6)), (3, 1), (4, 1)])
def test_baseline(n_neighbors, expected_risk):
ori = pd.DataFrame(rng.choice(['a', 'b'], size=(400, 2)), columns=['c0', 'c1'])
syn = pd.DataFrame([['a', 'a'], ['b', 'b'], ['a', 'a'], ['a', 'a']], columns=['c0', 'c1'])
eval... |
class DataTrainingArguments():
max_len: Optional[int] = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite t... |
.parametrize('a_shape, b_shape', [([2, 4], [4, 3]), pytest.param([3, 5], [5], marks=pytest.mark.skip('issues in dace')), pytest.param([5], [5, 6], marks=pytest.mark.skip('issues in dace'))])
.pure
def test_matmul_expansion(a_shape, b_shape, sdfg_name):
blas.Gemm.default_implementation = 'pure'
sdfg = dace.SDFG(... |
class MinimizeDegree(EdgeSelection):
def __call__(self, graph):
degrees = dict(graph.degree_iterator(labels=True))
edges = graph.edges(labels=True, sort=False)
if edges:
return min(edges, key=(lambda x: (degrees[x[0]] + degrees[x[1]])))
raise RuntimeError('no edges left t... |
class JavascriptProcessor():
def create_dead_for_loop(cls, body):
control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
p = np.random.uniform(0, 1)
if (p < 0.5):
prefix = (((((('for ( let ' + control_variable) + ' = 0 ; ') + control_variable) + ' > 0 ; ') + control_... |
def spacy_nlp():
    """Return a lazily created, cached spaCy English pipeline.

    The pipeline is memoised on the function object itself, so repeated
    calls all share one instance; a missing spaCy install is reported
    with an actionable error message.
    """
    cached = getattr(spacy_nlp, '_nlp', None)
    if cached is None:
        try:
            from spacy.lang.en import English
            spacy_nlp._nlp = English()
        except ImportError:
            raise ImportError('Please install spacy with: pip install spacy')
    return spacy_nlp._nlp
class Partition6(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/T5Block[18]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[19]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[20]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:6'):
super().__i... |
class f_model(nn.Module):
def __init__(self, freeze_param=False, inter_dim=INTER_DIM, num_classes=CATEGORIES, model_path=None):
super(f_model, self).__init__()
self.backbone = torchvision.models.resnet50(pretrained=True)
state_dict = self.backbone.state_dict()
num_features = self.bac... |
def test_epoch_eval_hook():
with pytest.raises(TypeError):
test_dataset = ExampleModel()
data_loader = [DataLoader(test_dataset, batch_size=1, sampler=None, num_worker=0, shuffle=False)]
EvalHook(data_loader, by_epoch=True)
test_dataset = ExampleDataset()
test_dataset.pre_eval = Magi... |
def get_atoms(molecule):
logger.debug('Entering get_atoms()')
conformer = molecule.GetConformer()
num_atoms = conformer.GetNumAtoms()
list_heavyatoms = []
list_heavyatomnames = []
atoms = np.arange(num_atoms)
for i in np.nditer(atoms):
atom_name = molecule.GetAtomWithIdx(int(atoms[i]... |
class GBasicBlockSig(nn.Module):
def __init__(self, in_channels, out_channels, ksize=3, stride=1, pad=1):
super(GBasicBlockSig, self).__init__()
self.body = nn.Sequential(nn.Conv2d(in_channels, out_channels, ksize, stride, pad, groups=4), nn.Sigmoid())
init_weights(self.modules)
def forw... |
def get_base_config():
return tp.OpQuantizationConfig(activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO, weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO, activation_n_bits=8, weights_n_bits=8, weights_per_channel_threshold=True, enable_weights_quantization=True, enable_activation_quan... |
def _setup_logging(verbosity: int, no_rich: bool) -> (Console | None):
level = logging.WARNING
if (verbosity == 1):
level = logging.INFO
if (verbosity >= 2):
level = logging.DEBUG
console = None
if no_rich:
handler: logging.Handler = logging.StreamHandler()
else:
... |
class IdiomPreproc(abstract_preproc.AbstractPreproc):
def __init__(self, grammar, save_path, censor_pointers):
self.save_path = save_path
self.censor_pointers = censor_pointers
self.grammar = registry.construct('grammar', grammar)
self.ast_wrapper = self.grammar.ast_wrapper
s... |
class SimpleMLPRegressor(Regressor):
def __init__(self, input_shape, output_dim, name, *args, **kwargs):
super().__init__(input_shape, output_dim, name)
del args, kwargs
self.model = SimpleMLPModel(output_dim=self._output_dim, name='SimpleMLPModel')
self._ys = None
self._netw... |
class Vidit(BaseDataset):
def __init__(self, config, device):
super().__init__(config, device)
self._root_dir = Path(os.path.expanduser(config['data_path']))
self._paths = {}
np.random.seed(config['seed'])
files = [str(path) for path in self._root_dir.iterdir()]
files... |
def print_vocabulary(mylist_freq, filename):
print('Printing vocabulary information to file', filename)
with open((filename + '_freq.txt'), 'w') as f:
f.write('{:>6} {}\n'.format('# occ', 'statement (in alphabetical order)'))
for (key, value) in sorted(mylist_freq.items()):
f.write... |
def test_volume(problem):
from sfepy.discrete import FieldVariable
ok = True
field_map = {'u': 'vector', 'p': 'scalar'}
volumes = {}
avg = 0.0
for (key, term) in expressions.items():
var_name = key[(- 1)]
field = problem.fields[field_map[var_name]]
var = FieldVariable(var... |
def gen_vocab(corpus, unk_threshold):
vocab = collections.defaultdict((lambda : len(vocab)))
freqs = collections.defaultdict((lambda : 0))
vocab[PAD]
vocab[GO]
vocab[EOS]
vocab[UNK]
with open(corpus) as f:
for sentence in f:
tokens = sentence.strip().split()
f... |
class Custom(BaseVRMWaveform):
def __init__(self, waveform_function):
self.waveform_function = waveform_function
def waveform_function(self):
return self._waveform_function
_function.setter
def waveform_function(self, value):
self._waveform_function = validate_callable('waveform_... |
def get_dcmdjpeg_exe():
fname = ('dcmdjpeg' + ('.exe' * sys.platform.startswith('win')))
for dir in ('c:\\dcmtk', 'c:\\Program Files', 'c:\\Program Files\\dcmtk', 'c:\\Program Files (x86)\\dcmtk'):
filename = os.path.join(dir, fname)
if os.path.isfile(filename):
return filename
t... |
class HeadNet():
def __init__(self, config, num_outputs, name):
self.num_levels = config.num_levels
self.bn_level_first = getattr(config, 'head_bn_level_first', False)
norm_layer = (config.norm_layer or tf.keras.layers.BatchNormalization)
if config.norm_kwargs:
norm_kwarg... |
def register_Ns3Ipv6Route_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_constructor([param('ns3::Ipv6Route const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetDestination', 'ns3::Ipv6Address', [], is_const=True)
cls.add_method('GetGateway', 'ns3::Ipv6Address', [], is... |
class NonMaximumSuppressionTest(tf.test.TestCase):
def setUp(self):
self._boxes = np.array([[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, (- 0.1), 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]], dtype=float)
self._boxlist = np_box_list.BoxList(self._boxes)
def test_with_no_scores_field(sel... |
class ContrastCLIPBottleneckBase(AbstractCLIPBottleneck):
def __init__(self, feature_dim, num_classes, num_domains, hparams, pretrained, idx2class):
super(ContrastCLIPBottleneckBase, self).__init__(feature_dim, num_classes, num_domains, hparams, pretrained, idx2class, DummyBottleneck, use_clip_contrast=True... |
def add_cam_tracking_constraint(camera, lookat):
cam_constraint = camera.constraints.new(type='TRACK_TO')
cam_constraint.track_axis = 'TRACK_NEGATIVE_Z'
cam_constraint.up_axis = 'UP_Y'
track_to = bpy.data.objects.new('Empty', None)
track_to.location = lookat
camera.parent = track_to
bpy.cont... |
class SecMin(Function):
def forward(ctx, inp, offsets):
nProposal = (offsets.size(0) - 1)
C = inp.size(1)
assert inp.is_contiguous()
assert offsets.is_contiguous()
out = torch.cuda.FloatTensor(nProposal, C).zero_()
pointgroup_ops_ext.sec_min(inp, offsets, out, nPropos... |
def _seg_62():
return [(120220, 'M', u'w'), (120221, 'M', u'x'), (120222, 'M', u'y'), (120223, 'M', u'z'), (120224, 'M', u'a'), (120225, 'M', u'b'), (120226, 'M', u'c'), (120227, 'M', u'd'), (120228, 'M', u'e'), (120229, 'M', u'f'), (120230, 'M', u'g'), (120231, 'M', u'h'), (120232, 'M', u'i'), (120233, 'M', u'j'),... |
class FuncParamType(ParamType):
def __init__(self, func):
self.name = func.__name__
self.func = func
def convert(self, value, param, ctx):
try:
return self.func(value)
except ValueError:
try:
value = text_type(value)
except Unic... |
class TransfoXLTokenizationTest(CommonTestCases.CommonTokenizerTester):
tokenizer_class = TransfoXLTokenizer
def setUp(self):
super(TransfoXLTokenizationTest, self).setUp()
vocab_tokens = ['<unk>', '[CLS]', '[SEP]', 'want', 'unwanted', 'wa', 'un', 'running', ',', 'low', 'l']
self.vocab_f... |
def _with_metaclass(cls):
    """Attach the VerboseCodeWriter metaclass to *cls* when code-generation
    tracing is enabled; otherwise return the class unchanged."""
    if not DebugFlags.debug_trace_code_generation:
        return cls
    return add_metaclass(VerboseCodeWriter)(cls)
def discriminator_fill_statedict(statedict, vars, size):
log_size = int(math.log(size, 2))
update(statedict, convert_conv(vars, f'{size}x{size}/FromRGB', 'convs.0'))
conv_i = 1
for i in range((log_size - 2), 0, (- 1)):
reso = (4 * (2 ** i))
update(statedict, convert_conv(vars, f'{reso}x{... |
def initialize_compiler_options(cmd):
    """Reset every compiler-related option on *cmd* to None."""
    for option in ('fcompiler', 'f2py', 'compiler', 'f77exec', 'f90exec'):
        setattr(cmd, option, None)
def test_dedupe_parameters():
parameters = [{'name': 'SigXsecOverSM', 'bounds': [[0.0, 10.0]]}, {'name': 'SigXsecOverSM', 'bounds': [[0.0, 10.0]]}]
assert (len(pyhf.readxml.dedupe_parameters(parameters)) == 1)
parameters[1]['bounds'] = [[0.0, 2.0]]
with pytest.raises(RuntimeError, match='SigXsecOverSM')... |
class XLMRobertaForQuestionAnswering():
    # Placeholder class: both construction and from_pretrained simply
    # defer to requires_pytorch, which reports that the PyTorch backend
    # is needed before this class can be used.
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # NOTE(review): defined as an instance method here; presumably meant
    # to mirror a classmethod-style from_pretrained — confirm against the
    # real implementation this stands in for.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def _get_google_drive_file_id(url: str) -> Optional[str]:
parts = urlparse(url)
if (re.match('(drive|docs)[.]google[.]com', parts.netloc) is None):
return None
match = re.match('/file/d/(?P<id>[^/]*)', parts.path)
if (match is None):
return None
return match.group('id') |
def get_end_date(start_date: datetime) -> datetime:
    """Return *start_date* shifted one month ahead.

    December rolls over into January of the following year.  When the
    target month is shorter than the current day-of-month (e.g. 31
    January -> February), the day is clamped to the last day of the
    target month; the original ``replace(month=...)`` raised ValueError
    in that case.
    """
    if start_date.month == 12:
        year, month = start_date.year + 1, 1
    else:
        year, month = start_date.year, start_date.month + 1
    # Clamp the day so e.g. Jan 31 maps to Feb 28/29 instead of raising.
    last_day = calendar.monthrange(year, month)[1]
    return start_date.replace(year=year, month=month,
                              day=min(start_date.day, last_day))
def compile_model(model, learning_rate=0.005):
    """Compile *model* in place with Adam, MSE loss and an MSE metric."""
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss=tf.keras.losses.MeanSquaredError(),
        metrics=[tf.keras.metrics.MeanSquaredError()],
    )
def test_hessian_vector_product():
a = torch.tensor([5.0])
x = torch.tensor([10.0], requires_grad=True)
def f():
return (a * (x ** 2))
expected_hessian = (2 * a)
vector = torch.tensor([10.0])
expected_hvp = (expected_hessian * vector).detach()
f_Ax = _build_hessian_vector_product(f, ... |
def _serialize_json_and_commit(path, obj):
with fsspec.open(f'{path}.tmp', 'w') as file:
file.write(obj.to_json())
fs: AbstractFileSystem = fsspec.core.url_to_fs(path)[0]
fs.mkdirs(os.path.dirname(path), exist_ok=True)
if fs.exists(path):
fs.copy(path, f'{path}.bak')
fs.rename(f'{pat... |
class LocationTimeAttack(Attack):
def __init__(self, knowledge_length, time_precision='Hour'):
self.time_precision = time_precision
super(LocationTimeAttack, self).__init__(knowledge_length)
def time_precision(self):
return self._time_precision
_precision.setter
def time_precisio... |
def convert_tokens_to_ids(vocab, tokens):
    """Map each token in *tokens* to its id via the *vocab* mapping.

    A missing token raises KeyError, exactly like direct indexing.
    """
    return [vocab[token] for token in tokens]
class RNNField(Dense):
def __init__(self, units=1, name=None, rnn_type='SimpleRNN', activation=linear, kernel_initializer=default_kernel_initializer(), recurrent_initializer=default_kernel_initializer(), bias_initializer=default_bias_initializer(), kernel_regularizer=None, recurrent_regularizer=None, bias_regulariz... |
def create_syncube(modelname, voxelpos):
print('Creating simulated cube data ...')
(xxx, yyy, zzz) = voxelpos
x3 = xxx.reshape(yNcube, xNcube, zNcube)
y3 = yyy.reshape(yNcube, xNcube, zNcube)
z3 = zzz.reshape(yNcube, xNcube, zNcube)
if (modelname == 'layers_2'):
zshift = (((zLcube / 8.0)... |
def _resolve_random_state(random_state: Union[(int, np.random.RandomState)]) -> np.random.RandomState:
if isinstance(random_state, int):
return np.random.RandomState(random_state)
elif isinstance(random_state, np.random.RandomState):
return random_state
else:
raise NotImplementedErro... |
class AveragePooling2D(_Pooling2D):
_pooling2d_support
def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format=None, **kwargs):
super(AveragePooling2D, self).__init__(pool_size, strides, padding, data_format, **kwargs)
def _pooling_function(self, inputs, pool_size, strides, p... |
class SimpleQueue(multiprocessing.queues.SimpleQueue):
    """SimpleQueue whose underlying pipe connections are wrapped in
    ConnectionWrapper before the send/recv methods are built."""

    def _make_methods(self):
        # Wrap the reader/writer pipes exactly once: the isinstance
        # check makes repeated _make_methods calls idempotent (both
        # ends are wrapped together, guarded by the reader's type).
        if (not isinstance(self._reader, ConnectionWrapper)):
            self._reader = ConnectionWrapper(self._reader)
            self._writer = ConnectionWrapper(self._writer)
        # Let the base class build get/put on top of the wrapped pipes.
        super(SimpleQueue, self)._make_methods()
class MultiWozDB(object):
def __init__(self, db_paths):
self.dbs = {}
self.sql_dbs = {}
for domain in all_domains:
with open(db_paths[domain], 'r') as f:
self.dbs[domain] = json.loads(f.read().lower())
def oneHotVector(self, domain, num):
vector = [0, ... |
class Sine(SignalGenerator):
def __init__(self, freq, **kwargs):
super(Sine, self).__init__(**kwargs)
self.freq = freq
def generate(self):
sine_of = (((self.freq * 2) * math.pi) / self.sample_rate)
sample_n = 0
while True:
(yield math.sin((sine_of * sample_n))... |
def unzip(zip_path: str, dest_dir: str) -> None:
    """Extract every member of the archive at *zip_path* into *dest_dir*."""
    with ZipFile(zip_path, 'r') as archive:
        archive.extractall(dest_dir)
def modifies_known_mutable(obj, attr):
    """Return True when *attr* names an unsafe mutating member for the
    first type in ``_mutable_spec`` that *obj* is an instance of.

    Only the first matching type spec is consulted, mirroring the
    original early-return behaviour; with no match the answer is False.
    """
    matching_unsafe = next(
        (unsafe for typespec, unsafe in _mutable_spec
         if isinstance(obj, typespec)),
        None,
    )
    return False if matching_unsafe is None else attr in matching_unsafe
def filter_roberta_detectors(_, pretrained_name: str):
    """Keep a pretrained model unless its name mentions 'detector'."""
    is_detector = 'detector' in pretrained_name
    return not is_detector
_REGISTRY.register()
class DIVO(ImageDataset):
_junk_pids = [0, (- 1)]
dataset_dir = ''
dataset_url = '
dataset_name = 'market1501'
def __init__(self, root='datasets', divo=False, **kwargs):
self.root = root
self.dataset_dir = osp.join(self.root, self.dataset_dir)
self.data_d... |
def get_source_index(scale, dst_index, half_pixel):
    """Map a destination index back to source coordinates for resizing.

    With *half_pixel* the half-pixel-centers convention is used and the
    result is clamped at zero; otherwise the index is plainly scaled.
    """
    if half_pixel:
        return np.maximum(0, scale * (dst_index + 0.5) - 0.5)
    return scale * dst_index
def convert_example_to_features(example, tokenizer, max_seq_length):
tokens = example['tokens']
segment_ids = example['segment_ids']
is_random_next = example['is_random_next']
masked_lm_positions = example['masked_lm_positions']
masked_lm_labels = example['masked_lm_labels']
assert (len(tokens) ... |
def view(g, self, size):
if _is_value(size):
shape = size
else:
if self.isTensor():
self_sizes = self.type().sizes()
if (self_sizes and (len(size) == 2) and (self_sizes[0] == size[0])):
return g.op('Flatten', self, axis_i=1)
shape = g.op('Constant'... |
def test_logging(capsys, tmp_path):
config_filename = get_pkg_data_filename('data/test_config.yml')
output_filename = str((tmp_path / 'logging.fits'))
skypy.main([config_filename, output_filename])
(out, err) = capsys.readouterr()
assert (not err)
with pytest.raises(SystemExit):
skypy.ma... |
class SAGE(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, num_layers, dropout):
super(SAGE, self).__init__()
self.convs = torch.nn.ModuleList()
self.convs.append(SAGEConv(in_channels, hidden_channels))
for _ in range((num_layers - 2)):
se... |
def make_destination_dataset(ws, schema, name=None):
    """Create and initialise an empty destination Dataset in workspace *ws*.

    The name defaults to 'dst'; the dataset is initialised empty via a
    dedicated init net that is run immediately.
    """
    ds_name = name if name else 'dst'
    init_net = core.Net('{}_init'.format(ds_name))
    with core.NameScope(ds_name):
        dataset = Dataset(schema, name=ds_name)
        dataset.init_empty(init_net)
    ws.run(init_net)
    return dataset
class SG2260Context(BModelContext):
device = Target.SG2260
memmap = memmap
dma_sys = dma_sys
tiu_sys = tiu_sys
local_layout_to_stride = local_layout_to_stride
valid_tag = {1: 0, 2: 1}
base_addr = [0, , GET_LMEM_START_ADDR]
def __init__(self) -> None:
super().__init__()
se... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.