code stringlengths 101 5.91M |
|---|
class BaseAction(object):
def __init__(self, gt_graph, env, rewards, strict=True):
self.gt_graph = gt_graph
self.env = env
self.rewards = rewards
self.strict = strict
def get_reward(self, state, prev_state, expert_plan, goal_idx):
(reward, done) = (self.rewards['neutral']... |
class AbstractPartitionDiagrams(Parent, UniqueRepresentation):
Element = AbstractPartitionDiagram
def __init__(self, order, category=None):
if (category is None):
category = FiniteEnumeratedSets()
Parent.__init__(self, category=category)
if (order in ZZ):
self.ord... |
def module_init():
    """Build and return the PyBindGen root module for ``ns.applications``.

    The generated bindings live in the C++ ``::ns3`` namespace.
    """
    return Module('ns.applications', cpp_namespace='::ns3')
def _lu_impl(A, pivot=True, get_infos=False, out=None):
    """Thin wrapper around the private ``torch._lu_with_info``.

    When the caller requests the info tensors (``get_infos=True``) the
    factorization must not raise on singular inputs, so error checking is
    disabled in that case.  ``out`` is accepted for signature
    compatibility but ignored, exactly as before.
    """
    check = not get_infos
    return torch._lu_with_info(A, pivot=pivot, check_errors=check)
class SHD():
__SHD = 0
def __init__(self, truth: Graph, est: Graph):
nodes = truth.get_nodes()
nodes_name = [node.get_name() for node in nodes]
self.__SHD: int = 0
for i in list(range(0, len(nodes))):
for j in list(range((i + 1), len(nodes))):
if (trut... |
def rprop(params: List[Tensor], grads: List[Tensor], prevs: List[Tensor], step_sizes: List[Tensor], *, step_size_min: float, step_size_max: float, etaminus: float, etaplus: float):
for (i, param) in enumerate(params):
grad = grads[i]
prev = prevs[i]
step_size = step_sizes[i]
sign = g... |
def point_accuracy(expected, observed, data=None, start=None, end=None):
    """Point-wise accuracy; delegates to ``_accuracy`` with the
    point-based confusion matrix."""
    confusion = point_confusion_matrix
    return _accuracy(expected, observed, data, start, end, cm=confusion)
def compute_grad2(d_out, x_in):
batch_size = x_in.size(0)
grad_dout = autograd.grad(outputs=d_out.sum(), inputs=x_in, create_graph=True, retain_graph=True, only_inputs=True)[0]
grad_dout2 = grad_dout.pow(2)
assert (grad_dout2.size() == x_in.size())
reg = grad_dout2.view(batch_size, (- 1)).sum(1)
... |
class RegLog(nn.Module):
def __init__(self, num_labels, arch='resnet50', global_avg=False, use_bn=True):
super(RegLog, self).__init__()
self.bn = None
if global_avg:
if (arch == 'resnet18'):
s = 2048
if (arch == 'resnet50'):
s = 2048
... |
def count_node_freq(fname, filter_size=100):
node_dict = {}
with open(fname, 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
ctr = 0
for row in csv_reader:
if (ctr == 0):
ctr += 1
continue
else:
to... |
def test_fista_multiclass_classes(mult_dense_train_data):
    """After fitting, the classifier must expose the class labels 0..2."""
    features, labels = mult_dense_train_data
    classifier = FistaClassifier()
    classifier.fit(features, labels)
    assert list(classifier.classes_) == [0, 1, 2]
def convert_sr(inpath, sr, output_path=None):
    """Resample an audio file to sample rate ``sr`` with the ``sox`` CLI.

    Args:
        inpath: Path of the input audio file.
        sr: Target sample rate in Hz.
        output_path: Destination path; a temporary ``.wav`` file name is
            generated when omitted.

    Returns:
        The path of the converted file.
    """
    import subprocess  # stdlib; local import keeps the fix self-contained

    if not output_path:
        output_path = generate_tmp_filename('wav')
    # Pass arguments as a list with no shell: the previous
    # os.system(f'sox {inpath} ...') broke on paths with spaces and was
    # open to shell injection.  check=False preserves the original
    # best-effort behavior (os.system's exit status was ignored too).
    subprocess.run(['sox', str(inpath), '-r', str(sr), str(output_path)],
                   check=False)
    return output_path
def uce_loss_and_reg(alpha: torch.Tensor, y: torch.Tensor, beta_reg: float, reduction: str='sum') -> torch.Tensor:
    """Uncertain cross-entropy loss plus entropy regularization.

    Both terms are computed per-element (``reduction='none'``) and summed,
    then the requested reduction is applied once to the combined loss.
    """
    per_sample = uce_loss(alpha, y, reduction='none') \
        + entropy_reg(alpha, beta_reg, reduction='none')
    return loss_reduce(per_sample, reduction=reduction)
def add_extras(cfg, i, batch_norm=False):
layers = []
in_channels = i
flag = False
for (k, v) in enumerate(cfg):
if (in_channels != 'S'):
if (v == 'S'):
layers += [nn.Conv2d(in_channels, cfg[(k + 1)], kernel_size=(1, 3)[flag], stride=2, padding=1)]
else:
... |
class BaseInstrumenter(_BaseInstrumenter):
def register(self):
pass
def unregister(self):
pass
def get_registered(self):
return None
def run(self, cmd, globals=None, locals=None):
pass
def region_begin(self, module_name, function_name, file_name, line_number, code_obj... |
def get_file_path(*paths):
    """Resolve ``paths`` (joined with '/') to a file shipped inside this
    package, via ``pkg_resources``."""
    return pkg_resources.resource_filename(_package_name, '/'.join(paths))
class SolarPlant(BaseDataset):
def __init__(self, rootdir=None, num_columns=100):
super().__init__()
if (rootdir is None):
fdir = os.path.dirname(os.path.abspath(__file__))
merlion_root = os.path.abspath(os.path.join(fdir, '..', '..', '..'))
rootdir = os.path.join... |
class VectorFieldFreeModule(FiniteRankFreeModule):
Element = VectorFieldParal
def __init__(self, domain, dest_map=None):
from sage.manifolds.differentiable.scalarfield import DiffScalarField
self._domain = domain
if (dest_map is None):
dest_map = domain.identity_map()
... |
class PetDataset(Dataset):
def __init__(self, data_cfg, dictionary=None, transform=None, target_transform=None, stage='train'):
super(PetDataset, self).__init__()
self.data_cfg = data_cfg
self.dictionary = dictionary
self.transform = transform
self.target_transform = target_t... |
class FreeCommutativeAdditiveSemigroup(UniqueRepresentation, Parent):
def __init__(self, alphabet=('a', 'b', 'c', 'd')):
self.alphabet = alphabet
Parent.__init__(self, category=CommutativeAdditiveSemigroups())
def _repr_(self):
return ('An example of a commutative semigroup: the free com... |
def test_RecordArray_NumpyArray_four():
    """An empty-field RecordArray still converts to an RDataFrame column
    typed as an awkward Record."""
    empty_records = ak.contents.recordarray.RecordArray([], None, 10)
    frame = ak.to_rdataframe({'four': empty_records})
    column_type = str(frame.GetColumnType('four'))
    assert column_type.startswith('awkward::Record_')
def test_prod_two_funs():
    """Multiplying two parameters yields a Product over both operands."""
    left = optplan.Parameter()
    right = optplan.Parameter()
    product = left * right
    assert isinstance(product, optplan.Product)
    assert product.functions == [left, right]
_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '', '', '', '', '15', '', 'alex', '##andra', ',', '', '-', 't', 'shirt']
self.vocab_file = os.path.join(self.tmpdirna... |
def get_nnet(name, **kwargs):
    """Instantiate the neural network registered under ``name``.

    Raises:
        NotImplementedError: for any name other than the known variants.
    """
    # Each variant lives in its own module; import lazily so only the
    # selected one is loaded, then share a single construction site.
    if name == 'uvit':
        from libs.uvit import UViT
    elif name == 'uvit_t2i':
        from libs.uvit_t2i import UViT
    else:
        raise NotImplementedError(name)
    return UViT(**kwargs)
def gen_template_struct(struct_name, args, codeBody, speicalized=None, set_default=True, export_args=True):
code_gen = ''
code_gen += gen_template_head(args, set_default)
code = (export_template_args(args) + codeBody)
if (export_args is False):
code = codeBody
code_gen += gen_struct(struct_n... |
def _get_worker_env(worker_id, config, partitions, search):
workers = config.resource_info['worker']
worker_info = workers[worker_id]
num_workers = len(workers)
try:
parallax_log_level = os.environ['PARALLAX_LOG_LEVEL']
except:
parallax_log_level = logging.INFO
env = {'CUDA_VISIB... |
def test_event():
e1 = Event(0, None)
assert ((e1.time == 0) and (e1.priority == math.inf))
e2 = Event(5, None)
assert ((e2.time == 5) and (e2.priority == math.inf))
e3 = Event(5, None, 1)
assert ((e3.time == 5) and (e3.priority == 1))
assert (e1 < e2)
assert (e1 < e3)
assert (e3 < e... |
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
epoch_name = str(epoch)
checkpoint_paths = [(output_dir / ('checkpoint-%s.pth' % epoch_name))]
for checkpoint_path in checkpoint_paths:
to_save = {'model': model_with... |
class AutoTuner():
    """Skeleton auto-tuner: holds an SDFG and performs no optimization.

    Subclasses are expected to override ``optimize`` with a real search.
    """

    def __init__(self, sdfg: dace.SDFG) -> None:
        # SDFG under optimization, kept for subclasses to inspect.
        self._sdfg = sdfg

    def optimize(self, apply: bool=True, measurements: int=30) -> Dict[(Any, Any)]:
        """Return the (empty) mapping of applied optimizations."""
        return {}
def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, ignore_index=(- 100), avg_non_ignore=False, **kwargs):
if (pred.size(1) == 1):
assert (label[(label != ignore_index)].max() <= 1), 'For pred with shape [N, 1, H, W], its label must have at most 2 classes... |
def val_test_split(dataset, val_size=5000, batch_size=512, num_workers=5, pin_memory=False):
test_size = (len(dataset) - val_size)
(dataset_val, dataset_test) = data_utils.random_split(dataset, (val_size, test_size), generator=torch.Generator().manual_seed(42))
val_loader = data_utils.DataLoader(dataset_val... |
class ExperimentConfig(BaseConfig):
wandb: Any
steps: Steps
framework: str
loss: LossConfig
network: NetworkConfig
conv: ConvolutionConfig
net_weights: NetWeights
dynamics: DynamicsConfig
learning_rate: LearningRateConfig
annealing_schedule: AnnealingSchedule
gradient_accumul... |
def conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
in_channels = input.shape[1]
out_channels = grad_output.shape[1]
min_batch = input.shape[0]
grad_output = grad_outpu... |
def log_custom(new_meter_fn: Callable[([], Meter)], key: str, *args, priority: int=50, **kwargs):
    """Update meter ``key`` on every active aggregator, lazily creating
    the meter with ``new_meter_fn`` the first time the key appears."""
    for aggregator in get_active_aggregators():
        if key not in aggregator:
            # First sighting of this key on this aggregator: register it.
            aggregator.add_meter(key, new_meter_fn(), priority)
        aggregator[key].update(*args, **kwargs)
def vset(seq, idfun=None, as_list=True):
def _uniq_normal(seq):
d_ = {}
for s in seq:
if (s not in d_):
d_[s] = None
(yield s)
def _uniq_idfun(seq, idfun):
d_ = {}
for s in seq:
h_ = idfun(s)
if (h_ not in d_):
... |
def person_embed(speaker_ids, person_vec):
    """Look up speaker embeddings, padding absent speakers with zeros.

    Args:
        speaker_ids: Iterable of sequences of speaker indices; an id of
            ``-1`` means "no speaker" and maps to a zero vector.
        person_vec: Indexable collection of embedding rows (tensor or
            numpy array), one row per speaker.

    Returns:
        A FloatTensor of shape (batch, seq_len, embed_dim).
    """
    # Derive the pad width from the embeddings instead of the previously
    # hard-coded 100, so any embedding size works; fall back to 100 (the
    # historical value) when person_vec is empty.
    pad_dim = len(person_vec[0]) if len(person_vec) else 100
    zero_vec = [0] * pad_dim
    batch = []
    for turn in speaker_ids:
        batch.append([
            person_vec[int(i)].tolist() if i != -1 else list(zero_vec)
            for i in turn
        ])
    return torch.FloatTensor(batch)
def best_saving(working_dir, epoch, model, fusion_model, optimizer):
    """Checkpoint the current best models to ``<working_dir>/model_best.pt``."""
    checkpoint = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'fusion_model_state_dict': fusion_model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    torch.save(checkpoint, '{}/model_best.pt'.format(working_dir))
def train_epoch(logger, model, optimizer, scheduler, dataset, train=True):
model.train()
time_start = time.time()
for i in range((len(dataset) - cfg.transaction.horizon)):
optimizer.zero_grad()
batch = dataset[i].clone()
pdb.set_trace()
batch.node_degree_new = node_degree(bat... |
def cnnmodel(frame1_xyz, frame1_rgb, frame2_xyz, frame2_rgb):
frame1_rgb = tf.image.resize_images(frame1_rgb, [480, 640])
frame2_rgb = tf.image.resize_images(frame2_rgb, [480, 640])
(frame1_feat_rgb, _) = get_network('resnet50', frame1_rgb, weight_decay=1e-05, is_training=True)
(frame2_feat_rgb, _) = ge... |
def main(unused_argv):
tf.config.experimental.set_visible_devices([], 'GPU')
tf.config.experimental.set_visible_devices([], 'TPU')
config = configs.load_config(save_config=False)
dataset = datasets.load_dataset('test', config.data_dir, config)
(model, init_variables) = models.construct_mipnerf(rando... |
def test_shapefactor(backend):
mc = MockConfig(par_map={'shapefac1': {'paramset': unconstrained(name='shapefac1', is_scalar=False, n_parameters=1, inits=[0], bounds=[[0, 10]], fixed=False), 'slice': slice(0, 1)}, 'shapefac2': {'paramset': unconstrained(name='shapefac2', is_scalar=False, n_parameters=2, inits=[0, 0]... |
def _make_tuple_bunch(typename, field_names, extra_field_names=None, module=None):
if (len(field_names) == 0):
raise ValueError('field_names must contain at least one name')
if (extra_field_names is None):
extra_field_names = []
_validate_names(typename, field_names, extra_field_names)
t... |
def pre_release_work(patch=False):
default_version = get_version()
if (patch and default_version.is_devrelease):
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
if default_version.is_devrelease:
default_version = default_version.base_version... |
class InfinitePolynomialRing_dense(InfinitePolynomialRing_sparse):
def __init__(self, R, names, order):
if (not names):
names = ['x']
self._max = 0
InfinitePolynomialRing_sparse.__init__(self, R, names, order)
self._P = self._minP
def construction(self):
retur... |
def param_analysis_options(output_dir):
options = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
options['select'] = ['params', 'bytes']
options['order_by'] = 'params'
options['account_type_regexes'] = ['Variable']
if output_dir:
options['dump_to_file'] = os.path.join(output_dir, '... |
def dump_paths(Graph, rating_pair, maxLen, sample_size, fw_file):
for pair in rating_pair:
user_id = pair[0]
location_id = pair[1]
user_node = ('u' + user_id)
location_node = ('i' + location_id)
if (Graph.has_node(user_node) and Graph.has_node(location_node)):
min... |
def read_in_all_data(data_path=DATA_PATH):
    """Load the Spider training examples and table schemas.

    Args:
        data_path: Directory containing ``train_spider.json`` and
            ``tables.json``.

    Returns:
        Tuple ``(training_data, tables)`` where ``tables`` maps each
        ``db_id`` to its schema entry.
    """
    # Use context managers so the file handles are closed deterministically;
    # the previous json.load(open(...)) pattern leaked them until GC.
    with open(os.path.join(data_path, 'train_spider.json')) as f:
        training_data = json.load(f)
    with open(os.path.join(data_path, 'tables.json')) as f:
        tables_org = json.load(f)
    tables = {tab['db_id']: tab for tab in tables_org}
    return (training_data, tables)
def generate_length(args, tr_set, audio_extension):
for (i, s) in enumerate(tr_set):
if os.path.isdir(os.path.join(args.input_data, s.lower())):
s = s.lower()
elif os.path.isdir(os.path.join(args.input_data, s.upper())):
s = s.upper()
else:
assert NotImple... |
def sine_init(m):
    """SIREN-style initializer: uniform weights in ±sqrt(6/fan_in)/30.

    Modules without a ``weight`` attribute are left untouched.
    """
    if not hasattr(m, 'weight'):
        return
    with torch.no_grad():
        fan_in = m.weight.size(-1)
        bound = np.sqrt(6 / fan_in) / 30
        m.weight.uniform_(-bound, bound)
class AttentionModule(nn.Module):
def __init__(self, **kwargs):
super().__init__()
self.dim_v = kwargs['dim_v']
self.attendNode = AttendNodeModule()
self.attnAnd = AndModule()
def forward(self, attn, feat, query):
new_attn = self.attendNode(feat, query)
out = self... |
def pred(datasource, estimator_string, select, result_table, feature_columns, feature_column_names, feature_column_names_map, train_label_name, result_col_name, feature_metas={}, model_params={}, pred_params={}, save='', batch_size=1, pai_table=''):
estimator = import_model(estimator_string)
model_params.update... |
('dependency_label')
class DepLabelIndexer(TokenIndexer[int]):
def __init__(self, namespace: str='dep_labels') -> None:
self.namespace = namespace
self._logged_errors: Set[str] = set()
def count_vocab_items(self, token: Token, counter: Dict[(str, Dict[(str, int)])]):
dep_label = token.de... |
def test(epoch):
global best_acc
model.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for (batch_idx, (inputs, targets)) in enumerate(testloader):
inputs = inputs.to(device)
targets = targets.to(device)
outputs = model(inputs, None, N... |
('/api/v1.0/bird', methods=['POST'])
def create_bird():
if ((not request.json) or (not ('caption' in request.json))):
abort(400)
caption = request.json['caption']
t0 = time.time()
urls = generate(caption, wordtoix, ixtoword, text_encoder, netG, blob_service)
t1 = time.time()
response = {... |
def time_add(t1, t2, all_seconds=False):
    """Add two time values by converting both to seconds and back."""
    total_seconds = time_to_seconds(t1) + time_to_seconds(t2)
    return seconds_to_time(total_seconds, all_seconds)
def eval(opt):
model = CycleGANModel(opt)
dataset = Mydata.get_loader(opt)
(img_logs, weight_logs) = init_logs(opt)
model.load(weight_logs)
for (batch_id, data) in enumerate(dataset):
print('===> Epoch({}/{})'.format(batch_id, len(dataset)))
model.set_input(data)
model.test()... |
class Squares(object):
def __init__(self):
super(Squares, self).__init__()
self.template = 'inputs: {inputs}\noutput: {output}\nconst: {const}\naggrs: {aggrs}\nattrs: {attrs}\nbools:\nloc: {loc}\n'
def synthesize(self, inputs, output_ex, const='', aggrs='', attrs='', loc=0):
global argv,... |
def mrmr_regression(df, target_column, K, features=None, denominator='mean', only_same_domain=False, return_scores=False, show_progress=True):
if (features is None):
features = get_numeric_features(df=df, target_column=target_column)
if ((type(denominator) == str) and (denominator == 'mean')):
d... |
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Return the project-wide 'mmhuman3d' logger, optionally attached to
    ``log_file`` at ``log_level``."""
    return get_logger('mmhuman3d', log_file, log_level)
def read_dataset_t2t_format(data_dir, num_parallel_calls, mode, max_frames, max_symbols, t2t_problem_name, features_hparams_override=''):
class CustomProblem(SpeechRecognitionProblem):
def hparams(self, defaults, model_hparams):
super().hparams(defaults, model_hparams)
model_hparams.... |
def test_optimization_result_status_for_failed_optimization() -> None:
    """A result wrapping an Err must report is_err and not is_ok."""
    failed: OptimizationResult[object] = OptimizationResult(Err(_Whoops()), [])
    assert failed.is_err
    assert not failed.is_ok
class CodeGenForCausalLM(metaclass=DummyObject):
    """Import placeholder that raises unless the torch backend is present."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails fast with an informative error when torch is missing.
        requires_backends(self, ['torch'])
def register_Ns3HtOperationValue_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::HtOperation const &', 'value')])
cls.add_constructor([param('ns3::HtOperationValue const &', 'arg0')])
cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virt... |
class WeakHopper(ModifiableRoboschoolHopper):
def __init__(self):
RoboschoolForwardWalkerMujocoXML.__init__(self, 'hopper.xml', 'torso', action_dim=3, obs_dim=15, power=0.4)
def parameters(self):
parameters = super(WeakHopper, self).parameters
parameters.update({'power': self.power})
... |
_numpy_output(check_dtype=True)
def test_ufunc_logical_or_ff(A: dace.float32[10], B: dace.float32[10]):
return np.logical_or(A, B) |
_test(assert_ii_1=False)
def test_4_interface_to_2_banks_hbm_decoupled_interface():
return four_interface_to_2_banks(mem_type='HBM', decouple_interfaces=True) |
def get_class_name_lineno(method) -> Tuple[(str, int)]:
current_frame = inspect.currentframe()
for i in range(2):
assert (current_frame is not None)
current_frame = current_frame.f_back
assert (current_frame is not None)
class_name = current_frame.f_code.co_name
line_no = current_fra... |
def test_fortran_frontend_arr2loop_2d():
test_string = '\n PROGRAM index_offset_test\n implicit none\n double precision, dimension(5,3) :: d\n double precision, dimension(4) :: res\n CALL index_test_function(d,res)\n ... |
class RNet(nn.Module):
def __init__(self):
super(RNet, self).__init__()
self.features = nn.Sequential(OrderedDict([('conv1', nn.Conv2d(3, 28, 3, 1)), ('prelu1', nn.PReLU(28)), ('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)), ('conv2', nn.Conv2d(28, 48, 3, 1)), ('prelu2', nn.PReLU(48)), ('pool2', nn.Ma... |
.parametrize('observation_shape', [(100,)])
.parametrize('action_size', [2])
.parametrize('episode_length', [10])
def test_compare_discrete_action_diff_with_algos(observation_shape: Sequence[int], action_size: int, episode_length: int) -> None:
discrete_episode = create_episode(observation_shape, action_size, lengt... |
class SqueezeExcitation(nn.Module):
def __init__(self, n_channels, amplifying_ratio) -> None:
super(SqueezeExcitation, self).__init__()
self.n_channels = n_channels
self.amplifying_ratio = amplifying_ratio
n_channels_expanded = (self.amplifying_ratio * self.n_channels)
self.n... |
def read_in_samples_task1(dict_paragraphs, qrels, bm25_dir, no_hard_neg_docs):
samples = []
for query_id in qrels.keys():
print('now we start with this query {}'.format(query_id))
paragraph_id = 0
for paragraph in dict_paragraphs.get(query_id):
if dict_paragraphs.get(query_id... |
class TrunkConfig():
num_blocks: int = 48
sequence_state_dim: int = 1024
pairwise_state_dim: int = 128
sequence_head_width: int = 32
pairwise_head_width: int = 32
position_bins: int = 32
dropout: float = 0
layer_drop: float = 0
cpu_grad_checkpoint: bool = False
max_recycles: int ... |
class OidDataset(Dataset):
def __init__(self, main_dir, subset, version='v4', annotation_cache_dir='.', transform=None):
if (version == 'v4'):
metadata = '2018_04'
elif (version == 'challenge2018'):
metadata = 'challenge2018'
elif (version == 'v3'):
metada... |
def get_random_k_combinations(k: int, n_items: int, n_combinations: int, random_state: np.random) -> np.ndarray:
    """Draw ``n_combinations`` random size-``k`` subsets of range(n_items).

    Each row of the returned (n_combinations, k) array holds ``k``
    distinct indices sampled without replacement.
    """
    items = range(n_items)
    rows = []
    for _ in range(n_combinations):
        rows.append(random_state.choice(items, k, replace=False))
    return np.array(rows)
def get_filename_from_annotations(annotations, dataset):
if (dataset == 'VOC'):
filename = annotations[0]['annotation']['filename']
elif (dataset == 'COCO'):
filename = annotations[0]['filename']
elif (dataset == 'CUB'):
filename = annotations[0]['filename']
else:
raise E... |
_scheme(prefixes='s3://')
def load_from_ceph(filename, map_location=None, backend='petrel'):
allowed_backends = ['ceph', 'petrel']
if (backend not in allowed_backends):
raise ValueError(f'Load from Backend {backend} is not supported.')
if (backend == 'ceph'):
warnings.warn('CephBackend will ... |
def sce_criterion(logits, labels):
    """Mean sigmoid cross-entropy between ``logits`` and ``labels``."""
    per_element = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)
    return tf.reduce_mean(per_element)
def test_scalar_reduction():
gamma = 1.4
def eigenvalues(u: dace.float64[3]):
rho = u[0]
rhov = u[1]
E = u[2]
v = (rhov / rho)
p = ((E - ((0.5 * rhov) * v)) * (gamma - 1))
c = np.sqrt(((gamma * p) / rho))
ret = np.empty_like(u)
ret[0] = (v - c)
... |
def register_types(module):
root_module = module.get_root()
module.add_class('Address', import_from_module='ns.network')
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
module.add_class('AsciiTraceHelper', import_from_module='ns.networ... |
class TestREPS(TfGraphTestCase):
.large
def test_reps_cartpole(self):
with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
env = GarageEnv(gym.make('CartPole-v0'))
policy = CategoricalMLPPolicy(env_spec=env.spec, hidden_sizes=[32, 32])
baseline = LinearFeatu... |
def _get_all_k_combinations_rec(offset: int, k: int, combination: deque, original_size: int, combinations: deque):
if (k == 0):
combinations.append(deepcopy(combination))
return
for i in range(offset, ((original_size - k) + 1), 1):
combination.append(i)
_get_all_k_combinations_re... |
class YaLMWindowService(LocalWindowService):
def __init__(self, service: TokenizerService):
super().__init__(service)
def tokenizer_name(self) -> str:
return 'Yandex/yalm'
def max_sequence_length(self) -> int:
return YaLMTokenizer.MAX_SEQUENCE_LENGTH
def max_request_length(self) ... |
class AggregateSkeletonFragmentsOperator(OperatorBase):
def __init__(self, fragments_path: str, output_path: str, name: str='aggregate-skeleton-fragments'):
super().__init__(name=name)
self.fragments_storage = CloudFiles(fragments_path)
self.output_storage = CloudFiles(output_path)
def _... |
class RemoveSelfLoops(BaseTransform):
def __call__(self, data: Data) -> Data:
if (hasattr(data, 'edge_index') and (data.edge_index is not None)):
(data.edge_index, _) = remove_self_loops(data.edge_index)
if hasattr(data, 'adj_t'):
data.adj_t = data.adj_t.remove_diag()
... |
def init():
    """Animation init callback: attach every patch to the axes and return
    the artists that will be animated (car and drone)."""
    for patch in (car, drone, obstacle1, obstacle2, obstacle3):
        ax.add_patch(patch)
    return (car, drone)
def Dynamics_LC_Filter(para_LC, i_ld0, i_lq0, v_od0, v_oq0, v_id0, v_iq0, i_od0, i_oq0, w0):
r_f = para_LC['r_f']
L_f = para_LC['L_f']
C_f = para_LC['C_f']
di_ld = (((((- r_f) / L_f) * i_ld0) + (w0 * i_lq0)) + ((1 / L_f) * (v_id0 - v_od0)))
di_lq = (((((- r_f) / L_f) * i_lq0) - (w0 * i_ld0)) + ((1 /... |
(params=['csr', 'csc', 'coo', 'bsr'])
def X_64bit(request):
    """Fixture: yield a random 20x10 sparse matrix (format taken from the
    fixture parameter) with its index arrays upcast to int64."""
    matrix = sp.rand(20, 10, format=request.param)
    for attr in ('indices', 'indptr', 'row', 'col'):
        if hasattr(matrix, attr):
            # Force 64-bit indices to exercise large-index code paths.
            setattr(matrix, attr, getattr(matrix, attr).astype('int64'))
    yield matrix
def setup_test_equal_bounds():
np.random.seed(0)
x0 = np.random.rand(4)
lb = np.array([0, 2, (- 1), (- 1.0)])
ub = np.array([3, 2, 2, (- 1.0)])
i_eb = (lb == ub)
def check_x(x, check_size=True, check_values=True):
if check_size:
assert (x.size == 4)
if check_values:
... |
def prepare_data(dataset):
dataloader = DataLoader(dataset, batch_size=16, shuffle=True, pin_memory=True, timeout=60, num_workers=1, drop_last=True)
sentences = []
for (bix, data) in tqdm(enumerate(dataloader)):
for i in range(len(data[0])):
input = data[0][i]
label = data[1]... |
def is_disjoint(T1, T2):
    """Return True when the two tableaux share no equal entry, ignoring
    positions where both entries are negative."""
    for row in range(T1.nrows()):
        for col in range(T1.ncols()):
            both_negative = (T1[(row, col)] < 0) and (T2[(row, col)] < 0)
            if both_negative:
                # Negative cells on both sides never count as a clash.
                continue
            if T1[(row, col)] == T2[(row, col)]:
                return False
    return True
class _SearchStatistics():
_logger = logging.getLogger(__name__)
def __init__(self):
self._backend: (None | sb.AbstractStatisticsBackend) = self._initialise_backend()
self._output_variables: dict[(str, sb.OutputVariable)] = {}
self._variable_factories: dict[(str, ovf.ChromosomeOutputVari... |
def rbf_mmd2_and_ratio(X, Y, sigma=1, biased=True):
    """Single-bandwidth convenience wrapper around the mixture-RBF
    MMD^2-and-ratio computation."""
    return mix_rbf_mmd2_and_ratio(X, Y, sigmas=[sigma], biased=biased)
def file_exists(filepath):
    """Check existence of either a local path or a ``gs://`` object."""
    if not filepath.startswith('gs://'):
        return os.path.exists(filepath)
    # Google Cloud Storage: look the blob up in its bucket.
    (bucket_name, file_name) = split_gcs_bucket_and_filepath(filepath)
    return gcs_bucket(bucket_name).blob(file_name).exists()
_mock.Mocker(kw='mock')
def test_parse_results_amz(**kwargs):
mock_file = open('tests/transfer/mocks/mock_parse_results_amz', 'rb')
mock_body = mock_file.read()
mock_file.close()
mock_query = 'red basketball shoes'
query = mock_query.replace(' ', '+')
kwargs['mock'].get(f' content=mock_body)
... |
def world_extract(x, fs, f0min, f0max):
x = (x * np.iinfo(np.int16).max)
x = np.array(x, dtype=np.float64)
x = low_cut_filter(x, fs)
(f0, time_axis) = pw.harvest(x, fs, f0_floor=f0min, f0_ceil=f0max, frame_period=MCEP_SHIFT)
sp = pw.cheaptrick(x, f0, time_axis, fs, fft_size=MCEP_FFTL)
ap = pw.d4... |
def find_span_from_text(context, tokens, answer):
assert (answer in context)
offset = 0
spans = []
scanning = None
process = []
for (i, token) in enumerate(tokens):
token = token.replace(' ##', '').replace('##', '')
while (context[offset:(offset + len(token))] != token):
... |
def test_RegularArray_RecordArray_NumpyArray():
v2a = ak.contents.regulararray.RegularArray(ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']), 3)
resultv2 = v2a._carry(ak.index.Index(np.array([0], np.int64)), False)
assert (to_l... |
def train():
if (args.run_mode == 'debug'):
print_iteration = 10
save_image_iteration = 10
add_scalar_iteration = 1
add_histogram_iteration = 10
else:
print_iteration = 10
add_scalar_iteration = 100
save_image_iteration = 1000
add_histogram_iterati... |
def rename_state_dict_keys(state_dict):
    """Return a new OrderedDict with every 'model.' occurrence stripped
    from the keys (values and ordering are preserved)."""
    return OrderedDict(
        (str(key).replace('model.', ''), value)
        for (key, value) in state_dict.items()
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.