code
stringlengths
101
5.91M
class EventMention(Mention): def __init__(self, doc_id, sent_id, tokens_numbers, tokens, mention_str, head_text, head_lemma, is_singleton, is_continuous, coref_chain): super(EventMention, self).__init__(doc_id, sent_id, tokens_numbers, tokens, mention_str, head_text, head_lemma, is_singleton, is_continuous,...
def _simple_fixed_content(a, content, t, p, k, equality=False): n = len(a) if (t > n): if equality: if (n == p): (yield a) elif (not (n % p)): (yield a) else: r = list(range(a[((t - p) - 1)], k)) for j in r: if (content[j] >...
# NOTE(review): the decorator appears truncated by extraction — presumably
# `@pytest.mark.parametrize(...)`; confirm against the original file.
.parametrize('loss_class', [IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss])
def test_iou_type_loss_zeros_weight(loss_class):
    # With an all-zero per-sample weight, every IoU-family loss must reduce to 0.
    pred = torch.rand((10, 4))
    target = torch.rand((10, 4))
    weight = torch.zeros(10)
    loss = loss_class()(pred, target, weight)
    assert (loss == 0.0)
.parametrize('n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, decay_function, click_model, eta, behavior_policy_function, is_factorizable, reward_function, return_pscore_item_position, description', valid_input_of_obtain_batch_bandit_feedback) def test_synthetic_slate_usin...
def plot_gallery(images, titles, h, w, n_row=3, n_col=4): plt.figure(figsize=((1.8 * n_col), (2.4 * n_row))) plt.subplots_adjust(bottom=0, left=0.01, right=0.99, top=0.9, hspace=0.35) for i in range((n_row * n_col)): plt.subplot(n_row, n_col, (i + 1)) plt.imshow(images[i].reshape((h, w)), cm...
def test_cora_load_str() -> None:
    """Cora loaded with str_node_ids=True must expose string node ids.

    Every node id must be a ``str`` and the subjects index must match the
    node set exactly.
    """
    g, subjects = Cora().load(str_node_ids=True)
    # isinstance instead of `type(...) == str` (idiomatic, accepts subclasses).
    assert isinstance(g.nodes()[0], str)
    assert all(isinstance(n, str) for n in g.nodes())
    assert set(subjects.index) == set(g.nodes())
# NOTE(review): decorator name looks truncated — presumably
# `@add_start_docstrings(AutoModelWithLMHead.__doc__)`; confirm upstream.
_start_docstrings(AutoModelWithLMHead.__doc__)
def modelWithLMHead(*args, **kwargs):
    # Thin convenience wrapper: forwards everything to
    # AutoModelWithLMHead.from_pretrained and returns the loaded model.
    return AutoModelWithLMHead.from_pretrained(*args, **kwargs)
def create_model(args, data_shape, regularization_fns): hidden_dims = tuple(map(int, args.dims.split(','))) strides = tuple(map(int, args.strides.split(','))) if args.multiscale: model = odenvp.ODENVP((args.batch_size, *data_shape), n_blocks=args.num_blocks, intermediate_dims=hidden_dims, nonlineari...
# NOTE(review): the two registration lines look truncated by extraction —
# presumably `@register_kl(Poisson, Bernoulli)` / `@register_kl(Poisson, Binomial)`.
_kl(Poisson, Bernoulli)
_kl(Poisson, Binomial)
def _kl_poisson_infinity(p, q):
    # Poisson support is unbounded while Bernoulli/Binomial support is finite,
    # so the KL divergence is +inf; return +inf shaped like p.rate.
    return _infinite_like(p.rate)
class TestCategoricalCNNPolicyImageObs(TfGraphTestCase): def setup_method(self): super().setup_method() self.env = GarageEnv(DummyDiscretePixelEnv(), is_image=True) self.sess.run(tf.compat.v1.global_variables_initializer()) self.env.reset() .parametrize('filters, strides, padding...
def extract_keyphrases(embedding_distrib, ptagger, raw_text, N, lang, beta=0.55, alias_threshold=0.7):
    """POS-tag *raw_text*, wrap it for the given language, and run MMR phrase extraction.

    Returns whatever MMRPhrase yields for the top-N candidate phrases.
    """
    pos_tagged = ptagger.pos_tag_raw_text(raw_text)
    return MMRPhrase(
        embedding_distrib,
        InputTextObj(pos_tagged, lang),
        N=N,
        beta=beta,
        alias_threshold=alias_threshold,
    )
class TextDetector(object): def __init__(self, model): self.model = model model.eval() def detect1(self, image): with torch.no_grad(): (pointer_pred, dail_pred, text_pred, pred_recog, std_points) = self.model.forward_test(image) image = image[0].data.cpu().numpy() ...
def test_tokenize_replay_buffer() -> None: tokenizer = FloatTokenizer(num_bins=100) episode1 = create_episode(observation_shape=(100,), action_size=2, length=100) episode2 = create_episode(observation_shape=(100,), action_size=2, length=100) replay_buffer = ReplayBuffer(InfiniteBuffer(), episodes=[episo...
_bpe('fastbpe') class fastBPE(object): def add_args(parser): parser.add_argument('--bpe-codes', type=str, help='path to fastBPE BPE') def __init__(self, args): if (args.bpe_codes is None): raise ValueError('--bpe-codes is required for --bpe=fastbpe') codes = file_utils.cached...
def pytorch_apply_second_moment_correction(quantized_model: Any, core_config: CoreConfig, representative_data_gen: Callable, graph: common.Graph): model = copy.deepcopy(quantized_model) set_model(model) for (name, module) in model.named_modules(): if (len(graph.find_node_by_name(name)) > 0): ...
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = PhobertTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab = ['', 'i', 'I', '', 'r', ''] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ['#version: ...
def tag_normalize(tag):
    """Normalize a raw tag string, then collapse duplicate words via unique_word.

    Strips quote/paren characters, turns '/' and '-' into spaces and rewrites
    ampersands as 'n'.
    """
    # Order matters: "'n'" must be removed before the bare quote, and the
    # spaced ' & ' must be rewritten before the bare '&'.
    substitutions = (
        ("'n'", ''),
        ("'", ''),
        ('(', ''),
        (')', ''),
        ('/', ' '),
        ('-', ' '),
        (' & ', 'n'),
        ('&', 'n'),
    )
    for old, new in substitutions:
        tag = tag.replace(old, new)
    return unique_word(tag)
def str2bool(v):
    """Parse *v* into a bool, for use as an argparse type.

    Actual bools pass through unchanged; otherwise common textual truthy /
    falsy spellings are matched case-insensitively.

    Raises:
        argparse.ArgumentTypeError: when the string matches neither set.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in {'yes', 'true', 't', 'y', '1'}:
        return True
    if lowered in {'no', 'false', 'f', 'n', '0'}:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
class MIDRAN(): def __init__(self, config): self.trainingImagePath = config['trainingImagePath'] self.checkpointPath = config['checkpointPath'] self.logPath = config['logPath'] self.testImagesPath = config['testImagePath'] self.resultDir = config['resultDir'] self.mod...
def consolidate_ckpt(src_path, dst_path): print('Loading model') auto_upgrade(src_path) src_model = AutoModelForCausalLM.from_pretrained(src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) src_tokenizer = AutoTokenizer.from_pretrained(src_path) src_model.save_pretrained(dst_path) src_to...
(config_path='../config', config_name='text_classification') def main(config): try: (config, logger) = startup(config) (train_loader, test_loader, train_splits, vocab_size) = get_text_loaders(config) tb_logger = SummaryWriter(log_dir=f'./{config.job_name}/{config.timestamp}') if conf...
def tensor2map(var): mask = np.argmax(var.data.cpu().numpy(), axis=0) colors = get_colors() mask_image = np.ones(shape=(mask.shape[0], mask.shape[1], 3)) for class_idx in np.unique(mask): mask_image[(mask == class_idx)] = colors[class_idx] mask_image = mask_image.astype('uint8') return I...
class PythonStoreTest(TestCase):
    """Runs the c10d python-store round-trip helper against MyPythonStore."""

    def setUp(self):
        # Zero-arg super() is equivalent to the old explicit two-arg form.
        super().setUp()

    def test_set_get(self):
        # The actual set/get assertions live in the _test_python_store helper.
        c10d._test_python_store(MyPythonStore())
def get_metrics_list(sd): metrics_tuple_set = set([tuple(sorted(list(x['scores'].keys()))) for d in sd.values() for x in d['sys_summs'].values()]) assert (len(metrics_tuple_set) == 1), (metrics_tuple_set, 'all system summary score dicts should have the same set of all_metrics') metrics_list = list(list(metr...
def register_Ns3EventGarbageCollector_methods(root_module, cls):
    """Register ns3::EventGarbageCollector bindings on *cls*.

    Adds the default constructor, Track(EventId), and the copy constructor,
    in that order. *root_module* is accepted for signature uniformity with
    the sibling register_* functions.
    """
    cls.add_constructor([])
    cls.add_method('Track', 'void', [param('ns3::EventId', 'event')])
    cls.add_constructor([param('ns3::EventGarbageCollector const &', 'arg0')])
def idx_list_converter(split_line, idx):
    """Select fields from *split_line* by a single index or an index iterable.

    Always returns a list, so callers can treat both cases uniformly.
    """
    indices = [idx] if isinstance(idx, int) else idx
    return [split_line[i] for i in indices]
class CoxeterGroups(Category_singleton): def super_categories(self): return [GeneralizedCoxeterGroups()] def additional_structure(self): return None Finite = LazyImport('sage.categories.finite_coxeter_groups', 'FiniteCoxeterGroups') Algebras = LazyImport('sage.categories.coxeter_group_al...
def hypotest(pdf, data):
    """Run a mu=1 hypothesis test on *pdf*/*data* with all extras enabled.

    Returns pyhf's result including tail probabilities and the expected
    (set) values, using the model's suggested initialization and bounds.
    """
    init_pars = pdf.config.suggested_init()
    par_bounds = pdf.config.suggested_bounds()
    return pyhf.infer.hypotest(
        1.0,
        data,
        pdf,
        init_pars,
        par_bounds,
        return_tail_probs=True,
        return_expected=True,
        return_expected_set=True,
    )
def calculate_loss(rule_model, criterion, info, gt): (pred, mask) = get_prediction(rule_model, info) n_valid_entries = torch.sum((mask.view((- 1)) != 0)) loss = criterion(pred, gt) loss = loss.masked_fill((mask == 0), 0) loss = (torch.sum(loss) / n_valid_entries) mean_highest_success_correct = g...
class Solarize(DauphinTransform): value_range = (0, 256) def __init__(self, name=None, prob=1.0, level=0): super().__init__(name, prob, level) def transform(self, pil_img, label, **kwargs): degree = categorize_value(self.level, self.value_range, 'float') return (ImageOps.solarize(pil...
def get_src_findex_by_pad(s, S, padding_mode, align_corners): if (padding_mode == 'zero'): return get_src_findex_with_zero_pad(s, S) elif (padding_mode == 'reflect'): if align_corners: return get_src_findex_with_reflect_pad(s, S, True) else: sf = get_src_findex_wi...
def save_deblur_checkpoints(file_path, epoch_idx, deblurnet, deblurnet_solver, Best_Img_PSNR, Best_Epoch): print(('[INFO] %s Saving checkpoint to %s ...\n' % (dt.now(), file_path))) checkpoint = {'epoch_idx': epoch_idx, 'Best_Img_PSNR': Best_Img_PSNR, 'Best_Epoch': Best_Epoch, 'deblurnet_state_dict': deblurnet....
def compute_rhs(ui_hat, bh_hat):
    # Assemble a right-hand side from the spectral velocity coefficients
    # ui_hat, writing the result into bh_hat (which is also returned).
    # Relies on module-level work arrays and transforms (ui, uiuj, W1, v, ...).
    global ui, uiuj, uiuj_hat, V1, bh_hat0
    bh_hat.fill(0)
    # Transform velocity to physical space, form the outer product u_i u_j,
    # then transform the product back to spectral space.
    ui = W1.backward(ui_hat, ui)
    uiuj = outer(ui, ui, uiuj)
    uiuj_hat = uiuj.forward(uiuj_hat)
    bi_hat = bh_hat[0]
    # Inner product of the test function v with div(u_i u_j); the result is
    # written in place into bh_hat[0] via output_array.
    bi_hat = inner(v, div(uiuj_hat), output_array=bi_hat)
    return bh_hat
def log_mel_filterbank_from_raw(raw_audio: Tensor, *, in_spatial_dim: Dim, out_dim: Dim, sampling_rate: int=16000, window_len: float=0.025, step_len: float=0.01, n_fft: Optional[int]=None, log_base: Union[(int, float)]=10) -> Tuple[(Tensor, Dim)]: if (raw_audio.feature_dim and (raw_audio.feature_dim.dimension == 1)...
_params(name='unroll') class UnrollCodeGen(TargetCodeGenerator): target_name = 'unroll' title = 'Unrolled' language = 'cpp' def __init__(self, frame_codegen: DaCeCodeGenerator, sdfg: dace.SDFG): self._frame = frame_codegen self._dispatcher = frame_codegen.dispatcher dispatcher = ...
def test_sample_missing_data():
    """A sample whose data list is empty must be rejected as an invalid spec."""
    bad_spec = {
        'channels': [
            {
                'name': 'channel',
                'samples': [
                    {'name': 'sample', 'data': [], 'modifiers': []},
                ],
            }
        ]
    }
    with pytest.raises(pyhf.exceptions.InvalidSpecification):
        pyhf.Model(bad_spec)
class BaseOptions(): def __init__(self): self.initialized = False def initialize(self, parser): parser.add_argument('--root', type=str, default='datasets', help='path to dataset') parser.add_argument('--dataset', type=str, default='kitti', help='dataset name') parser.add_argument...
_module() class HRNet(nn.Module): blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} def __init__(self, extra, in_channels=3, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=False, with_cp=False, zero_init_residual=False): super(HRNet, self).__init__() self.extr...
class Market1501(BaseImageDataset): dataset_dir = 'market1501' def __init__(self, root='/home/haoluo/data', verbose=True, **kwargs): super(Market1501, self).__init__() self.dataset_dir = osp.join(root, self.dataset_dir) self.train_dir = osp.join(self.dataset_dir, 'train') self.va...
.parametrize('seed', [313]) .parametrize('test', [True]) .parametrize('graph_ref, graph_act', [(resnet_ref, small_bn_resnet)]) def test_fused_batch_normalization(seed, test, graph_ref, graph_act): from .graph_converter_test_utils import structure_tester, value_tester np.random.seed(seed) rng = np.random.Ran...
# NOTE(review): the decorator name looks truncated by extraction — presumably
# something like `@optplan.register_node_type()`; confirm against the original.
_node_type()
class CompositeParametrization(optplan.Parametrization):
    # Schema node describing a parametrization built from sub-parametrizations.
    type = schema_utils.polymorphic_model_type('parametrization.composite')
    # References to the child parametrizations being composed.
    param_list = types.ListType(optplan.ReferenceType(optplan.Parametrization))
class BertTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = BertToke...
def test_point_confusion_matrix(expected, observed):
    """The fixture pair must produce the counts (3, 4, 2, 1), in order."""
    result = point_confusion_matrix(expected, observed)
    np.testing.assert_array_equal(np.array(result), np.array((3, 4, 2, 1)))
class TanhShrink(Module): def __init__(self): super(TanhShrink, self).__init__() self.tanh = Tanh() def updateOutput(self, input): th = self.tanh.updateOutput(input) self.output.resize_as_(input).copy_(input) self.output.add_((- 1), th) return self.output def ...
class BaseDataLoader:
    """No-op base class for data loaders; subclasses override the hooks."""

    def __init__(self):
        pass

    def initialize(self):
        """One-time setup hook; the base implementation does nothing."""
        pass

    def load_data(self):
        # BUG FIX: `self` was missing from the signature, so calling
        # load_data() on an instance raised TypeError. Base implementation
        # yields no data.
        return None
class StatsTest(unittest.TestCase): def setUp(self): self._integers = list(range(1, 50)) self._floats = [float(x) for x in self._integers] self._floats2 = [(float(x) + 2.31) for x in self._integers] self._mixed = [(x if ((x % 2) == 0) else (float(x) + 4.5)) for x in self._integers] ...
def se_resnet152(num_classes, loss='softmax', pretrained=True, **kwargs): model = SENet(num_classes=num_classes, loss=loss, block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, last_stride=2, fc_dims=Non...
def create_dist(latent_flat):
    """Transpose a list of latent vectors into per-dimension value lists.

    Given N latents of length L, returns a list of L lists with N scalars
    each (values of every latent at that dimension).
    """
    n_dims = latent_flat[0].shape[0]
    per_dim_values = [[] for _ in range(n_dims)]
    for latent in tqdm(latent_flat):
        for dim, bucket in enumerate(per_dim_values):
            bucket.append(latent[dim])
    return per_dim_values
def batch_all_triplet_loss(labels, embeddings, margin, squared=False): pairwise_dist = _pairwise_distances(embeddings, squared=squared) anchor_positive_dist = pairwise_dist.unsqueeze(2) anchor_negative_dist = pairwise_dist.unsqueeze(1) triplet_loss = ((anchor_positive_dist - anchor_negative_dist) + marg...
class AnnotatedSkipQuantModel(torch.nn.Module): def __init__(self, qengine): super().__init__() self.qconfig = torch.quantization.get_default_qconfig(qengine) self.sub = QuantWrapper(InnerModule()) self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) self.fc.qconfig = None ...
def train(train_loader, model, criterion, optimizer, epoch, args): batch_time = AverageMeter('Time', ':6.3f') data_time = AverageMeter('Data', ':6.3f') losses = AverageMeter('Loss', ':.4e') top1 = AverageMeter('', ':6.2f') top5 = AverageMeter('', ':6.2f') progress = ProgressMeter(len(train_loade...
class ComputeHistogramForBlobs(NetModifier): def __init__(self, blobs, logging_frequency, num_buckets=30, lower_bound=0.0, upper_bound=1.0, accumulate=False): self._blobs = blobs self._logging_frequency = logging_frequency self._accumulate = accumulate if self._accumulate: ...
class Bernoulli(ExponentialFamily): arg_constraints = {'probs': constraints.unit_interval} support = constraints.boolean has_enumerate_support = True _mean_carrier_measure = 0 def __init__(self, probs=None, logits=None, validate_args=None): if ((probs is None) == (logits is None)): ...
class Cifar10Data(Dataset): def __init__(self, data_dir=None): super(Cifar10Data, self).__init__('cifar10', 32, 32, data_dir=data_dir, queue_runner_required=True, num_classes=10) def read_data_files(self, subset='train'): assert self.data_dir, 'Cannot call `read_data_files` when using synthetic ...
class SequenceField(Field[DataArray]):
    """A Field whose value is a sequence; subclasses report its length."""

    def sequence_length(self) -> int:
        """Return the number of elements in this field; must be overridden."""
        raise NotImplementedError
def test_clean_fix_missing(df_dates: pd.DataFrame) -> None: df_clean_minimum = clean_date(df_dates, 'date', fix_missing='minimum') df_clean_empty = clean_date(df_dates, 'date', fix_missing='empty') df_check_minimum = df_dates.copy() df_check_minimum['date_clean'] = ['1996-07-10 15:08:56', '2003-09-25 10...
def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')): ret = Forward() lastExpr = (baseExpr | ((lpar + ret) + rpar)) for (i, operDef) in enumerate(opList): (opExpr, arity, rightLeftAssoc, pa) = (operDef + (None,))[:4] termName = (('%s term' % opExpr) if (arity < 3) else...
class MM_reg(atomic_reg): OP_NAME = 'MM' _fields_ = [('cmd_short', ctypes.c_uint64, 1), ('op_code', ctypes.c_uint64, 16), ('cmd_id_dep', ctypes.c_uint64, 23), ('dbg_mode', ctypes.c_uint64, 1), ('tsk_typ', ctypes.c_uint64, 4), ('tsk_eu_typ', ctypes.c_uint64, 5), ('opt_rq', ctypes.c_uint64, 1), ('tsk_opd_num', ct...
class CharadesProcessor(): def __init__(self): super(CharadesProcessor, self).__init__() self.idx_counter = 0 def reset_idx_counter(self): self.idx_counter = 0 def process_data(self, data, charades, scope): results = [] for line in tqdm(data, total=len(data), desc='pr...
def main(unused_args): if (not gfile.Exists(FLAGS.input)): print((("Input graph file '" + FLAGS.input) + "' does not exist!")) return (- 1) known_modes = ['round', 'quantize', 'eightbit', 'weights', 'test', 'weights_rounded'] if (not any(((FLAGS.mode in s) for s in known_modes))): pr...
class InputFeatures(object): def __init__(self, unique_id, entities, example_index, doc_span_index, word_ids, word_segment_ids, word_attention_mask, placeholder_position_ids, entity_position_ids, labels): self.unique_id = unique_id self.entities = entities self.example_index = example_index ...
def entropy_surrogate(estimator, samples):
    """Surrogate objective whose gradient w.r.t. *samples* estimates the entropy gradient.

    The negated score -dlog_q is frozen with stop_gradient so only *samples*
    receives gradients through the product.
    """
    score = -estimator.compute_gradients(samples)
    weighted = tf.stop_gradient(score) * samples
    return tf.reduce_mean(tf.reduce_sum(weighted, axis=-1))
def compute_link_entropy(G, a, b):
    """Self-information, -log2 p, of a link between nodes *a* and *b*.

    p is the probability that at least one of b's in-edges attaches to a:
    1 - prod_{i < kb} (m - ka - i) / (m - i), with m = nnz/2 total edges,
    ka/kb the in-degrees of a and b.
    """
    deg_a = int(G.indeg_vec[a])
    deg_b = int(G.indeg_vec[b])
    n_edges = G.csr.nnz / 2
    offsets = np.arange(deg_b)
    # Probability that none of b's deg_b edge slots hits node a.
    miss_prob = np.prod((n_edges - deg_a - offsets) / (n_edges - offsets))
    return -np.log2(1.0 - miss_prob)
def load_examples_agn(path): topics = [' politics', ' sports', ' business', ' technology'] label_path = '/gscratch/zlab/swj0419/knnlm/data/label_word/datasets/agnews/label_names_kb.txt' label2synonym = load_label(label_path) examples = [] with open(path) as fp: reader = csv.DictReader(fp) ...
class DLDeviceType(ctypes.c_int):
    """ctypes mirror of the DLPack DLDeviceType device-kind codes."""
    # Values must match the DLPack C header; do not renumber.
    kDLCPU = 1
    kDLGPU = 2
    kDLCPUPinned = 3
    kDLOpenCL = 4
    kDLVulkan = 7
    kDLMetal = 8
    kDLVPI = 9
    kDLROCM = 10
    kDLExtDev = 12
def test_add_call_for_rollback(method_mock, variable_reference_mock, default_test_case): def side_effect(tc, f, p, callee=None): tc.add_statement(stmt.IntPrimitiveStatement(tc, 5), position=p) tc.add_statement(stmt.IntPrimitiveStatement(tc, 5), position=p) tc.add_statement(stmt.IntPrimitiveS...
def r_cond_without_not3(t):
    """Production rule expanding to the constant 'rightIsClear' condition token.

    *t* is accepted for interface uniformity with sibling rules but unused.
    """
    def expand(k, n):
        # Ignores both arguments; always yields the same single token.
        return ['rightIsClear']

    return [('cond_without_not', expand)]
def get_daily_ci_runs(token, num_runs=7): headers = None if (token is not None): headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'} workflow_id = '636036' url = f' url += f'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}' re...
class ConstMember(Member): def __init__(self, type_ref, name, value_str): super(ConstMember, self).__init__(type_ref, name) if (not type_ref.is_const_type()): raise TypeError("Constant '{}' from line {} must be one of {}. '{}' found.".format(name, type_ref.lineno, CONST_TYPES, type_ref.n...
def svc_classify(x, y, search): kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=None) accuracies = [] for (train_index, test_index) in kf.split(x, y): (x_train, x_test) = (x[train_index], x[test_index]) (y_train, y_test) = (y[train_index], y[test_index]) if search: ...
class DataIterator(): def __init__(self, mode, data, batch_size=128, neg_sample=1, all_items=None, items_usr_clicked=None, shuffle=True): self.mode = mode self.data = data self.datasize = data.shape[0] self.neg_count = neg_sample self.batch_size = batch_size self.item...
def _findLine(comp, fileLines): c = 0 found = [] for line in fileLines: if (comp in line): found.append(c) c += 1 return found
class SearchBPEtoWords(Job):
    """Job that converts BPE-encoded search output back into plain words."""

    def __init__(self, search_output_bpe, script=Path('scripts/search-bpe-to-words.py')):
        self.search_output_bpe = search_output_bpe
        self.script = script
        # Target file the conversion script writes into.
        self.out = self.output_path('search_output.words')

    def run(self):
        # Placeholders are resolved by Job.sh from this job's attributes.
        self.sh('python3 {script} {search_output_bpe} > {out}')
class KleeMinty(Benchmark): params = [methods, [3, 6, 9]] param_names = ['method', 'dimensions'] def setup(self, meth, dims): (self.c, self.A_ub, self.b_ub, self.xf, self.obj) = klee_minty(dims) self.fun = None def time_klee_minty(self, meth, dims): (method, options) = meth ...
def test_montage_fill_gray(): (n_images, n_rows, n_cols) = (3, 2, 3) arr_in = np.arange(((n_images * n_rows) * n_cols), dtype=float) arr_in = arr_in.reshape(n_images, n_rows, n_cols) arr_out = montage(arr_in, fill=0) arr_ref = np.array([[0.0, 1.0, 2.0, 6.0, 7.0, 8.0], [3.0, 4.0, 5.0, 9.0, 10.0, 11.0...
def cross_entropy_torch(x, y):
    """Mean cross-entropy of logits *x* against integer targets *y*.

    Args:
        x: (N, C) tensor of unnormalized logits.
        y: length-N sequence of target class indices.

    Returns:
        Scalar tensor, -mean(log softmax(x)[i, y[i]]).
    """
    # FIXES: F.log_softmax(dim=-1) is numerically stabler than
    # log(softmax(.)) and removes the deprecated implicit-dim softmax call;
    # indexing the tensor directly (instead of rebuilding one with
    # torch.tensor from Python floats) keeps the result on the autograd
    # graph and vectorizes the per-row loop.
    log_probs = F.log_softmax(x, dim=-1)
    picked = log_probs[torch.arange(len(y)), list(y)]
    return -picked.sum() / len(y)
def parse_task(domain_pddl, task_pddl): (domain_name, domain_requirements, types, type_dict, constants, predicates, predicate_dict, functions, actions, axioms) = parse_domain_pddl(domain_pddl) (task_name, task_domain_name, task_requirements, objects, init, goal, use_metric) = parse_task_pddl(task_pddl, type_dic...
class Semantic_loss_functions(object): def __init__(self): print('semantic loss functions initialized') def dice_coef(self, y_true, y_pred): y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = K.sum((y_true_f * y_pred_f)) return (((2.0 * intersection)...
def haar_like_feature_coord(width, height, feature_type=None):
    """Coordinates and type labels of the Haar-like features in a window.

    Validates *feature_type*, delegates per-type generation to the wrapper,
    and stacks the per-type results into two flat arrays.
    """
    coords = []
    type_labels = []
    for feat_t in _validate_feature_type(feature_type):
        coord, ftype = haar_like_feature_coord_wrapper(width, height, feat_t)
        coords.append(coord)
        type_labels.append(ftype)
    return np.concatenate(coords), np.hstack(type_labels)
# NOTE(review): looks like a dataclass body whose `@dataclass` decorator was
# lost in extraction — confirm against the original file.
class ExperimentParameter():
    # Parameter name (required).
    name: str
    # Default value used when no explicit value is supplied (required).
    default: Any
    # presumably the candidate values to iterate/sweep over — verify with callers.
    values: Any = None
    # Human-readable description of the parameter.
    description: str = ''
def analyze(data_path): if data_path.endswith('.gz'): with gzip.open(data_path, 'r') as f: (S, true_model) = pickle.load(f) else: with open(data_path, 'r') as f: (S, true_model) = pickle.load(f) print('True model:') print(true_model) T = float(S.shape[0]) ...
def register_Ns3OlsrRoutingProtocol_methods(root_module, cls): cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) cls.add_constructor([]) cls.add_method('SetMainInterface', 'void', [param('uint32_t', 'interface')]) cls.add_method('Dump', 'void', []) cls.add_method('GetRoutingTableEntries...
def measure_exploitability(game: Union[(str, pyspiel.Game)], populations: Dict[(AgentID, Dict[(PolicyID, Policy)])], policy_mixture_dict: Dict[(AgentID, Dict[(PolicyID, float)])], use_observation: bool=False, use_cpp_br: bool=False): if isinstance(game, str): game = pyspiel.load_game(game) weights = {} ...
def _get_all_misuses(data_base_path: str) -> List[str]: misuses = [] project_dirs = [join(data_base_path, subdir) for subdir in listdir(data_base_path) if isdir(join(data_base_path, subdir))] for project_dir in project_dirs: misuses_dir = join(project_dir, 'misuses') if (not exists(misuses_d...
class ConvFeatureExtractionModel(nn.Module): def __init__(self, input_nc, num_decoders=5, inner_nc=128, num_additional_ids=0, smaller=False, num_masks=0): super(ConvFeatureExtractionModel, self).__init__() self.encoder = self.generate_encoder_layers(output_size=inner_nc, num_filters=num_additional_i...
class TeacherStudentKLLoss(BaseClassificationDistillationLoss):
    """KL(teacher || student) distillation loss over temperature-scaled logits."""

    @staticmethod
    def teacher_student_loss(teacher_logits, student_logits, temp):
        """KL divergence between the tempered teacher and student distributions.

        BUG FIX: the method takes no ``self``/``cls``, so it must be a
        @staticmethod; previously it only worked when accessed on the class
        object, and instance access would have mis-bound ``teacher_logits``.
        """
        teacher_dist = Categorical(logits=(teacher_logits / temp))
        student_dist = Categorical(logits=(student_logits / temp))
        return kl_divergence(teacher_dist, student_dist)
def parse_org_table(table_lines): table_lines.pop(1) table_list = [[b.strip() for b in a[1:(- 2)].split('|')] for a in table_lines] column_list = table_list.pop(0) table_data = [] for param in table_list: param_dict = {} for (column, value) in zip(column_list, param): par...
def compute_norm(x, axis, keepdims):
    """L2 norm of *x* along *axis*, optionally keeping the reduced dims."""
    squared_sum = tf.math.reduce_sum(x ** 2, axis=axis, keepdims=keepdims)
    return squared_sum ** 0.5
class GaussianDiffusion(nn.Module): def __init__(self, model, *, image_size, timesteps=1000, sampling_timesteps=None, loss_type='l1', objective='pred_noise', beta_schedule='cosine', p2_loss_weight_gamma=0.0, p2_loss_weight_k=1, ddim_sampling_eta=1.0): super().__init__() self.model = model se...
class ABReLU_VGG(nn.Module): def __init__(self, vgg_name): super(ABReLU_VGG, self).__init__() self.features = self._make_layers(cfg[vgg_name]) self.classifier = nn.Linear(512, 100) def forward(self, x): out = self.features(x) out = out.view(out.size(0), (- 1)) out...
class _OpLinearTerm(): def from_dim(cls, dim: Dim) -> _OpLinearTerm: res = cls.zero() res.extend_add_sub_(dim, kind='add', right=True) return res def zero(cls) -> _OpLinearTerm: return _OpLinearTerm([]) def __init__(self, terms: List[_OpMultTerm]): self.terms = terms ...
def _map_module(root: T.nn.Module, func: Callable[([T.nn.Module, str], T.nn.Module)], patt: Pattern, path: str) -> T.nn.Module: for (name, child) in root.named_children(): node = _map_module(child, func, patt, f'{path}/{name}') if (node != child): setattr(root, name, node) if patt.ma...
class ADULT(data.Dataset): def __init__(self, root='data/adult', split='train', sensible_attribute='gender', **kwargs): assert (split in ['train', 'val', 'test']) path = os.path.join(root, 'adult.csv') (x, y, s) = load_dataset(path, sensible_attribute) x = torch.from_numpy(x).float()...
def create_pipeline_configuration(DEBUG=False, batch_size=1): config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (Dropout, Softmax, Tanh, Embedding, Linear, LayerNorm), 'model_inputs': {'attention_mask': {'shape': torch.Size([1, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'input_ids': {...
class UpSampling(nn.Module): def __init__(self, c1, c2, c3): super(UpSampling, self).__init__() self.conv1 = Conv(c1, c3, 1, 1) self.upsampling = nn.UpsamplingNearest2d(scale_factor=2) self.conv2 = Conv(c2, c3, 1, 1) def forward(self, x, y): x = self.conv1(x) y = ...
def test_angles_4(): resi = ['RG_69_0', 'RU_37_0'] angles = ['gamma', 'alpha'] (angles_b, rr) = bb.backbone_angles(fname1, topology=fname, residues=resi, angles=angles) stri = '' for p in range(angles_b.shape[0]): for k in range(angles_b.shape[2]): stri += (' %10.4f %10.4f ' % (a...
class ResNet(nn.Module): def __init__(self, dataset, depth, num_classes, norm_type='batch', size=(- 1), nch=3): super(ResNet, self).__init__() self.dataset = dataset self.norm_type = norm_type if (self.dataset.startswith('cifar') or ((0 < size) and (size <= 64))): self.ne...
def check_var_expect(distfn, arg, m, v, msg):
    """Check E[X^2] from distfn.expect against v + m^2 when v is finite.

    Distributions named in the looser-tolerance set use rtol=5e-6; all
    others use assert_allclose defaults. Infinite variance is skipped.
    """
    if not np.isfinite(v):
        return
    loose_tolerance_dists = {'rv_histogram_instance', 'ksone'}
    tol_kwargs = {'rtol': 5e-06} if msg in loose_tolerance_dists else {}
    second_moment = distfn.expect(lambda x: x * x, arg)
    npt.assert_allclose(second_moment, v + m * m, **tol_kwargs)
def generating_function_of_integral_points(polyhedron, split=False, result_as_tuple=None, name=None, names=None, **kwds): import logging logger = logging.getLogger(__name__) from sage.combinat.permutation import Permutations from sage.geometry.polyhedron.constructor import Polyhedron from sage.rings...
def _train(config): data_filter = get_squad_data_filter(config) train_data = read_data(config, 'train', config.load, data_filter=data_filter) dev_data = read_data(config, config.dev_name, True, data_filter=None) update_config(config, [train_data, dev_data]) _config_debug(config) word2vec_dict = ...