Columns:
  seed      string  (lengths 25 to 2.89k)
  seed_api  string  (lengths 14 to 102)
  index     int64   (0 to 14.8k)
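
Each row below is a (seed, seed_api, index) triple: a Python code snippet, the
fully qualified TensorFlow API that snippet exercises, and the row's integer
index. As a minimal sketch of how such a table can be consumed, assuming it is
exported as a Hugging Face dataset (the identifier "user/seed-api-table" is a
hypothetical placeholder, not the real one):

    # Minimal sketch: iterate the (seed, seed_api, index) rows and group the
    # seed snippets by the API they exercise.
    from collections import defaultdict
    from datasets import load_dataset

    rows = load_dataset("user/seed-api-table", split="train")  # hypothetical id
    seeds_by_api = defaultdict(list)
    for row in rows:
        seeds_by_api[row["seed_api"]].append((row["index"], row["seed"]))
    print(len(seeds_by_api), "distinct APIs")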
import tensorflow as tf
hard_negative_loss, hard_negative_active_loss, hard_negative_summaries = (
    compute_loss_and_create_summaries(use_semi_hard=False))
(semi_hard_negative_loss, semi_hard_negative_active_loss,
 semi_hard_negative_summaries) = (
     compute_loss_and_create_summaries(use_semi_hard=True))
summaries = {
    'triplet_loss/Margin': tf.constant(margin),
    'triplet_loss/Anchor/Positive/Distance/Mean':
        tf.math.reduce_mean(anchor_positive_distances),
    'triplet_mining/Anchor/Positive/Distance/Mean':
        tf.math.reduce_mean(anchor_positive_mining_distances),
}
if summarize_percentiles:
    summaries.update({
        'triplet_loss/Anchor/Positive/Distance/Median':
            tfp.stats.percentile(anchor_positive_distances, q=50),
        'triplet_mining/Anchor/Positive/Distance/Median':
            tfp.stats.percentile(anchor_positive_mining_distances, q=50),
    })
summaries.update(hard_negative_summaries)
summaries.update(semi_hard_negative_summaries)
if use_semi_hard:
tensorflow.math.reduce_mean
400
import tensorflow as tf
sync_warmup_vfn = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(warmup_vfn.parameters(), vfn.parameters())])
model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
lazy_model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
warmup_model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
sync_warmup_model = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(warmup_model.parameters(), model.parameters())])
shadow_models = [DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
                 for n in range(FLAGS.warmup.n_shadow_models)]
sync_model_from_lazymodel = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(model.parameters(), lazy_model.parameters())])
sync_model_to_lazymodel = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(lazy_model.parameters(), model.parameters())])
tensorflow.assign
401
from tensorflow.python.framework import ops
    return math_ops.select(
        math_ops.greater(true_positives + false_negatives, 0),
        math_ops.div(true_positives, true_positives + false_negatives),
        0,
        name)
recall = compute_recall(true_positives, false_negatives, 'value')
with ops.control_dependencies([true_positives_update_op, false_negatives_update_op]):
    update_op = compute_recall(true_positives, false_negatives, 'update_op')
if metrics_collections:
    ops.add_to_collections(metrics_collections, recall)
tensorflow.python.framework.ops.control_dependencies
402
import tensorflow as tf
with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state('./model_pretrain')
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        print("loading checkpoint...")
tensorflow.train.checkpoint_exists
403
import tensorflow as tf
    cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
else:
    cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
if tf_output1_dtype == tf.string:
    cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
else:
    cast1 = tf.cast(sub if not swap else add, tf_output1_dtype, "CAST1")
out0 = tf.identity(cast0, "OUTPUT0")
tensorflow.dtypes.as_string
404
import tensorflow as tf
def __init__(self, include_mask=False):
    self._include_mask = include_mask
    self._keys_to_features = {
        'image/encoded': tf.io.FixedLenFeature((), tf.string),
        'image/source_id': tf.io.FixedLenFeature((), tf.string),
        'image/height': tf.io.FixedLenFeature((), tf.int64),
        'image/width': tf.io.FixedLenFeature((), tf.int64),
        'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),
        'image/object/class/label':
tensorflow.io.FixedLenFeature
405
import tensorflow as tf
correct = tf.cast(correct, tf.float32)
accuracy = tf.reduce_mean(correct) * 100.0
tf.summary.scalar(scope + 'accuracy', accuracy)
return accuracy

def num_correct_prediction(logits, labels):
    '''
    Evaluate the quality of the logits at predicting the label
    '''
    correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
    correct = tf.cast(correct, tf.int32)
    n_correct = tf.reduce_sum(correct)
    return n_correct

def optimize(loss, learning_rate, global_step):
    '''
    Optimization, use Gradient Descent as default
    '''
tensorflow.arg_max
406
import tensorflow as tf
facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
output = d_layer_2_all
return output
tensorflow.layers.dense
407
import tensorflow as tf
    :param padding: [string] Convolution padding method, `VALID` or `SAME`.
    """
    assert len(mask.get_shape()) in [3, 4], 'Mask shape must be 3D or 4D.'
    if len(mask.get_shape()) == 3:
        mask_ = tf.expand_dims(mask, 3)
    elif len(mask.get_shape()) == 4:
        mask_ = mask
        assert mask.get_shape()[-1] == 1, '4D mask last dimension must be 1.'
    ksize = [int(ss) for ss in w.get_shape()]
    psize = [1, ksize[0], ksize[1], 1]
    mask_ = tf.nn.max_pool(mask_, psize, strides, padding)
    return tf.nn.conv2d(x, w, strides, padding) * mask_
tensorflow.nn.max_pool
408
import tensorflow as tf
N, PL, QL, CL, d, dc, nh = config.batch_size if not self.demo else config.batch_size, self.c_maxlen, self.q_maxlen, config.char_limit, config.hidden, config.char_dim, config.num_heads
with tf.variable_scope("Input_Embedding_Layer"):
    ch_emb = tf.reshape(tf.nn.embedding_lookup(
        self.char_mat, self.ch), [N * PL, CL, dc])
    qh_emb = tf.reshape(tf.nn.embedding_lookup(
        self.char_mat, self.qh), [N * QL, CL, dc])
    ch_emb = tf.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout)
    qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout)
tensorflow.nn.embedding_lookup
409
import tensorflow as tf
dec_inp_dict2 = {}
dec_inp_dict2["0"] = [
    tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
dec_inp_dict2["1"] = [
    tf.constant(0, tf.int32, shape=[2]) for _ in range(4)]
with tf.variable_scope("other"):
    outputs_dict3, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
        enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
        embedding_size=2, feed_previous=tf.constant(True))
sess.run([tf.global_variables_initializer()])
tensorflow.variable_scope
410
import tensorflow as tf
samples_object = centernet_utils.transform_pointcloud(
    tf.reshape(samples_world, [1, 1, -1, 3]),
    tf.reshape(poses[2][i], [1, 1, 3]),
    tf.reshape(poses[0][i], [1, 1, 3, 3]),
    tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0
samples_object = \
    (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5
samples = tf.squeeze(samples_object)
interpolated = trilinear.interpolate(sdf, samples)
sdf_values += tf.math.sign(tf.nn.relu(interpolated + self.tol))
status2 = False
if status2:
    a = 2
tensorflow.squeeze
411
import tensorflow as tf
params = [self.w0, self.b0, self.w1, self.b1]
grads = [tf.cast(grad, tf.float32) for grad in grads]
with tf.name_scope('update'):
    update_op = tf.group(*[
        param.assign(param - grad * self.LEARNING_RATE)
        for param, grad in zip(params, grads)
    ])
# return update_op
with tf.name_scope('validate'):
    x, y = self._build_data_pipeline()
    y_hat, loss = self._build_validation_model(x, y)
    with tf.control_dependencies([update_op]):
        return tf.print('expect', loss, y, y_hat, summarize=50)

class DataOwner:
tensorflow.name_scope
412
import tensorflow as tf
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
    num_written_lines = 0
    tf.logging.info("***** Predict results *****")
    for (i, prediction) in enumerate(result):
        probabilities = prediction["probabilities"]
tensorflow.logging.info
413
import tensorflow as tf
self._dim = None
self._p = p

def _compute(self, x, y):
    self._dim = x._rank()
    kernel = np.zeros((tf.size(x), tf.size(y)))
    for l in tf.range(start=0, limit=tf.size(x), delta=1, dtype=None, name='l_range'):
        for m in tf.range(start=0, limit=tf.size(y), delta=1, dtype=None, name='m_range'):
            vx = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string,
                                                    value_dtype=tf.int64,
                                                    default_value=-1)
            vz = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string,
                                                    value_dtype=tf.int64,
                                                    default_value=-1)
            vx_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))
            vz_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))
tensorflow.contrib.lookup.MutableHashTable
414
import tensorflow as tf
# session info
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = False
self.sess = tf.Session(config=sess_config)
self._build_graph()

# save info
self.saver = tf.train.Saver()

# initialize the model
self.sess.run(tf.global_variables_initializer())

def _build_graph(self):
    """
    Builds the computation graph with Tensorflow
tensorflow.train.Saver
415
import tensorflow as tf
time_steps = tf.shape(hidden)[1]
if encoder.attn_keep_prob is not None:
    state_noise_shape = [1, tf.shape(state)[1]] if encoder.pervasive_dropout else None
    state = tf.nn.dropout(state, keep_prob=encoder.attn_keep_prob, noise_shape=state_noise_shape)
    hidden_noise_shape = [1, 1, tf.shape(hidden)[2]] if encoder.pervasive_dropout else None
tensorflow.shape
416
import tensorflow as tf
z1 = z0 + 1
x0_clip = tf.clip_by_value(x0, zero, max_x)
x1_clip = tf.clip_by_value(x1, zero, max_x)
y0_clip = tf.clip_by_value(y0, zero, max_y)
y1_clip = tf.clip_by_value(y1, zero, max_y)
z0_clip = tf.clip_by_value(z0, zero, max_z)
z1_clip = tf.clip_by_value(z1, zero, max_z)
dim3 = width
dim2 = width * height
dim1 = width * height * depth
base = _repeat(
    tf.range(num_batch) * dim1, out_depth * out_height * out_width)
base_z0_y0 = base + z0_clip * dim2 + y0_clip * dim3
tensorflow.clip_by_value
417
import tensorflow as tf
context_size = initial_context.shape[1].value

def get_logits(state, ids, time):  # for beam-search decoding
    with tf.variable_scope('decoder_{}'.format(decoder.name)):
        state, context, pos, prev_weights = tf.split(state, [cell_state_size, context_size, 1, -1], axis=1)
        input_ = embed(ids)
        pos = tf.squeeze(pos, axis=1)
        pos = tf.cond(tf.equal(time, 0),
                      lambda: pos,
                      lambda: update_pos(pos, ids, encoder_input_length[align_encoder_id]))
        if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
            output = state
        else:
            # Output is always the right-most part of the state (even with multi-layer RNNs)
tensorflow.equal
418
import tensorflow as tf
c = c
h = h
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
tensorflow.matmul
419
import tensorflow as tf
                             .inference_with_all_cores)
else:
    save_checkpoints_steps = (FLAGS.save_checkpoints_steps or
                              FLAGS.iterations_per_loop)
run_config = tf.estimator.RunConfig(
    model_dir=FLAGS.model_dir,
    save_checkpoints_steps=save_checkpoints_steps)
image_classifier = tf.estimator.Estimator(
    model_fn=model.model_fn, config=run_config, params=estimator_parmas)

# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
tensorflow.estimator.Estimator
420
import tensorflow as tf
self.q = max(q, 1)

# Initialize NN
self.weights, self.biases = self.initialize_NN(layers)

# Initialize parameters
self.lambda_1 = tf.Variable([0.0], dtype=tf.float32)
self.lambda_2 = tf.Variable([-6.0], dtype=tf.float32)

# Load IRK weights
tmp = np.float32(np.loadtxt('../../Utilities/IRK_weights/Butcher_IRK%d.txt' % (q), ndmin=2))
weights = np.reshape(tmp[0:q**2+q], (q+1, q))
tensorflow.Variable
421
from tensorflow.python.framework import ops
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
    """Common shape function for reduction ops."""
tensorflow.python.framework.ops.RegisterShape
422
import tensorflow.contrib as contrib
        "scale": True,
        "updates_collections": None
    }
):
    fc1_1 = contrib.layers.fully_connected(X, 32, scope="fc1_1")
    fc1_2 = contrib.layers.fully_connected(X, 32, scope="fc1_2")
    if cross_stitch_enabled:
        with tf.variable_scope("cross_stitch_1"):
            stitch1_1, stitch1_2 = apply_cross_stitch(fc1_1, fc1_2)
    else:
        stitch1_1, stitch1_2 = fc1_1, fc1_2
    fc2_1 = contrib.layers.fully_connected(stitch1_1, 32, scope="fc2_1")
    fc2_2 = contrib.layers.fully_connected(stitch1_2, 32, scope="fc2_2")
    if cross_stitch_enabled:
        with tf.variable_scope("cross_stitch_2"):
            stitch2_1, stitch2_2 = apply_cross_stitch(fc2_1, fc2_2)
    else:
        stitch2_1, stitch2_2 = fc2_1, fc2_2
    dropout2_1 = contrib.layers.dropout(stitch2_1, keep_prob=keep_prob, is_training=is_training, scope="dropout2_1")
    dropout2_2 = contrib.layers.dropout(stitch2_2, keep_prob=keep_prob, is_training=is_training, scope="dropout2_2")
tensorflow.contrib.layers.fully_connected
423
import tensorflow as tf
b_init_args = {}
self.inputs = prev_layer.outputs
if self.inputs.get_shape().ndims != 2:
    raise Exception("The input dimension must be rank 2")
n_in = int(self.inputs.get_shape()[-1])
self.n_units = n_units
with tf.variable_scope(name):
    W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args)
    b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args)
    # self.outputs = act(tf.matmul(self.inputs, W) + b)
    LayersConfig.set_keep[name] = tf.placeholder(tf.float32)
    W_dropcon = tf.nn.dropout(W, LayersConfig.set_keep[name])
    self.outputs = act(tf.matmul(self.inputs, W_dropcon) + b)

# self.all_layers = list(layer.all_layers)
# self.all_params = list(layer.all_params)
# self.all_drop = dict(layer.all_drop)
self.all_drop.update({LayersConfig.set_keep[name]: keep})
self.all_layers.append(self.outputs)
self.all_params.extend([W, b])
tensorflow.placeholder
424
import tensorflow as tf
# tf.divide(tf.cast(n_neg_to_select, tf.float32), n_negtives),
# tf.zeros_like(tf.cast(n_neg_to_select, tf.float32)),
# name='rand_select_negtive')

# include both selected negtive and all positive examples
final_mask = tf.stop_gradient(tf.logical_or(tf.logical_and(negtive_mask, selected_neg_mask), positive_mask))
total_examples = tf.reduce_sum(tf.cast(final_mask, tf.float32))

# add mask for glabels and cls_pred here
glabels = tf.boolean_mask(tf.clip_by_value(glabels, 0, FLAGS.num_classes), tf.stop_gradient(final_mask))
cls_pred = tf.boolean_mask(cls_pred, tf.stop_gradient(final_mask))
location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))
gtargets = tf.boolean_mask(gtargets, tf.stop_gradient(positive_mask))

predictions = {
    'classes': tf.argmax(cls_pred, axis=-1),
    'probabilities': tf.reduce_max(tf.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1),
    'bboxes_predict': tf.reshape(bboxes_pred, [-1, 4])
}
tensorflow.stop_gradient
425
import tensorflow as tf
    l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1])
    epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2
    # Compute A_{h+1}
    A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1])
    # Compute z_{h}A_{h+1}
    Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5)
    Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5)
    # Compute u_{h+1} and v_{h+1}
    U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2)
    Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.)
    KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2.
# Output layer
else:
    F = tf.squeeze(tf.layers.dense(Z, n_out), [2])
return F, KL
tensorflow.reduce_mean
426
import tensorflow as tf
norm_grads_q, norm_grads_policy, avg_norm_grads_f = None, None, None
avg_norm_k, avg_norm_g, avg_norm_k_dot_g, avg_norm_adj = None, None, None, None
if self.trust_region:
    # [n_envs * n_steps, n_act]
    grad = tf.gradients(- (loss_policy - self.ent_coef * entropy) * self.n_steps * self.n_envs, phi_i)
    # [n_envs * n_steps, n_act]
    # Directly computed gradient of KL divergence wrt f
    kl_grad = - f_polyak_i / (f_i_ + eps)
    k_dot_g = tf.reduce_sum(kl_grad * grad, axis=-1)
    adj = tf.maximum(0.0, (tf.reduce_sum(kl_grad * grad, axis=-1) - self.delta) / (
        tf.reduce_sum(tf.square(kl_grad), axis=-1) + eps))  # [n_envs * n_steps]

    # Calculate stats (before doing adjustment) for logging.
    avg_norm_k = avg_norm(kl_grad)
    avg_norm_g = avg_norm(grad)
    avg_norm_k_dot_g = tf.reduce_mean(tf.abs(k_dot_g))
    avg_norm_adj = tf.reduce_mean(tf.abs(adj))
tensorflow.reduce_sum
427
import tensorflow as tf
self.assertEqual((2, 2), res[0].h.shape)

def testEmbeddingRNNSeq2Seq(self):
    with self.test_session() as sess:
        with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
            enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
            dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
            cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
            dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq(
                enc_inp, dec_inp, cell, num_encoder_symbols=2,
                num_decoder_symbols=5, embedding_size=2)
            sess.run([tf.global_variables_initializer()])
            res = sess.run(dec)
            self.assertEqual(3, len(res))
tensorflow.nn.rnn_cell.BasicLSTMCell
428
import tensorflow as tf
trans_z = linear(tf.nn.dropout(trans_h0, keep_prob), featsize, 'trans_z')
self.translated_z = trans_z

s_h, s_w = self.output_height, self.output_width
s_h0, s_h1, s_h2, s_h3 = \
    int(s_h/ns0), int(s_h/ns0/ns1), int(s_h/ns0/ns1/ns2), int(s_h/ns0/ns1/ns2/ns3)
s_w0, s_w1, s_w2, s_w3 = \
    int(s_w/ns0), int(s_w/ns0/ns1), int(s_w/ns0/ns1/ns2), int(s_w/ns0/ns1/ns2/ns3)

def decode(z, skip_h3, skip_h2, skip_h1, skip_h0):
    z_ = lrelu(linear(tf.nn.dropout(z, keep_prob), nf3*s_h3*s_w3, 'd_h0_lin'))
    h0 = tf.nn.dropout(tf.reshape(z_, [-1, s_h3, s_w3, nf3]), keep_prob)
    import IPython
    IPython.embed()
    h1 = lrelu(deconv2d(tf.concat([h0, skip_h3], 3),
                        [self.batch_size, s_h2, s_w2, nf2], name='d_h1', d_h=ns3, d_w=ns3))
    h2 = lrelu(deconv2d(tf.concat([h1, skip_h2], 3),
                        [self.batch_size, s_h1, s_w1, nf1], name='d_h2', d_h=ns2, d_w=ns2))
    h3 = lrelu(deconv2d(tf.concat([h2, skip_h1], 3),
                        [self.batch_size, s_h0, s_w0, nf0], name='d_h3', d_h=ns1, d_w=ns1))
    print(h3.get_shape())
    h4 = deconv2d(tf.concat([h3, skip_h0], 3),
                  [self.batch_size, s_h, s_w, self.c_dim], name='d_h4', d_h=ns0, d_w=ns0)
    return h4

with tf.variable_scope("deconv") as scope:
    output_h4 = decode(trans_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0)
    scope.reuse_variables()
    truthoutput_h4 = decode(tgtimg_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0)
tensorflow.concat
429
from tensorflow.python.framework import ops
    hessians[a][b][c], where [a] runs over the epoch. For fixed [a], hessians
    stores the value of the hessian matrix evaluated at the critical points;
    this is an nxn matrix indexed by [b][c]. The size of the matrix is
    predetermined by the number of parameters in the network.
    residuals -- list containing the value of the residuals at predefined
    training intervals. As we are only interested in the sign of the residuals,
    we define it as the difference between the predicted output \hat{y} (an in
    the code) and the training labels y (Y in the code).
    """
ops.reset_default_graph()  # reset the computational graph
tf.set_random_seed(1)  # to keep consistent results

# ----------training/test set features-------------------------
X_tr = np.transpose(X_train)  # the transpose is taken to adapt to TF convention. This is also
f, m = X_tr.shape  # f: number of features, m: number of training examples
X_tst = np.transpose(X_test)  # the transpose is taken to adapt to TF convention. This is also
tensorflow.python.framework.ops.reset_default_graph
430
import tensorflow as tf
obs_shape = common_layers.shape_list(observations)
(frame_height, frame_width) = obs_shape[2:4]
# TODO(afrozm): We have these dummy problems mainly for hparams, so cleanup
# when possible and do this properly.
if hparams.policy_problem_name == "dummy_policy_problem_ttt":
    tf.logging.info("Using DummyPolicyProblemTTT for the policy.")
    policy_problem = tic_tac_toe_env.DummyPolicyProblemTTT()
else:
    tf.logging.info("Using DummyPolicyProblem for the policy.")
    policy_problem = DummyPolicyProblem(action_space, frame_height, frame_width)
trainer_lib.add_problem_hparams(hparams, policy_problem)
hparams.force_full_predict = True
model = registry.model(hparams.policy_network)(
    hparams, tf.estimator.ModeKeys.TRAIN
)
try:
    num_target_frames = hparams.video_num_target_frames
tensorflow.logging.info
431
import tensorflow as tf
net = tf.layers.dense(net, 20, activation=tf.nn.relu,
                      kernel_initializer=init_w, bias_initializer=init_b,
                      name='l2', trainable=trainable)
with tf.variable_scope('q'):
    q = tf.layers.dense(net, 1, kernel_initializer=init_w,
                        bias_initializer=init_b, trainable=trainable)  # Q(s,a)
return q

def learn(self, s, a, r, s_, ISW):
    _, abs_td = self.sess.run([self.train_op, self.abs_td],
                              feed_dict={S: s, self.a: a, R: r, S_: s_, self.ISWeights: ISW})
    if self.t_replace_counter % self.t_replace_iter == 0:
        self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])
    self.t_replace_counter += 1
    return abs_td

class SumTree(object):
    """
    This SumTree code is modified version and the original code is from:
    https://github.com/jaara/AI-blog/blob/master/SumTree.py
tensorflow.assign
432
import tensorflow as tf
weights = np.random.normal(scale=0.01, size=shape).astype('f')
return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32)

def init_bias(shape, name=None):
    return safe_get(name, initializer=tf.zeros(shape, dtype=tf.float32))

def init_fc_weights_xavier(shape, name=None):
    fc_initializer = tf.contrib.layers.xavier_initializer(dtype=tf.float32)
    return safe_get(name, list(shape), initializer=fc_initializer, dtype=tf.float32)

def init_conv_weights_xavier(shape, name=None):
    conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=tf.float32)
    return safe_get(name, list(shape), initializer=conv_initializer, dtype=tf.float32)
tensorflow.contrib.layers.xavier_initializer
433
import tensorflow as tf
        - Output class tensor of the model.
    """
    # Weights and biases for output softmax layer.
    out_weights = tf.Variable(
        tf.random_normal([self.num_units, self.n_classes]))
    out_bias = tf.Variable(tf.random_normal([self.n_classes]))

    # input image placeholder
tensorflow.random_normal
434
import tensorflow as tf
query = tf.matmul(query, self.v_attn)
logits = tf.reshape(query, [1, layer_id])
if self.temperature is not None:
    logits /= self.temperature
if self.tanh_constant is not None:
    logits = self.tanh_constant * tf.tanh(logits)
index = tf.multinomial(logits, 1)
index = tf.to_int32(index)
index = tf.reshape(index, [1])
arc_seq = arc_seq.write(start_id + 2 * i, index)
curr_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=index)
log_prob += curr_log_prob
curr_ent = tf.stop_gradient(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=tf.nn.softmax(logits)))
entropy += curr_ent
tensorflow.reshape
435
import tensorflow as tf
    cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
else:
    cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
if tf_output1_dtype == tf.string:
    cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
else:
    cast1 = tf.cast(sub if not swap else add, tf_output1_dtype, "CAST1")
out0 = tf.identity(cast0, "TENSOR_OUTPUT0")
out1 = tf.identity(cast1, "TENSOR_OUTPUT1")

# Use a different model name for the non-batching variant
model_name = tu.get_model_name(
    "savedmodel_nobatch" if max_batch == 0 else "savedmodel",
    input_dtype, output0_dtype, output1_dtype)
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)

try:
    os.makedirs(model_version_dir)
tensorflow.identity
436
import tensorflow as tf
log_lambdas = self.parameterizer(x1)
x2, ildj = exponentiate(z2, log_lambdas, inverse=tf.constant(True))
tensorflow.constant
437
import tensorflow as tf
epoch = features["epoch"] + tf.zeros([x_shape[0]], dtype=tf.int32)
# Randomly set epoch to 0 in some cases as that's the inference value.
rand = tf.random.uniform([x_shape[0]])
epoch = tf.where(rand < 0.1, tf.zeros_like(epoch), epoch)
# Embed the epoch number.
emb_epoch = common_layers.embedding(epoch, 32, 32)  # [batch, 32]
flat_x = tf.concat([flat_x, emb_epoch], axis=1)
flat_x = tf.layers.dropout(flat_x, rate=dropout)
x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu)
logits = tf.layers.dense(
    x, self.hparams.problem.num_actions, name="dense2"
tensorflow.concat
438
import tensorflow as tf
pi_dropout_mask_phs = pi_dropout_mask_generator.generate_dropout_mask_placeholders()
pi, pi_reg = mlp_variational(x, pi_dropout_mask_phs, list(hidden_sizes) + [act_dim],
                             activation, output_activation, dropout_rate)
pi = act_limit * pi
with tf.variable_scope('q1'):
    q1_in_ph = tf.concat([x, a], axis=-1)
    q1_in_dim = q1_in_ph.shape.as_list()[1]
    q1_dropout_mask_generator = DropoutMaskGenerator(q1_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate)
    q1_dropout_mask_phs = q1_dropout_mask_generator.generate_dropout_mask_placeholders()
    q1, q1_reg = mlp_variational(q1_in_ph, q1_dropout_mask_phs, list(hidden_sizes) + [1],
                                 activation, None, dropout_rate)
    q1 = tf.squeeze(q1, axis=2)
with tf.variable_scope('q1', reuse=True):
    q1_pi, q1_pi_reg = mlp_variational(tf.concat([x, pi[0]], axis=-1), q1_dropout_mask_phs,
                                       list(hidden_sizes) + [1], activation, None, dropout_rate)
    q1_pi = tf.squeeze(q1_pi, axis=2)
with tf.variable_scope('q2'):
    q2_in_ph = tf.concat([x, a], axis=-1)
    q2_in_dim = q2_in_ph.shape.as_list()[1]
    q2_dropout_mask_generator = DropoutMaskGenerator(q2_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate)
    q2_dropout_mask_phs = q2_dropout_mask_generator.generate_dropout_mask_placeholders()
    q2, q2_reg = mlp_variational(q2_in_ph, q2_dropout_mask_phs, list(hidden_sizes) + [1],
                                 activation, None, dropout_rate)
    q2 = tf.squeeze(q2, axis=2)
tensorflow.variable_scope
439
import tensorflow as tf
n_l1 = 1000
n_l2 = 1000
z_dim = 10
batch_size = 100
n_epochs = 1000
learning_rate = 0.001
beta1 = 0.9
results_path = './Results/Semi_Supervised'
n_labels = 10
n_labeled = 1000

# Placeholders for input data and the targets
x_input = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_dim], name='Input')
x_input_l = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_dim], name='Labeled_Input')
y_input = tf.placeholder(dtype=tf.float32, shape=[batch_size, n_labels], name='Labels')
x_target = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_dim], name='Target')
real_distribution = tf.placeholder(dtype=tf.float32, shape=[batch_size, z_dim], name='Real_distribution')
categorial_distribution = tf.placeholder(dtype=tf.float32, shape=[batch_size, n_labels], name='Categorical_distribution')
manual_decoder_input = tf.placeholder(dtype=tf.float32, shape=[1, z_dim + n_labels], name='Decoder_input')

def form_results():
    """
    Forms folders for each run to store the tensorboard files, saved models and the log files.
    :return: three strings pointing to tensorboard, saved models and log paths respectively.
    """
    folder_name = "/{0}_{1}_{2}_{3}_{4}_{5}_Semi_Supervised". \
        format(datetime.datetime.now(), z_dim, learning_rate, batch_size, n_epochs, beta1)
    tensorboard_path = results_path + folder_name + '/Tensorboard'
tensorflow.placeholder
440
import tensorflow as tf
clipped_grads_and_vars = self._clip_gradients(self.grads_and_vars, self._grad_clipping_tuple)

# compute norms in case they need to be logged
self.gradient_norms = [tf.norm(g) + NUMTOL for (g, v) in clipped_grads_and_vars]
self.weight_norms = [tf.norm(v) + NUMTOL for (g, v) in clipped_grads_and_vars]

# check that gradients and variables are finite
grads = [tf.check_numerics(g, "grads is not finite") for (g, v) in clipped_grads_and_vars]
variables = [tf.check_numerics(v, "variables are not finite") for (g, v) in clipped_grads_and_vars]
tensorflow.norm
441
import tensorflow as tf
self.assertEqual(pdf.shape, (6,))
self.assertAllClose(
    self.evaluate(pdf),
    self._scipy_pareto(concentration_v, scale_v).pdf(x))

def testParetoLogPdfValidateArgs(self):
    batch_size = 3
    scale = tf.constant([2., 3., 4.])
    concentration = tf.constant([2.] * batch_size)
    pareto = tfd.Pareto(concentration, scale, validate_args=True)
    with self.assertRaisesOpError("not in the support"):
        x = tf.placeholder_with_default(input=[2., 3., 3.], shape=[3])
        log_prob = pareto.log_prob(x)
tensorflow.constant
442
from tensorflow.python.framework import ops
# avoid division by zero
epsilon = 1e-7

def compute_recall(name):
    recall = math_ops.div(true_positives, epsilon + true_positives + false_negatives,
                          name='recall_' + name)
    return recall

recall = compute_recall('value')
with ops.control_dependencies([true_positives_compute_op, false_negatives_compute_op]):
    update_op = compute_recall('update_op')
if metrics_collections:
    ops.add_to_collections(metrics_collections, recall)
if updates_collections:
    ops.add_to_collections(updates_collections, update_op)
tensorflow.python.framework.ops.control_dependencies
443
import tensorflow as tf
               input_dtype, output0_dtype, output1_dtype, swap=False):
    if not tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,
                                    input_shape, output0_shape, output1_shape):
        return

    tf_input_dtype = np_to_tf_dtype(input_dtype)
    tf_output0_dtype = np_to_tf_dtype(output0_dtype)
    tf_output1_dtype = np_to_tf_dtype(output1_dtype)

    # Create the model. If non-batching then don't include the batch
    # dimension.
    tf.reset_default_graph()
    if max_batch == 0:
        in0 = tf.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape), "INPUT0")
        in1 = tf.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape), "INPUT1")
    else:
        in0 = tf.placeholder(tf_input_dtype, [None, ] + tu.shape_to_tf_shape(input_shape), "INPUT0")
        in1 = tf.placeholder(tf_input_dtype, [None, ] + tu.shape_to_tf_shape(input_shape), "INPUT1")

    # If the input is a string, then convert each string to the
tensorflow.reset_default_graph
444
import tensorflow as tf
        average_grads.append(grad_and_var)
    return average_grads

def train():
    """Train CIFAR-10 for a number of steps."""
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        # Create a variable to count the number of train() calls. This equals the
        # number of batches processed * FLAGS.num_gpus.
        global_step = tf.get_variable(
            'global_step', [],
            initializer=tf.constant_initializer(0), trainable=False)
tensorflow.Graph
445
import tensorflow as tf
name_to_features = {
    "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
    "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
    "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
    "label_ids": tf.FixedLenFeature([], tf.int64),
    "is_real_example": tf.FixedLenFeature([], tf.int64),
}

def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
tensorflow.FixedLenFeature
446
import tensorflow as tf
w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
tensorflow.constant_initializer
447
from tensorflow.python.framework import ops
next_size = _next_array_size(new_size)
next_shape = array_ops.pack([next_size] + fixed_shape)
new_value = array_ops.zeros(next_shape, dtype=values.dtype)
old_value = array.value()
assign_op = state_ops.assign(array, new_value, validate_shape=False)
with ops.control_dependencies([assign_op]):
    copy_op = array[:size].assign(old_value[:size])
# return value needs to be the same dtype as no_op() for cond
with ops.control_dependencies([copy_op]):
    return control_flow_ops.no_op()
tensorflow.python.framework.ops.control_dependencies
448
import tensorflow as tf
    Returns
    -------
    A tf.Variable, filled with drawn samples.
    """
    shape = tuple(map(int, shape))
    if seed is None:
        # ensure that randomness is conditioned by the Numpy RNG
        seed = np.random.randint(10e8)
    value = tf.random_normal_initializer(
        mean, scale, dtype=dtype, seed=seed)(shape)
    return tf.Variable(value, dtype=dtype, name=name)

def max(x, axis=None, keepdims=False):
    """Maximum value in a tensor.
tensorflow.random_normal_initializer
449
import tensorflow as tf
            updates_collections=None,
            scope=scope,
            reuse=self.reuse,
            is_training=self.train)
_, l3_h2 = self.hgru_ops(
    i0=i0, x=processed_l2_h2, h2=l3_h2, layer='h3', layer_idx=1)
if self.batch_norm:
    with tf.variable_scope('l3_h2_bn', reuse=self.scope_reuse) as scope:
        l3_h2 = tf.contrib.layers.batch_norm(
            inputs=l3_h2,
            scale=True,
            center=True,
            fused=True,
            renorm=False,
            param_initializers=self.param_initializer,
            updates_collections=None,
            scope=scope,
            reuse=self.reuse,
            is_training=self.train)
# l3-l2 feedback (FEEDBACK KERNEL is 2x channels)
_, temp_l2_h2 = self.hgru_ops(
tensorflow.contrib.layers.batch_norm
450
import tensorflow as tf
wall_time = (time.time() - start_time) / hparams.n_iters
examples_per_sec = hparams.n_samples / wall_time
self.report_benchmark(
    name="eager_train_%s%s_%d" %
    ("gpu" if tf.test.is_gpu_available() else "cpu",
     "_defun" if defun else "",
     sample_size),
    iters=hparams.n_iters,
    extras={"examples_per_sec": examples_per_sec},
    wall_time=wall_time)
tensorflow.test.is_gpu_available
451
from tensorflow.python.framework import ops
self._lr = learning_rate
self._beta1 = beta1
self._beta2 = beta2

# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._beta1_t = None
self._beta2_t = None

def _prepare(self):
    self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
    self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
    self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")

def _create_slots(self, var_list):
    # Create slots for the first and second moments.
    for v in var_list:
        self._zeros_slot(v, "m", self._name)
        self._zeros_slot(v, "v", self._name)
        self._zeros_slot(v, "g", self._name)

def _apply_dense(self, grad, var):
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
tensorflow.python.framework.ops.convert_to_tensor
452
import tensorflow as tf
# Loss op and other outputs. The log(2.0) term corrects for
# logloss not being an upper bound on the indicator function.
weighted_loss = weights * losses_utils.weighted_surrogate_loss(
    labels, logits, surrogate_type=surrogate_type,
    positive_weights=1.0, negative_weights=lambdas)
maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
lambda_term = lambdas * target_rate * (1.0 - label_priors) * maybe_log2
loss = tf.reshape(weighted_loss - lambda_term, original_shape)
other_outputs = {
    'lambdas': lambdas_variable,
    'label_priors': label_priors,
    'true_positives_lower_bound': true_positives_lower_bound(labels, logits, weights, surrogate_type),
    'false_positives_upper_bound': false_positives_upper_bound(labels, logits, weights, surrogate_type)
}
tensorflow.reshape
453
import tensorflow as tf
# pad_input_data_to_static_shape assumes that image is already concatenated
# with additional channels.
self.assertAllEqual(
    padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
    [5, 6, 5])
self.assertAllEqual(
    padded_tensor_dict[fields.InputDataFields.image_additional_channels]
    .shape.as_list(), [5, 6, 2])

def test_images_and_additional_channels_errors(self):
    input_tensor_dict = {
        fields.InputDataFields.image:
            tf.placeholder(tf.float32, [None, None, 3]),
        fields.InputDataFields.image_additional_channels:
            tf.placeholder(tf.float32, [None, None, 2]),
        fields.InputDataFields.original_image:
            tf.placeholder(tf.float32, [None, None, 3]),
    }
    with self.assertRaises(ValueError):
        _ = inputs.pad_input_data_to_static_shapes(
            tensor_dict=input_tensor_dict,
            max_num_boxes=3,
            num_classes=3,
            spatial_image_shape=[5, 6])
tensorflow.placeholder
454
from tensorflow.contrib.summary import summary_test_util
dev_data = data.SnliData(fake_train_file, word2index)
test_data = data.SnliData(fake_train_file, word2index)

# 2. Create a fake config.
config = _test_spinn_config(
    data.WORD_VECTOR_LEN, 4,
    logdir=os.path.join(self._temp_data_dir, "logdir"))

# 3. Test training of a SPINN model.
trainer = spinn.train_or_infer_spinn(
    embed, word2index, train_data, dev_data, test_data, config)

# 4. Load train loss values from the summary files and verify that they
#    decrease with training.
summary_file = glob.glob(os.path.join(config.logdir, "events.out.*"))[0]
events = summary_test_util.events_from_file(summary_file)
train_losses = [event.summary.value[0].simple_value for event in events
                if event.summary.value
                and event.summary.value[0].tag == "train/loss"]
self.assertEqual(config.epochs, len(train_losses))
self.assertLess(train_losses[-1], train_losses[0])

# 5. Verify that checkpoints exist and contain all the expected variables.
self.assertTrue(glob.glob(os.path.join(config.logdir, "ckpt*")))
ckpt_variable_names = [
    item[0] for item in checkpoint_utils.list_variables(config.logdir)]
self.assertIn("global_step", ckpt_variable_names)
for v in trainer.variables:
    variable_name = v.name[:v.name.index(":")] if ":" in v.name else v.name
    self.assertIn(variable_name, ckpt_variable_names)
tensorflow.contrib.summary.summary_test_util.events_from_file
455
import tensorflow as tf
elif mode == tf.estimator.ModeKeys.EVAL:
    def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        # predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        # ones = tf.get_variable('ones', shape=logits.shape, initializer=tf.ones_initializer)
        # zeros = tf.get_variable('zeros', shape=logits.shape, initializer=tf.zeros_initializer)
        predictions = tf.where(logits >= 0, tf.ones(tf.shape(logits)), tf.zeros(tf.shape(logits)))
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions, weights=is_real_example)
        loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }
tensorflow.metrics.accuracy
456
import tensorflow as tf
with tf.Session(config=config) as sess:
    t6 = time.time()
    sv3 = sess.run(approx_scskconv)
    t5 = time.time()
    for i in range(0, num_trials):
        sess.run(approx_scskconv)
    t6 = time.time()
    ts = abs(t6 - t5) / max(num_trials, 1)
    print("time approx sparse: ", ts)
tf.reset_default_graph()
time.sleep(1)
if dense:
    td = 0
    with tf.device("/gpu:0"):
        conv = nn_ops.conv3d(d1, d2, strides, padding)
    with tf.Session(config=config) as sess:
        t22 = time.time()
        expected = sess.run(conv)
        t11 = time.time()
        for i in range(0, num_trials):
            sess.run(conv)
        t22 = time.time()
        td = abs(t22 - t11) / max(num_trials, 1)
        print("time dense gpu: ", td)
    tf.reset_default_graph()
    print("time ratio: ", ts / td)
return [expected, sv3, ts, td]
tensorflow.device
457
import tensorflow as tf
with tf.control_dependencies([train_op]), tf.name_scope('ema'):
    ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY, num_updates=global_step)
    train_op = ema.apply(tf.trainable_variables())

return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op)

def add_weight_decay(weight_decay):
    """Add L2 regularization to all (or some) trainable kernel weights."""
tensorflow.estimator.EstimatorSpec
458
import tensorflow as tf
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
else:
    probabilities = logits
    logits = tf.squeeze(logits, [-1])
    predictions = logits
    per_example_loss = tf.square(logits - labels)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, probabilities, logits, predictions)
tensorflow.squeeze
459
import tensorflow as tf
y = tf.py_func(lambda x: x + 1, [x], [tf.float32])
z = tf.py_func(lambda x: x * 2, [x], [tf.float32])
tensorflow.py_func
460
import tensorflow as tf
h_conv1 = self.conv2d('h_conv1', s, W_conv1, 4, b_conv1)  # build the first conv layer; its output is conv1
h_pool1 = self.max_pool_2x2('h_pool1', h_conv1)
h_conv2 = self.conv2d('h_conv2', h_pool1, W_conv2, 2, b_conv2)
h_conv3 = self.conv2d('h_conv3', h_conv2, W_conv3, 1, b_conv3)
h_conv3_flat = tf.reshape(h_conv3, [-1, 1600], 'h_conv3_flat')
h_fc1 = tf.nn.relu(tf.add(tf.matmul(h_conv3_flat, W_fc1), b_fc1, 'h_fc1'))
readout = tf.add(tf.matmul(h_fc1, W_fc2), b_fc2, 'h_fc2')
return s, readout, h_fc1

def creat_optimizer(self, readout):
tensorflow.reshape
461
import tensorflow as tf
if FLAGS.do_serve:
    def serving_input_fn():
        with tf.variable_scope("foo"):
            feature_spec = {
                "input_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
                "input_mask": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
tensorflow.variable_scope
462
import tensorflow as tf
    Returns:
    X -- placeholder for the data input, of shape [n_x, None] and dtype "float"
    Y -- placeholder for the input labels, of shape [n_y, None] and dtype "float"
    """
    X = tf.placeholder(shape=[Nfeat, None], dtype="float64")
    Y = tf.placeholder(shape=[Nlab, None], dtype="float64")
    return X, Y
tensorflow.placeholder
463
import tensorflow as tf
    '''
    def set_optimizer(self):
        with tf.variable_scope('optimizer'):
            self._optimizer, self._learning_rate = TFOptimizers.instantiate_optimizer(self, self.optimizer_tuple)

    def set_training_op(self):
tensorflow.variable_scope
464
import tensorflow as tf

if __name__ == "__main__":
    tf.app.run()
tensorflow.app.run
465
import tensorflow as tf
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
    serialized_example,
    # Defaults are not specified since both keys are required.
    features={
        'image_raw': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64),
    })
if FLAGS.contrast_norm == 'areafactor':
    image = tf.decode_raw(features['image_raw'], tf.float32)
tensorflow.FixedLenFeature
466
import tensorflow as tf
if params['data_format'] == 'channels_first':
    cls_pred = tf.transpose(cls_pred, [0, 2, 3, 1])
tensorflow.transpose
467
import tensorflow as tf
with tf.variable_scope(name):
    # Encoder definition
    o_c1 = self.general_conv2d(input_data, self.base_number_of_features, 3,
                               stride=1, padding='SAME', activation_function='relu',
                               do_norm=False, name=name + '_conv2d_1')
    o_mp1 = tf.layers.max_pooling2d(o_c1, 2, 2, name=name + '_maxpooling_1')
    o_c2 = self.general_conv2d(o_mp1, self.base_number_of_features * 2, 3,
                               stride=1, padding='SAME', activation_function='relu',
                               do_norm=False, name=name + '_conv2d_2')
    o_mp2 = tf.layers.max_pooling2d(o_c2, 2, 2, name=name + '_maxpooling_2')
tensorflow.layers.max_pooling2d
468
from tensorflow.python.ops import math_ops
total = _create_local('total', shape=[])
count = _create_local('count', shape=[])
if weights is not None:
    weights = math_ops.to_float(weights)
    values = math_ops.mul(values, weights)
    num_values = math_ops.reduce_sum(_broadcast_weights(weights, values))
else:
    num_values = math_ops.to_float(array_ops.size(values))
total_compute_op = state_ops.assign_add(total, math_ops.reduce_sum(values))
tensorflow.python.ops.math_ops.mul
469
import tensorflow as tf

def get_batch(image, label, batch_size, crop_size):
    # print(image.shape)
    # print(label.shape)
    images, labels = tf.train.shuffle_batch([image, label], batch_size=batch_size,
                                            num_threads=10, capacity=10000, min_after_dequeue=200)
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

def get_test_batch(image, label, batch_size):
    images, labels = tf.train.batch([image, label], batch_size=batch_size)
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

def get_valid_batch(image, label, batch_size):
    images, labels = tf.train.batch([image, label], batch_size=batch_size)
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

class trainwork(object):
    def __init__(self):
        with tf.variable_scope('scop'):
tensorflow.reshape
470
import tensorflow as tf
sliced_output = tf.unstack(output, axis=1)
sliced_label = tf.unstack(labels, axis=1)
sliced_propensity = tf.unstack(propensity_weights, axis=1)
for i in range(len(sliced_output)):
    for j in range(i + 1, len(sliced_output)):
        cur_label_weight = tf.math.sign(sliced_label[i] - sliced_label[j])
        cur_propensity = sliced_propensity[i] * sliced_label[i] + sliced_propensity[j] * sliced_label[j]
        cur_pair_loss = -tf.exp(sliced_output[i]) / (tf.exp(sliced_output[i]) + tf.exp(sliced_output[j]))
        if loss == None:
            loss = cur_label_weight * cur_pair_loss * cur_propensity
tensorflow.math.sign
471
from tensorflow.python.framework import ops
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape)
ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
ops.RegisterShape("IsNan")(common_shapes.unchanged_shape)
ops.RegisterShape("Log")(common_shapes.unchanged_shape)
ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
ops.RegisterShape("Real")(common_shapes.unchanged_shape)
ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Sign")(common_shapes.unchanged_shape)
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)

@ops.RegisterShape("Add")
@ops.RegisterShape("Complex")
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
tensorflow.python.framework.ops.RegisterShape
472
import tensorflow as tf
"""Define an L2 loss, useful for regularization, i.e. weight decay.

Args:
    var: tensor to regularize.
    weight_l1: an optional weight to modulate the l1 loss.
    weight_l2: an optional weight to modulate the l2 loss.
    name: Optional scope/name for op_scope.

Returns:
    the L1 + L2 loss op.
"""
with tf.name_scope(name):
    weight_l1_t = tf.convert_to_tensor(weight_l1, dtype=var.dtype.base_dtype, name='weight_l1')
    weight_l2_t = tf.convert_to_tensor(weight_l2, dtype=var.dtype.base_dtype, name='weight_l2')
    reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(var)), name='value_l1')
    reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(var), name='value_l2')
    return tf.add(reg_l1, reg_l2, name='value')

def l1_regularizer(scale, name='l1_regularizer'):
    """Returns a function that can be used to apply L1 regularization to weights.

    L1 regularization encourages sparsity.
tensorflow.name_scope
473
import tensorflow as tf
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
# Sleep to make sure the queue runner has started the first run call.
time.sleep(_SLEEP_TIME)
# Session closed.
coord.request_stop()
coord.join()

def test_batcher_closed(self):
    with tf.Graph().as_default():
        @dynamic_batching.batch_fn
        def f(a):
            return a

        f(tf.constant([1]))
        # Intentionally using tf.Session() instead of self.test_session() to have
        # control over closing the session. test_session() is a cached session.
        with tf.Session():
            coord = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=coord)
            time.sleep(_SLEEP_TIME)
            coord.request_stop()  # Calls close operation.
            coord.join()
            # Session closed.

def test_minimum_batch_size(self):
    with self.test_session() as session:
        @dynamic_batching.batch_fn_with_options(
tensorflow.constant
474
import tensorflow as tf
if reuse:
    tf.get_variable_scope().reuse_variables()
else:
    assert tf.get_variable_scope().reuse is False
return tf.nn.relu(x) - alpha * tf.nn.relu(-x)

def instance_norm(x, name='instance_norm'):
    with tf.variable_scope(name):
tensorflow.nn.relu
475
import tensorflow as tf
new_value = assign.eval()
return p.eval(), new_value

def _initAssignAddFetch(self, x, y, use_gpu=False):
    """Initialize a param to init, and compute param += y."""
    with self.test_session(use_gpu=use_gpu):
        p = tf.Variable(x)
        add = tf.assign_add(p, y)
        p.initializer.run()
        new_value = add.eval()
        return p.eval(), new_value

def _initAssignSubFetch(self, x, y, use_gpu=False):
    """Initialize a param to init, and compute param -= y."""
tensorflow.assign_add
476
import tensorflow as tf
# There should have been a timeout here because only one sample was added
# and the minimum batch size is 2.
self.assertLessEqual(.9, duration.total_seconds())
self.assertGreaterEqual(1.5, duration.total_seconds())

outputs = [
    f(tf.constant([[1, 3]]), tf.constant([2])),
    f(tf.constant([[1, 3]]), tf.constant([2]))
]
start = datetime.datetime.now()
(_, batch_size), _ = session.run(outputs)
duration = datetime.datetime.now() - start
tensorflow.constant
477
import tensorflow as tf
raw_data = reader.ptb_raw_data(FLAGS.data_path)
train_data, valid_data, test_data, _ = raw_data

config = get_config()
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1

train_graph = tf.Graph()
eval_graph = tf.Graph()
infer_graph = tf.Graph()

with train_graph.as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
    with tf.name_scope('Train'):
        train_input = DataInput(config=config, data=train_data, name='TrainInput')
        with tf.variable_scope('Model', reuse=None, initializer=initializer):
            m = Model(is_training=True, config=config, input_=train_input, graph=train_graph)
        tf.summary.scalar('Training Loss', m.cost)
        tf.summary.scalar('Learning rate', m.lr)

latest_ckpt = tf.train.latest_checkpoint(FLAGS.save_path)
tensorflow.random_uniform_initializer
478
import tensorflow as tf
    phi: TensorLike,
    name: str = "spherical_harmonics_evaluate_spherical_harmonics") -> TensorLike:  # pylint: disable=line-too-long
    with tf.name_scope(name):
        degree_l = tf.convert_to_tensor(value=degree_l)
        order_m = tf.convert_to_tensor(value=order_m)
        theta = tf.convert_to_tensor(value=theta)
        phi = tf.convert_to_tensor(value=phi)
        var_type = theta.dtype
        sign_m = tf.math.sign(order_m)
        order_m = tf.abs(order_m)
        zeros = tf.zeros_like(order_m)
        result_m_zero = _spherical_harmonics_normalization(
            degree_l, zeros, var_type) * evaluate_legendre_polynomial(
                degree_l, zeros, tf.cos(theta))
        result_branch = _evaluate_spherical_harmonics_branch(
            degree_l, order_m, theta, phi, sign_m, var_type)
        return tf.where(tf.equal(order_m, zeros), result_m_zero, result_branch)
tensorflow.abs
479
import tensorflow as tf
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
tensorflow.array_ops.transpose
480
from tensorflow.python.ops import array_ops
# total average precision.
with ops.name_scope(None, 'max', (average_precision,)) as max_scope:
    # `max` is the max possible precision. Since max for any row is 1.0:
    # - For the unweighted case, this is just the number of rows.
    # - For the weighted case, it's the sum of the weights broadcast across
    #   `average_precision` rows.
    max_var = contrib_variables.local_variable(
        array_ops.zeros([], dtype=dtypes.float64), name=max_scope)
    if weights is None:
        batch_max = math_ops.to_double(
            array_ops.size(average_precision, name='batch_max'))
    else:
        # TODO(ptucker): More efficient way to broadcast?
        broadcast_weights = math_ops.mul(
            weights, array_ops.ones_like(average_precision),
            name='broadcast_weights')
        batch_max = math_ops.reduce_sum(broadcast_weights, name='batch_max')
    max_update = state_ops.assign_add(max_var, batch_max, name='update')
with ops.name_scope(None, 'total', (average_precision,)) as total_scope:
    total_var = contrib_variables.local_variable(
        array_ops.zeros([], dtype=dtypes.float64), name=total_scope)
    batch_total = math_ops.reduce_sum(average_precision, name='batch_total')
    total_update = state_ops.assign_add(total_var, batch_total, name='update')

# Divide total by max to get mean, for both vars and the update ops.
mean_average_precision = _safe_scalar_div(total_var, max_var, name='mean')
update = _safe_scalar_div(total_update, max_update, name=scope)
tensorflow.python.ops.array_ops.ones_like
481
import tensorflow as tf
in0 = tf.strings.to_number(in0, tf.int32)
in1 = tf.strings.to_number(in1, tf.int32)
add = tf.add(in0, in1, "ADD")
sub = tf.subtract(in0, in1, "SUB")

# Cast or convert result to the output dtype.
if tf_output0_dtype == tf.string:
    cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
else:
    cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
if tf_output1_dtype == tf.string:
    cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
else:
    cast1 = tf.cast(sub if not swap else add, tf_output1_dtype, "CAST1")
tensorflow.dtypes.as_string
482
import tensorflow as tf
                        "question")
self.ch = tf.placeholder(tf.int32,
                         [self.config.batch_size * self.max_p_num, self.config.max_p_len, self.config.max_ch_len],
                         "context_char")
self.qh = tf.placeholder(tf.int32,
                         [self.config.batch_size * self.max_p_num, self.config.max_q_len, self.config.max_ch_len],
                         "question_char")
self.start_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label1")
self.end_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label2")
self.position_emb = position_embedding(self.c, 2 * self.config.hidden_size)
self.c_mask = tf.cast(self.c, tf.bool)  # index 0 is padding symbol; N x self.max_p_num, max_p_len
self.q_mask = tf.cast(self.q, tf.bool)
self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1)
self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1)
self.dropout = tf.placeholder(tf.float32, name="dropout")
self.global_step = tf.Variable(0, name="global_step", trainable=False)

"""
:description: The embedding layer; question and passage share embeddings
"""
tensorflow.cast
483
import tensorflow as tf
# metrics_correct_rate, golden, predict = correct_rate(tags, pred_ids)
# metrics_correct_rate = correct_rate(tags, pred_ids, weights)
metrics = {
    "acc": tf.metrics.accuracy(tags, pred_ids, weights),
    "precision": precision(tags, pred_ids, num_tags, indices, weights),
    "recall": recall(tags, pred_ids, num_tags, indices, weights),
    "f1": f1(tags, pred_ids, num_tags, indices, weights),
tensorflow.metrics.accuracy
484
import tensorflow as tf
self.U1_pred = self.net_U1(self.x1_tf)  # N1 x q
self.loss = tf.reduce_sum(tf.square(self.u0_tf - self.U0_pred)) + \
            tf.reduce_sum(tf.square(self.u1_tf - self.U1_pred))
tensorflow.square
485
import tensorflow as tf
    initialized_variable_names,
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
    def tpu_scaffold():
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        return tf.train.Scaffold()

    scaffold_fn = tpu_scaffold
else:
    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

tf.logging.info("**** Trainable Variables ****")
for var in tvars:
    init_string = ""
    if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
    tf.logging.info(
        "  name = %s, shape = %s%s", var.name, var.shape, init_string
    )
tensorflow.train.init_from_checkpoint
486
import tensorflow as tf
    return merge_states(tf.transpose(x, [0, 2, 1, 3]))

def conv1d(x, scope, nf, rf, w_init=tf.random_normal_initializer(stddev=0.02),
           b_init=tf.constant_initializer(0), pad='VALID', train=False):
    with tf.variable_scope(scope):
        # x = [-1,n_ctx,512]
        nx = shape_list(x)[-1]
        # rf = 1, nx=emb, nf=3*emb
        w = tf.get_variable("w", [rf, nx, nf], initializer=w_init)
        b = tf.get_variable("b", [nf], initializer=b_init)
        if rf == 1:  # faster 1x1 conv
            c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf]))+b,
                           shape_list(x)[:-1]+[nf])
        else:  # was used to train LM
            c = tf.nn.conv1d(x, w, stride=1, padding=pad)+b
        return c

def attn(x, scope, n_state, n_head, train=False, scale=False):
    assert n_state % n_head == 0
    with tf.variable_scope(scope):
        # c [-1,n_ctx,3*emb]
        c = conv1d(x, 'c_attn', n_state*3, 1, train=train)
tensorflow.reshape
487
import tensorflow as tf
V = tf.random_normal(V_shape, dtype=L.dtype)
samples = tf.expand_dims(tf.transpose(mu), -1) + tf.batch_matmul(L, V)
return tf.transpose(samples)
# samples = []
tensorflow.transpose
488
import tensorflow as tf
    tf.float32, shape=(None, None, None, 4))
self._scores = tf.placeholder(
    tf.float32, shape=(None, None, self._num_classes))
self._boxes_predi, self._scores_predi, self._classes_predi, \
    self._valid_detections_predi = \
    tf.image.combined_non_max_suppression(
        boxes=self._boxes,
        scores=self._scores,
        max_output_size_per_class=50,
        max_total_size=50,
        iou_threshold=0.45,
        score_threshold=self._score_threshold)
self._label_map = self._load_labelmap(self._label_file)
tensorflow.image.combined_non_max_suppression
489
from tensorflow.python.ops import check_ops
    return (self.alpha * math_ops.log(self.beta) -
            math_ops.lgamma(self.alpha) -
            (self.alpha + 1.) * math_ops.log(x) - self.beta / x)

def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

def _log_cdf(self, x):
    return math_ops.log(self._cdf(x))

def _cdf(self, x):
    x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)]
                                           if self.validate_args else [], x)
    # Note that igammac returns the upper regularized incomplete gamma
    # function Q(a, x), which is what we want for the CDF.
    return math_ops.igammac(self.alpha, self.beta / x)

@distribution_util.AppendDocstring(
    """This is defined to be

    ```
    entropy = alpha - log(beta) + log(Gamma(alpha))
tensorflow.python.ops.check_ops.assert_positive
490
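The same guard pattern in isolation: check_ops.assert_positive attached as a control dependency, so the CDF only evaluates once the input is known to be positive (internal tensorflow.python.ops modules, TF 1.x):

import tensorflow as tf
from tensorflow.python.ops import check_ops, control_flow_ops

x = tf.constant([0.5, 2.0, 3.0])
x_checked = control_flow_ops.with_dependencies(
    [check_ops.assert_positive(x)], x)        # raises at run time if any x <= 0
cdf = tf.igammac(2.0, 1.0 / x_checked)        # Q(alpha, beta/x) with alpha=2, beta=1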
import tensorflow as tf def get_config(self): return { "vocab_path": self._args["vocab_path"], "spm_model": self._args["spm_model"], "languages": self._args["languages"], "with_src_lang_tag": self._with_src_lang_tag, "trg_lang_tag_position": self._trg_lang_tag_position, } def inputs_signature(self, mode): """ Returns the input dtypes and signatures. """ dtypes = {"feature": tf.int64, "src_lang": tf.int64, "trg_lang": tf.int64} signatures = {"feature": tf.TensorShape([None, None]), "src_lang": tf.TensorShape([None, ]), "trg_lang": tf.TensorShape([None, ])} if mode == compat.ModeKeys.INFER: return dtypes, signatures dtypes["label"] = tf.int64 signatures["label"] = tf.TensorShape([None, None]) return dtypes, signatures def build_model(self, args, name=None): """ Builds and return a keras model. """ model = build_model(args, self._multilingual_dp.meta,
tensorflow.TensorShape
491
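A quick look at the partially-defined tf.TensorShape signatures used above, where None means "any length" on that axis:

import tensorflow as tf

sig_feature = tf.TensorShape([None, None])  # [batch, time]
sig_lang = tf.TensorShape([None])           # [batch]

print(sig_feature.ndims)                                        # 2
print(sig_feature.is_compatible_with(tf.TensorShape([8, 20])))  # True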
import tensorflow as tf label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs) def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training.
tensorflow.reduce_sum
492
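The weighted-mean trick above with toy numbers: tf.reduce_sum over the weights makes padded predictions contribute nothing, and the 1e-5 keeps the division safe when every weight is zero:

import tensorflow as tf

per_example_loss = tf.constant([2.0, 1.0, 3.0, 0.7])
label_weights = tf.constant([1.0, 1.0, 0.0, 0.0])  # 0.0 marks padding

numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator

with tf.Session() as sess:
    print(sess.run(loss))  # ~1.5; the padded entries are ignored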
import tensorflow as tf name="start_pointer"), -1) end_logits = tf.squeeze( conv(self._attention(tf.concat([self.enc[1], self.enc[3]], axis=-1), name="attn2"), 1, bias=False, name="end_pointer"), -1) else: start_logits = tf.squeeze( conv(tf.concat([self.enc[1], self.enc[2]], axis=-1), 1, bias=False, name="start_pointer"), -1) end_logits = tf.squeeze( conv(tf.concat([self.enc[1], self.enc[3]], axis=-1), 1, bias=False, name="end_pointer"), -1) self.logits = [mask_logits(start_logits, mask=tf.reshape(self.c_mask, [N, -1])), mask_logits(end_logits, mask=tf.reshape(self.c_mask, [N, -1]))] self.logits1, self.logits2 = [l for l in self.logits] outer = tf.matmul(tf.expand_dims(tf.nn.softmax(self.logits1), axis=2), tf.expand_dims(tf.nn.softmax(self.logits2), axis=1)) outer = tf.matrix_band_part(outer, 0, self.max_a_len) self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
tensorflow.reshape
493
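The span-selection trick at the end in isolation: an outer product of start/end distributions, band-limited so only spans with 0 <= end - start <= max_len survive (toy batch of 2, length 6):

import tensorflow as tf

p_start = tf.nn.softmax(tf.random_normal([2, 6]))  # [batch, seq_len]
p_end = tf.nn.softmax(tf.random_normal([2, 6]))

outer = tf.matmul(tf.expand_dims(p_start, 2),
                  tf.expand_dims(p_end, 1))        # [batch, L, L]
outer = tf.matrix_band_part(outer, 0, 3)           # keep 0 <= end - start <= 3
yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)  # best start
yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)  # best end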
import tensorflow as tf config=bert_config, is_training=False, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) masked_lm_example_loss = get_masked_lm_output( bert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold()
tensorflow.trainable_variables
494
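tf.trainable_variables on its own — it returns only variables created with trainable=True (the default), which is why it captures exactly what the checkpoint assignment map needs to cover:

import tensorflow as tf

w = tf.get_variable("w", shape=[3, 3])
b = tf.get_variable("b", shape=[3], trainable=False)

print([v.name for v in tf.trainable_variables()])  # ['w:0'] -- b is excluded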
import tensorflow as tf from sklearn.metrics import classification_report slim = tf.contrib.slim global first first = True classnum=12 testnum = tf.placeholder(tf.int32) trainnum = tf.placeholder(tf.int32) validnum = tf.placeholder(tf.int32) learnrate = tf.placeholder(tf.float32) def getinputs(path): filename_queue=tf.train.string_input_producer([path]) reader=tf.TFRecordReader() _,serialized_example=reader.read(filename_queue) features=tf.parse_single_example(serialized_example, features={ 'label':tf.FixedLenFeature([], tf.int64), 'img_raw' : tf.FixedLenFeature([], tf.string), }) image=tf.decode_raw(features['img_raw'],tf.uint8) label=tf.cast(features['label'],tf.int32) image=tf.reshape(image,[4096,1]) return image,label def get_batch(image,label,batch_size,crop_size): #print(image.shape) #print(label.shape)
tensorflow.TFRecordReader
495
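The queue-based reading pattern above, condensed (TF 1.x only — queue runners were removed in TF 2; the file path is a placeholder):

import tensorflow as tf

queue = tf.train.string_input_producer(["/tmp/data.tfrecords"])
reader = tf.TFRecordReader()
_, serialized = reader.read(queue)
features = tf.parse_single_example(serialized, features={
    "label": tf.FixedLenFeature([], tf.int64),
    "img_raw": tf.FixedLenFeature([], tf.string),
})
image = tf.reshape(tf.decode_raw(features["img_raw"], tf.uint8), [4096, 1])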
import tensorflow as tf din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]]) scores = d_layer_3_all # Mask # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T] key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(scores) * (-2 ** 32 + 1) if not forCnn: scores = tf.where(key_masks, scores, paddings) # [B, 1, T] # Scale # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
tensorflow.expand_dims
496
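The masking step above in isolation: tf.expand_dims lifts a [B, T] key mask to [B, 1, T] so it lines up with the attention logits, and masked positions get a huge negative value before softmax:

import tensorflow as tf

scores = tf.random_normal([2, 1, 4])               # [B, 1, T] attention logits
mask = tf.constant([[True, True, False, False],
                    [True, True, True, False]])    # [B, T]

key_masks = tf.expand_dims(mask, 1)                # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)   # effectively -inf
weights = tf.nn.softmax(tf.where(key_masks, scores, paddings))  # padded keys ~ 0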
from tensorflow.python.client import graph_util @ops.RegisterStatistics("Conv2D", "weight_parameters") def _calc_conv_weight_params(graph, node): """Calculates the on-disk size of the weights for Conv2D.""" input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) input_shape.assert_is_fully_defined() filter_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1]) filter_shape.assert_is_fully_defined() output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) output_shape.assert_is_fully_defined() filter_height = int(filter_shape[0]) filter_width = int(filter_shape[1]) filter_in_depth = int(filter_shape[2]) filter_out_depth = int(filter_shape[3]) return ops.OpStats("weight_parameters", (filter_height * filter_width * filter_in_depth * filter_out_depth))
tensorflow.python.client.graph_util.tensor_shape_from_node_def_name
497
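A hedged sketch of that shape lookup — this is an internal helper whose module moved over time (the snippet imports it from tensorflow.python.client; in TF 1.x releases it lives in tensorflow.python.framework.graph_util):

import tensorflow as tf
from tensorflow.python.framework import graph_util

g = tf.Graph()
with g.as_default():
    tf.zeros([1, 8, 8, 3], name="input")

# Looks up "input:0" in the graph and returns its (fully defined) TensorShape.
print(graph_util.tensor_shape_from_node_def_name(g, "input"))  # (1, 8, 8, 3)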
import tensorflow as tf tf.app.flags.DEFINE_integer('save_summary_steps', 100, '') tf.app.flags.DEFINE_string('pretrained_model_path', None, '') tf.app.flags.DEFINE_boolean('allow_mix_precision', False, 'whether to allow mix precision') tf.app.flags.DEFINE_boolean('auto_tune', False, 'whether to autotune') tf.app.flags.DEFINE_boolean('use_processed_data', False, 'whether to use processed data') tf.app.flags.DEFINE_string('processed_data', './processed_dataset/', 'where to save preprocessed datasets') import model
tensorflow.app.flags.DEFINE_boolean
498
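tf.app.flags in a runnable form — DEFINE_boolean registers a flag, and tf.app.run() parses argv into FLAGS before calling main (TF 1.x; the flag name here is made up):

import tensorflow as tf

tf.app.flags.DEFINE_boolean("use_fp16", False, "whether to run in mixed precision")
FLAGS = tf.app.flags.FLAGS

def main(_):
    print(FLAGS.use_fp16)  # False by default, True with --use_fp16

if __name__ == "__main__":
    tf.app.run()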
import tensorflow as tf for i in range(classes.shape[0]): sdf = tf.expand_dims(sdfs[i], -1) sdf = sdf * -1.0 # inside positive, outside zero samples_object = centernet_utils.transform_pointcloud( tf.reshape(samples_world, [1, 1, -1, 3]), tf.reshape(poses[2][i], [1, 1, 3]), tf.reshape(poses[0][i], [1, 1, 3, 3]), tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0 samples_object = (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5 samples = tf.squeeze(samples_object)
tensorflow.reshape
499
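centernet_utils.transform_pointcloud is project-specific code, so as a generic stand-in, here is the same tf.reshape layout feeding an inverse rigid transform (p - t) @ R with an identity pose:

import tensorflow as tf

samples_world = tf.random_uniform([32 * 32 * 32, 3])  # flat grid of xyz samples
rotation = tf.eye(3)
translation = tf.zeros([3])

pts = tf.reshape(samples_world, [1, 1, -1, 3])        # [1, 1, N, 3]
rot = tf.reshape(rotation, [1, 1, 3, 3])
trans = tf.reshape(translation, [1, 1, 3])
obj = tf.matmul(pts - tf.expand_dims(trans, 2), rot)  # inverse transform, [1, 1, N, 3]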