Dataset columns:
seed: string, length 25 to 2.89k
seed_api: string, length 14 to 102
index: int64, 0 to 14.8k
import tensorflow as tf

x = tf.cast(x, tf.float32) / 255.0
x = tf.nn.dropout(x, rate=dropout)
tensorflow.nn.dropout
1,400
import tensorflow as tf

            tf.zeros([1, num_units])), [batch_size, 1])
        mask_fw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32),
                          keep_prob=keep_prob, is_train=is_train, mode=None)
        mask_bw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32),
                          keep_prob=keep_prob, is_train=is_train, mode=None)
        self.grus.append((gru_fw, gru_bw, ))
        self.inits.append((init_fw, init_bw, ))
        self.dropout_mask.append((mask_fw, mask_bw, ))

    def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True):
        outputs = [inputs]
        with tf.variable_scope(self.scope):
            for layer in range(self.num_layers):
                gru_fw, gru_bw = self.grus[layer]
                init_fw, init_bw = self.inits[layer]
                mask_fw, mask_bw = self.dropout_mask[layer]
                with tf.variable_scope("fw_{}".format(layer)):
                    out_fw, _ = tf.nn.dynamic_rnn(
                        gru_fw, outputs[-1] * mask_fw, seq_len,
                        initial_state=init_fw, dtype=tf.float32)
                with tf.variable_scope("bw_{}".format(layer)):
                    inputs_bw = tf.reverse_sequence(
                        outputs[-1] * mask_bw, seq_lengths=seq_len,
                        seq_dim=1, batch_dim=0)
tensorflow.variable_scope
1,401
import tensorflow as tf

        # for highway and projection layers
        n_highway = cnn_options.get('n_highway')
        use_highway = n_highway is not None and n_highway > 0
        use_proj = n_filters != projection_dim

        if use_highway or use_proj:
            # reshape from (batch_size, n_tokens, dim) to (-1, dim)
            batch_size_n_tokens = tf.shape(embedding)[0:2]
            embedding = tf.reshape(embedding, [-1, n_filters])

        # set up weights for projection
        if use_proj:
            assert n_filters > projection_dim
            with tf.variable_scope('CNN_proj') as scope:
                W_proj_cnn = tf.get_variable(
                    "W_proj", [n_filters, projection_dim],
                    initializer=tf.random_normal_initializer(
                        mean=0.0, stddev=np.sqrt(1.0 / n_filters)),
                    dtype=DTYPE)
                b_proj_cnn = tf.get_variable(
                    "b_proj", [projection_dim],
                    initializer=tf.constant_initializer(0.0),
                    dtype=DTYPE)

        # apply highways layers
        def high(x, ww_carry, bb_carry, ww_tr, bb_tr):
tensorflow.variable_scope
1,402
import tensorflow as tf

    try:
        summary_writer = tf.summary.FileWriter(tensorboard_dir, sess.graph)
        sess.run(tf.global_variables_initializer())

        # saved model restoring
        if args.restore:
            # Restore saved model if the user requested it, default = True
            try:
                checkpoint_state = tf.train.get_checkpoint_state(save_dir)
                if checkpoint_state and checkpoint_state.model_checkpoint_path:
                    log("Loading checkpoint {}".format(checkpoint_state.model_checkpoint_path), slack=True)
                    saver.restore(sess, checkpoint_state.model_checkpoint_path)
                else:
                    log("No model to load at {}".format(save_dir), slack=True)
tensorflow.train.get_checkpoint_state
1,403
from tensorflow.python.ops import array_ops

            b = ((min(-2, -1 - self._event_ndims_static),)
                 if self._batch_ndims_is_0 else ())
            e = (-1,) if self._event_ndims_is_0 else ()
            x = array_ops.squeeze(x, squeeze_dims=b + e)
            _, batch_shape, event_shape = self.get_shape(x)
        else:
tensorflow.python.ops.array_ops.squeeze
1,404
import tensorflow as tf

            scope.reuse_variables()
            truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
            truthoutput_h0 = tf.reshape(truthoutput_z_, [-1, s_h16, s_w16, self.gf_dim * 8])
            truthoutput_h1 = lrelu(deconv2d(tf.concat([truthoutput_h0, tgtctx_h3], 3),
                [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))
            truthoutput_h2 = lrelu(deconv2d(tf.concat([truthoutput_h1, tgtctx_h2], 3),
                [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))
tensorflow.concat
1,405
import tensorflow as tf

    if params.initializer == "xavier":
        return tf.contrib.layers.xavier_initializer()
    elif params.initializer == "uniform":
        max_val = params.initializer_gain
        return tf.random_uniform_initializer(-max_val, max_val)
    elif params.initializer == "normal":
        return tf.random_normal_initializer(0.0, params.initializer_gain)
    elif params.initializer == "normal_unit_scaling":
        return tf.variance_scaling_initializer(params.initializer_gain,
                                               mode="fan_avg",
                                               distribution="normal")
    elif params.initializer == "uniform_unit_scaling":
        return tf.variance_scaling_initializer(params.initializer_gain,
                                               mode="fan_avg",
                                               distribution="uniform")
    else:
tensorflow.variance_scaling_initializer
1,406
import tensorflow as tf

    def res_block_3_layers(self, bottom, channel_list, name, change_dimension=False):
        if change_dimension:
            block_conv_input = self.conv_layer(bottom=bottom, kernal_size=1,
                in_channels=bottom.get_shape().as_list()[-1],
                out_channels=channel_list[2], stride=1, name=name + "_branch1")
        else:
            block_conv_input = bottom
        input_filter = bottom.get_shape().as_list()[-1]
        block_conv_1 = self.conv_layer(bottom, 1, input_filter, channel_list[0], 1, name + "_branch2a")
        block_norm_1 = tf.layers.batch_normalization(inputs=block_conv_1, axis=3,
            momentum=configs['_BATCH_NORM_DECAY'], epsilon=configs['_BATCH_NORM_EPSILON'],
            center=True, scale=True, training=self.is_training, fused=True)
        block_relu_1 = tf.nn.relu(block_norm_1)
        block_conv_2 = self.conv_layer(block_relu_1, 3, channel_list[0], channel_list[1], 1, name + "_branch2b")
        block_norm_2 = tf.layers.batch_normalization(inputs=block_conv_2, axis=3,
            momentum=configs['_BATCH_NORM_DECAY'], epsilon=configs['_BATCH_NORM_EPSILON'],
            center=True, scale=True, training=self.is_training, fused=True)
        block_relu_2 = tf.nn.relu(block_norm_2)
        block_conv_3 = self.conv_layer(block_relu_2, 1, channel_list[1], channel_list[2], 1, name + "_branch2c")
        block_res = tf.add(block_conv_input, block_conv_3)
        relu = tf.nn.relu(block_res)
        return relu

    def avg_pool(self, bottom, kernal_size=2, stride=2, name="avg"):
        return tf.nn.avg_pool(bottom, ksize=[1, kernal_size, kernal_size, 1],
                              strides=[1, stride, stride, 1], padding='VALID', name=name)

    def max_pool(self, bottom, kernal_size=2, stride=2, name="max"):
tensorflow.nn.relu
1,407
import tensorflow as tf

if __name__ == "__main__":
    flags.mark_flag_as_required("data_dir")
    flags.mark_flag_as_required("task_name")
    flags.mark_flag_as_required("vocab_file")
    flags.mark_flag_as_required("bert_config_file")
    flags.mark_flag_as_required("output_dir")
    tf.app.run()
tensorflow.app.run
1,408
import tensorflow as tf

        for i in range(dim):
            out = tf.reshape(out, [self.mat_ranks[i] * self.inp_modes[i], -1])
tensorflow.reshape
1,409
import tensorflow as tf

    global_step_t = tf.reshape(global_step, [1])
    total_loss_t = tf.reshape(total_loss, [1])
    total_rpn_loss_t = tf.reshape(total_rpn_loss, [1])
    rpn_score_loss_t = tf.reshape(rpn_score_loss, [1])
    rpn_box_loss_t = tf.reshape(rpn_box_loss, [1])
    total_fast_rcnn_loss_t = tf.reshape(total_fast_rcnn_loss, [1])
    fast_rcnn_class_loss_t = tf.reshape(fast_rcnn_class_loss, [1])
    fast_rcnn_box_loss_t = tf.reshape(fast_rcnn_box_loss, [1])
tensorflow.reshape
1,410
import tensorflow as tf

        with tf.variable_scope('scop'):
            self.w1 = tf.get_variable('w1', [4096, 1024], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w2 = tf.get_variable('w2', [1024, classnum], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.b1 = tf.get_variable('b1', [1024], initializer=tf.constant_initializer(0.0))
            self.b2 = tf.get_variable('b2', [classnum], initializer=tf.constant_initializer(0.0))

    def inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
        out = tf.matmul(l1, self.w2) + self.b2
        return out

    def test_inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
        out = tf.matmul(l1, self.w2) + self.b2
        return out
tensorflow.matmul
1,411
import tensorflow as tf

        outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training)
        outputs = tf.transpose(outputs, [1, 0, 2])
tensorflow.transpose
1,412
from tensorflow.contrib.framework import deprecated

@deprecated(
tensorflow.contrib.framework.deprecated
1,413
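The seed above is cut off mid-decorator. For reference, a minimal hedged sketch of how this decorator is typically applied; the date, message, and wrapped function below are illustrative, not from the original source:

from tensorflow.contrib.framework import deprecated

@deprecated(
    "2018-01-01",                 # date after which the API is considered deprecated
    "Use the replacement API instead.")  # instructions appended to the warning
def old_helper(x):                # hypothetical function, for illustration only
    return x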
import tensorflow as tf

            tf.estimator.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                "output": tf.estimator.export.PredictOutput(export_out)
            })

    def _normalize_body_output(self, body_out):
        if isinstance(body_out, tuple):
            output, losses = body_out
            if not isinstance(losses, dict):
                losses = {"extra": tf.reduce_mean(losses)}
        else:
            output = body_out
            losses = {"extra": 0.0}
        return output, losses


def _warn_changed_modality_type(new_name, old_name, feature_name):
    new_type, new_name = registry.parse_modality_name(new_name)
tensorflow.reduce_mean
1,414
import tensorflow as tf

            mode, loss=total_loss,
            eval_metric_ops=eval_metric_ops
        )

    assert mode == tf.estimator.ModeKeys.TRAIN

    with tf.variable_scope('learning_rate'):
        global_step = tf.train.get_global_step()
        learning_rate = tf.train.cosine_decay(
            params['initial_learning_rate'],
            global_step, decay_steps=params['num_steps']
tensorflow.variable_scope
1,415
from tensorflow.python.ops import math_ops

        self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
        self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")

    def _create_slots(self, var_list):
        # Create slots for the first and second moments.
        for v in var_list:
            self._zeros_slot(v, "m", self._name)
            self._zeros_slot(v, "v", self._name)
            self._zeros_slot(v, "g", self._name)

    def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        if var.dtype.base_dtype == tf.float16:
            eps = 1e-7  # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.
        else:
            eps = 1e-8
        v = self.get_slot(var, "v")
        v_t = v.assign(beta2_t * v + (1. - beta2_t) * tf.square(grad))
        m = self.get_slot(var, "m")
tensorflow.python.ops.math_ops.cast
1,416
import tensorflow as tf

        # Embedding Lookup 16
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            if use_he_uniform:
                self.embedding_W = tf.get_variable(name='lookup_W',
                    shape=[num_quantized_chars, embedding_size],
                    initializer=tf.contrib.layers.variance_scaling_initializer())
            else:
                self.embedding_W = tf.Variable(
                    tf.random_uniform([num_quantized_chars, embedding_size], -1.0, 1.0),
                    name="embedding_W")
            self.embedded_characters = tf.nn.embedding_lookup(self.embedding_W, self.input_x)
            embedded_text_expand = tf.expand_dims(self.embedded_characters, -1)
tensorflow.contrib.layers.variance_scaling_initializer
1,417
import tensorflow as tf

print(sess.run(tf.matrix_inverse(D)))
print('\ndeterminant(D)={:.1f}'.format(sess.run(tf.matrix_determinant(D))))
print('\ncholesky(D):')
print(sess.run(tf.cholesky(identity_matrix)))
print('\nselfAdjointEig(D):')
print(sess.run(tf.self_adjoint_eig(D)))

print(sess.run(tf.div(13, 4)))
print(sess.run(tf.truediv(13, 4)))
print(sess.run(tf.floordiv(13, 4)))
print(sess.run(tf.mod(13.2, 4)))
print(sess.run(tf.cross([1, 0, 0], [0, 1, 0])))
print(sess.run(tf.square([1, 2, 3])))

def custom_polynomial(local_tf, value):
    return local_tf.subtract(3 * local_tf.square(value), value) + 10

print((sess.run(custom_polynomial(tf, 11))))
alpha = 0.1
tensorflow.mod
1,418
import tensorflow as tf

    if scale:
        n_state = shape_list(v)[-1]
        w = w * tf.rsqrt(tf.cast(n_state, tf.float32))
tensorflow.cast
1,419
import tensorflow as tf

            self._new_lr = tf.get_collection_ref("new_lr")[0]
            self._lr_update = tf.get_collection_ref("lr_update")[0]
tensorflow.get_collection_ref
1,420
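For context: tf.get_collection_ref only retrieves ops that were stored earlier, so the collections read above must have been populated when the graph was first built. A hedged sketch of the producing side, in the style of the PTB tutorial (variable names assumed):

self._new_lr = tf.placeholder(tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)   # op that overwrites the lr variable
tf.add_to_collection("new_lr", self._new_lr)
tf.add_to_collection("lr_update", self._lr_update)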
import tensorflow as tf

        with self.test_session(use_gpu=False) as sess:
            sp_input = self._SparseTensorPlaceholder()
            input0_val = self._SparseTensorValue_5x6(np.arange(6))
            input1_val = self._SparseTensorValue_1x1x1()
            handle = add_sparse_to_tensors_map(sp_input)
            handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
            handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
            handle_concat = tf.convert_to_tensor(
                [handle0_value, handle1_value], dtype=tf.int64)

            sp_roundtrip = take_many_sparse_from_tensors_map(
                sparse_map_op=handle.op, sparse_handles=handle_concat)

            with self.assertRaisesOpError(
                    r"Inconsistent rank across SparseTensors: rank prior to "
                    r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"):
                sess.run(sp_roundtrip)
tensorflow.convert_to_tensor
1,421
import tensorflow as tf

        cdf = pareto.cdf(x)
        self.assertEqual(cdf.shape, (6, 3))
        self.assertAllClose(
            self.evaluate(cdf),
            self._scipy_pareto(concentration_v, scale_v).cdf(x))

    def testParetoPDFGradientZeroOutsideSupport(self):
        scale = tf.constant(1.)
        concentration = tf.constant(3.)
        # Check the gradient on the undefined portion.
        x = scale - 1
        pareto = tfd.Pareto(concentration, scale)
        compute_pdf = lambda x: pareto.prob(x)  # pylint:disable=unnecessary-lambda
        self.assertAlmostEqual(self.compute_gradients(
tensorflow.constant
1,422
import tensorflow as tf

                                           self.n_batches_per_epoch),
                                       tf.int64, "global_epoch")
        tf.add_to_collection("global_epoch", self.global_epoch)

    # this creates an operation to add to all trainable variables a white noise of param
    # std = tf.sqrt(variance)/10
    def create_random_update_op(self):
        vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        update_opts = []
        for var in vars:
            _, variance = tf.nn.moments(tf.reshape(var, [-1]), axes=[0])
            normal = tf.distributions.Normal(loc=0.0, scale=tf.sqrt(variance)/10)
            white_noise = normal.sample(var.get_shape())
tensorflow.get_collection
1,423
import tensorflow as tf

        self.loss_log = []

    def initialize_NN(self, layers):
        weights = []
        biases = []
        num_layers = len(layers)
        for l in range(0, num_layers - 1):
            W1 = self.xavier_init(size=[layers[l], layers[l+1]])
            W2 = self.xavier_init(size=[layers[l], layers[l+1]])
            b = tf.Variable(tf.zeros([1, layers[l+1]], dtype=tf.float32), dtype=tf.float32)
            weights.append((W1, W2))
            biases.append(b)
        return weights, biases

    def xavier_init(self, size):
        in_dim = size[0]
        out_dim = size[1]
        xavier_stddev = np.sqrt(2 / (in_dim + out_dim))
tensorflow.zeros
1,424
import tensorflow as tf

    def testParetoShapeBroadcast(self):
        scale = tf.constant([[3., 2.]])
        concentration = tf.constant([[4.], [5.], [6.]])
        pareto = tfd.Pareto(concentration, scale)
        self.assertAllEqual(self.evaluate(pareto.batch_shape_tensor()), (3, 2))
        self.assertAllEqual(pareto.batch_shape, tf.TensorShape([3, 2]))
        self.assertAllEqual(self.evaluate(pareto.event_shape_tensor()), [])
        self.assertEqual(pareto.event_shape, tf.TensorShape([]))

    def testInvalidScale(self):
        invalid_scales = [-.01, 0., -2.]
        concentration = 3.
tensorflow.TensorShape
1,425
import tensorflow as tf

    '''
    Evaluate the quality of the logits at predicting the label
    '''
    correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
    correct = tf.cast(correct, tf.int32)
    n_correct = tf.reduce_sum(correct)
    return n_correct


def optimize(loss, learning_rate, global_step):
tensorflow.reduce_sum
1,426
import tensorflow as tf

    epoch = step // steps_per_epoch
    if use_sgdr is True:
        # Apply Stochastic Gradient Descent with Warm Restarts (SGDR)
        lr = tf.train.cosine_decay_restarts(lr, epoch, sgdr_decay_epochs,
                                            t_mul=sgdr_t_mul, alpha=sgdr_alpha)
    return lr
tensorflow.train.cosine_decay_restarts
1,427
import tensorflow as tf

    A tuple (images, labels), where:
    * images is a float tensor with shape [batch_size, FLAGS.num_scales,
      FLAGS.crop_size, FLAGS.crop_size]
    * labels is an int32 tensor with shape [batch_size] with the true label,
      a number in the range [0, NUM_CLASSES()).

    Note that a tf.train.QueueRunner is added to the graph, which
    must be run using e.g. tf.train.start_queue_runners().
    """
    if not num_epochs:
        num_epochs = None
    filename = os.path.join(FLAGS.train_dir, 'data',
                            '{}_{}.tfrecords'.format(name, records.tfrecord_name()))

    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer(
            [filename], num_epochs=num_epochs)

        # Even when reading in multiple threads, share the filename queue.
        image, label = read_and_decode(filename_queue)

        # Shuffle the examples and collect them into batch_size batches.
        # (Internally uses a RandomShuffleQueue.)
        # We run this in two threads to avoid being a bottleneck.
tensorflow.name_scope
1,428
import tensorflow as tf

    grl = gradient_reverse(samples)
    grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1]))

    grl = fc(grl, 100, True, None, activation=relu, name='fc1')
    logits = fc(grl, 1, True, None, activation=None, name='fc2')

    domain_predictions = tf.sigmoid(logits)
    domain_loss = tf.losses.log_loss(domain_selection_mask, domain_predictions, weights=weight)

    domain_accuracy = util.accuracy_tf(domain_selection_mask, tf.round(domain_predictions))

    assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])
    with tf.control_dependencies([assert_op]):
        tag_loss = 'losses/domain_loss'
        barrier = tf.no_op(tag_loss)

    return domain_loss


def difference_loss(private_samples, shared_samples, weight=1.0, name='difference_loss'):
    """Adds the difference loss between the private and shared representations.

    Args:
tensorflow.is_finite
1,429
import tensorflow as tf

    batch_size = tf.shape(attention_weights)[0]
    src_len = tf.shape(attention_weights)[2]
    trg_len = tf.shape(attention_weights)[1]

    src_indices = tf.tile(tf.reshape(tf.range(src_len), shape=[1, 1, src_len]),
                          [batch_size, trg_len, 1])
    trg_indices = tf.tile(tf.reshape(tf.range(trg_len), shape=[1, trg_len, 1]),
                          [batch_size, 1, src_len])

    source_length = encoder_input_length[0]
tensorflow.range
1,430
import tensorflow as tf

        Q = tf.tile(tf.expand_dims(self.q_embed_encoding, 1), [1, self.max_p_len, 1, 1])
        S = trilinear([C, Q, C * Q], input_keep_prob=1.0 - self.dropout)
        mask_q = tf.expand_dims(self.q_mask, 1)
        S_ = tf.nn.softmax(mask_logits(S, mask=mask_q))
tensorflow.expand_dims
1,431
import tensorflow as tf

                reg_user=self.regs[0], reg_item=self.regs[1], seed=self.seed)
            logits = tf.layers.dense(self.interaction, units=1, name='logits',
                                     kernel_initializer=tf.initializers.lecun_uniform(self.seed))
            self.prediction = tf.nn.sigmoid(logits)
            self.loss = loss_fn(labels=self.labels, logits=logits)
            train_op = train_fn(self.loss, learning_rate=self.learning_rate, learner=self.learner)
            initializer = tf.global_variables_initializer()

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(graph=graph, config=config)
        self.sess.run(initializer)

        loop = trange(self.num_epochs, disable=not self.verbose)
        for _ in loop:
tensorflow.global_variables_initializer
1,432
import tensorflow as tf

    if not isinstance(action_space, gym.spaces.box.Box):
        raise ValueError("Expecting continuous action space.")
    mean_weights_initializer = tf.initializers.variance_scaling(
        scale=config.init_mean_factor)
    logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)
    flat_observations = tf.reshape(observations, [
        tf.shape(observations)[0], tf.shape(observations)[1],
        functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
    with tf.variable_scope("network_parameters"):
        with tf.variable_scope("policy"):
            x = flat_observations
            for size in config.policy_layers:
                x = tf.layers.dense(x, size, activation=tf.nn.relu)
            mean = tf.layers.dense(
                x, action_space.shape[0], activation=tf.tanh,
                kernel_initializer=mean_weights_initializer)
            logstd = tf.get_variable(
                "logstd", mean.shape[2:], tf.float32, logstd_initializer)
            logstd = tf.tile(
                logstd[None, None],
                [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
        with tf.variable_scope("value"):
            x = flat_observations
            for size in config.value_layers:
                x = tf.layers.dense(x, size, activation=tf.nn.relu)
            value = tf.layers.dense(x, 1)[..., 0]
    mean = tf.check_numerics(mean, "mean")
tensorflow.layers.dense
1,433
import tensorflow as tf

        soft_placement = True
        util.auto_parallel(metagraph, m)

    with tf.Graph().as_default():
        tf.train.import_meta_graph(metagraph)
        for model in models.values():
            model.import_ops()
        sv = tf.train.Supervisor(logdir=FLAGS.save_path)
        config_proto = tf.ConfigProto(allow_soft_placement=soft_placement)
        with sv.managed_session(config=config_proto) as session:
            for i in range(config.max_max_epoch):
                lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
                m.assign_lr(session, config.learning_rate * lr_decay)

                print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
                train_perplexity = run_epoch(session, m, eval_op=m.train_op,
tensorflow.ConfigProto
1,434
import tensorflow as tf

    mask_ = tf.ones([FLAGS.batch_size, 64, 64, 3])
    mask = tf.pad(mask_, [[0, 0], [32, 32], [32, 32], [0, 0]])

    mask2__ = tf.ones([FLAGS.batch_size, 78, 78, 3])
    mask2_ = tf.pad(mask2__, [[0, 0], [25, 25], [25, 25], [0, 0]])
    mask2 = mask2_ - mask

    pred_annotation, logits = inference((1 - mask) * image + mask * 255, keep_probability, z)
tensorflow.pad
1,435
import tensorflow as tf

        self.assertEqual(x_duplicate.name, "x_1/")
        self.assertTrue(x_duplicate_sample.name.startswith("x_1/custom_sample"))
        self.assertTrue(x_sample_duplicate.name.startswith("x/custom_sample_1"))

    def testStrWorksCorrectlyScalar(self):
        # Usually we'd write np.float(X) here, but a recent Eager bug would
        # erroneously coerce the value to float32 anyway. We therefore use constants
        # here, until the bug is resolved in TensorFlow 1.12.
        normal = tfd.Normal(loc=tf.constant(0, tf.float16),
                            scale=tf.constant(1, tf.float16))
        self.assertEqual(
            str(normal),
            "tfp.distributions.Normal("
            "\"Normal/\", "
            "batch_shape=(), "
            "event_shape=(), "
            "dtype=float16)")

        chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
tensorflow.constant
1,436
import tensorflow as tf

    def __init__(self, name, input_dim, output_dim, k_h=4, k_w=4, d_h=2, d_w=2,
                 stddev=0.02, data_format='NHWC', padding='SAME', epsilon=1e-9):
        with tf.variable_scope(name):
            assert data_format == 'NHWC'
tensorflow.variable_scope
1,437
import tensorflow as tf

    def init_eta_omega(self, beta, epsilon, init_eta, init_omega):
        # Here we define the symbolic function for the dual and the gradient
        self.beta = beta
        self.epsilon = epsilon

        # Init dual param values
        self.param_eta = init_eta
        self.param_omega = init_omega

        self.param_eta_non_lin = init_eta
        self.param_omega_non_lin = init_omega

        param_eta = tf.placeholder(dtype=tf.float32, shape=[], name="param_eta")
        param_omega = tf.placeholder(dtype=tf.float32, shape=[], name="param_omega")
        old_entropy = tf.placeholder(dtype=tf.float32, shape=[], name="old_entropy")

        varphis = tf.placeholder(dtype=tf.float32, shape=[None, None], name="varphis")
        Kt = tf.placeholder(dtype=tf.float32, shape=[None, None], name="Kt")
        prec = tf.placeholder(dtype=tf.float32, shape=[None, None], name="prec")
        Waa = tf.placeholder(dtype=tf.float32, shape=[None, None], name="Waa")
        Wsa = tf.placeholder(dtype=tf.float32, shape=[None, None], name="Wsa")
        wa = tf.placeholder(dtype=tf.float32, shape=[None, None], name="wa")

        # varphis = ext.new_tensor(
        #     'varphis',
        #     ndim=2,
        #     dtype=theano.config.floatX
        # )
tensorflow.placeholder
1,438
import tensorflow as tf

        o_d4 = self.general_deconv2d(o_me3, self.base_number_of_features, 3, stride=2,
                                     padding='SAME', activation_function='relu',
                                     do_norm=False, name=name + '_deconv2d_4')
        o_me4 = tf.concat([o_d4, o_c1], 3)  # Skip connection

        logits = tf.layers.conv2d(o_me4, self.args.num_classes, 1, 1, 'SAME', activation=None)
        prediction = tf.nn.softmax(logits, name=name + '_softmax')

        return logits, prediction
tensorflow.nn.softmax
1,439
from tensorflow.python.ops import math_ops """ with ops.name_scope(None, 'false_positives', (predictions_idx, labels)): labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx, class_id) fp = set_ops.set_size(set_ops.set_difference( predictions_idx, labels, aminusb=True)) fp = math_ops.to_double(fp) if weights is not None: weights = math_ops.to_double(weights) fp = math_ops.mul(fp, weights) return fp
tensorflow.python.ops.math_ops.to_double
1,440
import tensorflow as tf

INPUT_DIM = 5
OUTPUT_DIM = 3


def generator_fn(generator_inputs):
    outputs = tf.layers.dense(generator_inputs, OUTPUT_DIM)
    return outputs


def discriminator_fn(data, generator_inputs):
    outputs = tf.layers.dense(data, 1)
    return outputs


def model_fn(features, labels, mode, params):
    # build model
    global_step = tf.train.get_global_step()
    generator_inputs = features
    real_data = labels
tensorflow.layers.dense
1,441
import tensorflow as tf

tf.app.flags.DEFINE_float('ws_lrn_rate_rg', 3e-2, 'WS: learning rate for layerwise regression')
tf.app.flags.DEFINE_integer('ws_nb_iters_rg', 20, 'WS: # of iterations for layerwise regression')
tf.app.flags.DEFINE_float('ws_lrn_rate_ft', 3e-4, 'WS: learning rate for global fine-tuning')
tf.app.flags.DEFINE_integer('ws_nb_iters_ft', 400, 'WS: # of iterations for global fine-tuning')
tensorflow.app.flags.DEFINE_float
1,442
import tensorflow as tf

if __name__ == "__main__":
    tf.test.main()
tensorflow.test.main
1,443
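tf.test.main() discovers and runs any tf.test.TestCase subclasses defined in the module, so the seed above is normally preceded by a test class. A minimal hedged example of what such a module usually contains (class and assertion are illustrative):

import tensorflow as tf

class SquareTest(tf.test.TestCase):
    def testSquare(self):
        with self.test_session():
            x = tf.square([2, 3])            # element-wise square
            self.assertAllEqual(x.eval(), [4, 9])

if __name__ == "__main__":
    tf.test.main()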
import tensorflow as tf

        w = x_shape[2]
        pad_h1 = tf.mod(-h + bsize[1], bstrides[1])
        pad_w1 = tf.mod(-w + bsize[2], bstrides[2])
        return tf.cond(
            tf.logical_or(tf.greater(pad_h1, 0), tf.greater(pad_w1, 0)),
            lambda: tf.pad(x, [[0, 0], [0, pad_h1], [0, pad_w1], [0, 0]]),
            lambda: x)
    else:
        return x
tensorflow.greater
1,444
import tensorflow as tf

    sequence_shape = get_shape_list(sequence_tensor, expected_rank=3)
    batch_size = sequence_shape[0]
    seq_length = sequence_shape[1]
    width = sequence_shape[2]

    flat_offsets = tf.reshape(
        tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
    flat_positions = tf.reshape(positions + flat_offsets, [-1])
    flat_sequence_tensor = tf.reshape(sequence_tensor,
                                      [batch_size * seq_length, width])
    output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
    return output_tensor

# add sequence mask for:
# 1. random shuffle lm modeling---xlnet with random shuffled input
# 2. left2right and right2left language modeling
# 3. conditional generation
def generate_seq2seq_mask(attention_mask, mask_sequence, seq_type, **kargs):
    if seq_type == 'seq2seq':
        if mask_sequence is not None:
tensorflow.gather
1,445
import tensorflow as tf

                            name=name+'/gamma', trainable=True, dtype=x.dtype)

        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.9)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = control_flow_ops.cond(phase_train,
                                          mean_var_with_update,
                                          lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
    return normed


def inception(inp, inSize, ks, o1s, o2s1, o2s2, o3s1, o3s2, o4s1, o4s2, o4s3,
              poolType, name, phase_train=True, use_batch_norm=True, weight_decay=0.0):
    print('name = ', name)
    print('inputSize = ', inSize)
    print('kernelSize = {3,5}')
    print('kernelStride = {%d,%d}' % (ks, ks))
    print('outputSize = {%d,%d}' % (o2s2, o3s2))
tensorflow.nn.batch_normalization
1,446
import tensorflow as tf

        class_prediction = tf.contrib.layers.fully_connected(
            flattened_inputs, self._num_classes)
        box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4)

        return {
            'class_predictions_with_background': tf.reshape(
                class_prediction, [-1, 1, self._num_classes]),
            'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])
        }
tensorflow.reshape
1,447
import tensorflow as tf

        # Number of disparity interpolated.
        out_depth = out_size[0]
        out_height = out_size[1]
        out_width = out_size[2]
        zero = tf.zeros([], dtype='int32')

        # 0 <= z < depth, 0 <= y < height & 0 <= x < width.
        max_z = tf.to_int32(tf.shape(im)[1] - 1)
        max_y = tf.to_int32(tf.shape(im)[2] - 1)
        max_x = tf.to_int32(tf.shape(im)[3] - 1)

        # Converts scale indices from [-1, 1] to [0, width/height/depth].
        x = (x + 1.0) * (width_f) / 2.0
        y = (y + 1.0) * (height_f) / 2.0
        z = (z + 1.0) * (depth_f) / 2.0
tensorflow.shape
1,448
import tensorflow as tf

        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2,
                                                 output_shape=tf.shape(image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3,
                                                 output_shape=deconv_shape3, stride=8)

        annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction")

    return tf.expand_dims(annotation_pred, dim=3), conv_t3


def train(loss_val, var_list):
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    if FLAGS.debug:
        # print(len(var_list))
        for grad, var in grads:
            utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)


def main(argv=None):
    keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
    image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")  # debug
    annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1], name="annotation")
tensorflow.train.AdamOptimizer
1,449
import tensorflow as tf

    Args:
        ckpt: Path to existing checkpoint. If present, returns only the subset of
            variables that exist in given checkpoint.

    Returns:
        List of all variables that need to be saved/restored.
    """
    model_vars = tf.trainable_variables()

    # Add batchnorm variables.
    bn_vars = [v for v in tf.global_variables()
               if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name
               or 'mu' in v.op.name or 'sigma' in v.op.name
               or 'global_scale_var' in v.op.name]
    model_vars.extend(bn_vars)
    model_vars = sorted(model_vars, key=lambda x: x.op.name)
    mapping = {}
    if ckpt is not None:
        ckpt_var = tf.contrib.framework.list_variables(ckpt)
        ckpt_var_names = [name for (name, unused_shape) in ckpt_var]
tensorflow.global_variables
1,450
import tensorflow as tf

            inputs=x, filter_depth=filter_depth3,
            kernel_size=1, padding="same", stride=1
        )
        x = self.__batch_norm("{}2c".format(bn_name_base), x)

        x = tf.add(x, shortcut)
        return tf.nn.relu(x)

    def __identity_block(self, stage, block, inputs, filter_depths, kernel_size):
        filter_depth1, filter_depth2, filter_depth3 = filter_depths

        conv_name_base = "conv_stage{}_block{}_branch".format(stage, block)
        bn_name_base = "bn_stage{}_block{}_branch".format(stage, block)
tensorflow.nn.relu
1,451
import tensorflow as tf

        h = o * tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s


def _ln(x, g, b, e=1e-5, axes=[1]):
    u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
    x = (x - u) / tf.sqrt(s + e)
    x = x * g + b
    return x


def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    with tf.variable_scope(scope):
tensorflow.sqrt
1,452
import tensorflow as tf

            initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),
        )
        lookup_table = tf.concat((tf.zeros(shape=[1, size_layers]), lookup_table[1:, :]), 0)
        forward = tf.nn.embedding_lookup(lookup_table, self.X)
        self.Y = tf.placeholder(tf.float32, (None, None, n_mels * resampled))
        self.decoder_inputs = tf.concat((tf.zeros_like(self.Y[:, :1, :]), self.Y[:, :-1, :]), 1)
        self.decoder_inputs = self.decoder_inputs[:, :, -n_mels:]
        self.Z = tf.placeholder(tf.float32, (None, None, fourier_window_size // 2 + 1))
        batch_size = tf.shape(self.X)[0]
tensorflow.zeros_like
1,453
import tensorflow as tf

        (images, labels) = random_batch(batch_size, config)
        model = revnet.RevNet(config=config)
        optimizer = tf.train.GradientDescentOptimizer(0.1)
        if defun:
            model.call = tfe.function(model.call)

        num_burn = 3
        num_iters = 10
        with tf.device(device):
            iterator = make_iterator((images, labels))
            for _ in range(num_burn):
                (images, labels) = iterator.next()
                train_one_iter(model, images, labels, optimizer)
            if execution_mode:
                tfe.async_wait()
            self._force_device_sync()
tensorflow.device
1,454
from tensorflow.python.training import ftrl

        metrics = classifier.fit(input_fn=_input_fn, steps=_ITERS).evaluate(
            input_fn=_input_fn, steps=100)
        self._assertSingleClassMetrics(metrics)

    def benchmarkCustomOptimizer(self):
        iris = test_data.prepare_iris_data_for_logistic_regression()
        cont_feature = feature_column.real_valued_column('feature', dimension=4)
        bucketized_feature = feature_column.bucketized_column(
            cont_feature, test_data.get_quantile_based_buckets(iris.data, 10))

        classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
            model_dir=tempfile.mkdtemp(),
            linear_feature_columns=(bucketized_feature,),
            linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
            dnn_feature_columns=(cont_feature,),
            dnn_hidden_units=(3, 3),
            dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))

        input_fn = test_data.iris_input_logistic_fn
        metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate(
            input_fn=input_fn, steps=100)
        self._assertSingleClassMetrics(metrics)

    def benchmarkMultiClass(self):
        iris = base.load_iris()
        cont_feature = feature_column.real_valued_column('feature', dimension=4)
        bucketized_feature = feature_column.bucketized_column(
tensorflow.python.training.ftrl.FtrlOptimizer
1,455
import tensorflow as tf

                        final_dim]
        else:
            l2_shape = tf.identity(x_shape)

        # Initialize hidden layer activities
        if self.hidden_init == 'identity':
            l1_h2 = tf.identity(x)
            l2_h2 = tf.zeros(l2_shape, dtype=self.dtype)
            l3_h2 = tf.zeros(l3_shape, dtype=self.dtype)
        elif self.hidden_init == 'random':
            l1_h2 = tf.random_normal(x_shape, dtype=self.dtype)
            l2_h2 = tf.random_normal(l2_shape, dtype=self.dtype)
            l3_h2 = tf.random_normal(l3_shape, dtype=self.dtype)
        elif self.hidden_init == 'zeros':
            l1_h2 = tf.zeros(x_shape, dtype=self.dtype)
            l2_h2 = tf.zeros(l2_shape, dtype=self.dtype)
tensorflow.zeros
1,456
import tensorflow as tf

        self.loss_d_rot = self.loss_d + self.weight_rotation_loss_d * self.real_pc_rot_loss
        self.loss_g_rot = self.loss_g + self.weight_rotation_loss_g * self.gen_out_rot_loss

        # Compute gradient penalty at interpolated points
        ndims = self.real_pc.get_shape().ndims  # (1024, 3)
        alpha = tf.random_uniform(shape=[self.batch_size] + [1] * (ndims - 1),
                                  minval=0., maxval=1.)
        differences = self.generator_out - self.real_pc
        interpolates = self.real_pc + (alpha * differences)

        with tf.variable_scope('discriminator') as scope:
tensorflow.random_uniform
1,457
import tensorflow as tf

    def test_minimum_batch_size(self):
        with self.test_session() as session:
            @dynamic_batching.batch_fn_with_options(
                minimum_batch_size=2, timeout_ms=1000)
            def f(a, b):
                batch_size = tf.shape(a)[0]
                return a + b, tf.tile([batch_size], [batch_size])

            output = f(tf.constant([[1, 3]]), tf.constant([2]))

            tf.train.start_queue_runners()
tensorflow.tile
1,458
import tensorflow as tf

    cand_features = np.concatenate(cand_features, axis=0).astype(np.float32, copy=False)
    cand_choices = np.asarray(cand_choices).astype(np.int32, copy=False)
    cand_scoress = np.concatenate(cand_scoress, axis=0).astype(np.float32, copy=False)
    n_cands_per_sample = np.asarray(n_cands_per_sample).astype(np.int32, copy=False)

    return cand_features, n_cands_per_sample, cand_choices, cand_scoress


def padding(output, n_vars_per_sample, fill=-1e8):
    n_vars_max = tf.reduce_max(n_vars_per_sample)

    output = tf.split(
        value=output,
        num_or_size_splits=n_vars_per_sample,
        axis=1,
    )
    output = tf.concat([
        tf.pad(
            x,
            paddings=[[0, 0], [0, n_vars_max - tf.shape(x)[1]]],
            mode='CONSTANT',
            constant_values=fill)
        for x in output
tensorflow.split
1,459
import tensorflow as tf

    else:
        tf.logging.info('Mode not found.')
tensorflow.logging.info
1,460
import tensorflow as tf

            tf.placeholder(tf.float32, [None, 4]),
        fields.InputDataFields.groundtruth_classes:
            tf.placeholder(tf.int32, [None, 3]),
        fields.InputDataFields.num_groundtruth_boxes:
            tf.placeholder(tf.int32, [])
    }
    padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
        tensor_dict=input_tensor_dict,
tensorflow.placeholder
1,461
import tensorflow as tf

        if direction == 'forward':
            self.lstm_outputs[direction].append(layer_output)
        else:
            self.lstm_outputs[direction].append(
                tf.reverse_sequence(
                    layer_output,
                    sequence_lengths,
                    seq_axis=1,
tensorflow.reverse_sequence
1,462
import tensorflow as tf

    valid_pre = tf.reshape(valid_inf, [validnum, classnum])
    valid_correct_prediction = tf.equal(tf.argmax(valid_inf, 1), tf.argmax(valid_labels, 1))
    valid_accuracy = tf.reduce_mean(tf.cast(valid_correct_prediction, tf.float32))
    valid_pre = tf.argmax(valid_pre, 1)
tensorflow.cast
1,463
import tensorflow as tf

        save_path = os.path.join(self.get_temp_dir(), "variables")
        with tf.Session("", graph=tf.Graph()) as sess:
            one = tf.Variable(1.0)
            twos = tf.Variable([2.0, 2.0, 2.0])
            init = tf.initialize_all_variables()
            save = tf.train.Saver(tf.all_variables())
tensorflow.Variable
1,464
import tensorflow as tf

def test_tensor_rearrange():
    tensor_rearrange = TensorRearrange(seed=713)
    in_node_a = tensor_rearrange.get_placeholder("input_0")
    in_node_b = tensor_rearrange.get_placeholder("input_1")
    in_node_c = tensor_rearrange.get_placeholder("input_2")
    stitched = tf.dynamic_stitch([[1, 10], [[0, 7, 9], [5, 8, 3]], [[6], [4], [2]]],
                                 [in_node_a, in_node_b, in_node_c])  # should be 11,5,4
    list_of_parts = tf.dynamic_partition(tf.transpose(stitched, perm=[1, 2, 0]),
                                         [[0, 1, 2, 3], [1, 0, 2, 3], [2, 3, 1, 0],
                                          [2, 1, 0, 3], [0, 1, 2, 3]],
                                         num_partitions=4)  # after permute becomes 5,4,11, return all partitions 5,11
    node_a = tf.div(list_of_parts[0], list_of_parts[1])
    node_b = tf.divide(list_of_parts[2], list_of_parts[3])
    trace_node = tf.trace(node_a) + node_b  # there is a broadcast here
    out_node = tf.cast(tf.count_nonzero(trace_node), dtype=tf.float32) + \
        tf.Variable(tf.random_normal(shape=(2, 3)))
    placeholders = [in_node_a, in_node_b, in_node_c]
    predictions = [out_node]

    # Run and persist
    tfp = TensorFlowPersistor(save_dir="partition_stitch_misc")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(tensor_rearrange.get_test_data()) \
        .build_save_frozen_graph()
tensorflow.trace
1,465
import tensorflow as tf

    output_ta = tf.TensorArray(dtype=tf.float32,
                               size=0,
                               dynamic_size=True,
                               element_shape=(facts[:, 0, :].get_shape()))
    _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
    self_attention = output_op.stack()
    self_attention = tf.transpose(self_attention, perm=[1, 0, 2])
    return self_attention


def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):
    if len(facts.get_shape().as_list()) == 2:
        facts = tf.expand_dims(facts, 1)
tensorflow.transpose
1,466
import tensorflow as tf

    # First convolutional layer - maps one image to 32 feature maps.
    with tf.variable_scope('Conv_1'):
        conv1 = tf.layers.conv2d(
            inputs=x_image,
            filters=32,
            kernel_size=[5, 5],
            padding='same',
            use_bias=False,
            name='conv1'
        )
        conv1_bn = tf.nn.relu(tf.layers.batch_normalization(conv1, training=train))
        pool1 = tf.layers.max_pooling2d(
            inputs=conv1_bn,
            pool_size=[2, 2],
            strides=2,
            name='pool1'
        )
        conv2 = tf.layers.conv2d(
            inputs=pool1,
tensorflow.layers.batch_normalization
1,467
import tensorflow as tf

            d.attention_mask: tf.io.VarLenFeature(tf.int64),
            d.labels: tf.io.VarLenFeature(tf.int64),
        }
        dataset = dataset.map(
            lambda x: tf.io.parse_example(x, features),
            num_parallel_calls=utils.AUTOTUNE,
        ).prefetch(utils.AUTOTUNE)
        dataset = dataset.map(
            lambda x: (
                tf.cast(tf.sparse.to_dense(x[d.input_ids]), tf.int32),
                tf.cast(tf.sparse.to_dense(x[d.token_type_ids]), tf.int32),
                tf.cast(tf.sparse.to_dense(x[d.attention_mask]), tf.int32),
                tf.cast(tf.sparse.to_dense(x[d.labels]), tf.int32),
            ),
            num_parallel_calls=utils.AUTOTUNE,
        ).prefetch(utils.AUTOTUNE)
        # do transformation
        return d(dataset, **kwargs)

    def parse_examples_to_dataset(self):
        if not self.examples:
            logging.info("self.examples is empty or None, skipped.")
tensorflow.sparse.to_dense
1,468
from tensorflow.python.ops import array_ops

        batch_size = config["batch_size"]
        seq_length = config["seq_length"]

        with ops.Graph().as_default(), ops.device("/device:GPU:0"):
            model = cudnn_rnn_ops.CudnnLSTM(num_layers, num_units, num_units)
            params_size_t = model.params_size()
            input_data = variables.Variable(
                array_ops.ones([seq_length, batch_size, num_units]))
            input_h = variables.Variable(
                array_ops.ones([num_layers, batch_size, num_units]))
            input_c = variables.Variable(
                array_ops.ones([num_layers, batch_size, num_units]))
            params = variables.Variable(
                array_ops.ones([params_size_t]), validate_shape=False)
            output, output_h, output_c = model(
                is_training=True,
                input_data=input_data,
                input_h=input_h,
                input_c=input_c,
                params=params)
            all_grads = gradients_impl.gradients(
                [output, output_h, output_c],
tensorflow.python.ops.array_ops.ones
1,469
import tensorflow as tf
from scipy import interpolate
import time


def main(args):
    with tf.Graph().as_default():
        config = tf.ConfigProto(inter_op_parallelism_threads=args.num_inter_threads,
                                intra_op_parallelism_threads=args.num_intra_threads)
        with tf.Session(config=config) as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
tensorflow.ConfigProto
1,470
from tensorflow.python.framework import ops

    recall = compute_recall(true_positives, false_negatives, 'value')
    with ops.control_dependencies([true_positives_update_op,
                                   false_negatives_update_op]):
        update_op = compute_recall(true_positives, false_negatives, 'update_op')

    if metrics_collections:
        ops.add_to_collections(metrics_collections, recall)

    if updates_collections:
        ops.add_to_collections(updates_collections, update_op)

    return recall, update_op


def _tp_fn_tn_fp(predictions, labels, thresholds, weights=None):
    """Computes true_positives, false_negatives, true_negatives, false_positives.

    The `_tp_fn_tn_fp` function creates four local variables, `true_positives`,
tensorflow.python.framework.ops.add_to_collections
1,471
import tensorflow as tf

            layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
            mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
            # sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
tensorflow.layers.dense
1,472
import tensorflow as tf

    return final_loss, cstr_pct


def contra_traj_lossV7(pred, tgt, horizon=12, temp=100):
    horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
    # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)

    pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
    tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
    tgt_dif = tgt_flat1 - tgt_flat2
    pred_dif = pred_flat1 - pred_flat2
    geq = tf.cast(tgt_dif > 0, tf.bool)
    tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
tensorflow.reshape
1,473
import tensorflow as tf

    else:
        out = tf.pow(subsamp_sum, 1/pnorm)
    return out


def mpool(inpOp, kH, kW, dH, dW, padding, name):
    with tf.variable_scope(name):
        maxpool = tf.nn.max_pool(inpOp,
                                 ksize=[1, kH, kW, 1],
                                 strides=[1, dH, dW, 1],
                                 padding=padding)
    return maxpool


def apool(inpOp, kH, kW, dH, dW, padding, name):
    with tf.variable_scope(name):
        avgpool = tf.nn.avg_pool(inpOp,
                                 ksize=[1, kH, kW, 1],
                                 strides=[1, dH, dW, 1],
                                 padding=padding)
    return avgpool

# def mfmpool(input1, input2, name):
#     with tf.variable_scope(name):
#         res = tf.maximum(input1, input2)
#     return res


def batch_norm(x, phase_train):
    """
tensorflow.variable_scope
1,474
import tensorflow as tf

        return (summary_op, monitored_values)

    def _make_var(self, name, shape, dtype=None, no_reg=False, initializer=None,
                  init_constant=None, trainable=True):
        if initializer is None:
            if init_constant is not None:
                initializer = tf.constant_initializer(init_constant, dtype=tf.float32)
            else:
                initializer = tf.contrib.keras.initializers.he_normal()

        # Ensure that name is unique by shape too
        name += '-shape-{}'.format('x'.join([str(x) for x in shape]))

        var = tf.get_variable(name, shape=shape, dtype=dtype,
                              initializer=initializer, trainable=trainable)

        # Add L2 regularization node for trainable var
        if trainable and not no_reg:
            l2_loss = tf.nn.l2_loss(var)
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, l2_loss)

        return var


class TimedRepeatCondition():
    def __init__(self, every_secs=60):
        self._every_secs = every_secs
tensorflow.get_variable
1,475
import tensorflow as tf

    if last == 'sigmoid' or last == 'softmax':
        # use cross entropy loss function
        logits = tf.transpose(betan * zn[1])
        cost = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(logits=logits,
                                                              multi_class_labels=labels))
    elif last == 'esp' or last == 'relu':
        # use minimum squared error (L2 loss)
        out = tf.transpose(zn[0])
        cost = tf.reduce_mean(tf.squared_difference(out, labels)) / 2
    return cost

# ------------Hessian-------------------
tensorflow.squared_difference
1,476
import tensorflow as tf

            self.embedding_W = tf.Variable(
                tf.random_uniform([num_quantized_chars, embedding_size], -1.0, 1.0),
                name="embedding_W")
            self.embedded_characters = tf.nn.embedding_lookup(self.embedding_W, self.input_x)
            embedded_text_expand = tf.expand_dims(self.embedded_characters, -1)

        with tf.device('/cpu:0'), tf.name_scope("embedding_tags"):
            W_tags = tf.get_variable("embed_W_tags", [tags_vocab_size, embedding_size],
                                     initializer=initializer)
            embedded_tags = tf.nn.embedding_lookup(W_tags, self.input_tags)
            embedded_tags_expanded = tf.expand_dims(embedded_tags, -1)

        with tf.device('/cpu:0'), tf.name_scope("embedding_deps"):
            W_deps = tf.get_variable("embed_W_deps", [deps_vocab_size, embedding_size],
                                     initializer=initializer)
            embedded_deps = tf.nn.embedding_lookup(W_deps, self.input_deps)
            embedded_deps_expanded = tf.expand_dims(embedded_deps, -1)

        with tf.device('/cpu:0'), tf.name_scope("embedding_head"):
            W_head = tf.get_variable("embed_W_head", [num_quantized_chars, embedding_size],
                                     initializer=initializer)
            embedded_head = tf.nn.embedding_lookup(W_head, self.input_head)
            embedded_head_expanded = tf.expand_dims(embedded_head, -1)
tensorflow.device
1,477
import tensorflow as tf

np.random.seed(1234)
tf.set_random_seed(1234)
tensorflow.set_random_seed
1,478
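tf.set_random_seed fixes only the graph-level seed; individual random ops can additionally take an op-level seed, and reproducibility depends on one or both being set. A small hedged illustration of the interaction:

import tensorflow as tf

tf.set_random_seed(1234)           # graph-level seed
a = tf.random_uniform([1])         # op seed derived from the graph-level seed
b = tf.random_normal([1], seed=7)  # op-level seed combined with the graph-level seed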
import tensorflow as tf

    def get_symbolic_attribution(self):
        return [g * x for g, x in zip(
            tf.gradients(ys=self.T, xs=self.X),
            self.X if self.has_multiple_inputs else [self.X])]

    @classmethod
    def nonlinearity_grad_override(cls, op, grad):
        output = op.outputs[0]
        input = op.inputs[0]
        return grad * output / (input + eps *
                                tf.compat.v1.where(input >= 0,
                                                   tf.ones_like(input),
                                                   -1 * tf.ones_like(input)))


"""
Integrated Gradients
https://arxiv.org/pdf/1703.01365.pdf
"""
class IntegratedGradients(GradientBasedMethod):

    def __init__(self, T, X, session, keras_learning_phase, steps=100, baseline=None, Y_shape=None):
        self.steps = steps
tensorflow.ones_like
1,479
import tensorflow as tf

        :param expected_extra_dims: Expected extra dimensions.
        :returns: Tensor of same shape as expanded_tensor, but with
            `value_if_masked` filled in masked dimensions.
        """
        mask_shape = list(map(int, self.mask.shape))
        graph_typecheck.assert_shape(expanded_tensor, mask_shape + expected_extra_dims)

        value_if_masked = expanded_tensor.dtype.as_numpy_dtype(value_if_masked)
        if_masked_tensor = tf.fill(expanded_tensor.shape, value_if_masked)
        mask = self.mask
        for i in range(2, 2 + len(expected_extra_dims)):
            mask = tf.expand_dims(mask, axis=i)
        mask = tf.tile(mask, [1, 1] + expected_extra_dims)
        return tf.where(mask, expanded_tensor, if_masked_tensor)


def initial_layer(
    window_feature: WindowFeatures, *, clip_magnitude=10.0, include_flux_and_time=False
) -> tf.Tensor:
    features = tf.expand_dims(window_feature.dflux_dt(clip_magnitude=clip_magnitude), 2)
    if include_flux_and_time:
        dflux = tf.expand_dims(window_feature.dflux, 2)
        dtime = tf.expand_dims(window_feature.dtime, 2)
        features = tf.concat([features, dflux, dtime], axis=2, name="initial_layer_concat")
tensorflow.where
1,480
import tensorflow as tf features["label_ids"] = create_int_feature(feature.label_ids) tf_example = tf.train.Example(features=tf.train.Features(feature=features))
tensorflow.train.Features
1,481
import tensorflow as tf

def res_block_bottleneck(x, ksize_list, strides, is_training, data_format='NHWC',
                         w_project=None, no_activation=False):
    """Computes y = x + F(x), where F(x) is the residual block function.
    At downsample layers, applies a downsample function on x as well.
    """
    if w_project is not None:
        x_ = tf.conv2d(x, w_project, strides, padding='SAME', data_format=data_format)
    else:
        x_ = x
    return x_ + _bottleneck_residual(
        x, ksize_list, strides, 'SAME', is_training,
        data_format=data_format, no_activation=no_activation)
tensorflow.conv2d
1,482
import tensorflow as tf

    feat = tf.nn.l2_normalize(inputs, 1, 1e-10, name='feat')
    inputs = tf.layers.dense(inputs=inputs, units=class_num)
    # inputs = tf.layers.dense(inputs=feat, units=class_num)
    inputs = tf.identity(inputs, 'final_dense')

    return inputs, feat

# image_size = 32, img_channels = 3, class_num = 10 in cifar10
x = tf.placeholder(tf.float32, shape=[None, image_size, image_size, img_channels])
label = tf.placeholder(tf.float32, shape=[None,])
one_hot_labels = tf.one_hot(indices=tf.cast(label, tf.int32), depth=class_num)

training_flag = tf.placeholder(tf.bool)
learning_rate = tf.placeholder(tf.float32, name='learning_rate')

logits, feat = resnet_model_fn(x, training=training_flag)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_labels, logits=logits))
Focal_loss = tf.reduce_mean(focal_loss(one_hot_labels, logits, alpha=0.5))
l2_loss = weight_decay * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
Center_loss, Centers = center_loss(feat, tf.cast(label, dtype=tf.int32), 0.95, class_num)
Total_loss = cost + l2_loss

optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True)

# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
tensorflow.placeholder
1,483
import tensorflow as tf

    scale = tf.stack([1, h_scale, w_scale])
    indices *= scale

    # Since we always use VALID to perform pooling, shift is needed here.
    shift = tf.stack([0, (ksize[1] - 1) // 2, (ksize[2] - 1) // 2])
    indices += shift

    indices_ = tf.expand_dims(tf.expand_dims(indices, 1), 2)
    # indices_ = tf.tile(indices_, [1, ksize[1], ksize[2], 1])
    offset = _get_offset_array(ksize[0:3])
    indices_ += offset
    return indices_
tensorflow.expand_dims
1,484
from tensorflow.contrib import tpu as contrib_tpu

    run_config = contrib_tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=FLAGS.master,
        model_dir=FLAGS.output_dir,
        save_checkpoints_steps=int(FLAGS.save_checkpoints_steps),
        keep_checkpoint_max=0,
        tpu_config=contrib_tpu.TPUConfig(
            iterations_per_loop=iterations_per_loop,
            num_shards=FLAGS.num_tpu_cores,
            per_host_input_for_training=is_per_host))

    train_examples = None
tensorflow.contrib.tpu.TPUConfig
1,485
import tensorflow as tf

    query = tf.concat(values=[
        query,
        query,
    ], axis=1)

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])
    mask = tf.equal(mask, tf.ones_like(mask))
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
    d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
    d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
    scores = d_layer_3_all

    # Mask
    # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1])   # [B, T]
    key_masks = tf.expand_dims(mask, 1)  # [B, 1, T]
    paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
    scores = tf.where(key_masks, scores, paddings)  # [B, 1, T]

    # Scale
    # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
tensorflow.layers.dense
1,486
import tensorflow as tf

def test_tensor_rearrange():
    tensor_rearrange = TensorRearrange(seed=713)
    in_node_a = tensor_rearrange.get_placeholder("input_0")
    in_node_b = tensor_rearrange.get_placeholder("input_1")
    in_node_c = tensor_rearrange.get_placeholder("input_2")
    stitched = tf.dynamic_stitch([[1, 10], [[0, 7, 9], [5, 8, 3]], [[6], [4], [2]]],
                                 [in_node_a, in_node_b, in_node_c])  # should be 11,5,4
    list_of_parts = tf.dynamic_partition(tf.transpose(stitched, perm=[1, 2, 0]),
                                         [[0, 1, 2, 3], [1, 0, 2, 3], [2, 3, 1, 0],
                                          [2, 1, 0, 3], [0, 1, 2, 3]],
                                         num_partitions=4)  # after permute becomes 5,4,11, return all partitions 5,11
    node_a = tf.div(list_of_parts[0], list_of_parts[1])
    node_b = tf.divide(list_of_parts[2], list_of_parts[3])
    trace_node = tf.trace(node_a) + node_b  # there is a broadcast here
    out_node = tf.cast(tf.count_nonzero(trace_node), dtype=tf.float32) + \
        tf.Variable(tf.random_normal(shape=(2, 3)))
    placeholders = [in_node_a, in_node_b, in_node_c]
tensorflow.transpose
1,487
import tensorflow as tf

        use_skip_connections = self.options['lstm']['use_skip_connections']
        if use_skip_connections:
            print("USING SKIP CONNECTIONS")
        else:
            print("NOT USING SKIP CONNECTIONS")

        # the sequence lengths from input mask
        if self.use_character_inputs:
            mask = tf.reduce_any(self.ids_placeholder > 0, axis=2)
        else:
            mask = self.ids_placeholder > 0
        sequence_lengths = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)
        batch_size = tf.shape(sequence_lengths)[0]

        # for each direction, we'll store tensors for each layer
        self.lstm_outputs = {'forward': [], 'backward': []}
tensorflow.reduce_any
1,488
import tensorflow as tf

            new_mean = tf.assign_sub(
                mean,
                tf.check_numerics(
                    decay * (mean - cur_mean), "NaN in moving mean."))
        with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]):
            with ops.colocate_with(var):
                new_var = tf.assign_sub(
                    var,
tensorflow.name_scope
1,489
import tensorflow as tf

        current_image_id_ph = tf.placeholder(tf.int32, [])
        progress = networks.compute_progress(
            current_image_id_ph,
            stable_stage_num_images,
            transition_stage_num_images,
            num_blocks=3)
        z = tf.random_normal([2, 10], dtype=tf.float32)
        x, _ = networks.generator(
            z, progress, _num_filters_stub,
            networks.ResolutionSchedule(
                start_resolutions=(4, 4), scale_base=2, num_resolutions=3))
        fake_loss = tf.reduce_sum(tf.square(x))
tensorflow.random_normal
1,490
import tensorflow.contrib.layers as layers
from tensorflow.contrib.layers.python.layers import initializers


def atari_model(img_in, num_actions, scope, reuse=False):
    # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return out
tensorflow.contrib.layers.convolution2d
1,491
import tensorflow as tf

        tf.summary.scalar("Learning Rate", m.lr)

    with tf.name_scope("Valid"):
        valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
        with tf.variable_scope("Model", reuse=True, initializer=initializer):
            mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
        tf.summary.scalar("Validation Loss", mvalid.cost)
tensorflow.variable_scope
1,492
import tensorflow as tf
tensorflow.enable_eager_execution
1,493
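The seed above carries only the import. A minimal hedged sketch of the labeled API, which must be called once at program startup, before any graph ops are created:

import tensorflow as tf

tf.enable_eager_execution()
print(tf.executing_eagerly())  # True
print(tf.add(1, 2))            # executes immediately: tf.Tensor(3, shape=(), dtype=int32)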
from tensorflow.python.platform import gfile

        # Adding s2 again (old s2 is removed first, then new s2 appended)
        s2 = save2.save(sess, os.path.join(save_dir, "s2"))
        self.assertEqual([s3, s2], save2.last_checkpoints)

        # Created by the first helper.
        self.assertTrue(gfile.Exists(s1))
        self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))

        # Deleted by the first helper.
        self.assertFalse(gfile.Exists(s3))
        self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
        self.assertTrue(gfile.Exists(s2))
        self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))

        # Adding s1 (s3 should now be deleted as oldest in list)
        s1 = save2.save(sess, os.path.join(save_dir, "s1"))
tensorflow.python.platform.gfile.Exists
1,494
import tensorflow as tf

        self.cell = tf.contrib.rnn.LSTMCell(
            options.gen_hidden_size,
            initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=113),
            state_is_tuple=True)

        self.placeholders = placeholders

        with tf.variable_scope("embedding"), tf.device('/cpu:0'):
            self.embedding = tf.get_variable('word_embedding',
                trainable=(options.fix_word_vec == False),
                initializer=tf.constant(self.vocab.word_vecs), dtype=tf.float32)

        if options.with_phrase_projection:
            self.max_phrase_size = placeholders.max_phrase_size
tensorflow.device
1,495
import tensorflow as tf

        try:
            from tensorflow_transform import annotations_pb2
        except ImportError:
            return
        # pylint: enable=g-import-not-at-top
        with tf.compat.v1.Graph().as_default() as graph:
            inputs = {
                'foo': tf.convert_to_tensor([0, 1, 2, 3]),
                'bar': tf.convert_to_tensor([0, 2, 0, 2]),
            }
            boundaries_foo = tf.expand_dims(tf.convert_to_tensor([.5, 1.5]), axis=0)
            boundaries_bar = tf.expand_dims(tf.convert_to_tensor([.1, .2]), axis=0)
            outputs = {}
            # tft.apply_buckets will annotate the feature in the output schema to
            # indicate the bucket boundaries that were applied.
tensorflow.convert_to_tensor
1,496
import tensorflow as tf

                u = tf.string_join(x_t[i:i + self._p], '')
                vx_keys, r = tf.cond(
                    tf.greater(vx.lookup(u), -1),
                    true_fn=lambda: (vx_keys, tf.add(vx.lookup(u), 1)),
                    false_fn=lambda: (tf.concat([vx_keys, tf.reshape(u, (-1, 1))], axis=0),
                                      tf.constant(1, dtype=tf.int64, name='constant'))
                )
                vx.insert(u, r)

            for i in tf.range(start=0, limit=z_t_len - self._p + 1, delta=1, dtype=None, name='range'):
                u = tf.string_join(z_t[i:i + self._p], '')
                vz_keys, r = tf.cond(
                    tf.greater(vz.lookup(u), -1),
                    true_fn=lambda: (vz_keys, tf.add(vz.lookup(u), 1)),
                    false_fn=lambda: (tf.concat([vz_keys, tf.reshape(u, (-1, 1))], axis=0),
                                      tf.constant(1, dtype=tf.int64))
                )
                vz.insert(u, r)

            kk = tf.Variable(0, dtype=tf.int64)
            for i in tf.range(start=0, limit=tf.size(vx_keys), delta=1, dtype=None, name='range'):
                for j in tf.range(start=0, limit=tf.size(vz_keys), delta=1, dtype=None, name='range'):
                    to_add = tf.cond(
                        tf.greater(vz.lookup(vx_keys[i]), -1),
                        true_fn=lambda: tf.math.multiply(vx.lookup(vx_keys[i]), vz.lookup(vz_keys[j])),
                        false_fn=lambda: tf.constant(0, dtype=tf.int64)
                    )
                    kk = tf.math.add(kk, to_add)
            kernel[l][m] = kk
tensorflow.reshape
1,497
import tensorflow as tf

        # Combine to constant channels
        with tf.variable_scope('combine'):
            W = self._make_var('W', (ni, block_ch * block_ch))
tensorflow.variable_scope
1,498
from tensorflow.contrib.learn.python.learn.datasets import base

    local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
                                     SOURCE_URL + TRAIN_IMAGES)
    train_images = extract_images(local_file)

    local_file = base.maybe_download(TRAIN_LABELS, train_dir,
                                     SOURCE_URL + TRAIN_LABELS)
    train_labels = extract_labels(local_file, one_hot=one_hot)
tensorflow.contrib.learn.python.learn.datasets.base.maybe_download
1,499