Columns:
  seed      string (lengths 25 – 2.89k)
  seed_api  string (lengths 14 – 102)
  index     int64 (0 – 14.8k)
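Each row below is a code snippet (seed) followed by the fully qualified TensorFlow API it exercises (seed_api) and a running row index. As a rough sketch of the record layout (an illustration, not part of the dataset itself), one row can be thought of as a plain Python dict; the seed value from the first row is truncated here:

```python
# Sketch of a single record, using values from the first row below.
# The "seed" string is multi-line source code; only its opening lines are shown.
record = {
    "seed": "import tensorflow as tf\n\ndef file_based_input_fn_builder(...):\n    ...",
    "seed_api": "tensorflow.FixedLenFeature",  # API the snippet exercises
    "index": 1600,                             # rendered as "1,600" in the table
}
```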
import tensorflow as tf

def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""
    name_to_features = {
        "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
        "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "label_ids": tf.FixedLenFeature([], tf.int64),
        "is_real_example": tf.FixedLenFeature([], tf.int64),
    }

    def _decode_record(record, name_to_features):
tensorflow.FixedLenFeature
1,600
import tensorflow as tf else: i_direction = 1 variable_scope_name = 'RNN_{0}/RNN/MultiRNNCell/Cell{1}'.format( i_direction, i) with tf.variable_scope(variable_scope_name): layer_output, final_state = tf.nn.dynamic_rnn( lstm_cell, layer_input, sequence_length=sequence_lengths, initial_state=tf.nn.rnn_cell.LSTMStateTuple( *batch_init_states), ) self.lstm_state_sizes[direction].append(lstm_cell.state_size) self.lstm_init_states[direction].append(init_states) self.lstm_final_states[direction].append(final_state) if direction == 'forward': self.lstm_outputs[direction].append(layer_output)
tensorflow.nn.rnn_cell.LSTMStateTuple
1,601
import tensorflow as tf self.loss_GABA_sum = tf.summary.scalar("g_loss_a2b", self.loss_GABA) self.loss_GBAB_sum = tf.summary.scalar("g_loss_b2a", self.loss_GBAB) self.g_total_loss_sum = tf.summary.scalar("g_loss", self.generator_loss) self.g_sum = tf.summary.merge([self.loss_GABA_sum,self.loss_GBAB_sum,self.g_total_loss_sum]) self.loss_db_sum = tf.summary.scalar("db_loss", self.D_B_loss) self.loss_da_sum = tf.summary.scalar("da_loss", self.D_A_loss) self.loss_d_sum = tf.summary.scalar("d_loss",self.discriminator_loss) self.db_loss_real_sum = tf.summary.scalar("db_loss_real", self.D_B_loss_real) self.db_loss_fake_sum = tf.summary.scalar("db_loss_fake", self.D_B_loss_fake) self.da_loss_real_sum = tf.summary.scalar("da_loss_real", self.D_A_loss_real) self.da_loss_fake_sum = tf.summary.scalar("da_loss_fake", self.D_A_loss_fake) self.d_sum = tf.summary.merge(
tensorflow.summary.scalar
1,602
import tensorflow as tf @staticmethod def _relu(name, x): with tf.variable_scope(name): return tf.nn.relu(x) @staticmethod def _fc(name, x, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0): with tf.variable_scope(name): n_in = x.get_shape()[-1].value w = variable_with_weight_decay([n_in, output_dim], initializer, l2_strength) variable_summaries(w) if isinstance(bias, float): bias = tf.get_variable("biases", [output_dim], tf.float32, tf.constant_initializer(bias))
tensorflow.variable_scope
1,603
import tensorflow as tf

with tf.variable_scope('pool'):
    X = tf.nn.relu(X)
    X = tf.nn.avg_pool(X, ksize=(1, pool_ksize, pool_ksize, 1),
                       strides=(1, pool_stride, pool_stride, 1), padding='VALID')
tensorflow.nn.avg_pool
1,604
import tensorflow as tf if cfgs.ADD_BOX_IN_TENSORBOARD: detections_in_img = self.drawer.draw_boxes_with_categories_and_scores( img_batch=img, boxes=outputs[0], scores=outputs[1], labels=outputs[2], method=1, is_csl=True) tf.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img) loss_dict = outputs[-1] total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu) if i == num_gpu - 1: regularization_losses = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) # weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses()) total_losses = total_losses + tf.add_n(regularization_losses) tf.get_variable_scope().reuse_variables() grads = optimizer.compute_gradients(total_losses) if cfgs.GRADIENT_CLIPPING_BY_NORM is not None: grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM) tower_grads.append(grads) self.log_printer(r3det_dcl, optimizer, global_step, tower_grads, total_loss_dict, num_gpu, graph) if __name__ == '__main__':
tensorflow.get_collection
1,605
import tensorflow as tf

layers = []
input = tf.concat([inputs, targets], axis=3)
tensorflow.concat
1,606
import tensorflow as tf self.capped_g_grads = self._clip_grad_norms( g_optimizer.compute_gradients(self.g_losses[-1], t_vars['g_vars'])) global_step = tf.get_variable( 'global_step', [], initializer=tf.constant_initializer(0), trainable=False) if self.gradient_multipliers is not None: with tf.name_scope('multiply_grads'): self.capped_d_grads = self._multiply_gradients(self.capped_d_grads, self.gradient_multipliers) apply_d_gradient_op = d_optimizer.apply_gradients(self.capped_d_grads, global_step=global_step) apply_g_gradient_op = g_optimizer.apply_gradients(self.capped_g_grads, global_step=global_step)
tensorflow.name_scope
1,607
import tensorflow as tf

self.task = task
self.freeze = freeze
worker_device = "/job:worker/task:{}/cpu:0".format(task)
with tf.device(tf.train.replica_device_setter(1, worker_device=worker_device)):
    with tf.variable_scope("global"):
        self.network = LSTMPolicy(env.observation_space.shape, env.action_space.n)
tensorflow.train.replica_device_setter
1,608
import tensorflow as tf

loc_loss = tf.cond(n_positives > 0.,
                   lambda: modified_smooth_l1(location_pred, tf.stop_gradient(gtargets), sigma=1.),
                   lambda: tf.zeros_like(location_pred))
#loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets))
loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1))
loc_loss = tf.identity(loc_loss, name='location_loss')
tf.summary.scalar('location_loss', loc_loss)
tensorflow.reduce_sum
1,609
import tensorflow as tf # Create optimizer opt = tf.train.AdamOptimizer(learning_rate, beta1=params.adam_beta1, beta2=params.adam_beta2, epsilon=params.adam_epsilon) if params.update_cycle == 1: train_op = tf.contrib.layers.optimize_loss( name="training", loss=loss, global_step=global_step, learning_rate=learning_rate, clip_gradients=params.clip_grad_norm or None, optimizer=opt, colocate_gradients_with_ops=True
tensorflow.contrib.layers.optimize_loss
1,610
import tensorflow as tf zero_indexed_groundtruth_classes, num_classes) if use_multiclass_scores: tensor_dict[fields.InputDataFields.groundtruth_classes] = tensor_dict[ fields.InputDataFields.multiclass_scores] tensor_dict.pop(fields.InputDataFields.multiclass_scores, None) if fields.InputDataFields.groundtruth_confidences in tensor_dict: groundtruth_confidences = tensor_dict[ fields.InputDataFields.groundtruth_confidences] # Map the confidences to the one-hot encoding of classes tensor_dict[fields.InputDataFields.groundtruth_confidences] = ( tf.reshape(groundtruth_confidences, [-1, 1]) * tensor_dict[fields.InputDataFields.groundtruth_classes]) else: groundtruth_confidences = tf.ones_like( zero_indexed_groundtruth_classes, dtype=tf.float32) tensor_dict[fields.InputDataFields.groundtruth_confidences] = ( tensor_dict[fields.InputDataFields.groundtruth_classes]) if merge_multiple_boxes: merged_boxes, merged_classes, merged_confidences, _ = ( util_ops.merge_boxes_with_multiple_labels( tensor_dict[fields.InputDataFields.groundtruth_boxes], zero_indexed_groundtruth_classes, groundtruth_confidences, num_classes)) merged_classes = tf.cast(merged_classes, tf.float32) tensor_dict[fields.InputDataFields.groundtruth_boxes] = merged_boxes tensor_dict[fields.InputDataFields.groundtruth_classes] = merged_classes
tensorflow.ones_like
1,611
import tensorflow as tf f = conv(x, scope='f_conv', filter_dims=[1, 1, channels//8], stride_dims=[1, 1], non_linear_fn=act_func) f = tf.layers.max_pooling2d(f, pool_size=2, strides=2, padding='SAME') print('attention f dims: ' + str(f.get_shape().as_list())) g = conv(x, scope='g_conv', filter_dims=[1, 1, channels//8], stride_dims=[1, 1], non_linear_fn=act_func) print('attention g dims: ' + str(g.get_shape().as_list())) h = conv(x, scope='h_conv', filter_dims=[1, 1, channels//2], stride_dims=[1, 1], non_linear_fn=act_func) h = tf.layers.max_pooling2d(h, pool_size=2, strides=2, padding='SAME') print('attention h dims: ' + str(h.get_shape().as_list())) # N = h * w g = tf.reshape(g, shape=[-1, g.shape[1]*g.shape[2], g.get_shape().as_list()[-1]]) print('attention g flat dims: ' + str(g.get_shape().as_list())) f = tf.reshape(f, shape=[-1, f.shape[1]*f.shape[2], f.shape[-1]])
tensorflow.layers.max_pooling2d
1,612
from tensorflow.contrib.learn.python.learn import ops self.assertEqual(embed_np.shape, embed_tf.shape) self.assertAllClose(embed_np, embed_tf) def test_categorical_variable(self): random_seed.set_random_seed(42) with self.cached_session() as sess: cat_var_idx = array_ops.placeholder(dtypes.int64, [2, 2]) embeddings = ops.categorical_variable( cat_var_idx, n_classes=5, embedding_size=10, name="my_cat_var") sess.run(variables.global_variables_initializer()) emb1 = sess.run(embeddings, feed_dict={cat_var_idx.name: [[0, 1], [2, 3]]}) emb2 = sess.run(embeddings, feed_dict={cat_var_idx.name: [[0, 2], [1, 3]]})
tensorflow.contrib.learn.python.learn.ops.categorical_variable
1,613
import tensorflow as tf if encoder.time_pooling: for stride in encoder.time_pooling[:encoder.layers - 1]: encoder_input_length_ = (encoder_input_length_ + stride - 1) // stride # rounding up last_backward = encoder_outputs_[:, 0, cell_output_size:] indices = tf.stack([tf.range(batch_size), encoder_input_length_ - 1], axis=1) last_forward = tf.gather_nd(encoder_outputs_[:, :, :cell_output_size], indices) last_forward.set_shape([None, cell_output_size]) if encoder.final_state == 'concat_last': # concats last states of all backward layers (full LSTM states) encoder_state_ = tf.concat(encoder_states_, axis=1) elif encoder.final_state == 'average': mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_outputs_)[1], dtype=tf.float32) mask = tf.expand_dims(mask, axis=2) encoder_state_ = tf.reduce_sum(mask * encoder_outputs_, axis=1) / tf.reduce_sum(mask, axis=1) elif encoder.final_state == 'average_inputs': mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_inputs_)[1], dtype=tf.float32) mask = tf.expand_dims(mask, axis=2) encoder_state_ = tf.reduce_sum(mask * encoder_inputs_, axis=1) / tf.reduce_sum(mask, axis=1) elif encoder.bidir and encoder.final_state == 'last_both': encoder_state_ = tf.concat([last_forward, last_backward], axis=1) elif encoder.final_state == 'none': encoder_state_ = tf.zeros(shape=[batch_size, 0]) elif encoder.bidir and not encoder.final_state == 'last_forward': # last backward hidden state encoder_state_ = last_backward else: # last forward hidden state encoder_state_ = last_forward
tensorflow.reduce_sum
1,614
import tensorflow as tf

deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
tensorflow.constant_initializer
1,615
import tensorflow as tf

else:
    size = shape[-1].value

with tf.variable_scope(layer_name):
    w = tf.get_variable(name='weight',
                        shape=[size, out_nodes],
                        initializer=tf.constant_initializer(0.0))
    b = tf.get_variable(name='bias',
                        shape=[out_nodes],
                        initializer=tf.constant_initializer(0.0))
    # batch?
    flat_x = tf.reshape(x, [-1,size])
tensorflow.constant_initializer
1,616
import tensorflow as tf eval_examples.append(classifier_utils.PaddingInputExample()) cached_dir = FLAGS.cached_dir if not cached_dir: cached_dir = FLAGS.output_dir eval_file = os.path.join(cached_dir, task_name + "_eval.tf_record") if not tf.gfile.Exists(eval_file): classifier_utils.file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file, task_name) tf.logging.info("***** Running evaluation *****") tf.logging.info(" Num examples = %d (%d actual, %d padding)", len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples) tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # This tells the estimator to run through the entire set. eval_steps = None # However, if running eval on the TPU, you will need to specify the # number of steps. if FLAGS.use_tpu: assert len(eval_examples) % FLAGS.eval_batch_size == 0 eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False eval_input_fn = classifier_utils.file_based_input_fn_builder( input_file=eval_file, seq_length=FLAGS.max_seq_length, is_training=False,
tensorflow.logging.info
1,617
import tensorflow as tf

with tf.name_scope('data'):
    batch_histories = tf.Variable(data.batch_histories,
                                  name='histories', trainable=False)
    batch_actions_template = tf.Variable(data.batch_actions_template,
                                         name='actions', trainable=False)
    batch_action_arguments = tf.Variable(data.batch_actions_arguments,
                                         name='actions_arguments', trainable=False)
tensorflow.Variable
1,618
import tensorflow as tf

rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
obs_tp1_input = make_obs_ph("obs_tp1")
done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")

# q network evaluation
tensorflow.placeholder
1,619
import tensorflow as tf def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings, adjust_lr, use_hvd, use_compression, use_fp16, clip, cos_decay, use_lamb, previous_train_steps, post_train_steps): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] masked_lm_weights = features["masked_lm_weights"]
tensorflow.logging.info
1,620
import tensorflow as tf

print('attention o dims: ' + str(o.get_shape().as_list()))

gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0))
tensorflow.constant_initializer
1,621
import tensorflow as tf [[0, 1, 2, 3], [1, 0, 2, 3], [2, 3, 1, 0], [2, 1, 0, 3], [0, 1, 2, 3]], num_partitions=4) # after permute becomes 5,4,11, return all partitions 5,11 node_a = tf.div(list_of_parts[0], list_of_parts[1]) node_b = tf.divide(list_of_parts[2], list_of_parts[3])
tensorflow.div
1,622
import tensorflow as tf rel_init = tf.truncated_normal([rel_cnt, self.embedding_size], stddev=init_sd) tail_init = tf.truncated_normal([tail_cnt, self.embedding_size], stddev=init_sd) if self.maxnorm is not None: # Ensure maxnorm constraints are initially satisfied head_init = dense_maxnorm(head_init, self.maxnorm) rel_init = dense_maxnorm(rel_init, self.maxnorm) tail_init = dense_maxnorm(tail_init, self.maxnorm) self.head_embedding_vars = tf.Variable(head_init) self.rel_embedding_vars = tf.Variable(rel_init) self.tail_embedding_vars = tf.Variable(tail_init) # Embedding layer for each (head, rel, tail) triple being fed in as input head_embed = tf.nn.embedding_lookup(self.head_embedding_vars, self.head_input) rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input) tail_embed = tf.nn.embedding_lookup(self.tail_embedding_vars, self.tail_input) # Model output raw_output = tf.reduce_sum(tf.mul(tf.mul(head_embed, rel_embed), tail_embed), 1) self.output, self.loss = self._create_output_and_loss(raw_output) # Optimization self.train_step = self.opt.minimize(self.loss) if self.maxnorm is not None: # Post-processing to limit embedding vars to L2 ball head_constraint = self._norm_constraint_op(self.head_embedding_vars,
tensorflow.nn.embedding_lookup
1,623
import tensorflow as tf

shape = tf.shape(output)
output = tf.reshape(output, [-1, 2 * self.config.hidden_size])
atten_hidden = tf.tanh(
    tf.add(
        tf.matmul(self.position_emb, W),
        tf.matmul(output, U)))
alpha = tf.nn.softmax(
    tf.reshape(tf.matmul(atten_hidden, V), [-1, shape[1], 1]), axis=1)
output = tf.reshape(output, [-1, shape[1], 2 * self.config.hidden_size])
C = tf.multiply(alpha, output)
return tf.concat([output, C], axis=-1)
tensorflow.matmul
1,624
import tensorflow as tf self.assertEqual(3, len(res)) self.assertEqual((2, 2), res[0].shape) # Test that previous-feeding model ignores inputs after the first. dec_inp2 = [tf.constant(0, tf.int32, shape=[2])] * 3 with tf.variable_scope("other"): d3, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq( enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2, feed_previous=tf.constant(True)) sess.run([tf.global_variables_initializer()]) tf.get_variable_scope().reuse_variables() d1, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2, feed_previous=True) d2, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq( enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2, feed_previous=True) res1 = sess.run(d1) res2 = sess.run(d2) res3 = sess.run(d3) self.assertAllClose(res1, res2) self.assertAllClose(res1, res3)
tensorflow.nn.seq2seq.embedding_tied_rnn_seq2seq
1,625
import tensorflow as tf tf.logging.info(eval_results) tf.logging.info('Finished model {}.'.format(model_scope)) def main(_): # Using the Winograd non-fused algorithms provides a small performance boost. os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction) sess_config = tf.ConfigProto(allow_soft_placement = True, log_device_placement = False, intra_op_parallelism_threads = FLAGS.num_cpu_threads, inter_op_parallelism_threads = FLAGS.num_cpu_threads, gpu_options = gpu_options) # Set up a RunConfig to only save checkpoints once per training cycle. run_config = tf.estimator.RunConfig().replace( save_checkpoints_secs=FLAGS.save_checkpoints_secs).replace( save_checkpoints_steps=None).replace( save_summary_steps=FLAGS.save_summary_steps).replace( keep_checkpoint_max=5).replace(
tensorflow.ConfigProto
1,626
import tensorflow as tf

    input_.get_shape().as_list()[3]
])
res = tf.concat(axis=1, values=[pad_1, res])
res = tf.concat(axis=2, values=[pad_2, res])
res = tf.nn.conv2d(
    input=res,
    filter=weights,
    strides=strides,
tensorflow.nn.conv2d
1,627
import tensorflow as tf

# labels = tf.cast(labels,dtype=tf.float32)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
tensorflow.one_hot
1,628
import tensorflow as tf

def inject_latent(self, layer, features, filters):
    """Inject a VAE-style latent."""
    # Latent for stochastic model
    input_frames = tf.to_float(features["inputs_raw"])
    target_frames = tf.to_float(features["targets_raw"])
    full_video = tf.concat([input_frames, target_frames], axis=1)
    latent_mean, latent_std = self.construct_latent_tower(
        full_video, time_axis=1)
    latent = common_video.get_gaussian_tensor(latent_mean, latent_std)
    latent = tf.layers.flatten(latent)
    latent = tf.expand_dims(latent, axis=1)
tensorflow.concat
1,629
import tensorflow as tf render = False # display the game environment running_reward = None tf.reset_default_graph() ## Define Q-network q(a,s) that ouput the rewards of 4 actions by given state, i.e. Action-Value Function. # 4x4 grid can be represented by one-hot vector with 16 integers. inputs = tf.placeholder(shape=[1, 16], dtype=tf.float32) net = InputLayer(inputs, name='observation') net = DenseLayer(net, n_units=4, act=tf.identity, W_init=tf.random_uniform_initializer(0, 0.01), b_init=None, name='q_a_s') y = net.outputs # action-value / rewards of 4 actions predict = tf.argmax(y, 1) # chose action greedily with reward. in Q-Learning, policy is greedy, so we use "max" to select the next action. ## Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values. nextQ = tf.placeholder(shape=[1, 4], dtype=tf.float32) loss = tl.cost.mean_squared_error(nextQ, y, is_mean=False) # tf.reduce_sum(tf.square(nextQ - y)) train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)
tensorflow.random_uniform_initializer
1,630
import tensorflow as tf

output_weights = tf.get_variable(
    "output_weights", [hidden_size],
    initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
tensorflow.truncated_normal_initializer
1,631
import tensorflow as tf Args: predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network . labels: 2D or array tensor, [batch_size, num_classes] ground truth labels or target labels. eps: a constant to set upper or lower limit for labels, smoothening factor name: Optional scope/name for op_scope. Returns: A tensor with the log loss. """ with tf.name_scope(name): predictions = tf.to_float(predictions) labels = tf.to_float(labels) predictions = tf.clip_by_value(predictions, eps, 1 - eps) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) loss = -tf.reduce_mean(labels * tf.log(predictions)) return loss def log_loss_tf(predictions, labels, eps=1e-7, weights=1.0, name='log_loss'): """Define a log loss. Args: predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network . labels: 2D or array tensor, [batch_size, num_classes] ground truth labels or target labels. eps: a constant to set upper or lower limit for labels, smoothening factor name: Optional scope/name for op_scope. Returns:
tensorflow.log
1,632
import tensorflow as tf if param_noise_filter_func is None: param_noise_filter_func = default_param_noise_filter with tf.variable_scope(scope, reuse=reuse): observations_ph = U.ensure_tf_input(make_obs_ph("observation")) stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold") update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale") reset_ph = tf.placeholder(tf.bool, (), name="reset") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False) param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False) # Unmodified Q. q_values = q_func(observations_ph.get(), num_actions, scope="q_func") # Perturbable Q used for the actual rollout. q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func") # We have to wrap this code into a function due to the way tf.cond() works. See # https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for
tensorflow.constant_initializer
1,633
import tensorflow as tf

def decode(z, skip_h3, skip_h2, skip_h1, skip_h0):
    z_ = lrelu(linear(tf.nn.dropout(z, keep_prob), nf3*s_h3*s_w3, 'd_h0_lin'))
    h0 = tf.nn.dropout(tf.reshape(z_, [-1, s_h3, s_w3, nf3]), keep_prob)
    h1 = lrelu(deconv2d(tf.concat([h0, skip_h3], 3),
tensorflow.reshape
1,634
import tensorflow as tf

self.args = args
self.kwargs = kwargs
self.name = self.kwargs.get("name", self.func.__name__)
self._template = tf.make_template(self.name, self.func, create_scope_now_=True)
self._unique_name = self._template.variable_scope.name.split("/")[-1]
self._summary_added = False
tensorflow.make_template
1,635
from tensorflow.python.platform import tf_logging as logging feed_fn=None, device_fn=None, monitor=None, log_every_steps=100, fail_on_nan_loss=True): if self._config.execution_mode not in ('all', 'train'): return # Stagger startup of worker sessions based on task id. sleep_secs = min(self._config.training_worker_max_startup_secs, self._config.task * self._config.training_worker_session_startup_stagger_secs) if sleep_secs: logging.info('Waiting %d secs before starting task %d.', sleep_secs, self._config.task) time.sleep(sleep_secs) # Device allocation device_fn = device_fn or self._device_fn with ops.Graph().as_default() as g, g.device(device_fn): random_seed.set_random_seed(self._config.tf_random_seed) global_step = contrib_framework.create_global_step(g) features, targets = input_fn() self._check_inputs(features, targets) train_op, loss_op = self._get_train_ops(features, targets)
tensorflow.python.platform.tf_logging.info
1,636
import tensorflow as tf def _clean_up_temporary_files(dataset_dir): """Removes temporary files used to create the dataset. Args: dataset_dir: The directory where the temporary files are stored. """ filename = _DATA_URL.split('/')[-1] filepath = os.path.join(dataset_dir, filename) tf.gfile.Remove(filepath) tmp_dir = os.path.join(dataset_dir, 'cifar-10-batches-py') tf.gfile.DeleteRecursively(tmp_dir) def run(dataset_dir): """Runs the download and conversion operation. Args: dataset_dir: The dataset directory where the dataset is stored. """ if not tf.gfile.Exists(dataset_dir): tf.gfile.MakeDirs(dataset_dir) dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
tensorflow.gfile.DeleteRecursively
1,637
import tensorflow as tf

else:
    with tf.variable_scope('', reuse=True):
tensorflow.variable_scope
1,638
import tensorflow as tf

sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
if ckpt and ckpt.model_checkpoint_path:
tensorflow.train.get_checkpoint_state
1,639
import tensorflow as tf generated_images = tf.reshape(decoder_output, [-1, 28, 28, 1]) # Tensorboard visualization tf.summary.scalar(name='Autoencoder Loss', tensor=autoencoder_loss) tf.summary.scalar(name='Discriminator gauss Loss', tensor=dc_g_loss) tf.summary.scalar(name='Discriminator categorical Loss', tensor=dc_c_loss) tf.summary.scalar(name='Generator Loss', tensor=generator_loss) tf.summary.scalar(name='Supervised Encoder Loss', tensor=supervised_encoder_loss) tf.summary.histogram(name='Encoder Gauss Distribution', values=encoder_output_latent) tf.summary.histogram(name='Real Gauss Distribution', values=real_distribution) tf.summary.histogram(name='Encoder Categorical Distribution', values=encoder_output_label) tf.summary.histogram(name='Real Categorical Distribution', values=categorial_distribution) tf.summary.image(name='Input Images', tensor=input_images, max_outputs=10) tf.summary.image(name='Generated Images', tensor=generated_images, max_outputs=10) summary_op = tf.summary.merge_all() # Saving the model saver = tf.train.Saver() step = 0 with tf.Session() as sess: if train_model: tensorboard_path, saved_model_path, log_path = form_results() sess.run(init) writer = tf.summary.FileWriter(logdir=tensorboard_path, graph=sess.graph) x_l, y_l = mnist.test.next_batch(n_labeled) for i in range(n_epochs): n_batches = int(n_labeled / batch_size) print("------------------Epoch {}/{}------------------".format(i, n_epochs))
tensorflow.summary.merge_all
1,640
from tensorflow.python.ops import variable_scope as vs def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name): """Find max_norm given norm and previous average.""" with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]): log_norm = math_ops.log(norm + epsilon) def moving_average(name, value, decay):
tensorflow.python.ops.variable_scope.variable_scope
1,641
import tensorflow as tf def conv2d(x, dim=(32, [3, 3], [1, 1]), pad='SAME', scope="conv2d", training=True, ema=None, init=False, bias_initializer=tf.constant_initializer(0.)): num_filters, filter_size, stride = dim with tf.variable_scope(scope): V = tf.get_variable('V', shape=list(filter_size) + [int(x.get_shape()[-1]), num_filters], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.05), trainable=True) g = tf.get_variable('g', shape=[num_filters], dtype=tf.float32, initializer=tf.constant_initializer(1.), trainable=True) b = tf.get_variable('b', shape=[num_filters], dtype=tf.float32,
tensorflow.random_normal_initializer
1,642
import tensorflow as tf print(f"policy.processed_obs: {policy.processed_obs}") print(f"Obs_phs space: {obs_phs}") #assert 5 == 1 ####################### for var in tf.all_variables(): print(var) batch_size = tf.shape(policy.obs_ph)[0] n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) _act = tf_util.function(inputs=[policy.obs_ph, stochastic_ph, update_eps_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True}, updates=[update_eps_expr])
tensorflow.stack
1,643
import tensorflow as tf return blk_indices_crop def _strides_one(): # Calculate otuput indices when strides = 1. return blk_indices[:, :q_shape[1], :q_shape[2], :] strides_gt_one = tf.logical_or(tf.greater(strides[1], 1), tf.greater(strides[2], 1)) blk_indices_crop = tf.cond(strides_gt_one, _strides_gt_one, _strides_one) y = tf.scatter_nd(blk_indices_crop, q, out_shape) return y return tf.cond( tf.equal(tf.size(blk_indices_), 0), lambda: tf.zeros(out_shape, dtype=x.dtype), _conv_nonzero) # returns an int64 start timer handle that should be passed to cuda_timer_end_op def cuda_timer_start_op(): return sbnet_module.cuda_timer_start() # returns a float def cuda_timer_end_op(start_timer): return sbnet_module.cuda_timer_end(start_timer)
tensorflow.size
1,644
import tensorflow as tf

      Of shape (n_test, n_support)
    """
    test = tf.expand_dims(test, 1)
    support = tf.expand_dims(support, 0)
    g = -tf.maximum(tf.reduce_sum(tf.square(test - support), 2), max_dist_sq)
    return g
tensorflow.square
1,645
import tensorflow as tf

p = tf.cond(tf.random_uniform((), dtype=tf.float32) < 1e-4,
            lambda: tf.print('csrt acc ', [pct]),
            lambda: tf.no_op())
with tf.control_dependencies([p]):
    return tf.reduce_mean(loss)
tensorflow.no_op
1,646
import tensorflow as tf def standardization(x): x_reshaped = x.reshape([x.shape[0], -1]) result = (x_reshaped - train_X_means) / (train_X_stds + 1e-9) return result.reshape(x.shape) normalized_test_X = standardization(test_X) with tf.Session() as sess, tf.summary.FileWriter( "./tf_logs/fashion_minst_multi_task_learning/" + str(datetime.now().timestamp()), graph=tf.get_default_graph()) as f: sess.run(tf.global_variables_initializer()) # similar logic as mnist's next_batch() epoch = 0
tensorflow.Session
1,647
import tensorflow as tf

d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all

# Mask
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1)  # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
scores = tf.where(key_masks, scores, paddings)  # [B, 1, T]

# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
tensorflow.ones_like
1,648
import tensorflow as tf self.w_attn_2 = tf.get_variable("w_2", [self.lstm_size, self.lstm_size]) self.v_attn = tf.get_variable("v", [self.lstm_size, 1]) def _build_sampler(self, prev_c=None, prev_h=None, use_bias=False): """Build the sampler ops and the log_prob ops.""" print ("-" * 80) print ("Build controller sampler") anchors = tf.TensorArray( tf.float32, size=self.num_cells + 2, clear_after_read=False) anchors_w_1 = tf.TensorArray( tf.float32, size=self.num_cells + 2, clear_after_read=False) arc_seq = tf.TensorArray(tf.int32, size=self.num_cells * 4) if prev_c is None: assert prev_h is None, "prev_c and prev_h must both be None" prev_c = [tf.zeros([1, self.lstm_size], tf.float32) for _ in range(self.lstm_num_layers)] prev_h = [tf.zeros([1, self.lstm_size], tf.float32) for _ in range(self.lstm_num_layers)] inputs = self.g_emb
tensorflow.TensorArray
1,649
import tensorflow as tf tf.logging.set_verbosity(tf.logging.INFO) if not FLAGS.do_train and not FLAGS.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) tf.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(","): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info("*** Input Files ***")
tensorflow.gfile.MakeDirs
1,650
import tensorflow as tf

top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents)  # [k, c]
tensorflow.gather
1,651
from tensorflow.python.framework import ops non_zero_count = math_ops.maximum(count, array_ops.ones_like(count), name=name) return math_ops.truediv(total, non_zero_count, name=name) mean = compute_mean(total, count, 'value') with ops.control_dependencies([total_compute_op, count_compute_op]): update_op = compute_mean(total, count, 'update_op') if metrics_collections: ops.add_to_collections(metrics_collections, mean) if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean, update_op def streaming_accuracy(predictions, labels, weights=None, metrics_collections=None, updates_collections=None,
tensorflow.python.framework.ops.add_to_collections
1,652
import tensorflow as tf self.assertTrue(vals[0, 0] > 0) self.assertTrue(vals[0, 0] <= 20) self.assertAllEqual(vals, np.ones([1024, 1024]) * vals[0, 0]) if __name__ == "__main__": tf.test.main()
tensorflow.test.main
1,653
import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data parser = argparse.ArgumentParser('MNIST Softmax') parser.add_argument('--data_dir', type=str, default='/tmp/mnist-data', help='the directory of MNIST dataset') parser.add_argument('--lr', type=float, default=0.01, help='learning rate') parser.add_argument('--batch_size', type=int, default=32, help='batch size') parser.add_argument('--max_train_step', type=int, default=50000, help='the maximum training step') parser.add_argument('--model_path', type=str, default='', help='the path of checkpoint file') args = parser.parse_args() def model(): x = tf.placeholder(tf.float32, [None, 784], name='x') gt = tf.placeholder(tf.float32, [None, 10], name='groundtruth') with tf.variable_scope('layer1'): w1 = tf.get_variable('weight1', [784, 1024], initializer=tf.random_normal_initializer()) b1 = tf.get_variable('bias1', [1024], initializer=tf.constant_initializer(0.0)) h1 = tf.nn.relu(tf.matmul(x, w1) + b1) with tf.variable_scope('layer2'): w2 = tf.get_variable('weight2', [1024, 1024], initializer=tf.random_normal_initializer()) b2 = tf.get_variable('bias2', [1024], initializer=tf.constant_initializer(0.0)) h2 = tf.nn.relu(tf.matmul(h1, w2) + b2) with tf.variable_scope('layer3'): w3 = tf.get_variable('weight3', [1024, 10], initializer=tf.random_normal_initializer()) b3 = tf.get_variable('bias3', [10], initializer=tf.constant_initializer(0.0)) y = tf.matmul(h2, w3) + b3
tensorflow.placeholder
1,654
import tensorflow as tf

sequence_lengths = tf.convert_to_tensor(sequence_lengths)
tensorflow.convert_to_tensor
1,655
import tensorflow as tf

h_stack1, h_stack2 = tf.split(out, 2, 3)
sigmoid_out = tf.sigmoid(h_stack2)
out = (h_stack1 * sigmoid_out)
out_shp = out.get_shape().as_list()
if out_shp[1:-1] < in_shp[1:-1]:
    x = tf.nn.avg_pool(x, [1, dim[2][0], dim[2][1], 1],
                       strides=[1, dim[2][0], dim[2][1], 1], padding='SAME')
elif out_shp[1:-1] > in_shp[1:-1]:
    warnings.warn(
        "The height and width of the output are larger than the input. There will be no residual connection.")
    residual = False
tensorflow.nn.avg_pool
1,656
import tensorflow as tf

                                 units=self.vf_hidden_size,
                                 activation=tf.nn.elu)
        w = tf.get_variable("weights", (self.vf_hidden_size, 1))
        return tf.matmul(hidden, w)

    def build_loss(self):
        cutoff_vf_manager = tf.reshape(tf.stop_gradient(self.manager_vf), [-1])
        dot = tf.reduce_sum(tf.multiply(self.s_diff, self.g), axis=1)
        gcut = tf.stop_gradient(self.g)
        mag = tf.norm(self.s_diff, axis=1) * tf.norm(gcut, axis=1) + .0001
        dcos = dot / mag
        manager_loss = -tf.reduce_sum((self.r - cutoff_vf_manager) * dcos)
        cutoff_vf_worker = tf.reshape(tf.stop_gradient(self.worker_vf), [-1])
        log_p = tf.reduce_sum(self.log_pi * self.ac, [1])
tensorflow.multiply
1,657
from tensorflow.python.ops import variable_scope ValueError: If `predictions` and `labels` have mismatched shapes, if `weights` is not `None` and its shape doesn't match `predictions`, or if `specificity` is not between 0 and 1, or if either `metrics_collections` or `updates_collections` are not a list or tuple. """ if specificity < 0 or specificity > 1: raise ValueError('`specificity` must be in the range [0, 1].') with variable_scope.variable_scope(name, 'sensitivity_at_specificity', [predictions, labels]): kepsilon = 1e-7 # to account for floating point imprecisions thresholds = [(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds-2)] thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon] (tp, fn, tn, fp, tp_update_op, fn_update_op, tn_update_op,
tensorflow.python.ops.variable_scope.variable_scope
1,658
import tensorflow as tf

                                           name='input_example_tensor')
    receiver_tensors = {'examples': serialized_tf_example}
    features = tf.parse_example(serialized_tf_example, feature_spec)
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)

estimator._export_to_tpu = False  # this is important
tensorflow.estimator.export.ServingInputReceiver
1,659
import tensorflow as tf

        return tf.train.Scaffold()

      scaffold_fn = tpu_scaffold
    else:
      tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
tensorflow.train.init_from_checkpoint
1,660
import tensorflow as tf

        A tensor.
    """
    if max_value is not None and max_value < min_value:
        max_value = min_value
    min_value = _to_tensor(min_value, x.dtype.base_dtype)
    max_value = _to_tensor(max_value, x.dtype.base_dtype)
    return tf.clip_by_value(x, min_value, max_value)

def epsilon():
    """Returns the value of the fuzz factor used in numeric expressions.
tensorflow.clip_by_value
1,661
import tensorflow as tf output_bias = tf.get_variable( "output_bias", shape=[2], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) if clip: log_probs = tf.log(tf.clip_by_value(tf.nn.softmax(logits, axis=-1), 1e-6, 1.0 - 1e-6)) else: log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.reshape(
tensorflow.reduce_mean
1,662
import tensorflow as tf Returns: (outputs, out_paddings, segment_ids) tuple. `outputs` is of the shape [time, batch, depth], and `out_paddings` has shape [time, batch]. If is_transparent is True, can return a list of num_transformer_layers tensors of shape [time, batch, depth] if `p.is_eval` is False, and a [time, batch, depth, num_transparent_outputs] tensor if `p.is_eval` is True. If packed_input is True, also returns segment_id, otherwise returns None. """ p = self.params if p.packed_input: assert src_segment_id is not None, ('Need to specify src_segment_id if ' 'packed input is supported.') outputs_list = [transformer_input] with tf.name_scope(p.name): for i, transformer_l in enumerate(self.trans): # For encoder, keys, values and queries are the same transformer_output, _ = transformer_l.FProp( theta.trans[i], transformer_input, paddings, aux_vecs=aux_vecs, aux_paddings=aux_paddings, source_segment_id=src_segment_id, aux_segment_id=aux_segment_id) transformer_input = transformer_output outputs_list.append(transformer_output)
tensorflow.name_scope
1,663
import tensorflow as tf

                             Tout=tf.float32)
gtboxes_and_label_r_ = tf.reshape(gtboxes_and_label_r_, [-1, 6])

gt_encode_label = tf.py_func(angle_label_encode,
                             inp=[gtboxes_and_label_r_[:, -2],
                                  cfgs.ANGLE_RANGE, cfgs.OMEGA, cfgs.ANGLE_MODE],
tensorflow.py_func
1,664
import tensorflow as tf else: raise ValueError("Unrecognized initializer: %s" % params.initializer) def get_learning_rate_decay(learning_rate, global_step, params): if params.learning_rate_decay == "noam": step = tf.to_float(global_step) warmup_steps = tf.to_float(params.warmup_steps) multiplier = params.hidden_size ** -0.5 decay = multiplier * tf.minimum((step + 1) * (warmup_steps ** -1.5), (step + 1) ** -0.5) return learning_rate * decay elif params.learning_rate_decay == "new_warmup_rsqrt_decay": step = tf.to_float(global_step) warmup_steps = tf.to_float(params.warmup_steps) multiplier = params.hidden_size ** -0.5 decay = params.r0 * multiplier * tf.minimum((step + 1) * (warmup_steps ** -1.0) * (warmup_steps ** -0.5), (step + 1) ** -0.5) return learning_rate * decay elif params.learning_rate_decay == "rnnplus_warmup_decay": step = tf.to_float(global_step) n = float(len(params.device_list)) warmup_steps = tf.to_float(params.warmup_steps) decay = tf.minimum(1 + step * (n - 1) / (n * warmup_steps), tf.minimum(n, n * ((2*n) ** ((params.s - n * step) / (params.e - params.s))))) return tf.maximum(learning_rate * decay, 5e-6)
tensorflow.to_float
1,665
import tensorflow as tf input_shape = tf.shape(bottom) # tf.reshape()中-1的应用,-1表示不知道该填什么数字合适的情况下,可以选择,由python通过原数组和其他的值推测出来 # 每一行是1个anchor的前景、背景得分,先显示所有点产生的第一种anchor,然后是所有点产生的第二种anchor,........ bottom_reshaped = tf.reshape(bottom, [-1, input_shape[-1]]) reshaped_score = tf.nn.softmax(bottom_reshaped, name=name) return tf.reshape(reshaped_score, input_shape) # [1,none,none,2] return tf.nn.softmax(bottom, name=name) def _proposal_top_layer(self, rpn_cls_prob, rpn_bbox_pred, name): with tf.variable_scope(name): rois, rpn_scores = tf.py_func(proposal_top_layer, [rpn_cls_prob, rpn_bbox_pred, self._im_info, self._feat_stride, self._anchors, self._num_anchors],
tensorflow.nn.softmax
1,666
import tensorflow as tf

for _ in range(stack_size):
    stack.append(tf.random_normal((batch_size, size * 2)))
tensorflow.random_normal
1,667
from tensorflow.python.ops import math_ops weight_tensor = self.get_weight_tensor(features) if weight_tensor is None: return math_ops.reduce_mean(loss_unweighted, name=name) loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor) return math_ops.reduce_mean(loss_weighted, name=name) def loss(self, logits, target, features): """Returns loss tensor for this head.
tensorflow.python.ops.math_ops.reduce_mean
1,668
import tensorflow as tf self.vf, vf_params, self.vf_state_init, self.vf_state_final = self.build_cnet(batch['state'], 'vf') self.vf_eval, _, self.vf_eval_state_init, self.vf_eval_state_final = self.build_cnet(self.state, 'vf', reuse=True, batch_size=1) self.sample_action = tf.squeeze(pi_eval.sample(1), axis=0) self.eval_action = pi_eval.mode() self.global_step = tf.train.get_or_create_global_step() self.saver = tf.train.Saver() # Loss functions and training epsilon_decay = tf.train.polynomial_decay(self.EPSILON, self.global_step, self.EPS_LEN, 0.1, power=1) ratio = tf.maximum(pi.prob(batch['actions']), 1e-6) / tf.maximum(pi_old.prob(batch['actions']), 1e-6) ratio = tf.clip_by_value(ratio, 0, 10) surr1 = batch['advantage'] * ratio surr2 = batch['advantage'] * tf.clip_by_value(ratio, 1 - epsilon_decay, 1 + epsilon_decay) loss_pg = - 2.0 * tf.reduce_mean(tf.minimum(surr1, surr2)) loss_vf = 0.5 * tf.reduce_mean(tf.square(batch['rewards'] - self.vf)) loss_entropy = - 0.01 * tf.reduce_mean(pi.entropy()) loss = loss_pg + loss_vf + loss_entropy opt = tf.train.AdamOptimizer(self.LR) self.train_op = opt.minimize(loss, global_step=self.global_step, var_list=pi_params + vf_params) self.pi_new_params = [oldp.assign(p) for p, oldp in zip(pi_params, pi_old_params)] self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)] self.sess.run(tf.global_variables_initializer()) # Tensorboard if summary_dir is not None:
tensorflow.minimum
1,669
import tensorflow as tf

fc2_1 = contrib.layers.fully_connected(stitch1_1, 32, scope="fc2_1")
fc2_2 = contrib.layers.fully_connected(stitch1_2, 32, scope="fc2_2")

if cross_stitch_enabled:
    with tf.variable_scope("cross_stitch_2"):
        stitch2_1, stitch2_2 = apply_cross_stitch(fc2_1, fc2_2)
else:
    stitch2_1, stitch2_2 = fc2_1, fc2_2
tensorflow.variable_scope
1,670
import tensorflow as tf

self.opt_pred = self.optimizer(lr_pred, beta, self.real_pc_rot_loss, rot_params, batch)  #only use real pics to update

self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)
self.init = tf.global_variables_initializer()

#Launch the session
tensorflow.global_variables_initializer
1,671
import tensorflow as tf

self.input_tensors = queue.dequeue()
self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
self.global_step = tf.Variable(0, name="global_step", trainable=False)
self.reset_global_step = tf.assign(self.global_step, 0)
learning_rate = tf.train.exponential_decay(self.config["learning_rate"], self.global_step,
                                           self.config["decay_frequency"], self.config["decay_rate"],
                                           staircase=True)
trainable_params = tf.trainable_variables()
gradients = tf.gradients(self.loss, trainable_params)
gradients, _ = tf.clip_by_global_norm(gradients, self.config["max_gradient_norm"])
tensorflow.train.exponential_decay
1,672
from tensorflow.python.framework import ops x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`, or `qint32`. name: A name for the operation (optional). Returns: A Tensor with the same type as `x` if `x.dtype != qint32` otherwise the return type is `quint8`. """ with ops.op_scope([x], name, "Sigmoid") as name: x = ops.convert_to_tensor(x, name="x") return gen_math_ops._sigmoid(x, name=name) def tanh(x, name=None): """Computes hyperbolic tangent of `x` element-wise. Args: x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
tensorflow.python.framework.ops.convert_to_tensor
1,673
from tensorflow.python.ops import variable_scope

with variable_scope.variable_scope(
tensorflow.python.ops.variable_scope.variable_scope
1,674
import tensorflow as tf adv_images = adv_craft_func(hps, images, FLAGS.attack_method, eps=FLAGS.eps, RCE_train=FLAGS.RCE_train) model_nor = model_name.ResNet(hps, images, FLAGS.mode, Reuse=True) model_nor.build_graph() model_adv = model_name.ResNet(hps, adv_images, FLAGS.mode, Reuse=True) model_adv.build_graph() # Open session and restore checkpoint sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) tf.train.start_queue_runners(sess) sess.run(tf.global_variables_initializer()) ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path) saver.restore(sess, ckpt_state.model_checkpoint_path) logits_nor = model_nor.t_SNE_logits
tensorflow.train.start_queue_runners
1,675
import tensorflow as tf sigmas = [ 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, 1e3, 1e4, 1e5, 1e6 ] gaussian_kernel = partial(util.gaussian_kernel_matrix, sigmas=tf.constant(sigmas)) loss_value = maximum_mean_discrepancy(source_samples, target_samples, kernel=gaussian_kernel) loss_value = tf.maximum(1e-4, loss_value) * weight assert_op = tf.Assert(tf.is_finite(loss_value), [loss_value]) with tf.control_dependencies([assert_op]): tag = 'MMD_Loss' barrier = tf.no_op(tag) return loss_value def dann_loss(source_samples, target_samples, weight, name='dann_loss'): """Adds the domain adversarial (DANN) loss. Args: source_samples: a tensor of shape [num_samples, num_features]. target_samples: a tensor of shape [num_samples, num_features].
tensorflow.no_op
1,676
import tensorflow as tf z = tf.py_func(my_func, [x, y], [tf.float64]) self.assertAllEqual( z[0].eval(), my_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64)) # a bit exotic type (complex64) with self.test_session(): x = tf.constant(1+2j, tf.complex64) y = tf.constant(3+4j, tf.complex64) z, = tf.py_func(my_func, [x, y], [tf.complex64]) self.assertAllClose(z.eval(), my_func(1+2j, 3+4j)) # a bit excotic function (rfft) with self.test_session(): x = tf.constant([1., 2., 3., 4.], tf.float32) def rfft(x): return np.fft.rfft(x).astype(np.complex64) y, = tf.py_func(rfft, [x], [tf.complex64])
tensorflow.py_func
1,677
import tensorflow as tf elif out_shp[-1] < in_shp[-1]: warnings.warn("The input has more feature maps than the output. There will be no residual connection.") residual = False if residual: out += x return out def deconv2d(x, dim=(32, [3, 3], [1, 1]), pad='SAME', scope="deconv2d", training=True, ema=None, init=False, bias_initializer=tf.constant_initializer(0.)): num_filters, filter_size, stride = dim xs = x.get_shape().as_list() if pad=='SAME': target_shape = [tf.shape(x)[0], xs[1]*stride[0], xs[2]*stride[1], num_filters] else: target_shape = [tf.shape(x)[0], xs[1]*stride[0] + filter_size[0]-1, xs[2]*stride[1] + filter_size[1]-1, num_filters] with tf.variable_scope(scope): V = tf.get_variable("V", shape=list(filter_size) + [num_filters, int(x.get_shape()[-1])], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.05), trainable=True) g = tf.get_variable("g", shape=[num_filters], dtype=tf.float32, initializer=tf.constant_initializer(1.), trainable=True) b = tf.get_variable("b", shape=[num_filters], dtype=tf.float32, initializer=bias_initializer, trainable=True) def maybe_avg(v): if ema is not None and not init: v = tf.cond(training, lambda: v, lambda: ema.average(v)) return v if init:
tensorflow.shape
1,678
import tensorflow as tf (1 - self.terminals_ph) * self.gamma * self.value_target ) # Compute Q-Function loss # TODO: test with huber loss (it would avoid too high values) qf1_loss = 0.5 * tf.reduce_mean(((q_backup - qf1) ** 2)*self.weight_ph) qf1_loss_col = tf.reduce_mean(((q_backup - qf1) ** 2),1) qf2_loss = 0.5 * tf.reduce_mean(((q_backup - qf2) ** 2)*self.weight_ph) if self.n_step: q_backup_n = tf.stop_gradient( self.rewards_ph_n + (1 - self.terminals_ph_n) *( self.gamma**self.n_step_length ) * self.value_target_n) qf1_loss_n = 0.5 * tf.reduce_mean(((q_backup_n - qf1) ** 2)*self.weight_ph) qf1_loss_n_col = tf.reduce_mean(((q_backup_n - qf1) ** 2),1)
tensorflow.reduce_mean
1,679
import tensorflow as tf if self.datasets: self.dataset_handles = {} for n, i in self.dataset_iterators.items(): self.dataset_handles[n] = self.sess.run(i.string_handle()) self.sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()]) def train(self, iterations, validation_interval=100, output_dir=None, save_interval=None, checkpoint_path=None, keep_checkpoints=1): assert 'training' in self.datasets, 'Training dataset is required.' if output_dir is not None: train_writer = tf.summary.FileWriter(output_dir) if not hasattr(self, 'saver'): with tf.device('/cpu:0'): self.saver = tf.train.Saver(save_relative_paths=True, max_to_keep=keep_checkpoints) if not self.graph.finalized: self.graph.finalize() tf.logging.info('Start training') for i in range(iterations): loss, summaries, _ = self.sess.run( [self.loss, self.summaries, self.trainer], feed_dict={self.handle: self.dataset_handles['training']}) if save_interval and checkpoint_path and i != 0 and i % save_interval == 0: self.save(checkpoint_path) if 'validation' in self.datasets and i % validation_interval == 0: metrics = self.evaluate('validation', mute=True)
tensorflow.train.Saver
1,680
import tensorflow as tf tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, num_parallel_batches=num_cpu_threads, drop_remainder=True)) return d return input_fn def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example
tensorflow.parse_single_example
1,681
import tensorflow as tf def _add_aux_head(self, X, in_w, in_h, in_ch, K, is_train): pool_ksize = 5 pool_stride = 3 conv_ch = 128 global_conv_ch = 768 w = in_w h = in_h ch = in_ch # Pool with tf.variable_scope('pool'): X = tf.nn.relu(X) X = tf.nn.avg_pool(X, ksize=(1, pool_ksize, pool_ksize, 1), strides=(1, pool_stride, pool_stride, 1), padding='VALID') w //= pool_stride h //= pool_stride # Conv 1x1 with tf.variable_scope('conv_0'): X = self._do_conv(X, w, h, ch, conv_ch, filter_size=1, no_reg=True, is_train=is_train) ch = conv_ch
tensorflow.variable_scope
1,682
from tensorflow.python.ops import math_ops loss_unweighted = self._loss_fn(logits, target) weight_tensor = self.get_weight_tensor(features) if weight_tensor is None: return math_ops.reduce_mean(loss_unweighted, name="loss") loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor) return math_ops.div(math_ops.reduce_sum(loss_weighted), math_ops.to_float(math_ops.reduce_sum(weight_tensor)), name="loss") class _RegressionTargetColumn(_TargetColumn): """_TargetColumn for regression."""
tensorflow.python.ops.math_ops.reduce_sum
1,683
import tensorflow as tf if isinstance(data_sources, (list, tuple)): data_files = [] for source in data_sources: data_files += self.get_data_files(source) else: if '*' in data_sources or '?' in data_sources or '[' in data_sources: data_files = tf.gfile.Glob(data_sources) else: data_files = [data_sources] if not data_files: raise ValueError('No data files found in %s' % (data_sources,)) return data_files
tensorflow.gfile.Glob
1,684
import tensorflow as tf

if tf.test.is_built_with_cuda():
    pass

# 7. We want to assign specific operations to the GPU. The example below does some
#    simple computations and places them on the main CPU and two secondary GPUs.
with tf.device('/cpu:0'):
    a = tf.constant([1.0, 3.0, 5.0], shape=[1,3])
    b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])

    with tf.device('/gpu:0'):
        c = tf.matmul(a,b)
        c = tf.reshape(c, [-1])

    with tf.device('/gpu:1'):
        d = tf.matmul(b, a)
        flat_d = tf.reshape(d, [-1])

    combined = tf.multiply(c, flat_d)
    print(sess.run(combined))
tensorflow.device
1,685
import tensorflow as tf vz = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string, value_dtype=tf.int64, default_value=-1) vx_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1)) vz_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1)) x_t = tf.gather(x, l) x_t_len = tf.strings.length(x_t) x_t = tf.string_split([x_t], delimiter='').values z_t = tf.gather(y, m) z_t_len = tf.strings.length(z_t) z_t = tf.string_split([z_t], delimiter='').values for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'): u = tf.string_join(x_t[i:i + self._p], '') vx_keys, r = tf.cond( tf.greater(vx.lookup(u), -1),
tensorflow.string_split
1,686
from tensorflow.python.ops import parsing_ops return math_ops.argmax(logits, 1) def _get_feature_ops_from_example(self, examples_batch): column_types = layers.create_dict_for_parse_example( (self._get_linear_feature_columns() or []) + (self._get_dnn_feature_columns() or [])) features = parsing_ops.parse_example(examples_batch, column_types) return features def _num_label_columns(self): return 1 if self._n_classes <= 2 else self._n_classes
tensorflow.python.ops.parsing_ops.parse_example
1,687
import tensorflow as tf

tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_gpus', 1,
tensorflow.app.flags.DEFINE_integer
1,688
import tensorflow as tf if pt[0] > box_limits_x[1]: box_limits_x[1] = pt[0] if pt[1] < box_limits_z[0]: box_limits_z[0] = pt[1] if pt[1] > box_limits_z[1]: box_limits_z[1] = pt[1] mean_x = tf.reduce_mean(box_limits_x) mean_z = tf.reduce_mean(box_limits_z) else: mean_x = tf.reduce_mean(labeled_translations[:, 0]) mean_z = tf.reduce_mean(labeled_translations[:, 2]) samples_world = grid.generate( (mean_x - 0.5, 0.0, mean_z - 0.5), (mean_x + 0.5, 1.0, mean_z + 0.5), [self.resolution, self.resolution, self.resolution]) # samples_world = grid.generate(
tensorflow.reduce_mean
1,689
import tensorflow as tf self.max_seq_len = tf.reduce_max(seq_lengths) self.inputs_ta = base.ta_for_tensor(inputs, clear_after_read=False) self.targets_ta = base.ta_for_tensor(targets, clear_after_read=False) targets_encoded = base.encode_all(targets, self.data_encoder) self.targets_encoded_ta = base.ta_for_tensor(targets_encoded, clear_after_read=False) if self.rev_rnn_cell: reverse_targets_encoded = tf.reverse_sequence( targets_encoded, seq_lengths, seq_axis=0, batch_axis=1) # Compute the reverse rnn over the targets. reverse_rnn_out, _ = tf.nn.dynamic_rnn(self.rev_rnn_cell, reverse_targets_encoded, time_major=True, dtype=tf.float32) reverse_rnn_out = tf.reverse_sequence(reverse_rnn_out, seq_lengths, seq_axis=0, batch_axis=1) self.reverse_rnn_ta = base.ta_for_tensor(reverse_rnn_out, clear_after_read=False) def _filtering_proposal(self, rnn_out, prior, t):
tensorflow.nn.dynamic_rnn
1,690
import tensorflow as tf mean, var = tf.nn.moments( x, reduction_axes, shift=None, name=None, keep_dims=False) if sorted(reduction_axes) == range(ndim(x))[:-1]: normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon) else: # need broadcasting target_shape = [] for axis in range(get_ndim(x)): if axis in reduction_axes: target_shape.append(1) else: target_shape.append(tf.shape(x)[axis]) target_shape = stack(target_shape) broadcast_mean = tf.reshape(mean, target_shape) broadcast_var = tf.reshape(var, target_shape) broadcast_gamma = tf.reshape(gamma, target_shape) broadcast_beta = tf.reshape(beta, target_shape) normed = tf.nn.batch_normalization(x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon) return normed, mean, var def ones(shape, dtype=None, name=None): """Instantiates an all-ones tensor variable and returns it. Parameters ---------- shape: Tuple of integers, shape of returned Keras variable. dtype: Tensorflow dtype
tensorflow.reshape
1,691
import tensorflow as tf model_loss = model.loss(score_maps, f_score, geo_maps, f_geometry, training_masks) total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)) # add summary if reuse_variables is None: tf.summary.image('input', images) tf.summary.image('score_map', score_maps) tf.summary.image('score_map_pred', f_score * 255) tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1]) tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1]) tf.summary.image('training_masks', training_masks) tf.summary.scalar('model_loss', model_loss) tf.summary.scalar('total_loss', total_loss) return total_loss, model_loss def average_gradients(tower_grads): average_grads = [] for grad_and_vars in zip(*tower_grads): grads = [] for g, _ in grad_and_vars: expanded_g = tf.expand_dims(g, 0) grads.append(expanded_g) grad = tf.concat(grads, 0) grad = tf.reduce_mean(grad, 0)
tensorflow.summary.scalar
1,692
import tensorflow as tf activation = tf.tanh if encoder.batch_norm: encoder_inputs_ = tf.layers.batch_normalization(encoder_inputs_, training=training, name='input_batch_norm_{}'.format(j + 1)) encoder_inputs_ = dense(encoder_inputs_, layer_size, activation=activation, use_bias=True, name='layer_{}'.format(j)) if encoder.use_dropout: encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.input_layer_keep_prob) if encoder.conv_filters: encoder_inputs_ = tf.expand_dims(encoder_inputs_, axis=3) for k, out_channels in enumerate(encoder.conv_filters, 1): in_channels = encoder_inputs_.get_shape()[-1].value filter_height, filter_width = encoder.conv_size
tensorflow.nn.dropout
1,693
import tensorflow as tf

config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    for k in range(n_tokens):
        token = vocab.id_to_word(k)
tensorflow.global_variables_initializer
1,694
import tensorflow as tf Returns: a filtered `tf.data.Dataset`. """ if (len_map is None) or (not training and not filter_on_eval): return dataset assert isinstance(len_map, dict) for k, bounds in len_map.items(): # pylint: disable=cell-var-from-loop # TODO(afrozm): Investigate `cell-var-from-loop` - since this is WAI and # there is a test too. def within_bounds(x, key, len_bounds): size = tf.shape(x[key])[0] min_len, max_len = len_bounds return (min_len <= size) and (size <= max_len) dataset = dataset.filter(lambda x: within_bounds(x, k, bounds)) # pylint: enable=cell-var-from-loop return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def truncate_dataset_on_len(dataset, training,
tensorflow.shape
1,695
import tensorflow as tf decoded = tf.cast(tf.sparse.to_dense(decoded), tf.int32) decoded_u = tf.sparse.SparseTensor(indices_u[0], values_u[0], shape_u[0]) decoded_u = tf.cast(tf.sparse.to_dense(decoded_u), tf.int32) # Adjust event vals according to representation decoded = tf.where(tf.not_equal(decoded, 0), decoded+shift, decoded) decoded_u = tf.where(tf.not_equal(decoded_u, 0), decoded_u+shift, decoded_u) # Set default vals decoded = tf.where(tf.equal(decoded, 0), def_val, decoded)
tensorflow.not_equal
1,696
import tensorflow as tf def __call__(self,input_var,name=None,**kwargs) : if( input_var.shape.ndims > 2 ) : dims = tf.reduce_prod(tf.shape(input_var)[1:]) input_var = tf.reshape(input_var,[-1,dims]) def _init(): v_norm = tf.nn.l2_normalize(self.v,axis=0) t = tf.matmul(input_var,v_norm) mu,var = tf.nn.moments(t,axes=[0]) std = tf.sqrt(var+self.epsilon) return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)] require_init = tf.reduce_any(tf.is_nan(self.g))
tensorflow.nn.l2_normalize
1,697
import tensorflow as tf

    :param name: name of the entire dense layer.i.e, variable scope name.
    :return: tensor with shape [batch_size, n2]
    """
    with tf.variable_scope(name, reuse=None):
        weights = tf.get_variable("weights", shape=[n1, n2],
                                  initializer=tf.random_normal_initializer(mean=0., stddev=0.01))
        bias = tf.get_variable("bias", shape=[n2], initializer=tf.constant_initializer(0.0))
        out = tf.add(tf.matmul(x, weights), bias, name='matmul')
        return out

# The autoencoder network
tensorflow.constant_initializer
1,698
import tensorflow as tf blk_indices_crop = blk_indices[:, 0, 0, :] # Project back to an image. y = tf.scatter_nd(blk_indices_crop, q, out_shape) return y with tf.control_dependencies([assert_shape, assert_strides]): return tf.cond( tf.equal(tf.size(blk_indices_), 0), lambda: tf.zeros(out_shape, dtype=x.dtype), _conv_nonzero) def mask_conv2d(x, w, mask, strides, padding): """Masked 2D convolution. Used to check 2D sparse convolution. :param x: [Tensor] Convolution feature map, 4D, dtype float32.
tensorflow.size
1,699