seed (string, lengths 25–2.89k)
seed_api (string, lengths 14–102)
index (int64, 0–14.8k)
import tensorflow as tf

# until we collected as many observations in it
# as there were points in the batch
all_new_data = Dataset(
    tf.zeros((0, initial_data.query_points.shape[1]), tf.float64),
    tf.zeros((0, initial_data.observations.shape[1]), tf.float64),
)
while len(all_new_data) < num_workers:
    # this line blocks the process until new data is available in the queue
tensorflow.zeros
2,300
import tensorflow as tf

            olda.assign(a) for a, olda in zip(
                action_op_params, old_action_op_params)]

        # define input variables
        self.tfa = tf.placeholder(tf.float32, [None, action_dim], 'action')
        self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')

        # probability ratio
        ratio = action_op.prob(self.tfa) / \
            (old_action_op.prob(self.tfa) + 1e-5)
        # surrogate loss
        surr = ratio * self.tfadv
        # minimize the surrogate loss
        self.aloss = -tf.reduce_mean(tf.minimum(
            surr,
            tf.clip_by_value(ratio, 1. - EPSILON, 1. + EPSILON) * self.tfadv))
        self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)

        # log
        self.train_writer = tf.summary.FileWriter("logs/", self.sess.graph)

        self.sess.run(tf.global_variables_initializer())
        self.tableAction = self.createActionTable()

    def createActionTable(self):
        tableAction = []
        for a in range(0, 3):
            for b in range(0, 3):
                for c in range(0, 2):
                    tableAction.append([a, b, c, 0])
        # print("Action option: ", tableAction[0:17])
        return tableAction
tensorflow.train.AdamOptimizer
2,301
import tensorflow as tf

trg_indices = tf.tile(tf.reshape(tf.range(trg_len), shape=[1, trg_len, 1]),
                      [batch_size, 1, src_len])
source_length = encoder_input_length[0]
target_length = tf.to_int32(tf.reduce_sum(trg_mask, axis=1))
true_src_len = tf.reshape(source_length, shape=[batch_size, 1, 1]) - 1
true_trg_len = tf.reshape(target_length, shape=[batch_size, 1, 1]) - 1
src_mask = tf.to_float(tf.sequence_mask(source_length, maxlen=src_len))
mask = tf.matmul(tf.expand_dims(trg_mask, axis=2), tf.expand_dims(src_mask, axis=1))
monotonous = tf.sqrt(((true_trg_len * src_indices - true_src_len * trg_indices) ** 2)
                     / (true_trg_len**2 + true_src_len**2))
monotonous = tf.to_float(monotonous < monotonicity_dist)
non_monotonous = (1 - monotonous) * mask
attn_loss = tf.reduce_sum(attention_weights * tf.stop_gradient(non_monotonous)) / tf.to_float(batch_size)
tensorflow.expand_dims
2,302
import tensorflow as tf

    """
    # loss = None
    with tf.name_scope(name, "click_loglikelihood"):
        ob_prob = tf.nn.softmax(propensity)
        rel_prob = tf.nn.softmax(train_output)
tensorflow.name_scope
2,303
import tensorflow as tf

class EpsilonLRP(GradientBasedMethod):
    eps = None

    def __init__(self, T, X, session, keras_learning_phase, epsilon=1e-4, Y_shape=None):
        assert epsilon > 0.0, 'LRP epsilon must be greater than zero'
        global eps
        eps = epsilon
        super(EpsilonLRP, self).__init__(T, X, session, keras_learning_phase, Y_shape)

    def get_symbolic_attribution(self):
        return [g * x for g, x in zip(
            tf.gradients(ys=self.T, xs=self.X),
            self.X if self.has_multiple_inputs else [self.X])]

    @classmethod
    def nonlinearity_grad_override(cls, op, grad):
        output = op.outputs[0]
        input = op.inputs[0]
        return grad * output / (input + eps *
                                tf.compat.v1.where(input >= 0, tf.ones_like(input), -1 * tf.ones_like(input)))
tensorflow.gradients
2,304
import tensorflow as tf

def get_input_function():
    """A function to get test inputs. Returns an image with one box."""
    image = tf.random_uniform([32, 32, 3], dtype=tf.float32)
    key = tf.constant('image_000000')
    class_label = tf.random_uniform(
        [1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32)
    box_label = tf.random_uniform(
        [1, 4], minval=0.4, maxval=0.6, dtype=tf.float32)
    return {
tensorflow.random_uniform
2,305
from tensorflow.python.ops import array_ops

                     "Instead got %s." % target.dtype)
# sparse_softmax_cross_entropy_with_logits requires [batch_size] target.
if len(target.get_shape()) == 2:
    target = array_ops.squeeze(target, squeeze_dims=[1])
loss_vec = nn.sparse_softmax_cross_entropy_with_logits(
    labels=target, logits=logits)
tensorflow.python.ops.array_ops.squeeze
2,306
import tensorflow as tf

else:
    tf.logging.info('Mode not found.')

if FLAGS.export_dir is not None:
    tf.logging.info('Starting exporting saved model ...')
    serving_shape = [hparams.image_size, hparams.image_size, 3]
    export_path = image_classifier.export_saved_model(
        export_dir_base=FLAGS.export_dir,
tensorflow.logging.info
2,307
import tensorflow as tf

[t2ind, t2val, t2sh] = sp.createRandomSparseTensor(rho_filter, filter_in_sizes)
s2 = tf.SparseTensor(indices=t2ind, values=t2val, dense_shape=t2sh)
d2 = sp.sparse_to_dense(t2ind, t2val, t2sh)

print("strides: \n", strides)
print("input shape", tensor_in_sizes)
print("filter shape", filter_in_sizes)

config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.7

with tf.device("/gpu:0"):
    convd = sc_module.direct_sparse_data_conversion(t1ind, t1val, t1sh)
    convf = sc_module.direct_sparse_filter_conversion(t2ind, t2val, t2sh, t1sh)
with tf.Session(config=config) as sess:
    pd = sess.run(convd)
    pf = sess.run(convf)

tf.reset_default_graph()

ts = 0
with tf.device("/gpu:0"):
    approx_scskconv = sc_module.direct_sparse_conv_kd(
        pd.out_indices, pd.out_values, pd.out_shape, pd.out_block_channel_mapping,
        pf.out_indices, pf.out_values, pf.out_shape, pf.out_channel_mapping,
        bias, strides, padding, out_entry_count, dim, max_density, filter_type)
with tf.Session(config=config) as sess:
    t6 = time.time()
    sv3 = sess.run(approx_scskconv)
    t5 = time.time()
    for i in range(0, num_trials):
tensorflow.Session
2,308
import tensorflow as tf

        block_relu_2 = tf.nn.relu(block_norm_2)
        block_conv_3 = self.conv_layer(block_relu_2, 1, channel_list[1], channel_list[2], 1, name + "_branch2c")
        block_res = tf.add(block_conv_input, block_conv_3)
        relu = tf.nn.relu(block_res)
        return relu

    def avg_pool(self, bottom, kernal_size=2, stride=2, name="avg"):
        return tf.nn.avg_pool(bottom, ksize=[1, kernal_size, kernal_size, 1],
                              strides=[1, stride, stride, 1], padding='VALID', name=name)

    def max_pool(self, bottom, kernal_size=2, stride=2, name="max"):
        return tf.nn.max_pool(bottom, ksize=[1, kernal_size, kernal_size, 1],
                              strides=[1, stride, stride, 1], padding='SAME', name=name)

    def conv_layer(self, bottom, kernal_size, in_channels, out_channels, stride, name):
        with tf.variable_scope(name):
            filt, conv_biases = self.get_conv_var(kernal_size, in_channels, out_channels, name)
            conv = tf.nn.conv2d(bottom, filt, [1, stride, stride, 1], padding='SAME')
            bias = tf.nn.bias_add(conv, conv_biases)
tensorflow.nn.avg_pool
2,309
import tensorflow as tf

# Trainable parameters
w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

with tf.name_scope('v'):
    # Applying fully connected layer with non-linear activation to each of the B*T timestamps;
    # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
    tmp1 = tf.tensordot(facts, w1, axes=1)
    tmp2 = tf.tensordot(query, w2, axes=1)
    tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
    tmp = tf.tanh((tmp1 + tmp2) + b)
tensorflow.name_scope
2,310
import tensorflow as tf

try:
    pred_norm = pred_ / \
        (eps + tf.reshape(tf.reduce_sum(pred_, 1), [-1, 1]))
except Exception:
    pred_norm = pred_ / \
        (eps + tf.reshape(tf.reduce_sum(pred_, 1), [batch_size, 1]))

hist_rater_a = tf.reduce_sum(pred_norm, 0)
hist_rater_b = tf.reduce_sum(labels, 0)

conf_mat = tf.matmul(tf.transpose(pred_norm), labels)

nom = tf.reduce_sum(weights * conf_mat)
denom = tf.reduce_sum(weights * tf.matmul(
    tf.reshape(hist_rater_a, [num_ratings, 1]),
    tf.reshape(hist_rater_b, [1, num_ratings])) / tf.to_float(batch_size))
tensorflow.reduce_sum
2,311
import tensorflow as tf

x = tf.placeholder_with_default(
    input=np.ones((6, 7, 2, 3, 5), dtype=np.float32), shape=None)
dist = fake_distribution(batch_shape=None, event_shape=None)
sample_shape = tf.convert_to_tensor([6, 7], dtype=tf.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.shape.ndims is None)
tensorflow.convert_to_tensor
2,312
import tensorflow as tf

if use_token_type:
    if token_type_ids is None:
        raise ValueError("`token_type_ids` must be specified if "
                         "`use_token_type` is True.")
    token_type_table = tf.get_variable(
        name=token_type_embedding_name,
        shape=[token_type_vocab_size, width],
        initializer=create_initializer(initializer_range))
    # This vocab will be small so we always do one-hot here, since it is always
    # faster for a small vocabulary.
    flat_token_type_ids = tf.reshape(token_type_ids, [-1])
    one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
    token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
    token_type_embeddings = tf.reshape(token_type_embeddings,
                                       [batch_size, seq_length, width])
    output += token_type_embeddings

if use_position_embeddings:
    assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
    with tf.control_dependencies([assert_op]):
        full_position_embeddings = tf.get_variable(
tensorflow.reshape
2,313
import tensorflow as tf

new_rewards = tf.concat([rewards[:, None], next_rewards[:, None]], axis=1)
new_dones = tf.concat([dones[:, None], next_dones[:, None]], axis=1)
# 0 if episode is done, 1 if episode is continuing
tensorflow.concat
2,314
import tensorflow as tf

if hparams.wavenet_test_size is None:
    assert hparams.wavenet_test_batches == self.test_steps

# Get conditioning status
self.local_condition, self.global_condition = self._check_conditions()

with tf.device('/cpu:0'):
    # Create placeholders for inputs and targets. Don't specify batch size because we want
    # to be able to feed different batch sizes at eval time.
    if is_scalar_input(hparams.input_type):
        input_placeholder = tf.placeholder(tf.float32, shape=(None, 1, None), name='audio_inputs')
        target_placeholder = tf.placeholder(tf.float32, shape=(None, None, 1), name='audio_targets')
        target_type = tf.float32
    else:
        input_placeholder = tf.placeholder(tf.float32, shape=(None, hparams.quantize_channels, None), name='audio_inputs')
        target_placeholder = tf.placeholder(tf.int32, shape=(None, None, 1), name='audio_targets')
        target_type = tf.int32

    self._placeholders = [
        input_placeholder,
        target_placeholder,
        tf.placeholder(tf.int32, shape=(None, ), name='input_lengths'),
    ]
tensorflow.placeholder
2,315
from tensorflow.python.ops import math_ops

recall = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
    fp_rate = math_ops.div(fp, fp + tn + epsilon)
    x = fp_rate
tensorflow.python.ops.math_ops.div
2,316
from tensorflow.contrib.eager.python.examples.spinn import data

        inference_sentences=("( foo ( bar . ) )", "( bar ( foo . ) )"))
    logits = spinn.train_or_infer_spinn(
        embed, word2index, None, None, None, config)
    self.assertEqual(tf.float32, logits.dtype)
    self.assertEqual((3,), logits.shape)

def testInferSpinnThrowsErrorIfOnlyOneSentenceIsSpecified(self):
    snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
    self._create_test_data(snli_1_0_dir)
    vocab = data.load_vocabulary(self._temp_data_dir)
    word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)
    config = _test_spinn_config(
        data.WORD_VECTOR_LEN, 4,
        logdir=os.path.join(self._temp_data_dir, "logdir"),
        inference_sentences=("( foo ( bar . ) )", None))
    with self.assertRaises(ValueError):
        spinn.train_or_infer_spinn(embed, word2index, None, None, None, config)

def testTrainSpinn(self):
    """Test with fake toy SNLI data and GloVe vectors."""
tensorflow.contrib.eager.python.examples.spinn.data.load_word_vectors
2,317
import tensorflow as tf

    return candidate_labels

def get_dropout(self, dropout_rate, is_training):
    return 1 - (tf.to_float(is_training) * dropout_rate)

def coarse_to_fine_pruning(self, top_span_emb, top_span_mention_scores, c):
    k = util.shape(top_span_emb, 0)
    top_span_range = tf.range(k)  # [k]
    antecedent_offsets = tf.expand_dims(top_span_range, 1) - tf.expand_dims(top_span_range, 0)  # [k, k]
    antecedents_mask = antecedent_offsets >= 1  # [k, k]
    fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.expand_dims(top_span_mention_scores, 0)  # [k, k]
    fast_antecedent_scores += tf.log(tf.to_float(antecedents_mask))  # [k, k]
    fast_antecedent_scores += self.get_fast_antecedent_scores(top_span_emb)  # [k, k]

    _, top_antecedents = tf.nn.top_k(fast_antecedent_scores, c, sorted=False)  # [k, c]
    top_antecedents_mask = util.batch_gather(antecedents_mask, top_antecedents)  # [k, c]
tensorflow.expand_dims
2,318
import tensorflow as tf

output, state = self._build_rnn_graph(inputs, config, is_training)

softmax_w = tf.get_variable(
    'softmax_w', [hidden_size, vocab_size], dtype=tf.float32)
tensorflow.get_variable
2,319
from tensorflow.python.framework import ops

  """x ^ y = (x | y) & ~(x & y)."""
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  return logical_and(logical_or(x, y),
                     logical_not(logical_and(x, y)),
                     name=name)

_OverrideBinaryOperatorHelper(logical_and, "and")
_OverrideBinaryOperatorHelper(logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")

ops.Tensor._override_operator("__lt__", less)
ops.Tensor._override_operator("__le__", less_equal)
ops.Tensor._override_operator("__gt__", greater)
ops.Tensor._override_operator("__ge__", greater_equal)

def range(start, limit, delta=1, name="range"):
  """Creates a sequence of integers.
tensorflow.python.framework.ops.Tensor._override_operator
2,320
import tensorflow as tf

# an additional layer to predict answerability
with tf.variable_scope("answer_class"):
    # get the representation of CLS
    cls_index = tf.one_hot(cls_index, seq_len, axis=-1, dtype=tf.float32)
    cls_feature = tf.einsum("lbh,bl->bh", output, cls_index)

    # get the representation of START
    start_p = tf.nn.softmax(start_logits_masked, axis=-1, name="softmax_start")
    start_feature = tf.einsum("lbh,bl->bh", output, start_p)

    # note(zhiliny): no dependency on end_feature so that we can obtain
    # one single `cls_logits` for each sample
    ans_feature = tf.concat([start_feature, cls_feature], -1)
    ans_feature = tf.layers.dense(
        ans_feature,
        xlnet_config.d_model,
        activation=tf.tanh,
        kernel_initializer=initializer,
        name="dense_0")
    ans_feature = tf.layers.dropout(ans_feature, FLAGS.dropout, training=is_training)
    cls_logits = tf.layers.dense(
        ans_feature,
        1,
        kernel_initializer=initializer,
        name="dense_1",
        use_bias=False)
    cls_logits = tf.squeeze(cls_logits, -1)
tensorflow.layers.dense
2,321
import tensorflow as tf

if encoder.attn_keep_prob is not None:
    state_noise_shape = [1, tf.shape(state)[1]] if encoder.pervasive_dropout else None
    state = tf.nn.dropout(state, keep_prob=encoder.attn_keep_prob, noise_shape=state_noise_shape)
    hidden_noise_shape = [1, 1, tf.shape(hidden)[2]] if encoder.pervasive_dropout else None
    hidden = tf.nn.dropout(hidden, keep_prob=encoder.attn_keep_prob, noise_shape=hidden_noise_shape)

if encoder.mult_attn:
tensorflow.shape
2,322
from tensorflow.python.framework import ops

ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
ops.RegisterShape("Real")(common_shapes.unchanged_shape)
ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Sign")(common_shapes.unchanged_shape)
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)

@ops.RegisterShape("Add")
@ops.RegisterShape("Complex")
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
@ops.RegisterShape("Maximum")
@ops.RegisterShape("Minimum")
@ops.RegisterShape("Mod")
@ops.RegisterShape("Mul")
tensorflow.python.framework.ops.RegisterShape
2,323
import tensorflow as tf

total_loss = (total_rpn_loss + total_fast_rcnn_loss + mask_loss +
              l2_regularization_loss)

host_call = None
if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = create_optimizer(learning_rate, params)
    optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)

    if not params['resnet_checkpoint']:
        scaffold_fn = None
    else:
tensorflow.contrib.tpu.CrossShardOptimizer
2,324
import tensorflow as tf

check_shape([qret, q_i], [[self.n_envs * self.n_steps]] * 2)
explained_variance = q_explained_variance(tf.reshape(q_i, [self.n_envs, self.n_steps]),
                                          tf.reshape(qret, [self.n_envs, self.n_steps]))
loss_q = tf.reduce_mean(tf.square(tf.stop_gradient(qret) - q_i) * 0.5)
tensorflow.reshape
2,325
from tensorflow.python.ops import array_ops

# Use static shape if known.
num_predictions = predictions_2d.get_shape().as_list()[0]

# Otherwise use dynamic shape.
if num_predictions is None:
  num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
    array_ops.expand_dims(array_ops.constant(thresholds), [1]),
    array_ops.pack([1, num_predictions]))

# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
    array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
    thresh_tiled)
pred_is_neg = math_ops.logical_not(pred_is_pos)
tensorflow.python.ops.array_ops.constant
2,326
import tensorflow as tf

    :param x: [Tensor] Input to the downsample.
    :param ksize: [list] 4-D kernel shape.
    :param strides: [list] 4-D strides array.
    :param padding: [string] Convolution padding strategy.
    :param data_format: [string] 'NHWC' or 'NCHW'.

    :return: [Tensor] Convolution output.
    """
    with tf.variable_scope(name):
        in_filters = ksize[2]
        out_filters = ksize[3]
        n = ksize[0] * ksize[1] * out_filters
        init = tf.truncated_normal_initializer(
            mean=0.0, stddev=np.sqrt(2.0 / n), seed=0, dtype=dtype)

        def _reg(x):
tensorflow.variable_scope
2,327
import tensorflow as tf

train_op = optimization.create_optimizer(
    total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

output_spec = tf.contrib.tpu.TPUEstimatorSpec(
    mode=mode,
    loss=total_loss,
tensorflow.contrib.tpu.TPUEstimatorSpec
2,328
import tensorflow as tf

for _ in range(num_power_iteration):
    d = _scale_l2(d, small_constant_for_finite_diff)
    d_logits = logits_from_embedding_fn(embedded + d)
    kl = _kl_divergence_with_logits(logits, d_logits, weights, num_classes)
    d, = tf.gradients(kl, d,
                      aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
    d = tf.stop_gradient(d)

perturb = _scale_l2(_mask_by_length(d, length), perturb_norm_length)
vadv_logits = logits_from_embedding_fn(embedded + perturb)
return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)
tensorflow.stop_gradient
2,329
import tensorflow as tf

def __init__(self, name, input_dim, channel_multiplier, k_h=4, k_w=4, d_h=2, d_w=2,
             stddev=0.02, data_format='NCHW', padding='SAME'):
    with tf.variable_scope(name):
        assert data_format == 'NCHW' or data_format == 'NHWC'
        self.w = tf.get_variable('w', [k_h, k_w, input_dim, channel_multiplier],
                                 initializer=tf.truncated_normal_initializer(stddev=stddev))
        self.b = tf.get_variable('b', [input_dim * channel_multiplier],
                                 initializer=tf.constant_initializer(0.0))
        if data_format == 'NCHW':
            self.strides = [1, 1, d_h, d_w]
        else:
            self.strides = [1, d_h, d_w, 1]
        self.data_format = data_format
tensorflow.constant_initializer
2,330
import tensorflow as tf

        sess.run(tf.global_variables_initializer())
        pc_lr_, pc_lr_true_, ned_lr_, ned_lr_true_ = sess.run(
            [pc_lr, pc_lr_true, ned_lr, ned_lr_true])
        self.assertEqual(pc_lr_, pc_lr_true_)
        self.assertEqual(ned_lr_, ned_lr_true_)

def test_get_gradient_clip_fn(self):  # pylint: disable=too-many-locals
    """Tests get_gradient_clip_fn.
    """
    default_grad_clip_fn = opt.get_gradient_clip_fn(
        opt.default_optimization_hparams()["gradient_clip"])
    self.assertIsNone(default_grad_clip_fn)

    grads = [tf.random_uniform([10, 10], -1., 1.) for _ in range(5)]
    grads_and_vars = list(zip(grads, range(5)))

    hparams = {
        "type": "clip_by_global_norm",
        "kwargs": {
            "clip_norm": 0.1
        }
    }
    gn_grad_clip_fn = opt.get_gradient_clip_fn(hparams)
    gn_grads_and_vars = gn_grad_clip_fn(grads_and_vars)
    gn_grads, _ = zip(*gn_grads_and_vars)
    gn_grads_true, _ = tf.clip_by_global_norm(
tensorflow.random_uniform
2,331
import tensorflow as tf

      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = (
      tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(
          tf.expand_dims(inv_timescales, 0), 0))
  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
  signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
  return signal
tensorflow.sin
2,332
import tensorflow as tf

import numpy as np
import random
from tensorflow.contrib import slim
from npu_bridge.estimator import npu_ops
from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig

tf.app.flags.DEFINE_integer('input_size', 512, '')
tf.app.flags.DEFINE_integer('batch_size_per_gpu', 14, '')
tf.app.flags.DEFINE_integer('num_readers', 16, '')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, '')
tf.app.flags.DEFINE_integer('max_steps', 100000, '')
tf.app.flags.DEFINE_integer('loss_scale', 1024, '')
tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
tf.app.flags.DEFINE_string('gpu_list', '1', '')
tensorflow.app.flags.DEFINE_integer
2,333
import tensorflow as tf

flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")
tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
tensorflow.flags.DEFINE_string
2,334
import tensorflow as tf

        coord.join()

def test_output_must_have_same_batch_dimension_size_as_input(self):
    with self.test_session() as session:
        @dynamic_batching.batch_fn
        def f(_):
            return tf.constant([1, 2, 3, 4])

        output = f(tf.constant([1]))

        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord)
tensorflow.constant
2,335
from tensorflow.python.framework import ops

      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.op_scope([value, bias], name, "BiasAddV1") as name:
    value = ops.convert_to_tensor(value, name="input")
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops._bias_add_v1(value, bias, name=name)

ops.RegisterShape("BiasAddV1")(common_shapes.bias_add_shape)
tensorflow.python.framework.ops.convert_to_tensor
2,336
import tensorflow as tf

def _create_model(self, train_triples):
    # Count unique items to determine embedding matrix sizes
    entity_cnt = len(set(train_triples[:, 0]).union(train_triples[:, 2]))
    rel_cnt = len(set(train_triples[:, 1]))
    init_sd = 1.0 / np.sqrt(self.embedding_size)

    # Embedding variables for all entities and relationship types
    entity_embedding_shape = [entity_cnt, self.embedding_size]
    # Relationship embeddings will be stored in flattened format to make
    # applying maxnorm constraints easier
    rel_embedding_shape = [rel_cnt, self.embedding_size * self.embedding_size]
    entity_init = tf.truncated_normal(entity_embedding_shape, stddev=init_sd)
    rel_init = tf.truncated_normal(rel_embedding_shape, stddev=init_sd)
    if self.maxnorm is not None:
        # Ensure maxnorm constraints are initially satisfied
        entity_init = dense_maxnorm(entity_init, self.maxnorm)
        rel_init = dense_maxnorm(rel_init, self.maxnorm)
    self.entity_embedding_vars = tf.Variable(entity_init)
    self.rel_embedding_vars = tf.Variable(rel_init)

    # Embedding layer for each (head, rel, tail) triple being fed in as input
    head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
    tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
    rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
tensorflow.truncated_normal
2,337
import tensorflow as tf

initial_query_points = search_space.sample(num_initial_points)
initial_observations = objective(initial_query_points.numpy(), sleep=False)
initial_data = Dataset(
    query_points=initial_query_points,
    observations=tf.constant(initial_observations, dtype=tf.float64),
)

import gpflow
tensorflow.constant
2,338
import tensorflow as tf

        for t in threads:
            t.start()
        for t in threads:
            t.join()
        vals = p.eval()
        ones = np.ones((1024, 1024)).astype(np.float32)
        self.assertAllEqual(vals, ones * 20)

# NOTE(mrry): See also
#   dense_update_ops_no_tsan_test.[...].testParallelAssignWithoutLocking,
# which contains a benign data race and must run without TSAN.
def testParallelAssignWithLocking(self):
    with self.test_session() as sess:
        zeros_t = tf.fill([1024, 1024], 0.0)
        ones_t = tf.fill([1024, 1024], 1.0)
        p = tf.Variable(zeros_t)
        assigns = [tf.assign(p, tf.mul(ones_t, float(i)), use_locking=True)
                   for i in range(1, 21)]
        p.initializer.run()

        def run_assign(assign_op):
            sess.run(assign_op)

        threads = [self.checkedThread(target=run_assign, args=(assign_op,))
                   for assign_op in assigns]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
tensorflow.fill
2,339
import tensorflow as tf

    top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1)  # [k, c + 1]
    loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels)  # [k]
    loss = tf.reduce_sum(loss)  # []

    return [candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts,
            top_span_ends, top_antecedents, top_antecedent_scores], loss

def get_span_emb(self, head_emb, context_outputs, span_starts, span_ends):
    span_emb_list = []

    span_start_emb = tf.gather(context_outputs, span_starts)  # [k, emb]
    span_emb_list.append(span_start_emb)

    span_end_emb = tf.gather(context_outputs, span_ends)  # [k, emb]
    span_emb_list.append(span_end_emb)

    span_width = 1 + span_ends - span_starts  # [k]

    if self.config["use_features"]:
tensorflow.gather
2,340
import tensorflow as tf

head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)

# Relationship vector acts as a translation in entity embedding space
diff_vec = tail_embed - (head_embed + rel_embed)

# negative dist so higher scores are better (important for pairwise loss)
if self.dist == 'manhattan':
    raw_output = -tf.reduce_sum(tf.abs(diff_vec), 1)
elif self.dist == 'euclidean':
    # +eps because gradients can misbehave for small values in sqrt
    raw_output = -tf.sqrt(tf.reduce_sum(tf.square(diff_vec), 1) + self.EPS)
elif self.dist == 'sqeuclidean':
    raw_output = -tf.reduce_sum(tf.square(diff_vec), 1)
else:
    raise Exception('Unknown distance type')

# Model output
self.output, self.loss = ranking_margin_objective(raw_output, self.margin)

# Optimization with postprocessing to limit embedding vars to L2 ball
self.train_step = self.opt.minimize(self.loss)

unique_ent_indices = tf.unique(tf.concat(0, [self.head_input, self.tail_input]))[0]
tensorflow.square
2,341
import tensorflow as tf

    top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.distance_pruning(top_span_emb, top_span_mention_scores, c)

dummy_scores = tf.zeros([k, 1])  # [k, 1]
for i in range(self.config["coref_depth"]):
    with tf.variable_scope("coref_layer", reuse=(i > 0)):
        top_antecedent_emb = tf.gather(top_span_emb, top_antecedents)  # [k, c, emb]
        top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb)  # [k, c]
        top_antecedent_weights = tf.nn.softmax(tf.concat([dummy_scores, top_antecedent_scores], 1))  # [k, c + 1]
        top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1), top_antecedent_emb], 1)  # [k, c + 1, emb]
        attended_span_emb = tf.reduce_sum(tf.expand_dims(top_antecedent_weights, 2) * top_antecedent_emb, 1)  # [k, emb]
        with tf.variable_scope("f"):
            f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1), util.shape(top_span_emb, -1)))  # [k, emb]
            top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb  # [k, emb]

top_antecedent_scores = tf.concat([dummy_scores, top_antecedent_scores], 1)  # [k, c + 1]

top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents)  # [k, c]
top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask)))  # [k, c]
tensorflow.expand_dims
2,342
import tensorflow as tf

    direct_mask_un = tf.greater(unhead_idxs, undep_idxs)  # [bs, sluh, sld]
else:
    direct_mask_un = tf.less(unhead_idxs, undep_idxs)  # [bs, sluh, sld]
# [bs, sluh, sld]
rep_mask_tile_un = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_unhead_mask, 2))
pooling_mask = tf.logical_and(direct_mask_un, rep_mask_tile_un)  # [bs, sluh, sld]

# data for pooling
pooling_data = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_unhead, 1, 1])  # bs,sluh,sld,hn

# execute mean pooling based on pooling_mask[bs, sluh, sld] and pooling_data[bs,sluh,sld,hn]
pooling_data = mask_for_high_rank(pooling_data, pooling_mask)  # [bs,sluh,sld,hn]
pooling_data_sum = tf.reduce_sum(pooling_data, -2)  # [bs,sluh,hn]
pooling_den = tf.reduce_sum(tf.cast(pooling_mask, tf.int32), -1, keep_dims=True)  # [bs,sluh]
pooling_den = tf.where(tf.equal(pooling_den, 0), tf.ones_like(pooling_den), pooling_den)

pooling_result = pooling_data_sum / tf.cast(pooling_den, tf.float32)
return pooling_result

def scaled_tanh(x, scale=5.):
    return scale * tf.nn.tanh(1. / scale * x)
tensorflow.cast
2,343
import tensorflow as tf

        "lstm_params",
        initializer=tf.random_uniform(
            [params_size_t], -config.init_scale, config.init_scale),
        validate_shape=False)
    c = tf.zeros([config.num_layers, self.batch_size, config.hidden_size], tf.float32)
    h = tf.zeros([config.num_layers, self.batch_size, config.hidden_size], tf.float32)
    self._initial_state = (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),)
    outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training)
    outputs = tf.transpose(outputs, [1, 0, 2])
    outputs = tf.reshape(outputs, [-1, config.hidden_size])
    return outputs, (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),)

def _get_lstm_cell(self, config, is_training):
    # if config.rnn_mode == BASIC:
    #     return tf.contrib.rnn.BasicLSTMCell(
    #         config.hidden_size, forget_bias=0.0, state_is_tuple=True,
    #         reuse=not is_training)
    # if config.rnn_mode == BLOCK:
    #     return tf.contrib.rnn.LSTMBlockCell(
    #         config.hidden_size, forget_bias=0.0)
tensorflow.reshape
2,344
import tensorflow as tf

with tf.variable_scope(name, reuse=reuse):
    layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
    layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg)

    lstm_c = tf.nn.rnn_cell.LSTMCell(num_units=256)
    lstm_c = tf.nn.rnn_cell.DropoutWrapper(lstm_c, output_keep_prob=self.keep_prob)
    state_init_c = lstm_c.zero_state(batch_size=batch_size, dtype=tf.float32)
    lstm_cin = tf.expand_dims(layer_c2, axis=1)
    out_c, state_final_c = tf.nn.dynamic_rnn(cell=lstm_c, inputs=lstm_cin, initial_state=state_init_c)
    cell_out_c = tf.reshape(out_c, [-1, 256])
    vf = tf.layers.dense(cell_out_c, 1, kernel_regularizer=reg)

    params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return vf, params, state_init_c, state_final_c
tensorflow.expand_dims
2,345
import tensorflow as tf

logger.info('Tasks dump!')
assert task_generator == 'fixed'
test_summary['task'].append(task.goal_velocity)

if FLAGS.task.reset_policy:
    # NOTE: reset policy and valuefunc
    logger.info("Resetting Policy")
    pol_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())])
    tf.get_default_session().run(tf.variables_initializer(policy.parameters()))
    pol_params_after = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())])
    print("pol_params:", np.linalg.norm(pol_params),
          "pol_params_after_reset:", np.linalg.norm(pol_params_after))
    logger.info("Resetting Valuefunc")
    tf.get_default_session().run(tf.variables_initializer(vfn.parameters()))

tf.get_default_session().run(tf.variables_initializer(warmup_policy.parameters()))
tf.get_default_session().run(tf.variables_initializer(warmup_vfn.parameters()))
tensorflow.get_default_session
2,346
from tensorflow.python.framework import tensor_util

if input_shape.ndims is None:
  return [tensor_shape.unknown_shape()]
elif input_shape.ndims <= 1:
  return [tensor_shape.scalar()]

dimension = tensor_util.ConstantValue(op.inputs[1])
if dimension is None:
  return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)]
elif 0 <= dimension and dimension < input_shape.ndims:
  returned_shape = []
tensorflow.python.framework.tensor_util.ConstantValue
2,347
import tensorflow as tf

    # Ensure maxnorm constraints are initially satisfied
    entity_init = dense_maxnorm(entity_init, self.maxnorm)
    rel_init = dense_maxnorm(rel_init, self.maxnorm)
self.entity_embedding_vars = tf.Variable(entity_init)
self.rel_embedding_vars = tf.Variable(rel_init)

# Embedding layer for each (head, rel, tail) triple being fed in as input
head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)

# Reshape rel_embed into square D x D matrices
rel_embed_square = tf.reshape(rel_embed, (-1, self.embedding_size, self.embedding_size))

# Reshape head_embed and tail_embed to be suitable for the matrix multiplication
tensorflow.nn.embedding_lookup
2,348
import tensorflow as tf

_x = x[:, :, :, ::-1]
tf.summary.image('x', _x, 4)

summary_op = tf.summary.merge_all()

epoch_learning_rate = init_learning_rate
for epoch in range(1, total_epochs + 1):
tensorflow.summary.merge_all
2,349
import tensorflow as tf

# step3: Let's get serious and build the neural network
# ------------------------------------------------------
# [none, 128, 9]
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)

# Weight Initialization
def weight_variable(shape):
tensorflow.reshape
2,350
from tensorflow.python.feature_column import feature_column_lib as core_feature_column

model = estimator.GradientBoostedDecisionTreeEstimator(
    head=head_fn,
    learner_config=learner_config,
    num_trees=1,
    examples_per_layer=3,
    model_dir=model_dir,
    config=config,
    feature_columns=[core_feature_column.numeric_column("x")],
    use_core_libs=True)

model.fit(input_fn=_train_input_fn, steps=15)
model.evaluate(input_fn=_eval_input_fn, steps=1)
model.export(self._export_dir_base)
tensorflow.python.feature_column.feature_column_lib.numeric_column
2,351
import tensorflow as tf

        normed: batch-normalized maps
    """
    with tf.variable_scope(scope) as sc:
        num_channels = inputs.get_shape()[-1].value
        beta = tf.Variable(tf.constant(0.0, shape=[num_channels]),
                           name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]),
                            name='gamma', trainable=True)
        batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
        decay = bn_decay if bn_decay is not None else 0.9
        ema = tf.train.ExponentialMovingAverage(decay=decay)
        # Operator that maintains moving averages of variables.
        ema_apply_op = tf.cond(is_training,
                               lambda: ema.apply([batch_mean, batch_var]),
                               lambda: tf.no_op())

        # Update moving average and return current batch's avg and var.
        def mean_var_with_update():
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        # ema.average returns the Variable holding the average of var.
        mean, var = tf.cond(is_training,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)
    return normed
tensorflow.no_op
2,352
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('dataset', '', 'cifar10 or cifar100.')
tf.app.flags.DEFINE_string('mode', 'train', 'train or eval.')
tf.app.flags.DEFINE_string('train_data_path', '', 'Filepattern for training data.')
tf.app.flags.DEFINE_string('eval_data_path', '', 'Filepattern for eval data')
tensorflow.app.flags.DEFINE_string
2,353
from tensorflow.contrib.learn.python.learn import session_run_hook

    self._last_step = run_context.session.run(self._global_step_tensor) + 1
  request = {self._global_step_tensor: self._global_step_tensor}
  monitor_fetches = []
  for m in self._monitors:
    monitor_requests = m.step_begin(self._last_step)
    if monitor_requests:
      if not isinstance(monitor_requests, list):
        raise ValueError("Monitor.step_begin should return a list.")
      monitor_fetches.extend(monitor_requests)
  if monitor_fetches:
    request["monitors"] = dict(
        zip(monitor_fetches, [_as_graph_element(f) for f in monitor_fetches]))
  return session_run_hook.SessionRunArgs(request)

def after_run(self, run_context, run_values):
  result = run_values.results[
      "monitors"] if "monitors" in run_values.results else {}
  for m in self._monitors:
    induce_stop = m.step_end(self._last_step, result)
    if induce_stop:
      run_context.request_stop()

  for m in self._monitors:
    m.post_step(self._last_step, run_context.session)

  self._last_step = run_values.results[self._global_step_tensor] + 1
tensorflow.contrib.learn.python.learn.session_run_hook.SessionRunArgs
2,354
import tensorflow as tf

else:
    y = sbnet_module.sparse_scatter(
        q,
        indices.bin_counts,
        indices.active_block_indices,
        x,
        dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32),
        dynamic_bstride=tf.constant(block_params.bsize_out, dtype=tf.int32),
        dynamic_boffset=tf.constant([0, 0], dtype=tf.int32),
        add=False,
        transpose=transpose,
        atomic=atomic)
return y
tensorflow.constant
2,355
import tensorflow as tf

        self.assertAllEqual(x - y, var_value)
        self.assertAllEqual(x - y, op_value)

def testBasic(self):
    self._testTypes(np.arange(0, 20).reshape([4, 5]))

def testAssignNonStrictShapeChecking(self):
    with self.test_session():
        data = tf.fill([1024, 1024], 0)
        p = tf.Variable([1])
        a = tf.assign(p, data, validate_shape=False)
        a.op.run()
        self.assertAllEqual(p.eval(), data.eval())

        # Assign to yet another shape
        data2 = tf.fill([10, 10], 1)
        a2 = tf.assign(p, data2, validate_shape=False)
        a2.op.run()
        self.assertAllEqual(p.eval(), data2.eval())

def testInitRequiredAssignAdd(self):
    with self.test_session():
        p = tf.Variable(tf.fill([1024, 1024], 1), tf.int32)
        a = tf.assign_add(p, tf.fill([1024, 1024], 0))
        with self.assertRaisesOpError("use uninitialized"):
            a.op.run()

def testInitRequiredAssignSub(self):
    with self.test_session():
tensorflow.fill
2,356
import tensorflow as tf

             stddev=0.02, data_format='NCHW', padding='SAME'):
    with tf.variable_scope(name):
        assert data_format == 'NCHW' or data_format == 'NHWC'
        self.w = tf.get_variable('w', [k_h, k_w, input_dim, channel_multiplier],
                                 initializer=tf.truncated_normal_initializer(stddev=stddev))
        self.b = tf.get_variable('b', [input_dim * channel_multiplier],
                                 initializer=tf.constant_initializer(0.0))
        if data_format == 'NCHW':
            self.strides = [1, 1, d_h, d_w]
tensorflow.truncated_normal_initializer
2,357
import tensorflow as tf

        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].shape)

def testDynamicAttentionDecoder2(self):
    with self.test_session() as sess:
        with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
            cell = tf.nn.rnn_cell.GRUCell(2)
            inp = tf.constant(0.5, shape=[2, 2, 2])
            enc_outputs, enc_state = tf.nn.dynamic_rnn(cell, inp, dtype=tf.float32)
            attn_states = enc_outputs
            dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
            dec, mem = tf.nn.seq2seq.attention_decoder(
                dec_inp, enc_state, attn_states, cell, output_size=4,
tensorflow.constant
2,358
import tensorflow as tf

output_bias = tf.get_variable(
    "output_bias", [num_labels], initializer=tf.zeros_initializer())

with tf.variable_scope("loss"):
    if is_training:
        # I.e., 0.1 dropout
        output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
tensorflow.matmul
2,359
from tensorflow.python.framework import ops

      output_dir=eval_dir,
      checkpoint_path=checkpoint_path,
      eval_dict=eval_dict,
      global_step_tensor=global_step,
      supervisor_master=self._config.master,
      feed_fn=feed_fn,
      max_steps=steps)
  return eval_results

def _infer_model(self, x, batch_size=None, axis=None, proba=False):
  # Converts inputs into tf.DataFrame / tf.Series.
  batch_size = -1 if batch_size is None else batch_size
  input_fn, feed_fn = _get_predict_input_fn(x, batch_size)
  checkpoint_path = saver.latest_checkpoint(self._model_dir)
  with ops.Graph().as_default() as g:
    random_seed.set_random_seed(self._config.tf_random_seed)
    contrib_framework.create_global_step(g)
    features, _ = input_fn()
    feed_dict = feed_fn() if feed_fn is not None else None
    predictions = self._get_predict_ops(features)
    if not isinstance(predictions, dict):
      predictions = {'predictions': predictions}
    # TODO(ipolosukhin): Support batching
    return infer(checkpoint_path, predictions, feed_dict=feed_dict)

class Estimator(BaseEstimator):
  """Estimator class is the basic TensorFlow model trainer/evaluator.
tensorflow.python.framework.ops.Graph
2,360
import tensorflow as tf

U = tf.reshape(flat_logits, [-1, num_acts, self.k])

# Calculate w
cut_g = tf.stop_gradient(self.g)
cut_g = tf.expand_dims(cut_g, [1])
gstack = tf.concat([self.prev_g, cut_g], axis=1)

self.last_c_g = gstack[:, 1:]
# print self.last_c_g

gsum = tf.reduce_sum(gstack, axis=1)
phi = tf.get_variable("phi", (self.g_dim, self.k))
w = tf.matmul(gsum, phi)
w = tf.expand_dims(w, [2])

# Calculate policy and sample
logits = tf.reshape(tf.matmul(U, w), [-1, num_acts])
self.pi = tf.nn.softmax(logits)
self.log_pi = tf.nn.log_softmax(logits)
self.sample = policy_utils.categorical_sample(
    tf.reshape(logits, [-1, num_acts]), num_acts)[0, :]

def build_value(self, _input):
tensorflow.get_variable
2,361
import tensorflow as tf

        self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
        self.assertEqual(
            len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1)
        self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)

def testUpdateCheckpointState(self):
    save_dir = self._TestDir("update_checkpoint_state")
    os.chdir(save_dir)
    # Make a temporary train directory.
    train_dir = "train"
    os.mkdir(train_dir)
    abs_path = os.path.join(save_dir, "model-0")
    rel_path = "train/model-2"
    tf.train.update_checkpoint_state(
        train_dir, rel_path, all_model_checkpoint_paths=[abs_path, rel_path])
    ckpt = tf.train.get_checkpoint_state(train_dir)
    self.assertEqual(ckpt.model_checkpoint_path, rel_path)
    self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
    self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path)
    self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path)

class MetaGraphTest(tf.test.TestCase):
tensorflow.train.update_checkpoint_state
2,362
import tensorflow as tf

    Args:
        batch: A batch of images and labels.

    Returns:
        The same batch where cutout has been applied to the images.
    """
    length, replace = FLAGS.cutout_length, 0.0
    images, labels = batch['image'], batch['label']
    num_channels = tf.shape(images)[3]
    image_height, image_width = tf.shape(images)[1], tf.shape(images)[2]

    cutout_center_height = tf.random.uniform(
        shape=[], minval=0, maxval=image_height, dtype=tf.int32)
    cutout_center_width = tf.random.uniform(
        shape=[], minval=0, maxval=image_width, dtype=tf.int32)

    lower_pad = tf.maximum(0, cutout_center_height - length // 2)
    upper_pad = tf.maximum(0, image_height - cutout_center_height - length // 2)
    left_pad = tf.maximum(0, cutout_center_width - length // 2)
    right_pad = tf.maximum(0, image_width - cutout_center_width - length // 2)

    cutout_shape = [image_height - (lower_pad + upper_pad),
                    image_width - (left_pad + right_pad)]
    padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
    mask = tf.pad(
tensorflow.random.uniform
2,363
import tensorflow as tf

        the reduced dimension is retained with length 1.

    Returns
    -------
    A tensor with sum of x.
    """
    axis = _normalize_axis(axis, get_ndim(x))
    return tf.reduce_sum(x, axis=axis, keep_dims=keepdims)

# TODO(rbharath): Need to rename this. This makes a variable, not just creates
# a tensor. Confusing with tf.zeros...
def zeros(shape, dtype=tf.float32, name=None):
    """Instantiates an all-zeros variable and returns it.
tensorflow.reduce_sum
2,364
import tensorflow as tf

widths, x_centers = tf.meshgrid(widths, x_centers)
heights, y_centers = tf.meshgrid(heights, y_centers)
tensorflow.meshgrid
2,365
from tensorflow.python.ops import math_ops

  return x

def _introspect_ndims(self, ndims):
  """Helper to establish some properties of input ndims args."""
  if self._is_all_constant_helper(ndims):
    return (tensor_util.constant_value(ndims),
            tensor_util.constant_value(ndims) == 0)
  return None, math_ops.equal(ndims, 0)
tensorflow.python.ops.math_ops.equal
2,366
import tensorflow as tf

    tf.expand_dims(tf.expand_dims(direct_mask, 0), 0),
    [bs, bn, 1, 1])  # bs,bn,bl,bl
rep_mask_tile_1 = tf.tile(tf.expand_dims(rep_mask_split, 2), [1, 1, bl, 1])  # bs,bn,bl,bl
rep_mask_tile_2 = tf.tile(tf.expand_dims(rep_mask_split, 3), [1, 1, 1, bl])  # bs,bn,bl,bl
rep_mask_tile = tf.logical_and(rep_mask_tile_1, rep_mask_tile_2)
attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile, name='attn_mask')  # bs,bn,bl,bl

# attention
f_bias = tf.get_variable('f_bias', [ivec], tf.float32, tf.constant_initializer(0.))
dependent_head = linear(
    rep_map, 2 * ivec, False, 0., 'linear_dependent_head', False,
    wd, keep_prob, is_train)  # bs,bn,bl,2vec
dependent, head = tf.split(dependent_head, 2, 3)
dependent_etd = tf.expand_dims(dependent, 2)  # bs,bn,1,bl,vec
head_etd = tf.expand_dims(head, 3)  # bs,bn,bl,1,vec
logits = scaled_tanh(dependent_etd + head_etd + f_bias, 5.0)  # bs,bn,bl,bl,vec
logits_masked = exp_mask_for_high_rank(logits, attn_mask)
attn_score = tf.nn.softmax(logits_masked, 3)  # bs,bn,bl,bl,vec
attn_score = mask_for_high_rank(attn_score, attn_mask)  # bs,bn,bl,bl,vec
self_attn_result = tf.reduce_sum(attn_score * rep_map_tile, 3)  # bs,bn,bl,vec

with tf.variable_scope('source2token_self_attn'):
    inter_block_logits = bn_dense_layer(self_attn_result, ivec, True, 0.,
                                        'bn_dense_map', 'linear', False,
                                        wd, keep_prob, is_train)  # bs,bn,bl,vec
tensorflow.expand_dims
2,367
import tensorflow as tf

self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)
self.action = tf.squeeze(self.normal_dist.sample(1), axis=0)
self.action = tf.clip_by_value(self.action, action_bound[0], action_bound[1])

# Loss and train op
self.loss = -self.normal_dist.log_prob(self.a_his) * self.target
# Add cross entropy cost to encourage exploration
self.loss -= entropy_beta * self.normal_dist.entropy()

self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.grads_and_vars = self.optimizer.compute_gradients(self.loss)

self.grads = []
self.vars = []
for i in range(len(self.grads_and_vars)):
    self.grads.append(self.grads_and_vars[i][0])
    self.vars.append(self.grads_and_vars[i][1])

self.grads = self.grads[-1 * NUM_VARS:]
tensorflow.train.AdamOptimizer
2,368
import tensorflow as tf

    return contrib_layers.layer_norm(
        inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)

def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
    """Runs layer normalization followed by dropout."""
    output_tensor = layer_norm(input_tensor, name)
    output_tensor = dropout(output_tensor, dropout_prob)
    return output_tensor

def create_initializer(initializer_range=0.02):
    """Creates a `truncated_normal_initializer` with the given range."""
    return tf.truncated_normal_initializer(stddev=initializer_range)

def get_timing_signal_1d_given_position(channels,
                                        position,
                                        min_timescale=1.0,
                                        max_timescale=1.0e4):
    """Get sinusoids of diff frequencies, with timing position given.

    Adapted from add_timing_signal_1d_given_position in
    //third_party/py/tensor2tensor/layers/common_attention.py

    Args:
tensorflow.truncated_normal_initializer
2,369
import tensorflow as tf

elif optimizer == 'momentum':
    train_op = tf.train.MomentumOptimizer(learning_rate, momentum)
tensorflow.train.MomentumOptimizer
2,370
import tensorflow as tf

with tf.device('/cpu:0'), tf.name_scope("embedding_head"):
    W_head = tf.get_variable("embed_W_head",
                             [num_quantized_chars, embedding_size],
                             initializer=initializer)
    embedded_head = tf.nn.embedding_lookup(W_head, self.input_head)
    embedded_head_expanded = tf.expand_dims(embedded_head, -1)
tensorflow.nn.embedding_lookup
2,371
import tensorflow as tf

    return video_num_input_frames, video_num_target_frames

@gin.configurable(module='trax.data', denylist=['dataset', 'training'])
def bair_robot_pushing_preprocess(dataset, training):
    """Pre-processing function that concatenates input and target frames."""
    del training

    def concat_and_add_mask(features, targets):
        """Concatenate input and output frames to form a language modeling setup."""
        inp = features['inputs']
        concat = tf.concat([inp, targets], axis=0)
        mask = tf.concat([tf.zeros_like(inp), tf.ones_like(targets)], axis=0)
        concat = tf.reshape(concat, (-1,))
        mask = tf.reshape(mask, (-1,))
        concat = tf.cast(concat, tf.int32)
        mask = tf.cast(mask, tf.float32)
        features['inputs'] = features['targets'] = concat
        features['mask'] = mask
        return features, concat

    dataset = dataset.map(concat_and_add_mask)
    return dataset

def sentencepiece_tokenize(stream, spm_path=None, extra_ids=0):
tensorflow.reshape
2,372
import tensorflow as tf

    with tf.control_dependencies([update_means]):
        loss += self.hparams.beta * e_loss
else:
    # Use a gradient based loss for learning the cluster centers
    loss += q_loss + self.hparams.beta * e_loss

# Get the discrete latent representation
x_means_idx = tf.argmax(x_means_hot, axis=-1)

# Get the binary representation
num_bits = int(self.hparams.z_size // self.hparams.num_blocks)
x_means_bits = self.int_to_bit(x_means_idx, num_bits=num_bits, base=2)
x_discrete = self.bit_to_int(
    tf.to_int32(x_means_bits), num_bits=self.hparams.z_size, base=2)
tensorflow.argmax
2,373
import tensorflow as tf

acc_test = []  # store test accuracy for each epoch

if actL == 'sigmoid':  # accuracy score for binary class classification
    Yp = tf.greater(an, 0.5)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(Yp, tf.equal(Y, 1.0)), "float"))
elif actL == 'esp' or actL == 'relu':  # r2 score
    norm = tf.reduce_mean(tf.squared_difference(Y, tf.reduce_mean(Y)))
    accuracy = 1 - tf.divide(tf.reduce_mean(tf.squared_difference(an, Y)), norm)
elif actL == 'softmax':  # accuracy score for multiclass classification
    Yp = tf.sigmoid(betan * hn)
    correct = tf.equal(tf.argmax(Yp), tf.argmax(Y))
    accuracy = tf.reduce_mean(tf.cast(correct, "float"))

# ----------------- Initialize the graph and start the session -----------------
init = tf.global_variables_initializer()

with tf.Session() as sess:
    # Run the initialization
    sess.run(init)
    jj = 0
tensorflow.sigmoid
2,374
import tensorflow as tf

assert self._candidate_task_type == "tasks"
candidate_tasks = tasks

actions = experience.action[:, 0]
num_tasks = tasks.shape[0]
batch_size = states.shape[0]
task_dim = tasks.shape[1]
obs_dim = states.shape[1]
action_dim = actions.shape[1]
action_spec = self._actor.output_tensor_spec

states_tiled = tf.tile(states[:, None], [1, num_tasks, 1])  # B x B x D
states_tiled = tf.reshape(states_tiled,
                          [batch_size * num_tasks, obs_dim])  # B*B x D
actions_tiled = tf.tile(actions[:, None], [1, num_tasks, 1])  # B x B x D
actions_tiled = tf.reshape(actions_tiled,
                           [batch_size * num_tasks, action_dim])  # B*B x D
tasks_tiled = tf.tile(tasks[None], [batch_size, 1, 1])  # B x B x D
tasks_tiled = tf.reshape(tasks_tiled,
                         [batch_size * num_tasks, task_dim])  # B*B x D

next_states_tiled = tf.tile(next_states[:, None], [1, num_tasks, 1])
tensorflow.tile
2,375
import tensorflow as tf

    logits=logits, labels=index)
log_prob += curr_log_prob
curr_ent = tf.stop_gradient(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=tf.nn.softmax(logits)))
entropy += curr_ent

prev_layers.append(anchors.read(tf.reduce_sum(index)))
inputs = prev_layers[-1]

for i in range(2):  # op_1, op_2
    next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
tensorflow.reduce_sum
2,376
from tensorflow.python.framework import ops

@ops.RegisterShape("SegmentMax")
@ops.RegisterShape("SegmentMean")
@ops.RegisterShape("SegmentMin")
@ops.RegisterShape("SegmentProd")
@ops.RegisterShape("SegmentSum")
def _SegmentReductionShape(op):
  """Common shape function for segment reduction ops."""
  data_shape = op.inputs[0].get_shape()
  segment_ids_shape = op.inputs[1].get_shape()
tensorflow.python.framework.ops.RegisterShape
2,377
import tensorflow as tf

            downsample_layers
        )

    return downsample_layers

def instantiate_discriminator_logits_layer(self):
    """Instantiates discriminator flatten and logits layers.

    Returns:
        Flatten and logits layers of discriminator.
    """
    with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
        # Flatten layer to ready final block conv tensor for dense layer.
        flatten_layer = tf.layers.Flatten(
            name="{}_flatten_layer".format(self.name)
        )
        print_obj(
            "\ncreate_discriminator_logits_layer",
            "flatten_layer",
            flatten_layer
        )
tensorflow.variable_scope
2,378
import tensorflow as tf

dataset = tf.data.Dataset.from_tensors(data).repeat(
tensorflow.data.Dataset.from_tensors
2,379
import tensorflow as tf

tf.flags.DEFINE_string('model', 'trivial', 'name of the model to run')

# The code will first check if it's running under benchmarking mode
# or evaluation mode, depending on FLAGS.eval:
# Under the evaluation mode, this script will read a saved model,
# and compute the accuracy of the model against a validation dataset.
# Additional ops for accuracy and top_k predictors are only used under this
# mode.
# Under the benchmarking mode, user can specify whether or not to use
# the forward-only option, which will only compute the loss function.
# forward-only cannot be enabled with eval at the same time.
tf.flags.DEFINE_boolean('eval', False, 'whether use eval or benchmarking')
tf.flags.DEFINE_boolean('forward_only', False,
                        """whether use forward-only or training for benchmarking""")
tf.flags.DEFINE_integer('batch_size', 0, 'batch size per compute device')
tf.flags.DEFINE_integer('num_batches', 100,
                        'number of batches to run, excluding warmup')
tf.flags.DEFINE_integer('num_warmup_batches', None,
                        'number of batches to run before timing')
tf.flags.DEFINE_integer('autotune_threshold', None,
                        'The autotune threshold for the models')
tf.flags.DEFINE_integer('num_gpus', 1, 'the number of GPUs to run on')
tf.flags.DEFINE_integer('display_every', 10,
                        """Number of local steps after which progress is printed
tensorflow.flags.DEFINE_boolean
2,380
from tensorflow.contrib.learn.python.learn.graph_actions import evaluate

global_step = contrib_framework.create_global_step(g)
features, targets = input_fn()
self._check_inputs(features, targets)
eval_dict = self._get_eval_ops(features, targets,
                               metrics or self._get_default_metric_functions())
eval_results, _ = evaluate(
    graph=g,
    output_dir=eval_dir,
    checkpoint_path=checkpoint_path,
    eval_dict=eval_dict,
tensorflow.contrib.learn.python.learn.graph_actions.evaluate
2,381
import tensorflow as tf

if type(a[0]) == tf.Tensor:
    return tf.stack(a, 0)
tensorflow.stack
2,382
import tensorflow as tf

    save._add_collection_def(meta_graph_def, "int_collection")
    self.assertEqual(len(meta_graph_def.collection_def), 0)

def _testMultiSaverCollectionSave(self):
    test_dir = self._TestDir("saver_collection")
    filename = os.path.join(test_dir, "metafile")
    saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
    saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
    with self.test_session(graph=tf.Graph()) as sess:
        # Creates a graph.
        v0 = tf.Variable(10.0, name="v0")
        v1 = tf.Variable(11.0, name="v1")
        # Creates 2 savers.
        saver0 = tf.train.Saver({"v0": v0}, name="saver0")
        saver1 = tf.train.Saver({"v1": v1}, name="saver1")
        tf.add_to_collection("savers", saver0)
        tf.add_to_collection("savers", saver1)
        tf.initialize_all_variables().run()
        # Saves to different checkpoints.
        saver0.save(sess, saver0_ckpt)
        saver1.save(sess, saver1_ckpt)
        # Generates MetaGraphDef.
        meta_graph_def = tf.train.export_meta_graph(filename)
        meta_graph_def0 = saver0.export_meta_graph()
        meta_graph_def1 = saver1.export_meta_graph()
        # Verifies that there is no saver_def in meta_graph_def.
        self.assertFalse(meta_graph_def.HasField("saver_def"))
tensorflow.train.Saver
2,383
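Each tf.train.Saver saves and restores only the variables handed to it, which is what the multi-saver test above relies on. A minimal sketch, assuming TensorFlow 1.x; the /tmp paths are illustrative:

import tensorflow as tf

v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(11.0, name="v1")
saver0 = tf.train.Saver({"v0": v0}, name="saver0")  # tracks only v0
saver1 = tf.train.Saver({"v1": v1}, name="saver1")  # tracks only v1

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver0.save(sess, "/tmp/saver0.ckpt")
    saver1.save(sess, "/tmp/saver1.ckpt")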
import tensorflow as tf self.assertTrue(tf.contrib.util.constant_value(mvn.is_scalar_batch())) mvn = tfd.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True) self.assertFalse(tf.contrib.util.constant_value(mvn.is_scalar_event())) self.assertFalse(tf.contrib.util.constant_value(mvn.is_scalar_batch())) # We now test every codepath within the underlying is_scalar_helper # function. # Test case 1, 2. x = tf.placeholder_with_default(input=1, shape=[]) # None would fire an exception were it actually executed. self.assertTrue(normal._is_scalar_helper(x.shape, lambda: None)) self.assertTrue( normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x))) x = tf.placeholder_with_default(input=[1], shape=[1]) # None would fire an exception were it actually executed. self.assertFalse(normal._is_scalar_helper(x.shape, lambda: None)) self.assertFalse(
tensorflow.placeholder_with_default
2,384
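tf.placeholder_with_default behaves like a constant unless a value is fed at run time, which is why the test above can probe shape codepaths without executing anything. A runnable sketch, TensorFlow 1.x assumed:

import tensorflow as tf

x = tf.placeholder_with_default(input=1, shape=[])
y = x * 10

with tf.Session() as sess:
    print(sess.run(y))          # 10: the default value is used
    print(sess.run(y, {x: 5}))  # 50: a fed value overrides the default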
from tensorflow.python.framework import ops for input_tensor in inputs: op = state_ops.assign_add(var, input_tensor, use_locking=True) update_ops.append(op) with ops.control_dependencies(update_ops): return gen_state_ops._destroy_temporary_variable(var, var_name=var_name, name=name) @ops.RegisterShape("BatchMatMul") def _BatchMatMulShape(op): """Shape function for BatchMatMul op.""" a_shape = op.inputs[0].get_shape() adj_a = op.get_attr("adj_x") b_shape = op.inputs[1].get_shape() adj_b = op.get_attr("adj_y") if not a_shape.is_fully_defined() or not b_shape.is_fully_defined(): return [tensor_shape.unknown_shape()]
tensorflow.python.framework.ops.RegisterShape
2,385
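ops.RegisterShape is the decorator early TensorFlow used to attach Python shape-inference functions to op types, as the BatchMatMul shape function above does; later 1.x releases moved shape inference to C++ and restricted the decorator. A sketch of the registration pattern only, assuming an early release that still accepts Python shape functions; "MyCustomOp" is a hypothetical op type name:

from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape

@ops.RegisterShape("MyCustomOp")  # hypothetical op type
def _my_custom_op_shape(op):
    """Shape function: the output has the same shape as the first input."""
    a_shape = op.inputs[0].get_shape()
    if not a_shape.is_fully_defined():
        return [tensor_shape.unknown_shape()]
    return [a_shape]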
import tensorflow as tf with tf.variable_scope('eval_net'): a_fc1 = tf.layers.dense(self.s, 128, tf.nn.relu, kernel_initializer=w_initializer,
tensorflow.layers.dense
2,386
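tf.layers.dense creates (or reuses, per the enclosing variable scope) a weight matrix and bias and applies the given activation. A minimal sketch, TensorFlow 1.x assumed; the input width, unit count, and initializer are illustrative:

import tensorflow as tf

s = tf.placeholder(tf.float32, [None, 4], name='state')
w_initializer = tf.random_normal_initializer(0., 0.3)
with tf.variable_scope('eval_net'):
    # Fully connected layer: 4 -> 128 units with ReLU.
    a_fc1 = tf.layers.dense(s, 128, tf.nn.relu,
                            kernel_initializer=w_initializer)
print(a_fc1.shape)  # (?, 128)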
from tensorflow.python.framework import tensor_shape cannot be inferred. """ if tensor_dtype is None: if not inputs or not isinstance(inputs, (list, tuple)): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs) if not all(isinstance(x, ops.Tensor) for x in inputs): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") if not all(x.dtype == inputs[0].dtype for x in inputs): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") tensor_dtype = inputs[0].dtype if shape is not None: shape = tensor_shape.as_shape(shape) else: shape = tensor_shape.unknown_shape() for input_tensor in inputs: if isinstance(input_tensor, ops.Tensor): shape = shape.merge_with(input_tensor.get_shape()) if not shape.is_fully_defined(): # TODO(pbar): Make a version of assign_add that accepts an uninitialized # lvalue, and takes its shape from that? This would allow accumulate_n to # work in all situations that add_n currently works. raise ValueError("Cannot infer the shape of the accumulator for " "accumulate_n. Pass the shape argument, or set the shape " "of at least one of the inputs.") with ops.op_scope(inputs, name, "AccumulateN") as name: var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)
tensorflow.python.framework.tensor_shape.as_shape
2,387
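tensor_shape.as_shape coerces lists and tuples (None marking unknown dimensions) into TensorShape objects, which merge_with then unifies; this is exactly the machinery accumulate_n uses above to infer the accumulator shape. A runnable sketch, TensorFlow 1.x assumed:

from tensorflow.python.framework import tensor_shape

shape = tensor_shape.as_shape([2, None, 3])
print(shape.is_fully_defined())   # False: the middle dimension is unknown

# merge_with fills unknown dimensions from a compatible shape.
merged = shape.merge_with([2, 5, 3])
print(merged.is_fully_defined())  # True
print(merged.as_list())           # [2, 5, 3]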
import tensorflow as tf def predict(self, queries): image_size = self._train_params['image_size'] images = utils.dataset.transform_images(queries, image_size=image_size, mode='RGB') with self._graph.as_default(): probs = self._predict_with_model(images, **self._knobs) return probs.tolist() def dump_parameters(self): params = {} # Add train params params['train_params'] = json.dumps(self._train_params) # Add model parameters with self._graph.as_default(): tf_vars = tf.global_variables() values = self._sess.run(tf_vars) for (tf_var, value) in zip(tf_vars, values): params[tf_var.name] = np.asarray(value) # Add an ID for diffing vars_id = np.random.rand() params['vars_id'] = vars_id # Memo ID TfEnas._loaded_tf_vars_id_memo = vars_id return params
tensorflow.global_variables
2,388
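tf.global_variables returns every variable in the GLOBAL_VARIABLES collection, which is how the seed snapshots model weights into a plain dict without a checkpoint. Minimal sketch, TensorFlow 1.x assumed:

import tensorflow as tf
import numpy as np

a = tf.Variable(1.0, name='a')
b = tf.Variable([2.0, 3.0], name='b')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    tf_vars = tf.global_variables()
    values = sess.run(tf_vars)
    # Name -> ndarray snapshot.
    params = {var.name: np.asarray(val) for var, val in zip(tf_vars, values)}
    print(sorted(params))  # ['a:0', 'b:0']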
import tensorflow as tf out_string = "" start_string = request.data.decode().lower() n_words = 5 hidden = [tf.zeros((1, units))] for i in range(n_words):
tensorflow.zeros
2,389
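tf.zeros builds a constant-zero tensor of the given shape, used above as an RNN's initial hidden state. Minimal sketch, TensorFlow 1.x assumed; the unit count is illustrative:

import tensorflow as tf

units = 16
hidden = [tf.zeros((1, units))]  # batch of 1, all-zero initial state

with tf.Session() as sess:
    print(sess.run(hidden[0]).shape)  # (1, 16)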
import tensorflow as tf tf.add_to_collection('mu_sigma_bn', mu) sigma = tf.get_variable('sigma', batch_var.shape, dtype=tf.float32, initializer=tf.ones_initializer(), trainable=False) tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, sigma) tf.add_to_collection('mu_sigma_bn', sigma) beta = tf.get_variable('beta', batch_mean.shape, dtype=tf.float32, initializer=tf.zeros_initializer()) gamma = tf.get_variable('gamma', batch_var.shape, dtype=tf.float32, initializer=tf.ones_initializer()) # BN when training update = 1.0 - decay update_mu = mu.assign_sub(update * (mu - batch_mean)) update_sigma = sigma.assign_sub(update * (sigma - batch_var)) tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mu) tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_sigma) mean, var = tf.cond(self.train_flag, lambda: (batch_mean, batch_var), lambda: (mu, sigma)) bn = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5) tf.add_to_collection('debug_layers', bn) return bn
tensorflow.add_to_collection
2,390
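Collections are named buckets of graph objects: tf.add_to_collection appends to one and tf.get_collection reads it back anywhere later, which is how the batch-norm code above exposes its moving statistics. Minimal sketch, TensorFlow 1.x assumed:

import tensorflow as tf

mu = tf.get_variable('mu', [8], dtype=tf.float32,
                     initializer=tf.zeros_initializer(), trainable=False)
tf.add_to_collection('mu_sigma_bn', mu)

print(tf.get_collection('mu_sigma_bn'))  # [<tf.Variable 'mu:0' shape=(8,) ...>]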
import tensorflow as tf target = tf.reshape(target, shape=(-1, )) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=tensor, labels=target) mask = tf.cast(tf.not_equal(target, tf.zeros_like(target)), dtype=tf.float32) out = cross_entropy * mask return out @layer def sigmoid_cross_entropy_layer(tensor, target, **opts): out = tf.nn.sigmoid_cross_entropy_with_logits(logits=tensor, labels=target) return out @layer def mean_loss_by_example_layer(tensor, sequence_length, **opts): loss = tf.div( tf.reduce_sum(tensor, axis=1), tf.cast(sequence_length, dtype=tf.float32) )
tensorflow.nn.sigmoid_cross_entropy_with_logits
2,391
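tf.nn.sigmoid_cross_entropy_with_logits computes an elementwise loss from raw (pre-sigmoid) scores; unlike the softmax variant, each logit gets its own independent binary label. Runnable sketch, TensorFlow 1.x assumed:

import tensorflow as tf

logits = tf.constant([[2.0, -1.0], [0.0, 1.5]])
labels = tf.constant([[1.0,  0.0], [0.0, 1.0]])
out = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)

with tf.Session() as sess:
    print(sess.run(out))  # one loss value per logit, same shape as the inputs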
import tensorflow as tf 'a', py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0))) var_a = task.theta.a # Make a NaN gradient. var_grads = py_utils.NestedMap(a=(var_a, 0. * tf.log(0.))) has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads) with self.session(): tf.global_variables_initializer().run() self.assertTrue(has_nan_or_inf.eval()) self.assertEqual(0., grad_scale.eval()) # The final gradient must be finite. self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval()) self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval()) def testScaleGradientsCheckNumerics(self): """ScaleGradients when enable_check_numerics=True.""" FLAGS.enable_check_numerics = True p = self.TestParams() p.input = base_input_generator.BaseSequenceInputGenerator.Params() task = p.cls(p) task.CreateVariable( 'a', py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))
tensorflow.is_nan
2,392
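tf.is_nan and tf.is_finite flag non-finite values elementwise, which is how the test above verifies that gradient scaling zeroed out a NaN gradient. Runnable sketch, TensorFlow 1.x assumed:

import tensorflow as tf

g = 0. * tf.log(0.)  # 0 * -inf produces NaN

with tf.Session() as sess:
    print(sess.run(tf.is_nan(g)))     # True
    print(sess.run(tf.is_finite(g)))  # False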
from tensorflow.python.framework import ops
                 tn[tf_index] + fp[tf_index] + kepsilon, name)
    specificity = compute_specificity_at_sensitivity('value')
    with ops.control_dependencies(
        [tp_update_op, fn_update_op, tn_update_op, fp_update_op]):
      update_op = compute_specificity_at_sensitivity('update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, specificity)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return specificity, update_op
def streaming_sensitivity_at_specificity(
    predictions, labels, specificity, weights=None,
    num_thresholds=200, metrics_collections=None, updates_collections=None,
    name=None):
  """Computes the sensitivity at a given specificity.
  The `streaming_sensitivity_at_specificity` function creates four local
  variables, `true_positives`, `true_negatives`, `false_positives` and
tensorflow.python.framework.ops.add_to_collections
2,393
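ops.add_to_collections is the plural form: it takes a list of collection names and adds the value to each, which is how the metric above is published to caller-chosen collections. Runnable sketch, TensorFlow 1.x assumed:

import tensorflow as tf
from tensorflow.python.framework import ops

specificity = tf.constant(0.5, name='specificity')
ops.add_to_collections(['metrics', 'eval_values'], specificity)

print(tf.get_collection('metrics'))      # [<tf.Tensor 'specificity:0' ...>]
print(tf.get_collection('eval_values'))  # the same tensor, second collection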
import tensorflow as tf state = self.rnn_step(rnn_input, state) output = self.rnn_output(state) rnn_outputs.append(output) rnn_states.append(state) return tf.transpose(rnn_outputs, [1, 0, 2]), rnn_states def compute_predictions_scan(self):
tensorflow.transpose
2,394
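tf.transpose permutes axes; the perm [1, 0, 2] converts the time-major list of RNN outputs (which TF implicitly stacks into a tensor) into a batch-major tensor. Minimal sketch, TensorFlow 1.x assumed; the sizes are illustrative:

import tensorflow as tf

# 10 timesteps of [batch=4, dim=8] outputs, as a Python list.
rnn_outputs = [tf.ones([4, 8]) for _ in range(10)]
batch_major = tf.transpose(rnn_outputs, [1, 0, 2])  # list is auto-stacked first
print(batch_major.shape)  # (4, 10, 8)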
import tensorflow as tf num_dims = shape[0].size if tt_rank.size == 1: tt_rank = tt_rank * np.ones(num_dims - 1) tt_rank = np.concatenate([[1], tt_rank, [1]]) tt_rank = tt_rank.astype(int) tt_cores = [None] * num_dims with tf.name_scope(name): for i in range(num_dims): curr_core_shape = (tt_rank[i], shape[0][i], shape[1][i], tt_rank[i + 1]) tt_cores[i] = tf.random_normal(curr_core_shape, mean=mean, stddev=stddev, dtype=dtype) return TensorTrain(tt_cores, shape, tt_rank) def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1, mean=0., stddev=1., dtype=tf.float32, name='t3f_matrix_batch_with_random_cores'): """Generate a batch of TT-matrices of given shape with N(mean, stddev^2) cores.
tensorflow.random_normal
2,395
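tf.random_normal draws i.i.d. N(mean, stddev^2) samples into a tensor of the given shape, one 4-D core per TT dimension in the seed. Minimal sketch, TensorFlow 1.x assumed; the core shape is illustrative:

import tensorflow as tf

curr_core_shape = (1, 3, 3, 2)  # (left rank, row mode, column mode, right rank)
core = tf.random_normal(curr_core_shape, mean=0., stddev=1., dtype=tf.float32)

with tf.Session() as sess:
    print(sess.run(core).shape)  # (1, 3, 3, 2)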
import tensorflow as tf max_axis = tf.reduce_max(target, axis, keep_dims=True) target_exp = tf.exp(target - max_axis) normalize = tf.reduce_sum(target_exp, axis, keep_dims=True) softmax = target_exp / normalize
tensorflow.reduce_sum
2,396
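The seed is the standard numerically stable softmax: subtract the per-row max before exponentiating, then normalize by the tf.reduce_sum of the exponentials. Runnable sketch, TensorFlow 1.x assumed:

import tensorflow as tf

target = tf.constant([[1.0, 2.0, 3.0]])
axis = 1
max_axis = tf.reduce_max(target, axis, keep_dims=True)       # rowwise max
target_exp = tf.exp(target - max_axis)                       # safe exponentials
normalize = tf.reduce_sum(target_exp, axis, keep_dims=True)  # rowwise sum
softmax = target_exp / normalize

with tf.Session() as sess:
    print(sess.run(softmax))  # ~[[0.090 0.245 0.665]]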
import tensorflow as tf def gradsafe_sqrt(x, clip_low=1e-18, name=None): with tf.name_scope(name, "gradsafe_sqrt"): return tf.sqrt(tf.clip_by_value(x, clip_low, x)) def argus_integral_phalf(m_low, m_high, m0, c): """ Only valid for argus_pdf with p=0.5! Otherwise need to do numerical integral. """ def F(m_bound, name=None): with tf.name_scope(name, "argus_integral_phalf_primitive"): a = tf.minimum(m_bound, m0) x = 1 - tf.pow(a / m0, 2) primitive = -0.5 * m0 * m0 * (tf.exp(c * x) * tf.sqrt(x) / c + 0.5 / tf.pow(-c, 1.5) * tf.sqrt(pi) * tf.erf(gradsafe_sqrt(-c * x))) # We have to safeguard the sqrt, because otherwise the analytic # derivative blows up for x = 0 return primitive area = tf.sub(F(m_high, name="F2"), F(m_low, name="F1"), name="argus_integral_phalf") return area def argus_pdf_phalf_WN(m, m0, c, m_low, m_high):
tensorflow.minimum
2,397
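tf.minimum is the elementwise minimum of two broadcastable arguments; in the seed it clamps the integration bound at the ARGUS endpoint m0. Runnable sketch, TensorFlow 1.x assumed:

import tensorflow as tf

m_bound = tf.constant([0.8, 1.2, 2.0])
m0 = 1.5
a = tf.minimum(m_bound, m0)  # the scalar m0 broadcasts against the vector

with tf.Session() as sess:
    print(sess.run(a))  # [0.8 1.2 1.5]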
import tensorflow as tf """Stochastic version of basic next-frame model.""" def inject_latent(self, layer, features, filters): """Inject a VAE-style latent.""" # Latent for stochastic model input_frames = tf.to_float(features["inputs_raw"]) target_frames = tf.to_float(features["targets_raw"]) full_video = tf.concat([input_frames, target_frames], axis=1) latent_mean, latent_std = self.construct_latent_tower( full_video, time_axis=1) latent = common_video.get_gaussian_tensor(latent_mean, latent_std) latent = tf.layers.flatten(latent)
tensorflow.to_float
2,398
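tf.to_float is 1.x shorthand for tf.cast(x, tf.float32), used above to promote raw uint8 frames before concatenation. Runnable sketch, TensorFlow 1.x assumed:

import tensorflow as tf

frames = tf.constant([[0, 128, 255]], dtype=tf.uint8)
frames_f = tf.to_float(frames)  # same values, dtype float32

with tf.Session() as sess:
    print(sess.run(frames_f))  # [[  0. 128. 255.]]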
import tensorflow as tf with self.test_session() as sess: zeros_t = tf.fill([1024, 1024], 0.0)
tensorflow.fill
2,399
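tf.fill materializes a tensor of the requested shape with every entry set to one scalar value. Runnable sketch, TensorFlow 1.x assumed:

import tensorflow as tf

zeros_t = tf.fill([1024, 1024], 0.0)  # 1024x1024 tensor, every entry 0.0

with tf.Session() as sess:
    out = sess.run(zeros_t)
    print(out.shape, float(out.max()))  # (1024, 1024) 0.0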