Columns:
  seed      string (lengths 25 to 2.89k)
  seed_api  string (lengths 14 to 102)
  index     int64 (range 0 to 14.8k)
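Each record below is one dataset row: a code snippet (seed), the fully qualified TensorFlow API it exercises (seed_api), and a running index. As a minimal sketch of how such rows could be consumed, assuming a JSON Lines export with exactly these three fields (the file name seed_rows.jsonl is hypothetical):

import json
from collections import Counter

# Count how many seed snippets exercise each TensorFlow API.
api_counts = Counter()
with open('seed_rows.jsonl') as f:   # hypothetical file name
    for line in f:
        row = json.loads(line)       # keys: 'seed', 'seed_api', 'index'
        api_counts[row['seed_api']] += 1

print(api_counts.most_common(5))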
import tensorflow as tf

# Careful with GPU memory allocation, TF never releases it. TF starts with almost
# all of the GPU memory allocated. We can slowly grow to that limit with an
# option setting:
config = tf.ConfigProto()  # a config object must exist before setting its options
config.gpu_options.allow_growth = True
sess_grow = tf.Session(config=config)

# Also, we can limit the size of GPU memory used, with the following option
config.gpu_options.per_process_gpu_memory_fraction = 0.4
sess_limited = tf.Session(config=config)
tensorflow.Session
1,300
import tensorflow as tf

    else:
        activation = non_linear_fn(output)

    return activation


def batch_norm(x, b_train, scope, reuse=False):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        n_out = x.get_shape().as_list()[-1]
        beta = tf.get_variable('beta', initializer=tf.constant(0.0, shape=[n_out]))
        gamma = tf.get_variable('gamma', initializer=tf.constant(1.0, shape=[n_out]))
        batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.9)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(b_train,
tensorflow.constant
1,301
import tensorflow as tf

    if norm == 'I':
        X = tf.contrib.layers.instance_norm(X, scope=scope, reuse=reuse, epsilon=0.001)
    elif norm == 'B':
        X = tf.layers.batch_normalization(X, reuse=reuse, training=True)
    elif norm == 'G':
        X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse)
    if nonlin:
        X = tf.nn.leaky_relu(X, 0.2)
    return X

    with tf.variable_scope('discriminator') as scope:
        if reuse:
            scope.reuse_variables()
        print('D in:', X.get_shape().as_list())
        X = self.conv('DZ1', X, 512, 1, 1)
        X = tf.nn.leaky_relu(X, 0.2)
        X = self.conv('DZ2', X, 512, 1, 1)
        X = tf.nn.leaky_relu(X, 0.2)
tensorflow.variable_scope
1,302
from tensorflow.python.ops import control_flow_ops

  # Create gradient updates.
  grad_updates = opt.apply_gradients(
      gradients,
      global_step=global_step if increment_global_step else None,
      name="train")

  # Ensure the train_tensor computes grad_updates.
  train_tensor = control_flow_ops.with_dependencies([grad_updates], loss)

  return train_tensor


def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
  """Clips gradients by global norm."""
tensorflow.python.ops.control_flow_ops.with_dependencies
1,303
from tensorflow.contrib.framework import deprecated
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn


@deprecated(
    "2016-11-12",
    "This file will be removed after the deprecation date."
    "Please switch to "
    "third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def regression_target(label_name=None,
tensorflow.contrib.framework.deprecated
1,304
from tensorflow.python.framework import ops

    if i not in reduction_indices:
      returned_dims.append(dim)
  return [tensor_shape.TensorShape(returned_dims)]


@ops.RegisterShape("SegmentMax")
@ops.RegisterShape("SegmentMean")
@ops.RegisterShape("SegmentMin")
@ops.RegisterShape("SegmentProd")
@ops.RegisterShape("SegmentSum")
def _SegmentReductionShape(op):
  """Common shape function for segment reduction ops."""
  data_shape = op.inputs[0].get_shape()
  segment_ids_shape = op.inputs[1].get_shape()
  segment_ids_shape.assert_has_rank(1)
  return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
tensorflow.python.framework.ops.RegisterShape
1,305
import tensorflow as tf "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64), "is_real_example": tf.FixedLenFeature([], tf.int64), }
tensorflow.FixedLenFeature
1,306
import tensorflow as tf

    hess = []  # list
    for i in range(dim):
        dg_i = tf.gradients(flat_grads[i], par)  # for each element of grads evaluate the gradients
        dg_i_flat = flatten(dg_i)  # flatten the resulting hessian onto a 1 d array
        hess.append(dg_i_flat)  # store row by row
    return tf.reshape(hess, [dim, dim])  # returns the reshaped matrix


# =======================
# Main
# =======================
def Run_DNN(X_train, Y_train, X_test, Y_test, layers, activation, epoch_sample,
            stdbeta, starter_learning, num_iterations, with_hessian, save_model, Plot):
tensorflow.reshape
1,307
from tensorflow.python.framework import constant_op
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest


def _train_input_fn():
  features = {"x": constant_op.constant([[2.], [1.], [1.]])}
  label = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
  return features, label


def _ranking_train_input_fn():
  features = {
      "a.f1": constant_op.constant([[3.], [0.3], [1.]]),
      "a.f2": constant_op.constant([[0.1], [3.], [1.]]),
      "b.f1": constant_op.constant([[13.], [0.4], [5.]]),
      "b.f2": constant_op.constant([[1.], [3.], [0.01]]),
  }
  label = constant_op.constant([[0], [0], [1]], dtype=dtypes.int32)
  return features, label


def _eval_input_fn():
  features = {"x": constant_op.constant([[1.], [2.], [2.]])}
  label = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
  return features, label
tensorflow.python.framework.constant_op.constant
1,308
import tensorflow as tf

    }
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=FLAGS.log_every_n_steps)

    print('Starting a training cycle.')
    xdetector.train(input_fn=input_pipeline(), hooks=[logging_hook])


if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run()
tensorflow.app.run
1,309
import tensorflow as tf

      if add_flipped_images:
        scales_to_logits_reversed = (
            outputs_to_scales_to_logits_reversed[output])
        logits_reversed = tf.image.resize_bilinear(
            tf.reverse_v2(scales_to_logits_reversed[_MERGED_LOGITS_SCOPE], [2]),
            tf.shape(images)[1:3],
            align_corners=True)
        outputs_to_predictions[output].append(
            tf.expand_dims(tf.nn.softmax(logits_reversed), 4))
tensorflow.reverse_v2
1,310
import tensorflow as tf

    sequence_lengths: Tensor of shape `[B]` containing sequence lengths to be
      (reversed and then) summed, same as in `scan_discounted_sum`.
    name: Sets the name_scope for this op.

  Returns:
    Tensor of shape `[T, B]` containing multistep returns.
  """
  with tf.name_scope(name, values=[rewards, pcontinues, state_values]):
    # Regroup:
    #   result[t] = (rewards[t] + pcontinues[t]*(1-lambda_)*state_values[t]) +
    #               pcontinues[t]*lambda_*result[t + 1]
    # Define:
    #   sequence[t] = rewards[t] + pcontinues[t]*(1-lambda_)*state_values[t]
    #   discount[t] = pcontinues[t]*lambda_
tensorflow.name_scope
1,311
import tensorflow as tf

        )

        def attention(encoder_out, seq_len, reuse=False):
            attention_mechanism = tf.contrib.seq2seq.LuongAttention(
                num_units=size_layers, memory=encoder_out,
                memory_sequence_length=seq_len
            )
            return tf.contrib.seq2seq.AttentionWrapper(
                cell=tf.nn.rnn_cell.MultiRNNCell([cells(reuse) for _ in range(num_layers)]),
                attention_mechanism=attention_mechanism,
                attention_layer_size=size_layers,
                alignment_history=True,
            )

        encoder_cells = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)])
        encoder_out, encoder_state = tf.nn.dynamic_rnn(
            cell=encoder_cells, inputs=forward, sequence_length=seq_lens, dtype=tf.float32
        )
        encoder_state = tuple(encoder_state[-1] for _ in range(num_layers))
        decoder_cell = attention(encoder_out, seq_lens)
        dense_layer = tf.layers.Dense(n_mels * resampled)
        training_helper = tf.contrib.seq2seq.TrainingHelper(
            inputs=self.decoder_inputs,
            sequence_length=seq_lens,
            time_major=False
        )
        training_decoder = tf.contrib.seq2seq.BasicDecoder(
            cell=decoder_cell,
            helper=training_helper,
tensorflow.nn.dynamic_rnn
1,312
import tensorflow as tf

        # Build fromRGB 1x1 `Conv2D` layers internals through call.
        from_rgb_conv_tensors = self.build_discriminator_from_rgb_layers(
            params=params
        )
        print_obj(
            "\nbuild_discriminator_layers",
            "from_rgb_conv_tensors",
            from_rgb_conv_tensors
        )

        with tf.control_dependencies(control_inputs=from_rgb_conv_tensors):
            # Create base convolutional block's layer internals using call.
            conv_block_tensors = [
                self.build_discriminator_base_conv_layer_block(
                    params=params
                )
            ]

            # Build growth `Conv2D` layer block internals through call.
            conv_block_tensors.extend(
tensorflow.control_dependencies
1,313
import tensorflow as tf

    Args:
      inputs: tensor
      is_training: boolean tf.Variable
      scope: string
      keep_prob: float in [0,1]
      noise_shape: list of ints

    Returns:
      tensor variable
    """
    with tf.variable_scope(scope) as sc:
        outputs = tf.cond(is_training,
                          lambda: tf.nn.dropout(inputs, keep_prob, noise_shape),
                          lambda: inputs)
        return outputs
tensorflow.variable_scope
1,314
import tensorflow as tf

            self.pi = tf.nn.softmax(logits)
            self.log_pi = tf.nn.log_softmax(logits)
            self.sample = policy_utils.categorical_sample(
                tf.reshape(logits, [-1, num_acts]), num_acts)[0, :]

    def build_value(self, _input):
        with tf.variable_scope('VF'):
            hidden = tf.layers.dense(inputs=_input,
                                     units=self.vf_hidden_size,
                                     activation=tf.nn.elu)
            w = tf.get_variable("weights", (self.vf_hidden_size, 1))
            return tf.matmul(hidden, w)

    def build_loss(self):
        cutoff_vf_manager = tf.reshape(tf.stop_gradient(self.manager_vf), [-1])
        dot = tf.reduce_sum(tf.multiply(self.s_diff, self.g), axis=1)
        gcut = tf.stop_gradient(self.g)
        mag = tf.norm(self.s_diff, axis=1) * tf.norm(gcut, axis=1) + .0001
        dcos = dot / mag
        manager_loss = -tf.reduce_sum((self.r - cutoff_vf_manager) * dcos)
tensorflow.get_variable
1,315
import tensorflow as tf

    x = (x - u) * tf.rsqrt(s + e)
    if g is not None and b is not None:
        x = x*g + b
    return x


def norm(x, scope, axis=[-1]):
    with tf.variable_scope(scope):
        n_state = shape_list(x)[-1]
        g = tf.get_variable("g", [n_state], initializer=tf.constant_initializer(1))
        b = tf.get_variable("b", [n_state], initializer=tf.constant_initializer(0))
        return _norm(x, g, b, axis=axis)


def dropout(x, pdrop, train):
    if train and pdrop > 0:
        x = tf.nn.dropout(x, 1-pdrop)
    return x
tensorflow.constant_initializer
1,316
from tensorflow.python.ops import math_ops

    if metrics is None and self._n_classes > 1:
      metrics = {"accuracy": metrics_lib.streaming_accuracy}

    if self._n_classes == 2:
      predictions = math_ops.sigmoid(logits)
      result["eval_auc"] = metrics_lib.streaming_auc(predictions, targets)

    if metrics:
tensorflow.python.ops.math_ops.sigmoid
1,317
from tensorflow.python.ops import array_ops

      array_ops.expand_dims(array_ops.constant(thresholds), [1]),
      array_ops.pack([1, num_predictions]))

  # Tile the predictions after thresholding them across different thresholds.
  pred_is_pos = math_ops.greater(
      array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
      thresh_tiled)
  pred_is_neg = math_ops.logical_not(pred_is_pos)

  # Tile labels by number of thresholds
  label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
  label_is_neg = math_ops.logical_not(label_is_pos)

  true_positives = _create_local('true_positives', shape=[num_thresholds])
  false_negatives = _create_local('false_negatives', shape=[num_thresholds])
  true_negatives = _create_local('true_negatives', shape=[num_thresholds])
  false_positives = _create_local('false_positives', shape=[num_thresholds])

  is_true_positive = math_ops.to_float(
      math_ops.logical_and(label_is_pos, pred_is_pos))
tensorflow.python.ops.array_ops.tile
1,318
import tensorflow as tf

STATE_DIM = env.observation_space.shape[0]  # 24
ACTION_DIM = env.action_space.shape[0]      # 4
ACTION_BOUND = env.action_space.high        # [1, 1, 1, 1]

# all placeholder for tf
with tf.name_scope('S'):
    S = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s')
with tf.name_scope('R'):
    R = tf.placeholder(tf.float32, [None, 1], name='r')
with tf.name_scope('S_'):
    S_ = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s_')


###############################  Actor  ####################################
class Actor(object):
    def __init__(self, sess, action_dim, action_bound, learning_rate, t_replace_iter):
        self.sess = sess
        self.a_dim = action_dim
        self.action_bound = action_bound
tensorflow.name_scope
1,319
import tensorflow as tf

        pooling_mask = tf.logical_and(direct_mask_un, rep_mask_tile_un)  # [bs, sluh, sld]

        # data for pooling
        pooling_data = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_unhead, 1, 1])  # bs,sluh,sld,hn

        # execute mean pooling based on pooling_mask[bs, sluh, sld] and pooling_data[bs,sluh,sld,hn]
        pooling_data = mask_for_high_rank(pooling_data, pooling_mask)  # [bs,sluh,sld,hn]
        pooling_data_sum = tf.reduce_sum(pooling_data, -2)  # [bs,sluh,hn]
        pooling_den = tf.reduce_sum(tf.cast(pooling_mask, tf.int32), -1, keep_dims=True)  # [bs,sluh]
        pooling_den = tf.where(tf.equal(pooling_den, 0), tf.ones_like(pooling_den), pooling_den)

        pooling_result = pooling_data_sum / tf.cast(pooling_den, tf.float32)
        return pooling_result
tensorflow.reduce_sum
1,320
import tensorflow as tf

        inverted_rgb2lms = tf.linalg.inv(self.rgb2lms)
        product1 = tf.matmul(inverted_rgb2lms, self.color_matrix)
        product2 = tf.matmul(product1, self.rgb2lms)
        original_image_shape = image.shape
tensorflow.matmul
1,321
import tensorflow as tf

    if scale < 0.:
        raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
    if scale == 0.:
        return lambda _: None

    def l1(weights, name='l1_regularizer'):
        """Applies L1 regularization to weights."""
        with tf.name_scope(name):
            my_scale = tf.convert_to_tensor(scale,
                                            dtype=weights.dtype.base_dtype,
                                            name='scale')
            return tf.multiply(my_scale, tf.reduce_sum(tf.abs(weights)), name=name)

    return l1
tensorflow.name_scope
1,322
import tensorflow as tf

    # Residual connection:
    highway_input = proj2_output + inputs

    half_depth = depth // 2
    assert half_depth * 2 == depth, 'encoder and postnet depths must be even.'

    # Handle dimensionality mismatch:
    if highway_input.shape[2] != half_depth:
        highway_input = tf.layers.dense(highway_input, half_depth)

    # 4-layer HighwayNet:
    for i in range(4):
        highway_input = highwaynet(highway_input, 'highway_%d' % (i + 1), half_depth)
    rnn_input = highway_input

    # Bidirectional RNN
tensorflow.layers.dense
1,323
from tensorflow.python.ops import array_ops

      return None
    feature_column_ops.check_feature_columns(self._dnn_feature_columns)
    return sorted(set(self._dnn_feature_columns), key=lambda x: x.key)

  def _dnn_logits(self, features, is_training):
    return self._dnn_model.build_model(
        features, self._dnn_feature_columns, is_training)

  def _linear_logits(self, features, is_training):
    return self._linear_model.build_model(
        features, self._linear_feature_columns, is_training)

  def _centered_bias(self):
    centered_bias = variables.Variable(
        array_ops.zeros([self._target_column.num_label_columns]),
        collections=[self._centered_bias_weight_collection,
                     ops.GraphKeys.VARIABLES],
        name="centered_bias_weight")
    logging_ops.scalar_summary(
        ["centered_bias_%d" % cb for cb in range(
            self._target_column.num_label_columns)],
        array_ops.reshape(centered_bias, [-1]))
    return centered_bias

  def _centered_bias_step(self, targets, features):
    centered_bias = ops.get_collection(self._centered_bias_weight_collection)
    batch_size = array_ops.shape(targets)[0]
    logits = array_ops.reshape(
tensorflow.python.ops.array_ops.zeros
1,324
from tensorflow.python.ops import math_ops

    mean_average_precision: Scalar `float64` `Tensor` with the mean average
      precision values.
    update: `Operation` that increments variables appropriately, and whose
      value matches `metric`.
  """
  default_name = _at_k_name('average_precision', k)
  with ops.name_scope(name, default_name, (predictions, labels)) as scope:
    # Calculate per-example average precision, and apply weights.
    average_precision = sparse_average_precision_at_k(
        predictions=predictions, labels=labels, k=k)
    if weights is not None:
      weights = math_ops.to_double(weights)
      average_precision = math_ops.mul(average_precision, weights)

    # Create accumulation variables and update ops for max average precision and
    # total average precision.
    with ops.name_scope(None, 'max', (average_precision,)) as max_scope:
      # `max` is the max possible precision. Since max for any row is 1.0:
      # - For the unweighted case, this is just the number of rows.
      # - For the weighted case, it's the sum of the weights broadcast across
      #   `average_precision` rows.
      max_var = contrib_variables.local_variable(
tensorflow.python.ops.math_ops.to_double
1,325
import tensorflow as tf

    distances1 = isu.inputs_distances_to_centers(inputs, centers)
    num_centers = tf.shape(centers)[0]
    inputs_reshaped = tf.tile(tf.expand_dims(inputs, axis=1),
                              tf.stack([1, num_centers, 1]))
    distances2 = tf.reduce_sum(tf.square(inputs_reshaped - centers), axis=2)
    self.assertAllClose(distances1.numpy(), distances2.numpy(), atol=0.001)

  def test_pairwise_iou_matrix(self):
    mask0 = tf.constant([[1, 0], [0, 1]], dtype=tf.float32)
    mask1 = tf.constant([[1, 1], [0, 1]], dtype=tf.float32)
    mask2 = tf.constant([[1, 0], [1, 1]], dtype=tf.float32)
    mask3 = tf.constant([[1, 1], [1, 1]], dtype=tf.float32)
    mask4 = tf.constant([[0, 0], [0, 0]], dtype=tf.float32)
    mask5 = tf.constant([[1, 0], [1, 0]], dtype=tf.float32)
tensorflow.constant
1,326
import tensorflow as tf

    with tf.control_dependencies(assertions):
        product = tf.multiply(predictions, labels)
        internal_dot_products = tf.reduce_sum(product, [1])
        logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products))
    return logcost
tensorflow.abs
1,327
import tensorflow as tf

    :param sess: (TensorFlow session) The current TensorFlow session
    :param param_noise_filter_func: (function (TensorFlow Tensor): bool) function that decides whether or not a
        variable should be perturbed. Only applicable if param_noise is True. If set to None,
        default_param_noise_filter is used by default.
    :return: (function (TensorFlow Tensor, bool, float): TensorFlow Tensor, (TensorFlow Tensor, TensorFlow Tensor))
        act function to select an action given observation (See the top of the file for details),
        A tuple containing the observation placeholder and the processed observation placeholder respectively.
    """
    if param_noise_filter_func is None:
        param_noise_filter_func = default_param_noise_filter

    update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
    update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
    reset_ph = tf.placeholder(tf.bool, (), name="reset")

    eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
    param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01),
                                        trainable=False)
    param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05),
                                            trainable=False)

    # Unmodified Q.
tensorflow.placeholder
1,328
from tensorflow.python.ops import math_ops

  (tp, fn, tn, fp, tp_update_op, fn_update_op, tn_update_op,
   fp_update_op) = _tp_fn_tn_fp(predictions, labels, thresholds, weights)
  assert array_ops.squeeze(fp).get_shape().as_list()[0] == num_thresholds

  def compute_sensitivity_at_specificity(name):
    specificities = math_ops.div(tn, tn + fp + kepsilon)
    tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0)
    tf_index = math_ops.cast(tf_index, dtypes.int32)

    # Now, we have the implicit threshold, so compute the sensitivity:
    return math_ops.div(tp[tf_index],
                        tp[tf_index] + fn[tf_index] + kepsilon,
tensorflow.python.ops.math_ops.abs
1,329
import tensorflow as tf

  def body(self, features):
    observations = features["inputs"]
    x = tf.transpose(observations, [0, 2, 3, 1, 4])
    x_shape = common_layers.shape_list(x)
tensorflow.transpose
1,330
import tensorflow as tf

    for embedding_name, path_to_meta in zip(embedding_names, paths_to_meta):
        # Initialize config
        embedding = config.embeddings.add()
        # Specify the embedding variable and the metadata
        embedding.tensor_name = embedding_name
        embedding.metadata_path = path_to_meta

    # Project the embeddings to space dimensions for visualization
    tf.contrib.tensorboard.plugins.projector.visualize_embeddings(summary_writer, config)


def add_train_stats(model, hparams):
    with tf.variable_scope("stats") as scope:
        for i in range(hparams.tacotron_num_gpus):
            tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
            tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
        tf.summary.scalar("before_loss", model.before_loss)
tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings
1,331
import tensorflow as tf with tf.name_scope("loss"): self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y)) # Optimizer with tf.name_scope("training_op"): self.training_op = tf.compat.v1.train.AdamOptimizer(self.learning_rate).minimize(self.loss) # Perf metrics with tf.name_scope("accuracy"): prediction = tf.equal(tf.argmax(self.fc2, 1), tf.argmax(self.y, 1)) self.accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
tensorflow.cast
1,332
import tensorflow as tf

    keep_prob = args.keep_prob
    cross_stitch_enabled = args.cross_stitch_enabled

    with tf.variable_scope("placeholder"):
        X = tf.placeholder(tf.float32, (None, 128), "X")
        y_1 = tf.placeholder(tf.float32, (None, n_output_1), "y_1")
        y_2 = tf.placeholder(tf.float32, (None, n_output_2), "y_2")
        is_training = tf.placeholder(tf.bool, (), "is_training")

    with tf.variable_scope("network"):
        with contrib.framework.arg_scope(
                [contrib.layers.fully_connected],
tensorflow.placeholder
1,333
import tensorflow as tf

        a[i] = 1
    return a

render = False  # display the game environment
running_reward = None

tf.reset_default_graph()

## Define Q-network q(a,s) that outputs the rewards of 4 actions given a state, i.e. the Action-Value Function.
# 4x4 grid can be represented by one-hot vector with 16 integers.
inputs = tf.placeholder(shape=[1, 16], dtype=tf.float32)
net = InputLayer(inputs, name='observation')
net = DenseLayer(net, n_units=4, act=tf.identity,
tensorflow.reset_default_graph
1,334
import tensorflow as tf

        self.train_data['X'], self.train_data['E'], \
        self.train_data['T'], self.train_data['failures'], \
        self.train_data['atrisk'], self.train_data['ties'] = utils.parse_data(X, label)

        # New Graph
        G = tf.Graph()
        with G.as_default():
            # Data input
            X = tf.placeholder(tf.float32, [None, input_node], name='x-Input')
            y_ = tf.placeholder(tf.float32, [None, output_node], name='label-Input')

            # hidden layers
            self.nnweights = []  # collect weights of network
            prev_node = input_node
            prev_x = X
            for i in range(len(hidden_layers_node)):
                layer_name = 'layer' + str(i+1)
                with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE):
tensorflow.placeholder
1,335
import tensorflow as tf

        self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)]

        self.sess.run(tf.global_variables_initializer())

        # Tensorboard
        if summary_dir is not None:
            self.writer = tf.summary.FileWriter(summary_dir)
            tf.summary.scalar('Loss/Policy', loss_pg)
            tf.summary.scalar('Loss/Value', loss_vf)
            tf.summary.scalar('Loss/Entropy', loss_entropy)
            tf.summary.scalar('Loss/Total', loss)
            tf.summary.scalar('Var/Epsilon', epsilon_decay)
            tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode()))
            tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev()))
            tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf))
            self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))

    # AC net
    def build_anet(self, state_in, name, reuse=False, batch_size=64):
        reg = None
        with tf.variable_scope(name, reuse=reuse):
tensorflow.summary.scalar
1,336
import tensorflow as tf

    logits_ = tf.reshape(logits,
                         tf.stack([time_steps * batch_size, logits.get_shape()[2].value]))
    targets_ = tf.reshape(targets, tf.stack([time_steps * batch_size]))
tensorflow.stack
1,337
import tensorflow as tf

        if isinstance(bias, float):
            bias = tf.get_variable("biases", [output_dim], tf.float32,
                                   tf.constant_initializer(bias))
        variable_summaries(bias)
        output = tf.nn.bias_add(tf.matmul(x, w), bias)
        return output

    def _bn(self, name, x):
        with tf.variable_scope(name):
            moving_average_decay = 0.9
            decay = moving_average_decay
            batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])
            mu = tf.get_variable('mu', batch_mean.shape, dtype=tf.float32,
                                 initializer=tf.zeros_initializer(), trainable=False)
            tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, mu)
            tf.add_to_collection('mu_sigma_bn', mu)
            sigma = tf.get_variable('sigma', batch_var.shape, dtype=tf.float32,
                                    initializer=tf.ones_initializer(), trainable=False)
            tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, sigma)
            tf.add_to_collection('mu_sigma_bn', sigma)
            beta = tf.get_variable('beta', batch_mean.shape, dtype=tf.float32,
                                   initializer=tf.zeros_initializer())
            gamma = tf.get_variable('gamma', batch_var.shape, dtype=tf.float32,
                                    initializer=tf.ones_initializer())
tensorflow.nn.moments
1,338
import tensorflow as tf

    with tf.name_scope('weight_decay'):
        add_weight_decay(params['weight_decay'])
        regularization_loss = tf.losses.get_regularization_loss()
tensorflow.losses.get_regularization_loss
1,339
import tensorflow as tf

    log_probs = tf.nn.log_softmax(logits, axis=-1)
    label_ids = tf.reshape(label_ids, [-1])
    one_hot_labels = tf.one_hot(
        label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
    loss = tf.reshape(per_example_loss, [-1, tf.shape(positions)[1]])
    # TODO: dynamic gather from per_example_loss
    return loss
tensorflow.reduce_sum
1,340
import tensorflow as tf

    precise_logits = tf.cast(logits, tf.float32) if (
        logits.dtype == tf.float16) else logits
    onehot_labels = tf.cast(onehot_labels, precise_logits.dtype)
    predictions = tf.nn.sigmoid(logits)
    predictions_pt = tf.where(tf.equal(onehot_labels, 1), predictions, 1.-predictions)
    # add small value to avoid 0
tensorflow.nn.sigmoid
1,341
import tensorflow as tf

                # Target for value fn regression
                # We update the vf towards the min of two Q-functions in order to
                # reduce overestimation bias from function approximation error.
                v_backup = tf.stop_gradient(min_qf_pi - self.ent_coef * logp_pi)
                value_loss = 0.5 * tf.reduce_mean(((value_fn - v_backup) ** 2) * self.weight_ph)
                # value_for_priority = tf.reduce_mean((value_fn - v_backup) ** 2, 1)
                regularizervf = tf.contrib.layers.l1_l2_regularizer(
                    scale_l1=0.0, scale_l2=1e-5, scope='model/values_fn')
                all_trainable_weights_vf = tf_util.get_trainable_vars('model/values_fn')
                regularization_penalty_vf = tf.contrib.layers.apply_regularization(
                    regularizervf, all_trainable_weights_vf)
                if self.n_step:
                    values_losses = qf1_loss + qf2_loss + value_loss + regularization_penalty_vf + qf1_loss_n + qf2_loss_n
                else:
                    values_losses = qf1_loss + qf2_loss + value_loss + regularization_penalty_vf
tensorflow.contrib.layers.l1_l2_regularizer
1,342
import tensorflow as tf

    with tf.device('/cpu:0'):
        OPT = tf.train.AdamOptimizer(1e-4)  # later we mainly use this optimizer's apply_gradients op
        # OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')  # define the critic training procedure
        GLOBAL_AC = ACnet(scope=GLOBAL_NET_SCOPE)  # create the central brain GLOBAL_AC, structure only (A and C parameters)
        workers = []
        for i in range(N_workers):  # N_workers equals the number of CPUs
            i_name = 'W_%i' % i  # worker name
            workers.append(Worker(name=i_name, globalAC=GLOBAL_AC))  # create independent workers

    COORD = tf.train.Coordinator()  # multithreading
    SESS.run(tf.global_variables_initializer())  # initialize all parameters

    worker_threads = []
    for worker in workers:  # run in parallel
        job = lambda: worker.work()  # each worker's target, calling Worker.work()
        t = threading.Thread(target=job)  # one thread per worker
        t.start()  # start each worker
        worker_threads.append(t)  # add each worker's thread to the list
tensorflow.train.Coordinator
1,343
import tensorflow as tf

      x = tf.constant(1+2j, tf.complex64)
      y = tf.constant(3+4j, tf.complex64)
      z, = tf.py_func(my_func, [x, y], [tf.complex64])
      self.assertAllClose(z.eval(), my_func(1+2j, 3+4j))

    # a bit exotic function (rfft)
    with self.test_session():
      x = tf.constant([1., 2., 3., 4.], tf.float32)
      def rfft(x):
        return np.fft.rfft(x).astype(np.complex64)
      y, = tf.py_func(rfft, [x], [tf.complex64])
      self.assertAllClose(y.eval(), np.fft.rfft([1., 2., 3., 4.]))

    # returns a python literal.
    with self.test_session():
      def literal(x):
        return 1.0 if x == 0.0 else 0.0
      x = tf.constant(0.0, tf.float64)
      y, = tf.py_func(literal, [x], [tf.float64])
      self.assertAllClose(y.eval(), 1.0)
tensorflow.py_func
1,344
import tensorflow as tf """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features)
tensorflow.parse_single_example
1,345
import tensorflow as tf

        }, labels
    else:
        dupe_mask = tf.cast(tf.random_uniform([batch_size], dtype=tf.int32,
                                              minval=0, maxval=2), tf.bool)
tensorflow.random_uniform
1,346
import tensorflow as tf

    with tf.variable_scope(scope or 'directional_attention_%s' % direction or 'diag'):
        # non-linear
        rep_map = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map', activation,
                                 False, wd, keep_prob, is_train)
        # ensure the selection is right
        dep_selection = tf.logical_and(rep_mask, dep_selection)
        head_selection = tf.logical_and(rep_mask, head_selection)

        rep_dep_tensor, rep_dep_mask, dep_org_idx = reduce_data_rep_max_len(rep_map, dep_selection)
        rep_head_tensor, rep_head_mask, head_org_idx = reduce_data_rep_max_len(rep_map, head_selection)
        sl_dep, sl_head = tf.shape(rep_dep_tensor)[1], tf.shape(rep_head_tensor)[1]

        if keep_unselected:
            unhead_selection = tf.logical_and(rep_mask, tf.logical_not(head_selection))
            rep_unhead_tensor, rep_unhead_mask, unhead_org_idx = reduce_data_rep_max_len(rep_map, unhead_selection)
            sl_unhead = tf.shape(rep_unhead_tensor)[1]

        attn_result = tf.cond(
            tf.equal(sl_head, 0),
            lambda: tf.zeros([bs, 0, hn], tf.float32),
            lambda: self_attention_for_selected_head(
                head_selection, head_org_idx, sl_head, rep_head_mask,
                dep_selection, dep_org_idx, sl_dep, rep_dep_mask,
                rep_map, rep_dep_tensor, keep_prob, is_train, direction, ivec
            )
        )
tensorflow.logical_not
1,347
import tensorflow as tf

output0 = f(tf.constant([1]), tf.constant([2]))
tensorflow.constant
1,348
import tensorflow as tf

    flat_x = tf.layers.flatten(x)
    flat_x = tf.nn.dropout(flat_x, rate=dropout)
    x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu, name="dense1")

    logits = tf.layers.dense(
        x, self.hparams.problem.num_actions, name="dense2"
    )
    logits = tf.expand_dims(logits, axis=1)
    logits = clip_logits(logits, self.hparams)

    value = tf.layers.dense(x, 1, name="value")
    return {"target_policy": logits, "target_value": value}


@registry.register_model
tensorflow.expand_dims
1,349
from tensorflow.python.platform import googletest

        model_dir=model_dir,
        config=config,
        feature_columns=[core_feature_column.numeric_column("x")])

    # Train for a few steps.
    est.train(input_fn=_train_input_fn, steps=1000)
    est.evaluate(input_fn=_eval_input_fn, steps=1)
    est.predict(input_fn=_eval_input_fn)


if __name__ == "__main__":
  googletest.main()
tensorflow.python.platform.googletest.main
1,350
import tensorflow as tf

    bounded = tf.where(tf.greater(denom, 1e-20),
                       tf.div(num, 1.0 * denom),
                       tf.ones_like(num))
    nelems = tf.reduce_mean(tf.where(tf.greater(denom, 1e-20),
                                     1.0 * tf.ones_like(num),
                                     1.0 * tf.zeros_like(num)), axis=1)
tensorflow.ones_like
1,351
import tensorflow as tf

        print('Conv layer {0} -> {1}'.format(bottom.get_shape().as_list(),
                                             conv_layer.get_shape().as_list()))
        return conv_layer

    def batch_norm_layer(self, name, input_tensor, training):
        with tf.variable_scope(name) as scope:
            return tf.contrib.layers.batch_norm(input_tensor, scope=scope,
                                                is_training=training, decay=0.99)

    def deconv_bn_relu(self, bottom, name, kernel_size, output_channels, initializer,
                       stride=1, bn=False, training=False, relu=True):
        input_shape = bottom.get_shape().as_list()
        input_channels = input_shape[-1]
tensorflow.contrib.layers.batch_norm
1,352
import tensorflow as tf

tf.app.flags.DEFINE_string('dataset', 'cifar10', "{cifar10, svhn}")
tf.app.flags.DEFINE_string('log_dir', "", "log_dir")
tf.app.flags.DEFINE_integer('seed', 1, "initial random seed")
tf.app.flags.DEFINE_bool('validation', False, "")
tf.app.flags.DEFINE_integer('batch_size', 32, "the number of examples in a batch")
tf.app.flags.DEFINE_integer('ul_batch_size', 128, "the number of unlabeled examples in a batch")
tf.app.flags.DEFINE_integer('eval_batch_size', 100, "the number of eval examples in a batch")
tf.app.flags.DEFINE_integer('eval_freq', 5, "")
tf.app.flags.DEFINE_integer('num_epochs', 120, "the number of epochs for training")
tf.app.flags.DEFINE_integer('epoch_decay_start', 80, "epoch of starting learning rate decay")
tf.app.flags.DEFINE_integer('num_iter_per_epoch', 400, "the number of updates per epoch")
tf.app.flags.DEFINE_float('learning_rate', 0.001, "initial learning rate")
tensorflow.app.flags.DEFINE_integer
1,353
import tensorflow as tf

    )
    bw_result = directional_attention_with_selections(
        rep_tensor, rep_mask, dep_selection, head_selection,
        'backward', hn, keep_unselected,
        'backward_resa', keep_prob, is_train, wd, activation
    )
    return tf.concat([fw_result, bw_result], -1)


def directional_attention_with_selections(
        rep_tensor, rep_mask, dep_selection, head_selection, direction=None, hn=None,
        keep_unselected=True, scope=None, keep_prob=1., is_train=None, wd=0., activation='elu'):
    bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]
    org_ivec = rep_tensor.get_shape().as_list()[2]
    ivec = hn or org_ivec

    with tf.variable_scope(scope or 'directional_attention_%s' % direction or 'diag'):
        # non-linear
        rep_map = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map', activation,
                                 False, wd, keep_prob, is_train)
        # ensure the selection is right
        dep_selection = tf.logical_and(rep_mask, dep_selection)
        head_selection = tf.logical_and(rep_mask, head_selection)

        rep_dep_tensor, rep_dep_mask, dep_org_idx = reduce_data_rep_max_len(rep_map, dep_selection)
        rep_head_tensor, rep_head_mask, head_org_idx = reduce_data_rep_max_len(rep_map, head_selection)
tensorflow.shape
1,354
import tensorflow as tf

            extended_vsize += self.max_phrase_size
            extra_zeros = tf.zeros((batch_size, self.max_phrase_size))
            vocab_dist = tf.concat(values=[vocab_dist, extra_zeros], axis=1)  # [batch_size, extended_vsize]
            if self.options.add_first_word_prob_for_phrase:  # add prob of the first word to each phrase
                attn_dist = add_first_word_prob_to_atten_dists(self.in_passage_words,
                                                               self.phrase_starts, vocab_dist, attn_dist)

        # match attn_dist[batch_size, passage_length] to sparse one-hot representation [batch_size, passage_length, extended_vsize]
        batch_nums = tf.range(0, limit=batch_size)  # shape (batch_size)
        batch_nums = tf.expand_dims(batch_nums, axis=1)  # shape (batch_size, 1)
        batch_nums = tf.tile(batch_nums, [1, passage_length])  # shape (batch_size, passage_length)

        step_nums = tf.range(0, limit=passage_length)  # [passage_length]
        step_nums = tf.expand_dims(step_nums, axis=0)  # shape (1, passage_length)
        step_nums = tf.tile(step_nums, [batch_size, 1])  # shape (batch_size, passage_length)

        indices = tf.stack((batch_nums, step_nums, passage_word_idx), axis=2)  # shape (batch_size, passage_length, 3)
        indices = tf.reshape(indices, [-1, 3])  # [batch_size * passage_length, 3]
        indices = tf.cast(indices, tf.int64)

        shape = [batch_size, passage_length, extended_vsize]
        shape = tf.cast(shape, tf.int64)

        attn_dist = tf.reshape(attn_dist, shape=[-1])  # [batch_size*passage_length]
        one_hot_spare_rep = tf.SparseTensor(indices=indices, values=attn_dist,
                                            dense_shape=shape)  # [batch_size, passage_length, extended_vsize]
tensorflow.expand_dims
1,355
import tensorflow as tf

      def literal(x):
        return 1.0 if x == 0.0 else 0.0
      x = tf.constant(0.0, tf.float64)
      y, = tf.py_func(literal, [x], [tf.float64])
tensorflow.constant
1,356
import tensorflow as tf

    # call the proper policy and value estimators for each env
    for i in range(n_particles):
        if ENV_NAME == "Pendulum-v0":
            policy_estimator[i] = PolicyEstimator_Pendulum(
                entropy_beta=ENTROPY_BETA, learning_rate=POLICY_LR, par_idx=i)
            value_estimator[i] = ValueEstimator_Pendulum(learning_rate=VALUE_LR, par_idx=i)
        if ENV_NAME == "MountainCarContinuous-v0":
            policy_estimator[i] = PolicyEstimator_MountainCarContinuous(
                entropy_beta=ENTROPY_BETA, learning_rate=POLICY_LR, par_idx=i)
            value_estimator[i] = ValueEstimator_MountainCarContinuous(learning_rate=VALUE_LR, par_idx=i)

    svpg = SVPG(policy_estimator, independent_flag_svpg, learning_rate=POLICY_LR)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Note, due to randomness in the policy the number of episodes you need varies
        # TODO: Sometimes the algorithm gets stuck, I'm not sure what exactly is happening there.
        stats = advantage_actor_critic(env, policy_estimator, value_estimator, svpg,
                                       NUM_EPISODES, MAX_EPI_STEP, discount_factor=DISCOUNT_FACTOR)
tensorflow.global_variables_initializer
1,357
import tensorflow as tf

        random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0,
                                           maxval=num_actions, dtype=tf.int64)
        chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1,
                                         dtype=tf.float32) < eps
        stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

        output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions,
                                 lambda: deterministic_actions)
        update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0,
                                             lambda: update_eps_ph, lambda: eps))
        updates = [
            update_eps_expr,
tensorflow.cond
1,358
import tensorflow as tf

        cell_basic = tf.contrib.rnn.BasicRNNCell(state_size, activation=tf.identity)
        cell_drop = tf.contrib.rnn.DropoutWrapper(cell_basic, variational_recurrent=True,
                                                  dtype=tf.float32, input_size=num_input,
                                                  input_keep_prob=input_prob,
                                                  state_keep_prob=state_prob)
    elif activation == 'relu':
        cell_basic = tf.contrib.rnn.BasicRNNCell(state_size, activation=tf.nn.relu)
        cell_drop = tf.contrib.rnn.DropoutWrapper(cell_basic, variational_recurrent=True,
                                                  dtype=tf.float32, input_size=num_input,
                                                  input_keep_prob=input_prob,
                                                  state_keep_prob=state_prob)
tensorflow.contrib.rnn.BasicRNNCell
1,359
import tensorflow as tf

        X = discrim_conv('d_out', X, 1, 1, norm=False, nonlin=False, init_stddev=0.02)
        print('D out:', X.get_shape().as_list())
        return X

    def atrous_discriminator(self, X, reuse):

        def atrous_convs(net, scope, rate=None, depth=256, reuse=None):
            """
            ASPP layer: 1×1 convolution and three 3×3 atrous convolutions
            """
            with tf.variable_scope(scope, reuse=reuse):
                pyram_1x1_0 = self.conv('_1x1', net, depth, size=1, stride=1, padding="SAME")
                pyram_3x3_1 = self.conv('_3x3', net, depth, size=3, stride=1, padding="SAME")
                pyram_3x3_2 = self.conv('_atr_3x3_1', net, depth, size=3, stride=1,
                                        padding="SAME", dilation=rate[0])
                pyram_3x3_3 = self.conv('_atr_3x3_2', net, depth, size=3, stride=1,
                                        padding="SAME", dilation=rate[1])
                # pyram_3x3_4 = self.z_conv('_atr_3x3_3', net, depth/2, size=3, stride=1,
                #                           padding="SAME", dilation=rate[2])

                net = tf.concat((pyram_1x1_0, pyram_3x3_1, pyram_3x3_2, pyram_3x3_3),
                                axis=3, name="concat")
                net = self.conv('_1x1_output', net, depth, size=1, stride=1, padding="SAME")
                # pyram_1x1_0 = self.conv('_1x1', net, depth, size=1, stride=1, padding="SAME")
tensorflow.variable_scope
1,360
from tensorflow.python.ops import array_ops

    # lvalue, and takes its shape from that? This would allow accumulate_n to
    # work in all situations that add_n currently works.
    raise ValueError("Cannot infer the shape of the accumulator for "
                     "accumulate_n. Pass the shape argument, or set the shape "
                     "of at least one of the inputs.")
  with ops.op_scope(inputs, name, "AccumulateN") as name:
    var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)
    var_name = var.op.name
    var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
    update_ops = []
    for input_tensor in inputs:
      op = state_ops.assign_add(var, input_tensor, use_locking=True)
      update_ops.append(op)
    with ops.control_dependencies(update_ops):
      return gen_state_ops._destroy_temporary_variable(var, var_name=var_name,
tensorflow.python.ops.array_ops.zeros_like
1,361
import tensorflow as tf

sess.run(my_var)

row_dim = 2
col_dim = 3
zero_var = tf.Variable(tf.zeros([row_dim, col_dim]))
ones_var = tf.Variable(tf.ones([row_dim, col_dim]))

sess.run(zero_var.initializer)
sess.run(ones_var.initializer)
tensorflow.zeros
1,362
import tensorflow as tf

        tf.placeholder(dtype), tf.placeholder(tf.int64))
tensorflow.placeholder
1,363
import tensorflow as tf

    self.assertEqual(tf.float32, tracker.state.c.dtype)
    self.assertEqual((batch_size, tracker_size), tracker.state.c.shape)
    self.assertEqual(tf.float32, tracker.state.h.dtype)
    self.assertEqual((batch_size, tracker_size), tracker.state.h.shape)

  def testSPINN(self):
    with tf.device(self._test_device):
      embedding_dims = 10
      d_tracker = 8
      sequence_length = 15
      num_transitions = 27

      config_tuple = collections.namedtuple(
tensorflow.device
1,364
import tensorflow as tf

            params.learning_rate_boundaries,
            params.learning_rate_values)
    elif params.learning_rate_decay == "none":
        return learning_rate
    else:
        raise ValueError("Unknown learning_rate_decay")


def session_config(params):
    optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1,
                                            do_function_inlining=True)
    graph_options = tf.GraphOptions(optimizer_options=optimizer_options)
    config = tf.ConfigProto(allow_soft_placement=True,
                            graph_options=graph_options)

    if params.device_list:
        device_str = ",".join([str(i) for i in params.device_list])
        config.gpu_options.visible_device_list = device_str

    config.gpu_options.per_process_gpu_memory_fraction = params.gpu_memory_fraction
    config.gpu_options.allow_growth = True

    return config
tensorflow.GraphOptions
1,365
import tensorflow as tf

        self.L2_firing_rate = params.get('L2_firing_rate', 0)
        self.sussillo_constant = params.get('sussillo_constant', 0)

        # trainable features
        self.W_in_train = params.get('W_in_train', True)
        self.W_rec_train = params.get('W_rec_train', True)
        self.W_out_train = params.get('W_out_train', True)
        self.b_rec_train = params.get('b_rec_train', True)
        self.b_out_train = params.get('b_out_train', True)
        self.init_state_train = params.get('init_state_train', True)

        # Tensorflow initializations
        self.x = tf.placeholder("float", [N_batch, N_steps, N_in])
        self.y = tf.placeholder("float", [N_batch, N_steps, N_out])
        self.output_mask = tf.placeholder("float", [N_batch, N_steps, N_out])

        # trainable variables
        with tf.variable_scope("model"):
            # ------------------------------------------------
            # Random initialization, or load weights from weights path,
            # for initial state, weight matrices, and bias weights
            # ------------------------------------------------
            if self.load_weights_path is None:
                # random initializations
                init_state_initializer = tf.random_normal_initializer(mean=0.1, stddev=0.01)
                W_in_initializer = tf.constant_initializer(
                    0.1 * np.random.uniform(-1, 1, size=(self.N_rec, self.N_in)))
tensorflow.placeholder
1,366
import tensorflow as tf


def _get_avgpool_scale(kw, kh):
    if kh > 255 or kw > 255:
        return 1.0
    elif kh == 3 and kw == 3:
        return 9.0 * 7.0 / 64.0
    elif kh == 5 and kw == 5:
        return 25.0 * 10.0 / 256.0
    elif kh == 6 and kw == 6:
        return 36.0 * 7.0 / 256.0
    elif kh == 7 and kw == 7:
        return 49.0 * 21.0 / 1024.0
    elif kh == 14 and kw == 14:
        return 196.0 * 21.0 / 4096.0
    else:
        rec = tf.cast(kw * kh, tf.float32)
        n_max = 7 + tf.math.ceil(tf.math.log(rec) / tf.math.log(2.))
        ns = tf.range(0., n_max)
        ns_pow = tf.pow(2., ns)
        ks = tf.round(ns_pow / rec)
        diffs = tf.math.abs(ks / ns_pow - 1 / rec)
        n = tf.argmin(diffs)
        k = ks[n]
        scale = k / tf.pow(2., tf.cast(n, tf.float32))
        scale *= rec
        return scale


@register_keras_serializable(
    package='Vitis', name='VitisGlobalAveragePooling2D')
class VitisGlobalAveragePooling2D(tf.keras.layers.GlobalAveragePooling2D):
tensorflow.math.log
1,367
import tensorflow as tf

        memory_ = tf.nn.relu(
            dense(d_memory, hidden, use_bias=False, scope="memory"))
        outputs = tf.matmul(inputs_, tf.transpose(
            memory_, [0, 2, 1])) / (hidden ** 0.5)
        mask = tf.tile(tf.expand_dims(mask, axis=1), [1, JX, 1])
        logits = tf.nn.softmax(softmax_mask(outputs, mask))
        outputs = tf.matmul(logits, memory)
        res = tf.concat([inputs, outputs], axis=2)
tensorflow.expand_dims
1,368
import tensorflow as tf

      raise RuntimeError('Unexpected features in schema')

  def test_global_annotation(self):
    # TODO(b/132098015): Schema annotations aren't yet supported in OSS builds.
    # pylint: disable=g-import-not-at-top
    try:
      from tensorflow_transform import annotations_pb2
    except ImportError:
      return
    # pylint: enable=g-import-not-at-top
    with tf.compat.v1.Graph().as_default() as graph:
      outputs = {
          'foo': tf.convert_to_tensor([0, 1, 2, 3], dtype=tf.int64),
          'bar': tf.convert_to_tensor([0, 2, 0, 2], dtype=tf.int64),
      }

      # Annotate an arbitrary proto at the schema level (not sure what global
      # schema boundaries would mean, but hey I'm just a test).
      boundaries = tf.constant([[1.0]])
      message_type = annotations_pb2.BucketBoundaries.DESCRIPTOR.full_name
      sizes = tf.expand_dims([tf.size(boundaries)], axis=0)
      message_proto = tf.raw_ops.EncodeProto(
          sizes=sizes, values=[tf.cast(boundaries, tf.float32)],
          field_names=['boundaries'], message_type=message_type)[0]
      type_url = os.path.join('type.googleapis.com', message_type)
      schema_inference.annotate(type_url, message_proto)
tensorflow.convert_to_tensor
1,369
import tensorflow as tf

        # for each key I get the collection of summary nodes
        # I set up a filewriter for each summary node
        self.summary_nodes = {sk: tf.get_collection(sk) for sk in self.summary_keys}
        for sk in self.summary_keys:
            self.summary_writers[sk] = [tf.compat.v1.summary.FileWriter(self._tensorboard_dir + sn.name)
                                        for sn in self.summary_nodes[sk]]

    def create_hooks(self, config):
tensorflow.compat.v1.summary.FileWriter
1,370
import tensorflow as tf

    # number of steps.
    if FLAGS.use_tpu:
      assert len(eval_examples) % FLAGS.eval_batch_size == 0
      eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)

    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)

    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on.
      while len(predict_examples) % FLAGS.predict_batch_size != 0:
tensorflow.gfile.GFile
1,371
import tensorflow as tf

  def testMultiSaverCollection(self):
    self._testMultiSaverCollectionSave()
    self._testMultiSaverCollectionRestore()

  def testBinaryAndTextFormat(self):
    test_dir = self._TestDir("binary_and_text")
    filename = os.path.join(test_dir, "metafile")
    with self.test_session(graph=tf.Graph()):
      # Creates a graph.
      tf.Variable(10.0, name="v0")
      # Exports the graph as binary format.
      tf.train.export_meta_graph(filename, as_text=False)
    with self.test_session(graph=tf.Graph()):
      # Imports the binary format graph.
      saver = tf.train.import_meta_graph(filename)
      # Exports the graph as text format.
      saver.export_meta_graph(filename, as_text=True)
    with self.test_session(graph=tf.Graph()):
tensorflow.Variable
1,372
import tensorflow as tf
tensorflow.reshape
1,373
import tensorflow as tf

    # The output is (mean, var).
    if self._compute_variance and not self._compute_weighted:
      return [
          analyzer_nodes.TensorInfo(
              tf.as_dtype(self._output_numpy_dtype), self._output_shape, None)
      ] * 2
    else:
      return [
          analyzer_nodes.TensorInfo(
              tf.as_dtype(np.int64), self._output_shape, None),
          analyzer_nodes.TensorInfo(
              tf.as_dtype(self._output_numpy_dtype), self._output_shape, None),
          analyzer_nodes.TensorInfo(
              tf.as_dtype(self._output_numpy_dtype), self._output_shape, None),
          analyzer_nodes.TensorInfo(
              tf.as_dtype(self._output_numpy_dtype), self._output_shape, None)
      ]
tensorflow.as_dtype
1,374
import tensorflow as tf

                                   _MergeOneToken(tokens, best_id - 1)], axis=0)

    left_candidates = tf.cond(tf.equal(best_id, 0), lambda: empty, _MergeLeft)

    def _MergeRight():
tensorflow.equal
1,375
import tensorflow as tf

                + self.alpha * (
                    tf.matmul(
                        tf.nn.relu(state),
                        self.W_rec * self.rec_Connectivity,
tensorflow.nn.relu
1,376
import tensorflow as tf

    do not have a Euclidean norm greater than maxnorm. Rows that exceed it
    are scaled to length.

    Args:
        var_matrix: 2D mutable tensor (Variable) to operate on
        indices: 1D tensor with the row indices to constrain
        maxnorm: the maximum Euclidean norm

    Returns:
        An operation that will update var_matrix when run in a Session
    '''
    selected_rows = tf.nn.embedding_lookup(var_matrix, indices)
    row_norms = tf.sqrt(tf.reduce_sum(tf.square(selected_rows), 1))
    scaling = maxnorm / tf.maximum(row_norms, maxnorm)
    scaled = selected_rows * tf.expand_dims(scaling, 1)
    return tf.scatter_update(var_matrix, indices, scaled)


def dense_maxnorm_update(var_matrix, maxnorm=1.0):
    '''Dense update operation that ensures all rows in var_matrix
    do not have a Euclidean norm greater than maxnorm. Rows that exceed
    it are scaled to length.

    Args:
        var_matrix: 2D mutable tensor (Variable) to operate on
        maxnorm: the maximum Euclidean norm
tensorflow.maximum
1,377
import tensorflow as tf

slim = tf.contrib.slim

global first
first = True

classnum = 12
testnum = tf.placeholder(tf.int32)
trainnum = tf.placeholder(tf.int32)
validnum = tf.placeholder(tf.int32)
learnrate = tf.placeholder(tf.float32)


def getinputs(path):
    filename_queue = tf.train.string_input_producer([path])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
tensorflow.placeholder
1,378
import tensorflow as tf

    with self.test_session():
      # Creates a graph.
      v0 = tf.Variable(0.0)
      var = tf.Variable(10.0)
      tf.add(v0, var)

      @function.Defun(x=tf.float32)
tensorflow.Variable
1,379
import tensorflow as tf

                    biases = tf.get_variable('biases', [output_node],
                                             initializer=tf.constant_initializer(0.0))
                    layer_out = tf.matmul(prev_x, weights) + biases

            # Output of Network
            y = layer_out

            # Global step
            with tf.variable_scope('training_step', reuse=tf.AUTO_REUSE):
                global_step = tf.get_variable("global_step", [], dtype=tf.int32,
                                              initializer=tf.constant_initializer(0),
                                              trainable=False)

            # Loss value
            reg_item = tf.contrib.layers.l1_l2_regularizer(L1_reg, L2_reg)
            reg_term = tf.contrib.layers.apply_regularization(reg_item, self.nnweights)
            loss_fun = self._negative_log_likelihood(y_, y)
            loss = loss_fun + reg_term

            # SGD Optimizer
            if optimizer == 'sgd':
                lr = tf.train.exponential_decay(
                    learning_rate,
                    global_step,
                    1,
                    learning_rate_decay
                )
                train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss, global_step=global_step)
tensorflow.contrib.layers.l1_l2_regularizer
1,380
import tensorflow as tf
tensorflow.Session
1,381
import tensorflow as tf

                ref=self.internals_memory[name],
                indices=indices,
                updates=internals[name]
            ))
        for name in sorted(actions):
            assignments.append(tf.scatter_update(
                ref=self.actions_memory[name],
                indices=indices,
                updates=actions[name]
            ))
        assignments.append(tf.scatter_update(ref=self.terminal_memory, indices=indices, updates=terminal))
        assignments.append(tf.scatter_update(ref=self.reward_memory, indices=indices, updates=reward))

        # Add episode indices.
        with tf.control_dependencies(control_inputs=assignments):
            num_episodes = tf.count_nonzero(input_tensor=terminal, axis=0, dtype=util.tf_dtype('int'))
            assignment = tf.assign(
                ref=self.episode_indices[self.episode_count: self.episode_count + num_episodes],
                value=tf.boolean_mask(tensor=indices, mask=terminal)
            )

        # Increment episode count.
        with tf.control_dependencies(control_inputs=(assignment,)):
            assignment = tf.assign_add(ref=self.episode_count, value=num_episodes)

        # Increment memory index.
        with tf.control_dependencies(control_inputs=(assignment,)):
            assignment = tf.assign(
tensorflow.control_dependencies
1,382
import tensorflow as tf

        reuse=True if count else None,
        is_training=is_training,
        fine_tune_batch_norm=fine_tune_batch_norm)

    # Resize the logits to have the same dimension before merging.
    for output in sorted(outputs_to_logits):
      outputs_to_logits[output] = tf.image.resize_bilinear(
          outputs_to_logits[output], [logits_height, logits_width],
          align_corners=True)

  # Return when only one input scale.
  if len(image_pyramid) == 1:
tensorflow.image.resize_bilinear
1,383
import tensorflow as tf

        depth=depth)


def cbhg(inputs, input_lengths, is_training, scope, K, projections, depth):
    with tf.variable_scope(scope):
        with tf.variable_scope('conv_bank'):
            # Convolution bank: concatenate on the last axis to stack channels from all convolutions
            conv_outputs = tf.concat(
                [conv1d(inputs, k, 128, tf.nn.relu, is_training, 'conv1d_%d' % k) for k in range(1, K + 1)],
                axis=-1
tensorflow.variable_scope
1,384
import tensorflow as tf

    dist = fake_distribution(batch_shape=[None, 3], event_shape=None)
    # There's no notion of partially known shapes in eager mode, so exit
    # early.
    sample_shape = tf.convert_to_tensor([6, 7], dtype=tf.int32)
    y = dist._set_sample_static_shape(x, sample_shape)
    self.assertTrue(y.shape.ndims is None)
tensorflow.convert_to_tensor
1,385
import tensorflow as tf

        facts = tf.concat(facts, 2)
        print("query_size mismatch")
        query = tf.concat(values=[
            query,
            query,
        ], axis=1)

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])

    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
    d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
    d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
    scores = d_layer_3_all

    if mask is not None:
        mask = tf.equal(mask, tf.ones_like(mask))
        key_masks = tf.expand_dims(mask, 1)  # [B, 1, T]
        paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
        scores = tf.where(key_masks, scores, paddings)  # [B, 1, T]

    # Activation
tensorflow.concat
1,386
import tensorflow as tf

    mask2 = tf.constant([[1, 0], [1, 1]], dtype=tf.float32)
    mask3 = tf.constant([[1, 1], [1, 1]], dtype=tf.float32)
    mask4 = tf.constant([[0, 0], [0, 0]], dtype=tf.float32)
    mask5 = tf.constant([[1, 0], [1, 0]], dtype=tf.float32)
    masks1 = tf.stack([mask0, mask1, mask2])
    masks2 = tf.stack([mask3, mask4, mask5])
    ious = isu.get_pairwise_iou_matrix(masks1, masks2)
    expected_ious = tf.constant([[0.5, 0.0, 1.0/3.0],
tensorflow.constant
1,387
import tensorflow as tf

               dropout_keep_prob=None,
               weights=None):
    super(WordAvgModel, self).__init__(batch_size, max_sequence_len,
                                       out_vocab_size, c2v)

    super(WordAvgModel, self)._DoPredictions(c2v.embedding_dims, self._inputs)

    self.cost = tf.reduce_mean(self.example_weights * self._xent)


class WordSeqModel(BaseModel):
  """A bag of word embeddings."""

  def __init__(self,
               out_vocab_size=None,
               batch_size=10,
               model_params=None,
tensorflow.reduce_mean
1,388
from tensorflow.python.framework import tensor_shape

                        name=name)


@ops.RegisterShape("BatchMatMul")
def _BatchMatMulShape(op):
  """Shape function for BatchMatMul op."""
  a_shape = op.inputs[0].get_shape()
  adj_a = op.get_attr("adj_x")
  b_shape = op.inputs[1].get_shape()
  adj_b = op.get_attr("adj_y")
  if not a_shape.is_fully_defined() or not b_shape.is_fully_defined():
    return [tensor_shape.unknown_shape()]
  batch_dims = a_shape[:-2].merge_with(b_shape[:-2])
  output_rows = a_shape[-1] if adj_a else a_shape[-2]
  output_cols = b_shape[-2] if adj_b else b_shape[-1]
  inner_a = a_shape[-2] if adj_a else a_shape[-1]
  inner_b = b_shape[-1] if adj_b else b_shape[-2]
  inner_a.assert_is_compatible_with(inner_b)
  return [batch_dims.concatenate([output_rows, output_cols])]


def sigmoid(x, name=None):
tensorflow.python.framework.tensor_shape.unknown_shape
1,389
import tensorflow as tf

        predict_batch_size=eval_batch_size,
        train_batch_size=hparams.train_batch_size,
        eval_batch_size=eval_batch_size,
        export_to_tpu=FLAGS.export_to_tpu,
        experimental_exported_model_uses_all_cores=FLAGS
        .inference_with_all_cores)
  else:
    save_checkpoints_steps = (FLAGS.save_checkpoints_steps or
                              FLAGS.iterations_per_loop)
    run_config = tf.estimator.RunConfig(
        model_dir=FLAGS.model_dir,
        save_checkpoints_steps=save_checkpoints_steps)
    image_classifier = tf.estimator.Estimator(
        model_fn=model.model_fn,
        config=run_config,
        params=estimator_parmas)

  # Input pipelines are slightly different (with regards to shuffling and
tensorflow.estimator.RunConfig
1,390
from tensorflow.python.framework import ops

    eval_dir = os.path.join(self._model_dir, 'eval')
    with ops.Graph().as_default() as g:
tensorflow.python.framework.ops.Graph
1,391
import tensorflow as tf

        for var in tf.trainable_variables():
            self._train_summaries.append(var)

        if mode == 'TEST':
            # FLAGS2["bbox_normalize_means"] = (0.0, 0.0, 0.0, 0.0)
            # FLAGS2["bbox_normalize_stds"] = (0.1, 0.1, 0.1, 0.1)
            stds = np.tile(np.array(cfg.FLAGS2["bbox_normalize_stds"]), (self._num_classes))
            means = np.tile(np.array(cfg.FLAGS2["bbox_normalize_means"]), (self._num_classes))
            self._predictions["bbox_pred"] *= stds
            self._predictions["bbox_pred"] += means
        else:
            self._add_losses()
            layers_to_output.update(self._losses)

            val_summaries = []  # holds the tf.summary.image op and the self._losses summaries
            with tf.device("/cpu:0"):
                val_summaries.append(self._add_image_summary(self._image, self._gt_boxes))
                for key, var in self._event_summaries.items():  # add self._losses
                    val_summaries.append(tf.summary.scalar(key, var))
                for key, var in self._score_summaries.items():
                    # self._score_summaries.update(self._anchor_targets)
                    # self._score_summaries.update(self._proposal_targets)
                    self._add_score_summary(key, var)
                for var in self._act_summaries:  # add the head network and the RPN layer
                    self._add_act_summary(var)
                '''
                for var in tf.trainable_variables():
                    self._train_summaries.append(var)
                '''
                for var in self._train_summaries:
                    # add tf.trainable_variables() to monitor how tensor distributions evolve over training iterations
                    self._add_train_summary(var)
tensorflow.device
1,392
import tensorflow as tf

        opt = tf.train.AdamOptimizer(self.LR)
        self.update_a_op = opt.apply_gradients(zip(self.a_grads, self.pi_params))
        self.update_c_op = opt.apply_gradients(zip(self.c_grads, self.vf_params))

        self.sess.run(tf.global_variables_initializer())

        # Tensorboard
        if summary_dir is not None:
            self.writer = tf.summary.FileWriter(summary_dir)
            tf.summary.scalar('Loss/Policy', loss_pg)
            tf.summary.scalar('Loss/Value', loss_vf)
            tf.summary.scalar('Loss/Entropy', - 0.01 * tf.reduce_mean(pi.entropy()))
            tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode()))
            tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev()))
            tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf))
            self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))

    # AC net
    def build_anet(self, state_in, name, reuse=False):
        reg = tf.contrib.layers.l2_regularizer(1e-3)
        with tf.variable_scope(name, reuse=reuse):
            layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
            mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
            # sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
            sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim,
                                    initializer=tf.constant_initializer(0.5))
            sigma = tf.clip_by_value(sigma, 0.0, 1.0)
            norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
            params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
tensorflow.get_collection
1,393
import tensorflow as tf

        atten_hidden = tf.tanh(
            tf.add(
                tf.matmul(self.position_emb, W),
                tf.matmul(output, U)))
        alpha = tf.nn.softmax(
            tf.reshape(tf.matmul(atten_hidden, V), [-1, shape[1], 1]),
            axis=1)
        output = tf.reshape(output, [-1, shape[1], 2 * self.config.hidden_size])
        C = tf.multiply(alpha, output)
        return tf.concat([output, C], axis=-1)

    def _train_epoch(self, train_batches, dropout):
tensorflow.matmul
1,394
import tensorflow as tf

      d_all, d_all_logits, d_latents = self.discriminator(
          x=all_images, y=all_y, is_training=is_training)
      z_projs = self._latent_projections(d_latents)
      d_real, d_fake, _, _ = tf.split(d_all, 4)
      d_real_logits, d_fake_logits, _, _ = tf.split(d_all_logits, 4)
      z_projs_real, z_projs_fake, z_aug_projs_real, z_aug_projs_fake = tf.split(z_projs, 4)

      self.d_loss, _, _, self.g_loss = loss_lib.get_losses(
          d_real=d_real, d_fake=d_fake,
          d_real_logits=d_real_logits, d_fake_logits=d_fake_logits)

      penalty_loss = penalty_lib.get_penalty_loss(
          x=images, x_fake=generated, y=y, is_training=is_training,
tensorflow.split
1,395
import tensorflow as tf

        dec, mem = tf.nn.seq2seq.attention_decoder(
            dec_inp, enc_state, attn_states, cell, output_size=4)
        sess.run([tf.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 4), res[0].shape)

        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].shape)

  def testDynamicAttentionDecoder2(self):
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        cell = tf.nn.rnn_cell.GRUCell(2)
        inp = tf.constant(0.5, shape=[2, 2, 2])
        enc_outputs, enc_state = tf.nn.dynamic_rnn(cell, inp, dtype=tf.float32)
        attn_states = enc_outputs
        dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
        dec, mem = tf.nn.seq2seq.attention_decoder(
            dec_inp, enc_state, attn_states, cell, output_size=4, num_heads=2)
        sess.run([tf.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
tensorflow.constant_initializer
1,396
import tensorflow as tf

    max_src_len = tf.shape(reconstructed_weights)[1]
    batch_size = tf.shape(reconstructed_weights)[0]
    attn_loss = tf.matmul(reconstructed_weights, attention_weights) - tf.eye(max_src_len)
    src_mask = tf.sequence_mask(encoder_input_length[0], maxlen=max_src_len, dtype=tf.float32)
    src_mask = tf.einsum('ij,ik->ijk', src_mask, src_mask)
    attn_loss *= tf.to_float(src_mask)  # don't take padding words into account
    attn_loss = tf.norm(attn_loss) / tf.to_float(batch_size)
tensorflow.sequence_mask
1,397
import tensorflow as tf

        name = 'dropout' + str(self.counts['dropout'])
        with tf.variable_scope(name):
tensorflow.variable_scope
1,398
import tensorflow as tf

        pred_heatmap = tf.reshape(pred_heatmap, [-1, heatmap_size, heatmap_size])
        if data_format == 'channels_first':
            image_ = tf.transpose(image_, perm=(1, 2, 0))

        save_image_op = tf.py_func(save_image_with_heatmap,
                                   [image_, height, width,
                                    heatmap_size,
                                    tf.reshape(pred_heatmap * 255., [-1, heatmap_size, heatmap_size]),
                                    tf.reshape(predictions, [-1, heatmap_size, heatmap_size]),
                                    config.left_right_group_map[category][0],
                                    config.left_right_group_map[category][1],
                                    config.left_right_group_map[category][2]],
                                   tf.int64, stateful=True)
        with tf.control_dependencies([save_image_op]):
            pred_x, pred_y = pred_x * 1., pred_y * 1.
tensorflow.reshape
1,399