Columns:
seed: string (length 25 to 2.89k)
seed_api: string (length 14 to 102)
index: int64 (0 to 14.8k)
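Each record below follows this schema: a seed code snippet, the fully-qualified seed_api it exercises, and a running index. The sketch below shows one way such rows could be read and filtered by API name; it is a minimal illustration only, and the file name seeds.jsonl and the JSON-lines layout are assumptions, not something specified by this dump.

import json

def load_rows(path="seeds.jsonl"):
    # Assumed layout: one JSON object per line with keys "seed", "seed_api", "index".
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

def rows_for_api(rows, api_name):
    # Keep only the snippets recorded for one fully-qualified API name.
    return [r for r in rows if r["seed_api"] == api_name]

rows = list(load_rows())
scalar_rows = rows_for_api(rows, "tensorflow.summary.scalar")
print(len(scalar_rows), "snippets recorded for tensorflow.summary.scalar")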
import tensorflow as tf losses[loss_name] = mean_loss return losses def summarize_features(features, num_shards=1): with tf.name_scope("input_stats"): for (k, v) in six.iteritems(features): if isinstance(v, tf.Tensor) and v.get_shape().ndims > 1: tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards) tf.summary.scalar("%s_length" % k, tf.shape(v)[1]) nonpadding = tf.to_float(tf.not_equal(v, 0)) nonpadding_tokens = tf.reduce_sum(nonpadding) tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens) tf.summary.scalar("%s_nonpadding_fraction" % k, tf.reduce_mean(nonpadding)) _already_logged = set() def _eager_log(level, *args): if context.in_eager_mode() and args in _already_logged: return _already_logged.add(args)
tensorflow.summary.scalar
300
import tensorflow as tf def binary_mask(shape, p=0.7): samples = tf.random_uniform(shape, minval=0.0, maxval=1.0) mask = tf.less_equal(samples, p) return tf.cast(mask, tf.float32) def weighted_arithmetic_mean(w, x): numer = tf.reduce_sum(w*x) denom = tf.reduce_sum(w) return tf.div(numer, denom)
tensorflow.reduce_sum
301
import tensorflow as tf masked_lm_mean_loss = tf.metrics.mean( values=masked_lm_example_loss, weights=masked_lm_weights) next_sentence_log_probs = tf.reshape( next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]]) next_sentence_predictions = tf.argmax( next_sentence_log_probs, axis=-1, output_type=tf.int32) next_sentence_labels = tf.reshape(next_sentence_labels, [-1]) next_sentence_accuracy = tf.metrics.accuracy( labels=next_sentence_labels, predictions=next_sentence_predictions)
tensorflow.argmax
302
import tensorflow as tf f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature([feature.label_id]) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
tensorflow.train.Features
303
import tensorflow as tf with tf.Graph().as_default(): initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale) with tf.name_scope("Train"): train_input = PTBInput(config=config, data=train_data, name="TrainInput") with tf.variable_scope("Model", reuse=None, initializer=initializer): m = PTBModel(is_training=True, config=config, input_=train_input) tf.summary.scalar("Training Loss", m.cost)
tensorflow.name_scope
304
import tensorflow as tf """ idx_iter_beg = int(self.nb_iters_train * FLAGS.ws_iter_ratio_beg) idx_iter_end = int(self.nb_iters_train * FLAGS.ws_iter_ratio_end) base = tf.cast(self.global_step - idx_iter_beg, tf.float32) / (idx_iter_end - idx_iter_beg) base = tf.minimum(1.0, tf.maximum(0.0, base)) prune_ratio_dyn = prune_ratio_fnl * (1.0 - tf.pow(1.0 - base, FLAGS.ws_prune_ratio_exp)) return prune_ratio_dyn def __calc_grads_pruned(self, grads_origin): """Calculate the mask-pruned gradients.
tensorflow.pow
305
import tensorflow as tf # image_size, # num_channels # ) centered_grouped_image = tf.subtract( x=grouped_image, y=grouped_mean, name="centered_grouped_image" )
tensorflow.subtract
306
from tensorflow.python.training import moving_averages """Builds the exponential moving average update ops.""" update_mean_op = moving_averages.assign_moving_average( variable=self._moving_mean, value=mean, decay=self._decay_rate, name="update_moving_mean").op update_variance_op = moving_averages.assign_moving_average( variable=self._moving_variance, value=variance, decay=self._decay_rate, name="update_moving_variance").op return update_mean_op, update_variance_op
tensorflow.python.training.moving_averages.assign_moving_average
307
import tensorflow as tf 'The nearest distance of the crop border to all keypoints.') tf.app.flags.DEFINE_integer( 'train_epochs', 50, 'The number of epochs to use for training.') tf.app.flags.DEFINE_integer( 'epochs_per_eval', 20, 'The number of training epochs to run between evaluations.') tf.app.flags.DEFINE_integer( 'batch_size', 10, 'Batch size for training and evaluation.') tf.app.flags.DEFINE_integer( 'xt_batch_size', 10, 'Batch size for training and evaluation.') tf.app.flags.DEFINE_boolean(
tensorflow.app.flags.DEFINE_integer
308
import tensorflow as tf with tf.variable_scope('conv1_x'): print('Building unit: conv1') self.conv1 = self._conv('conv1', self.x_preprocessed, padding= [[0,0],[3,3],[3,3],[0,0]], num_filters=64, kernel_size=(7, 7), stride=(2, 2), l2_strength=self.wd, bias=self.bias) self.conv1 = self._bn('bn1', self.conv1) self.conv1 = self._relu('relu1', self.conv1) _debug(self.conv1) self.conv1= tf.pad(self.conv1, tf.constant([[0,0],[1,1],[1,1],[0,0]]), "CONSTANT") self.conv1 = tf.nn.max_pool(self.conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='max_pool1') _debug(self.conv1) print('conv1-shape: ' + str(self.conv1.shape.as_list())) with tf.variable_scope('conv2_x'): self.conv2 = self._residual_block('conv2_1', self.conv1, 64) _debug(self.conv2) self.conv2 = self._residual_block('conv2_2', self.conv2, 64) _debug(self.conv2)
tensorflow.constant
309
import tensorflow as tf def _conv_nonzero(): # Gather patches. p = tf.gather_nd(x_, blk_indices_) # Reshape patches. p = tf.reshape(p, [blk_shape[0], blk_shape[1], blk_shape[2], -1]) # Convolution on patches. q = tf.nn.conv2d(p, w, strides, 'VALID', use_cudnn_on_gpu=True) # Paste convolution results. q_shape = tf.shape(q) def _strides_gt_one(): # Calculate output indices when strides > 1. blk_indices_crop = tf.strided_slice(blk_indices, [0, 0, 0, 0], [ blk_shape[0], q_shape[1] * strides[1], q_shape[2] * strides[2], 3 ], strides) blk_indices_crop = blk_indices_crop // tf.stack([1, strides[1], strides[2]]) return blk_indices_crop def _strides_one():
tensorflow.shape
310
import tensorflow as tf global_step = tf.get_variable("global_step", [], dtype=tf.int32, initializer=tf.constant_initializer(0), trainable=False) # Loss value reg_item = tf.contrib.layers.l1_l2_regularizer(L1_reg, L2_reg) reg_term = tf.contrib.layers.apply_regularization(reg_item, self.nnweights) loss_fun = self._negative_log_likelihood(y_, y) loss = loss_fun + reg_term # SGD Optimizer if optimizer == 'sgd': lr = tf.train.exponential_decay( learning_rate,
tensorflow.contrib.layers.apply_regularization
311
import tensorflow as tf else: graph_def.ParseFromString(f.read()) with graph.as_default(): tf.import_graph_def(graph_def, name='') tf.io.write_graph(graph_def, '/tmp/', 'optimized_graph.pb',as_text=False) return graph
tensorflow.io.write_graph
312
import tensorflow as tf [1, 0]], dtype=tf.float32) masks = tf.stack([mask0, mask1, mask2, mask3, mask4, mask5])
tensorflow.stack
313
from tensorflow.python.framework import constant_op label = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32) return features, label def _ranking_train_input_fn(): features = { "a.f1": constant_op.constant([[3.], [0.3], [1.]]), "a.f2": constant_op.constant([[0.1], [3.], [1.]]), "b.f1": constant_op.constant([[13.], [0.4], [5.]]), "b.f2": constant_op.constant([[1.], [3.], [0.01]]), } label = constant_op.constant([[0], [0], [1]], dtype=dtypes.int32) return features, label def _eval_input_fn(): features = {"x": constant_op.constant([[1.], [2.], [2.]])} label = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32) return features, label def _infer_ranking_train_input_fn():
tensorflow.python.framework.constant_op.constant
314
import tensorflow as tf tf.summary.scalar('policy_loss', policy_loss) tf.summary.scalar('qf1_loss', qf1_loss) tf.summary.scalar('qf2_loss', qf2_loss) tf.summary.scalar('value_loss', value_loss) tf.summary.scalar("Imitation_loss",self.actor_loss_di) tf.summary.scalar('entropy', self.entropy) tf.summary.scalar('importance weight',tf.reduce_mean(self.weight_ph)) if ent_coef_loss is not None: tf.summary.scalar('ent_coef_loss', ent_coef_loss) tf.summary.scalar('ent_coef', self.ent_coef) tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph)) # Retrieve parameters that must be saved self.params = tf_util.get_trainable_vars("model") self.target_params = tf_util.get_trainable_vars("target/values_fn/vf") # Initialize Variables and target network
tensorflow.summary.scalar
315
import tensorflow as tf data = self.dropout_layer(data) data = self.layer_normalization_layer(data) with tf.variable_scope("task_dependent"): logits = self.dense_layer(data, num_tags) crf_params = tf.get_variable("crf", [num_tags, num_tags], dtype=tf.float32)
tensorflow.variable_scope
316
import tensorflow as tf x = _conv( 'conv2', x, ksize_list[1], _stride_arr(1, data_format), padding, data_format=data_format) with tf.variable_scope('sub3'): x = _batch_norm('bn3', x, is_training, data_format) x = _relu('relu3', x) x = _conv( 'conv3', x, ksize_list[2], _stride_arr(1, data_format),
tensorflow.variable_scope
317
import tensorflow as tf with tf.variable_scope(target_modality.name): new_features["targets"] = target_modality.targets_bottom_sharded( new_targets, dp) with tf.variable_scope("body"): body_outputs, losses = model.model_fn_sharded(new_features) if not isinstance(losses, dict): # If it's a single extra loss.
tensorflow.variable_scope
318
import tensorflow as tf return initial_learning_rate + ( maximal_learning_rate - initial_learning_rate ) * tf.maximum(tf.cast(0, dtype), (1 - x)) * self.scale_fn(mode_step) def get_config(self):
tensorflow.cast
319
import tensorflow as tf with tf.variable_scope("lstm"): self.w_lstm = [] for layer_id in range(self.lstm_num_layers): with tf.variable_scope("layer_{}".format(layer_id)): w = tf.get_variable("w", [2 * self.lstm_size, 4 * self.lstm_size]) self.w_lstm.append(w) self.g_emb = tf.get_variable("g_emb", [1, self.lstm_size]) with tf.variable_scope("emb"): self.w_emb = tf.get_variable("w", [self.num_branches, self.lstm_size]) with tf.variable_scope("softmax"): self.w_soft = tf.get_variable("w", [self.lstm_size, self.num_branches]) b_init = np.array([10.0, 10.0] + [0] * (self.num_branches - 2), dtype=np.float32) self.b_soft = tf.get_variable( "b", [1, self.num_branches], initializer=tf.constant_initializer(b_init)) b_soft_no_learn = np.array( [0.25, 0.25] + [-0.25] * (self.num_branches - 2), dtype=np.float32) b_soft_no_learn = np.reshape(b_soft_no_learn, [1, self.num_branches]) self.b_soft_no_learn = tf.constant(b_soft_no_learn, dtype=tf.float32)
tensorflow.get_variable
320
import tensorflow as tf # // --- Generate a toyMC sample from composite PDF --- # RooDataSet *data = sum.generate(mes,2000) ; def sum_pdf(mes, nsig, sigmean, sigwidth, nbkg, m0, argpar, mes_low, mes_high): add = tf.add(nsig * gaussian_pdf(mes, sigmean, sigwidth), nbkg * argus_pdf_phalf_WN(mes, m0, argpar, mes_low, mes_high), name="sum_pdf") return tf.div(add, nsig + nbkg, name="sum_pdf_normalized") # generate the data in RooFit and import it # run this in ROOT: # data.write("roofit_demo_random_data_values.dat"); # to read it back in: # RooDataSet *data; # data->RooDataSet.read("roofit_demo_random_data_values.dat", RooArgList(mes))
tensorflow.div
321
import tensorflow as tf wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale)) b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0)) c, h = tf.split(axis=1, num_or_size_splits=2, value=s) for idx, (x, m) in enumerate(zip(xs, ms)): c = c*(1-m) h = h*(1-m) z = tf.matmul(x, wx) + tf.matmul(h, wh) + b i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z) i = tf.nn.sigmoid(i) f = tf.nn.sigmoid(f) o = tf.nn.sigmoid(o) u = tf.tanh(u) c = f*c + i*u h = o*tf.tanh(c) xs[idx] = h s = tf.concat(axis=1, values=[c, h]) return xs, s def _ln(x, g, b, e=1e-5, axes=[1]): u, s = tf.nn.moments(x, axes=axes, keep_dims=True) x = (x-u)/tf.sqrt(s+e) x = x*g+b return x
tensorflow.tanh
322
import tensorflow as tf if gpu_idx == 0: G_means = tf.reduce_mean(self.end_points_G['softmax'], 0, keep_dims=True) G_vars = tf.reduce_mean(tf.square(self.end_points_G['softmax'] - G_means), 0, keep_dims=True) G = tf.Print( self.end_points_G['softmax'], [tf.reduce_mean(G_means), tf.reduce_mean(G_vars)], "generator mean and average var", first_n=1) inputs_means = tf.reduce_mean(inputs, 0, keep_dims=True) inputs_vars = tf.reduce_mean(tf.square(inputs - inputs_means), 0, keep_dims=True) inputs = tf.Print( inputs, [tf.reduce_mean(inputs_means), tf.reduce_mean(inputs_vars)], "image mean and average var", first_n=1)
tensorflow.reduce_mean
323
from tensorflow.python.framework import op_def_library as _op_def_library """ result = _op_def_lib.apply_op("UnpackPath", path=path, path_values=path_values, name=name) return result def _InitOpDefLibrary(): op_list = _op_def_pb2.OpList() _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list) _op_def_registry.register_op_list(op_list) op_def_lib = _op_def_library.OpDefLibrary() op_def_lib.add_op_list(op_list) return op_def_lib _InitOpDefLibrary.op_list_ascii = """op { name: "HardRoutingFunction" input_arg { name: "input_data" type: DT_FLOAT
tensorflow.python.framework.op_def_library.OpDefLibrary
324
import tensorflow as tf states = self.states dxt_list = tf.gradients(self.error, states) #dxt_list[0] = tf.Print(dxt_list[0], [dxt_list[0]], "dxt 0: ") test = tf.gradients(states[0], states[-1]) dxt = tf.stack(dxt_list) xt = tf.stack(states) num = (1 - self.alpha) * dxt + tf.tensordot(self.alpha * dxt , tf.transpose( tf.matmul(tf.abs(self.W_rec) * self.rec_Connectivity,self.Dale_rec)), axes=1) * \ tf.where(tf.greater(xt, 0), tf.ones_like(xt), tf.zeros_like(xt)) denom = dxt # sum over hidden units num = tf.reduce_sum(tf.square(num), axis=2) denom = tf.reduce_sum(tf.square(denom), axis=2) bounded = tf.where(tf.greater(denom, 1e-20), tf.div(num, 1.0 * denom), tf.ones_like(num)) nelems = tf.reduce_mean(tf.where(tf.greater(denom, 1e-20), 1.0 * tf.ones_like(num), 1.0 * tf.zeros_like(num)), axis=1)
tensorflow.abs
325
import tensorflow as tf lr = tf.Variable(0.0, trainable=False) self._lr = lr self._lr_summary = tf.summary.scalar('learning_rate', self._lr) tvars = tf.trainable_variables() grads = tf.gradients(avg_neg_log_lhood, tvars) if grad_clip > 0.0: grads, _ = tf.clip_by_global_norm(grads, grad_clip) if opt == 'sgd': optimizer = tf.train.GradientDescentOptimizer(lr) else: raise NotImplementedError()
tensorflow.clip_by_global_norm
326
import tensorflow as tf l1 = tf.contrib.layers.l1_regularizer(alpha)(val) l2 = tf.contrib.layers.l2_regularizer(alpha)(val) A = [[0.8, 0.6, 0.3], [0.1, 0.6, 0.4]] B = [1, 1] top_k = tf.nn.top_k(A, 2) in_top_k = tf.nn.in_top_k(A, B, 1) sess.run(tf.global_variables_initializer()) print(f'\nl1={sess.run(l1)} l2={sess.run(l2)}') a = np.array([1, 2, 3], dtype=np.float32) tf_v = tf.Variable(5, dtype=tf.float32) sess.run(tf.global_variables_initializer())
tensorflow.global_variables_initializer
327
import tensorflow as tf self.EPS_LEN = 100000 # GPU setup os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, device_count={'GPU': gpu}) config.gpu_options.allow_growth = True config.gpu_options.per_process_gpu_memory_fraction = 0.5
tensorflow.ConfigProto
328
import tensorflow as tf import numpy as np import tensorflow as tf from evaluation import factory def write_summary(logs, summary_writer, current_step): """Write out summaries of current training step for the checkpoint.""" with tf.Graph().as_default(): summaries = [tf.Summary.Value(tag=tag, simple_value=value) for tag, value in logs.items()] tf_summary = tf.Summary(value=summaries) summary_writer.add_summary(tf_summary, current_step) class TpuExecutor(object): """An executor class for running jobs on TPUs.""" def __init__(self, model_fn, params): self._model_dir = params.model_dir # Sets up evaluator. self._evaluator = factory.evaluator_generator(params.eval)
tensorflow.Summary
329
import tensorflow as tf """ with tf.name_scope(scope):
tensorflow.name_scope
330
import tensorflow.contrib as contrib if cross_stitch_enabled: with tf.variable_scope("cross_stitch_2"): stitch2_1, stitch2_2 = apply_cross_stitch(fc2_1, fc2_2) else: stitch2_1, stitch2_2 = fc2_1, fc2_2 dropout2_1 = contrib.layers.dropout(stitch2_1, keep_prob=keep_prob, is_training=is_training, scope="dropout2_1") dropout2_2 = contrib.layers.dropout(stitch2_2, keep_prob=keep_prob, is_training=is_training, scope="dropout2_2") fc3_1 = contrib.layers.fully_connected(dropout2_1, 32, scope="fc3_1")
tensorflow.contrib.layers.dropout
331
import tensorflow as tf # Performance tuning flags. tf.flags.DEFINE_boolean('winograd_nonfused', True, """Enable/disable using the Winograd non-fused algorithms.""") tf.flags.DEFINE_boolean('sync_on_finish', False, """Enable/disable whether the devices are synced after each step.""") tf.flags.DEFINE_boolean('staged_vars', False, """whether the variables are staged from the main
tensorflow.flags.DEFINE_boolean
332
import tensorflow as tf pi_loaded.append(load_pi_ckpt(pi_ckpt_path, agent)) return pi_loaded def create_default_writer_and_save_dir(root_dir): """Creates default directories.""" base_dir = osp.expanduser(root_dir) if not tf.io.gfile.exists(base_dir): tf.io.gfile.makedirs(base_dir) tag = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') tb_logdir = osp.join(base_dir, tag, 'tb') save_dir = osp.join(base_dir, tag, 'train') tf.io.gfile.makedirs(tb_logdir) tf.io.gfile.makedirs(save_dir)
tensorflow.io.gfile.exists
333
from tensorflow.python.ops import math_ops or if either `metrics_collections` or `updates_collections` are not a list or tuple. """ with variable_scope.variable_scope(name, 'mean', [values, weights]): values = math_ops.to_float(values) total = _create_local('total', shape=[]) count = _create_local('count', shape=[])
tensorflow.python.ops.math_ops.to_float
334
import tensorflow as tf Returns ------- A Keras variable, filled with `1.0`. """ if dtype is None: dtype = tf.float32 shape = tuple(map(int, shape)) return tf.Variable( tf.constant_initializer(1., dtype=dtype)(shape), dtype, name) def cast_to_floatx(x): """Cast a Numpy array to the default Keras float type. Parameters ----------
tensorflow.constant_initializer
335
import tensorflow as tf def global_avg_pool(input_data, output_length=1, padding='VALID', scope='gloval_avg_pool'): input_dims = input_data.get_shape().as_list() assert (len(input_dims) == 4) # batch_size, height, width, num_channels_in num_channels_in = input_dims[-1] height = input_dims[1] width = input_dims[2] with tf.variable_scope(scope): if output_length == 1: pool = tf.nn.avg_pool(input_data, [1, height, width, 1], strides=[1, 1, 1, 1], padding=padding) pool = tf.reduce_mean(pool, axis=[1, 2]) pool = tf.squeeze(pool, axis=[1, 2]) return pool else: if num_channels_in != output_length:
tensorflow.variable_scope
336
import tensorflow as tf with tf.control_dependencies([save_image_op]): pred_x, pred_y = pred_x * 1., pred_y * 1. return pred_x, pred_y def gaussian_blur(inputs, inputs_filters, sigma, data_format, name=None): with tf.name_scope(name, "gaussian_blur", [inputs]): data_format_ = 'NHWC' if data_format=='channels_last' else 'NCHW' if data_format_ == 'NHWC': inputs = tf.transpose(inputs, [0, 2, 3, 1]) ksize = int(6 * sigma + 1.)
tensorflow.name_scope
337
import tensorflow as tf weights=is_real_example) return {"pred": concat1, "label_ids": concat2, "pearson": pearson, "MSE": mse, "eval_loss": loss,} elif task_name == "cola": def metric_fn(per_example_loss, label_ids, logits, is_real_example): """Compute Matthew's correlations for STS-B.""" predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) # https://en.wikipedia.org/wiki/Matthews_correlation_coefficient tp, tp_op = tf.metrics.true_positives( predictions, label_ids, weights=is_real_example) tn, tn_op = tf.metrics.true_negatives( predictions, label_ids, weights=is_real_example) fp, fp_op = tf.metrics.false_positives( predictions, label_ids, weights=is_real_example) fn, fn_op = tf.metrics.false_negatives( predictions, label_ids, weights=is_real_example)
tensorflow.metrics.true_positives
338
import tensorflow as tf # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("guid: %s" % (example.guid)) tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) tf.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids,
tensorflow.logging.info
339
import tensorflow as tf with argscope([Conv2D, FullyConnected], nl=tf.nn.relu): with tf.variable_scope('STN1'): sampled1 = get_stn(image) with tf.variable_scope('STN2'): sampled2 = get_stn(image) # For visualization in tensorboard with tf.name_scope('visualization'): padded1 = tf.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]]) padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]]) img_orig = tf.concat([image[:, :, :, 0], image[:, :, :, 1]], 1) # b x 2h x w transform1 = tf.concat([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1) transform2 = tf.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1) stacked = tf.concat([img_orig, transform1, transform2], 2, 'viz') tf.summary.image('visualize', tf.expand_dims(stacked, -1), max_outputs=30) sampled = tf.concat([sampled1, sampled2], 3, 'sampled_concat') logits = (LinearWrap(sampled) .FullyConnected('fc1', out_dim=256, nl=tf.nn.relu)
tensorflow.concat
340
import tensorflow as tf # instead. output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1)
tensorflow.zeros_initializer
341
import tensorflow as tf # There are two ways to obtain the final prediction results: (1) bilinear # upsampling the logits followed by argmax, or (2) argmax followed by # nearest neighbor upsampling. The second option may introduce the "blocking # effect" but is computationally efficient. if model_options.prediction_with_upsampled_logits: logits = _resize_bilinear(logits, #tf.shape(images)[1:3], tf.TensorShape([512,512]), scales_to_logits[MERGED_LOGITS_SCOPE].dtype) predictions[output] = tf.argmax(logits, 3, output_type=tf.dtypes.int32) #predictions[output + PROB_SUFFIX] = tf.nn.softmax(logits) else: argmax_results = tf.argmax(logits, 3, output_type=tf.dtypes.int32) argmax_results = tf.image.resize_nearest_neighbor(
tensorflow.TensorShape
342
import tensorflow as tf learnrate = tf.placeholder(tf.float32) def getinputs(path): filename_queue=tf.train.string_input_producer([path]) reader=tf.TFRecordReader() _,serialized_example=reader.read(filename_queue)
tensorflow.train.string_input_producer
343
import tensorflow as tf mask = tf.pad(mask_, [[0,0],[32,32],[32,32],[0,0]]) mask2__ = tf.ones([FLAGS.batch_size,78,78,3]) mask2_ = tf.pad(mask2__, [[0,0],[25,25],[25,25],[0,0]]) mask2 = mask2_ - mask pred_annotation, logits = inference((1-mask)*image + mask*255, keep_probability,z) tf.summary.image("input_image", image, max_outputs=2) tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2) tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2) # loss0 = tf.reduce_mean(tf.abs(z)) loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square((image - logits)),[1,2,3]))) # loss2 = tf.reduce_mean(tf.square((image - logits)*mask2)) # loss = loss1 + loss2 + loss0
tensorflow.summary.image
344
import tensorflow as tf return gtboxes_and_label_h[:int(num_objects), :].astype(np.float32), \ gtboxes_and_label_r[:int(num_objects), :].astype(np.float32) def main(self): with tf.Graph().as_default() as graph, tf.device('/cpu:0'): num_gpu = len(cfgs.GPU_GROUP.strip().split(',')) global_step = slim.get_or_create_global_step() lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu) tf.summary.scalar('lr', lr) optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM) r3det_dcl = build_whole_network.DetectionNetworkR3DetDCL(cfgs=self.cfgs, is_training=True) with tf.name_scope('get_batch'): if cfgs.IMAGE_PYRAMID: shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN) shortside_len = tf.random_shuffle(shortside_len_list)[0] else: shortside_len = cfgs.IMG_SHORT_SIDE_LEN img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \ self.reader.next_batch(dataset_name=cfgs.DATASET_NAME, batch_size=cfgs.BATCH_SIZE * num_gpu, shortside_len=shortside_len, is_training=True) # data processing
tensorflow.name_scope
345
import tensorflow as tf opt_momentum = knobs['opt_momentum'] # Momentum optimizer momentum grad_clip_norm = knobs['grad_clip_norm'] # L2 norm to clip gradients by # Compute learning rate, gradients tf_trainable_vars = tf.trainable_variables() lr = self._get_learning_rate(step, **knobs) grads = tf.gradients(loss, tf_trainable_vars) self._mark_for_monitoring('lr', lr)
tensorflow.trainable_variables
346
import tensorflow as tf return outputs def highwaynet(inputs, num_units=None, scope="highwaynet"): if not num_units: num_units = inputs.get_shape()[-1] with tf.variable_scope(scope): H = tf.layers.dense(inputs, units=num_units, activation=tf.nn.relu, name="dense1") T = tf.layers.dense( inputs, units=num_units, activation=tf.nn.sigmoid, bias_initializer=tf.constant_initializer(-1.0), name="dense2", ) outputs = H * T + inputs * (1.0 - T) return outputs def conv1d_banks(inputs, K=16, is_training=True, scope="conv1d_banks"): with tf.variable_scope(scope): outputs = tf.layers.conv1d(inputs, embed_size // 2, 1, padding="SAME") for k in range(2, K + 1): with tf.variable_scope("num_{}".format(k)):
tensorflow.constant_initializer
347
from tensorflow.core.protobuf import meta_graph_pb2 v0 = tf.Variable(10.0, name="v0") # Creates a saver. save = tf.train.Saver({"v0": v0}) # Generates MetaGraphDef. meta_graph_def = meta_graph_pb2.MetaGraphDef() # Verifies that collection with unsupported key will not be added. tf.add_to_collection(save, 3)
tensorflow.core.protobuf.meta_graph_pb2.MetaGraphDef
348
import tensorflow as tf tf.set_random_seed(1) # defines the seed of the random number generator
tensorflow.set_random_seed
349
import tensorflow as tf return x def read_and_decode(filename_queue, canvas_size, preemph=0.): reader = tf.TFRecordReader() _, serialized_example = reader.read(filename_queue) features = tf.parse_single_example( serialized_example, features={ 'wav_raw': tf.FixedLenFeature([], tf.string), 'noisy_raw': tf.FixedLenFeature([], tf.string), }) wave = tf.decode_raw(features['wav_raw'], tf.int32) wave.set_shape(canvas_size) wave = (2./65535.) * tf.cast((wave - 32767), tf.float32) + 1. noisy = tf.decode_raw(features['noisy_raw'], tf.int32) noisy.set_shape(canvas_size) noisy = (2./65535.) * tf.cast((noisy - 32767), tf.float32) + 1.
tensorflow.FixedLenFeature
350
import tensorflow as tf key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
tensorflow.ones_like
351
import tensorflow as tf act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable function to select and action given observation. ` See the top of the file for details. """ if param_noise_filter_func is None: param_noise_filter_func = default_param_noise_filter with tf.variable_scope(scope, reuse=reuse): observations_ph = U.ensure_tf_input(make_obs_ph("observation")) stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold") update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale") reset_ph = tf.placeholder(tf.bool, (), name="reset")
tensorflow.variable_scope
352
import tensorflow as tf m.a: 0.001 >>> print("m.b: {:.3f}".format(np.asscalar(m.b.value))) m.b: 1.000 """ X_key = X if isinstance(X, tf.Tensor) else None Y_key = Y if isinstance(Y, tf.Tensor) else None key = ("_Model__loss", X_key, Y_key) if key not in self.cache: X_tensor = (X if isinstance(X, tf.Tensor) else tf.placeholder(tf.as_dtype(X.dtype))) Y_tensor = (Y if isinstance(Y, tf.Tensor) else tf.placeholder(tf.as_dtype(Y.dtype))) self.cache[key] = (self._compile_loss(X_tensor, Y_tensor), X_tensor, Y_tensor) loss, X_tensor, Y_tensor = self.cache[key] feed_dict = self.feed_dict if not isinstance(X, tf.Tensor): feed_dict[X_tensor] = X if not isinstance(Y, tf.Tensor): feed_dict[Y_tensor] = Y variables = [p.free_state for p in self.params if not p.fixed] variables = utils.unique(variables) free_state = tf.concat(0, [tf.reshape(v, [-1]) for v in variables])
tensorflow.as_dtype
353
import tensorflow as tf ) tf.summary.scalar('learning_rate', learning_rate) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops), tf.variable_scope('optimizer'): optimizer = tf.train.AdamOptimizer(learning_rate) grads_and_vars = optimizer.compute_gradients(total_loss) train_op = optimizer.apply_gradients(grads_and_vars, global_step) for g, v in grads_and_vars: tf.summary.histogram(v.name[:-2] + '_hist', v) tf.summary.histogram(v.name[:-2] + '_grad_hist', g) with tf.control_dependencies([train_op]), tf.name_scope('ema'): ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY, num_updates=global_step) train_op = ema.apply(tf.trainable_variables()) return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op) def add_weight_decay(weight_decay): """Add L2 regularization to all (or some) trainable kernel weights.""" weight_decay = tf.constant( weight_decay, tf.float32, [], 'weight_decay' ) trainable_vars = tf.trainable_variables() kernels = [
tensorflow.train.ExponentialMovingAverage
354
import tensorflow as tf tf.app.flags.DEFINE_float('alpha', 10, 'Predictive reconstruction loss weight') tf.app.flags.DEFINE_float('beta', 0.0005, 'Reconstruction from noisy data loss weight') tf.app.flags.DEFINE_float('epsilon', 0.000001, 'Diameter of epsilon sphere comparing to distance to a neighbour. <= 0.5') tf.app.flags.DEFINE_float('gamma', 50., 'Loss weight for large distances') tf.app.flags.DEFINE_float('distance', 0.01, 'Maximum allowed interpoint distance') tf.app.flags.DEFINE_float('delta', 1., 'Loss weight for stacked objective') tf.app.flags.DEFINE_string('comment', '', 'Comment to leave by the model') tf.app.flags.DEFINE_float('test_max', 10000, 'max number of examples in the test set') tf.app.flags.DEFINE_integer('max_epochs', 0, 'Train for at most this number of epochs') tf.app.flags.DEFINE_integer('save_every', 250, 'Save model state every INT epochs') tf.app.flags.DEFINE_integer('eval_every', 25, 'Save encoding and visualizations every') tf.app.flags.DEFINE_integer('visualiza_max', 10, 'Max pairs to show on visualization') tf.app.flags.DEFINE_boolean('load_state', True, 'Load state if possible ') tf.app.flags.DEFINE_boolean('kill_depth', False, 'Ignore depth information') tf.app.flags.DEFINE_boolean('dev', False, 'Indicate development mode') tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size') tf.app.flags.DEFINE_float('learning_rate', 0.0001, 'Create visualization of ') tf.app.flags.DEFINE_float('blur', 5.0, 'Max sigma value for Gaussian blur applied to training set') tf.app.flags.DEFINE_boolean('new_blur', False, 'Use data augmentation as blur info') tf.app.flags.DEFINE_integer('blur_decrease', 10000, 'Decrease image blur every X steps') FLAGS = tf.app.flags.FLAGS slim = tf.contrib.slim
tensorflow.app.flags.DEFINE_integer
355
import tensorflow as tf tf.summary.scalar("model/entropy", entropy / bs) tf.summary.image("model/state", pi.x)
tensorflow.summary.image
356
import tensorflow as tf input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32) else: is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32) is_training = (mode == tf.estimator.ModeKeys.TRAIN)
tensorflow.cast
357
from tensorflow.python.platform import tf_logging as logging @property def best_value(self): """Returns the best early stopping metric value found so far.""" return self._best_value def every_n_step_end(self, step, outputs): super(ValidationMonitor, self).every_n_step_end(step, outputs) # TODO(mdan): The use of step below is probably misleading. # The code should probably use the step from the checkpoint, because # that's what is being evaluated. if self._estimator is None: raise ValueError("Missing call to set_estimator.") # Check that we are not running evaluation on the same checkpoint. latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir) if latest_path is None: logging.debug("Skipping evaluation since model has not been saved yet " "at step %d.", step) return False if latest_path is not None and latest_path == self._latest_path: logging.debug("Skipping evaluation due to same checkpoint %s for step %d " "as for step %d.", latest_path, step, self._latest_path_step) return False self._latest_path = latest_path self._latest_path_step = step # Run evaluation and log it. validation_outputs = self._estimator.evaluate( x=self.x, y=self.y, input_fn=self.input_fn, batch_size=self.batch_size, steps=self.eval_steps, metrics=self.metrics, name=self.name)
tensorflow.python.platform.tf_logging.debug
358
import tensorflow as tf # Test that previous-feeding model ignores inputs after the first. dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)] with tf.variable_scope("other"): d3, _ = tf.nn.seq2seq.embedding_attention_seq2seq( enc_inp, dec_inp2, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, feed_previous=tf.constant(True)) sess.run([tf.global_variables_initializer()]) tf.get_variable_scope().reuse_variables() d1, _ = tf.nn.seq2seq.embedding_attention_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, feed_previous=True) d2, _ = tf.nn.seq2seq.embedding_attention_seq2seq( enc_inp, dec_inp2, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, feed_previous=True) res1 = sess.run(d1) res2 = sess.run(d2)
tensorflow.get_variable_scope
359
import tensorflow as tf tail_init = dense_maxnorm(tail_init, self.maxnorm) self.head_embedding_vars = tf.Variable(head_init) self.rel_embedding_vars = tf.Variable(rel_init) self.tail_embedding_vars = tf.Variable(tail_init)
tensorflow.Variable
360
import tensorflow as tf with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True)
tensorflow.nn.dropout
361
import tensorflow as tf output_shape=shapes, strides=self.strides, padding='SAME', data_format='NHWC') mu,var = tf.nn.moments(t,axes=[0,1,2]) std = tf.sqrt(var+self.epsilon) return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)] require_init = tf.reduce_any(tf.is_nan(self.g)) init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b]) with tf.control_dependencies(init_ops): w = tf.reshape(self.g,[1,1,tf.shape(self.v)[2],1]) * tf.nn.l2_normalize(self.v,axis=[0,1,3]) return tf.nn.bias_add( tf.nn.conv2d_transpose(input_var,w, output_shape=shapes, strides=self.strides, padding='SAME', data_format='NHWC'), self.b,data_format='NHWC',name=name) def get_variables(self): #TODO: self.v should be l2-normalized or not? / currently not. return {'v':self.v,'b':self.b,'g':self.g}
tensorflow.nn.l2_normalize
362
import tensorflow as tf def train(loss, global_step): """Train eccentricity model. Create an optimizer and apply to all trainable variables. Args: loss: Total loss from loss(). global_step: Integer Variable counting the number of training steps processed. Returns: train_op: op for training. """ # Compute gradients. tf.scalar_summary(loss.op.name, loss) optimizer = tf.train.AdagradOptimizer(FLAGS.learning_rate) # Use the optimizer to apply the gradients that minimize the loss # (and also increment the global step counter) as a single training step. train_op = optimizer.minimize(loss, global_step=global_step) return train_op
tensorflow.scalar_summary
363
import tensorflow.contrib as contrib else: stitch2_1, stitch2_2 = fc2_1, fc2_2 dropout2_1 = contrib.layers.dropout(stitch2_1, keep_prob=keep_prob, is_training=is_training, scope="dropout2_1") dropout2_2 = contrib.layers.dropout(stitch2_2, keep_prob=keep_prob, is_training=is_training, scope="dropout2_2") fc3_1 = contrib.layers.fully_connected(dropout2_1, 32, scope="fc3_1") fc3_2 = contrib.layers.fully_connected(dropout2_2, 32, scope="fc3_2") if cross_stitch_enabled: with tf.variable_scope("cross_stitch_3"): stitch3_1, stitch3_2 = apply_cross_stitch(fc3_1, fc3_2) else: stitch3_1, stitch3_2 = fc3_1, fc3_2 dropout3_1 = contrib.layers.dropout(stitch3_1, keep_prob=keep_prob, is_training=is_training,
tensorflow.contrib.layers.fully_connected
364
from tensorflow.python.framework import ops if input_shape is not None: return [tensor_shape.TensorShape(input_shape.tolist())] else: return [tensor_shape.unknown_shape(ndims=4)] @ops.RegisterShape("MaxPoolGrad") @ops.RegisterShape("MaxPoolGradWithArgmax") def _MaxPoolGradShape(op): """Shape function for the MaxPoolGrad op.""" orig_input_shape = op.inputs[0].get_shape().with_rank(4) return [orig_input_shape]
tensorflow.python.framework.ops.RegisterShape
365
import tensorflow as tf with tf.variable_scope(scope): shape = tf.shape(inputs) dim = inputs.get_shape().as_list()[-1] out_shape = [shape[idx] for idx in range( len(inputs.get_shape().as_list()) - 1)] + [hidden] flat_inputs = tf.reshape(inputs, [-1, dim]) W = tf.get_variable("W", [dim, hidden]) res = tf.matmul(flat_inputs, W) if use_bias: b = tf.get_variable( "b", [hidden], initializer=tf.constant_initializer(0.)) res = tf.nn.bias_add(res, b)
tensorflow.get_variable
366
import tensorflow as tf tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i]) tf.summary.scalar("regularization_loss", model.regularization_loss) tf.summary.scalar("stop_token_loss", model.stop_token_loss) tf.summary.scalar("loss", model.loss) tf.summary.scalar("learning_rate", model.learning_rate) # Control learning rate decay speed
tensorflow.summary.scalar
367
import tensorflow as tf self.assertAllEqual(augmented_tensor_dict[fields.InputDataFields.image], np_image + 1) self.assertAllEqual( augmented_tensor_dict[fields.InputDataFields.groundtruth_classes], [[0, 0, 0, 1], [0, 1, 0, 0]]) def test_applies_data_augmentation_fn_before_model_preprocess_fn(self): np_image = np.random.randint(256, size=(4, 4, 3)) tensor_dict = { fields.InputDataFields.image: tf.constant(np_image), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([3, 1], np.int32)) } def mul_two_model_preprocessor_fn(image): return (image * 2, tf.expand_dims(tf.shape(image)[1:], axis=0)) def add_five_to_image_data_augmentation_fn(tensor_dict): tensor_dict[fields.InputDataFields.image] += 5 return tensor_dict
tensorflow.constant
368
import tensorflow as tf # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = tf.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=tf.zeros_initializer(), ) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32 )
tensorflow.nn.log_softmax
369
import tensorflow as tf input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32) else: is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32) is_training = (mode == tf.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables()
tensorflow.shape
370
import tensorflow as tf with tf.variable_scope(layer_name): w = tf.get_variable(name='weight', trainable=is_pretrain, shape=[kernel_size[0],kernel_size[1],kernel_size[2],in_channels,out_channels], initializer=tf.contrib.layers.xavier_initializer()) b = tf.get_variable(name='bias', trainable=is_pretrain, shape=[out_channels], initializer=tf.contrib.layers.xavier_initializer()) x = tf.nn.conv3d(x, w, strides=strides, padding='SAME', data_format=data_format, name='conv3d') x = tf.nn.bias_add(x, b, name='bias_add') x = tf.nn.relu(x, name='relu') return x def conv(layer_name, x, out_channels, kernel_size=[3,3], strides=[1,1,1,1], is_pretrain=True): ''' Convolution op wrapper, use RELU activation after convolution Args: layer_name:
tensorflow.nn.bias_add
371
import tensorflow as tf self.drop1 = tf.layers.dropout(self.conv1, self.config.cifar10_cnn["keep_prob"], training=self.train) self.pool1 = tf.layers.max_pooling2d(self.drop1, 2, 2) self.conv2 = tf.layers.conv2d(self.pool1, self.config.cifar10_cnn["num_filters"], self.config.cifar10_cnn["filter_size"],
tensorflow.layers.conv2d
372
import tensorflow as tf # Restores from MetaGraphDef. new_saver = tf.train.import_meta_graph(filename) # Generates a new MetaGraphDef. new_meta_graph_def = new_saver.export_meta_graph() # It should be the same as the original. self.assertProtoEquals(meta_graph_def, new_meta_graph_def) def _testGraphExtensionSave(self): test_dir = self._TestDir("graph_extension") filename = os.path.join(test_dir, "metafile") saver0_ckpt = os.path.join(test_dir, "saver0.ckpt") with self.test_session(graph=tf.Graph()) as sess: # Creates an inference graph. # Hidden 1 images = tf.constant(1.2, tf.float32, shape=[100, 28]) with tf.name_scope("hidden1"): weights = tf.Variable( tf.truncated_normal([28, 128], stddev=1.0 / math.sqrt(float(28))), name="weights") biases = tf.Variable(tf.zeros([128]), name="biases") hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases) # Hidden 2 with tf.name_scope("hidden2"): weights = tf.Variable( tf.truncated_normal([128, 32], stddev=1.0 / math.sqrt(float(128))),
tensorflow.constant
373
import tensorflow as tf Returns: coverage_loss: scalar """ coverage = tf.zeros_like(attn_dists[0]) # shape (batch_size, attn_length). Initial coverage is zero. covlosses = [] # Coverage loss per decoder timestep. Will be list length max_dec_steps containing shape (batch_size). for a in attn_dists:
tensorflow.zeros_like
374
import tensorflow as tf hidden = tf.layers.dense(inputs=_input, units=self.vf_hidden_size, activation=tf.nn.elu) w = tf.get_variable("weights", (self.vf_hidden_size, 1)) return tf.matmul(hidden, w) def build_loss(self): cutoff_vf_manager = tf.reshape(tf.stop_gradient(self.manager_vf), [-1]) dot = tf.reduce_sum(tf.multiply(self.s_diff, self.g), axis=1) gcut = tf.stop_gradient(self.g) mag = tf.norm(self.s_diff, axis=1) * tf.norm(gcut, axis=1) + .0001 dcos = dot / mag manager_loss = -tf.reduce_sum((self.r - cutoff_vf_manager) * dcos) cutoff_vf_worker = tf.reshape(tf.stop_gradient(self.worker_vf), [-1]) log_p = tf.reduce_sum(self.log_pi * self.ac, [1]) worker_loss = (self.r + self.alpha * self.ri - cutoff_vf_worker) * log_p worker_loss = -tf.reduce_sum(worker_loss, axis=0) Am = self.r - self.manager_vf manager_vf_loss = .5 * tf.reduce_sum(tf.square(Am))
tensorflow.norm
375
import tensorflow as tf sample_masks = 1. * tf.cast(sample, tf.float32) / num_choices sample_log_prob = tf.reduce_mean(dist.log_prob(sample)) return (dist_entropy, sample_masks, sample_log_prob) def get_loss_weights(name=None): """Returns the weight for loss.""" with tf.variable_scope(name, 'rl_op_selection'): logits = tf.get_variable( name='loss_logits_rl_w', initializer=tf.initializers.zeros(), shape=[ FLAGS.num_choices, ],
tensorflow.variable_scope
376
import tensorflow as tf return tf.estimator.EstimatorSpec( mode, loss=total_loss, eval_metric_ops=eval_metric_ops ) assert mode == tf.estimator.ModeKeys.TRAIN with tf.variable_scope('learning_rate'): global_step = tf.train.get_global_step() learning_rate = tf.train.cosine_decay( params['initial_learning_rate'], global_step, decay_steps=params['num_steps'] ) tf.summary.scalar('learning_rate', learning_rate) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops), tf.variable_scope('optimizer'): optimizer = tf.train.AdamOptimizer(learning_rate) grads_and_vars = optimizer.compute_gradients(total_loss) train_op = optimizer.apply_gradients(grads_and_vars, global_step) for g, v in grads_and_vars: tf.summary.histogram(v.name[:-2] + '_hist', v) tf.summary.histogram(v.name[:-2] + '_grad_hist', g)
tensorflow.summary.scalar
377
import tensorflow as tf self.g_dim, step_size=tf.shape(self.obs)[:1]) g_hat = self.manager_lstm.output self.g = tf.nn.l2_normalize(g_hat, dim=1) self.manager_vf = self.build_value(g_hat) def build_worker(self): with tf.variable_scope('worker'): num_acts = self.act_space # Calculate U self.worker_lstm = SingleStepLSTM(tf.expand_dims(self.z, [0]), size=num_acts * self.k, step_size=tf.shape(self.obs)[:1]) flat_logits = self.worker_lstm.output self.worker_vf = self.build_value(flat_logits) U = tf.reshape(flat_logits, [-1, num_acts, self.k]) # Calculate w cut_g = tf.stop_gradient(self.g) cut_g = tf.expand_dims(cut_g, [1])
tensorflow.expand_dims
378
import tensorflow as tf def build_anet(self, state_in, name, reuse=False, batch_size=64): reg = None with tf.variable_scope(name, reuse=reuse): layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg) layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg) lstm_a = tf.nn.rnn_cell.LSTMCell(num_units=256) lstm_a = tf.nn.rnn_cell.DropoutWrapper(lstm_a, output_keep_prob=self.keep_prob) state_init_a = lstm_a.zero_state(batch_size=batch_size, dtype=tf.float32) lstm_ain = tf.expand_dims(layer_a2, axis=1) out_a, state_final_a = tf.nn.dynamic_rnn(cell=lstm_a, inputs=lstm_ain, initial_state=state_init_a) cell_out_a = tf.reshape(out_a, [-1, 256]) mu = tf.layers.dense(cell_out_a, self.a_dim, tf.nn.tanh, kernel_regularizer=reg) sigma = tf.layers.dense(cell_out_a, self.a_dim, tf.nn.softplus, kernel_regularizer=reg) # sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5)) sigma = tf.clip_by_value(sigma, 0.0, 1.0) norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma) params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name) return norm_dist, params, state_init_a, state_final_a
tensorflow.nn.dynamic_rnn
379
import tensorflow as tf val = save.save(sess, save_path, global_step=global_step_int) expected_save_path = "%s-%d" % (save_path, global_step_int) self.assertEqual(expected_save_path, val) class SaveRestoreShardedTest(tf.test.TestCase): def testBasics(self): save_path = os.path.join(self.get_temp_dir(), "sharded") # Build a graph with 2 parameter nodes on different devices. with tf.Session( target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = tf.Variable(10, name="v0") with sess.graph.device("/cpu:1"): v1 = tf.Variable(20, name="v1") save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True) tf.initialize_all_variables().run() val = save.save(sess, save_path) self.assertEqual(save_path + "-?????-of-00002", val) meta_graph_filename = save._MetaGraphFilename(val) self.assertEqual(save_path + ".meta", meta_graph_filename) # Restore a different "v0" from shard 0 of the saved files. with tf.Session( target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"):
tensorflow.Variable
380
import tensorflow as tf self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual(2, len(res[0])) self.assertEqual((2, 2), res[0][0].c.shape) self.assertEqual((2, 2), res[0][0].h.shape) self.assertEqual((2, 2), res[0][1].c.shape) self.assertEqual((2, 2), res[0][1].h.shape) def testEmbeddingAttentionDecoder(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): inp = [tf.constant(0.5, shape=[2, 2])] * 2 cell = tf.nn.rnn_cell.GRUCell(2) enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32) attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs]) dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)] dec, mem = tf.nn.seq2seq.embedding_attention_decoder( dec_inp, enc_state, attn_states, cell, num_symbols=4, embedding_size=2, output_size=3) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res))
tensorflow.constant
381
import tensorflow as tf if tf_util.is_image(obs_phs[0]): tf.summary.image('observation', obs_phs[0])
tensorflow.summary.image
382
import tensorflow as tf class ObjectDetector: def __init__(self, model_path='./model', label_file='./model/label.names', num_classes=2, score_threshold=0.5, image_sz=(416, 416, 3)): self._model_path = model_path self._label_file = label_file self._num_classes = num_classes self._score_threshold = score_threshold self._image_sz = image_sz[0:2] self._config = ConfigProto() self._config.gpu_options.allow_growth = True self._graph = tf.Graph() with self._graph.as_default(): self._sess = tf.Session(config=self._config) tf.saved_model.load( self._sess, [tag_constants.SERVING], self._model_path) self._image_tensor = self._sess.graph.get_tensor_by_name( 'serving_default_input_1:0') self._output_tensor = self._sess.graph.get_tensor_by_name( 'StatefulPartitionedCall:0')
tensorflow.Graph
383
import tensorflow as tf if FLAGS.do_serve: def serving_input_fn(): with tf.variable_scope("foo"): feature_spec = { "input_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64), } serialized_tf_example = tf.placeholder(dtype=tf.string, shape=[None], name='input_example_tensor') receiver_tensors = {'examples': serialized_tf_example} features = tf.parse_example(serialized_tf_example, feature_spec) return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
tensorflow.FixedLenFeature
384
import tensorflow as tf i0, x, l1_h2, l2_h2, l3_h2 ] returned = tf.while_loop( self.condition, self.full, loop_vars=elems, back_prop=True, swap_memory=False)
tensorflow.while_loop
385
import tensorflow as tf return tuple(restored) def import_ops(self): if self._is_training: self._train_op = tf.get_collection_ref('train_op')[0] self._lr = tf.get_collection_ref('lr')[0] self._new_lr = tf.get_collection_ref('new_lr')[0] self._lr_update = tf.get_collection_ref('lr_update')[0] rnn_params = tf.get_collection_ref('rnn_params') if self._cell and rnn_params: params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable(
tensorflow.get_collection_ref
386
import tensorflow as tf self.logger.info("applying optimize %s" % self.optim_type) trainable_vars = tf.trainable_variables() if self.config.clip_weight:
tensorflow.trainable_variables
387
import tensorflow as tf self.generated_image = tflib.convert_images_to_uint8(self.generator_output, nchw_to_nhwc=True, uint8_cast=False) self.generated_image_uint8 = tf.saturate_cast(self.generated_image, tf.uint8)
tensorflow.saturate_cast
388
import tensorflow as tf # define train_op gen_optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05) dis_optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05) # wrapper to make the optimizer work with TPUs if params['use_tpu']: gen_optimizer = tf.contrib.tpu.CrossShardOptimizer(gen_optimizer) dis_optimizer = tf.contrib.tpu.CrossShardOptimizer(dis_optimizer) gan_train_ops = tf.contrib.gan.gan_train_ops(gan_model, gan_loss, gen_optimizer, dis_optimizer) while_loop = tf.contrib.tpu.while_loop if params['use_tpu'] else tf.while_loop # train the discriminator 100 steps inputs = [tf.constant(0), tf.constant(0.0)] cond = lambda i, x: tf.less(i, 100) def body(i, x): return tf.add(i, 1), gan_train_ops.discriminator_train_op
tensorflow.contrib.gan.gan_train_ops
389
import tensorflow as tf flags.mark_flag_as_required("input_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") tf.app.run()
tensorflow.app.run
390
import tensorflow as tf Actor-Critics """ def mlp_actor_critic(x, a, hidden_sizes=(400,300), activation=tf.nn.relu, output_activation=tf.tanh, action_space=None, dropout_rate=0, nn_type='mlp_variational'): act_dim = a.shape.as_list()[-1] act_limit = action_space.high[0] if nn_type == 'mlp': with tf.variable_scope('pi'): pi = act_limit * mlp(x, list(hidden_sizes) + [act_dim], activation, output_activation) with tf.variable_scope('q1'): q1 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) with tf.variable_scope('q2'): q2 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) with tf.variable_scope('q1', reuse=True): q1_pi = tf.squeeze(mlp(tf.concat([x, pi], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)
tensorflow.variable_scope
391
import tensorflow as tf n_filters = sum(f[1] for f in filters) max_chars = cnn_options['max_characters_per_token'] char_embed_dim = cnn_options['embedding']['dim'] n_chars = cnn_options['n_characters'] if n_chars != 262: raise InvalidNumberOfCharacters( "Set n_characters=262 after training see the README.md" ) if cnn_options['activation'] == 'tanh': activation = tf.nn.tanh elif cnn_options['activation'] == 'relu': activation = tf.nn.relu # the character embeddings with tf.device("/cpu:0"): self.embedding_weights = tf.get_variable( "char_embed", [n_chars, char_embed_dim], dtype=DTYPE, initializer=tf.random_uniform_initializer(-1.0, 1.0) ) # shape (batch_size, unroll_steps, max_chars, embed_dim) self.char_embedding = tf.nn.embedding_lookup(self.embedding_weights, self.ids_placeholder) # the convolutions def make_convolutions(inp): with tf.variable_scope('CNN') as scope: convolutions = []
tensorflow.device
392
import tensorflow as tf num_out_blocks = tf.size(unused_indices) # Select only unused blocks with tf.variable_scope('select'): stacked_blocks = tf.stack(cell_inputs + blocks) out_blocks = tf.gather(stacked_blocks, unused_indices, axis=0) out_blocks = tf.transpose(out_blocks, (1, 2, 3, 0, 4)) # Combine to constant channels with tf.variable_scope('combine'): W = self._make_var('W', (ni, block_ch * block_ch)) W = tf.gather(W, unused_indices, axis=0)
tensorflow.transpose
393
import tensorflow as tf return tf.nn.batch_normalization(inp, moving_mean, moving_variance, offset, scale, 0.01, name='norm') def pool(inp, name, kind, size, stride, padding='SAME'): assert kind in ['max', 'avg'] strides = [1, stride, stride, 1] sizes = [1, size, size, 1] with tf.variable_scope(name): if kind == 'max': out = tf.nn.max_pool(inp, sizes, strides=strides, padding=padding, name=kind) else: out = tf.nn.avg_pool(inp, sizes, strides=strides, padding=padding, name=kind) return out def ResNet18(inp, phase, num_outputs=1000, alpha=0.0): def residual_block(inp, phase, alpha=0.0,nom='a',increase_dim=False,last=False): input_num_filters = inp.get_shape().as_list()[3] if increase_dim: first_stride = [1, 2, 2, 1]
tensorflow.nn.max_pool
394
import tensorflow as tf def singel_instance(x): cur_passage_words = x[0] # [passage_length] cur_phrase_starts = x[1] # [phrase_length] cur_vocab_dist = x[2] # [vsize] cur_attn_dist = x[3] # [passage_length] # first: get the first word for each phrase first_words = tf.gather(cur_passage_words, cur_phrase_starts) # [phrase_length] # second: get the probs for each word first_word_probs = tf.gather(cur_vocab_dist, first_words) # [phrase_length] return cur_attn_dist + first_word_probs elems = (in_passage_words, phrase_starts, vocab_dist, attn_dist) return tf.map_fn(singel_instance, elems, dtype=tf.float32) # [batch_size, phrase_length] class CovCopyAttenGen: def __init__(self, placeholders, options, vocab): self.options = options self.vocab = vocab self.cell = tf.contrib.rnn.LSTMCell( options.gen_hidden_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=113), state_is_tuple=True)
tensorflow.map_fn
395
import tensorflow as tf # Dense NN dnn_output = rnn_output dnn_output_size = rnn_output_size if do_dnn: last_layer = rnn_output last_layer_size = rnn_output_size for i, layer_size in enumerate(dnn_sizes): layer_name = 'dnn_{}'.format(i) with tf.variable_scope(layer_name): dnn_w = tf.get_variable('W', shape=[last_layer_size, layer_size], initializer=dnn_init, dtype=dtype) dnn_b = tf.get_variable('b', shape=[layer_size], initializer=tf.constant_initializer(0.0), dtype=dtype) projected = tf.nn.bias_add(tf.matmul(last_layer, dnn_w), dnn_b) # TODO: argument nonlinearity, change bias to 0.1 if relu if dnn_nonlin == 'tanh': last_layer = tf.nn.tanh(projected) elif dnn_nonlin == 'sigmoid': last_layer = tf.nn.sigmoid(projected) elif dnn_nonlin == 'relu': last_layer = tf.nn.relu(projected) else: raise NotImplementedError()
tensorflow.constant_initializer
396
import tensorflow as tf progress = networks.compute_progress( current_image_id_ph, stable_stage_num_images, transition_stage_num_images, num_blocks=3) x = tf.random_normal([2, 16, 16, 3]) logits, _ = networks.discriminator( x, progress, _num_filters_stub, networks.ResolutionSchedule( start_resolutions=(4, 4), scale_base=2, num_resolutions=3)) fake_loss = tf.reduce_sum(tf.square(logits)) grad_norms = [ _get_grad_norm( fake_loss, tf.trainable_variables('.*/progressive_gan_block_1/.*')), _get_grad_norm( fake_loss, tf.trainable_variables('.*/progressive_gan_block_2/.*')), _get_grad_norm( fake_loss, tf.trainable_variables('.*/progressive_gan_block_3/.*')) ]
tensorflow.square
397
import tensorflow as tf def fc(input_data, out_dim, non_linear_fn=None, initial_value=None, use_bias=True, scope='fc'): with tf.variable_scope(scope): input_dims = input_data.get_shape().as_list() if len(input_dims) == 4: _, input_h, input_w, num_channels = input_dims in_dim = input_h * input_w * num_channels flat_input = tf.reshape(input_data, [-1, in_dim]) else: in_dim = input_dims[-1] flat_input = input_data if initial_value is None: fc_weight = tf.get_variable("weights", shape=[in_dim, out_dim], initializer=tf.random_normal_initializer(mean=0., stddev=0.01)) fc_bias = tf.get_variable("bias", shape=[out_dim], initializer=tf.constant_initializer(0.0)) else: fc_weight = tf.get_variable("weights", initializer=initial_value[0]) fc_bias = tf.get_variable("bias", shape=[out_dim], initializer=initial_value[1]) if use_bias: output = tf.add(tf.matmul(flat_input, fc_weight), fc_bias) else: output = tf.matmul(flat_input, fc_weight) if non_linear_fn is None: return output else: activation = non_linear_fn(output)
tensorflow.constant_initializer
398
import tensorflow as tf self.assertTrue(tf.contrib.util.constant_value(mvn.is_scalar_batch())) mvn = tfd.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True) self.assertFalse(tf.contrib.util.constant_value(mvn.is_scalar_event())) self.assertFalse(tf.contrib.util.constant_value(mvn.is_scalar_batch())) # We now test every codepath within the underlying is_scalar_helper # function. # Test case 1, 2. x = tf.placeholder_with_default(input=1, shape=[]) # None would fire an exception were it actually executed. self.assertTrue(normal._is_scalar_helper(x.shape, lambda: None)) self.assertTrue( normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x))) x = tf.placeholder_with_default(input=[1], shape=[1]) # None would fire an exception were it actually executed. self.assertFalse(normal._is_scalar_helper(x.shape, lambda: None)) self.assertFalse( normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x))) # There's no notion of partially known shapes in eager mode, so exit # early. if tf.executing_eagerly(): return # Test case 3.
tensorflow.TensorShape
399
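Since every row pairs a snippet with the API it is meant to contain, a quick consistency check is to confirm that the final attribute of seed_api (for example scalar in tensorflow.summary.scalar) appears somewhere in seed. The helper below is a rough sketch of that check under the same assumed row layout as above; it is not part of the dataset.

def seed_mentions_api(row):
    # Compare against the last dotted component of seed_api, e.g. "scalar"
    # for "tensorflow.summary.scalar"; the snippets alias the module as "tf".
    short_name = row["seed_api"].rsplit(".", 1)[-1]
    return short_name in row["seed"]

# Example: collect indices of rows whose seed text never mentions its own API.
# missing = [r["index"] for r in rows if not seed_mentions_api(r)]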