Columns (name: type, value range):
seed: string (lengths 25 to 1.88k)
seed_api: string (lengths 14 to 102)
index: int64 (values 0 to 1.05k)
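Each record below pairs a code snippet (seed) with the fully qualified API it exercises (seed_api) and a numeric index. As a rough illustration of how rows with this schema might be consumed, the following sketch assumes the records have been exported to a JSON Lines file named seed_api_rows.jsonl (that filename and the export step are assumptions, not part of this listing); it loads the rows and counts how many snippets exercise each API.

# Minimal sketch, assuming the rows were exported to a JSON Lines file with
# one object per record: {"seed": "...", "seed_api": "...", "index": 0}.
# The filename "seed_api_rows.jsonl" is hypothetical.
import json
from collections import Counter

def load_rows(path="seed_api_rows.jsonl"):
    """Yield one dict per record with keys 'seed', 'seed_api', and 'index'."""
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)

if __name__ == "__main__":
    rows = list(load_rows())
    # Count how many seed snippets exercise each fully qualified API.
    api_counts = Counter(row["seed_api"] for row in rows)
    for api, n in api_counts.most_common(10):
        print(f"{n:4d}  {api}")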
import tensorflow as tf return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) # Calculate loss, which includes softmax cross entropy and L2 regularization. cross_entropy = tf.cond(n_positives > 0., lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred), lambda: 0.) #cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred)
tensorflow.losses.sparse_softmax_cross_entropy
100
import tensorflow as tf 'zero_debias_moving_mean': True, 'fused': fused_batch_norm, } inputs.get_shape().assert_has_rank(2) if log(final_size, 2) != int(log(final_size, 2)): raise ValueError('`final_size` (%i) must be a power of 2.' % final_size) if final_size < 8: raise ValueError('`final_size` (%i) must be greater than 8.' % final_size) end_points = {} num_layers = int(log(final_size, 2)) - 1 with tf.compat.v1.variable_scope(scope, values=[inputs], reuse=reuse) as scope: with slim.arg_scope([normalizer_fn], **normalizer_fn_args): with slim.arg_scope([slim.conv2d_transpose], normalizer_fn=normalizer_fn, stride=2, kernel_size=4): net = tf.expand_dims(tf.expand_dims(inputs, 1), 1) # First upscaling is different because it takes the input vector. current_depth = depth * 2 ** (num_layers - 1) scope = 'deconv1' net = slim.conv2d_transpose(
tensorflow.compat.v1.variable_scope
101
from tensorflow.compat.v1 import ConfigProto, InteractiveSession import pickle from tensorflow.compat.v1 import ConfigProto, InteractiveSession import tensorflow as tf from speech_utils.ACRNN.tf.model_utils import train config = ConfigProto(log_device_placement=True) config.gpu_options.allow_growth = True session = tf.Session(config=config).as_default() def main(args): # Verify
tensorflow.compat.v1.ConfigProto
102
import tensorflow as tf entropy_bottleneck = EntropyBottleneck() conditional_entropy_model = SymmetricConditional() checkpoint = tf.train.Checkpoint(analysis_transform=analysis_transform, hyper_encoder=hyper_encoder, hyper_decoder=hyper_decoder,
tensorflow.train.Checkpoint
103
from tensorflow.python.ops import array_ops b_grads = b_module.bspmm(a_indices, a_values, a_shape, grad, adjoint_a=True, adjoint_b=False) bg_row=tf.shape(b_grads[0])[0] bg_col=tf.shape(b_grads[0])[1] b_grads = tf.reshape(b_grads, (numTensors * bg_row, bg_col)) if adj_b: b_grads = [array_ops.transpose(b_g) for b_g in b_grads] for t in range(numTensors): rows = a_indices[t][:, 0] cols = a_indices[t][:, 1] parts_a = array_ops.gather(grad[t], rows if not adj_a else cols) parts_b = array_ops.gather(b_list[t] if not adj_b else array_ops.transpose(b_list[t]), cols if not adj_a else rows) a_values_grads.append(math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1)) return_val = [None for _ in range(numTensors)] + a_values_grads + [None for _ in range(numTensors)] + [b_grads] return tuple(return_val)
tensorflow.python.ops.array_ops.gather
104
from tensorflow.python.ops import check_ops `log_prob(x)`. If `validate_args` is `False` and the inputs are invalid, correct behavior is not guaranteed. allow_nan_stats: `Boolean`, default `True`. If `False`, raise an exception if a statistic (e.g. mean/mode/etc...) is undefined for any batch member. If `True`, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. name: The name to prepend to all ops created by this distribution. Raises: TypeError: if `alpha` and `beta` are different dtypes. """ parameters = locals() parameters.pop("self") with ops.name_scope(name, values=[alpha, beta]) as ns: with ops.control_dependencies([ check_ops.assert_positive(alpha), check_ops.assert_positive(beta), ] if validate_args else []): self._alpha = array_ops.identity(alpha, name="alpha") self._beta = array_ops.identity(beta, name="beta") super(InverseGamma, self).__init__( dtype=self._alpha.dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, is_continuous=True, is_reparameterized=False, parameters=parameters, graph_parents=[self._alpha, self._beta], name=ns)
tensorflow.python.ops.check_ops.assert_positive
105
import tensorflow as tf validnum = tf.placeholder(tf.int32) learnrate = tf.placeholder(tf.float32) def getinputs(path): filename_queue=tf.train.string_input_producer([path]) reader=tf.TFRecordReader() _,serialized_example=reader.read(filename_queue) features=tf.parse_single_example(serialized_example, features={ 'label':tf.FixedLenFeature([], tf.int64), 'img_raw' : tf.FixedLenFeature([], tf.string), }) image=tf.decode_raw(features['img_raw'],tf.uint8) label=tf.cast(features['label'],tf.int32) image=tf.reshape(image,[4096,1]) return image,label def get_batch(image,label,batch_size,crop_size): #print(image.shape) #print(label.shape) images,labels=tf.train.shuffle_batch([image,label], batch_size=batch_size,num_threads=10,capacity=10000,min_after_dequeue=200) return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])
tensorflow.decode_raw
106
import tensorflow as tf def batch_norm(x, train, name, decay=0.99, epsilon=1e-5): shape = x.get_shape().as_list() with tf.variable_scope(name): beta = tf.get_variable('beta', [shape[-1]], initializer=tf.constant_initializer(0.)) gamma = tf.get_variable('gamma', [shape[-1]], initializer=tf.random_normal_initializer(1., 0.02)) pop_mean = tf.get_variable('pop_mean', [shape[-1]], initializer=tf.constant_initializer(0.), trainable=False) pop_var = tf.get_variable('pop_var', [shape[-1]], initializer=tf.constant_initializer(1.), trainable=False) if pop_mean not in tf.moving_average_variables(): tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_mean) tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_var) def func1(): # execute at training time batch_mean, batch_var = tf.nn.moments(x, range(len(shape) - 1)) update_mean = tf.assign_sub(pop_mean, (1 - decay)*(pop_mean - batch_mean)) update_var = tf.assign_sub(pop_var, (1 - decay)*(pop_var - batch_var))
tensorflow.moving_average_variables
107
from tensorflow.contrib.framework.python.ops import variables as contrib_variables def begin(self): self._last_step = None self._global_step_tensor = contrib_variables.get_global_step() for m in self._monitors:
tensorflow.contrib.framework.python.ops.variables.get_global_step
108
import tensorflow as tf actor.add_grad_to_graph(critic.a_grads) M = Memory(MEMORY_CAPACITY) saver = tf.train.Saver(max_to_keep=100) if LOAD_MODEL: all_ckpt = tf.train.get_checkpoint_state('./data', 'checkpoint').all_model_checkpoint_paths saver.restore(sess, all_ckpt[-1]) else: if os.path.isdir(DATA_PATH): shutil.rmtree(DATA_PATH) os.mkdir(DATA_PATH) sess.run(tf.global_variables_initializer()) if OUTPUT_GRAPH: tf.summary.FileWriter('logs', graph=sess.graph) var = 3 # control exploration var_min = 0.01 for i_episode in range(MAX_EPISODES): # s = (hull angle speed, angular velocity, horizontal speed, vertical speed, position of joints and joints angular speed, legs contact with ground, and 10 lidar rangefinder measurements.) s = env.reset() ep_r = 0 while True: if RENDER: env.render() a = actor.choose_action(s) a = np.clip(np.random.normal(a, var), -1, 1) # add randomness to action selection for exploration s_, r, done, _ = env.step(a) # r = total 300+ points up to the far end. If the robot falls, it gets -100.
tensorflow.summary.FileWriter
109
from tensorflow.python.ops import control_flow_ops [cell() for _ in range(num_layers)]) outputs, final_state = core_rnn.static_rnn( multi_cell, inputs, dtype=dtypes.float32) trainable_variables = ops.get_collection( ops.GraphKeys.TRAINABLE_VARIABLES) gradients = gradients_impl.gradients([outputs, final_state], trainable_variables) training_op = control_flow_ops.group(*gradients) self._BenchmarkOp(training_op, "tf_rnn_lstm_block_cell %s %s" % (config_name, self._GetConfigDesc(config))) if __name__ == "__main__": test.main()
tensorflow.python.ops.control_flow_ops.group
110
import tensorflow as tf if not no_moving_average: moving_mean = self._make_var('moving_mean', (in_ch,), trainable=False, init_constant=0) moving_variance = self._make_var('moving_variance', (in_ch,), trainable=False, init_constant=1) if is_train: # For training, do batch norm with batch mean & variance # Update moving averages if training (X, mean, variance) = tf.nn.fused_batch_norm(X, scale, offset, epsilon=epsilon, is_training=True) update_mean = moving_averages.assign_moving_average(moving_mean, mean, decay) update_variance = moving_averages.assign_moving_average(moving_variance, variance, decay) with tf.control_dependencies([update_mean, update_variance]): X = tf.identity(X) else: # For prediction, do batch norm with computed moving mean & variance from training # Don't update moving averages if predicting
tensorflow.nn.fused_batch_norm
111
from tensorflow.python.framework import ops with ops.name_scope( name, 'expand_and_tile', (tensor, multiple, dim)) as scope: # Sparse. if isinstance(tensor, ops.SparseTensorValue): tensor = ops.SparseTensor.from_value(tensor) if isinstance(tensor, ops.SparseTensor): if dim < 0: expand_dims = array_ops.reshape(
tensorflow.python.framework.ops.SparseTensor.from_value
112
import tensorflow as tf # run_config = tf.estimator.RunConfig( # experimental_distribute=tf.contrib.distribute.DistributeConfig( # train_distribute=distribution, # remote_cluster={ # 'worker': ['localhost:5000', 'localhost:5001'], # }, # ) # ) os.environ["TF_CONFIG"] = json.dumps( { "cluster": {"worker": worker}, "task": {"type": "worker", "index": task_index}, } ) strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() run_config = tf.estimator.RunConfig( save_summary_steps=1, train_distribute=strategy, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_ckpt_steps, log_step_count_steps=1, ) else: distribution = tf.contrib.distribute.MirroredStrategy( num_gpus=FLAGS.num_gpus ) run_config = tf.estimator.RunConfig(train_distribute=distribution)
tensorflow.distribute.experimental.MultiWorkerMirroredStrategy
113
from tensorflow.python.estimator.canned import head as head_lib regressor.export(self._export_dir_base) def testRankingDontThrowExceptionForForEstimator(self): learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 2 learner_config.constraints.max_tree_depth = 1 model_dir = tempfile.mkdtemp() config = run_config.RunConfig() head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS) model = estimator.GradientBoostedDecisionTreeRanker( head=head_fn, learner_config=learner_config, num_trees=1, examples_per_layer=3, model_dir=model_dir,
tensorflow.python.estimator.canned.head._binary_logistic_head_with_sigmoid_cross_entropy_loss
114
from tensorflow.python.framework import ops Raises: TypeError: If `x` cannot be cast to the `bfloat16`. """ return cast(x, types.bfloat16, name=name) ops.Tensor._override_operator("__neg__", neg) ops.Tensor._override_operator("__abs__", abs) # __invert__ corresponds to the ~ operator. Here we follow the numpy convention # ~ marks an elementwise bit-wise inverse. This is only implemented for boolean # tensors and will throw a TypeError if used on nonboolean arrays ops.Tensor._override_operator("__invert__", logical_not) def _OverrideBinaryOperatorHelper(func, op_name): """Register operators with different tensor and scalar versions. Args: func: the operator op_name: name of the operator being overridden """ def binary_op_wrapper(x, y):
tensorflow.python.framework.ops.Tensor._override_operator
115
import tensorflow as tf self.dataset = datasets.FlowersData(FLAGS.data_dir) else: raise ValueError('Unknown dataset. Must be one of imagenet or flowers.') self.local_parameter_device_flag = FLAGS.local_parameter_device if self.job_name: self.task_index = FLAGS.task_index self.cluster = tf.train.ClusterSpec({'ps': self.ps_hosts, 'worker': self.worker_hosts}) self.server = None if not self.server: self.server = tf.train.Server(self.cluster, job_name=self.job_name, task_index=self.task_index,
tensorflow.train.ClusterSpec
116
import tensorflow as tf )) assignments.append(tf.scatter_update(ref=self.terminal_memory, indices=indices, updates=terminal)) assignments.append(tf.scatter_update(ref=self.reward_memory, indices=indices, updates=reward)) # Add episode indices. with tf.control_dependencies(control_inputs=assignments): num_episodes = tf.count_nonzero(input_tensor=terminal, axis=0, dtype=util.tf_dtype('int')) assignment = tf.assign( ref=self.episode_indices[self.episode_count: self.episode_count + num_episodes], value=tf.boolean_mask(tensor=indices, mask=terminal) ) # Increment episode count. with tf.control_dependencies(control_inputs=(assignment,)): assignment = tf.assign_add(ref=self.episode_count, value=num_episodes) # Increment memory index. with tf.control_dependencies(control_inputs=(assignment,)): assignment = tf.assign( ref=self.episode_indices[-1], value=tf.where(self.memory_index + num_instances > self.capacity, self.episode_indices[self.episode_count - 1], self.capacity - 1) ) with tf.control_dependencies(control_inputs=(assignment,)): assignment = tf.assign(ref=self.memory_index, value=((self.memory_index + num_instances) % self.capacity)) with tf.control_dependencies(control_inputs=(assignment,)):
tensorflow.assign_add
117
import tensorflow as tf self.initialize_tf_vars() logger.log(self.sess.graph) self.has_setup = True self.setup_args = SimpleNamespace( sampler_cls=sampler_cls, sampler_args=sampler_args) def initialize_tf_vars(self): """Initialize all uninitialized variables in session.""" with tf.name_scope('initialize_tf_vars'): uninited_set = [ e.decode() for e in self.sess.run(tf.report_uninitialized_variables()) ] self.sess.run( tf.variables_initializer([ v for v in tf.global_variables() if v.name.split(':')[0] in uninited_set ])) def _start_worker(self): """Start Plotter and Sampler workers.""" self.sampler.start_worker() if self.plot: from garage.tf.plotter import Plotter
tensorflow.report_uninitialized_variables
118
import tensorflow as tf fname = os.path.join(tf.resource_loader.get_data_files_path(), 'samples/configs/' + model_name + '.config') label_map_path = os.path.join(tf.resource_loader.get_data_files_path(), 'data/pet_label_map.pbtxt') data_path = os.path.join(tf.resource_loader.get_data_files_path(), 'test_data/pets_examples.record') configs = config_util.get_configs_from_pipeline_file(fname) override_dict = {
tensorflow.resource_loader.get_data_files_path
119
import tensorflow as tf loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small)) if hard_ratio < 1.0: hard_num = tf.cast(tools.shape(pred1)[0] * hard_ratio, tf.int32) loss = tf.reshape(loss, [-1]) hard_loss, _ = tf.math.top_k(loss, k=hard_num) return hard_loss return loss
tensorflow.math.top_k
120
import tensorflow as tf def get_valid_batch(image,label,batch_size): images,labels=tf.train.batch([image,label],batch_size=batch_size)
tensorflow.train.batch
121
from tensorflow.python.ops import array_ops # Accumulate the prediction to current confusion matrix. current_cm = confusion_matrix_ops.confusion_matrix( predictions, labels, num_classes, weights=weights, dtype=cm_dtype) update_op = state_ops.assign_add(total_cm, current_cm) def compute_mean_iou(name): """Compute the mean intersection-over-union via the confusion matrix.""" sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0)) sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1)) cm_diag = math_ops.to_float(array_ops.diag_part(total_cm)) denominator = sum_over_row + sum_over_col - cm_diag # If the value of the denominator is 0, set it to 1 to avoid # zero division. denominator = math_ops.select( math_ops.greater(denominator, 0), denominator, array_ops.ones_like(denominator))
tensorflow.python.ops.array_ops.diag_part
122
from tensorflow.contrib.layers.python.layers import feature_column_ops def _get_linear_feature_columns(self): if not self._linear_feature_columns: return None feature_column_ops.check_feature_columns(self._linear_feature_columns) return sorted(set(self._linear_feature_columns), key=lambda x: x.key) def _get_dnn_feature_columns(self): if not self._dnn_feature_columns: return None feature_column_ops.check_feature_columns(self._dnn_feature_columns) return sorted(set(self._dnn_feature_columns), key=lambda x: x.key) def _dnn_logits(self, features, is_training): return self._dnn_model.build_model( features, self._dnn_feature_columns, is_training) def _linear_logits(self, features, is_training): return self._linear_model.build_model( features, self._linear_feature_columns, is_training)
tensorflow.contrib.layers.python.layers.feature_column_ops.check_feature_columns
123
import tensorflow as tf self.saver = tf.train.Saver(tf.global_variables()) def _get_lstm_cell(self, config, is_training): if config.rnn_mode == BASIC: return tf.contrib.rnn.BasicLSTMCell( config.hidden_size, forget_bias=0., state_is_tuple=True, reuse=not is_training) if config.rnn_mode == BLOCK: return tf.contrib.rnn.LSTMBlockCell( config.hidden_size, forget_bias=0.) raise ValueError('rnn_mode {} not supported'.format(config.rnn_mode)) def _build_rnn_graph(self, inputs, config, is_training): def make_cell(): cell = self._get_lstm_cell(config, is_training)
tensorflow.contrib.rnn.LSTMBlockCell
124
from tensorflow.core.protobuf import queue_runner_pb2 tf.initialize_all_variables() # Creates a saver. save = tf.train.Saver({"v0": v0}) # Adds a set of collections. tf.add_to_collection("int_collection", 3) tf.add_to_collection("float_collection", 3.5) tf.add_to_collection("string_collection", "hello") tf.add_to_collection("variable_collection", v0) # Add QueueRunners. tf.train.add_queue_runner(qr) # Adds user_defined proto in three formats: string, bytes and Any. queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue") tf.add_to_collection("user_defined_string_collection", str(queue_runner)) tf.add_to_collection("user_defined_bytes_collection", queue_runner.SerializeToString()) any_buf = Any() any_buf.Pack(queue_runner) tf.add_to_collection("user_defined_any_collection", any_buf) # Generates MetaGraphDef. meta_graph_def = save.export_meta_graph(filename) self.assertTrue(meta_graph_def.HasField("saver_def"))
tensorflow.core.protobuf.queue_runner_pb2.QueueRunnerDef
125
import tensorflow as tf for key, value in zip(act_values_dict.keys(), act_values): act_values_dict[key] += value summary = tf.Summary() current_global_step = sess.run(global_step)
tensorflow.Summary
126
from tensorflow.python.ops import math_ops ``` entropy = alpha - log(beta) + log(Gamma(alpha)) + (1-alpha)digamma(alpha) ``` where digamma(alpha) is the digamma function.""") def _entropy(self): return (self.alpha + math_ops.log(self.beta) + math_ops.lgamma(self.alpha) - (1. + self.alpha) * math_ops.digamma(self.alpha)) @distribution_util.AppendDocstring( """The mean of an inverse gamma distribution is `beta / (alpha - 1)`, when `alpha > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is `False`, an exception will be raised rather than returning `NaN`""") def _mean(self): mean = self.beta / (self.alpha - 1.) if self.allow_nan_stats: nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype()) return array_ops.where(
tensorflow.python.ops.math_ops.digamma
127
import tensorflow as tf # make some fake noise data_size = 100 noise_tensor = tf.random_normal((data_size, INPUT_DIM)) real_data_tensor = tf.random_uniform((data_size, OUTPUT_DIM)) dataset = tf.data.Dataset.from_tensor_slices((noise_tensor, real_data_tensor)) dataset = dataset.repeat().shuffle(10) # TPUEstimator passes params when calling input_fn batch_size = params.get('train_batch_size', 16)
tensorflow.data.Dataset.from_tensor_slices
128
from tensorflow.python.ops import math_ops `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. """ is_below_threshold = math_ops.to_float(math_ops.less(values, threshold)) return streaming_mean(is_below_threshold, _mask_weights(ignore_mask, weights), metrics_collections,
tensorflow.python.ops.math_ops.less
129
import tensorflow as tf assert size[0] % 2 == 1 and size[1] % 2 == 1, "REFLECTION PAD ONLY WORKING FOR ODD FILTER SIZE.. " + str(size) pad_x = size[0] // 2 pad_y = size[1] // 2 input = tf.pad(input, [[0, 0], [pad_x, pad_x], [pad_y, pad_y], [0, 0]], "REFLECT") padding = "VALID" return tf.layers.conv2d(input, channels, kernel_size=size, strides=[stride, stride], padding=padding, kernel_initializer=init, name='conv' + id, use_bias=use_bias, dilation_rate=(dilation, dilation)) def z_conv(self, id, input, channels, size, stride=1, padding="SAME", use_bias=False, dilation=1): # zero mean conv
tensorflow.layers.conv2d
130
import tensorflow as tf self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1) self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1) losses = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits1, labels=self.y1) losses2 = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits2, labels=self.y2) self.loss = tf.reduce_mean(losses + losses2) if config.l2_norm is not None: variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) l2_loss = tf.contrib.layers.apply_regularization(regularizer, variables) self.loss += l2_loss if config.decay is not None: self.var_ema = tf.train.ExponentialMovingAverage(config.decay) ema_op = self.var_ema.apply(tf.trainable_variables()) with tf.control_dependencies([ema_op]): self.loss = tf.identity(self.loss) self.assign_vars = []
tensorflow.contrib.layers.apply_regularization
131
from tensorflow.contrib.eager.python.examples.revnet import config as config_ for grad, var in zip(grads, vars_): if grad is not None: self.assertEqual(grad.shape, var.shape) def test_training_graph(self): """Test model training in graph mode.""" with tf.Graph().as_default(): config = config_.get_hparams_cifar_38() config.add_hparam("n_classes", 10) config.add_hparam("dataset", "cifar-10") x = tf.random_normal( shape=(self.config.batch_size,) + self.config.input_shape) t = tf.random_uniform(
tensorflow.contrib.eager.python.examples.revnet.config.get_hparams_cifar_38
132
import tensorflow as tf for path in paths: spectrograms.append(np.load("spectrogram/" + path + ".npy")) if spectrograms[-1].shape[0] > max_x: max_x = spectrograms[-1].shape[0] return spectrograms, max_x # In[4]: tf.reset_default_graph() sess = tf.InteractiveSession() model = Model() sess.run(tf.global_variables_initializer()) # In[5]: for e in range(30): pbar = tqdm(range(0, len(text_files), batch_size), desc="minibatch loop") total_cost, total_acc = 0, 0
tensorflow.InteractiveSession
133
from tensorflow.python.ops import gen_nn_ops type `tf.float32`. ksize: A list of ints that has length >= 4. The size of the window for each dimension of the input tensor. strides: A list of ints that has length >= 4. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. data_format: A string. 'NHWC' and 'NCHW" are supported. name: Optional name for the operation. Returns: A `Tensor` with type `tf.float32`. The max pooled output tensor. """ with ops.op_scope([value], name, "MaxPool") as name: value = ops.convert_to_tensor(value, name="input") return gen_nn_ops._max_pool(value, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) ops.RegisterShape("Relu")(common_shapes.unchanged_shape) ops.RegisterShape("Relu6")(common_shapes.unchanged_shape) ops.RegisterShape("Elu")(common_shapes.unchanged_shape) ops.RegisterShape("Softplus")(common_shapes.unchanged_shape) ops.RegisterShape("Softsign")(common_shapes.unchanged_shape) @ops.RegisterShape("ReluGrad")
tensorflow.python.ops.gen_nn_ops._max_pool
134
import tensorflow as tf round(FLAGS.train_batch_size * FLAGS.target_train_batch_multiplier)) finetune_data = tfds.load(name=FLAGS.target_dataset, split='train') finetune_data = finetune_data.shuffle(512).repeat().batch( target_train_batch_size) target_val_batch_size = int( round(FLAGS.train_batch_size * FLAGS.target_val_batch_multiplier)) target_data = tfds.load(name=FLAGS.target_dataset, split='validation') target_data = target_data.shuffle(512).repeat().batch(target_val_batch_size) dataset = tf.data.Dataset.zip((train_data, finetune_data, target_data)) dataset = dataset.map(_merge_datasets) dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE) return dataset max_train_steps = FLAGS.train_steps l2tl_classifier.train(make_input_dataset, max_steps=max_train_steps) if __name__ == '__main__': tf.logging.set_verbosity(tf.logging.INFO)
tensorflow.data.Dataset.zip
135
from tensorflow.python.ops import math_ops thresh_tiled) pred_is_neg = math_ops.logical_not(pred_is_pos) # Tile labels by number of thresholds label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1]) label_is_neg = math_ops.logical_not(label_is_pos) true_positives = _create_local('true_positives', shape=[num_thresholds]) false_negatives = _create_local('false_negatives', shape=[num_thresholds]) true_negatives = _create_local('true_negatives', shape=[num_thresholds]) false_positives = _create_local('false_positives', shape=[num_thresholds]) is_true_positive = math_ops.to_float( math_ops.logical_and(label_is_pos, pred_is_pos)) is_false_negative = math_ops.to_float( math_ops.logical_and(label_is_pos, pred_is_neg)) is_false_positive = math_ops.to_float( math_ops.logical_and(label_is_neg, pred_is_pos)) is_true_negative = math_ops.to_float( math_ops.logical_and(label_is_neg, pred_is_neg)) if weights is not None: weights = math_ops.to_float(weights) weights_tiled = array_ops.tile(array_ops.reshape( _broadcast_weights(weights, predictions), [1, -1]), [num_thresholds, 1]) thresh_tiled.get_shape().assert_is_compatible_with(
tensorflow.python.ops.math_ops.logical_and
136
from tensorflow.python.ops import gen_math_ops TypeError: If `x` cannot be cast to the `dtype`. """ with ops.op_scope([x], name, "Cast") as name: if isinstance(x, ops.SparseTensor): values_cast = cast(x.values, dtype, name=name) return ops.SparseTensor(x.indices, values_cast, x.shape) else: # TODO(touts): Handle what Josh said. # # Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that # allows some conversions that cast() can't do, e.g. casting numbers to # strings. x = ops.convert_to_tensor(x, name="x") if x.dtype.base_dtype == dtype: return x return gen_math_ops.cast(x, dtype, name=name) def to_float(x, name="ToFloat"): """Casts a tensor to type `float32`. Args: x: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`. Raises: TypeError: If `x` cannot be cast to the `float32`.
tensorflow.python.ops.gen_math_ops.cast
137
import tensorflow as tf Args: private_samples: a tensor of shape [num_samples, num_features]. shared_samples: a tensor of shape [num_samples, num_features]. weight: the weight of the incoherence loss. name: the name of the tf summary. """ with tf.name_scope(name): private_samples -= tf.reduce_mean(private_samples, 0) shared_samples -= tf.reduce_mean(shared_samples, 0) private_samples = tf.nn.l2_normalize(private_samples, 1) shared_samples = tf.nn.l2_normalize(shared_samples, 1) correlation_matrix = tf.matmul(private_samples, shared_samples, transpose_a=True) cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight cost = tf.where(cost > 0, cost, 0, name='value') assert_op = tf.Assert(tf.is_finite(cost), [cost]) with tf.control_dependencies([assert_op]): barrier = tf.no_op(name)
tensorflow.nn.l2_normalize
138
import tensorflow as tf initializer=tf.constant_initializer(0), trainable=False) with tf.colocate_with(self.means): self.ema_means = tf.get_variable(
tensorflow.colocate_with
139
import tensorflow.contrib.slim as slim def get_gtboxes_and_label(self, gtboxes_and_label_h, gtboxes_and_label_r, num_objects): return gtboxes_and_label_h[:int(num_objects), :].astype(np.float32), \ gtboxes_and_label_r[:int(num_objects), :].astype(np.float32) def main(self): with tf.Graph().as_default() as graph, tf.device('/cpu:0'): num_gpu = len(cfgs.GPU_GROUP.strip().split(',')) global_step = slim.get_or_create_global_step() lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu) tf.summary.scalar('lr', lr) optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM) r3det_dcl = build_whole_network.DetectionNetworkR3DetDCL(cfgs=self.cfgs, is_training=True)
tensorflow.contrib.slim.get_or_create_global_step
140
import tensorflow as tf ) ) ''' with tf.train.MonitoredTrainingSession( checkpoint_dir=params.output, hooks=train_hooks, save_checkpoint_secs=None, config=config) as sess: while not sess.should_stop():
tensorflow.train.MonitoredTrainingSession
141
import tensorflow as tf if not white: q_mu = tf.matrix_triangular_solve(Luu, q_mu, lower=True) Luu_tiled = tf.tile(Luu[None, :, :], [num_func, 1, 1]) # remove line once issue 216 is fixed q_sqrt_r = tf.matrix_triangular_solve(Luu_tiled, q_sqrt_r, lower=True) Li_eKuf = tf.matrix_triangular_solve(Luu, eKuf, lower=True) # M x N fmean = tf.matmul(Li_eKuf, q_mu, transpose_a=True)
tensorflow.matrix_triangular_solve
142
import tensorflow as tf self.epsilon = epsilon self.axis = axis self.center=center self.scale=scale with tf.variable_scope(name) as scope: with tf.variable_scope('bn') : self.gamma= tf.get_variable('gamma',[dims], initializer=tf.constant_initializer(1.0)) self.beta = tf.get_variable('beta',[dims], initializer=tf.constant_initializer(0.0)) self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False) self.moving_variance = tf.get_variable('moving_variance',[dims], initializer=tf.constant_initializer(1.0), trainable=False) self.scope = scope def __call__(self,input_var,is_training,**xargs) : with tf.variable_scope(self.scope) : return tf.layers.batch_normalization( input_var, axis=self.axis, momentum=self.momentum, epsilon=self.epsilon, center=self.center, scale=self.scale, training=is_training, reuse=True, name='bn') """ ---Do NOT forget to add update_ops dependencies for your loss function.--- update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,tf.get_default_graph().get_name_scope()) #And, do not make any scope inside map_fn, since scope.name will not work...(it is corrupted by map_fn.)
tensorflow.layers.batch_normalization
143
import tensorflow as tf # use the TPU version of RunConfig config = tf.contrib.tpu.RunConfig(
tensorflow.contrib.tpu.RunConfig
144
import tensorflow as tf objectives.append((Objective(name, contra_loss, min, include, exclude))) elif name == 'reward' and config.r_loss == 'l2': pred = heads[name](features) l2_loss = tf.compat.v1.losses.mean_squared_error(target[name], pred) # l2_loss = tf.nn.l2_loss(pred - target[name]) objectives.append((Objective(name, l2_loss, min, include, exclude)))
tensorflow.compat.v1.losses.mean_squared_error
145
import tensorflow as tf tf.reshape(byte, shape=[]), 3, **JPEG_OPT) image = resize_shortest_edge(image, jpeg_shape, 224) image = center_crop(image, 224) return image image = tf.cond(is_bad, bad, good) # TODO other imgproc image = lighting(image, 0.1, eigval=np.array([0.2175, 0.0188, 0.0045], dtype='float32') * 255.0, eigvec=np.array([[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]], dtype='float32')) image = tf.image.random_flip_left_right(image) image = tf.reverse(image, axis=[2]) # to BGR return image return training_mapper if isTrain else validation_mapper """ ====== Model & Evaluation ======= """
tensorflow.image.random_flip_left_right
146
import tensorflow as tf else: fvar = ( (eKff - tf.trace(Li_eKuffu_Lit))[:, None] + tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov) + tf.einsum("ig,nij,jg->ng", q_mu, Li_eKuffu_Lit, q_mu) - fmean ** 2 + tf.matrix_diag_part(e_related_to_mean) ) return fmean, fvar
tensorflow.matrix_diag_part
147
from tensorflow.python.ops import variable_scope @contextlib.contextmanager def as_default(self): yield def create_eager_var_store(): if context.in_eager_mode(): return variable_scope.EagerVariableStore() else: return DummyVariableStore() def scheduled_sampling(hparams, problem_hparams, dp, sharded_logits, losses, sharded_features, transformed_features, model): """Scheduled sampling."""
tensorflow.python.ops.variable_scope.EagerVariableStore
148
import tensorflow as tf trg_len = tf.shape(attention_weights)[1] src_indices = tf.tile(tf.reshape(tf.range(src_len), shape=[1, 1, src_len]), [batch_size, trg_len, 1]) trg_indices = tf.tile(tf.reshape(tf.range(trg_len), shape=[1, trg_len, 1]), [batch_size, 1, src_len]) source_length = encoder_input_length[0] target_length = tf.to_int32(tf.reduce_sum(trg_mask, axis=1)) true_src_len = tf.reshape(source_length, shape=[batch_size, 1, 1]) - 1 true_trg_len = tf.reshape(target_length, shape=[batch_size, 1, 1]) - 1 src_mask = tf.to_float(tf.sequence_mask(source_length, maxlen=src_len)) mask = tf.matmul(tf.expand_dims(trg_mask, axis=2), tf.expand_dims(src_mask, axis=1)) monotonous = tf.sqrt(((true_trg_len * src_indices - true_src_len * trg_indices) ** 2) / (true_trg_len**2 + true_src_len**2)) monotonous = tf.to_float(monotonous < monotonicity_dist) non_monotonous = (1 - monotonous) * mask attn_loss = tf.reduce_sum(attention_weights * tf.stop_gradient(non_monotonous)) / tf.to_float(batch_size) if monotonicity_decay:
tensorflow.sequence_mask
149
import tensorflow as tf else: i_direction = 1 variable_scope_name = 'RNN_{0}/RNN/MultiRNNCell/Cell{1}'.format( i_direction, i) with tf.variable_scope(variable_scope_name): layer_output, final_state = tf.nn.dynamic_rnn( lstm_cell, layer_input, sequence_length=sequence_lengths, initial_state=tf.nn.rnn_cell.LSTMStateTuple( *batch_init_states), ) self.lstm_state_sizes[direction].append(lstm_cell.state_size) self.lstm_init_states[direction].append(init_states) self.lstm_final_states[direction].append(final_state) if direction == 'forward': self.lstm_outputs[direction].append(layer_output)
tensorflow.nn.rnn_cell.LSTMStateTuple
150
import tensorflow as tf images,labels=tf.train.batch([image,label],batch_size=batch_size) return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size]) def get_valid_batch(image,label,batch_size): images,labels=tf.train.batch([image,label],batch_size=batch_size) return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size]) class trainwork(object): def __init__(self): with tf.variable_scope('scop'): self.w1=tf.get_variable('w1', [4096,1024],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.w2=tf.get_variable('w2', [1024,classnum],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.b1 = tf.get_variable('b1', [1024],initializer=tf.constant_initializer(0.0)) self.b2 = tf.get_variable('b2', [classnum],initializer=tf.constant_initializer(0.0)) def inference(self,images): images=tf.cast(images,tf.float32)/255.0 l1 = tf.matmul(images, self.w1)+self.b1 l1=tf.nn.relu(l1) out = tf.matmul(l1, self.w2)+self.b2
tensorflow.contrib.layers.xavier_initializer_conv2d
151
from tensorflow.core.framework import op_def_pb2 inputs: A list of (name, data type) pairs of function arguments. outputs: A list of (name, data type) pairs of function return values. """ self._sig = op_def_pb2.OpDef() self._sig.name = func_name
tensorflow.core.framework.op_def_pb2.OpDef
152
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten #removed GOAL_SIZE flat1b = Dense(units=RNN_SIZE-loc_layer_size)(flat1a) # FC layers for goal_pos input # goal_layer1 = Dense(units=GOAL_SIZE)(goal_pos) # goal_layer2 = Dense(units=GOAL_SIZE)(goal_layer1) # FC layers to find next location loc_layer1 = Dense(units=loc_layer_size)(prev_loc) loc_layer2 = Dense(units=loc_layer_size)(loc_layer1) # Concatenationation of above layers, followed by FC layer concat = tf.concat([flat1b, loc_layer2],1) # goal_layer2 h1 = Dense(units=RNN_SIZE)(concat) h2 = Dense(units=RNN_SIZE)(h1) self.h3 = tf.nn.relu(h2+concat) #Recurrent network for temporal dependencies
tensorflow.keras.layers.Dense
153
from tensorflow.contrib.distributions.python.ops import distribution_util @distribution_util.AppendDocstring( """Note: when `rate` is an integer, there are actually two modes: `rate` and `rate - 1`. In this case we return the larger, i.e., `rate`.""") def _mode(self): return math_ops.floor(self.rate) def _assert_valid_sample(self, x, check_integer=True): if not self.validate_args: return x dependencies = [check_ops.assert_non_negative(x)] if check_integer: dependencies += [distribution_util.assert_integer_form( x, message="x has non-integer components.")] return control_flow_ops.with_dependencies(dependencies, x)
tensorflow.contrib.distributions.python.ops.distribution_util.assert_integer_form
154
import tensorflow as tf neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types) next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64) next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1) next_values = weight.values next_shape = tf.stack([tf.size(nodes), tf.size(next_nodes)]) next_shape = tf.cast(next_shape, tf.int64) next_adj = tf.SparseTensor(next_indices, next_values, next_shape) next_adj = tf.sparse_reorder(next_adj)
tensorflow.size
155
from tensorflow.python.framework import tensor_util x = ops.convert_to_tensor(x, name="x") def slice_shape(start_sum, size, name): """Closure to slice out shape.""" start_sum = start_sum if start_sum else ( array_ops.zeros((), dtype=dtypes.int32, name="zero"),) if (x.get_shape().ndims is not None and self._is_all_constant_helper(size, *start_sum)): start = sum(tensor_util.constant_value(s) for s in start_sum) stop = start + tensor_util.constant_value(size) slice_ = x.get_shape()[start:stop].as_list() if all(s is not None for s in slice_): return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name) # Fall-through intended. return array_ops.slice(array_ops.shape(x), (sum(start_sum),), (size,))
tensorflow.python.framework.tensor_util.constant_value
156
from tensorflow.contrib.summary import summary_test_util dev_data = data.SnliData(fake_train_file, word2index) test_data = data.SnliData(fake_train_file, word2index) # 2. Create a fake config. config = _test_spinn_config( data.WORD_VECTOR_LEN, 4, logdir=os.path.join(self._temp_data_dir, "logdir")) # 3. Test training of a SPINN model. trainer = spinn.train_or_infer_spinn( embed, word2index, train_data, dev_data, test_data, config) # 4. Load train loss values from the summary files and verify that they # decrease with training. summary_file = glob.glob(os.path.join(config.logdir, "events.out.*"))[0] events = summary_test_util.events_from_file(summary_file) train_losses = [event.summary.value[0].simple_value for event in events if event.summary.value and event.summary.value[0].tag == "train/loss"] self.assertEqual(config.epochs, len(train_losses)) self.assertLess(train_losses[-1], train_losses[0]) # 5. Verify that checkpoints exist and contains all the expected variables. self.assertTrue(glob.glob(os.path.join(config.logdir, "ckpt*"))) ckpt_variable_names = [ item[0] for item in checkpoint_utils.list_variables(config.logdir)] self.assertIn("global_step", ckpt_variable_names) for v in trainer.variables: variable_name = v.name[:v.name.index(":")] if ":" in v.name else v.name self.assertIn(variable_name, ckpt_variable_names)
tensorflow.contrib.summary.summary_test_util.events_from_file
157
import tensorflow as tf with tf.Graph().as_default() as graph, tf.device('/cpu:0'): num_gpu = len(cfgs.GPU_GROUP.strip().split(',')) global_step = slim.get_or_create_global_step() lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu) tf.summary.scalar('lr', lr) optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM) r3det_gwd = build_whole_network.DetectionNetworkR3DetGWD(cfgs=self.cfgs, is_training=True) with tf.name_scope('get_batch'): if cfgs.IMAGE_PYRAMID: shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN) shortside_len = tf.random_shuffle(shortside_len_list)[0] else: shortside_len = cfgs.IMG_SHORT_SIDE_LEN img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \ self.reader.next_batch(dataset_name=cfgs.DATASET_NAME, batch_size=cfgs.BATCH_SIZE * num_gpu, shortside_len=shortside_len, is_training=True) # data processing inputs_list = [] for i in range(num_gpu):
tensorflow.random_shuffle
158
import tensorflow as tf features[spec.name] = feature return tf.train.Example(features=tf.train.Features(feature=features)) def _input_fn_builder(self, input_file, is_training): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function.""" d = tf.data.TFRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) return d.apply( tf.contrib.data.map_and_batch( self._decode_tfrecord, batch_size=params["batch_size"], drop_remainder=True ) ) return input_fn def _decode_tfrecord(self, record): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, self._name_to_feature_config) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name, tensor in example.items():
tensorflow.contrib.data.map_and_batch
159
from tensorflow.python.summary import summary if grad_values is not None: var_name = variable.name.replace(":", "_") if "gradients" in summaries: summary.histogram("gradients/%s" % var_name, grad_values) if "gradient_norm" in summaries: summary.scalar("gradient_norm/%s" % var_name, clip_ops.global_norm([grad_values]))
tensorflow.python.summary.summary.histogram
160
from tensorflow.python.framework import ops grad, use_locking=self._use_locking).op def _apply_sparse(self, grad, var): delta = ops.IndexedSlices(grad.values * self._learning_rate_tensor, grad.indices, grad.dense_shape) return var.scatter_sub(delta, use_locking=self._use_locking)
tensorflow.python.framework.ops.IndexedSlices
161
from tensorflow.contrib.metrics.python.ops import confusion_matrix_ops labels = array_ops.reshape(labels, [-1]) weights = _mask_weights(ignore_mask, weights) if weights is not None: weights_rank = weights.get_shape().ndims if weights_rank > 1: weights = array_ops.reshape(weights, [-1]) # Accumulate the prediction to current confusion matrix. current_cm = confusion_matrix_ops.confusion_matrix( predictions, labels, num_classes, weights=weights, dtype=cm_dtype) update_op = state_ops.assign_add(total_cm, current_cm) def compute_mean_iou(name): """Compute the mean intersection-over-union via the confusion matrix.""" sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0)) sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1)) cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
tensorflow.contrib.metrics.python.ops.confusion_matrix_ops.confusion_matrix
162
import tensorflow as tf ious = iou_of(tf.expand_dims(gt_boxes, axis=0), tf.expand_dims(corner_form_priors, axis=1)) # size: num_priors best_target_per_prior = tf.math.reduce_max(ious, axis=1) best_target_per_prior_index = tf.math.argmax(ious, axis=1) # size: num_targets best_prior_per_target = tf.math.reduce_max(ious, axis=0) best_prior_per_target_index = tf.math.argmax(ious, axis=0) targets = tf.range(tf.shape(best_prior_per_target_index)[0], dtype='int64') best_target_per_prior_index = tf.tensor_scatter_nd_update(best_target_per_prior_index, tf.expand_dims(best_prior_per_target_index, 1), targets) # 2.0 is used to make sure every target has a prior assigned best_target_per_prior = tf.tensor_scatter_nd_update(best_target_per_prior, tf.expand_dims(best_prior_per_target_index, 1), tf.ones_like(best_prior_per_target_index, dtype=tf.float32)*2.0)
tensorflow.math.argmax
163
from tensorflow.python.training import gradient_descent def _setupSparse(self, is_distributed, dtype): with self._maybeWithDevice("/job:ps" if is_distributed else None): var0 = variables.Variable( [[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]], dtype=dtype) var1 = variables.Variable( [[0.0, 1.0], [0.0, 3.0], [0.0, 5.0]], dtype=dtype) with self._maybeWithDevice("/job:worker" if is_distributed else None): grads = ops.IndexedSlices( constant_op.constant( [[0.1, 0.1], [0.1, 0.1]], dtype=dtype), [0, 2], [3, 2]) sgd = gradient_descent.GradientDescentOptimizer(3.0) clip_opt = variable_clipping_optimizer.VariableClippingOptimizer( sgd, {var0: [1], var1: [0]}, 2.0) update_op = clip_opt.apply_gradients( list(zip([grads, grads], [var0, var1]))) variables.global_variables_initializer().run() return var0, var1, update_op def _assertSparseCorrect(self, var0, var1, update_op):
tensorflow.python.training.gradient_descent.GradientDescentOptimizer
164
from tensorflow.python.ops import math_ops predictions, labels = tensor_util.remove_squeezable_dimensions( predictions, labels) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) if labels.dtype != predictions.dtype: predictions = math_ops.cast(predictions, labels.dtype) is_correct = math_ops.to_float(math_ops.equal(predictions, labels)) return streaming_mean(is_correct, weights, metrics_collections, updates_collections, name or 'accuracy')
tensorflow.python.ops.math_ops.equal
165
from tensorflow.python.ops import data_flow_ops aggmeth = tf.AggregationMethod.DEFAULT grads = tf.gradients(loss, params, aggregation_method=aggmeth) if FLAGS.staged_vars: grad_dtypes = [grad.dtype for grad in grads] grad_shapes = [grad.shape for grad in grads] grad_stage = data_flow_ops.StagingArea(grad_dtypes, grad_shapes) grad_stage_op = grad_stage.put(grads) # In general, this decouples the computation of the gradients and # the updates of the weights. # During the pipeline warm up, this runs enough training to produce # the first set of gradients. gpu_grad_stage_ops.append(grad_stage_op)
tensorflow.python.ops.data_flow_ops.StagingArea
166
import tensorflow.contrib.graph_editor as ge ts_all = ge.filter_ts(fwd_ops, True) # get the tensors ts_all = [t for t in ts_all if '/read' not in t.name] ts_all = set(ts_all) - set(xs) - set(ys) # construct list of tensors to checkpoint during forward pass, if not # given as input if type(checkpoints) is not list: if checkpoints == 'collection': checkpoints = tf.get_collection('checkpoints') elif checkpoints == 'speed': # checkpoint all expensive ops to maximize running speed checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul') elif checkpoints == 'memory': # remove very small tensors and some weird ops def fixdims(t): # tf.Dimension values are not compatible with int, convert manually try: return [int(e if e.value is not None else 64) for e in t] except: return [0] # unknown shape ts_all = [t for t in ts_all if np.prod(fixdims(t.shape)) > MIN_CHECKPOINT_NODE_SIZE] ts_all = [t for t in ts_all if 'L2Loss' not in t.name]
tensorflow.contrib.graph_editor.filter_ts_from_regex
167
import tensorflow as tf 'bounding_box_samples': _float_feature(d['bounding_box_samples']), 'depth_renders': _float_feature(d['depth_renders']), 'mesh_name': _bytes_feature(d['mesh_name']), 'near_surface_samples': _float_feature(d['near_surface_samples']), 'grid': _float_feature(d['grid']), 'world2grid': _float_feature(d['world2grid']), 'surface_point_samples': _float_feature(d['surface_point_samples']) } example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def full_featurespec(): return { 'bounding_box_samples': tf.io.FixedLenFeature([100000, 4], tf.float32), 'depth_renders': tf.io.FixedLenFeature([20, 224, 224, 1], tf.float32), 'mesh_name': tf.io.FixedLenFeature([], tf.string), 'near_surface_samples': tf.io.FixedLenFeature([100000, 4], tf.float32), 'grid': tf.io.FixedLenFeature([32, 32, 32], tf.float32), 'world2grid': tf.io.FixedLenFeature([4, 4], tf.float32), 'surface_point_samples': tf.io.FixedLenFeature([10000, 6], tf.float32) } def parse_tf_example(example_proto): d = tf.io.parse_single_example(example_proto, full_featurespec()) return (d['bounding_box_samples'], d['depth_renders'], d['mesh_name'], d['near_surface_samples'], d['grid'], d['world2grid'], d['surface_point_samples'])
tensorflow.io.FixedLenFeature
168
from tensorflow.python.ops import math_ops indices_at_minval = math_ops.equal( math_ops.abs(sensitivities - sensitivity), min_val) indices_at_minval = math_ops.to_int64(indices_at_minval) indices_at_minval = math_ops.cumsum(indices_at_minval) tf_index = math_ops.argmax(indices_at_minval, 0) tf_index = math_ops.cast(tf_index, dtypes.int32)
tensorflow.python.ops.math_ops.cumsum
169
import tensorflow as tf token_type_ids.append(e.token_type_ids) attention_mask.append(e.attention_mask) labels.append(e.label_ids) # parse examples to dataset def _to_dataset(x, dtype=tf.int32): x = tf.ragged.constant(x, dtype=dtype) d = tf.data.Dataset.from_tensor_slices(x) d = d.map(lambda x: x) return d dataset = tf.data.Dataset.zip(
tensorflow.ragged.constant
170
import tensorflow as tf if int(X.get_shape()[-1]) != (r**2) * n_out_channels: raise Exception(_err_log) # bsize, a, b, c = X.get_shape().as_list() # bsize = tf.shape(X)[0] # Handling Dimension(None) type for undefined batch dim # Xs=tf.split(X,r,3) #b*h*w*r*r # Xr=tf.concat(Xs,2) #b*h*(r*w)*r # X=tf.reshape(Xr,(bsize,r*a,r*b,n_out_channel)) # b*(r*h)*(r*w)*c X = tf.depth_to_space(X, r) else: raise RuntimeError(_err_log) return X class SubpixelConv1d(Layer):
tensorflow.depth_to_space
171
import tensorflow as tf st = tf.SparseTensor(indices, values, shape) st_handles = add_many_sparse_to_tensors_map(st) st_roundtrip = take_many_sparse_from_tensors_map( sparse_map_op=st_handles.op, sparse_handles=st_handles) st_roundtrip_op = st_roundtrip.values.op st_serialized = tf.serialize_many_sparse(st) st_deserialized = tf.deserialize_many_sparse( st_serialized, dtype=values.dtype) st_deserialized_op = st_deserialized.values.op tf.global_variables_initializer().run() st_roundtrip_values = sess.run(st_roundtrip) st_deserialized_values = sess.run(st_deserialized)
tensorflow.deserialize_many_sparse
172
import tensorflow as tf layer = tf.contrib.layers.batch_norm(layer, is_training=True, center=True, scale=False, decay=decay, activation_fn=activation_fn, updates_collections=None, scope=vs, reuse=True) # updates_collections=None else: layer = tf.contrib.layers.batch_norm(layer, is_training=False, center=True, scale=False, decay=decay, activation_fn=activation_fn, updates_collections=None, scope=vs, reuse=True) # updates_collections=None elif norm_type == 'layer_norm': # layer_norm # Take activation_fn out to apply lrelu try: layer = activation_fn(tf.contrib.layers.layer_norm(layer, center=True, scale=False, scope=vs)) # updates_collections=None except ValueError: layer = activation_fn(tf.contrib.layers.layer_norm(layer, center=True, scale=False, scope=vs, reuse=True)) elif norm_type == 'selu': layer = selu(layer)
tensorflow.contrib.layers.layer_norm
173
import tensorflow as tf x = tf.image.random_brightness(x, max_delta=0.8*s) x = tf.image.random_contrast(x, lower=lower, upper=upper) x = tf.image.random_saturation(x, lower=lower, upper=upper) x = tf.image.random_hue(x, max_delta=0.2*s) x = tf.clip_by_value(x, 0, 1) return x def color_drop(image): image = tf.image.rgb_to_grayscale(image) image = tf.tile(image, [1, 1, 1, 3]) return image # pylint: disable=not-callable @gin.configurable(blacklist=["kwargs"]) class CLGAN(modular_gan.ModularGAN): """Self-Supervised GAN with Contrastive Loss"""
tensorflow.image.rgb_to_grayscale
174
import tensorflow as tf "float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE") # x is shaped [batch_size,time_steps,num_inputs] if is_dynamic_rnn: lstm_input = tf.transpose(x, perm=[1, 0, 2]) outputs, _ = tf.lite.experimental.nn.dynamic_rnn( lstm_layer, lstm_input, dtype="float32") outputs = tf.unstack(outputs, axis=0) else: lstm_input = tf.unstack(x, self.time_steps, 1) outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32") # Compute logits by multiplying outputs[-1] of shape [batch_size,num_units] # by the softmax layer's out_weight of shape [num_units,n_classes] # plus out_bias prediction = tf.matmul(outputs[-1], out_weights) + out_bias output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS") return x, prediction, output_class
tensorflow.nn.static_rnn
175
import tensorflow as tf KK = tf.matmul(K, K, transpose_b=True) K_trace = tf.expand_dims(tf.expand_dims(tf.trace(KK), -1), -1) K_loss = tf.reduce_mean(tf.abs(KK / K_trace - tf.eye(2))) loss_total_gen = crit_gen + rep_loss + K_loss gen_var = model.get_gen_vars() dis_var = model.dis.trainable_variables grads = tape.gradient([loss_total_gen, crit_dis], [gen_var, dis_var]) return grads, [crit_dis, crit_gen, rep_loss, K_loss] reader = datareader.DataReader(16) model = network.RepNet() optim = tf.optimizers.Adam(0.0001, 0.5) saver = M.Saver(model) saver.restore('./model/') MAXITER = 10000 bar = tqdm(range(MAXITER+1)) for i in bar: batch = reader.get_next() grads, lss = grad_loss(batch, model) gen_var = model.get_gen_vars() dis_var = model.dis.trainable_variables
tensorflow.optimizers.Adam
176
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature def predict_proba(self, x, batch_size=None): """Returns prediction probabilities for given features (classification). Args: x: features. batch_size: OVerride default batch size. Returns: Numpy array of predicted probabilities. """ return self._infer_model(x=x, batch_size=batch_size, proba=True) def _check_inputs(self, features, targets): if self._features_info is not None: if not tensor_signature.tensors_compatible(features, self._features_info): raise ValueError('Features are incompatible with given information. ' 'Given features: %s, required signatures: %s.' % (str(features), str(self._features_info))) else: self._features_info = tensor_signature.create_signatures(features) if self._targets_info is not None: if not tensor_signature.tensors_compatible(targets, self._targets_info): raise ValueError('Targets are incompatible with given information. ' 'Given targets: %s, required signatures: %s.' % (str(targets), str(self._targets_info))) else: self._targets_info = tensor_signature.create_signatures(targets) def _train_model(self,
tensorflow.contrib.learn.python.learn.estimators.tensor_signature.tensors_compatible
177
import tensorflow as tf """ reg_l2 = tf.keras.regularizers.l2(5e-7) if padding == 'SYMMETRIC' or padding == 'REFLECT': p = (kernel_size - 1) // 2 x = tf.pad(x, [[0,0],[p,p],[p,p], [p,p],[0,0]], padding) x = tf.keras.layers.Conv3D(filters, kernel_size, activation=activation, kernel_initializer=initialization, use_bias=use_bias, kernel_regularizer=reg_l2)(x) else: assert padding in ['SAME', 'VALID'] x = tf.keras.layers.Conv3D(filters, kernel_size, activation=activation, kernel_initializer=initialization, use_bias=use_bias, kernel_regularizer=reg_l2)(x) return x
tensorflow.keras.layers.Conv3D
178
import tensorflow as tf w1 = tf.get_variable('weight1', [784, 1024], initializer=tf.random_normal_initializer()) b1 = tf.get_variable('bias1', [1024], initializer=tf.constant_initializer(0.0)) h1 = tf.nn.relu(tf.matmul(x, w1) + b1) with tf.variable_scope('layer2'): w2 = tf.get_variable('weight2', [1024, 1024], initializer=tf.random_normal_initializer()) b2 = tf.get_variable('bias2', [1024], initializer=tf.constant_initializer(0.0)) h2 = tf.nn.relu(tf.matmul(h1, w2) + b2) with tf.variable_scope('layer3'): w3 = tf.get_variable('weight3', [1024, 10], initializer=tf.random_normal_initializer()) b3 = tf.get_variable('bias3', [10], initializer=tf.constant_initializer(0.0)) y = tf.matmul(h2, w3) + b3 # losses cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=gt, logits=y)) # optimizer optimizer = tf.train.GradientDescentOptimizer(args.lr) # define one-step train ops train_op = optimizer.minimize(cross_entropy) return x, y, gt, train_op if __name__ == "__main__": max_train_step = args.max_train_step batch_size = args.batch_size mnist = input_data.read_data_sets(args.data_dir, one_hot=True) x, y, gt, train_op = model() # create saver saver = tf.train.Saver() if os.path.exists('./mnist'):
tensorflow.train.GradientDescentOptimizer
179
import tensorflow as tf next_sentence_log_probs) = get_next_sentence_output( bert_config, model.get_pooled_output(), next_sentence_labels, clip) total_loss = masked_lm_loss + next_sentence_loss tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)
tensorflow.train.init_from_checkpoint
180
from tensorflow.contrib.learn.python.learn.datasets import base
                   fake_data=False,
                   one_hot=False,
                   dtype=dtypes.float32,
                   reshape=True):
  if fake_data:
    def fake():
      return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)
    train = fake()
    validation = fake()
    test = fake()
    return base.Datasets(train=train, validation=validation, test=test)
  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
  VALIDATION_SIZE = 5000
  local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
                                   SOURCE_URL + TRAIN_IMAGES)
  train_images = extract_images(local_file)
tensorflow.contrib.learn.python.learn.datasets.base.Datasets
181
import tensorflow as tf
        sentence_embeddings = tf.divide(
tensorflow.divide
182
import tensorflow as tf
        average_loss_per_example = tf.nn.seq2seq.sequence_loss_by_example(
tensorflow.nn.seq2seq.sequence_loss_by_example
183
import tensorflow as tf
    # Note: tf.nn.softmax_cross_entropy_with_logits
    # expects logits, Keras expects probabilities.
    if not from_logits:
        # transform back to logits
        epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)
        output = tf.clip_by_value(output, epsilon, 1 - epsilon)
        output = tf.log(output / (1 - output))
    try:
        return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
    except TypeError:
        return tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=target)
def sum(x, axis=None, keepdims=False):
    """Sum of the values in a tensor, alongside the specified axis.
    Parameters
    ----------
    x: A tensor or variable.
    axis: An integer, the axis to sum over.
tensorflow.nn.sigmoid_cross_entropy_with_logits
184
from tensorflow.python.framework import ops
@ops.RegisterGradient("SparseScatter")
def _sparse_scatter_grad(op, grad):
tensorflow.python.framework.ops.RegisterGradient
185
from tensorflow.python.ops import clip_ops
  return train_tensor
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
  """Clips gradients by global norm."""
  gradients, variables = zip(*grads_and_vars)
  clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
  return list(zip(clipped_gradients, variables))
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
tensorflow.python.ops.clip_ops.clip_by_global_norm
186
import tensorflow as tf
        logits = tf.reduce_sum(tf.multiply(output_layer,output_weights),-1)
tensorflow.multiply
187
import tensorflow as tf
    """
    with tf.variable_scope(scope) as sc:
        kernel_d, kernel_h, kernel_w = kernel_size
        num_in_channels = inputs.get_shape()[-1].value
        kernel_shape = [kernel_d, kernel_h, kernel_w,
                        num_in_channels, num_output_channels]
        kernel = _variable_with_weight_decay('weights',
                                             shape=kernel_shape,
                                             use_xavier=use_xavier,
                                             stddev=stddev,
                                             wd=weight_decay)
        stride_d, stride_h, stride_w = stride
        outputs = tf.nn.conv3d(inputs, kernel,
                               [1, stride_d, stride_h, stride_w, 1],
                               padding=padding)
        biases = _variable_on_cpu('biases', [num_output_channels],
                                  tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(outputs, biases)
        if bn:
            outputs = batch_norm_for_conv3d(outputs, is_training,
                                            bn_decay=bn_decay, scope='bn')
        if activation_fn is not None:
tensorflow.nn.conv3d
188
import tensorflow as tf
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(MODEL_DIR, from_name)
            assign_ops.append(tf.assign(var, var_value))
tensorflow.contrib.framework.load_variable
189
import tensorflow as tf
    dataset = tf.data.Dataset.from_tensors(data).repeat(
tensorflow.data.Dataset.from_tensors
190
import tensorflow as tf
  if single_file:
    dataset_path = os.path.join(dataset_path, 'train_annotated.json')
  else:
    dataset_path = os.path.join(dataset_path, 'dev_annotated.json')
  def load_dataset():
    dataset = []
    if single_file:
      # Opening with GFile allows to use remotely stored files, e.g.
      # in a gs bucket.
      dataset_handle = tf.io.gfile.GFile(dataset_path, 'r')
      for line in dataset_handle:
        dataset.append(json.loads(line))
    else:
      all_files = tf.io.gfile.listdir(dataset_path)
      for filename in all_files:
        if 'json' in filename:
          print('Loading data from file {}'.format(filename))
          with tf.io.gfile.GFile(os.path.join(dataset_path, filename)) as f:
            for line in f:
              dataset.append(json.loads(line))
    print('The total size of the dataset {}'.format(len(dataset)))
    return dataset[:int(len(dataset) * percentile)]
  def drop_annotated_yield_examples(generator=None):
    del generator
    while True:
      passages = set()
tensorflow.io.gfile.listdir
191
import tensorflow as tf
    ## End new version
    if self._normalize_cols:
      logits_vec = logits_vec - tf.math.reduce_logsumexp(
          logits_vec, axis=0)[None]
    relabel_indices = tf.random.categorical(logits=logits_vec, num_samples=1)
tensorflow.math.reduce_logsumexp
192
import tensorflow as tf
def main(argv=None):
    start1 = time.time()
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list
    if not tf.gfile.Exists(FLAGS.checkpoint_path):
        tf.gfile.MkDir(FLAGS.checkpoint_path)
    else:
        if not FLAGS.restore:
            tf.gfile.DeleteRecursively(FLAGS.checkpoint_path)
            tf.gfile.MkDir(FLAGS.checkpoint_path)
    input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')
    input_score_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_score_maps')
    if FLAGS.geometry == 'RBOX':
        input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 5], name='input_geo_maps')
    else:
        input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 8], name='input_geo_maps')
tensorflow.gfile.DeleteRecursively
193
from tensorflow.python.ops import partitioned_variables
        weight_collections=[parent_scope],
        scope=scope)
    hidden_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=num_ps_replicas))
    for layer_id, num_hidden_units in enumerate(hidden_units):
      with variable_scope.variable_scope(
tensorflow.python.ops.partitioned_variables.min_max_variable_partitioner
194
from tensorflow.contrib.layers.python.layers import utils
      return mean, variance
    def build_moving_stats():
      return (
          tf.identity(self._moving_mean),
          tf.identity(self._moving_variance),
      )
    mean, variance = utils.smart_cond(
        use_batch_stats,
        build_batch_stats,
        build_moving_stats,
    )
    return mean, variance
tensorflow.contrib.layers.python.layers.utils.smart_cond
195
import tensorflow as tf
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      # The code to modify output nodes
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={"probabilities": probabilities},
          scaffold_fn=scaffold_fn)
    return output_spec
  return model_fn
tensorflow.contrib.tpu.TPUEstimatorSpec
196
from tensorflow.python.ops import math_ops
      batch_dims: `Tensor` (1D, `int32`).
      event_dims: `Tensor` (1D, `int32`).
    """
    with self._name_scope(name, values=[x]):
      def make_dims(start_sum, size, name):
        """Closure to make dims range."""
        start_sum = start_sum if start_sum else (
            array_ops.zeros((), dtype=dtypes.int32, name="zero"),)
        if self._is_all_constant_helper(size, *start_sum):
          start = sum(tensor_util.constant_value(s) for s in start_sum)
          stop = start + tensor_util.constant_value(size)
          return ops.convert_to_tensor(
              list(range(start, stop)), dtype=dtypes.int32, name=name)
        else:
          start = sum(start_sum)
          return math_ops.range(start, start + size)
      sample_ndims = self.get_sample_ndims(x, name=name)
      return (make_dims((), sample_ndims, name="sample_dims"),
              make_dims((sample_ndims,), self.batch_ndims, name="batch_dims"),
              make_dims((sample_ndims, self.batch_ndims), self.event_ndims, name="event_dims"))
  def get_shape(self, x, name="get_shape"):
    """Returns `Tensor`'s shape partitioned into `sample`, `batch`, `event`.
    Args:
      x: `Tensor`.
      name: `String`. The name to give this op.
    Returns:
tensorflow.python.ops.math_ops.range
197
import tensorflow as tf
    # data for self-attention
    rep_map_dp = dropout(rep_map, keep_prob, is_train)
    rep_dep_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, dep_selection)
    rep_head_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, head_selection)
    # mask generation
    dep_idxs = tf.tile(tf.expand_dims(dep_org_idx, 1), [1, sl_head, 1])
    head_idxs = tf.tile(tf.expand_dims(head_org_idx, 2), [1, 1, sl_dep])
    if direction is None:
        direct_mask = tf.not_equal(head_idxs, dep_idxs)  # [bs, slh, sld]
    else:
        if direction == 'forward':
            direct_mask = tf.greater(head_idxs, dep_idxs)  # [bs, slh, sld]
        else:
            direct_mask = tf.less(head_idxs, dep_idxs)  # [bs, slh, sld]
    # [bs, slh, slh]
    rep_mask_tile = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_head_mask, 2))
    attn_mask = tf.logical_and(direct_mask, rep_mask_tile)  # [bs, slh, sld]
tensorflow.not_equal
198
import tensorflow as tf
    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "maximal_learning_rate": self.maximal_learning_rate,
            "step_size": self.step_size,
            "scale_mode": self.scale_mode,
        }
@tf.keras.utils.register_keras_serializable(package="Addons")
class TriangularCyclicalLearningRate(CyclicalLearningRate):
    def __init__(
        self,
        initial_learning_rate,
        maximal_learning_rate,
        step_size,
        scale_mode="cycle",
        name="TriangularCyclicalLearningRate",
    ):
tensorflow.keras.utils.register_keras_serializable
199