Columns: seed (string, 25 to 2.89k chars), seed_api (string, 14 to 102 chars), index (int64, 0 to 14.8k)
import tensorflow as tf

with sess.graph.device("/cpu:0"):
    v0 = tf.Variable(111, name="v0")
tensorflow.Variable
2,000
import tensorflow as tf

out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
with tf.variable_scope("root"):
    _, losses = SampleGRUSeq2Seq(inp, out, weights)
tensorflow.variable_scope
2,001
import tensorflow as tf

                                         'worker': self.worker_hosts})
    self.server = None

    if not self.server:
        self.server = tf.train.Server(self.cluster, job_name=self.job_name,
                                      task_index=self.task_index,
                                      config=create_config_proto(),
                                      protocol=FLAGS.server_protocol)
    worker_prefix = '/job:worker/task:%s' % self.task_index
    self.param_server_device = tf.train.replica_device_setter(
        worker_device=worker_prefix + '/cpu:0', cluster=self.cluster)
    # The device on which the queues for managing synchronization between
    # servers should be stored.
    num_ps = len(self.ps_hosts)
    self.sync_queue_devices = ['/job:ps/task:%s/cpu:0' % i for i in range(num_ps)]
else:
    self.task_index = 0
tensorflow.train.replica_device_setter
2,002
from tensorflow.python.ops import array_ops

    e = (-1,) if self._event_ndims_is_0 else ()
    x = array_ops.squeeze(x, squeeze_dims=b + e)
    _, batch_shape, event_shape = self.get_shape(x)
else:
    s = (x.get_shape().as_list() if x.get_shape().is_fully_defined()
         else array_ops.shape(x))
    batch_shape = array_ops.slice(s, (1,), (self.batch_ndims,))
    # Since sample_dims=1 and is left-most, we add 1 to the number of
    # batch_ndims to get the event start dim.
    event_start = array_ops.where(
        self._batch_ndims_is_0, 2, 1 + self.batch_ndims)
    event_shape = array_ops.slice(s, (event_start,), (self.event_ndims,))
tensorflow.python.ops.array_ops.slice
2,003
import tensorflow as tf dataset = tf.data.TFRecordDataset(["./data/train.tfrecord"]) dataset = dataset.map(decode) dataset = dataset.map(normalize) dataset = dataset.repeat() dataset = dataset.batch(self.BATCH_SIZE) iterator = dataset.make_one_shot_iterator() return iterator.get_next() def compute_gradient(self): with tf.name_scope('data_loading'): x, y = self._build_data_pipeline() with tf.name_scope('gradient_computation'): grads = self._build_training_model(x, y) return grads model_owner = ModelOwner('model-owner') data_owners = [ DataOwner('data-owner-0', model_owner.build_training_model), DataOwner('data-owner-1', model_owner.build_training_model), DataOwner('data-owner-2', model_owner.build_training_model), ] model_grads = zip(*(
tensorflow.name_scope
2,004
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten

# self.glimpses = tf.concat([glimpse1, glimpse2, glimpse3], axis=-1)
# Block 1
conv1a = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[8, 8], strides=4, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(self.inputs)
conv1b = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv1a)
conv1c = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv1b)
pool1 = MaxPool2D(pool_size=[2, 2])(conv1c)
# Block 2
conv2a = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(pool1)
conv2b = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv2a)
conv2c = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv2b)
pool2 = MaxPool2D(pool_size=[2, 2])(conv2c)
# Block 3
conv3a = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(pool2)
conv3b = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv3a)
conv3c = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv3b)
pool3 = MaxPool2D(pool_size=[2, 2])(conv3c)
# final convolutional layer
tensorflow.keras.layers.Conv2D
2,005
from tensorflow.python.framework import tensor_util

normal = dists.Normal([mu], [sigma], validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch))

mvn = dists.MultivariateNormalDiag([mu], [sigma], validate_args=True)
tensorflow.python.framework.tensor_util.constant_value
2,006
import tensorflow as tf

inputdata = (inputdata - mean) / tf.sqrt(var + esp)

# gamma and beta for each channel
gamma = tf.Variable(tf.constant(1.0, shape=[c]), dtype=tf.float32, name='gamma')
beta = tf.Variable(tf.constant(0.0, shape=[c]), dtype=tf.float32, name='beta')
gamma = tf.reshape(gamma, [1, c, 1, 1])
beta = tf.reshape(beta, [1, c, 1, 1])

# transform as in the paper: [n, c, h, w, c] to [n, h, w, c]
output = tf.reshape(inputdata, [-1, c, h, w])
output = output * gamma + beta
output = tf.transpose(output, [0, 2, 3, 1])
tensorflow.reshape
2,007
import tensorflow as tf

    width: an int32 scalar tensor indicating the current width.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

  height = tf.to_float(height)
  width = tf.to_float(width)
  smallest_side = tf.to_float(smallest_side)

  scale = tf.cond(tf.greater(height, width),
                  lambda: smallest_side / width,
                  lambda: smallest_side / height)
  new_height = tf.to_int32(height * scale)
  new_width = tf.to_int32(width * scale)
  return new_height, new_width
tensorflow.to_float
2,008
import tensorflow as tf

seg_id = _transform_features(features["segment_ids"])
inp_mask = _transform_features(features["input_mask"])
label = tf.reshape(features["label_ids"], [bsz_per_core])
tensorflow.reshape
2,009
import tensorflow as tf

def guard_nan(x):
    return x if not math.isnan(x) else -1.

def _blur_expand(input):
    k_size = 9
    kernels = [2, 4, 6]
    channels = [input] + [nut.blur_gaussian(input, k, k_size)[0] for k in kernels]
    res = tf.concat(channels, axis=3)
    return res

class Autoencoder:
    train_set, test_set = None, None
    permutation = None
    batch_shape = None
tensorflow.concat
2,010
import tensorflow as tf

self.y = tf.placeholder(tf.float32,
                        [batch_size, max_sequence_len, out_vocab_size],
                        name='y')
# The bidirectional rnn code requires seq_lens as int64
self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
self.example_weights = tf.placeholder(tf.float32, [batch_size],
                                      name='example_weights')

embeddings = c2v.GetEmbeddings(self.x)
self._inputs = [tf.squeeze(input_, [1])
                for input_ in tf.split(1, max_sequence_len, embeddings)]

# Need to prepare a mask to zero out the padding symbols.
# Make a batch_size x max_sequence_len matrix where each
# row contains the length repeated max_sequence_len times.
lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1)
lengths_tiled = tf.tile(lengths_transposed, [1, max_sequence_len])
tensorflow.squeeze
2,011
import tensorflow as tf

total_accuracy1, total_accuracy5 = (0.0, 0.0)
num_processed_images = 0
num_remaining_images = 5000
top1 = 0
with tf.compat.v1.Session() as sess:
    sess_graph = tf.compat.v1.Session(graph=graph, config=config)
    while num_remaining_images >= batch_size:
        # Reads and preprocess data
tensorflow.compat.v1.Session
2,012
import tensorflow as tf # We now test every codepath within the underlying is_scalar_helper # function. # Test case 1, 2. x = tf.placeholder(dtype=tf.int32, shape=[]) # None would fire an exception were it actually executed. self.assertTrue(normal._is_scalar_helper(x.get_shape, lambda: None)) self.assertTrue(normal._is_scalar_helper(lambda: tf.TensorShape(None), lambda: tf.shape(x))) x = tf.placeholder(dtype=tf.int32, shape=[1]) # None would fire an exception were it actually executed. self.assertFalse(normal._is_scalar_helper(x.get_shape, lambda: None)) self.assertFalse(normal._is_scalar_helper(lambda: tf.TensorShape(None), lambda: tf.shape(x))) # Test case 3. x = tf.placeholder(dtype=tf.int32) is_scalar = normal._is_scalar_helper(x.get_shape, lambda: tf.shape(x)) self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
tensorflow.placeholder
2,013
import tensorflow as tf if self._max_diffusion_step == 0: pass else: for support in self._supports: x1 = tf.sparse_tensor_dense_matmul(support, x0) x = self._concat(x, x1) for _ in range(2, self._max_diffusion_step + 1): x2 = 2 * tf.sparse_tensor_dense_matmul(support, x1) - x0 x = self._concat(x, x2) x1, x0 = x2, x1 num_matrices = len(self._supports) * self._max_diffusion_step + 1 # Adds for x itself. x = tf.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size]) x = tf.transpose(x, perm=[3, 1, 2, 0]) # (batch_size, num_nodes, input_size, order) x = tf.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices]) weights = tf.get_variable( 'weights', [input_size * num_matrices, output_size], dtype=dtype, initializer=tf.contrib.layers.xavier_initializer()) x = tf.matmul( x, weights) # (batch_size * self._num_nodes, output_size) biases = tf.get_variable("biases", [output_size], dtype=dtype, initializer=tf.constant_initializer( bias_start, dtype=dtype)) x = tf.nn.bias_add(x, biases)
tensorflow.transpose
2,014
import tensorflow as tf

for layer_idx in range(num_hidden_layers):
    group_idx = int(layer_idx / num_hidden_layers * num_hidden_groups)
    with tf.variable_scope("group_%d" % group_idx):
        with tf.name_scope("layer_%d" % layer_idx):
tensorflow.variable_scope
2,015
from tensorflow.python.framework import tensor_shape

reduction_indices = tensor_util.ConstantValue(op.inputs[1])
keep_dims = op.get_attr("keep_dims")
if reduction_indices is None or input_shape.ndims is None:
    if keep_dims:
        return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    else:
        return [tensor_shape.unknown_shape()]
tensorflow.python.framework.tensor_shape.unknown_shape
2,016
import tensorflow as tf

q_online_tp1 = q_online(obs_tp1_float)
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='online_q_func')
q_target = q_func(obs_tp1_float, num_actions, scope="target_q_func", reuse=False)
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func')

# Bellman training error
if double_q:
tensorflow.get_collection
2,017
import tensorflow as tf name=name), dropout_prob) def softmax_N(tensor, name=None): """Apply softmax across last dimension of a tensor. Args: tensor: Input tensor. name: Name for this op. If None, defaults to 'softmax_N'. Returns: A tensor with softmax-normalized values on the last dimension. """ with tf.name_scope(name, 'softmax_N', [tensor]): exp_tensor = tf.exp(tensor) reduction_indices = [tensor.get_shape().ndims - 1] return tf.div(exp_tensor, tf.reduce_sum( exp_tensor, axis=reduction_indices, keep_dims=True)) def optimizer(optimizer="adam", learning_rate=.001, momentum=.9): """Create model optimizer. Parameters ----------
tensorflow.name_scope
2,018
import tensorflow as tf

                                   feed_previous=tf.constant(True))
sess.run([tf.global_variables_initializer()])
tensorflow.global_variables_initializer
2,019
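The seed above is truncated, so here is a minimal, self-contained sketch of the usual TF1-style pattern around tf.global_variables_initializer; it is my own illustration, and the variable v is a made-up placeholder, not taken from the row.

import tensorflow as tf

v = tf.Variable(0.0, name="v")             # graph variables must be defined first
init = tf.global_variables_initializer()   # op that initializes all global variables
with tf.Session() as sess:
    sess.run(init)                         # run the initializer before reading variables
    print(sess.run(v))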
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature

self._features_info = tensor_signature.create_signatures(features)
if self._targets_info is not None:
    if not tensor_signature.tensors_compatible(targets, self._targets_info):
        raise ValueError('Targets are incompatible with given information. '
tensorflow.contrib.learn.python.learn.estimators.tensor_signature.tensors_compatible
2,020
import tensorflow as tf

    Returns:
    """
    with tf.variable_scope('anchor_generator'):
        if offset is None:
            offset = [stride[0] / 2, stride[1] / 2]

        features_width = tf.cast(features_width, tf.int32)
        features_height = tf.cast(features_height, tf.int32)
        scales = tf.convert_to_tensor(scales, dtype=tf.float32)
        ratios = tf.convert_to_tensor(ratios, dtype=tf.float32)
        offset = tf.convert_to_tensor(offset, dtype=tf.float32)

        scales_grid, ratios_grid = tf.meshgrid(scales, ratios)
tensorflow.cast
2,021
import tensorflow as tf

class IoOpsTest(tf.test.TestCase):

    def testReadFile(self):
        cases = ['', 'Some contents', 'Неки садржаји на српском']
        for contents in cases:
            contents = tf.compat.as_bytes(contents)
            temp = tempfile.NamedTemporaryFile(prefix='ReadFileTest')
            open(temp.name, 'wb').write(contents)
            with self.test_session():
                read = tf.read_file(temp.name)
                self.assertEqual([], read.get_shape())
                self.assertEqual(read.eval(), contents)

    def _subset(self, files, indices):
        return set(tf.compat.as_bytes(files[i].name)
                   for i in range(len(files)) if i in indices)

    def testMatchingFiles(self):
tensorflow.read_file
2,022
import tensorflow as tf

if FLAGS.input_file is not None:
    for input_pattern in FLAGS.input_file.split(","):
        input_files.extend(tf.gfile.Glob(input_pattern))
if FLAGS.input_dir is not None:
    for filename in tf.gfile.ListDirectory(FLAGS.input_dir):
        input_files.extend(tf.gfile.Glob(os.path.join(FLAGS.input_dir, filename)))

tf.logging.info("*** Input Files ***")
tensorflow.gfile.ListDirectory
2,023
import tensorflow as tf

                            worker_replicas=1,
                            clone_on_cpu=True,
                            ps_tasks=0,
                            worker_job_name='worker',
                            is_chief=True,
                            train_dir=train_dir)

if __name__ == '__main__':
    tf.test.main()
tensorflow.test.main
2,024
import tensorflow as tf

def train(train_num=64, test_num=32, lr=1e-4, loop_count=10000,
          report_step=100, save_step=1000, restore=False):
    with tf.Session(config=config) as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        if restore:
            tf.train.Saver().restore(sess, path)
        feed_dict = {
tensorflow.train.start_queue_runners
2,025
import tensorflow as tf

image = tf.placeholder(tf.float32,
                       shape=[hps.batch_size, image_size, image_size, num_channel])
############ MNIST and CIFAR10 are different here
adv_image = tf.placeholder(tf.float32,
                           shape=[hps.batch_size, image_size, image_size, num_channel])
############ MNIST and CIFAR10 are different here
tensorflow.placeholder
2,026
import tensorflow as tf

# TODO: should i normalize?

@layer
def word_dropout_layer(tensor, keep_prob=1.0, **opts):
    keep_prob = _global_keep_prob(keep_prob)
    rank = _rank(tensor)
    assert rank == 3, "Use embedding lookup layer"

    binary_mask = _apply_dropout_mask(tf.shape(tensor)[:2], keep_prob, normalize=False)
    binary_mask = tf.expand_dims(binary_mask, axis=-1)  # proper broadcasting to zero out entire word vectors
    out = tensor * binary_mask
    return out

@layer
def relu_layer(tensor):
    out = tf.nn.relu(tensor)
    return out
tensorflow.expand_dims
2,027
import tensorflow as tf assert hparams.wavenet_test_batches == self.test_steps #Get conditioning status self.local_condition, self.global_condition = self._check_conditions() with tf.device('/cpu:0'): # Create placeholders for inputs and targets. Don't specify batch size because we want # to be able to feed different batch sizes at eval time. if is_scalar_input(hparams.input_type): input_placeholder = tf.placeholder(tf.float32, shape=(None, 1, None), name='audio_inputs') target_placeholder = tf.placeholder(tf.float32, shape=(None, None, 1), name='audio_targets') target_type = tf.float32 else: input_placeholder = tf.placeholder(tf.float32, shape=(None, hparams.quantize_channels, None), name='audio_inputs') target_placeholder = tf.placeholder(tf.int32, shape=(None, None, 1), name='audio_targets') target_type = tf.int32 self._placeholders = [ input_placeholder, target_placeholder, tf.placeholder(tf.int32, shape=(None, ), name='input_lengths'), ] queue_types = [tf.float32, target_type, tf.int32] if self.local_condition: self._placeholders.append(tf.placeholder(tf.float32, shape=(None, hparams.num_mels, None), name='local_condition_features')) queue_types.append(tf.float32)
tensorflow.placeholder
2,028
import tensorflow as tf

with tf.variable_scope("input_info", reuse=False):
    tf.summary.scalar('rewards', tf.reduce_mean(self.reward_ph))
    tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate))
    tf.summary.scalar('advantage', tf.reduce_mean(adv))
    tf.summary.scalar('action_probability', tf.reduce_mean(self.mu_ph))

    if self.full_tensorboard_log:
        tf.summary.histogram('rewards', self.reward_ph)
        tf.summary.histogram('learning_rate', self.learning_rate)
        tf.summary.histogram('advantage', adv)
        tf.summary.histogram('action_probability', self.mu_ph)
        if tf_util.is_image(self.observation_space):
            tf.summary.image('observation', train_model.obs_ph)
        else:
            tf.summary.histogram('observation', train_model.obs_ph)

trainer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.rprop_alpha,
tensorflow.summary.histogram
2,029
import tensorflow as tf

# Prepare train dataset
def _preprocess_train(image, clazz):
    # Do random crop + horizontal flip for each train image
    image = tf.pad(image, [[4, 4], [4, 4], [0, 0]])
    image = tf.image.random_crop(image, (w, h, in_ch))
    image = tf.image.random_flip_left_right(image)
    if cutout_size > 0:
        image = self._do_cutout(image, w, h, cutout_size)
tensorflow.image.random_crop
2,030
import tensorflow as tf span_head_scores += tf.log(span_mask) # [k, max_span_width, 1] span_attention = tf.nn.softmax(span_head_scores, 1) # [k, max_span_width, 1] span_head_emb = tf.reduce_sum(span_attention * span_text_emb, 1) # [k, emb] span_emb_list.append(span_head_emb) span_emb = tf.concat(span_emb_list, 1) # [k, emb] return span_emb # [k, emb] def get_mention_scores(self, span_emb): with tf.variable_scope("mention_scores"): return util.ffnn(span_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [k, 1] def softmax_loss(self, antecedent_scores, antecedent_labels): gold_scores = antecedent_scores + tf.log(tf.to_float(antecedent_labels)) # [k, max_ant + 1] marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1]) # [k] log_norm = tf.reduce_logsumexp(antecedent_scores, [1]) # [k] return log_norm - marginalized_gold_scores # [k] def bucket_distance(self, distances): """ Places the given values (designed for distances) into 10 semi-logscale buckets: [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+]. """ logspace_idx = tf.to_int32(tf.floor(tf.log(tf.to_float(distances))/math.log(2))) + 3 use_identity = tf.to_int32(distances <= 4) combined_idx = use_identity * distances + (1 - use_identity) * logspace_idx
tensorflow.to_float
2,031
import tensorflow as tf

def clip_logits(logits, config):
    logits_clip = getattr(config, "logits_clip", 0.)
    if logits_clip > 0:
        min_logit = tf.reduce_min(logits)
        return tf.minimum(logits - min_logit, logits_clip)
    else:
tensorflow.reduce_min
2,032
import tensorflow as tf Li_eKuf = tf.matrix_triangular_solve(Luu, eKuf, lower=True) # M x N fmean = tf.matmul(Li_eKuf, q_mu, transpose_a=True) eKff = expectation(pXnew, kern) # N (psi0) eKuffu = expectation(pXnew, (kern, feat), (kern, feat)) # N x M x M (psi2) Luu_tiled = tf.tile(Luu[None, :, :], [num_data, 1, 1]) # remove this line, once issue 216 is fixed Li_eKuffu = tf.matrix_triangular_solve(Luu_tiled, eKuffu, lower=True) Li_eKuffu_Lit = tf.matrix_triangular_solve(Luu_tiled, tf.matrix_transpose(Li_eKuffu), lower=True) # N x M x M cov = tf.matmul(q_sqrt_r, q_sqrt_r, transpose_b=True) # D x M x M if mean_function is None or isinstance(mean_function, mean_functions.Zero): e_related_to_mean = tf.zeros((num_data, num_func, num_func), dtype=settings.float_type) else: # Update mean: \mu(x) + m(x)
tensorflow.matrix_transpose
2,033
import tensorflow as tf

# Agent target replacement rate
EPSILON = 0.3
# Reward discount factor
GAMMA = 0.7
# Actor learning rate
# A_LR = 0.0001
A_LR = 0.001
# Critic learning rate
# C_LR = 0.0002
C_LR = 0.002

class MODEL(object):
    def __init__(self):
        self.sess = tf.Session()
        self.tfs = tf.placeholder(tf.float32, [None, 84, 84, 3], 'state')
        c0 = tf.cast(self.tfs, tf.float32) / 255.
        c1 = tf.nn.relu(self.conv(c0, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2)))
        c2 = tf.nn.relu(
            self.conv(
                c1, 'c2', nf=64, rf=4, stride=2,
tensorflow.placeholder
2,034
import tensorflow as tf

tf.app.flags.DEFINE_integer('loss_scale', 1024, '')
tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
tf.app.flags.DEFINE_string('gpu_list', '1', '')
tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_resnet_v1_50_rbox/', '')
tf.app.flags.DEFINE_boolean('restore', False, 'whether to restore from checkpoint')
tf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '')
tf.app.flags.DEFINE_integer('save_summary_steps', 100, '')
tf.app.flags.DEFINE_string('pretrained_model_path', None, '')
tf.app.flags.DEFINE_boolean('allow_mix_precision', False, 'whether to allow mixed precision')
tf.app.flags.DEFINE_boolean('auto_tune', False, 'whether to autotune')
tf.app.flags.DEFINE_boolean('use_processed_data', False, 'whether to use processed data')
tf.app.flags.DEFINE_string('processed_data', './processed_dataset/', 'where to save preprocessed datasets')
tensorflow.app.flags.DEFINE_integer
2,035
import tensorflow as tf try: src_path, dst_path = sess.run(dequeue_op) except tf.errors.OutOfRangeError: coord.request_stop() break process(src_path, dst_path) complete() # init epoch counter for the queue local_init_op = tf.local_variables_initializer() with tf.Session() as sess: sess.run(local_init_op) coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord) for i in range(a.workers): t = threading.Thread(target=worker, args=(coord,)) t.start() threads.append(t)
tensorflow.local_variables_initializer
2,036
from tensorflow.python.ops import variable_scope

    raise ValueError('`sensitivity` must be in the range [0, 1].')

with variable_scope.variable_scope(name, 'specificity_at_sensitivity',
                                   [predictions, labels]):
tensorflow.python.ops.variable_scope.variable_scope
2,037
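Because the seed above is only a fragment, a short hedged sketch of the three-argument form variable_scope.variable_scope(name, default_name, values) follows; the scope name 'metrics' is a hypothetical placeholder.

from tensorflow.python.ops import variable_scope

# When the first argument is None, the default_name is used for the scope.
with variable_scope.variable_scope(None, 'metrics', []) as scope:
    print(scope.name)  # -> "metrics" (or "metrics_1", ... if the name is already taken)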
import tensorflow as tf

# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
    cifar10.MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())

# Group all updates into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op)

# Create a saver.
saver = tf.train.Saver(tf.global_variables())

# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries)

# Build an initialization operation to run below.
init = tf.global_variables_initializer()

# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
    allow_soft_placement=True,
    log_device_placement=FLAGS.log_device_placement))
sess.run(init)
tensorflow.summary.merge
2,038
import tensorflow as tf

@classmethod
def _read_tsv(cls, input_file, quotechar=None):
    """Reads a tab separated value file."""
    with tf.gfile.Open(input_file, "r") as f:
        reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
        lines = []
tensorflow.gfile.Open
2,039
import tensorflow as tf tf.add_to_collection("user_defined_bytes_collection", queue_runner.SerializeToString()) any_buf = Any() any_buf.Pack(queue_runner) tf.add_to_collection("user_defined_any_collection", any_buf) # Generates MetaGraphDef. meta_graph_def = save.export_meta_graph(filename) self.assertTrue(meta_graph_def.HasField("saver_def")) self.assertTrue(meta_graph_def.HasField("graph_def")) collection_def = meta_graph_def.collection_def self.assertEqual(len(collection_def), 10) with tf.Graph().as_default(): # Restores from MetaGraphDef. new_saver = tf.train.import_meta_graph(filename) # Generates a new MetaGraphDef. new_meta_graph_def = new_saver.export_meta_graph() # It should be the same as the original. self.assertProtoEquals(meta_graph_def, new_meta_graph_def) def testAddCollectionDefFails(self): with self.test_session(): # Creates a graph. v0 = tf.Variable(10.0, name="v0") # Creates a saver.
tensorflow.Graph
2,040
import tensorflow as tf

    dataset, bucket_fn=lambda a, b, c, y: tf.size(a),
tensorflow.size
2,041
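Since the seed above barely shows the API, a small illustrative sketch of tf.size follows; the tensor x is an assumption of mine, not part of the row.

import tensorflow as tf

x = tf.constant([[1, 2, 3], [4, 5, 6]])
n = tf.size(x)  # scalar int32 tensor holding the total number of elements, here 6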
import tensorflow as tf

def _compute(self, x, y):
    self._dim = x._rank()
    kernel = np.zeros((tf.size(x), tf.size(y)))
    for l in tf.range(start=0, limit=tf.size(x), delta=1, dtype=None, name='l_range'):
        for m in tf.range(start=0, limit=tf.size(y), delta=1, dtype=None, name='m_range'):
            vx = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string,
                                                    value_dtype=tf.int64,
tensorflow.size
2,042
import tensorflow as tf

elif validate_args:
    assertions += [
        tf.compat.v1.assert_equal(
            tf.sort(perm), tf.range(tf.size(input=perm)), message=msg)
    ]
tensorflow.size
2,043
import tensorflow as tf fn_loss: A callable returning the loss of the current model. **kwargs: Additional arguments, not used. Returns: List of delta tensors corresponding to the updates for each optimized variable. """ learning_rate = self.learning_rate.value() unperturbed_loss = fn_loss(**arguments) deltas = [tf.zeros_like(tensor=variable) for variable in variables] previous_perturbations = [tf.zeros_like(tensor=variable) for variable in variables] if self.unroll_loop: # Unrolled for loop for sample in range(self.num_samples): with tf.control_dependencies(control_inputs=deltas): perturbations = [ tf.random_normal(shape=util.shape(variable)) * learning_rate
tensorflow.zeros_like
2,044
import tensorflow as tf variable_summaries(bias) output = tf.nn.bias_add(tf.matmul(x, w), bias) return output def _bn(self, name, x): with tf.variable_scope(name): moving_average_decay = 0.9 decay = moving_average_decay batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2]) mu = tf.get_variable('mu', batch_mean.shape, dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=False) tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, mu) tf.add_to_collection('mu_sigma_bn', mu) sigma = tf.get_variable('sigma', batch_var.shape, dtype=tf.float32, initializer=tf.ones_initializer(), trainable=False) tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, sigma) tf.add_to_collection('mu_sigma_bn', sigma) beta = tf.get_variable('beta', batch_mean.shape, dtype=tf.float32, initializer=tf.zeros_initializer()) gamma = tf.get_variable('gamma', batch_var.shape, dtype=tf.float32, initializer=tf.ones_initializer()) # BN when training update = 1.0 - decay
tensorflow.zeros_initializer
2,045
import tensorflow as tf schedule.ContinuousLearningRateSchedule.Params().Set( start_step=350000, half_life_steps=45000)) mdl = p.Instantiate() mdl.FPropDefaultTheta() mdl.BProp() tf.global_variables_initializer().run() test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval()) mdl.train_op.run() def testAllLayerParams(self): with self.session(use_gpu=False, graph=tf.Graph()): p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() lps = base_layer.RecursiveFindLayerParams(mdl.params) l_names = sorted([p.cls.__name__ for p in lps]) expected_layers = sorted([ 'Adam', 'AdditiveAttention', 'AsciiTokenizer',
tensorflow.Graph
2,046
import tensorflow as tf

self.loss_GBAB_sum = tf.summary.scalar("g_loss_b2a", self.loss_GBAB)
self.g_total_loss_sum = tf.summary.scalar("g_loss", self.generator_loss)
self.g_sum = tf.summary.merge([self.loss_GABA_sum, self.loss_GBAB_sum, self.g_total_loss_sum])
tensorflow.summary.merge
2,047
import tensorflow as tf

output_weights = tf.get_variable(
    "output_weights", [num_labels, hidden_size],
    initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
    "output_bias", [num_labels], initializer=tf.zeros_initializer())

with tf.variable_scope("loss"):
    if is_training:
        # I.e., 0.1 dropout
        output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

    return (loss, per_example_loss, logits, probabilities)
tensorflow.matmul
2,048
import tensorflow as tf

# need to call .value to convert Dimension objects to normal value
input1_shape = list(-1 if s.value is None else s.value for s in input1.shape)
input2_shape = list(-1 if s.value is None else s.value for s in input2.shape)
output1 = tf.reshape(output[:, :input1_reshaped.shape[1]], shape=input1_shape)
output2 = tf.reshape(output[:, input1_reshaped.shape[1]:], shape=input2_shape)

return output1, output2
tensorflow.reshape
2,049
import tensorflow as tf

    Args:
        inputs (tensor): data to be processed

    Returns:
        tensor: output data
    """
    if len(tf.shape(inputs)) > 1:
        # If the input size is not 1-dimensional, unstack the input along its first dimension,
        # recursively call the forward pass on each of the yielded tensors, and then stack the
        # outputs back into the correct shape
        reconstructor = []
        for x in tf.unstack(inputs):
tensorflow.shape
2,050
import tensorflow as tf
from scipy.io import loadmat
from scipy.misc import imsave

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', '', 'cifar10 or cifar100.')
tf.app.flags.DEFINE_string('mode', 'train', 'train or eval.')
tf.app.flags.DEFINE_string('train_data_path', '', 'Filepattern for training data.')
tf.app.flags.DEFINE_string('eval_data_path', '', 'Filepattern for eval data')
tf.app.flags.DEFINE_string('train_dir', '', 'Directory to keep training outputs.')
tf.app.flags.DEFINE_string('eval_dir', '', 'Directory to keep eval outputs.')
tf.app.flags.DEFINE_integer('eval_batch_count', 10, 'Number of batches to eval.')
tf.app.flags.DEFINE_bool('eval_once', False, 'Whether evaluate the model only once.')
tf.app.flags.DEFINE_string('log_root', '', 'Directory to keep the checkpoints. Should be a '
tensorflow.app.flags.DEFINE_string
2,051
import tensorflow as tf def _build_rnn_graph(self, inputs, config, is_training): if config.rnn_mode == CUDNN: return self._build_rnn_graph_cudnn(inputs, config, is_training) else: return self._build_rnn_graph_lstm(inputs, config, is_training) def _build_rnn_graph_cudnn(self, inputs, config, is_training): """Build the inference graph using CUDNN cell.""" inputs = tf.transpose(inputs, [1, 0, 2]) self._cell = tf.contrib.cudnn_rnn.CudnnLSTM( num_layers=config.num_layers, num_units=config.hidden_size, input_size=config.hidden_size, dropout=1 - config.keep_prob if is_training else 0) params_size_t = self._cell.params_size() self._rnn_params = tf.get_variable( "lstm_params", initializer=tf.random_uniform(
tensorflow.contrib.cudnn_rnn.CudnnLSTM
2,052
import tensorflow as tf

    tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = (
    tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(
        tf.expand_dims(inv_timescales, 0), 0))
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
return signal
tensorflow.cos
2,053
import tensorflow as tf hidden_noise_shape = [1, 1, tf.shape(hidden)[2]] if encoder.pervasive_dropout else None hidden = tf.nn.dropout(hidden, keep_prob=encoder.attn_keep_prob, noise_shape=hidden_noise_shape) if encoder.mult_attn: state = dense(state, encoder.attn_size, use_bias=False, name='state') hidden = dense(hidden, encoder.attn_size, use_bias=False, name='hidden') return tf.einsum('ijk,ik->ij', hidden, state) y = dense(state, encoder.attn_size, use_bias=not encoder.layer_norm, name='W_a') y = tf.expand_dims(y, axis=1) if encoder.layer_norm: y = tf.contrib.layers.layer_norm(y, scope='layer_norm_state') hidden = tf.contrib.layers.layer_norm(hidden, center=False, scope='layer_norm_hidden') y += dense(hidden, encoder.attn_size, use_bias=False, name='U_a') if encoder.position_bias and input_length is not None and time is not None: src_pos = tf.tile(tf.expand_dims(tf.range(time_steps), axis=0), [batch_size, 1]) trg_pos = tf.tile(tf.reshape(time, [1, 1]), [batch_size, time_steps]) src_len = tf.tile(tf.expand_dims(input_length, axis=1), [1, time_steps]) # - 1 pos_feats = tf.to_float(tf.stack([src_pos, trg_pos, src_len], axis=2)) pos_feats = tf.log(1 + pos_feats)
tensorflow.contrib.layers.layer_norm
2,054
import tensorflow as tf

                   bias=True, activation=tf.nn.relu, kernel_size=5,
                   name="char_conv", reuse=True)
ch_emb = tf.reduce_max(ch_emb, axis=1)
qh_emb = tf.reduce_max(qh_emb, axis=1)

ch_emb = tf.reshape(ch_emb, [N * self.max_p_num, PL, -1])
tensorflow.reduce_max
2,055
import tensorflow as tf

def lrelu(x, leak=0.2, name="lrelu"):
    return tf.maximum(x, leak * x)

def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name="conv2d"):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        # print("c", w.get_shape())
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
tensorflow.variable_scope
2,056
import tensorflow as tf initializer=tf.ones_initializer(), trainable=False) self._moving_variance = tf.subtract(self._moving_second_moment, tf.square(self._moving_mean), name="moving_variance") def build_batch_stats(): """Builds the batch statistics calculation ops.""" # Copy for better stability. # We use the moving mean as an estimate of the mean in order to perform # a more numerically stable calculation of the batch mean. shift = tf.add(self._moving_mean, 0) counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics( input_batch, reduction_indices, keep_dims=True, shift=shift, name="batch_norm_ss") mean, variance = tf.nn.normalize_moments(counts, shifted_sum_x, shifted_sum_x2, shift, name="normalize_moments")
tensorflow.add
2,057
import tensorflow as tf from resan.utils.nn import bn_dense_layer, dropout, linear from resan.utils.general import exp_mask_for_high_rank, mask_for_high_rank from resan.rl_nn import reduce_data_rep_max_len def reinforced_self_attention( rep_tensor, rep_mask, dep_selection, head_selection, hn=None, keep_unselected=True, scope=None, keep_prob=1., is_train=None, wd=0., activation='elu' ): with tf.variable_scope(scope or 'reinforced_self_attention'): fw_result = directional_attention_with_selections( rep_tensor, rep_mask, dep_selection, head_selection, 'forward', hn, keep_unselected, 'forward_resa', keep_prob, is_train, wd, activation ) bw_result = directional_attention_with_selections( rep_tensor, rep_mask, dep_selection, head_selection, 'backward', hn, keep_unselected, 'backward_resa', keep_prob, is_train, wd, activation
tensorflow.variable_scope
2,058
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder

        shape=[1],
        dtype=dtypes.int64,
        default_value=array_ops.zeros([1], dtype=dtypes.int64))
}
items_to_handlers = {
    'image': tfexample_decoder.Image(),
    'label': tfexample_decoder.Tensor('image/class/label'),
}

decoder = tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)
tensorflow.contrib.slim.python.slim.data.tfexample_decoder.Image
2,059
import tensorflow as tf ref=self.internals_memory[name], indices=indices, updates=internals[name] )) for name in sorted(actions): assignments.append(tf.scatter_update( ref=self.actions_memory[name], indices=indices, updates=actions[name] )) assignments.append(tf.scatter_update(ref=self.terminal_memory, indices=indices, updates=terminal)) assignments.append(tf.scatter_update(ref=self.reward_memory, indices=indices, updates=reward)) # Add episode indices. with tf.control_dependencies(control_inputs=assignments): num_episodes = tf.count_nonzero(input_tensor=terminal, axis=0, dtype=util.tf_dtype('int')) assignment = tf.assign( ref=self.episode_indices[self.episode_count: self.episode_count + num_episodes], value=tf.boolean_mask(tensor=indices, mask=terminal) )
tensorflow.scatter_update
2,060
import tensorflow as tf return tf.nn.relu elif nl_type == 'elu': return tf.nn.elu elif nl_type == 'selu': return tf.nn.selu elif nl_type == 'leaky_relu': return tf.nn.leaky_relu elif nl_type == 'hard_tanh': return lambda z: tf.maximum(tf.minimum(z, 1), 0) else: raise NotImplementedError(nl_type) def update_params(self, kwargs): """Update the class attributes with kwargs.""" if kwargs is not None: for k, v in kwargs.iteritems():
tensorflow.minimum
2,061
from tensorflow.python.framework import ops

ops.RegisterShape("Relu")(common_shapes.unchanged_shape)
ops.RegisterShape("Relu6")(common_shapes.unchanged_shape)
ops.RegisterShape("Elu")(common_shapes.unchanged_shape)
ops.RegisterShape("Softplus")(common_shapes.unchanged_shape)
ops.RegisterShape("Softsign")(common_shapes.unchanged_shape)

@ops.RegisterShape("ReluGrad")
@ops.RegisterShape("Relu6Grad")
@ops.RegisterShape("EluGrad")
@ops.RegisterShape("SoftplusGrad")
@ops.RegisterShape("SoftsignGrad")
def _BinaryElementwiseShape(op):
    """Returns same shape as both inputs to op.
tensorflow.python.framework.ops.RegisterShape
2,062
import tensorflow as tf def _MergeOneToken(tokens, i): return tf.expand_dims( self._MergeTokens((tokens[i], tokens[i + 1])), axis=-1) def _MergeCandidates(tokens, candidates): """Merge in the reverse binary tree.""" best_id = tf.argmin(candidates, output_type=tf.int32) # Perform the merge at position best_id. tokens = tf.concat( [tokens[:best_id], [candidates[best_id]], tokens[best_id + 2:]], axis=0) # Recompute the merge candidates. # Only the neighbors of best_id need to be recomputed. empty = tf.zeros([0], dtype=candidates.dtype) def _MergeLeft(): return tf.concat( [candidates[:best_id - 1], _MergeOneToken(tokens, best_id - 1)], axis=0) left_candidates = tf.cond(tf.equal(best_id, 0), lambda: empty, _MergeLeft) def _MergeRight(): return tf.concat( [_MergeOneToken(tokens, best_id), candidates[best_id + 2:]], axis=0)
tensorflow.zeros
2,063
import tensorflow as tf all_top_1_ops = tf.reduce_sum(all_top_1_ops) all_top_5_ops = tf.reduce_sum(all_top_5_ops) fetches = [all_top_1_ops, all_top_5_ops] + enqueue_ops return (enqueue_ops, fetches) extra_nccl_ops = [] apply_gradient_devices, gradient_state = ( self.variable_mgr.preprocess_device_grads(device_grads)) training_ops = [] for d, device in enumerate(apply_gradient_devices): with tf.device(device): total_loss = tf.reduce_mean(losses) avg_grads = self.variable_mgr.get_gradients_to_apply(d, gradient_state) gradient_clip = FLAGS.gradient_clip learning_rate = self.model_conf.get_learning_rate() if self.dataset and FLAGS.num_epochs_per_decay > 0: num_batches_per_epoch = ( self.dataset.num_examples_per_epoch() / self.batch_size) decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay) # Decay the learning rate exponentially based on the number of steps.
tensorflow.reduce_mean
2,064
import tensorflow as tf

res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)

# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
with tf.variable_scope("other"):
    d3, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
        enc_inp, dec_inp2, cell, num_encoder_symbols=2,
        num_decoder_symbols=5, embedding_size=2,
        feed_previous=tf.constant(True))
sess.run([tf.global_variables_initializer()])
tensorflow.variable_scope
2,065
import tensorflow as tf with tf.device("/cpu:0"): embedding = tf.get_variable( "embedding", [vocab_size, size], dtype=data_type()) inputs = tf.nn.embedding_lookup(embedding, input_.input_data) if is_training and config.keep_prob < 1: inputs = tf.nn.dropout(inputs, config.keep_prob) output, state = self._build_rnn_graph(inputs, config, is_training) softmax_w = tf.get_variable( "softmax_w", [size, vocab_size], dtype=data_type()) softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type()) logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b) # Reshape logits to be a 3-D tensor for sequence loss logits = tf.reshape(logits, [self.batch_size, self.num_steps, vocab_size]) # Use the contrib sequence loss and average over the batches loss = tf.contrib.seq2seq.sequence_loss( logits, input_.targets, tf.ones([self.batch_size, self.num_steps], dtype=data_type()), average_across_timesteps=False, average_across_batch=True) # Update the cost self._cost = tf.reduce_sum(loss) self._final_state = state if not is_training:
tensorflow.reshape
2,066
import tensorflow as tf

    Raises:
        NotImplementedError: If an unsupported optimizer is requested.
    """
    # TODO(user): gradient clipping (see Minimize)
    if optimizer == 'adagrad':
        train_op = tf.train.AdagradOptimizer(learning_rate)
    elif optimizer == 'adam':
        train_op = tf.train.AdamOptimizer(learning_rate)
    elif optimizer == 'momentum':
        train_op = tf.train.MomentumOptimizer(learning_rate, momentum)
    elif optimizer == 'rmsprop':
        train_op = tf.train.RMSPropOptimizer(learning_rate, momentum)
    elif optimizer == 'sgd':
        train_op = tf.train.GradientDescentOptimizer(learning_rate)
tensorflow.train.AdamOptimizer
2,067
from tensorflow.python.ops import control_flow_ops

self.d_losses[-1] = control_flow_ops.with_dependencies([barrier], self.d_losses[-1])
self.g_losses[-1] = control_flow_ops.with_dependencies([barrier], self.g_losses[-1])
self.d_loss_real = control_flow_ops.with_dependencies([barrier], self.d_loss_real)
self.d_loss_fake = control_flow_ops.with_dependencies([barrier], self.d_loss_fake)
self.d_loss_class = control_flow_ops.with_dependencies([barrier], self.d_loss_class)
t_vars = self._get_vars_semi_supervised()
tensorflow.python.ops.control_flow_ops.with_dependencies
2,068
import tensorflow as tf # zero mean conv if type(size) == int: size = [size, size] in_ch = input.get_shape().as_list()[-1] # init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32) init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02) filters = tf.get_variable('zero_conv_weights' + id, initializer=init, shape=[size[0], size[1], in_ch, channels]) filters = filters - tf.reduce_mean(filters, axis=[0, 1, 2], keepdims=True) if padding == "PARTIAL": with tf.variable_scope('mask'): _, h, w, _ = input.get_shape().as_list() slide_window = size[0] * size[1] mask = tf.ones(shape=[1, h, w, 1]) update_mask = tf.layers.conv2d(mask, filters=1, name='mask' + id, kernel_size=size, kernel_initializer=tf.constant_initializer(1.0), strides=stride, padding="SAME", use_bias=False, trainable=False, dilation_rate=(dilation, dilation)) mask_ratio = slide_window / (update_mask + 1e-8) update_mask = tf.clip_by_value(update_mask, 0.0, 1.0) mask_ratio = mask_ratio * update_mask with tf.variable_scope('parconv'): x = tf.nn.conv2d(input, filters, strides=[1, stride, stride, 1], padding="SAME", name='zero-conv_' + id, dilations=(1, dilation, dilation, 1)) x = x * mask_ratio if use_bias: bias = tf.get_variable("bias" + id, [channels], initializer=tf.constant_initializer(0.0)) x = tf.nn.bias_add(x, bias) return x * update_mask
tensorflow.constant_initializer
2,069
import tensorflow as tf

def validation():
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    images = tf.convert_to_tensor(np.expand_dims(x_test / 255.0, -1), dtype=tf.float32)
tensorflow.keras.datasets.mnist.load_data
2,070
import tensorflow as tf

def inference(self, images):
    images = tf.cast(images, tf.float32) / 255.0
    l1 = tf.matmul(images, self.w1) + self.b1
    l1 = tf.nn.relu(l1)
    l2 = tf.matmul(l1, self.w2) + self.b2
    l2 = tf.nn.relu(l2)
    l3 = tf.matmul(l2, self.w3) + self.b3
    l3 = tf.nn.relu(l3)
    out = tf.matmul(l3, self.w4) + self.b4
    return out

def test_inference(self, images):
    images = tf.cast(images, tf.float32) / 255.0
    l1 = tf.matmul(images, self.w1) + self.b1
    l1 = tf.nn.relu(l1)
    l2 = tf.matmul(l1, self.w2) + self.b2
    l2 = tf.nn.relu(l2)
tensorflow.matmul
2,071
import tensorflow as tf

elif actL == 'esp' or actL == 'relu':
    # r2 score
    norm = tf.reduce_mean(tf.squared_difference(Y, tf.reduce_mean(Y)))
    accuracy = 1 - tf.divide(tf.reduce_mean(tf.squared_difference(an, Y)), norm)
elif actL == 'softmax':
    # accuracy score for multiclass classification
    Yp = tf.sigmoid(betan * hn)
    correct = tf.equal(tf.argmax(Yp), tf.argmax(Y))
    accuracy = tf.reduce_mean(tf.cast(correct, "float"))

# -----------------Initialize the graph and start the session-----------------
init = tf.global_variables_initializer()
with tf.Session() as sess:
tensorflow.argmax
2,072
import tensorflow as tf def testGPU(self): if not tf.test.is_built_with_cuda(): return save_path = os.path.join(self.get_temp_dir(), "gpu") with tf.Session("", graph=tf.Graph()) as sess: with sess.graph.device("/gpu:0"): v0_1 = tf.Variable(123.45) save = tf.train.Saver({"v0": v0_1}) tf.initialize_all_variables().run() save.save(sess, save_path) with tf.Session("", graph=tf.Graph()) as sess: with sess.graph.device("/gpu:0"): v0_2 = tf.Variable(543.21) save = tf.train.Saver({"v0": v0_2}) tf.initialize_all_variables().run() self.assertAllClose(543.21, v0_2.eval()) save.restore(sess, save_path) self.assertAllClose(123.45, v0_2.eval()) def testVariables(self): save_path = os.path.join(self.get_temp_dir(), "variables") with tf.Session("", graph=tf.Graph()) as sess: one = tf.Variable(1.0) twos = tf.Variable([2.0, 2.0, 2.0]) init = tf.initialize_all_variables() save = tf.train.Saver(tf.all_variables()) init.run() save.save(sess, save_path)
tensorflow.train.Saver
2,073
import tensorflow as tf Args: preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: prediction_dict: a dictionary holding prediction tensors to be passed to the Loss or Postprocess functions. """ flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs) class_prediction = tf.contrib.layers.fully_connected( flattened_inputs, self._num_classes) box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4) return { 'class_predictions_with_background': tf.reshape( class_prediction, [-1, 1, self._num_classes]), 'box_encodings': tf.reshape(box_prediction, [-1, 1, 4]) } def postprocess(self, prediction_dict, true_image_shapes, **params): """Convert predicted output tensors to final detections. Unused. Args: prediction_dict: a dictionary holding prediction tensors. true_image_shapes: int32 tensor of shape [batch, 3] where each row is
tensorflow.contrib.layers.fully_connected
2,074
from tensorflow.contrib.learn.python.learn.estimators import test_data

bucketized_feature = feature_column.bucketized_column(
    cont_feature, test_data.get_quantile_based_buckets(iris.data, 10))
tensorflow.contrib.learn.python.learn.estimators.test_data.get_quantile_based_buckets
2,075
import tensorflow as tf

d = tf.data.Dataset.from_tensor_slices({
    "input_ids":
        tf.constant(all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32),
    "input_mask":
        tf.constant(all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32),
    "segment_ids":
        tf.constant(all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32),
    "label_ids":
        tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})

if is_training:
    d = d.repeat()
    d = d.shuffle(buffer_size=100)
tensorflow.constant
2,076
import tensorflow as tf

    else:
        return mean_cost, accuracy, y_pred

def training(self, cost):
    optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
    # train_op = optimizer.minimize(cost)
    trainables = tf.trainable_variables()
    grads = tf.gradients(cost, trainables)
    grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.clip_norm)
    capped_gvs = zip(grads, trainables)
    train_op = optimizer.apply_gradients(capped_gvs)
    return train_op
tensorflow.trainable_variables
2,077
import tensorflow as tf def dynamic_batching(paths): spectrograms, max_x = [], 0 for path in paths: spectrograms.append(np.load("spectrogram/" + path + ".npy")) if spectrograms[-1].shape[0] > max_x: max_x = spectrograms[-1].shape[0] return spectrograms, max_x # In[4]: tf.reset_default_graph() sess = tf.InteractiveSession() model = Model() sess.run(tf.global_variables_initializer()) # In[5]: for e in range(30): pbar = tqdm(range(0, len(text_files), batch_size), desc="minibatch loop") total_cost, total_acc = 0, 0 for k in pbar: index = min(k + batch_size, len(text_files)) files, max_x = dynamic_batching(paths[k:index]) max_y = max(lengths[k:index]) batch_x = np.zeros((len(files), max_x, n_mels * reduction_factor)) batch_y = np.zeros((len(files), max_y))
tensorflow.global_variables_initializer
2,078
import tensorflow as tf

tf.gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not tf.gfile.Exists(filepath):
    temp_file_name, _ = urllib.request.urlretrieve(source_url)
    tf.gfile.Copy(temp_file_name, filepath)
    with tf.gfile.GFile(filepath) as f:
        size = f.size()
    print("Successfully downloaded", filename, size, "bytes.")
return filepath
tensorflow.gfile.GFile
2,079
from tensorflow.python.framework import tensor_shape

input_shape = input_shape.merge_with(out_backprop_shape)
vector_dim = input_shape[3]
vector_dim = vector_dim.merge_with(mean_shape[0])
vector_dim = vector_dim.merge_with(var_shape[0])
vector_dim = vector_dim.merge_with(beta_shape[0])
return [input_shape] + ([tensor_shape.vector(vector_dim)] * 4)

ops.RegisterShape("Conv2D")(common_shapes.conv2d_shape)
ops.RegisterShape("DepthwiseConv2dNative")(
tensorflow.python.framework.tensor_shape.vector
2,080
import tensorflow as tf

# # randomly select negative examples for classification
# selected_neg_mask = tf.random_uniform(tf.shape(gscores), minval=0, maxval=1.) < tf.where(
#     tf.greater(n_negtives, 0),
#     tf.divide(tf.cast(n_neg_to_select, tf.float32), n_negtives),
#     tf.zeros_like(tf.cast(n_neg_to_select, tf.float32)),
#     name='rand_select_negtive')

# include both selected negative and all positive examples
final_mask = tf.stop_gradient(tf.logical_or(tf.logical_and(negtive_mask, selected_neg_mask), positive_mask))
total_examples = tf.reduce_sum(tf.cast(final_mask, tf.float32))

# add mask for glabels and cls_pred here
glabels = tf.boolean_mask(tf.clip_by_value(glabels, 0, FLAGS.num_classes), tf.stop_gradient(final_mask))
cls_pred = tf.boolean_mask(cls_pred, tf.stop_gradient(final_mask))
location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))
gtargets = tf.boolean_mask(gtargets, tf.stop_gradient(positive_mask))

predictions = {
    'classes': tf.argmax(cls_pred, axis=-1),
    'probabilities': tf.reduce_max(tf.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1),
    'bboxes_predict': tf.reshape(bboxes_pred, [-1, 4])
}

if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

# Calculate loss, which includes softmax cross entropy and L2 regularization.
cross_entropy = tf.cond(n_positives > 0.,
                        lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred),
                        lambda: 0.)
# cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred)
tensorflow.stop_gradient
2,081
import tensorflow as tf

        rep_tensor, rep_mask, dep_selection, head_selection, direction=None, hn=None, keep_unselected=True,
        scope=None, keep_prob=1., is_train=None, wd=0., activation='elu'):
    bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]
    org_ivec = rep_tensor.get_shape().as_list()[2]
    ivec = hn or org_ivec
tensorflow.shape
2,082
from tensorflow.python.framework import ops else array_ops.shape(x)) batch_shape = array_ops.slice(s, (1,), (self.batch_ndims,)) # Since sample_dims=1 and is left-most, we add 1 to the number of # batch_ndims to get the event start dim. event_start = array_ops.where( self._batch_ndims_is_0, 2, 1 + self.batch_ndims) event_shape = array_ops.slice(s, (event_start,), (self.event_ndims,)) new_shape = array_ops.concat(0, (sample_shape, batch_shape, event_shape)) x = array_ops.reshape(x, shape=new_shape) return x @contextlib.contextmanager def _name_scope(self, name=None, values=None): """Helper function to standardize op scope.""" with ops.name_scope(self.name): with ops.name_scope(name, values=( (values or []) + [self.batch_ndims, self.event_ndims])) as scope: yield scope def _is_all_constant_helper(self, *args): """Helper which returns True if all inputs are constant_value.""" return all(tensor_util.constant_value(x) is not None for x in args) def _assert_non_negative_int32_scalar(self, x): """Helper which ensures that input is a non-negative, int32, scalar.""" x = ops.convert_to_tensor(x, name="x") if x.dtype.base_dtype != dtypes.int32.base_dtype: raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32)) x_value_static = tensor_util.constant_value(x) if x.get_shape().ndims is not None and x_value_static is not None:
tensorflow.python.framework.ops.name_scope
2,083
import tensorflow as tf

      to arrive at a single scalar output. If False, only collapses the batch
      dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` of type int64.
  """
  with tf.compat.v1.name_scope(name, 'size'):
    # Note: Calling `sum` defined in this module, not the builtin.
    if isinstance(x, tf.SparseTensor):
      ones_like_x = tf.SparseTensor(
          indices=x.indices,
          values=tf.ones_like(x.values, tf.int64),
          dense_shape=x.dense_shape)
tensorflow.compat.v1.name_scope
2,084
import tensorflow as tf size = shape[1].value*shape[2].value*shape[3].value else: size = shape[-1].value with tf.variable_scope(layer_name): w = tf.get_variable(name='weight', shape=[size, out_nodes], initializer=tf.constant_initializer(0.0)) b = tf.get_variable(name='bias', shape=[out_nodes], initializer=tf.constant_initializer(0.0)) # batch? flat_x = tf.reshape(x, [-1,size]) x = tf.nn.bias_add(tf.matmul(flat_x,w), b) x = tf.nn.relu(x) return x def lstm(): ''' Build LSTM cell ''' pass
tensorflow.reshape
2,085
import tensorflow as tf

img = img / tf.constant([cfgs.PIXEL_STD])

gtboxes_and_label_h = get_horizen_minAreaRectangle(
    tf.reshape(gtboxes_and_label_batch[start:end], [-1, 9]))
gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [cfgs.BATCH_SIZE, -1, 5])

gtboxes_and_label_q = tf.reshape(gtboxes_and_label_batch[start:end], [cfgs.BATCH_SIZE, -1, 9])

num_objects = num_objects_batch[start:end]
num_objects = tf.cast(tf.reshape(num_objects, [cfgs.BATCH_SIZE, -1, ]), tf.float32)

img_h = img_h_batch[start:end]
tensorflow.reshape
2,086
import tensorflow as tf rewards_vec = tf.reshape(rewards, (batch_size, num_tasks)) dones_vec = tf.reshape(dones, (batch_size, num_tasks)) relabelled_obs = self._task_distribution.combine(states_tiled, tasks_tiled) action_distribution = self._actor( relabelled_obs, step_type=(), network_state=())[0] log_pi = common.log_probability(action_distribution, actions_tiled, action_spec) log_pi_vec = tf.reshape(log_pi, (batch_size, num_tasks)) logits_vec = ( rewards_vec - log_pi_vec + self._gamma * (1.0 - dones_vec) * q_vals_vec) if self._relabel_type == "random": logits_vec = tf.ones_like(logits_vec) # Hack to make sampling random ## End new version
tensorflow.reshape
2,087
import tensorflow as tf

output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value

output_weights = tf.get_variable(
    "output_weights", [num_labels, hidden_size],
    initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
    "output_bias", [num_labels], initializer=tf.zeros_initializer())

with tf.variable_scope("loss"):
tensorflow.truncated_normal_initializer
2,088
import tensorflow as tf

ts = 0
with tf.device("/gpu:0"):
    approx_scskconv = sc_module.direct_sparse_conv_kd(pd.out_indices, pd.out_values, pd.out_shape, pd.out_block_channel_mapping,
                                                      pf.out_indices, pf.out_values, pf.out_shape, pf.out_channel_mapping,
                                                      bias, strides, padding, out_entry_count, dim, max_density, filter_type)
tensorflow.device
2,089
import tensorflow as tf self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3 mean, var = tf.nn.moments(tgtimg_z, axes=[0]) print(var.get_shape()) # self.simloss /= tf.reduce_mean(var) print(tgtimg_z.get_shape()) self.out = output_h4# + contextimg#tf.nn.tanh(h4) self.out2 = truthoutput_h4 self.recon1 = tf.nn.l2_loss(tgtimg - self.out) self.recon2 = tf.nn.l2_loss(tgtimg - self.out2) # self.loss = self.recon1 + self.recon2 + self.simloss if ablation_type == "None": self.loss = self.recon1 + self.recon2 + self.simloss elif ablation_type == "L2": self.loss = self.recon1 + self.recon2 elif ablation_type == "L2L3": self.loss = self.recon1
tensorflow.nn.l2_loss
2,090
import tensorflow as tf

"""Checks that `perm` is valid."""
with tf.name_scope(name, 'maybe_validate_perm', [perm]):
tensorflow.name_scope
2,091
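The seed above is a two-line fragment, so here is a minimal sketch of the TF1 three-argument form tf.name_scope(name, default_name, values); the perm tensor is a hypothetical stand-in.

import tensorflow as tf

perm = tf.constant([2, 0, 1])
# With name=None the default_name is used; ops created inside get that prefix.
with tf.name_scope(None, 'maybe_validate_perm', [perm]):
    perm_checked = tf.identity(perm)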
import tensorflow as tf

nsteps = len(xs)
with tf.variable_scope(scope):
    wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
    wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
    b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, x in enumerate(xs):
tensorflow.constant_initializer
2,092
import tensorflow as tf if FLAGS.task.reset_policy: # NOTE: reset policy and valuefunc logger.info("Resetting Policy") pol_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())]) tf.get_default_session().run(tf.variables_initializer(policy.parameters())) pol_params_after = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())]) print ("pol_params:", np.linalg.norm(pol_params), "pol_params_after_reset:", np.linalg.norm(pol_params_after)) logger.info("Resetting Valuefunc") tf.get_default_session().run(tf.variables_initializer(vfn.parameters())) tf.get_default_session().run(tf.variables_initializer(warmup_policy.parameters())) tf.get_default_session().run(tf.variables_initializer(warmup_vfn.parameters())) for p in warmup_policy.parameters(): p.invalidate() for p in warmup_vfn.parameters(): p.invalidate() for p in policy.parameters(): p.invalidate() for p in vfn.parameters(): p.invalidate() last_end = None drops = [] evaluate(settings, 'pre-warm-up') returns_pre_warmup = testeval(policy, runners['collect']) if test:
tensorflow.get_default_session
2,093
import tensorflow as tf scope: string, variable scope moments_dims: a list of ints, indicating dimensions for moments calculation bn_decay: float or float tensor variable, controling moving average weight Return: normed: batch-normalized maps """ with tf.variable_scope(scope) as sc: num_channels = inputs.get_shape()[-1].value beta = tf.Variable(tf.constant(0.0, shape=[num_channels]), name='beta', trainable=True) gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]), name='gamma', trainable=True) batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments') decay = bn_decay if bn_decay is not None else 0.9 ema = tf.train.ExponentialMovingAverage(decay=decay) # Operator that maintains moving averages of variables. ema_apply_op = tf.cond(is_training, lambda: ema.apply([batch_mean, batch_var]), lambda: tf.no_op()) # Update moving average and return current batch's avg and var. def mean_var_with_update(): with tf.control_dependencies([ema_apply_op]): return tf.identity(batch_mean), tf.identity(batch_var) # ema.average returns the Variable holding the average of var. mean, var = tf.cond(is_training, mean_var_with_update,
tensorflow.train.ExponentialMovingAverage
2,094
import tensorflow as tf """Model helper for creating a ResNet model for the CIFAR-10 dataset.""" import tensorflow as tf from nets.abstract_model_helper import AbstractModelHelper from datasets.cifar10_dataset import Cifar10Dataset from utils.external import resnet_model as ResNet from utils.lrn_rate_utils import setup_lrn_rate_piecewise_constant from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_integer('resnet_size', 20, '# of layers in the ResNet model') tf.app.flags.DEFINE_float('nb_epochs_rat', 1.0, '# of training epochs\'s ratio') tf.app.flags.DEFINE_float('lrn_rate_init', 1e-1, 'initial learning rate') tf.app.flags.DEFINE_float('batch_size_norm', 128, 'normalization factor of batch size') tf.app.flags.DEFINE_float('momentum', 0.9, 'momentum coefficient') tf.app.flags.DEFINE_float('loss_w_dcy', 2e-4, 'weight decaying loss\'s coefficient') def forward_fn(inputs, is_train, data_format): """Forward pass function. Args: * inputs: inputs to the network's forward pass * is_train: whether to use the forward pass with training operations inserted * data_format: data format ('channels_last' OR 'channels_first') Returns:
tensorflow.app.flags.DEFINE_float
2,095
import tensorflow as tf

                          param_noise_filter_func=param_noise_filter_func)
else:
    act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)

with tf.variable_scope(scope, reuse=reuse):
    # set up placeholders
    obs_t_input = U.ensure_tf_input(make_obs_ph("obs_t"))
    act_t_ph = tf.placeholder(tf.int32, [None], name="action")
    rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
    obs_tp1_input = U.ensure_tf_input(make_obs_ph("obs_tp1"))
    done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
    importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")

    # q network evaluation
tensorflow.placeholder
2,096
import tensorflow as tf if init: x = tf.nn.conv2d(x, tf.nn.l2_normalize(V.initialized_value(), [0, 1, 2]), [1] + list(stride) + [1], pad) init_scale=.01 m_init, v_init = tf.nn.moments(x, [0,1,2]) scale_init = init_scale / tf.sqrt(v_init + 1e-10) with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]): x = tf.reshape(scale_init, [1, 1, 1, num_filters]) * (x - tf.reshape(m_init, [1, 1, 1, num_filters])) else: V = maybe_avg(V) g = maybe_avg(g) b = maybe_avg(b) # use weight normalization (Salimans & Kingma, 2016)
tensorflow.reshape
2,097
import tensorflow as tf

            filter_size[1] - input_.get_shape().as_list()[2],
            input_.get_shape().as_list()[3]
        ])
        res = tf.concat(axis=1, values=[pad_1, res])
        res = tf.concat(axis=2, values=[pad_2, res])
        res = tf.nn.conv2d(
            input=res,
            filter=weights,
tensorflow.concat
2,098
import tensorflow as tf x_data = tf.placeholder(shape=[sentence_size], dtype=tf.int32) y_target = tf.placeholder(shape=[1, 1], dtype=tf.float32) # Text-Vocab Embedding x_embed = tf.nn.embedding_lookup(identity_mat, x_data) x_col_sums = tf.reduce_sum(x_embed, 0) # Declare model operations x_col_sums_2D = tf.expand_dims(x_col_sums, 0) model_output = tf.add(tf.matmul(x_col_sums_2D, A), b) # Declare loss function (Cross Entropy loss) loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target)) # Prediction operation prediction = tf.sigmoid(model_output) # Declare optimizer my_opt = tf.train.GradientDescentOptimizer(0.001) train_step = my_opt.minimize(loss) # Intitialize Variables init = tf.global_variables_initializer() sess.run(init) # Start Logistic Regression print('Starting Training Over {} Sentences.'.format(len(texts_train))) loss_vec = [] train_acc_all = [] train_acc_avg = []
tensorflow.sigmoid
2,099