Columns:
seed        string, lengths 25 to 2.89k   (code snippet)
seed_api    string, lengths 14 to 102     (fully-qualified API name used in the snippet)
index       int64, values 0 to 14.8k      (row index)
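If this table is exposed as a Hugging Face dataset (an assumption; the path below is a made-up placeholder), the three columns can be iterated with the `datasets` library. A minimal sketch, not part of the table itself:

from datasets import load_dataset

# "username/tf-api-seeds" is a hypothetical placeholder path.
ds = load_dataset("username/tf-api-seeds", split="train")
for row in ds.select(range(3)):
    # Each record pairs a TensorFlow snippet ("seed") with the
    # fully-qualified API it exercises ("seed_api") and a row id ("index").
    print(row["index"], row["seed_api"])
    print(row["seed"][:80], "...")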
import tensorflow as tf logits, input_.targets, tf.ones([self.batch_size, self.num_steps], dtype=tf.float32), average_across_timesteps=False, average_across_batch=True) self._cost = tf.reduce_sum(loss) self._final_state = state if not is_training: return self._lr = tf.Variable(0., trainable=False) tvars = tf.trainable_variables() grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars), config.max_grad_norm) optimizer = tf.train.GradientDescentOptimizer(self._lr) self._train_op = optimizer.apply_gradients( zip(grads, tvars), global_step=tf.train.get_or_create_global_step()) self._new_lr = tf.placeholder( tf.float32, shape=[], name='new_learning_rate') self._lr_update = tf.assign(self._lr, self._new_lr) self.saver = tf.train.Saver(tf.global_variables()) def _get_lstm_cell(self, config, is_training):
tensorflow.gradients
1,100
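As an aside (not a row of the table), a minimal TF 1.x sketch of the tensorflow.gradients call tagged above, paired with the global-norm clipping pattern the snippet uses; all tensor names here are made up:

import tensorflow as tf  # assumes TensorFlow 1.x graph mode, as in the snippets

x = tf.placeholder(tf.float32, shape=[None, 3], name="x")
w = tf.Variable(tf.ones([3, 1]), name="w")
loss = tf.reduce_mean(tf.square(tf.matmul(x, w)))

# tf.gradients(ys, xs) returns one gradient tensor per entry in xs.
grads = tf.gradients(loss, [w])
# Clip by global norm before applying, as in the snippet above.
clipped, _ = tf.clip_by_global_norm(grads, clip_norm=5.0)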
import tensorflow as tf def lnlstm(xs, ms, s, scope, nh, init_scale=1.0): nbatch, nin = [v.value for v in xs[0].get_shape()] with tf.variable_scope(scope): wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale)) gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0)) bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0)) wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale)) gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0)) bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0)) b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0)) gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0)) bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0)) c, h = tf.split(axis=1, num_or_size_splits=2, value=s) for idx, (x, m) in enumerate(zip(xs, ms)): c = c*(1-m) h = h*(1-m) z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
tensorflow.constant_initializer
1,101
import tensorflow as tf self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)] self.sess.run(tf.global_variables_initializer()) # Tensorboard if summary_dir is not None: self.writer = tf.summary.FileWriter(summary_dir) tf.summary.scalar('Loss/Policy', loss_pg) tf.summary.scalar('Loss/Value', loss_vf) tf.summary.scalar('Loss/Entropy', loss_entropy) tf.summary.scalar('Loss/Total', loss) tf.summary.scalar('Var/Epsilon', epsilon_decay) tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode())) tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev())) tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf)) self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES)) # AC net def build_anet(self, state_in, name, reuse=False, batch_size=64): reg = None with tf.variable_scope(name, reuse=reuse): layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg) layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg) lstm_a = tf.nn.rnn_cell.LSTMCell(num_units=256) lstm_a = tf.nn.rnn_cell.DropoutWrapper(lstm_a, output_keep_prob=self.keep_prob) state_init_a = lstm_a.zero_state(batch_size=batch_size, dtype=tf.float32) lstm_ain = tf.expand_dims(layer_a2, axis=1)
tensorflow.reduce_mean
1,102
import tensorflow as tf fake_loss, tf.trainable_variables('.*/progressive_gan_block_1/.*')), _get_grad_norm( fake_loss, tf.trainable_variables('.*/progressive_gan_block_2/.*')), _get_grad_norm( fake_loss, tf.trainable_variables('.*/progressive_gan_block_3/.*')) ] grad_norms_output = None with self.test_session(use_gpu=True) as sess: sess.run(tf.global_variables_initializer()) x1_np = sess.run(x, feed_dict={current_image_id_ph: 0.12}) x2_np = sess.run(x, feed_dict={current_image_id_ph: 1.8}) grad_norms_output = np.array([ sess.run(grad_norms, feed_dict={current_image_id_ph: i}) for i in range(15) # total num of images ]) self.assertEqual((2, 16, 16, 3), x1_np.shape)
tensorflow.global_variables_initializer
1,103
import tensorflow as tf model_dir = os.path.abspath(model_dir) p_name = os.path.join(model_dir, "params.json") m_name = os.path.join(model_dir, model_name + ".json") if not tf.gfile.Exists(p_name) or not tf.gfile.Exists(m_name): return params with tf.gfile.Open(p_name) as fd: tf.logging.info("Restoring hyper parameters from %s" % p_name) json_str = fd.readline() params.parse_json(json_str) with tf.gfile.Open(m_name) as fd: tf.logging.info("Restoring model parameters from %s" % m_name) json_str = fd.readline() params.parse_json(json_str) return params def export_params(output_dir, name, params): if not tf.gfile.Exists(output_dir): tf.gfile.MkDir(output_dir)
tensorflow.gfile.Open
1,104
import tensorflow as tf def create_global_steps(self, n_points_train_set): self.n_batches_per_epoch = np.ceil(n_points_train_set/self.batch_size["train"]) self.global_step = tf.train.get_or_create_global_step() self.global_epoch = tf.cast(tf.floor(tf.cast(self.global_step, tf.float32) / self.n_batches_per_epoch), tf.int64, "global_epoch") tf.add_to_collection("global_epoch", self.global_epoch) # this creates an operation to add to all trainable variables a white noise of param # std = tf.sqrt(variance)/10 def create_random_update_op(self): vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) update_opts = []
tensorflow.add_to_collection
1,105
import tensorflow as tf "max_prob":max_prob } ) } ) return estimator_spec elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, logits, label_ids): """Computes the loss and accuracy of the model.""" sentence_log_probs = tf.reshape( logits, [-1, logits.shape[-1]]) sentence_predictions = tf.argmax( logits, axis=-1, output_type=tf.int32) sentence_labels = tf.reshape(label_ids, [-1]) sentence_accuracy = tf.metrics.accuracy( labels=label_ids, predictions=sentence_predictions) sentence_mean_loss = tf.metrics.mean( values=per_example_loss) sentence_f = tf_metrics.f1(label_ids, sentence_predictions, num_labels, label_lst, average="macro") eval_metric_ops = { "f1": sentence_f,
tensorflow.argmax
1,106
import tensorflow as tf x, n, l, m, pmm, pmm1 = tf.while_loop( cond=_evaluate_legendre_polynomial_loop_cond, body=_evaluate_legendre_polynomial_loop_body, loop_vars=[x, n, l, m, pmm, pmm1]) return pmm1 def _evaluate_legendre_polynomial_branch(l, m, x, pmm): pmm1 = x * (2.0 * tf.cast(m, dtype=x.dtype) + 1.0) * pmm # if, l == m + 1 return pmm1, otherwise lift to the next band. res = tf.where( tf.equal(l, m + 1), pmm1, _evaluate_legendre_polynomial_loop(x, m, l, pmm, pmm1)) return res
tensorflow.cast
1,107
import tensorflow as tf run_log_dir = os.path.join(FLAGS.log_dir, 'exp_BN_bs_{bs}_lr_{lr}_aug_flip_brightness'.format(bs=FLAGS.batch_size, lr=FLAGS.learning_rate)) def weight_variable(shape): """weight_variable generates a weight variable of a given shape.""" initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial, name='weights') def bias_variable(shape): """bias_variable generates a bias variable of a given shape.""" initial = tf.constant(0.1, shape=shape) return tf.Variable(initial, name='biases') def deepnn(x, train): """deepnn builds the graph for a deep net for classifying CIFAR10 images. Args: x: an input tensor with the dimensions (N_examples, 3072), where 3072 is the number of pixels in a standard CIFAR10 image. Returns:
tensorflow.constant
1,108
import tensorflow as tf self.id_key = id_key self.meta_tag = meta_tag self.meta_signature = meta_signature self.meta_predictions = meta_predictions self.session = None self.graph = None self.feed_tensors = None self.fetch_tensors = None def process(self, inputs): # Create a session for every worker only once. The session is not # pickleable, so it can't be created at the DoFn constructor. if not self.session: self.graph = ops.Graph() with self.graph.as_default(): self.session = tf.Session() metagraph_def = loader.load( self.session, {self.meta_tag}, self.model_dir) signature_def = metagraph_def.signature_def[self.meta_signature] # inputs self.feed_tensors = { k: self.graph.get_tensor_by_name(v.name) for k, v in signature_def.inputs.items() } # outputs/predictions self.fetch_tensors = { k: self.graph.get_tensor_by_name(v.name) for k, v in signature_def.outputs.items()
tensorflow.Session
1,109
import tensorflow as tf return tf.multiply(tf.nn.l2_loss(x), weight_decay) else: return None if weight_decay is not None: reg = _reg else: reg = None kernel = tf.get_variable( 'w', ksize, initializer=init, regularizer=reg, dtype=dtype, trainable=True) return tf.nn.conv2d( x, kernel, strides, padding, data_format=data_format, use_cudnn_on_gpu=True) def _bottleneck_residual(x, ksize_list,
tensorflow.get_variable
1,110
import tensorflow as tf self.captions = tf.placeholder(tf.int32, [None, self.T + 1]) def _get_initial_lstm(self, features): with tf.variable_scope('initial_lstm'): features_mean = tf.reduce_mean(features, 1) w_h = tf.get_variable('w_h', [self.D, self.H], initializer=self.weight_initializer) b_h = tf.get_variable('b_h', [self.H], initializer=self.const_initializer) h = tf.nn.tanh(tf.matmul(features_mean, w_h) + b_h) w_c = tf.get_variable('w_c', [self.D, self.H], initializer=self.weight_initializer) b_c = tf.get_variable('b_c', [self.H], initializer=self.const_initializer) c = tf.nn.tanh(tf.matmul(features_mean, w_c) + b_c) return c, h def _word_embedding(self, inputs, reuse=False): with tf.variable_scope('word_embedding', reuse=reuse): w = tf.get_variable('w', [self.V, self.M], initializer=self.emb_initializer) x = tf.nn.embedding_lookup(w, inputs, name='word_vector') # (N, T, M) or (N, M) return x def _project_features(self, features):
tensorflow.get_variable
1,111
import tensorflow as tf self.num_hidden = args.num_hidden self.num_classes = args.num_classes self.dropout_output = args.dropout_output self.dropout_input = args.dropout_input self.clip_norm = args.clip_norm self.embedding_init = embedding_init self.x = tf.placeholder(tf.int32, [None, None], 'input') self.y = tf.placeholder(tf.int32, [None, self.num_classes], 'labels') self.seq_len = tf.placeholder(tf.int64, [None], 'input_length') def inference(self, forward_only=None):
tensorflow.placeholder
1,112
import tensorflow as tf # wrapper to make the optimizer work with TPUs if params['use_tpu']: gen_optimizer = tf.contrib.tpu.CrossShardOptimizer(gen_optimizer) dis_optimizer = tf.contrib.tpu.CrossShardOptimizer(dis_optimizer) gan_train_ops = tf.contrib.gan.gan_train_ops(gan_model, gan_loss, gen_optimizer, dis_optimizer) while_loop = tf.contrib.tpu.while_loop if params['use_tpu'] else tf.while_loop # train the discriminator 100 steps inputs = [tf.constant(0), tf.constant(0.0)] cond = lambda i, x: tf.less(i, 100) def body(i, x): return tf.add(i, 1), gan_train_ops.discriminator_train_op dis_train_op = while_loop(cond, body, inputs) # tf.contrib.gan's train op does not manage global steps in it train_op = tf.group( dis_train_op, gan_train_ops.generator_train_op, global_step.assign_add(1))
tensorflow.less
1,113
import tensorflow as tf self.global_step = tf.get_variable('global_step', shape=[], dtype=tf.int32, initializer=tf.constant_initializer(0), trainable=False) self.dropout = tf.placeholder_with_default(0.0, (), name="dropout") if self.demo: self.c = tf.placeholder(tf.int32, [None, config.test_para_limit],"context") self.q = tf.placeholder(tf.int32, [None, config.test_ques_limit],"question") self.ch = tf.placeholder(tf.int32, [None, config.test_para_limit, config.char_limit],"context_char") self.qh = tf.placeholder(tf.int32, [None, config.test_ques_limit, config.char_limit],"question_char") self.y1 = tf.placeholder(tf.int32, [None, config.test_para_limit],"answer_index1") self.y2 = tf.placeholder(tf.int32, [None, config.test_para_limit],"answer_index2") else: self.c, self.q, self.ch, self.qh, self.y1, self.y2, self.qa_id = batch.get_next() # self.word_unk = tf.get_variable("word_unk", shape = [config.glove_dim], initializer=initializer()) self.word_mat = tf.get_variable("word_mat", initializer=tf.constant( word_mat, dtype=tf.float32), trainable=False) self.char_mat = tf.get_variable( "char_mat", initializer=tf.constant(char_mat, dtype=tf.float32))
tensorflow.placeholder
1,114
import tensorflow as tf else: config = tf.estimator.RunConfig( model_dir=args.model_dir, save_checkpoints_steps=10, save_summary_steps=10) estimator = tf.estimator.Estimator( model_fn, config=config, params=params) estimator.train(train_input_fn, steps=100)
tensorflow.estimator.Estimator
1,115
import tensorflow as tf A = tf.matrix_triangular_solve(Lm, Kmn, lower=True) # compute the covariance due to the conditioning if full_cov: fvar = Knn - tf.matmul(A, A, transpose_a=True) fvar = tf.tile(fvar[None, :, :], [num_func, 1, 1]) # R x N x N else: fvar = Knn - tf.reduce_sum(tf.square(A), 0) fvar = tf.tile(fvar[None, :], [num_func, 1]) # R x N # another backsubstitution in the unwhitened case if not white: A = tf.matrix_triangular_solve(tf.transpose(Lm), A, lower=False) # construct the conditional mean fmean = tf.matmul(A, f, transpose_a=True)
tensorflow.tile
1,116
import tensorflow as tf tuple (final output, loss) ''' y_pairs = tf.reshape(output, [-1,2]) # fold: 1 x n -> [n/2 x 2] pos_scores, neg_scores = tf.split(1, 2, y_pairs) # separate pairs
tensorflow.reshape
1,117
import tensorflow as tf raise Exception("please input a name") logging.info(" [*] geting variables with %s" % name) # tvar = tf.trainable_variables() if train_only else tf.all_variables() if train_only: t_vars = tf.trainable_variables() else: try: # TF1.0+ t_vars = tf.global_variables() except Exception: # TF0.12 t_vars = tf.all_variables() d_vars = [var for var in t_vars if name in var.name] if printable: for idx, v in enumerate(d_vars):
tensorflow.global_variables
1,118
import tensorflow as tf :rtype: dictionary """ Winit = tf.truncated_normal(shape, mean=0, stddev=0.1) binit = tf.zeros(shape[-1]) layer = {} layer["weights"] = tf.get_variable(name + "/weights", dtype=tf.float32, initializer=Winit) layer["bias"] = tf.get_variable(name + "/bias", dtype=tf.float32,
tensorflow.get_variable
1,119
import tensorflow as tf min_iters=50) def benchmark_batching_large(self): with tf.Session() as session: @dynamic_batching.batch_fn def f(a, b): return a + b outputs = [] for _ in xrange(1000): outputs.append(f(tf.ones([1, 100000]), tf.ones([1, 100000]))) op_to_benchmark = tf.group(*outputs) tf.train.start_queue_runners() self.run_op_benchmark( name='batching_many_large', sess=session, op_or_tensor=op_to_benchmark, burn_iters=10, min_iters=50) if __name__ == '__main__': tf.test.main()
tensorflow.train.start_queue_runners
1,120
from tensorflow.python.framework import ops # Adds its gradient function, too. if self._grad_func: self._grad_func.add_to_graph(g) def __call__(self, *args, **kwargs): self.add_to_graph(ops.get_default_graph()) args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs return _call(self._definition.signature, *args, **kwargs) # NOTE: The list needs to be extended when more data types are added. _DTYPE_TO_STR = { dtypes.float16: "f16",
tensorflow.python.framework.ops.convert_to_tensor
1,121
import tensorflow as tf def GetWordPred(o_): logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias) return tf.nn.softmax(logits) preds = GetWordPred(wvsum) z = tf.tile(tf.reshape(tf.reduce_sum(preds,1),[-1,1]), [1, out_vocab_size]) self.preds, self.z = preds, z self.probs = tf.div(preds, z) #normalize self.unweighted_xent = _SafeXEnt(self.y, self.probs) self._xent = _SafeXEnt(self.y, self.probs, class_weights=weights) self.cost = tf.reduce_mean(self.example_weights * self._xent)
tensorflow.div
1,122
import tensorflow as tf Returns: A tensor with the discretized mix logistic loss. """ with tf.name_scope(name): inputs_shape = list(map(int, inputs.get_shape())) predictions_shape = list(map(int, predictions.get_shape())) nr_mix = int(predictions_shape[-1] / 10) logit_probs = predictions[:, :, :, :nr_mix] predictions = tf.reshape(predictions[:, :, :, nr_mix:], inputs_shape + [nr_mix * 3]) means = predictions[:, :, :, :, :nr_mix] log_scales = tf.maximum(predictions[:, :, :, :, nr_mix:2 * nr_mix], -7.) coeffs = tf.nn.tanh(predictions[:, :, :, :, 2 * nr_mix:3 * nr_mix]) inputs = tf.reshape(inputs, inputs_shape + [1]) + tf.zeros(inputs_shape + [nr_mix]) m2 = tf.reshape(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :] * inputs[:, :, :, 0, :], [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix]) m3 = tf.reshape( means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * inputs[:, :, :, 0, :] + coeffs[:, :, :, 2, :] * inputs[:, :, :, 1, :], [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix]) means = tf.concat([ tf.reshape(means[:, :, :, 0, :], [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix]), m2, m3 ], axis=3)
tensorflow.reshape
1,123
from tensorflow.python.training import moving_averages scale = self._make_var('scale', (in_ch,), init_constant=1) if not no_moving_average: moving_mean = self._make_var('moving_mean', (in_ch,), trainable=False, init_constant=0) moving_variance = self._make_var('moving_variance', (in_ch,), trainable=False, init_constant=1) if is_train: # For training, do batch norm with batch mean & variance # Update moving averages if training (X, mean, variance) = tf.nn.fused_batch_norm(X, scale, offset, epsilon=epsilon, is_training=True) update_mean = moving_averages.assign_moving_average(moving_mean, mean, decay) update_variance = moving_averages.assign_moving_average(moving_variance, variance, decay) with tf.control_dependencies([update_mean, update_variance]): X = tf.identity(X) else: # For prediction, do batch norm with computed moving mean & variance from training # Don't update moving averages if predicting (X, _, _) = tf.nn.fused_batch_norm(X, scale, offset, mean=moving_mean, variance=moving_variance, epsilon=epsilon, is_training=False) else:
tensorflow.python.training.moving_averages.assign_moving_average
1,124
import tensorflow as tf num_neg_per_batch = tf.minimum(neg_pos_ratio * num_pos_per_batch, tf.cast(num_prior, tf.float32) - 1) mask_hard_neg = tf.reshape( tf.cast(loss_class_idx_rank, tf.float32) < num_neg_per_batch, [num_batch * num_prior, 1]) # 3. classification loss including positive and negative examples loss_class_mask = tf.logical_or(mask_pos, mask_hard_neg) loss_class_mask_b = tf.broadcast_to(loss_class_mask, tf.shape(class_pred)) filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32), loss_class_mask) filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b) filter_class_pred = tf.reshape(filter_class_pred, [-1, num_class]) loss_class = tf.keras.losses.sparse_categorical_crossentropy( y_true=filter_class_true, y_pred=filter_class_pred) loss_class = tf.reduce_mean(loss_class) return loss_loc, loss_landm, loss_class return multi_box_loss
tensorflow.reshape
1,125
import tensorflow as tf pred_scores = policy['model']((c, ei, ev, v, tf.reduce_sum(n_cs, keepdims=True), tf.reduce_sum(n_vs, keepdims=True)), tf.convert_to_tensor(False)) # filter candidate variables pred_scores = tf.expand_dims(tf.gather(tf.squeeze(pred_scores, 0), cands), 0) elif policy['type'] == 'ml-competitor':
tensorflow.squeeze
1,126
import tensorflow as tf 0)) labeled_sdfs = tf.concat(labeled_sdfs, axis=0)
tensorflow.concat
1,127
import tensorflow as tf S_ = tf.nn.softmax(mask_logits(S, mask=mask_q)) mask_c = tf.expand_dims(self.c_mask, 2) S_T = tf.transpose(tf.nn.softmax(mask_logits(S, mask=mask_c), dim=1), (0, 2, 1)) self.c2q = tf.matmul(S_, self.q_embed_encoding) self.q2c = tf.matmul(tf.matmul(S_, S_T), self.c_embed_encoding) self.attention_outputs = [self.c_embed_encoding, self.c2q, self.c_embed_encoding * self.c2q, self.c_embed_encoding * self.q2c] N, PL, QL, CL, d, dc, nh = self._params() if self.config.fix_pretrained_vector: dc = self.char_mat.get_shape()[-1] with tf.variable_scope("Model_Encoder_Layer"): inputs = tf.concat(self.attention_outputs, axis=-1) self.enc = [conv(inputs, d, name="input_projection")] for i in range(3): if i % 2 == 0: self.enc[i] = tf.nn.dropout(self.enc[i], 1.0 - self.dropout) self.enc.append( residual_block(self.enc[i], num_blocks=1, num_conv_layers=2, kernel_size=5,
tensorflow.variable_scope
1,128
import tensorflow as tf with open(data_list_path) as f: data_path_list = [datasetRoot + x[:-1] for x in f.readlines()] n_data = len(data_path_list) dataset = tf.data.Dataset.from_tensor_slices(data_path_list) dataset = dataset.shuffle(n_data).repeat() dataset = dataset.map(self.data_map,num_parallel_calls=8)
tensorflow.data.Dataset.from_tensor_slices
1,129
import tensorflow as tf # action_prob = self.printn('action_prob shape: ', action_prob) # q_values = self.printn('q_values shape: ', q_values) # beta = self.printn('beta shape: ', beta) # ha(s): eta * (\varphi(s)^T * K^T * \Sigma^{-1} + W_{sa}) + wa(s)) ha = tf.matmul(varphis, param_eta * tf.matmul(Kt, prec) + Wsa) + wa # hss(s): eta * (\varphi(s)^T * K^T * \Sigma^{-1} * K * \varphi(s)) varphisKt = tf.matmul(varphis, Kt) hss = param_eta * tf.reduce_sum(tf.matmul(varphisKt, prec) * varphisKt, axis=1)
tensorflow.matmul
1,130
import tensorflow as tf else: if direction == 'forward': direct_mask = tf.greater(head_idxs, dep_idxs) # [bs, slh, sld] else: direct_mask = tf.less(head_idxs, dep_idxs) # [bs, slh, sld] # [bs, slh, slh] rep_mask_tile = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_head_mask, 2)) attn_mask = tf.logical_and(direct_mask, rep_mask_tile) # [bs, slh, sld] # tensor tile rep_map_tile = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_head, 1, 1]) # bs,slh,sld,vec with tf.variable_scope('attention'): # bs,sl,sl,vec
tensorflow.expand_dims
1,131
from tensorflow.python.ops import nn logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1) if proba: return nn.softmax(logits) else: return math_ops.argmax(logits, 1)
tensorflow.python.ops.nn.softmax
1,132
import tensorflow as tf c = self.int_to_bit(x_flat, num_bits=self.hparams.z_size, base=2) shape = common_layers.shape_list(c) new_shape = shape new_shape.append(self.hparams.num_blocks) new_shape.append(int(self.hparams.z_size / self.hparams.num_blocks)) c = tf.to_int32(tf.reshape(c, shape=new_shape)) h1_shape = shape_x h1_shape.append(self.hparams.hidden_size) h1 = tf.zeros(dtype=tf.float32, shape=h1_shape) c_int = self.bit_to_int( c, num_bits=int(self.hparams.z_size / self.hparams.num_blocks), base=2) c_hot = tf.one_hot(c_int, depth=self.hparams.block_v_size, axis=-1) c_hot_flat = tf.reshape( c_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size]) h1 = tf.matmul(tf.transpose(c_hot_flat, perm=[1, 0, 2]), self.means) h1 = tf.transpose(h1, perm=[1, 0, 2]) h1 = tf.reshape(h1, shape=h1_shape) h1_shape[0] = self.hparams.batch_size h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2") res = tf.layers.dense( tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin") return res def discrete_bottleneck(self, x):
tensorflow.reshape
1,133
import tensorflow as tf """ feature_map = _TRAIN_FEATURE_MAP if self._is_training else _EVAL_FEATURE_MAP features = tf.parse_single_example(serialized_data, feature_map)
tensorflow.parse_single_example
1,134
import tensorflow as tf with tf.variable_scope("target", reuse=False): # Create the value network _, _, value_target = self.target_policy.make_critics(self.processed_next_obs_ph, create_qf=False, create_vf=True) self.value_target = value_target if self.n_step: _,_,value_target_n = self.policy_tf.make_critics(self.processed_next_obs_ph_n, create_qf=False, create_vf=True,reuse=True) self.value_target_n = value_target_n with tf.variable_scope("loss", reuse=False): # Take the min of the two Q-Values (Double-Q Learning) min_qf_pi = tf.minimum(qf1_pi, qf2_pi) # Target for Q value regression q_backup = tf.stop_gradient( self.rewards_ph + (1 - self.terminals_ph) * self.gamma * self.value_target ) # Compute Q-Function loss
tensorflow.variable_scope
1,135
from tensorflow.python.ops import math_ops tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0) tf_index = math_ops.cast(tf_index, dtypes.int32) # Now, we have the implicit threshold, so compute the sensitivity: return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + kepsilon, name)
tensorflow.python.ops.math_ops.div
1,136
import tensorflow as tf covariance_matrix, tf.matrix_transpose(covariance_matrix),
tensorflow.matrix_transpose
1,137
import tensorflow as tf scores = tf.constant([], dtype=tf.float32) classes = tf.constant([], dtype=tf.int32) (nms_masks1, nms_scores1, nms_classes1, _) = isu.instance_non_maximum_suppression_1d_scores( masks, scores, classes, min_score_thresh=0.65, min_iou_thresh=0.5, is_class_agnostic=True) nms_masks_expected1 = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32) nms_scores_expected1 = tf.constant([], dtype=tf.float32) nms_classes_expected1 = tf.constant([], dtype=tf.int32) (nms_masks2, nms_scores2, nms_classes2, _) = isu.instance_non_maximum_suppression_1d_scores( masks, scores, classes, min_score_thresh=0.65, min_iou_thresh=0.5, is_class_agnostic=False) nms_masks_expected2 = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32)
tensorflow.constant
1,138
import tensorflow as tf Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5) # Compute u_{h+1} and v_{h+1} U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2) Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.)
tensorflow.cos
1,139
import tensorflow as tf for size in config.policy_layers: x = tf.layers.dense(x, size, activation=tf.nn.relu) mean = tf.layers.dense( x, action_space.shape[0], activation=tf.tanh, kernel_initializer=mean_weights_initializer) logstd = tf.get_variable( "logstd", mean.shape[2:], tf.float32, logstd_initializer) logstd = tf.tile( logstd[None, None], [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2)) with tf.variable_scope("value"): x = flat_observations for size in config.value_layers: x = tf.layers.dense(x, size, activation=tf.nn.relu) value = tf.layers.dense(x, 1)[..., 0] mean = tf.check_numerics(mean, "mean") logstd = tf.check_numerics(logstd, "logstd") value = tf.check_numerics(value, "value") policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd)) return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2)) def clip_logits(logits, config): logits_clip = getattr(config, "logits_clip", 0.) if logits_clip > 0: min_logit = tf.reduce_min(logits) return tf.minimum(logits - min_logit, logits_clip) else:
tensorflow.check_numerics
1,140
import tensorflow as tf stddev=0.02, data_format='NHWC',padding='SAME',epsilon=1e-9) : with tf.variable_scope(name) : assert data_format == 'NHWC' self.v = tf.get_variable('v', [k_h, k_w, input_dim, output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) self.g = tf.get_variable('g',[output_dim], initializer=tf.constant_initializer(float('nan'))) self.b = tf.get_variable('b',[output_dim],
tensorflow.truncated_normal_initializer
1,141
import tensorflow as tf result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with tf.gfile.GFile(output_eval_file, "w") as writer: tf.logging.info("***** Eval results *****") for key in sorted(result.keys()): tf.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key])))
tensorflow.logging.info
1,142
import tensorflow as tf activation_fn: function bn: bool, whether to use batch norm bn_decay: float or float tensor variable in [0,1] is_training: bool Tensor variable Returns: Variable tensor """ with tf.variable_scope(scope) as sc: kernel_h, kernel_w = kernel_size num_in_channels = inputs.get_shape()[-1].value kernel_shape = [kernel_h, kernel_w, num_in_channels, num_output_channels] kernel = _variable_with_weight_decay('weights', shape=kernel_shape, use_xavier=use_xavier,
tensorflow.variable_scope
1,143
import tensorflow as tf tf.cond(reset_ph, lambda: perturb_vars(original_scope="model", perturbed_scope="perturbed_model/model"), lambda: tf.group(*[])), tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)), update_param_noise_thres_expr,
tensorflow.Variable
1,144
import tensorflow as tf If the layer input has more than two axes, it needs to be flatten by using :class:`FlattenLayer`. """ @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, n_units=100, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=0.1), b_init=tf.constant_initializer(value=0.0), W_init_args=None, b_init_args=None, name='dense', ): super(DenseLayer, self).__init__(prev_layer=prev_layer, name=name) logging.info("DenseLayer %s: %d %s" % (name, n_units, act.__name__))
tensorflow.truncated_normal_initializer
1,145
import tensorflow as tf import numpy as np def safe_get(name, *args, **kwargs): """ Same as tf.get_variable, except flips on reuse_variables automatically """ try: return tf.get_variable(name, *args, **kwargs) except ValueError: tf.get_variable_scope().reuse_variables() return tf.get_variable(name, *args, **kwargs) def init_weights(shape, name=None): shape = tuple(shape) weights = np.random.normal(scale=0.01, size=shape).astype('f') return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32) def init_bias(shape, name=None): return safe_get(name, initializer=tf.zeros(shape, dtype=tf.float32)) def init_fc_weights_xavier(shape, name=None): fc_initializer = tf.contrib.layers.xavier_initializer(dtype=tf.float32) return safe_get(name, list(shape), initializer=fc_initializer, dtype=tf.float32) def init_conv_weights_xavier(shape, name=None): conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=tf.float32) return safe_get(name, list(shape), initializer=conv_initializer, dtype=tf.float32)
tensorflow.constant_initializer
1,146
import tensorflow as tf else: # Do not perturb, just assign. op = tf.assign(perturbed_var, var) perturb_ops.append(op) assert len(perturb_ops) == len(all_vars) return tf.group(*perturb_ops) # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy # of the network and measures the effect of that perturbation in action space. If the perturbation # is too big, reduce scale of perturbation, otherwise increase.
tensorflow.group
1,147
import tensorflow as tf return h def phase_shift_3d(x, r): batch_size, d, h, w, c = x.get_shape().as_list() x = tf.reshape(x, (batch_size, d, h, w, r, r, r)) for ns in [d, h, w]: x = tf.split(x, ns, 1) x = tf.concat([tf.squeeze(v, 1) for v in x], 3) return tf.reshape(x, (batch_size, d*r, h*r, w*r, 1)) def subpixel_conv3d(x, r, out_channels): x = tf.split(x, out_channels, 4)
tensorflow.split
1,148
import tensorflow as tf # [batch_size, batch_size, num_labels]. logits_difference = tf.expand_dims(logits, 0) - tf.expand_dims(logits, 1)
tensorflow.expand_dims
1,149
import tensorflow as tf # For ACER def get_by_index(x, idx): assert(len(x.get_shape()) == 2) assert(len(idx.get_shape()) == 1) idx_flattened = tf.range(0, x.shape[0]) * x.shape[1] + idx y = tf.gather(tf.reshape(x, [-1]), # flatten input idx_flattened) # use flattened indices return y
tensorflow.range
1,150
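A standalone sketch (not part of the table, TF 1.x assumed) of the flatten-and-gather indexing trick in the record above, with tiny made-up tensors:

import tensorflow as tf

x = tf.constant([[10., 11., 12.],
                 [20., 21., 22.]])        # shape [2, 3]
idx = tf.constant([2, 0])                 # one column index per row
# row_i * n_cols + idx_i turns per-row column indices into flat indices.
flat_idx = tf.range(0, 2) * 3 + idx       # -> [2, 3]
y = tf.gather(tf.reshape(x, [-1]), flat_idx)

with tf.Session() as sess:
    print(sess.run(y))                    # [12. 20.]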
import tensorflow as tf loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small)) if hard_ratio < 1.0: hard_num = tf.cast(tools.shape(pred1)[0] * hard_ratio, tf.int32) loss = tf.reshape(loss, [-1]) hard_loss, _ = tf.math.top_k(loss, k=hard_num) return hard_loss return loss
tensorflow.math.top_k
1,151
import tensorflow as tf _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0]) self_attention = output_op.stack() self_attention = tf.transpose(self_attention, perm = [1, 0, 2]) return self_attention
tensorflow.transpose
1,152
import tensorflow as tf self.unique_id = unique_id self.text = text def read_examples(input_file): """Read a list of `InputExample`s from an input file.""" examples = [] unique_id = 0 with tf.gfile.GFile(input_file, "r") as reader: while True: line = tokenization.convert_to_unicode(reader.readline()) if not line: break line = line.strip() unique_id += 1 examples.append(
tensorflow.gfile.GFile
1,153
import tensorflow as tf model_loss = model.loss(score_maps, f_score, geo_maps, f_geometry, training_masks) total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)) # add summary if reuse_variables is None: tf.summary.image('input', images) tf.summary.image('score_map', score_maps) tf.summary.image('score_map_pred', f_score * 255) tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1]) tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1]) tf.summary.image('training_masks', training_masks) tf.summary.scalar('model_loss', model_loss) tf.summary.scalar('total_loss', total_loss) return total_loss, model_loss def average_gradients(tower_grads): average_grads = [] for grad_and_vars in zip(*tower_grads):
tensorflow.summary.image
1,154
import tensorflow.contrib.graph_editor as ge ts_all = ge.filter_ts(fwd_ops, True) # get the tensors ts_all = [t for t in ts_all if '/read' not in t.name] ts_all = set(ts_all) - set(xs) - set(ys) # construct list of tensors to checkpoint during forward pass, if not # given as input if type(checkpoints) is not list: if checkpoints == 'collection': checkpoints = tf.get_collection('checkpoints') elif checkpoints == 'speed': # checkpoint all expensive ops to maximize running speed checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul') elif checkpoints == 'memory': # remove very small tensors and some weird ops def fixdims(t): # tf.Dimension values are not compatible with int, convert manually try: return [int(e if e.value is not None else 64) for e in t] except: return [0] # unknown shape ts_all = [t for t in ts_all if np.prod(fixdims(t.shape)) > MIN_CHECKPOINT_NODE_SIZE] ts_all = [t for t in ts_all if 'L2Loss' not in t.name]
tensorflow.contrib.graph_editor.filter_ts_from_regex
1,155
import tensorflow as tf features["label_ids"] = create_int_feature([feature.label_id]) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) return tf_example def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": tf.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64), "is_real_example": tf.FixedLenFeature([], tf.int64), } def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64:
tensorflow.FixedLenFeature
1,156
import tensorflow as tf # Build the actual training and evaluation models self._train_graph(data) self._eval_graph(data) self.summaries = tf.summary.merge_all() # Prediction network with feed_dict self.pred_in = {i: tf.placeholder(self.input_spec[i]['type'], shape=s, name=i) for i, s in self.data_shape.items()} self._pred_graph(self.pred_in) # Start session sess_config = tf.ConfigProto(device_count={'GPU': self.n_gpus}) sess_config.gpu_options.allow_growth = True self.sess = tf.Session(config=sess_config) # Register tf dataset handles if self.datasets: self.dataset_handles = {} for n, i in self.dataset_iterators.items(): self.dataset_handles[n] = self.sess.run(i.string_handle()) self.sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
tensorflow.ConfigProto
1,157
import tensorflow as tf q = tf.nn.conv2d(p, w, strides, 'VALID', data_format='NCHW', use_cudnn_on_gpu=True) else: q = tf.nn.conv2d(p, w, strides, 'VALID', use_cudnn_on_gpu=True) # Allocate output tensor. if use_var: y = sbnet_module.sparse_scatter_var( q, indices.bin_counts, indices.active_block_indices, x, dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32), dynamic_bstride=tf.constant(block_params.bstrides, dtype=tf.int32), dynamic_boffset=tf.constant([0, 0], dtype=tf.int32), add=False, transpose=transpose, atomic=atomic) else: y = sbnet_module.sparse_scatter( q, indices.bin_counts, indices.active_block_indices, x, dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32), dynamic_bstride=tf.constant(block_params.bsize_out, dtype=tf.int32), dynamic_boffset=tf.constant([0, 0], dtype=tf.int32),
tensorflow.constant
1,158
import tensorflow as tf images, labels, ul_images, ul_u, lr, mom) scope.reuse_variables() # Build eval graph losses_eval_train = build_eval_graph(images_eval_train, labels_eval_train, ul_images_eval_train, ul_u_eval_train) losses_eval_test = build_eval_graph(images_eval_test, labels_eval_test, images_eval_test, ul_u_eval_test) init_op = tf.global_variables_initializer() if not FLAGS.log_dir: logdir = None writer_train = None writer_test = None else: logdir = FLAGS.log_dir writer_train = tf.summary.FileWriter(FLAGS.log_dir + "/train", g) writer_test = tf.summary.FileWriter(FLAGS.log_dir + "/test", g) saver = tf.train.Saver(tf.global_variables()) sv = tf.train.Supervisor( is_chief=True, logdir=logdir, init_op=init_op, init_feed_dict={lr: FLAGS.learning_rate, mom: FLAGS.mom1}, saver=saver, global_step=global_step, summary_op=None, summary_writer=None, save_model_secs=150, recovery_wait_secs=0) ul_images_np = np.load("train_images.npy").reshape((-1, 32, 32, 3))
tensorflow.summary.FileWriter
1,159
import tensorflow as tf # Perform affine mapping at each layer of the neural network Z = tf.layers.dense(Z, n_basis//2) # Define variational parameters alpha_mean = tf.get_variable('alpha_mean_layer'+str(h), shape=[1, 1, n_basis, n_out], initializer=tf.random_normal_initializer()) alpha_logstd = tf.get_variable('alpha_logstd_layer'+str(h), shape=[1, 1, n_basis, n_out], initializer=tf.random_normal_initializer()) alpha_std = tf.exp(alpha_logstd)
tensorflow.random_normal_initializer
1,160
import tensorflow as tf a `float` `scalar`, KL divergence. """ logits = tf.stop_gradient(logits) weights = _end_of_seq_mask(labels, vocab_size)
tensorflow.stop_gradient
1,161
import tensorflow as tf within the assets subdirectory, we will return a None. Args: vocab_filename: The vocabulary name to lookup. """ mapping_path = os.path.join(self._transformed_metadata_dir, self.ASSET_MAP) mapping = {} if tf.io.gfile.exists(mapping_path): with tf.io.gfile.GFile(mapping_path) as f: mapping = json.loads(f.read()) if vocab_filename in mapping: vocab_path = os.path.join(self.transform_savedmodel_dir, tf.saved_model.ASSETS_DIRECTORY, mapping[vocab_filename]) if tf.io.gfile.exists(vocab_path): return vocab_path prefix = os.path.join(self.transform_savedmodel_dir, tf.saved_model.ASSETS_DIRECTORY, sanitized_vocab_filename(filename=vocab_filename)) files = tf.io.gfile.glob(prefix) + tf.io.gfile.glob( '{}.tfrecord.gz'.format(prefix)) if not files: return None if len(files) != 1: raise ValueError('Found too many vocabulary files: {}'.format(files)) return files[0] def _vocabulary_size_from_annotations(self,
tensorflow.io.gfile.exists
1,162
import tensorflow as tf def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32) else: is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32) is_training = (mode == tf.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint:
tensorflow.cast
1,163
import tensorflow as tf cur_label_weight = tf.math.sign(sliced_label[i] - sliced_label[j]) cur_propensity = sliced_propensity[i] * sliced_label[i] + sliced_propensity[j] * sliced_label[j] cur_pair_loss = -tf.exp(sliced_output[i]) / (tf.exp(sliced_output[i]) + tf.exp(sliced_output[j])) if loss == None: loss = cur_label_weight * cur_pair_loss * cur_propensity loss += cur_label_weight * cur_pair_loss * cur_propensity batch_size = tf.shape(labels[0])[0] return tf.reduce_sum(loss) / tf.cast(batch_size, dtypes.float32) #/ (tf.reduce_sum(propensity_weights)+1) def click_weighted_log_loss(self, output, labels, propensity_weights, name=None): """Computes pointwise sigmoid loss with propensity weighting.
tensorflow.shape
1,164
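A minimal sketch (not part of the table, TF 1.x assumed) of why the record above uses tensorflow.shape: it reads the batch size at run time when the static shape is unknown:

import tensorflow as tf

labels = tf.placeholder(tf.float32, [None, 5])   # batch dimension unknown statically
batch_size = tf.shape(labels)[0]                 # int32 scalar, resolved at run time

with tf.Session() as sess:
    print(sess.run(batch_size, feed_dict={labels: [[0.0] * 5] * 7}))   # 7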
import tensorflow as tf model = linear_regression.LinearModel() dataset = linear_regression.synthetic_dataset( true_w, true_b, noise_level=0., batch_size=64, num_batches=40) with tf.device(device()): optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1) linear_regression.fit(model, dataset, optimizer, logdir=self._tmp_logdir) self.assertAllClose(true_w, model.variables[0].numpy(), rtol=1e-2) self.assertAllClose(true_b, model.variables[1].numpy(), rtol=1e-2)
tensorflow.train.GradientDescentOptimizer
1,165
import tensorflow as tf tf.summary.scalar("Learning Rate", m.lr) with tf.name_scope("Valid"): valid_input = PTBInput(config=config, data=valid_data, name="ValidInput") with tf.variable_scope("Model", reuse=True, initializer=initializer): mvalid = PTBModel(is_training=False, config=config, input_=valid_input) tf.summary.scalar("Validation Loss", mvalid.cost) with tf.name_scope("Test"): test_input = PTBInput( config=eval_config, data=test_data, name="TestInput") with tf.variable_scope("Model", reuse=True, initializer=initializer): mtest = PTBModel(is_training=False, config=eval_config, input_=test_input) models = {"Train": m, "Valid": mvalid, "Test": mtest}
tensorflow.name_scope
1,166
import tensorflow as tf th = 0.008 max_step = 600 lr = 10 elif mode == 'ultra': if not tf.test.is_gpu_available(): print("Please enable GPU for ultra setting...") sys.exit(1) th = 0.01
tensorflow.test.is_gpu_available
1,167
import tensorflow as tf _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0]) self_attention = output_op.stack() self_attention = tf.transpose(self_attention, perm = [1, 0, 2]) return self_attention def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. facts = tf.concat(facts, 2) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) # Trainable parameters mask = tf.equal(mask, tf.ones_like(mask)) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag) query = prelu(query) queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
tensorflow.array_ops.transpose
1,168
import tensorflow as tf self._dropout_keep_prob = dropout_keep_prob self._out_vocab_size = out_vocab_size self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len], name='x') self.y = tf.placeholder(tf.float32, [batch_size, max_sequence_len, out_vocab_size], name='y') # The bidirectional rnn code requires seq_lens as int64 self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens') self.example_weights = tf.placeholder(tf.float32, [batch_size], name='example_weights') embeddings = c2v.GetEmbeddings(self.x) self._inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(1, max_sequence_len, embeddings)] # Need to prepare a mask to zero out the padding symbols.
tensorflow.placeholder
1,169
import tensorflow as tf self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.image_additional_channels] .shape.as_list(), [5, 6, 2]) def test_keypoints(self): input_tensor_dict = { fields.InputDataFields.groundtruth_keypoints: tf.placeholder(tf.float32, [None, 16, 4]), fields.InputDataFields.groundtruth_keypoint_visibilities: tf.placeholder(tf.bool, [None, 16]), } padded_tensor_dict = inputs.pad_input_data_to_static_shapes( tensor_dict=input_tensor_dict, max_num_boxes=3, num_classes=3, spatial_image_shape=[5, 6]) self.assertAllEqual(
tensorflow.placeholder
1,170
import tensorflow.contrib.layers as layers def atari_model(img_in, num_actions, scope, reuse=False): # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf with tf.variable_scope(scope, reuse=reuse): out = img_in with tf.variable_scope("convnet"): # original architecture out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu) out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu) out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu) out = layers.flatten(out) with tf.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu) out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out
tensorflow.contrib.layers.convolution2d
1,171
import tensorflow as tf return tf.nn.dropout(bottom, ratio, name=name) def _anchor_target_layer(self, rpn_cls_score, name): with tf.variable_scope(name): # the index here is taken over all anchors # (1, 1, A * height, width) # (1, height, width, A * 4) # (1, height, width, A * 4) # (1, height, width, A * 4) rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = tf.py_func( anchor_target_layer, [rpn_cls_score, self._gt_boxes, self._im_info, self._feat_stride, self._anchors, self._num_anchors], [tf.float32, tf.float32, tf.float32, tf.float32]) #self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5]) #gt_boxes: rescaled coordinates plus the class label of each box rpn_labels.set_shape([1, 1, None, None]) rpn_bbox_targets.set_shape([1, None, None, self._num_anchors * 4]) rpn_bbox_inside_weights.set_shape([1, None, None, self._num_anchors * 4])
tensorflow.py_func
1,172
import tensorflow as tf ] for i in range(len(output_sizes)): expand_W = tf.get_variable("W_%d" % i, [current_size, output_sizes[i]]) expand_b = tf.get_variable("b_%d" % i, [output_sizes[i]]) output_data = tf.nn.bias_add(tf.matmul(output_data, expand_W), expand_b) output_data = tf.nn.elu(output_data) current_size = output_sizes[i] #expand_W = tf.get_variable("final_W", [current_size, 1])
tensorflow.matmul
1,173
import tensorflow as tf y = tf.placeholder(tf.int32, [batch_size, num_steps], name='y') init_state = tf.zeros([batch_size, state_size]) '''RNN input''' rnn_inputs = tf.one_hot(x, num_classes) #rnn_inputs = tf.unstack(x_one_hot, axis=1)
tensorflow.one_hot
1,174
import tensorflow as tf """Concatenates all `datasets` and saves to `filename`.""" datatypes_to_clean = datatypes_to_clean or [] filename = os.path.join(tmp_dir, filename) lang1_fname = filename + ".lang1" lang2_fname = filename + ".lang2" if tf.gfile.Exists(lang1_fname) and tf.gfile.Exists(lang2_fname): tf.logging.info("Skipping compile data, found files:\n%s\n%s", lang1_fname, lang2_fname) return filename with tf.gfile.GFile(lang1_fname, mode="w") as lang1_resfile: with tf.gfile.GFile(lang2_fname, mode="w") as lang2_resfile: for dataset in datasets: url = dataset[0] compressed_filename = os.path.basename(url) compressed_filepath = os.path.join(tmp_dir, compressed_filename) if url.startswith("http"): generator_utils.maybe_download(tmp_dir, compressed_filename, url) if compressed_filename.endswith(".zip"): zipfile.ZipFile(os.path.join(compressed_filepath), "r").extractall(tmp_dir)
tensorflow.gfile.GFile
1,175
import tensorflow as tf # In case the shape is immutable. shape = list(shape) # In case shape represents a vector, e.g. [None, [2, 2, 2]] if shape[0] is None: shape[0] = np.ones(len(shape[1]), dtype=int) # In case shape represents a vector, e.g. [[2, 2, 2], None] if shape[1] is None: shape[1] = np.ones(len(shape[0]), dtype=int) shape = np.array(shape) tt_rank = np.array(tt_rank) _validate_input_parameters(is_tensor=False, shape=shape, tt_rank=tt_rank) n_in = np.prod(shape[0]) lamb = 2.0 / n_in with tf.name_scope(name): return random_matrix(shape, tt_rank=tt_rank, stddev=np.sqrt(lamb), dtype=dtype) def lecun_initializer(shape, tt_rank=2, dtype=tf.float32, name='t3f_lecun_initializer'): """Constructs a random TT matrix with entrywise variance 1.0 / n_in Args: shape: 2d array, shape[0] is the shape of the matrix row-index, shape[1] is the shape of the column index. shape[0] and shape[1] should have the same number of elements (d) Also supports omitting one of the dimensions for vectors, e.g. lecun_initializer([[2, 2, 2], None])
tensorflow.name_scope
1,176
import tensorflow as tf update_ops = set(update_ops) # Make sure update_ops are computed before total_loss. if update_ops: with tf.control_dependencies(update_ops): barrier = tf.no_op(name='update_barrier') self.d_losses[-1] = control_flow_ops.with_dependencies([barrier], self.d_losses[-1])
tensorflow.control_dependencies
1,177
import tensorflow as tf w_initializer, b_initializer = tf.random_normal_initializer(0., 0.1), tf.constant_initializer(0.0) # ------------------ build evaluate_net ------------------ with tf.variable_scope('eval_net'): a_fc1 = tf.layers.dense(self.s, 128, tf.nn.relu, kernel_initializer=w_initializer, bias_initializer=b_initializer, name='agent_fc1_e') # a_fc2 = tf.layers.dense(a_fc1, 128, tf.nn.relu, kernel_initializer=w_initializer,
tensorflow.variable_scope
1,178
import tensorflow as tf + 0.00392377*t**(-8)) a = 7.5 return __phi_f(tf.minimum(x, a)) - __phi_f(a) + __phi_g(tf.maximum(x, a)) N = tf.cast(tf.shape(X)[0], tf.float32)
tensorflow.maximum
1,179
import tensorflow as tf # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy # of the network and measures the effect of that perturbation in action space. If the perturbation # is too big, reduce scale of perturbation, otherwise increase. q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func") perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func") kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1) mean_kl = tf.reduce_mean(kl) def update_scale(): with tf.control_dependencies([perturb_for_adaption]): update_scale_expr = tf.cond(mean_kl < param_noise_threshold, lambda: param_noise_scale.assign(param_noise_scale * 1.01),
tensorflow.nn.softmax
1,180
import tensorflow as tf rep_tensor, rep_mask, block_len=5, scope=None, keep_prob=1., is_train=None, wd=0., activation='elu', hn=None): with tf.variable_scope(scope or 'bi_directional_simple_block_attn'): fw_attn_res = simple_block_attention( rep_tensor, rep_mask, block_len, "forward_attn", "forward", keep_prob, is_train, wd, activation, hn) bw_attn_res = simple_block_attention( rep_tensor, rep_mask, block_len, "backward_attn", "backward", keep_prob, is_train, wd, activation, hn) attn_res = tf.concat([fw_attn_res, bw_attn_res], -1) return attn_res def simple_block_attention( rep_tensor, rep_mask, block_len=5, scope=None, direction=None, keep_prob=1., is_train=None, wd=0., activation='elu', hn=None): assert direction is not None def scaled_tanh(x, scale=5.):
tensorflow.concat
1,181
import tensorflow as tf pred1 = tf.slice(batch, [0, 0], [num_sam, 1]) def uniform(): batch2 = tf.gather(batch, tf.random.shuffle(index)) pred2 = tf.slice(batch2, [0, 0], [num_sam, 1]) tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1]) return pred1, pred2, tgt1, tgt2 return uniform
tensorflow.slice
1,182
import tensorflow as tf #q [-1,head,n_ctx,emb] v [-1,head,emb,n_ctx] v [-1,head,n_ctx,emb] q = split_heads(q, n_head) k = split_heads(k, n_head, k=True) v = split_heads(v, n_head) #a [-1,head,n_ctx,emb] a = _attn(q, k, v, train=train, scale=scale) #a [-1,n_ctx,head,emb] a = merge_heads(a) #a [-1,n_ctx,emb] a = conv1d(a, 'c_proj', n_state, 1, train=train) a = dropout(a, resid_pdrop, train) return a def mlp(x, scope, n_state, train=False): with tf.variable_scope(scope): nx = shape_list(x)[-1] act = act_fns[afn] h = act(conv1d(x, 'c_fc', n_state, 1, train=train)) h2 = conv1d(h, 'c_proj', nx, 1, train=train) h2 = dropout(h2, resid_pdrop, train) return h2 def block(x, scope, train=False, scale=False): with tf.variable_scope(scope): #nx = emb_size nx = shape_list(x)[-1] #a [-1,n_ctx,emb] a = attn(x, 'attn', nx, n_head, train=train, scale=scale)
tensorflow.variable_scope
1,183
import tensorflow as tf state_keep_prob=encoder.rnn_state_keep_prob, variational_recurrent=encoder.pervasive_dropout, dtype=tf.float32, input_size=input_size) return cell batch_size = tf.shape(encoder_inputs_)[0] time_steps = tf.shape(encoder_inputs_)[1] if embeddings is not None: flat_inputs = tf.reshape(encoder_inputs_, [tf.multiply(batch_size, time_steps)]) flat_inputs = tf.nn.embedding_lookup(embeddings, flat_inputs) encoder_inputs_ = tf.reshape(flat_inputs,
tensorflow.shape
1,184
import tensorflow as tf if FLAGS.allow_mix_precision: custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision") if FLAGS.auto_tune: custom_op.parameter_map["auto_tune_mode"].s = tf.compat.as_bytes("RL,GA") with tf.Session(config=config) as sess: if FLAGS.restore: print('continue training from previous checkpoint') ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_path) saver.restore(sess, ckpt)
tensorflow.Session
1,185
import tensorflow as tf W8 = utils.weight_variable([1, 1, 4096, 150], name="W8") b8 = utils.bias_variable([150], name="b8") # W_h = utils.weight_variable([1, 7, 7, 4], name="Wh") conv8 = tf.reshape(utils.conv2d_basic(relu_dropout7, W8, b8),[-1,4*4*150]) fc1 = tf.reshape(tf.layers.dense(conv8,4*4*150,activation = tf.nn.relu),[-1,4,4,150]) concat1 = tf.concat([fc1, z],axis = 3) # annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1") print("###########################################################") print(fc1) # now to upscale to actual image size deconv_shape1 = image_net["pool4"].get_shape() W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, 278], name="W_t1") b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1") conv_t1 = utils.conv2d_transpose_strided(concat1, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"])) fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")
tensorflow.concat
1,186
import tensorflow as tf residuals -- list containing the value of the residuals at predefinite training intervals. As we are only interested in the sign of the residuals, we define it as the difference between the predicted output \hat{y} (an in the code) and the training labels y (Y in the code). """ ops.reset_default_graph() # reset the computational graph tf.set_random_seed(1) # to keep consistent results #----------training/test set features------------------------- X_tr = np.transpose(X_train) # the transpose is taken to adapt to TF convenntion. This is also f , m = X_tr.shape # f: number of features, m: number of training examples
tensorflow.set_random_seed
1,187
import tensorflow as tf self._eval_image_summary('pred', expected) self._eval_image_summary('nois', noisy) def _eval_image_summary(self, name, encdoding_batch): summary = self.image_summaries[name].eval(feed_dict={self.encoding: encdoding_batch}) self.summary_writer.add_summary(summary, global_step=self.get_past_epochs()) def _add_decoding_summary(self, name, var, collection='train'): var = var[:FLAGS.visualiza_max] var = tf.concat(tf.unstack(var), axis=0) var = tf.expand_dims(var, dim=0) color_s = tf.summary.image(name, var[..., :3], max_outputs=FLAGS.visualiza_max) var = tf.expand_dims(var[..., 3], dim=3) bw_s = tf.summary.image('depth_' + name, var, max_outputs=FLAGS.visualiza_max) return tf.summary.merge([color_s, bw_s]) # TRAINING PROGRESS EVENTS def _on_training_start(self, sess): # Writers and savers self.summary_writer = tf.summary.FileWriter(FLAGS.logdir, sess.graph) self.saver = tf.train.Saver()
tensorflow.expand_dims
1,188
import tensorflow as tf else: return 0 # For ACER def get_by_index(x, idx): assert(len(x.get_shape()) == 2) assert(len(idx.get_shape()) == 1) idx_flattened = tf.range(0, x.shape[0]) * x.shape[1] + idx y = tf.gather(tf.reshape(x, [-1]), # flatten input idx_flattened) # use flattened indices return y def check_shape(ts,shapes): i = 0 for (t,shape) in zip(ts,shapes): assert t.get_shape().as_list()==shape, "id " + str(i) + " shape " + str(t.get_shape()) + str(shape) i += 1
tensorflow.reshape
1,189
import tensorflow as tf act_t_ph = tf.placeholder(tf.int32, [None], name="action") rew_t_ph = tf.placeholder(tf.float32, [None], name="reward") done_mask_ph = tf.placeholder(tf.float32, [None], name="done") importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight") # q scores for actions which we know were selected in the given state. q_t_selected = tf.reduce_sum(step_model.q_values * tf.one_hot(act_t_ph, n_actions), axis=1) # compute estimate of best possible value starting from state at t + 1 if double_q: q_tp1_best_using_online_net = tf.argmax(double_q_values, axis=1) q_tp1_best = tf.reduce_sum(target_policy.q_values * tf.one_hot(q_tp1_best_using_online_net, n_actions), axis=1)
tensorflow.one_hot
1,190
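A standalone sketch (not part of the table, TF 1.x assumed) of the mask-and-sum selection the record above builds with tensorflow.one_hot, picking each row's chosen-action Q-value:

import tensorflow as tf

q_values = tf.constant([[1., 5., 3.],
                        [2., 0., 4.]])         # [batch, n_actions]
actions = tf.constant([1, 2])                  # chosen action per row
q_selected = tf.reduce_sum(q_values * tf.one_hot(actions, 3), axis=1)

with tf.Session() as sess:
    print(sess.run(q_selected))                # [5. 4.]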
from tensorflow.python.ops import sparse_ops name='expanded_shape') expanded = sparse_ops.sparse_reshape(
tensorflow.python.ops.sparse_ops.sparse_reshape
1,191
import tensorflow as tf # use the initializers from torch with argscope([Conv2D, Deconv2D], use_bias=False, W_init=tf.random_normal_initializer(stddev=0.02)), \ argscope([Conv2D, Deconv2D, InstanceNorm], data_format='NCHW'), \ argscope(LeakyReLU, alpha=0.2): with tf.variable_scope('gen'): with tf.variable_scope('B'): AB = self.generator(A) with tf.variable_scope('A'): BA = self.generator(B) ABA = self.generator(AB) with tf.variable_scope('B'):
tensorflow.variable_scope
1,192
import tensorflow as tf A = tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)) return (1.0/(N*N)) * tf.reduce_sum(N0(A, 2*y)) + N0(0.0, 2.0 + 2*y) - (2/N) * tf.reduce_sum(N0(X, 1.0 + 2*y)) def cw_2d(X, y=None): def __phi(x): def __phi_f(s): t = s/7.5 return tf.exp(-s/2) * (1 + 3.5156229*t**2 + 3.0899424*t**4 + 1.2067492*t**6 + 0.2659732*t**8 + 0.0360768*t**10 + 0.0045813*t**12) def __phi_g(s): t = s/7.5 return tf.sqrt(2/s) * (0.39894228 + 0.01328592*t**(-1) + 0.00225319*t**(-2) - 0.00157565*t**(-3) + 0.0091628*t**(-4) - 0.02057706*t**(-5) + 0.02635537*t**(-6) - 0.01647633*t**(-7) + 0.00392377*t**(-8))
tensorflow.exp
1,193
import tensorflow as tf Args: output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is the ranking score of the corresponding example. labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a relevant example. propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element. name: A string used as the name for this variable scope. Returns: (tf.Tensor) A single value tensor containing the loss. """ loss = None with tf.name_scope(name, "click_weighted_log_loss",[output]): click_prob = tf.sigmoid(output) loss = tf.losses.log_loss(labels, click_prob, propensity_weights) return loss
tensorflow.name_scope
1,194
from tensorflow.python.ops import state_ops count = _create_local('count', shape=[]) if weights is not None: weights = math_ops.to_float(weights) values = math_ops.mul(values, weights) num_values = math_ops.reduce_sum(_broadcast_weights(weights, values)) else: num_values = math_ops.to_float(array_ops.size(values)) total_compute_op = state_ops.assign_add(total, math_ops.reduce_sum(values)) count_compute_op = state_ops.assign_add(count, num_values) mean = _safe_div(total, count, 'value') with ops.control_dependencies([total_compute_op, count_compute_op]): update_op = _safe_div(total, count, 'update_op') if metrics_collections: ops.add_to_collections(metrics_collections, mean) if updates_collections:
tensorflow.python.ops.state_ops.assign_add
1,195
import tensorflow as tf normed: batch-normalized maps """ with tf.variable_scope(scope) as sc: num_channels = inputs.get_shape()[-1].value
tensorflow.variable_scope
1,196
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils estimator_test_utils.assert_in_range(0.9, 1.0, 'accuracy/threshold_0.500000_mean', metrics) estimator_test_utils.assert_in_range( 0.9, 1.0, 'precision/positive_threshold_0.500000_mean', metrics) estimator_test_utils.assert_in_range( 0.9, 1.0, 'recall/positive_threshold_0.500000_mean', metrics) self._assertCommonMetrics(metrics) def _assertCommonMetrics(self, metrics):
tensorflow.contrib.learn.python.learn.estimators.estimator_test_utils.assert_in_range
1,197
import tensorflow as tf 1) for i in range(num_boxes)], axis=0) rotations_y = tf.reshape(rotations_y, [-1, 1]) predicted_boxes = tf.concat([detections['translations_3d'], detections['sizes_3d'], rotations_y], axis=1) labeled_classes = tf.cast(sample['groundtruth_valid_classes'], tf.int64) predicted_classes = tf.cast(detections['detection_classes'], tf.int64) confidences = detections['detection_scores'] metric.update(scene_id, labeled_boxes, labeled_classes, predicted_boxes, predicted_classes, confidences) elif isinstance(metric, IoUMetric):
tensorflow.cast
1,198
import tensorflow as tf 'SpectrumAugmenter', 'StackingOverTime', 'TestInputGenerator', ]) self.assertEqual(expected_layers, l_names) def testParamValueSumSquared(self): with self.session(use_gpu=False, graph=tf.Graph()): p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() all_vars = tf.trainable_variables() py_utils.SumSquared(all_vars)
tensorflow.Graph
1,199