Columns:
  seed      string, 25 to 2.89k characters  (code snippet)
  seed_api  string, 14 to 102 characters    (fully qualified API name)
  index     int64, 0 to 14.8k               (row id)
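Each row pairs a code snippet (seed) with the API it exercises (seed_api) and a running row id (index). A minimal sketch of how such rows could be iterated, assuming they are exported as JSON Lines with exactly these three fields; the file name seed_rows.jsonl is hypothetical:

import json

# Iterate the dataset rows, assuming one JSON object per line with the
# fields shown in the schema above.
with open("seed_rows.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        snippet = row["seed"]      # code snippet (string)
        api = row["seed_api"]      # fully qualified API name, e.g. "tensorflow.gradients"
        idx = row["index"]         # integer row id
        print(idx, api, len(snippet))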
import tensorflow as tf

        else:
            term2 = 0.5 * (param_eta + param_omega) * tf.log(
                tf.matrix_determinant(2 * np.pi * (param_eta + param_omega) * HaaInv))

        dual = param_eta * self.epsilon - param_omega * beta + \
            term1 + term2 + tf.reduce_mean(
                0.5 * (tf.reduce_sum(tf.matmul(ha, HaaInv) * ha, axis=1) - hss))

        # Symbolic dual gradient
        dual_grad = tf.gradients(xs=[param_eta, param_omega], ys=dual)

        # Eval functions.
        f_dual = U.function(
            inputs=[varphis, Kt, prec, Waa, Wsa, wa] + [param_eta, param_omega, old_entropy],
            outputs=dual,
            # mode='DebugMode'  # TEST
        )
tensorflow.gradients
2,200
from tensorflow.python.ops import nn_ops

        w_c: [1,1, attention_vec_size]
        coverage: [batch_size, passage_len]
    '''
    with variable_scope.variable_scope("Attention"):
        # Equation (11) in the paper
        state_features = linear(decoder_state, attention_vec_size, True)  # [batch_size, attention_vec_size]
        state_features = tf.expand_dims(state_features, 1)  # [batch_size, 1, attention_vec_size]
        all_features = encoder_features + state_features  # [batch_size,passage_len,attention_vec_size]
        if use_coverage and coverage is not None:
            coverage_features = tf.expand_dims(coverage, axis=-1) * w_c  # [batch_size, passage_len, attention_vec_size]
            all_features += coverage_features
        e = tf.reduce_sum(v * tf.tanh(all_features), axis=-1)  # [batch_size, passage_len]
        attn_dist = nn_ops.softmax(e)  # [batch_size, passage_len]
        attn_dist *= passage_mask

        if coverage is not None:  # Update coverage vector
            coverage += attn_dist
        else:  # first step of training
            coverage = attn_dist

        # Calculate the context vector from attn_dist and encoder_states
        # shape (batch_size, attn_size).
        context_vector = tf.reduce_sum(tf.expand_dims(attn_dist, axis=-1) * encoder_states, axis=1)  # [batch_size, encoder_dim]
        return context_vector, attn_dist, coverage
tensorflow.python.ops.nn_ops.softmax
2,201
import tensorflow as tf

            tf.greater(vx.lookup(u), -1),
            true_fn=lambda: (vx_keys, tf.add(vx.lookup(u), 1)),
            false_fn=lambda: (tf.concat([vx_keys, tf.reshape(u, (-1, 1))], axis=0),
                              tf.constant(1, dtype=tf.int64, name='constant'))
        )
        vx.insert(u, r)

        for i in tf.range(start=0, limit=z_t_len - self._p + 1, delta=1, dtype=None, name='range'):
            u = tf.string_join(z_t[i:i + self._p], '')
            vz_keys, r = tf.cond(
                tf.greater(vz.lookup(u), -1),
                true_fn=lambda: (vz_keys, tf.add(vz.lookup(u), 1)),
                false_fn=lambda: (
                    tf.concat([vz_keys, tf.reshape(u, (-1, 1))], axis=0),
                    tf.constant(1, dtype=tf.int64))
tensorflow.range
2,202
import tensorflow as tf

    ury = y1 + .5 * height
    return width, height, urx, ury


def encode(bboxes, gt_boxes, variances=None):
    with tf.name_scope('BoundingBoxTransform/encode'):
        (bboxes_width, bboxes_height,
         bboxes_urx, bboxes_ury) = get_width_upright(bboxes)

        (gt_boxes_width, gt_boxes_height,
         gt_boxes_urx, gt_boxes_ury) = get_width_upright(gt_boxes)
tensorflow.name_scope
2,203
import tensorflow as tf

      has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)

      with self.session():
        tf.global_variables_initializer().run()
        self.assertTrue(has_nan_or_inf.eval())
        self.assertEqual(0., grad_scale.eval())
tensorflow.global_variables_initializer
2,204
import tensorflow as tf

        self.x_preprocessed = (self.x_preprocessed - self.resnet_mean)  #/ self.resnet_std
        # red, green, blue = tf.split(self.x_preprocessed, num_or_size_splits=3, axis=3)
        # self.x_preprocessed = tf.concat([blue,green,red], 3)

        # These variables to keep track of what i do
        # filters = [64, 64, 128, 256, 512]
        # kernels = [7, 3, 3, 3, 3]
        # strides = [2, 0, 2, 2, 2]
        tf.add_to_collection('debug_layers', self.x_preprocessed)

        with tf.variable_scope('conv1_x'):
            print('Building unit: conv1')
            self.conv1 = self._conv('conv1', self.x_preprocessed, padding=[[0,0],[3,3],[3,3],[0,0]],
                                    num_filters=64, kernel_size=(7, 7), stride=(2, 2),
                                    l2_strength=self.wd, bias=self.bias)
            self.conv1 = self._bn('bn1', self.conv1)
            self.conv1 = self._relu('relu1', self.conv1)
            _debug(self.conv1)
tensorflow.variable_scope
2,205
import tensorflow as tf

            out = tf.nn.relu(self._normalize(x, self.mean, self.mean_sq, "reference"))
        return out

    def _normalize(self, x, mean, mean_sq, message):
        # make sure this is called with a variable scope
        shape = x.get_shape().as_list()
        assert len(shape) == 4
        self.gamma = safe_get("gamma", [shape[-1]],
                              initializer=tf.random_normal_initializer(1., 0.02))
        gamma = tf.reshape(self.gamma, [1, 1, 1, -1])
        self.beta = safe_get("beta", [shape[-1]],
                             initializer=tf.constant_initializer(0.))
        beta = tf.reshape(self.beta, [1, 1, 1, -1])
        assert self.epsilon is not None
        assert mean_sq is not None
        assert mean is not None
        std = tf.sqrt(self.epsilon + mean_sq - tf.square(mean))
        out = x - mean
tensorflow.reshape
2,206
import tensorflow as tf

    # number of steps.
    if FLAGS.use_tpu:
      assert len(eval_examples) % FLAGS.eval_batch_size == 0
      eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)

    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)

    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on.
      while len(predict_examples) % FLAGS.predict_batch_size != 0:
tensorflow.gfile.GFile
2,207
import tensorflow as tf

        dim = inputs.get_shape().as_list()[-1]
        out_shape = [shape[idx] for idx in range(
            len(inputs.get_shape().as_list()) - 1)] + [hidden]
        flat_inputs = tf.reshape(inputs, [-1, dim])
        W = tf.get_variable("W", [dim, hidden])
        res = tf.matmul(flat_inputs, W)
        if use_bias:
            b = tf.get_variable(
                "b", [hidden], initializer=tf.constant_initializer(0.))
            res = tf.nn.bias_add(res, b)
tensorflow.matmul
2,208
from tensorflow.python.ops import math_ops

        vstar = self.get_slot(var, "vstar")
        gold = self.get_slot(var, "gold")

        var_update = state_ops.assign_sub(var, lr_t*(grad + gold + mu_t*(var-vstar)))  #Update 'ref' by subtracting 'value
        #Create an op that groups multiple operations.
        #When this op finishes, all ops in input have finished
        return control_flow_ops.group(*[var_update,])

    def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        mu_t = math_ops.cast(self._mu_t, var.dtype.base_dtype)
        vstar = self.get_slot(var, "vstar")
        gold = self.get_slot(var, "gold")  # glod is not sparse

        v_diff = state_ops.assign(vstar, mu_t * (var - vstar), use_locking=self._use_locking)

        with ops.control_dependencies([v_diff]):  # run v_diff operation before scatter_add
            scaled_grad = scatter_add(vstar, indices, grad)
        var_update = state_ops.assign_sub(var, lr_t * (scaled_grad + gold))

        return control_flow_ops.group(*[var_update, ])
tensorflow.python.ops.math_ops.cast
2,209
import tensorflow as tf

        #self.dataset_dir = '/home/santanu/Downloads/DiscoGAN/edges2handbags/train/'
        self.writer = tf.summary.FileWriter("./logs", self.sess.graph)
tensorflow.summary.FileWriter
2,210
import tensorflow as tf

      capacity=1000 + 3 * batch_size,
      # Ensures a minimum amount of shuffling of examples.
      min_after_dequeue=1000)

  return images, sparse_labels

def weight_variable(shape):
  initial = tf.truncated_normal(shape, stddev=0.1)
  return tf.Variable(initial)

def bias_variable(shape):
  initial = tf.constant(0.1, shape=shape)
  return tf.Variable(initial)

def conv_scale(x, W):
  return tf.nn.conv3d(x, W, strides=[1,1,1,1,1], padding='VALID')

def inference(x):
  """
  Creates a model with pooling across space and scales.
  Always we have a conv-relu-spatial_pool-scale_pool x N layers structure
  with one fully connected layer on top.
  """
  if '-' in FLAGS.pm:
    FLAGS.pm = FLAGS.pm.split('-')
  num_layers = len(FLAGS.pm) - 1
  print(num_layers)
  for l in range(num_layers):
tensorflow.nn.conv3d
2,211
import tensorflow as tf

    # Trainable parameters
    mask = tf.equal(mask, tf.ones_like(mask))
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
    d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
    d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
    scores = d_layer_3_all
tensorflow.shape
2,212
import tensorflow as tf

  # What are the average Q values of the original tasks?
  if batch_size == num_tasks:
    indices = tf.transpose(tf.stack([orig_indices, orig_indices], axis=0))
    orig_q_vals = tf.gather_nd(logits_vec, indices)
    tf.compat.v2.summary.scalar(
        name="orig_q_vals",
        data=tf.reduce_mean(orig_q_vals),
        step=global_step,
    )

  # What are the average Q values of the relabelled tasks?
  indices = tf.transpose(
tensorflow.reduce_mean
2,213
import tensorflow as tf

    mask = tf.equal(mask, tf.ones_like(mask))
    hidden_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    input_size = query.get_shape().as_list()[-1]

    # Trainable parameters
    w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
    w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
    b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
    v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

    with tf.name_scope('v'):
        # Applying fully connected layer with non-linear activation to each of the B*T timestamps;
        # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
        tmp1 = tf.tensordot(facts, w1, axes=1)
tensorflow.random_normal
2,214
import tensorflow as tf

                       (exp_coupling, self.hparams.coupling))
    if self.is_training:
      init_features = self.create_init_batch(features)
      init_op = self.objective_tower(init_features, init=True)
      init_op = tf.Print(
          init_op, [init_op], message="Triggering data-dependent init.",
          first_n=20)
      tf.add_to_collection("glow_init_op", init_op)
    train_op = self.objective_tower(features, init=False)
    return tf.zeros_like(features["targets"]), {"training": train_op}

  def objective_tower(self, features, init=True):
    """Objective in terms of bits-per-pixel.
tensorflow.add_to_collection
2,215
import tensorflow as tf

    L = len(activation)  #number of layers
    m = Y.shape[1]  #number of training examples
    last = activation[L-1]
    labels = tf.transpose(Y)
    if last == 'sigmoid' or last == 'softmax':  #use cross entropy loss function
        logits = tf.transpose(betan*zn[1])
        cost = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(logits=logits, multi_class_labels=labels))
tensorflow.transpose
2,216
import tensorflow as tf

    gfile.MakeDirs(save_dir)
    with self.test_session() as sess:
      v = tf.Variable([10.0], name="v")
      # Run the initializer NOW to avoid the 0.5s overhead of the first Run()
      # call, which throws the test timing off in fastbuild mode.
tensorflow.Variable
2,217
import tensorflow as tf

tf.app.flags.DEFINE_integer('input_size', 512, '')
tf.app.flags.DEFINE_integer('batch_size_per_gpu', 14, '')
tf.app.flags.DEFINE_integer('num_readers', 16, '')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, '')
tf.app.flags.DEFINE_integer('max_steps', 100000, '')
tf.app.flags.DEFINE_integer('loss_scale', 1024, '')
tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
tf.app.flags.DEFINE_string('gpu_list', '1', '')
tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_resnet_v1_50_rbox/', '')
tf.app.flags.DEFINE_boolean('restore', False, 'whether to resotre from checkpoint')
tf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '')
tf.app.flags.DEFINE_integer('save_summary_steps', 100, '')
tf.app.flags.DEFINE_string('pretrained_model_path', None, '')
tf.app.flags.DEFINE_boolean('allow_mix_precision', False, 'whether to allow mix precision')
tf.app.flags.DEFINE_boolean('auto_tune', False, 'whether to autotune')
tf.app.flags.DEFINE_boolean('use_processed_data', False, 'whether to use processed data')
tf.app.flags.DEFINE_string('processed_data', './processed_dataset/', 'where to save preprocessed datasets')

import model
import icdar

FLAGS = tf.app.flags.FLAGS

gpus = list(range(len(FLAGS.gpu_list.split(','))))


def tower_loss(images, score_maps, geo_maps, training_masks, reuse_variables=None):
    # Build inference graph
tensorflow.app.flags.DEFINE_boolean
2,218
import tensorflow as tf

        batch_nums = tf.range(0, limit=batch_size)  # shape (batch_size)
        batch_nums = tf.expand_dims(batch_nums, axis=1)  # shape (batch_size, 1)
        batch_nums = tf.tile(batch_nums, [1, passage_length])  # shape (batch_size, passage_length)
        step_nums = tf.range(0, limit=passage_length)  # [passage_length]
        step_nums = tf.expand_dims(step_nums, axis=0)  # shape (1, passage_length)
        step_nums = tf.tile(step_nums, [batch_size, 1])  # shape (batch_size, passage_length)
        indices = tf.stack((batch_nums, step_nums, passage_word_idx), axis=2)  # shape (batch_size, passage_length, 3)
        indices = tf.reshape(indices, [-1, 3])  # [batch_size * passage_length, 3]
        indices = tf.cast(indices, tf.int64)
        shape = [batch_size, passage_length, extended_vsize]
        shape = tf.cast(shape, tf.int64)

        attn_dist = tf.reshape(attn_dist, shape=[-1])  # [batch_size*passage_length]
        one_hot_spare_rep = tf.SparseTensor(indices=indices, values=attn_dist, dense_shape=shape)  # [batch_size, passage_length, extended_vsize]

        if passage_mask is not None:
            passage_mask = tf.expand_dims(passage_mask, axis=-1)
            one_hot_spare_rep = one_hot_spare_rep * passage_mask

        one_hot_spare_rep = tf.sparse_reduce_sum(one_hot_spare_rep, axis=1)  # [batch_size, extended_vsize]
        vocab_dist = tf.add(vocab_dist, one_hot_spare_rep)

        if self.options.add_first_word_prob_for_phrase:
            vocab_dist = tf.nn.softmax(vocab_dist)  # normalize
tensorflow.reshape
2,219
import tensorflow as tf

                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.PREDICT:
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions=masked_lm_example_loss,
          scaffold_fn=scaffold_fn)  # output the score of mask_word
    return output_spec
tensorflow.contrib.tpu.TPUEstimatorSpec
2,220
import tensorflow as tf

    q_sqrt_r = tf.matrix_band_part(q_sqrt, -1, 0)  # D x M x M

    eKuf = tf.transpose(expectation(pXnew, (kern, feat)))  # M x N (psi1)
    if Luu is None:
        Kuu = feat.Kuu(kern, jitter=settings.numerics.jitter_level)  # M x M
        Luu = tf.cholesky(Kuu)  # M x M

    if not white:
        q_mu = tf.matrix_triangular_solve(Luu, q_mu, lower=True)
        Luu_tiled = tf.tile(Luu[None, :, :], [num_func, 1, 1])  # remove line once issue 216 is fixed
        q_sqrt_r = tf.matrix_triangular_solve(Luu_tiled, q_sqrt_r, lower=True)

    Li_eKuf = tf.matrix_triangular_solve(Luu, eKuf, lower=True)  # M x N
    fmean = tf.matmul(Li_eKuf, q_mu, transpose_a=True)

    eKff = expectation(pXnew, kern)  # N (psi0)
    eKuffu = expectation(pXnew, (kern, feat), (kern, feat))  # N x M x M (psi2)
tensorflow.matrix_triangular_solve
2,221
import tensorflow as tf """ with tf.variable_scope(scope) as sc: kernel_d, kernel_h, kernel_w = kernel_size num_in_channels = inputs.get_shape()[-1].value kernel_shape = [kernel_d, kernel_h, kernel_w, num_in_channels, num_output_channels] kernel = _variable_with_weight_decay('weights', shape=kernel_shape, use_xavier=use_xavier, stddev=stddev, wd=weight_decay) stride_d, stride_h, stride_w = stride outputs = tf.nn.conv3d(inputs, kernel, [1, stride_d, stride_h, stride_w, 1], padding=padding) biases = _variable_on_cpu('biases', [num_output_channels], tf.constant_initializer(0.0)) outputs = tf.nn.bias_add(outputs, biases) if bn: outputs = batch_norm_for_conv3d(outputs, is_training, bn_decay=bn_decay, scope='bn') if activation_fn is not None:
tensorflow.nn.conv3d
2,222
import tensorflow as tf

        self._lr = tf.Variable(0., trainable=False)
tensorflow.Variable
2,223
import tensorflow as tf

    return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])

def get_valid_batch(image,label,batch_size):
    images,labels=tf.train.batch([image,label],batch_size=batch_size)
    return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])


class trainwork(object):
    def __init__(self):
        with tf.variable_scope('scop'):
            self.w1=tf.get_variable('w1', [4096,1024],initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w2=tf.get_variable('w2', [1024,classnum],initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.b1 = tf.get_variable('b1', [1024],initializer=tf.constant_initializer(0.0))
            self.b2 = tf.get_variable('b2', [classnum],initializer=tf.constant_initializer(0.0))

    def inference(self,images):
        images=tf.cast(images,tf.float32)/255.0
        l1 = tf.matmul(images, self.w1)+self.b1
        l1=tf.nn.relu(l1)
        out = tf.matmul(l1, self.w2)+self.b2
        return out

    def test_inference(self,images):
        images=tf.cast(images,tf.float32)/255.0
        l1 = tf.matmul(images, self.w1)+self.b1
tensorflow.constant_initializer
2,224
import tensorflow as tf

      # Structured numpy arrays aren't supported.
      return np.array([], dtype=[("foo", np.float32)])

    def bad2():
      # Non-string python objects aren't supported.
      return tf.float32

    y, = tf.py_func(bad1, [], [tf.string])
    z, = tf.py_func(bad2, [], [tf.float64])
    with self.assertRaisesRegexp(errors.UnimplementedError,
                                 "Unsupported numpy type"):
      y.eval()
    with self.assertRaisesRegexp(errors.UnimplementedError,
                                 "Unsupported object type"):
      z.eval()
tensorflow.py_func
2,225
import tensorflow as tf

            ch_emb = tf.reshape(tf.nn.embedding_lookup(
                self.char_mat, self.ch), [N * PL * self.max_p_num, CL, dc])
            qh_emb = tf.reshape(tf.nn.embedding_lookup(
                self.char_mat, self.qh), [N * QL * self.max_p_num, CL, dc])
            ch_emb = tf.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout)
            qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout)

            ch_emb = conv(ch_emb, d, bias=True, activation=tf.nn.relu,
                          kernel_size=5, name="char_conv", reuse=None)
            qh_emb = conv(qh_emb, d, bias=True, activation=tf.nn.relu,
                          kernel_size=5, name="char_conv", reuse=True)

            ch_emb = tf.reduce_max(ch_emb, axis=1)
            qh_emb = tf.reduce_max(qh_emb, axis=1)

            ch_emb = tf.reshape(ch_emb, [N * self.max_p_num, PL, -1])
            qh_emb = tf.reshape(qh_emb, [N * self.max_p_num, QL, -1])

            c_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.c), 1.0 - self.dropout)
            q_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.q), 1.0 - self.dropout)

            c_emb = tf.concat([c_emb, ch_emb], axis=2)
            q_emb = tf.concat([q_emb, qh_emb], axis=2)

            self.c_emb = highway(c_emb, size=d, scope="highway", dropout=self.dropout, reuse=None)
            self.q_emb = highway(q_emb, size=d, scope="highway", dropout=self.dropout, reuse=True)

    def _encode(self):
        N, PL, QL, CL, d, dc, nh = self._params()
        if self.config.fix_pretrained_vector:
tensorflow.reshape
2,226
import tensorflow as tf

          num_decoder_symbols=5, embedding_size=2, feed_previous=True)
      res1 = sess.run(d1)
      res2 = sess.run(d2)
      res3 = sess.run(d3)
      self.assertAllClose(res1, res2)
      self.assertAllClose(res1, res3)

  def testEmbeddingTiedRNNSeq2Seq(self):
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
        dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
        cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
        dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
            enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2)
        sess.run([tf.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 5), res[0].shape)
tensorflow.constant
2,227
import tensorflow as tf

            y_true: tensor, observations.
            y_pred: tensor, output of network.

        Returns:
            loss value, means negative log-likelihood.
        """
        logL = 0
        # pre-calculate cumsum
        cumsum_y_pred = tf.cumsum(y_pred)
        hazard_ratio = tf.exp(y_pred)
        cumsum_hazard_ratio = tf.cumsum(hazard_ratio)
        if self.train_data['ties'] == 'noties':
            log_risk = tf.log(cumsum_hazard_ratio)
            likelihood = y_pred - log_risk
            # dimension for E: np.array -> [None, 1]
            uncensored_likelihood = likelihood * y_true
tensorflow.cumsum
2,228
import tensorflow as tf

from utility import train_helper

from dataset import dataset_factory
from preprocessing import preprocessing_factory
from preprocessing import anchor_manipulator

# hardware related configuration
tf.app.flags.DEFINE_integer(
    'num_readers', 16,
    'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
    'num_preprocessing_threads', 48,
    'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
    'num_cpu_threads', 0,
    'The number of cpu cores used to train.')
tf.app.flags.DEFINE_float(
    'gpu_memory_fraction', 1., 'GPU memory fraction to use.')
# scaffold related configuration
tf.app.flags.DEFINE_string(
    'data_dir', '../PASCAL/VOC_TF/VOC0712TF/',
tensorflow.app.flags.DEFINE_integer
2,229
import tensorflow as tf

        optimizer = tf.train.MomentumOptimizer(learning_rate=truncated_learning_rate,
                                               momentum=params['momentum'])

        # Batch norm requires update_ops to be added as a train_op dependency.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(loss, global_step)
    else:
        train_op = None

    cls_accuracy = tf.metrics.accuracy(glabels, predictions['classes'])
    metrics = {'cls_accuracy': cls_accuracy}

    # Create a tensor named train_accuracy for logging purposes.
    tf.identity(cls_accuracy[1], name='cls_accuracy')
    tf.summary.scalar('cls_accuracy', cls_accuracy[1])

    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
tensorflow.metrics.accuracy
2,230
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils

    'precision/positive_threshold_0.500000_mean',
    'recall/positive_threshold_0.500000_mean',
}


class DNNLinearCombinedClassifierBenchmark(test.Benchmark):

  def _assertSingleClassMetrics(self, metrics):
    estimator_test_utils.assert_in_range(0.9, 1.0, 'auc', metrics)
    estimator_test_utils.assert_in_range(0.9, 1.0,
                                         'accuracy/threshold_0.500000_mean',
                                         metrics)
    estimator_test_utils.assert_in_range(
        0.9, 1.0, 'precision/positive_threshold_0.500000_mean', metrics)
    estimator_test_utils.assert_in_range(
        0.9, 1.0, 'recall/positive_threshold_0.500000_mean', metrics)
    self._assertCommonMetrics(metrics)
tensorflow.contrib.learn.python.learn.estimators.estimator_test_utils.assert_in_range
2,231
from tensorflow.python.ops import array_ops

      loss_vec = array_ops.reshape(loss_vec, shape=(-1,))
      loss_vec = math_ops.mul(
          loss_vec, array_ops.reshape(weight_tensor, shape=(-1,)))
      return math_ops.div(
tensorflow.python.ops.array_ops.reshape
2,232
import tensorflow as tf name="label{0}".format(i))) self.types.append(tf.placeholder(tf.float32, shape=[None], name="type{0}".format(i))) self.global_step = tf.Variable(0, trainable=False) # Select logits to prob function self.logits_to_prob = tf.nn.softmax if self.hparams.logits_to_prob == 'sigmoid': self.logits_to_prob = sigmoid_prob self.output = self.ranking_model(self.max_candidate_num, scope='ranking_model') pad_removed_output = self.remove_padding_for_metric_eval(self.docid_inputs, self.output) reshaped_labels = tf.transpose(tf.convert_to_tensor(self.labels)) # reshape from [max_candidate_num, ?] to [?, max_candidate_num] for metric in self.exp_settings['metrics']: for topn in self.exp_settings['metrics_topn']: metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_labels, pad_removed_output, None) tf.summary.scalar('%s_%d' % (metric, topn), metric_value, collections=['eval']) if not forward_only: # Build model self.rank_list_size = exp_settings['train_list_cutoff'] train_output = self.ranking_model(self.rank_list_size, scope='ranking_model') self.propensity = self.DenoisingNet(self.rank_list_size, forward_only)
tensorflow.convert_to_tensor
2,233
import tensorflow as tf

    add = tf.add(in0, in1, "ADD")
    sub = tf.subtract(in0, in1, "SUB")

    # Cast or convert result to the output dtype.
    if tf_output0_dtype == tf.string:
        cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
    else:
        cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")

    if tf_output1_dtype == tf.string:
        cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
    else:
        cast1 = tf.cast(sub if not swap else add, tf_output1_dtype, "CAST1")
tensorflow.cast
2,234
import tensorflow as tf

    tf.get_default_session().run(sync_model_to_lazymodel)
    tf.get_default_session().run(copy_normalizers)
    logger.info('Loaded normalizers:')
    load_norm = tf.get_default_session().run(normalizers_parameters)
    logger.info(load_norm)
tensorflow.get_default_session
2,235
import tensorflow as tf

        # fc2
        with tf.variable_scope('fc2'):
            w = tf.get_variable('w', [self.fc1.get_shape()[1], 2048], initializer=he_normal,
                                regularizer=regularizer)
            b = tf.get_variable('b', [2048], initializer=tf.constant_initializer(1.0))
            out = tf.matmul(self.fc1, w) + b
            self.fc2 = tf.nn.relu(out)

        # fc3
tensorflow.constant_initializer
2,236
import tensorflow as tf


def MultiBoxLoss(num_class=2, neg_pos_ratio=3):
    """multi-box loss"""
    def multi_box_loss(y_true, y_pred):
        num_batch = tf.shape(y_true)[0]
        num_prior = tf.shape(y_true)[1]

        loc_pred = tf.reshape(y_pred[0], [num_batch * num_prior, 4])
        landm_pred = tf.reshape(y_pred[1], [num_batch * num_prior, 10])
        class_pred = tf.reshape(y_pred[2], [num_batch * num_prior, num_class])
        loc_true = tf.reshape(y_true[..., :4], [num_batch * num_prior, 4])
        landm_true = tf.reshape(y_true[..., 4:14], [num_batch * num_prior, 10])
        landm_valid = tf.reshape(y_true[..., 14], [num_batch * num_prior, 1])
        class_true = tf.reshape(y_true[..., 15], [num_batch * num_prior, 1])

        # define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore)
        #                     landm_valid = 1 (w landm), 0 (w/o landm)
        mask_pos = tf.equal(class_true, 1)
        mask_neg = tf.equal(class_true, 0)
        mask_landm = tf.logical_and(tf.equal(landm_valid, 1), mask_pos)
tensorflow.reshape
2,237
import tensorflow as tf


def compile_data(tmp_dir, datasets, filename):
  """Concatenate all `datasets` and save to `filename`."""
  filename = os.path.join(tmp_dir, filename)
  # lang1_fname = filename + ".lang1"
  # lang2_fname = filename + ".lang2"
  lang1_fname = filename + ".source"
  lang2_fname = filename + ".target"
  if tf.gfile.Exists(lang1_fname) and tf.gfile.Exists(lang2_fname):
    tf.logging.info("Skipping compile data, found files:\n%s\n%s", lang1_fname,
                    lang2_fname)
    return filename
  with tf.gfile.GFile(lang1_fname, mode="w") as lang1_resfile:
    with tf.gfile.GFile(lang2_fname, mode="w") as lang2_resfile:
      for dataset in datasets:
        url = dataset[0]
        compressed_filename = os.path.basename(url)
        compressed_filepath = os.path.join(tmp_dir, compressed_filename)
        if url.startswith("http"):
          generator_utils.maybe_download(tmp_dir, compressed_filename, url)

        if dataset[1][0] == "tsv":
          _, src_column, trg_column, glob_pattern = dataset[1]
          filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern))
          if not filenames:
tensorflow.gfile.GFile
2,238
import tensorflow as tf

      'bounding_box_samples': _float_feature(d['bounding_box_samples']),
      'depth_renders': _float_feature(d['depth_renders']),
      'mesh_name': _bytes_feature(d['mesh_name']),
      'near_surface_samples': _float_feature(d['near_surface_samples']),
      'grid': _float_feature(d['grid']),
      'world2grid': _float_feature(d['world2grid']),
      'surface_point_samples': _float_feature(d['surface_point_samples'])
  }
  example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
  return example_proto.SerializeToString()


def full_featurespec():
  return {
      'bounding_box_samples': tf.io.FixedLenFeature([100000, 4], tf.float32),
      'depth_renders': tf.io.FixedLenFeature([20, 224, 224, 1], tf.float32),
      'mesh_name': tf.io.FixedLenFeature([], tf.string),
      'near_surface_samples': tf.io.FixedLenFeature([100000, 4], tf.float32),
      'grid': tf.io.FixedLenFeature([32, 32, 32], tf.float32),
      'world2grid': tf.io.FixedLenFeature([4, 4], tf.float32),
      'surface_point_samples': tf.io.FixedLenFeature([10000, 6], tf.float32)
  }


def parse_tf_example(example_proto):
  d = tf.io.parse_single_example(example_proto, full_featurespec())
  return (d['bounding_box_samples'], d['depth_renders'], d['mesh_name'],
          d['near_surface_samples'], d['grid'], d['world2grid'],
          d['surface_point_samples'])
tensorflow.io.FixedLenFeature
2,239
import tensorflow.contrib as contrib

    test_y_1 = to_categorical(test_y_1, n_class_1)
    train_y_2 = to_categorical(train_y_2, n_class_2)
    test_y_2 = to_categorical(test_y_2, n_class_2)

    return train_X, train_y_1, train_y_2, test_X, test_y_1, test_y_2


def apply_cross_stitch(input1, input2):
    input1_reshaped = contrib.layers.flatten(input1)
    input2_reshaped = contrib.layers.flatten(input2)
    input = tf.concat((input1_reshaped, input2_reshaped), axis=1)

    # initialize with identity matrix
    cross_stitch = tf.get_variable("cross_stitch", shape=(input.shape[1], input.shape[1]), dtype=tf.float32,
                                   collections=['cross_stitches', tf.GraphKeys.GLOBAL_VARIABLES],
                                   initializer=tf.initializers.identity())
    output = tf.matmul(input, cross_stitch)
tensorflow.contrib.layers.flatten
2,240
import tensorflow as tf

  Returns:
    If `image_tensor` has dynamic size, return `image_tensor` with a Assert
    control dependency. Otherwise returns image_tensor.

  Raises:
    ValueError: if `image_tensor`'s' width or height is smaller than `min_dim`.
  """
  image_shape = image_tensor.get_shape()
  image_height = static_shape.get_height(image_shape)
  image_width = static_shape.get_width(image_shape)
  if image_height is None or image_width is None:
    shape_assert = tf.Assert(
        tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
                       tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
        ['image size must be >= {} in both height and width.'.format(min_dim)])
    with tf.control_dependencies([shape_assert]):
      return tf.identity(image_tensor)

  if image_height < min_dim or image_width < min_dim:
    raise ValueError(
        'image size must be >= %d in both height and width; image dim = %d,%d' %
        (min_dim, image_height, image_width))
  return image_tensor
tensorflow.shape
2,241
import tensorflow as tf

    def net_U1(self, x):
        lambda_1 = self.lambda_1
        lambda_2 = tf.exp(self.lambda_2)
        U = self.neural_net(x, self.weights, self.biases)
        U_x = self.fwd_gradients_1(U, x)
tensorflow.exp
2,242
import tensorflow as tf

        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
tensorflow.shape
2,243
import tensorflow as tf

      # here because it performs better than AveragePooling2D.
      axes = [2, 3] if self.data_format == 'channels_first' else [1, 2]
      inputs = tf.reduce_mean(inputs, axes, keepdims=True)
      inputs = tf.identity(inputs, 'final_reduce_mean')

      inputs = tf.reshape(inputs, [-1, self.final_size])
      inputs = tf.layers.dense(inputs=inputs, units=self.num_classes)
      inputs = tf.identity(inputs, 'final_dense')
      return inputs
tensorflow.reshape
2,244
import tensorflow as tf

        self._final_state_name = self.with_prefix(self._name, 'final')
        for state_tuple in self._initial_state:
            tf.add_to_collection(self._initial_state_name, state_tuple.c)
            tf.add_to_collection(self._initial_state_name, state_tuple.h)
        for state_tuple in self._final_state:
            tf.add_to_collection(self._final_state_name, state_tuple.c)
tensorflow.add_to_collection
2,245
from tensorflow.python.framework import ops

    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
tensorflow.python.framework.ops.add_to_collections
2,246
from tensorflow.python.framework import tensor_util

  """Helper which ensures that input is a non-negative, int32, scalar."""
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.base_dtype != dtypes.int32.base_dtype:
    raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32))
  x_value_static = tensor_util.constant_value(x)
  if x.get_shape().ndims is not None and x_value_static is not None:
    if x.get_shape().ndims != 0:
      raise ValueError("%s.ndims=%d is not 0 (scalar)" %
tensorflow.python.framework.tensor_util.constant_value
2,247
import tensorflow as tf

    a_indices = op.inputs[0:numTensors]
    a_values = op.inputs[numTensors:numTensors*2]
    a_shape = op.inputs[numTensors*2:numTensors*3]
    b = op.inputs[numTensors*3]
    adj_a = op.get_attr("adjoint_a")
    adj_b = op.get_attr("adjoint_b")

    # gradient w.r.t. dense
    a_values_grads = []
    b_list = [b[i] for i in range(numTensors)]
    b_grads = b_module.bspmm(a_indices, a_values, a_shape, grad, adjoint_a=True, adjoint_b=False)

    bg_row = tf.shape(b_grads[0])[0]
    bg_col = tf.shape(b_grads[0])[1]
    b_grads = tf.reshape(b_grads, (numTensors * bg_row, bg_col))

    if adj_b:
        b_grads = [array_ops.transpose(b_g) for b_g in b_grads]

    for t in range(numTensors):
        rows = a_indices[t][:, 0]
        cols = a_indices[t][:, 1]
        parts_a = array_ops.gather(grad[t], rows if not adj_a else cols)
        parts_b = array_ops.gather(b_list[t] if not adj_b else array_ops.transpose(b_list[t]), cols if not adj_a else rows)
        a_values_grads.append(math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1))

    return_val = [None for _ in range(numTensors)] + a_values_grads + [None for _ in range(numTensors)] + [b_grads]
tensorflow.shape
2,248
from tensorflow.python.platform import gfile

from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import gfile

SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'


def _read32(bytestream):
  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
  return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


def extract_images(filename):
  """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
  print('Extracting', filename)
  with gfile.Open(filename, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:
    magic = _read32(bytestream)
    if magic != 2051:
      raise ValueError('Invalid magic number %d in MNIST image file: %s' %
                       (magic, filename))
    num_images = _read32(bytestream)
    rows = _read32(bytestream)
    cols = _read32(bytestream)
    buf = bytestream.read(rows * cols * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8)
    data = data.reshape(num_images, rows, cols, 1)
    return data


def dense_to_one_hot(labels_dense, num_classes):
tensorflow.python.platform.gfile.Open
2,249
import tensorflow as tf

            gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
            self.loss_d_rot += lam * gradient_penalty

        train_vars = tf.trainable_variables()
        d_params = [v for v in train_vars if v.name.startswith(name + '/discriminator/')]
        g_params = [v for v in train_vars if v.name.startswith(name + '/generator/')]
tensorflow.trainable_variables
2,250
import tensorflow as tf

      return f

    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(feature.input_ids)
    features["input_mask"] = create_int_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    features["label_ids"] = create_int_feature(feature.label_ids)

    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())


def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):

  name_to_features = {
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "label_ids": tf.FixedLenFeature([seq_length], tf.int64),
  }

  def _decode_record(record, name_to_features):
    example = tf.parse_single_example(record, name_to_features)
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.to_int32(t)
      example[name] = t
    return example

  def input_fn(params):
    batch_size = params["batch_size"]
tensorflow.FixedLenFeature
2,251
import tensorflow as tf

  if labels is not None and not labels.dtype.is_integer:
    raise ValueError('expected integer labels but got %r' % labels.dtype)

  if (frequency_threshold is None and labels is None and key_fn is None and
      not fingerprint_shuffle and top_k is not None and
      top_k <= LARGE_VOCAB_TOP_K):
    logging.info('If the number of unique tokens is smaller than the provided '
                 'top_k or approximation error is acceptable, consider using '
                 'tft.experimental.approximate_vocabulary for a potentially '
                 'more efficient implementation.')

  with tf.compat.v1.name_scope(name, 'vocabulary'):
    vocabulary_key = vocab_filename
    vocab_filename = _get_vocab_filename(vocab_filename, store_frequency)
    informativeness_threshold = float('-inf')
    coverage_informativeness_threshold = float('-inf')
    if labels is not None:
      if weights is not None:
        vocab_ordering_type = _VocabOrderingType.WEIGHTED_MUTUAL_INFORMATION
      else:
        vocab_ordering_type = _VocabOrderingType.MUTUAL_INFORMATION

      # Correct for the overloaded `frequency_threshold` API.
tensorflow.compat.v1.name_scope
2,252
import tensorflow as tf

        data_dir=algorithmic.TinyAlgo.data_dir, shuffle_files=False)
    tensor1 = dataset.make_one_shot_iterator().get_next()["targets"]
    tensor2 = dataset.make_one_shot_iterator().get_next()["targets"]

    with tf.Session() as sess:
      self.assertTrue(assert_tensors_equal(sess, tensor1, tensor2, 20))

  @test_utils.run_in_graph_mode_only()
  def testNoShufflePreprocess(self):
tensorflow.Session
2,253
import tensorflow as tf

        path_ckpt_input = os.path.join(FLAGS.output_dir, "checkpoint_input")
        if FLAGS.ckpt_no is not None and not tf.gfile.Exists(path_ckpt):
            with tf.gfile.GFile(path_ckpt, "w") as writer:
                writer.write('model_checkpoint_path: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "model.ckpt"), str(FLAGS.ckpt_no)))
                writer.write('all_model_checkpoint_paths: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "model.ckpt"), str(FLAGS.ckpt_no)))

        if FLAGS.ckpt_no_input is not None and not tf.gfile.Exists(path_ckpt_input):
            with tf.gfile.GFile(path_ckpt_input, "w") as writer:
                writer.write('model_checkpoint_path: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "input.ckpt"), str(FLAGS.ckpt_no_input)))
                writer.write('all_model_checkpoint_paths: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "input.ckpt"), str(FLAGS.ckpt_no_input)))

    if FLAGS.use_hvd and hvd.rank() == 0 and (FLAGS.do_train or FLAGS.do_train_eval):
        (cpath, cname) = os.path.split(FLAGS.bert_config_file)
        tf.gfile.Copy(FLAGS.bert_config_file, os.path.join(FLAGS.output_dir, cname), True)
tensorflow.gfile.GFile
2,254
import tensorflow as tf

    # Trainable parameters
    w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
    w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
    b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
    v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

    with tf.name_scope('v'):
        # Applying fully connected layer with non-linear activation to each of the B*T timestamps;
        # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
        tmp1 = tf.tensordot(facts, w1, axes=1)
        tmp2 = tf.tensordot(query, w2, axes=1)
        tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
        tmp = tf.tanh((tmp1 + tmp2) + b)

    # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
    v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp')  # (B,T) shape
    key_masks = mask  # [B, 1, T]
    # key_masks = tf.expand_dims(mask, 1)  # [B, 1, T]
    paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
    v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings)  # [B, 1, T]
tensorflow.tensordot
2,255
from tensorflow.python.platform import gfile

    checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
    if not gfile.Exists(FLAGS.train_dir):
tensorflow.python.platform.gfile.Exists
2,256
import tensorflow as tf

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

  if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))

  tf.gfile.MakeDirs(FLAGS.output_dir)

  task_name = FLAGS.task_name.lower()

  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))

  processor = processors[task_name]()
tensorflow.gfile.MakeDirs
2,257
import tensorflow as tf

    save_dir = self._TestDir("update_checkpoint_state")
    os.chdir(save_dir)
    # Make a temporary train directory.
    train_dir = "train"
    os.mkdir(train_dir)
    abs_path = os.path.join(save_dir, "model-0")
    rel_path = "train/model-2"
    tf.train.update_checkpoint_state(
        train_dir,
        rel_path,
        all_model_checkpoint_paths=[abs_path, rel_path])
    ckpt = tf.train.get_checkpoint_state(train_dir)
    self.assertEqual(ckpt.model_checkpoint_path, rel_path)
    self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
    self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path)
    self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path)


class MetaGraphTest(tf.test.TestCase):

  def _TestDir(self, test_name):
    test_dir = os.path.join(self.get_temp_dir(), test_name)
tensorflow.train.get_checkpoint_state
2,258
import tensorflow as tf

    #                          tf.zeros_like(tf.cast(n_neg_to_select, tf.float32)),
    #                          name='rand_select_negtive')

    # include both selected negtive and all positive examples
    final_mask = tf.stop_gradient(tf.logical_or(tf.logical_and(negtive_mask, selected_neg_mask), positive_mask))
    total_examples = tf.reduce_sum(tf.cast(final_mask, tf.float32))

    # add mask for glabels and cls_pred here
    glabels = tf.boolean_mask(tf.clip_by_value(glabels, 0, FLAGS.num_classes), tf.stop_gradient(final_mask))
    cls_pred = tf.boolean_mask(cls_pred, tf.stop_gradient(final_mask))
    location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))
    gtargets = tf.boolean_mask(gtargets, tf.stop_gradient(positive_mask))

    predictions = {
        'classes': tf.argmax(cls_pred, axis=-1),
        'probabilities': tf.reduce_max(tf.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1),
        'bboxes_predict': tf.reshape(bboxes_pred, [-1, 4])}

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate loss, which includes softmax cross entropy and L2 regularization.
    cross_entropy = tf.cond(n_positives > 0.,
                            lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred),
                            lambda: 0.)
tensorflow.stop_gradient
2,259
import tensorflow as tf

        alpha = tf.nn.softmax(
            tf.reshape(tf.matmul(atten_hidden, V), [-1, shape[1], 1]), axis=1)
        output = tf.reshape(output, [-1, shape[1], 2 * self.config.hidden_size])
        C = tf.multiply(alpha, output)
tensorflow.reshape
2,260
import tensorflow as tf

    if not isinstance(feat, InducingPoints):
        raise NotImplementedError

    if full_cov:
        # TODO(VD): ``full_cov`` True would return a ``fvar`` of shape N x N x D x D,
        # encoding the covariance between input datapoints as well.
        # This is not implemented as this feature is only used for plotting purposes.
        raise NotImplementedError

    pXnew = Gaussian(Xnew_mu, Xnew_var)

    num_data = tf.shape(Xnew_mu)[0]  # number of new inputs (N)
    num_ind = tf.shape(q_mu)[0]  # number of inducing points (M)
    num_func = tf.shape(q_mu)[1]  # output dimension (D)

    q_sqrt_r = tf.matrix_band_part(q_sqrt, -1, 0)  # D x M x M

    eKuf = tf.transpose(expectation(pXnew, (kern, feat)))  # M x N (psi1)
    if Luu is None:
        Kuu = feat.Kuu(kern, jitter=settings.numerics.jitter_level)  # M x M
        Luu = tf.cholesky(Kuu)  # M x M
    if not white:
        q_mu = tf.matrix_triangular_solve(Luu, q_mu, lower=True)
        Luu_tiled = tf.tile(Luu[None, :, :], [num_func, 1, 1])  # remove line once issue 216 is fixed
        q_sqrt_r = tf.matrix_triangular_solve(Luu_tiled, q_sqrt_r, lower=True)
tensorflow.shape
2,261
from tensorflow.python.framework import ops

    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, or `complex64`.
    bias: A 1-D `Tensor` with size matching the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    data_format: A string. 'NHWC' and 'NCHW" are supported.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.op_scope([value, bias], name, "BiasAdd") as name:
    value = ops.convert_to_tensor(value, name="input")
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops._bias_add(value, bias, data_format=data_format,
                                name=name)


ops.RegisterShape("BiasAdd")(common_shapes.bias_add_shape)

ops.RegisterShape("BiasAddGrad")(common_shapes.bias_add_grad_shape)


# pylint: disable=protected-access
def bias_add_v1(value, bias, name=None):
tensorflow.python.framework.ops.convert_to_tensor
2,262
from tensorflow.python.ops import check_ops

    return array_ops.identity(self.rate)

  def _variance(self):
    return array_ops.identity(self.rate)

  @distribution_util.AppendDocstring(
      """Note: when `rate` is an integer, there are actually two modes: `rate`
      and `rate - 1`. In this case we return the larger, i.e., `rate`.""")
  def _mode(self):
    return math_ops.floor(self.rate)

  def _assert_valid_sample(self, x, check_integer=True):
    if not self.validate_args:
      return x
    dependencies = [check_ops.assert_non_negative(x)]
    if check_integer:
      dependencies += [distribution_util.assert_integer_form(
          x, message="x has non-integer components.")]
    return control_flow_ops.with_dependencies(dependencies, x)
tensorflow.python.ops.check_ops.assert_non_negative
2,263
import tensorflow as tf

    # Verifies that collection where item type does not match expected
    # type will not be added.
    tf.add_to_collection("int_collection", 3)
    tf.add_to_collection("int_collection", 3.5)
tensorflow.add_to_collection
2,264
import tensorflow as tf

        self.translated_z = trans_z

        s_h, s_w = self.output_height, self.output_width
        s_h0, s_h1, s_h2, s_h3 = \
            int(s_h/ns0), int(s_h/ns0/ns1), int(s_h/ns0/ns1/ns2), int(s_h/ns0/ns1/ns2/ns3)
        s_w0, s_w1, s_w2, s_w3 = \
            int(s_w/ns0), int(s_w/ns0/ns1), int(s_w/ns0/ns1/ns2), int(s_w/ns0/ns1/ns2/ns3)

        def decode(z, skip_h3, skip_h2, skip_h1, skip_h0):
            z_ = lrelu(linear(tf.nn.dropout(z, keep_prob), nf3*s_h3*s_w3, 'd_h0_lin'))
            h0 = tf.nn.dropout(tf.reshape(z_, [-1, s_h3, s_w3, nf3]), keep_prob)
            import IPython
            IPython.embed()
            h1 = lrelu(deconv2d(tf.concat([h0, skip_h3], 3),
                                [self.batch_size, s_h2, s_w2, nf2], name='d_h1', d_h=ns3, d_w=ns3))
            h2 = lrelu(deconv2d(tf.concat([h1, skip_h2], 3),
                                [self.batch_size, s_h1, s_w1, nf1], name='d_h2', d_h=ns2, d_w=ns2))
            h3 = lrelu(deconv2d(tf.concat([h2, skip_h1], 3),
tensorflow.nn.dropout
2,265
import tensorflow as tf

    init: float
      Bias initializer. Defaults to zero.
    name: str
      Name for this op. Defaults to tensor.op.name.

  Returns
  -------
  tf.Tensor
    A biased tensor with the same shape as the input tensor.
  """
  if init is None:
    init = tf.zeros([tensor.get_shape()[-1].value])
  with tf.name_scope(name, tensor.op.name, [tensor]):
    b = tf.Variable(init, name='b')
    return tf.nn.bias_add(tensor, b)


def dropout(tensor, dropout_prob, training=True, training_only=True):
  """Random dropout.

  This implementation supports "always-on" dropout (training_only=False), which
  can be used to calculate model uncertainty. See Gal and Ghahramani,
  http://arxiv.org/abs/1506.02142.

  NOTE(user): To simplify the implementation, I have chosen not to reverse
    the scaling that occurs in tf.nn.dropout when using dropout during
    inference. This shouldn't be an issue since the activations will be scaled
    by the same constant in both training and inference. This means that there
tensorflow.nn.bias_add
2,266
import tensorflow as tf

        #TODO: self.v should be l2-normalized or not? / currently not.
        return {'v':self.v,'b':self.b,'g':self.g}

class SymPadConv2d(object): #Resize and Convolution(upsacle by 2)
    def __init__(self,name,input_dim,output_dim,
                 k_h=3,k_w=3,stddev=0.02) :
        assert k_h%2==1 and k_w%2==1, 'kernel size should be odd numbers to ensure exact size'
        with tf.variable_scope(name) :
            self.w = tf.get_variable('w', [k_h, k_w, input_dim, output_dim],
                                     initializer=tf.random_normal_initializer(stddev=stddev))
            self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0))

        self.padding = [ [0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0] ]

    def __call__(self,input_var,name=None,**kwargs):
        _,h,w,c = input_var.shape.as_list()
        _t = tf.image.resize_nearest_neighbor(input_var, [h*2, w*2])
        _t = tf.pad(_t,self.padding, mode='SYMMETRIC')

        return tf.nn.bias_add(
            tf.nn.conv2d(_t, self.w,
tensorflow.constant_initializer
2,267
import tensorflow as tf

            seq_axis=1,
            batch_axis=0,
        )
        mask_wo_bos_eos = mask_wo_bos_eos[:, 1:]
        mask_wo_bos_eos = tf.reverse_sequence(
            mask_wo_bos_eos,
            sequence_length_wo_bos_eos,
            seq_axis=1,
            batch_axis=0,
        )
        mask_wo_bos_eos = tf.cast(mask_wo_bos_eos, 'bool')

        return {
            'lm_embeddings': lm_embeddings,
            'lengths': sequence_length_wo_bos_eos,
            'token_embeddings': lm_graph.embedding,
            'mask': mask_wo_bos_eos,
        }
tensorflow.cast
2,268
import tensorflow as tf

        tf.greater_equal(
            matched_iou, self._config_dict['background_iou_low_threshold']),
        tf.less(
            matched_iou, self._config_dict['background_iou_high_threshold']))
    ignored_matches = tf.logical_and(
        tf.less(matched_iou, 0.0),
        tf.greater_equal(
            matched_iou, self._config_dict['background_iou_high_threshold']))
    ignored_matches = tf.logical_and(
        ignored_matches,
        tf.less(
            matched_iou, self._config_dict['foreground_iou_threshold']))
tensorflow.greater_equal
2,269
from tensorflow.python.ops import array_ops

    start_sum = start_sum if start_sum else (
        array_ops.zeros((), dtype=dtypes.int32, name="zero"),)
tensorflow.python.ops.array_ops.zeros
2,270
import tensorflow as tf

        if slope < 1.0:
            X = tf.nn.leaky_relu(X, slope) if slope > 0.0 else tf.nn.relu(X)
tensorflow.nn.relu
2,271
import tensorflow as tf

  if mode == 'eval':
    for checkpoint in _get_next_checkpoint():
      tf.logging.info('Starting to evaluate.')
      try:
        eval_results = image_classifier.evaluate(
            input_fn=imagenet_eval.input_fn,
            steps=eval_steps,
            hooks=eval_hooks,
            checkpoint_path=checkpoint)
        tf.logging.info('Evaluation results: %s' % eval_results)
      except tf.errors.NotFoundError:
        # skip checkpoint if it gets deleted prior to evaluation
        tf.logging.info('Checkpoint %s no longer exists ... skipping')

  elif mode == 'train_and_eval':
    current_step = _load_global_step_from_checkpoint_dir(model_dir)
    tf.logging.info('Starting training at step=%d.' % current_step)
    train_steps_per_eval = int(
        hparams.num_epochs_per_eval * train_steps_per_epoch)

    # Final Evaluation if training is finished.
tensorflow.logging.info
2,272
from tensorflow.python.ops import math_ops

  # avoid division by zero
  epsilon = 1e-7
  def compute_precision(name):
    precision = math_ops.div(true_positives,
                             epsilon + true_positives + false_positives,
                             name='precision_' + name)
tensorflow.python.ops.math_ops.div
2,273
from tensorflow.python.framework import random_seed

      embed_np = embeds[ids]
      embed_tf = ops.embedding_lookup(embeds, ids).eval()
    self.assertEqual(embed_np.shape, embed_tf.shape)
    self.assertAllClose(embed_np, embed_tf)

  def test_categorical_variable(self):
    random_seed.set_random_seed(42)
    with self.cached_session() as sess:
      cat_var_idx = array_ops.placeholder(dtypes.int64, [2, 2])
      embeddings = ops.categorical_variable(
          cat_var_idx, n_classes=5, embedding_size=10, name="my_cat_var")
      sess.run(variables.global_variables_initializer())
tensorflow.python.framework.random_seed.set_random_seed
2,274
import tensorflow as tf

model = MyModel(vocab_size, embedding_dim, units, BATCH_SIZE)
optimizer = tf.optimizers.Adam()
checkpoint_dir = "./models/new_out"
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir)).expect_partial()


@app.route("/summary", methods=["POST"])
@cross_origin(headers=['Content-Type'])
def summary():
    res = requests.post("https://turkcemetinozetleme.teaddict.net/ozetle/api/new", data={
        "contextOfText": request.data.decode()
tensorflow.train.latest_checkpoint
2,275
import tensorflow as tf } """ if not features: features = {} inputs_old = None if "inputs" in features and len(features["inputs"].shape) < 4: inputs_old = features["inputs"] features["inputs"] = tf.expand_dims(features["inputs"], 2) if not self.has_input: features["partial_targets"] = tf.to_int64(features["inputs"]) # Save the targets in a var and reassign it after the tf.while loop to avoid # having targets being in a 'while' frame. This ensures targets when used # in metric functions stays in the same frame as other vars. targets_old = features.get("targets", None) target_modality = self._problem_hparams.target_modality def infer_step(recent_output, recent_logits, unused_loss):
tensorflow.to_int64
2,276
import tensorflow as tf with tf.variable_scope("no_tuple"): cell1 = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False) dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp, cell1, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape)
tensorflow.global_variables_initializer
2,277
import tensorflow as tf

  # results in a H2D copy.
  images = tf.contrib.framework.local_variable(images, name='images')
  labels = tf.contrib.framework.local_variable(labels, name='labels')
  # Change to 0-based (don't use background class like Inception does)
  labels -= 1
  if num_compute_devices == 1:
    images_splits = [images]
    labels_splits = [labels]
  else:
    images_splits = tf.split(images, num_compute_devices, 0)
    labels_splits = tf.split(labels, num_compute_devices, 0)
  return nclass, images_splits, labels_splits


def create_config_proto():
  config = tf.ConfigProto()
  config.allow_soft_placement = True
  config.intra_op_parallelism_threads = FLAGS.num_intra_threads
tensorflow.split
2,278
import tensorflow as tf

    with self.test_session(graph=tf.Graph(), use_gpu=False) as sess:
      sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
      sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
      handle0 = add_sparse_to_tensors_map(sp_input0, shared_name="a")
      handle1 = add_sparse_to_tensors_map(sp_input1, shared_name="a")
      self.assertEqual(handle0.get_shape(), ())
      handles_concat = tf.stack([handle0, handle1])

      sp_out = take_many_sparse_from_tensors_map(
          sparse_map_op=handle0.op, sparse_handles=handles_concat)

      combined_indices, combined_values, combined_shape = sess.run(sp_out)
tensorflow.stack
2,279
import tensorflow as tf with tf.variable_scope("input", reuse=reuse): stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") with tf.variable_scope(scope, reuse=reuse): if param_noise: act_f, obs_phs = build_act_with_param_noise(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess, param_noise_filter_func=param_noise_filter_func) else: act_f, obs_phs = build_act(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess, layers=layers) # q network evaluation with tf.variable_scope("step_model", reuse=True, custom_getter=tf_util.outer_scope_getter("step_model")): step_model = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=True, obs_phs=obs_phs, layers=layers) q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/model") # target q network evaluation with tf.variable_scope("target_q_func", reuse=False): target_policy = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=False, layers=layers) target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/target_q_func") # compute estimate of best possible value starting from state at t + 1 double_q_values = None double_obs_ph = target_policy.obs_ph if double_q: with tf.variable_scope("double_q", reuse=True, custom_getter=tf_util.outer_scope_getter("double_q")): double_policy = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=True, layers=layers)
tensorflow.get_variable_scope
2,280
import tensorflow as tf

          slim.conv2d(
              features,
              num_classes,
              kernel_size=kernel_size,
              rate=rate,
              activation_fn=None,
              normalizer_fn=None,
              scope=scope))

      return tf.add_n(branch_logits)


def _split_separable_conv2d(inputs,
                            filters,
                            rate=1,
                            weight_decay=0.00004,
                            depthwise_weights_initializer_stddev=0.33,
                            pointwise_weights_initializer_stddev=0.06,
tensorflow.add_n
2,281
from tensorflow.python.ops import math_ops

  Returns:
    Masked weights if `mask` and `weights` are not `None`, weights equivalent to
    `mask` if `weights` is `None`, and otherwise `weights`.

  Raises:
    ValueError: If `weights` and `mask` are not `None` and have mismatched
      shapes.
  """
  if mask is not None:
    check_ops.assert_type(mask, dtypes.bool)
    if weights is None:
      weights = array_ops.ones_like(mask, dtype=dtypes.float32)
    weights = math_ops.cast(math_ops.logical_not(mask), weights.dtype) * weights

  return weights


def _safe_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.
tensorflow.python.ops.math_ops.logical_not
2,282
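The public tf.logical_not behaves the same as the math_ops.logical_not call above; a minimal sketch of the masking pattern, with made-up values.

import tensorflow as tf

# Keep weights where the mask is False, zero them where it is True.
mask = tf.constant([True, False, True])
weights = tf.ones([3], dtype=tf.float32)
masked = tf.cast(tf.logical_not(mask), tf.float32) * weights  # [0., 1., 0.]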
import tensorflow as tf beta1=beta1).minimize(generator_loss, var_list=en_var) supervised_encoder_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
tensorflow.train.AdamOptimizer
2,283
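A minimal end-to-end sketch of tf.train.AdamOptimizer, assuming TensorFlow 1.x graph mode; the toy quadratic loss is illustrative only.

import tensorflow as tf

x = tf.get_variable("x", initializer=5.0)
loss = tf.square(x - 3.0)
train_op = tf.train.AdamOptimizer(learning_rate=0.1, beta1=0.9).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
        sess.run(train_op)  # x drifts toward 3.0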
import tensorflow as tf "Adding regularization" if lambda_l2_reg > 0 : cell_l2 = tf.reduce_sum([tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables() if not ("noreg" in tf_var.name or "Bias" in tf_var.name)]) Predict_l2 = tf.nn.l2_loss(W) #+ tf.nn.l2_loss(b) total_loss = tf.reduce_sum(loss + lambda_l2_reg* tf.reduce_sum(cell_l2+Predict_l2) ) else: total_loss = loss "Define the train_step"
tensorflow.reduce_sum
2,284
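A standalone sketch of tf.reduce_sum, separate from the regularization code above.

import tensorflow as tf

t = tf.constant([[1.0, 2.0], [3.0, 4.0]])
total = tf.reduce_sum(t)            # 10.0 (all elements)
per_row = tf.reduce_sum(t, axis=1)  # [3.0, 7.0]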
import tensorflow as tf N = tf.cast(tf.shape(X)[0], tf.float32) if y is None: y = silverman_rule_of_thumb(N) A = 1/(N*N*tf.sqrt(y)) B = 2.0/(N*tf.sqrt(y+0.5)) A1 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)/(4*y) B1 = euclidean_norm_squared(X, axis=1)/(2+4*y) return 1/tf.sqrt(1+y) + A*tf.reduce_sum(__phi(A1)) - B*tf.reduce_sum(__phi(B1)) def cw(X, y=None): D = tf.cast(tf.shape(X)[1], tf.float32) N = tf.cast(tf.shape(X)[0], tf.float32)
tensorflow.expand_dims
2,285
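A minimal sketch of the tf.expand_dims broadcasting trick used above to form pairwise differences; the sample matrix is made up.

import tensorflow as tf

X = tf.reshape(tf.cast(tf.range(15), tf.float32), [5, 3])
# [1, 5, 3] minus [5, 1, 3] broadcasts to the full [5, 5, 3] pairwise-difference tensor.
diff = tf.expand_dims(X, 0) - tf.expand_dims(X, 1)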
import tensorflow as tf else: strides = [1, stride, stride, 1] if data_format == 'NHWC' \ else [1, 1, stride, stride] return tf.nn.max_pool(value=inputdata, ksize=kernel, strides=strides, padding=padding, data_format=data_format, name=name) @staticmethod
tensorflow.nn.max_pool
2,286
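A minimal sketch of tf.nn.max_pool with the TensorFlow 1.x keyword names used in the wrapper above (value=, four-element ksize/strides).

import tensorflow as tf

images = tf.zeros([1, 8, 8, 3])  # NHWC
pooled = tf.nn.max_pool(value=images, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                        padding='VALID', data_format='NHWC')  # -> [1, 4, 4, 3]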
import tensorflow as tf if full_cov: fvar = Knn - tf.matmul(A, A, transpose_a=True) fvar = tf.tile(fvar[None, :, :], [num_func, 1, 1]) # R x N x N else: fvar = Knn - tf.reduce_sum(tf.square(A), 0) fvar = tf.tile(fvar[None, :], [num_func, 1]) # R x N # another backsubstitution in the unwhitened case
tensorflow.square
2,287
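A standalone sketch of tf.square combined with the axis-0 reduction used for the marginal variances above; the values are illustrative.

import tensorflow as tf

A = tf.constant([[1.0, -2.0], [3.0, 0.5]])
# Element-wise square, then sum down each column: [10.0, 4.25]
col_sq_sum = tf.reduce_sum(tf.square(A), 0)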
from tensorflow.python.ops import math_ops # the precision denominator. # `precision_per_k` (float64) - Precision at each k. This is the "P_{i}" # term from the formula above. # `relevant_precision_per_k` (float64) - Relevant precisions; i.e., # precisions at all k for which relevance indicator is true. relevant_per_k = _sparse_true_positive_at_k( predictions_idx_per_k, labels_per_k, name='relevant_per_k') tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k') retrieved_per_k = math_ops.cumsum( array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k') precision_per_k = math_ops.div( math_ops.to_double(tp_per_k), math_ops.to_double(retrieved_per_k), name='precision_per_k') relevant_precision_per_k = math_ops.mul( precision_per_k, math_ops.to_double(relevant_per_k), name='relevant_precision_per_k') # Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor. precision_sum = math_ops.reduce_sum( relevant_precision_per_k, reduction_indices=(-1,), name='precision_sum') # Divide by number of relevant items to get average precision. These are # the "num_relevant_items" and "AveP" terms from the formula above. num_relevant_items = math_ops.to_double(num_relevant(labels, k)) return math_ops.div(precision_sum, num_relevant_items, name=scope) def streaming_sparse_average_precision_at_k(predictions,
tensorflow.python.ops.math_ops.to_double
2,288
import tensorflow as tf with tf.variable_scope('rl_controller') as rl_scope: # It creates a `rl_scope` which will be used for ops. pass rl_entropy, label_weights, log_prob = rl_label_weights(rl_scope) loss_entropy, loss_weights, loss_log_prob = get_loss_weights(rl_scope) def gather_init_weights(): inst_weights = tf.stop_gradient(tf.gather(label_weights, src_labels)) return inst_weights inst_weights = gather_init_weights() bs = FLAGS.train_batch_size hw = FLAGS.src_hw inst_weights, indices = tf.nn.top_k( inst_weights, k=bs, sorted=True, ) src_features = tf.reshape(src_features, [ bs * FLAGS.source_train_batch_multiplier, hw, hw, 1, ]) src_features = tf.gather(src_features, indices, axis=0) src_features = tf.stop_gradient(src_features)
tensorflow.nn.top_k
2,289
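A minimal sketch of tf.nn.top_k plus tf.gather, mirroring the instance-weight selection above with made-up scores.

import tensorflow as tf

scores = tf.constant([0.1, 0.7, 0.3, 0.9])
values, indices = tf.nn.top_k(scores, k=2, sorted=True)  # values=[0.9, 0.7], indices=[3, 1]
picked = tf.gather(scores, indices)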
import tensorflow as tf trainnum = tf.placeholder(tf.int32) validnum = tf.placeholder(tf.int32) learnrate = tf.placeholder(tf.float32) def getinputs(path): filename_queue=tf.train.string_input_producer([path]) reader=tf.TFRecordReader() _,serialized_example=reader.read(filename_queue) features=tf.parse_single_example(serialized_example, features={ 'label':tf.FixedLenFeature([], tf.int64), 'img_raw' : tf.FixedLenFeature([], tf.string), }) image=tf.decode_raw(features['img_raw'],tf.uint8) label=tf.cast(features['label'],tf.int32) image=tf.reshape(image,[4096,1]) return image,label def get_batch(image,label,batch_size,crop_size): #print(image.shape) #print(label.shape) images,labels=tf.train.shuffle_batch([image,label],
tensorflow.FixedLenFeature
2,290
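A compact sketch of a tf.FixedLenFeature parsing spec, assuming the TensorFlow 1.x names used above (tf.parse_single_example, tf.decode_raw); the feature keys mirror the excerpt and parse_fn is a hypothetical helper.

import tensorflow as tf

feature_spec = {
    'label': tf.FixedLenFeature([], tf.int64),     # scalar int64 label
    'img_raw': tf.FixedLenFeature([], tf.string),  # raw image bytes
}

def parse_fn(serialized_example):
    parsed = tf.parse_single_example(serialized_example, features=feature_spec)
    image = tf.decode_raw(parsed['img_raw'], tf.uint8)
    label = tf.cast(parsed['label'], tf.int32)
    return image, label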
import tensorflow as tf 'adj_min_batch': tf.placeholder(tf.float32,name='adj_min_batch'), 'sim_min_batch': tf.placeholder(tf.float32,name='sim_min_batch'), 'batch_edge_type_idx': tf.placeholder(tf.int32, shape=(), name='batch_edge_type_idx'), 'batch_row_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_row_edge_type'), 'batch_col_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_col_edge_type'), 'degrees': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0., shape=()), } placeholders.update({ 'adj_mats_%d,%d,%d' % (i, j, k): tf.sparse_placeholder(tf.float32) for i, j in edge_types for k in range(edge_types[i,j])}) placeholders.update({ 'feat_%d' % i: tf.sparse_placeholder(tf.float32) for i, _ in edge_types}) return placeholders ########################################################### test_size = 0.20 val_size = 0.05 num_drugs = 2926 n_drugdrug_rel_types =11
tensorflow.sparse_placeholder
2,291
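A minimal sketch of tf.sparse_placeholder, which exists only in TensorFlow 1.x graph mode; the fed indices and values are made up.

import tensorflow as tf

sp = tf.sparse_placeholder(tf.float32, name='adj')
total = tf.sparse_reduce_sum(sp)

feed = tf.SparseTensorValue(indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=[3, 3])
with tf.Session() as sess:
    print(sess.run(total, feed_dict={sp: feed}))  # 3.0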
import tensorflow as tf return t5_data().SentencePieceVocabulary(sentencepiece_model_file=path, extra_ids=extra_ids) # Makes the function accessible in gin configs, even with all args denylisted. @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def cifar10_no_augmentation_preprocess(dataset, training): del training def cast_image(features, targets): features['image'] = tf.cast(features['image'], tf.float32) / 255.0 return features, targets dataset = dataset.map(cast_image) return dataset def _cifar_augment_image(image): """Image augmentation suitable for CIFAR-10/100.
tensorflow.cast
2,292
import tensorflow as tf pool_width = scale_dimension(model_options.crop_size[1], 1. / model_options.output_stride) image_feature = slim.avg_pool2d( features, [pool_height, pool_width], [pool_height, pool_width], padding='VALID') else: pool_height = tf.shape(features)[1] pool_width = tf.shape(features)[2] image_feature = tf.reduce_mean(features, axis=[1,2])[:, tf.newaxis, tf.newaxis, :] image_feature = slim.conv2d( image_feature, depth, 1, scope=_IMAGE_POOLING_SCOPE) image_feature = tf.image.resize_bilinear( image_feature, [pool_height, pool_width], align_corners=True) if is_training: image_feature.set_shape([None, pool_height, pool_width, depth]) branch_logits.append(image_feature)
tensorflow.reduce_mean
2,293
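A standalone sketch of the global-average-pooling pattern above, built on tf.reduce_mean; the feature-map shape is illustrative.

import tensorflow as tf

features = tf.ones([2, 7, 7, 16])                       # NHWC feature map
pooled = tf.reduce_mean(features, axis=[1, 2])          # average over H and W -> [2, 16]
pooled = pooled[:, tf.newaxis, tf.newaxis, :]           # restore spatial dims -> [2, 1, 1, 16]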
import tensorflow as tf #else: # self.new_logits = self.logits self.grads = grads self.grads_norm = tf.global_norm(grads) #see https://www.tensorflow.org/api_docs/python/tf/train/Optimizer#processing_gradients_before_applying_them #if clipping_method == "clip_by_global_norm":
tensorflow.global_norm
2,294
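A minimal sketch of tf.global_norm (the 1.x top-level name used above) over a list of gradient-like tensors.

import tensorflow as tf

g1 = tf.constant([3.0, 4.0])
g2 = tf.constant([[1.0], [2.0]])
norm = tf.global_norm([g1, g2])  # sqrt(9 + 16 + 1 + 4) = sqrt(30)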
import tensorflow as tf h, w, _ = image.shape grid = 8 image = image[:h // grid * grid, :w // grid * grid, :] mask = mask[:h // grid * grid, :w // grid * grid, :] image = np.expand_dims(image, 0) mask = np.expand_dims(mask, 0) input_image = np.concatenate([image, mask], axis=2) sess_config = tf.ConfigProto() sess_config.gpu_options.allow_growth = True with tf.Session(config=sess_config) as sess: input_image = tf.constant(input_image, dtype=tf.float32) output = model.build_server_graph(FLAGS, input_image) output = (output + 1.) * 127.5 output = tf.reverse(output, [-1]) output = tf.saturate_cast(output, tf.uint8) # load pretrained model vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) assign_ops = [] for var in vars_list: vname = var.name from_name = vname var_value = tf.contrib.framework.load_variable(checkpoint_dir, from_name) assign_ops.append(tf.assign(var, var_value))
tensorflow.constant
2,295
from tensorflow.python.ops import math_ops thresh_tiled) pred_is_neg = math_ops.logical_not(pred_is_pos) # Tile labels by number of thresholds label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1]) label_is_neg = math_ops.logical_not(label_is_pos) true_positives = _create_local('true_positives', shape=[num_thresholds]) false_negatives = _create_local('false_negatives', shape=[num_thresholds]) true_negatives = _create_local('true_negatives', shape=[num_thresholds]) false_positives = _create_local('false_positives', shape=[num_thresholds]) is_true_positive = math_ops.to_float( math_ops.logical_and(label_is_pos, pred_is_pos)) is_false_negative = math_ops.to_float( math_ops.logical_and(label_is_pos, pred_is_neg)) is_false_positive = math_ops.to_float( math_ops.logical_and(label_is_neg, pred_is_pos)) is_true_negative = math_ops.to_float( math_ops.logical_and(label_is_neg, pred_is_neg)) if weights is not None: weights = math_ops.to_float(weights) weights_tiled = array_ops.tile(array_ops.reshape( _broadcast_weights(weights, predictions), [1, -1]), [num_thresholds, 1]) thresh_tiled.get_shape().assert_is_compatible_with(
tensorflow.python.ops.math_ops.logical_and
2,296
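The public tf.logical_and mirrors the math_ops.logical_and calls above; a minimal sketch of counting true positives from two boolean vectors, with made-up values.

import tensorflow as tf

label_is_pos = tf.constant([True, True, False, False])
pred_is_pos = tf.constant([True, False, True, False])
true_positives = tf.reduce_sum(
    tf.cast(tf.logical_and(label_is_pos, pred_is_pos), tf.float32))  # 1.0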
import tensorflow as tf # Add entropy coefficient optimization operation if needed if ent_coef_loss is not None: with tf.control_dependencies([train_values_op]): ent_coef_op = entropy_optimizer.minimize(ent_coef_loss, var_list=self.log_ent_coef) self.infos_names += ['ent_coef_loss', 'ent_coef'] self.step_ops += [ent_coef_op, ent_coef_loss, self.ent_coef] # Monitor losses and entropy in tensorboard tf.summary.scalar('policy_loss', policy_loss) tf.summary.scalar('qf1_loss', qf1_loss) tf.summary.scalar('qf2_loss', qf2_loss) tf.summary.scalar('value_loss', value_loss) tf.summary.scalar("Imitation_loss",self.actor_loss_di) tf.summary.scalar('entropy', self.entropy) tf.summary.scalar('importance weight',tf.reduce_mean(self.weight_ph)) if ent_coef_loss is not None: tf.summary.scalar('ent_coef_loss', ent_coef_loss) tf.summary.scalar('ent_coef', self.ent_coef) tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph))
tensorflow.summary.scalar
2,297
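A minimal sketch of tf.summary.scalar in TensorFlow 1.x graph mode, as used above; the log directory is a placeholder.

import tensorflow as tf

loss = tf.constant(0.25)
tf.summary.scalar('policy_loss', loss)
merged = tf.summary.merge_all()

with tf.Session() as sess:
    writer = tf.summary.FileWriter('/tmp/tf_logs', sess.graph)
    writer.add_summary(sess.run(merged), global_step=0)
    writer.close()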
import tensorflow as tf num_features = tensor.get_shape()[-1].value weight_init = tf.truncated_normal([num_features, size], stddev=0.01) if bias_init is None: bias_init = tf.zeros([size]) with tf.name_scope(name, 'fully_connected', [tensor]): w = tf.Variable(weight_init, name='w', dtype=tf.float32) b = tf.Variable(bias_init, name='b', dtype=tf.float32) return tf.nn.xw_plus_b(tensor, w, b) def weight_decay(penalty_type, penalty):
tensorflow.Variable
2,298
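A compact sketch of the fully connected layer above, built from raw tf.Variable weights; assumes TensorFlow 1.x names (tf.truncated_normal) and illustrative shapes.

import tensorflow as tf

w = tf.Variable(tf.truncated_normal([4, 2], stddev=0.01), name='w', dtype=tf.float32)
b = tf.Variable(tf.zeros([2]), name='b', dtype=tf.float32)
x = tf.ones([3, 4])
y = tf.nn.xw_plus_b(x, w, b)  # x @ w + b, shape [3, 2]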
import tensorflow as tf data = self.dropout_layer(data) data = self.layer_normalization_layer(data) with tf.variable_scope("task_dependent"): logits = self.dense_layer(data, num_tags) crf_params = tf.get_variable("crf", [num_tags, num_tags], dtype=tf.float32) pred_ids = self.crf_decode_layer(logits, crf_params, nwords) pred_strings = self.id2tag(pred_ids, name="predict")
tensorflow.get_variable
2,299
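A minimal sketch of tf.get_variable under a variable scope, assuming TensorFlow 1.x graph mode as in the excerpt; num_tags is illustrative.

import tensorflow as tf

num_tags = 5
with tf.variable_scope("task_dependent"):
    crf_params = tf.get_variable("crf", [num_tags, num_tags], dtype=tf.float32)

# Re-entering the scope with reuse=True returns the same variable instead of creating a new one.
with tf.variable_scope("task_dependent", reuse=True):
    same_params = tf.get_variable("crf", [num_tags, num_tags], dtype=tf.float32)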