Columns:
seed: string, length 25 to 2.89k
seed_api: string, length 14 to 102
index: int64, range 0 to 14.8k
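Each row below pairs a code snippet (seed) with the fully qualified TensorFlow API it exercises (seed_api) and a running row index (index). As a rough illustration of that record layout, here is a minimal, hypothetical Python sketch; the field names come from the column listing above, while the record class, helper function, and sample values are invented for illustration and are not part of the dataset.

    from dataclasses import dataclass
    from typing import List

    @dataclass
    class SeedRecord:
        seed: str      # code snippet exercising one TensorFlow API (25 to 2.89k characters)
        seed_api: str  # fully qualified API name, e.g. "tensorflow.reshape"
        index: int     # row index in the dataset (0 to 14.8k)

    def records_for_api(records: List[SeedRecord], api_name: str) -> List[SeedRecord]:
        """Return every record whose seed exercises the given API."""
        return [r for r in records if r.seed_api == api_name]

    # Hypothetical rows shaped like the ones listed below.
    rows = [
        SeedRecord(seed="import tensorflow as tf\nx = tf.reshape(y, [-1, 1])",
                   seed_api="tensorflow.reshape", index=0),
        SeedRecord(seed="import tensorflow as tf\nz = tf.concat([a, b], axis=1)",
                   seed_api="tensorflow.concat", index=1),
    ]
    print(len(records_for_api(rows, "tensorflow.reshape")))  # prints 1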
import tensorflow as tf with tf.name_scope("CRF_log_likelihood"): log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
tensorflow.contrib.crf.crf_log_likelihood
600
import tensorflow as tf self.data_format = data_format if( data_format =='NCHW' ): self.strides = [1, 1, d_h, d_w] else: self.strides = [1, d_h, d_w, 1] def __call__(self,input_var,name=None,**xargs): shapes = tf.shape(input_var) if( self.data_format == 'NCHW' ): shapes = tf.stack([shapes[0],tf.shape(self.b)[0],shapes[2]*self.strides[2],shapes[3]*self.strides[3]]) else: shapes = tf.stack([shapes[0],shapes[1]*self.strides[1],shapes[2]*self.strides[2],tf.shape(self.b)[0]]) return tf.nn.bias_add( tf.nn.conv2d_transpose(input_var,self.w,output_shape=shapes, data_format=self.data_format, strides=self.strides,padding='SAME'), self.b,data_format=self.data_format,name=name) def get_variables(self):
tensorflow.shape
601
import tensorflow as tf smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2))) # Smooth L1 function (slightly different from the paper) in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign + (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign) out_loss_box = bbox_outside_weights * in_loss_box loss_box = tf.reduce_mean(tf.reduce_sum( out_loss_box, axis=dim )) return loss_box
tensorflow.reduce_sum
602
import tensorflow as tf train_op = optimizer.apply_gradients(zip(grads, tf_sparse_demo.trainable_variables)) with tf.control_dependencies([train_op]):
tensorflow.control_dependencies
603
import tensorflow as tf with tf.variable_scope(args.name): model = HredModel(data, args, embed) model.print_parameters() latest_dir = '%s/checkpoint_latest' % args.model_dir best_dir = '%s/checkpoint_best' % args.model_dir if tf.train.get_checkpoint_state(latest_dir) and args.restore == "last": print("Reading model parameters from %s" % latest_dir) model.latest_saver.restore(sess, tf.train.latest_checkpoint(latest_dir)) else: if tf.train.get_checkpoint_state(best_dir) and args.restore == "best":
tensorflow.train.get_checkpoint_state
604
import tensorflow as tf image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image") #debug annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1], name="annotation") # annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="annotation") pred_annotation, logits = inference(image, keep_probability) tf.summary.image("input_image", image, max_outputs=2) tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2) tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2) #debug loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=tf.squeeze(annotation, squeeze_dims=[3]), name="entropy"))) # loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, # labels=annotation, # name="entropy"))) loss_summary = tf.summary.scalar("entropy", loss) trainable_var = tf.trainable_variables() if FLAGS.debug: for var in trainable_var: utils.add_to_regularization_and_summary(var) train_op = train(loss, trainable_var)
tensorflow.squeeze
605
import tensorflow as tf """ if param_noise_filter_func is None: param_noise_filter_func = default_param_noise_filter with tf.variable_scope(scope, reuse=reuse): observations_ph = make_obs_ph("observation") stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold") update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale") reset_ph = tf.placeholder(tf.bool, (), name="reset") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
tensorflow.placeholder
606
import tensorflow as tf global_step_t = tf.reshape(global_step, [1]) total_loss_t = tf.reshape(total_loss, [1]) total_rpn_loss_t = tf.reshape(total_rpn_loss, [1]) rpn_score_loss_t = tf.reshape(rpn_score_loss, [1]) rpn_box_loss_t = tf.reshape(rpn_box_loss, [1]) total_fast_rcnn_loss_t = tf.reshape(total_fast_rcnn_loss, [1]) fast_rcnn_class_loss_t = tf.reshape(fast_rcnn_class_loss, [1]) fast_rcnn_box_loss_t = tf.reshape(fast_rcnn_box_loss, [1]) mask_loss_t = tf.reshape(mask_loss, [1]) learning_rate_t = tf.reshape(learning_rate, [1]) host_call = (host_call_fn, [global_step_t, total_loss_t, total_rpn_loss_t, rpn_score_loss_t, rpn_box_loss_t, total_fast_rcnn_loss_t, fast_rcnn_class_loss_t, fast_rcnn_box_loss_t, mask_loss_t, learning_rate_t]) else: train_op = None scaffold_fn = None
tensorflow.reshape
607
import tensorflow as tf def create_final_discriminator_network(self, X, params): """Creates final discriminator network. Args: X: tensor, input image to discriminator. params: dict, user passed parameters. Returns: Final logits tensor of discriminator. """ print_obj("\ncreate_final_discriminator_network", "X", X) with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE): # Only need the last fromRGB conv layer. from_rgb_conv_layer = self.from_rgb_conv_layers[-1] # Reverse order of blocks. reversed_blocks = self.conv_layer_blocks[::-1] # Flatten list of lists block layers into list. block_layers = [ item for sublist in reversed_blocks for item in sublist ]
tensorflow.variable_scope
608
import tensorflow as tf n_row,n_col,n_channel = x.shape n_patch = n_row*n_col // (self.size**2) patches = tf.image.extract_patches(tf.expand_dims(x,0),sizes=[1,self.size,self.size,1],strides=[1,self.size,self.size,1],rates=[1, 1, 1, 1],padding='VALID') patches = tf.reshape(patches,[n_patch,self.size,self.size,n_channel]) patches = tf.random.shuffle(patches) # rand_idx = tf.reshape(tf.random.shuffle(tf.range(0,n_patch)),[n_patch])
tensorflow.reshape
609
from tensorflow.python.ops import control_flow_ops old_value = array.value() assign_op = state_ops.assign(array, new_value, validate_shape=False) with ops.control_dependencies([assign_op]): copy_op = array[:size].assign(old_value[:size]) # return value needs to be the same dtype as no_op() for cond with ops.control_dependencies([copy_op]): return control_flow_ops.no_op() new_size = size + batch_size array_size = array_ops.shape_internal(array, optimize=False)[0] maybe_reallocate_op = control_flow_ops.cond( new_size > array_size, reallocate, control_flow_ops.no_op)
tensorflow.python.ops.control_flow_ops.no_op
610
import tensorflow as tf target_one_hot_labels = tf.one_hot( tf.cast(labels['target'], tf.int64), target_num_classes) with tf.variable_scope('rl_controller') as rl_scope: # It creates a `rl_scope` which will be used for ops. pass rl_entropy, label_weights, log_prob = rl_label_weights(rl_scope) loss_entropy, loss_weights, loss_log_prob = get_loss_weights(rl_scope) def gather_init_weights(): inst_weights = tf.stop_gradient(tf.gather(label_weights, src_labels)) return inst_weights inst_weights = gather_init_weights() bs = FLAGS.train_batch_size hw = FLAGS.src_hw inst_weights, indices = tf.nn.top_k( inst_weights, k=bs, sorted=True,
tensorflow.gather
611
import tensorflow as tf return tf.reshape(tf.concat(axis=1, values=h), [-1, nh]) else: return tf.reshape(tf.stack(values=h, axis=1), [-1]) def lstm(xs, ms, s, scope, nh, init_scale=1.0): nbatch, nin = [v.value for v in xs[0].get_shape()] with tf.variable_scope(scope): wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale)) wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale)) b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0)) c, h = tf.split(axis=1, num_or_size_splits=2, value=s) for idx, (x, m) in enumerate(zip(xs, ms)): c = c*(1-m) h = h*(1-m) z = tf.matmul(x, wx) + tf.matmul(h, wh) + b i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z) i = tf.nn.sigmoid(i) f = tf.nn.sigmoid(f) o = tf.nn.sigmoid(o) u = tf.tanh(u) c = f*c + i*u h = o*tf.tanh(c) xs[idx] = h s = tf.concat(axis=1, values=[c, h]) return xs, s def _ln(x, g, b, e=1e-5, axes=[1]): u, s = tf.nn.moments(x, axes=axes, keep_dims=True) x = (x-u)/tf.sqrt(s+e)
tensorflow.matmul
612
import tensorflow as tf self._gamma = tf.get_variable( self.GAMMA, shape=self._mean_shape, initializer=self._initializers[self.GAMMA]) else: self._gamma = None out = tf.nn.batch_normalization( input_batch, mean, variance, self._beta, self._gamma, self._eps,
tensorflow.nn.batch_normalization
613
import tensorflow as tf """ with tf.name_scope(name, 'softmax_N', [tensor]): exp_tensor = tf.exp(tensor) reduction_indices = [tensor.get_shape().ndims - 1]
tensorflow.exp
614
import tensorflow as tf reg = None with tf.variable_scope(name, reuse=reuse): layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg) layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg) lstm_a = tf.nn.rnn_cell.LSTMCell(num_units=256) lstm_a = tf.nn.rnn_cell.DropoutWrapper(lstm_a, output_keep_prob=self.keep_prob) state_init_a = lstm_a.zero_state(batch_size=batch_size, dtype=tf.float32) lstm_ain = tf.expand_dims(layer_a2, axis=1) out_a, state_final_a = tf.nn.dynamic_rnn(cell=lstm_a, inputs=lstm_ain, initial_state=state_init_a) cell_out_a = tf.reshape(out_a, [-1, 256]) mu = tf.layers.dense(cell_out_a, self.a_dim, tf.nn.tanh, kernel_regularizer=reg) sigma = tf.layers.dense(cell_out_a, self.a_dim, tf.nn.softplus, kernel_regularizer=reg) # sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5)) sigma = tf.clip_by_value(sigma, 0.0, 1.0) norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma) params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name) return norm_dist, params, state_init_a, state_final_a def build_cnet(self, state_in, name, reuse=False, batch_size=64): reg = tf.contrib.layers.l2_regularizer(1e-3)
tensorflow.layers.dense
615
import tensorflow as tf if type(gt_boxes) is np.ndarray: gt_boxes = tf.convert_to_tensor(gt_boxes)
tensorflow.convert_to_tensor
616
import tensorflow as tf b = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) v = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) with tf.name_scope('v'): # Applying fully connected layer with non-linear activation to each of the B*T timestamps; # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size tmp1 = tf.tensordot(facts, w1, axes=1) tmp2 = tf.tensordot(query, w2, axes=1) tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]]) tmp = tf.tanh((tmp1 + tmp2) + b) # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape key_masks = mask # [B, 1, T] # key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1) v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T] alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape #output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1) output = facts * tf.expand_dims(alphas, -1) output = tf.reshape(output, tf.shape(facts)) # output = output / (facts.get_shape().as_list()[-1] ** 0.5)
tensorflow.tensordot
617
import tensorflow as tf return tf.multiply(tf.nn.l2_loss(x), weight_decay) else: return None if weight_decay is not None: reg = _reg else: reg = None kernel = tf.get_variable( 'w', ksize, initializer=init, regularizer=reg, dtype=dtype, trainable=True) return tf.nn.conv2d( x, kernel, strides, padding, data_format=data_format, use_cudnn_on_gpu=True) def _bottleneck_residual(x, ksize_list, strides, padding, is_training, data_format='NHWC', no_activation=False): with tf.variable_scope('sub1'):
tensorflow.nn.conv2d
618
import tensorflow as tf num = (1 - self.alpha) * dxt + tf.tensordot(self.alpha * dxt , tf.transpose( tf.matmul(tf.abs(self.W_rec) * self.rec_Connectivity,self.Dale_rec)), axes=1) * \ tf.where(tf.greater(xt, 0), tf.ones_like(xt), tf.zeros_like(xt)) denom = dxt # sum over hidden units
tensorflow.zeros_like
619
import tensorflow as tf else: mean_loss = tf.reduce_mean(all_shards) losses[loss_name] = mean_loss return losses def summarize_features(features, num_shards=1): with tf.name_scope("input_stats"): for (k, v) in six.iteritems(features): if isinstance(v, tf.Tensor) and v.get_shape().ndims > 1: tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards) tf.summary.scalar("%s_length" % k, tf.shape(v)[1]) nonpadding = tf.to_float(tf.not_equal(v, 0)) nonpadding_tokens = tf.reduce_sum(nonpadding) tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens) tf.summary.scalar("%s_nonpadding_fraction" % k, tf.reduce_mean(nonpadding)) _already_logged = set() def _eager_log(level, *args): if context.in_eager_mode() and args in _already_logged: return
tensorflow.not_equal
620
import tensorflow as tf name="orig_task_optimal", data=orig_opt_frac, step=global_step) # How often is the relabelled goal optimal? # The relabel_indices are [B, 1], so we need to remove the extra dim. relabel_is_opt = tf.squeeze(relabel_indices) == orig_indices relabel_opt_frac = tf.reduce_mean(tf.cast(relabel_is_opt, tf.float32)) tf.compat.v2.summary.scalar( name="relabel_task_optimal", data=relabel_opt_frac, step=global_step) # What are the average Q values of the original tasks? if batch_size == num_tasks: indices = tf.transpose(tf.stack([orig_indices, orig_indices], axis=0))
tensorflow.compat.v2.summary.scalar
621
import tensorflow as tf FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string('ws_save_path', './models_ws/model.ckpt', 'WS: model\'s save path') tf.app.flags.DEFINE_float('ws_prune_ratio', 0.75, 'WS: target pruning ratio') tf.app.flags.DEFINE_string('ws_prune_ratio_prtl', 'optimal', 'WS: pruning ratio protocol (\'uniform\' | \'heurist\' | \'optimal\')') tf.app.flags.DEFINE_integer('ws_nb_rlouts', 200, 'WS: # of roll-outs for the RL agent') tf.app.flags.DEFINE_integer('ws_nb_rlouts_min', 50, 'WS: minimal # of roll-outs for the RL agent to start training') tf.app.flags.DEFINE_string('ws_reward_type', 'single-obj', 'WS: reward type (\'single-obj\' OR \'multi-obj\')') tf.app.flags.DEFINE_float('ws_lrn_rate_rg', 3e-2, 'WS: learning rate for layerwise regression')
tensorflow.app.flags.DEFINE_integer
622
import tensorflow as tf FLAGS = tf.app.flags.FLAGS #--model_scope=blouse --checkpoint_path=./logs/all --data_format=channels_last --batch_size=1 def input_pipeline(is_training=True, model_scope=FLAGS.model_scope, num_epochs=FLAGS.epochs_per_eval): if 'all' in model_scope: lnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.global_norm_key, dtype=tf.int64), tf.constant(config.global_norm_lvalues, dtype=tf.int64)), 0) rnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.global_norm_key, dtype=tf.int64), tf.constant(config.global_norm_rvalues, dtype=tf.int64)), 1) else: lnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.local_norm_key, dtype=tf.int64), tf.constant(config.local_norm_lvalues, dtype=tf.int64)), 0) rnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.local_norm_key, dtype=tf.int64), tf.constant(config.local_norm_rvalues, dtype=tf.int64)), 1) preprocessing_fn = lambda org_image, classid, shape, key_x, key_y, key_v: preprocessing.preprocess_image(org_image, classid, shape, FLAGS.train_image_size, FLAGS.train_image_size, key_x, key_y, key_v, (lnorm_table, rnorm_table), is_training=is_training, data_format=('NCHW' if FLAGS.data_format=='channels_first' else 'NHWC'), category=(model_scope if 'all' not in model_scope else '*'), bbox_border=FLAGS.bbox_border, heatmap_sigma=FLAGS.heatmap_sigma, heatmap_size=FLAGS.heatmap_size) images, shape, classid, targets, key_v, isvalid, norm_value = dataset.slim_get_split(FLAGS.data_dir, preprocessing_fn, (FLAGS.xt_batch_size if 'seresnext50' in FLAGS.backbone else FLAGS.batch_size), FLAGS.num_readers, FLAGS.num_preprocessing_threads, num_epochs=num_epochs, is_training=is_training, file_pattern=FLAGS.dataset_name, category=(model_scope if 'all' not in model_scope else '*'), reader=None) return images, {'targets': targets, 'key_v': key_v, 'shape': shape, 'classid': classid, 'isvalid': isvalid, 'norm_value': norm_value}
tensorflow.constant
623
import tensorflow as tf loss_base_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_1, logits=output_1)) loss_base_2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_2, logits=output_2)) reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) loss_total = loss_base_1 + loss_base_2 + tf.reduce_sum(reg_losses) with tf.variable_scope("evaluation"): accuracy_1 = tf.reduce_mean(tf.cast(tf.equal( tf.argmax(output_1, axis=-1), tf.argmax(y_1, axis=-1)), tf.float32), name="accuracy_1") accuracy_2 = tf.reduce_mean(tf.cast(tf.equal( tf.argmax(output_2, axis=-1), tf.argmax(y_2, axis=-1)), tf.float32), name="accuracy_2") accuracy = tf.divide(accuracy_1 + accuracy_2, 2.0, name="accuracy") with tf.variable_scope("train"): global_step = tf.get_variable("global_step", shape=(), dtype=tf.int32, trainable=False) train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss_total, global_step=global_step) with tf.variable_scope("summary"): summary_loss_total = tf.summary.scalar("loss_total", loss_total) summary_accuracy_test = tf.summary.scalar("accuracy_test", accuracy) summary_accuracy_train = tf.summary.scalar("accuracy_train", accuracy) # standardization train_X_reshaped = train_X.reshape([train_X.shape[0], -1]) train_X_means = np.mean(train_X_reshaped, axis=0, keepdims=True) train_X_stds = np.std(train_X_reshaped, axis=0, keepdims=True) def standardization(x): x_reshaped = x.reshape([x.shape[0], -1])
tensorflow.get_variable
624
import tensorflow as tf output_weight = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02) ) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer() ) with tf.variable_scope("loss"): if is_training: output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) output_layer = tf.reshape(output_layer, [-1, hidden_size]) logits = tf.matmul(output_layer, output_weight, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) logits = tf.reshape(logits, [-1, FLAGS.max_seq_length, 11]) log_probs = tf.nn.log_softmax(logits, axis=-1) # labels = tf.cast(labels,dtype=tf.float32) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_sum(per_example_loss) return (loss, per_example_loss, logits) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): def model_fn(features, labels, mode, params): tf.logging.info("*** Features ***") for name in sorted(features.keys()):
tensorflow.nn.log_softmax
625
import tensorflow as tf corrects = tf.equal(preds, classes) return (probs, corrects) def _compute_loss(self, logits, aux_logits_list, classes, **knobs): reg_decay = knobs['reg_decay'] aux_loss_mul = knobs['aux_loss_mul'] # Multiplier for auxiliary loss # Compute sparse softmax cross entropy loss from logits & labels log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=classes) loss = tf.reduce_mean(log_probs) self._mark_for_monitoring('loss', loss) # Add regularization loss reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) reg_loss = reg_decay * tf.add_n(reg_losses) self._mark_for_monitoring('reg_loss', reg_loss)
tensorflow.nn.sparse_softmax_cross_entropy_with_logits
626
from tensorflow.python.framework import tensor_shape @ops.RegisterShape("UnsortedSegmentSum") def _UnsortedSegmentSumShape(op): """Shape function for UnsortedSegmentSum.""" data_shape = op.inputs[0].get_shape() segment_ids_shape = op.inputs[1].get_shape() mid = segment_ids_shape.ndims if mid is None: return [tensor_shape.unknown_shape()] else: num_segments = tensor_util.ConstantValue(op.inputs[2]) return [tensor_shape.TensorShape([num_segments]).concatenate( data_shape[mid:])]
tensorflow.python.framework.tensor_shape.unknown_shape
627
import tensorflow as tf self.writer = tf.summary.FileWriter(summary_dir) tf.summary.scalar('Loss/Policy', loss_pg) tf.summary.scalar('Loss/Value', loss_vf) tf.summary.scalar('Loss/Entropy', loss_entropy) tf.summary.scalar('Loss/Total', loss) tf.summary.scalar('Var/Epsilon', epsilon_decay)
tensorflow.summary.scalar
628
import tensorflow as tf end_logits = tf.layers.dense( end_logits, 1, kernel_initializer=initializer, name="dense_1") end_logits = tf.reshape(end_logits, [seq_len, -1, FLAGS.start_n_top]) end_logits = tf.transpose(end_logits, [1, 2, 0]) end_logits_masked = end_logits * ( 1 - p_mask[:, None]) - 1e30 * p_mask[:, None] end_log_probs = tf.nn.log_softmax(end_logits_masked, -1) end_top_log_probs, end_top_index = tf.nn.top_k( end_log_probs, k=FLAGS.end_n_top) end_top_log_probs = tf.reshape( end_top_log_probs, [-1, FLAGS.start_n_top * FLAGS.end_n_top]) end_top_index = tf.reshape( end_top_index, [-1, FLAGS.start_n_top * FLAGS.end_n_top])
tensorflow.nn.log_softmax
629
import tensorflow as tf target_v_ph = tf.math.reduce_max(target_q_ph, axis = 1) backup_ph = self.rew_t_ph + (1 - self.done_mask_ph) * (gamma * target_v_ph) self.total_error = tf.math.reduce_mean(huber_loss(q_func_ph - backup_ph)) q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func') target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func') # construct optimization op (with gradient clipping) self.learning_rate = tf.placeholder(tf.float32, (), name="learning_rate") optimizer = self.optimizer_spec.constructor(learning_rate=self.learning_rate, **self.optimizer_spec.kwargs) self.train_fn = minimize_and_clip(optimizer, self.total_error, var_list=q_func_vars, clip_val=grad_norm_clipping) # update_target_fn will be called periodically to copy Q network to target Q network update_target_fn = [] for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name), sorted(target_q_func_vars, key=lambda v: v.name)): update_target_fn.append(var_target.assign(var)) self.update_target_fn = tf.group(*update_target_fn)
tensorflow.placeholder
630
import tensorflow as tf "NaN in moving variance.")) with tf.name_scope(name, "IncrementTime", [step]): with ops.colocate_with(step): new_step = tf.assign_add(step, 1.) used_var += 0. * new_mean * new_var * new_step used_var += epsilon
tensorflow.assign_add
631
import tensorflow as tf "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.", ) tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.", ) tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.", ) tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.", )
tensorflow.flags.DEFINE_string
632
import tensorflow as tf output_h4 = decode(trans_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0) scope.reuse_variables() truthoutput_h4 = decode(tgtimg_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0) self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3 print(tgtimg_z.get_shape()) self.out = output_h4 self.out2 = truthoutput_h4 print(self.out.get_shape()) self.recon1 = tf.nn.l2_loss(tgtimg - self.out) self.recon2 = tf.nn.l2_loss(tgtimg - self.out2) if ablation_type == "None": self.loss = self.recon1 + self.recon2 + self.simloss elif ablation_type == "L2": self.loss = self.recon1 + self.recon2 elif ablation_type == "L2L3": self.loss = self.recon1 elif ablation_type == "L1":
tensorflow.nn.l2_loss
633
import tensorflow as tf token = tf.constant(False) with tf.control_dependencies(enqueue_after_list): for i, q in enumerate(sync_queues): if i == self.task_index: queue_ops.append(tf.no_op()) else: queue_ops.append(q.enqueue(token)) # Drain tokens off queue for this worker, one for each other worker. queue_ops.append( sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1)) return tf.group(*queue_ops) def store_benchmarks(names_to_values): if FLAGS.result_storage: benchmark_storage.store_benchmark(names_to_values, FLAGS.result_storage) def main(_): if FLAGS.winograd_nonfused: os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' else:
tensorflow.group
634
import tensorflow as tf return sampled with argscope([Conv2D, FullyConnected], nl=tf.nn.relu): with tf.variable_scope('STN1'): sampled1 = get_stn(image) with tf.variable_scope('STN2'): sampled2 = get_stn(image) # For visualization in tensorboard with tf.name_scope('visualization'): padded1 = tf.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]]) padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]]) img_orig = tf.concat([image[:, :, :, 0], image[:, :, :, 1]], 1) # b x 2h x w transform1 = tf.concat([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1) transform2 = tf.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1) stacked = tf.concat([img_orig, transform1, transform2], 2, 'viz') tf.summary.image('visualize', tf.expand_dims(stacked, -1), max_outputs=30) sampled = tf.concat([sampled1, sampled2], 3, 'sampled_concat')
tensorflow.pad
635
import tensorflow as tf warmup_steps = self._params.warmup_steps init_lr = self._params.init_learning_rate lr_levels = self._params.learning_rate_levels lr_steps = self._params.learning_rate_steps linear_warmup = ( warmup_lr + tf.cast(global_step, dtype=tf.float32) / warmup_steps * (init_lr - warmup_lr)) learning_rate = tf.where(global_step < warmup_steps, linear_warmup, init_lr) for next_learning_rate, start_step in zip(lr_levels, lr_steps): learning_rate = tf.where(global_step >= start_step, next_learning_rate, learning_rate) return learning_rate def get_config(self): return {'_params': self._params.as_dict()} class CosineLearningRateWithLinearWarmup( tf.keras.optimizers.schedules.LearningRateSchedule):
tensorflow.where
636
import tensorflow as tf self.assertFalse(has_nan_or_inf.eval()) self.assertEqual(1.0, grad_scale.eval()) # The final gradient must be finite. self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval()) self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval()) def testScaleGradientsInf(self): FLAGS.enable_check_numerics = False
tensorflow.is_finite
637
import tensorflow as tf with tf.name_scope(name, "softmax_loss",[output]): label_dis = labels / tf.reduce_sum(labels, 1, keep_dims=True) loss = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=label_dis) * tf.reduce_sum(labels, 1) return tf.reduce_sum(loss) / tf.reduce_sum(labels)
tensorflow.nn.softmax_cross_entropy_with_logits
638
import tensorflow as tf # H(x) = Sum[p(x)*log(p(x))] self.entropy = - 0.01 * tf.reduce_sum(self.policy * tf.log(tf.clip_by_value(self.policy,1e-10,1.0))) self.policy_loss = - 0.2 * tf.reduce_sum( tf.log(tf.clip_by_value(self.policy[:,0],1e-15,1.0)) * self.advantages + tf.log(tf.clip_by_value(self.policy[:,1],1e-15,1.0)) * self.advantages) #For Normal RL Part
tensorflow.clip_by_value
639
from tensorflow.python.ops import math_ops Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. """ with variable_scope.variable_scope( name, 'true_positives', [predictions, labels]): predictions.get_shape().assert_is_compatible_with(labels.get_shape()) is_true_positive = math_ops.logical_and(math_ops.equal(labels, 1), math_ops.equal(predictions, 1)) return _count_condition(is_true_positive, weights, metrics_collections, updates_collections) def _streaming_false_positives(predictions, labels, weights=None, metrics_collections=None, updates_collections=None, name=None): """Sum the weights of false positives. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
tensorflow.python.ops.math_ops.equal
640
import tensorflow as tf filter_depths=[256, 256, 1024], kernel_size=3) x = tf.keras.layers.AveragePooling2D(pool_size=7, strides=1, padding="valid", name="pool")(x) x = tf.reshape(x, shape=(-1, 1024)) self.logits = self.__fully_connected(name="fc_nsfw", inputs=x, num_outputs=2) self.predictions = tf.nn.softmax(self.logits, name="predictions") """Get weights for layer with given name """ def __get_weights(self, layer_name, field_name): if not layer_name in self.weights: raise ValueError("No weights for layer named '{}' found" .format(layer_name)) w = self.weights[layer_name]
tensorflow.nn.softmax
641
import tensorflow as tf return tf.estimator.EstimatorSpec(
tensorflow.estimator.EstimatorSpec
642
import tensorflow as tf with tf.variable_scope("end_logits"): if is_training: # during training, compute the end logits based on the # ground truth of the start position start_positions = tf.reshape(features["start_positions"], [-1]) start_index = tf.one_hot(start_positions, depth=seq_len, axis=-1, dtype=tf.float32) start_features = tf.einsum("lbh,bl->bh", output, start_index) start_features = tf.tile(start_features[None], [seq_len, 1, 1]) end_logits = tf.layers.dense( tf.concat([output, start_features], axis=-1), xlnet_config.d_model,
tensorflow.one_hot
643
import tensorflow as tf # Concatenationation of above layers, followed by FC layer concat = tf.concat([flat1b, loc_layer2],1) # goal_layer2
tensorflow.concat
644
import tensorflow as tf def get_params(self): """See base class.""" return {}, {} def encode(self, x, encode_params): """See base class.""" del encode_params # Unused. signs = tf.sign(x) abs_vals = tf.abs(x) ints = tf.floor(abs_vals) floats = abs_vals - ints return { self.ENCODED_SIGNS_KEY: signs, self.ENCODED_INTS_KEY: ints,
tensorflow.sign
645
import tensorflow as tf if (len(test_data.encoded_x) > 1 or test_data.decoded_x is not list(test_data.encoded_x.values())[0]): self.assertIn(encoding_stage.DECODE_SCOPE_SUFFIX, test_data.decoded_x.name) if is_adaptive_stage(stage): # The property should have keys matching those of state_update_tensors. self.assertSameElements(stage.state_update_aggregation_modes.keys(), test_data.state_update_tensors.keys()) for mode in six.itervalues(stage.state_update_aggregation_modes): self.assertIn(mode, encoding_stage.StateAggregationMode) for tensor in six.itervalues(test_data.initial_state): self.assertTrue(tf.is_tensor(tensor)) for tensor in six.itervalues(test_data.state_update_tensors): self.assertTrue(tf.is_tensor(tensor)) for tensor in six.itervalues(test_data.updated_state): self.assertTrue(tf.is_tensor(tensor)) # The state related Tensors should have appropriate substrings in their # names. for tensor in six.itervalues(test_data.initial_state): self.assertIn(encoding_stage.INITIAL_STATE_SCOPE_SUFFIX, tensor.name) for tensor in six.itervalues(test_data.updated_state): self.assertIn(encoding_stage.UPDATE_STATE_SCOPE_SUFFIX, tensor.name) for tensor in six.itervalues(test_data.state_update_tensors):
tensorflow.is_tensor
646
import tensorflow as tf **self.cnf.get('opt_kwargs', {'decay': 0.9})) # Get images and labels for ImageNet and split the batch across GPUs. assert self.cnf['batch_size_train'] % self.cnf.get('num_gpus', 1) == 0, ( 'Batch size must be divisible by number of GPUs') self.inputs = tf.placeholder( tf.float32, shape=(None, self.model.image_size[0], self.model.image_size[0], 3), name="input") self.labels = tf.placeholder(tf.int32, shape=(None,)) self._tower_loss_semi_supervised( self.inputs, self.labels, num_classes=num_classes, is_fm_loss=True) global_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) if update_ops is None: update_ops = global_update_ops else:
tensorflow.placeholder
647
from tensorflow.python.framework import ops dim0 = output_dim0[0] else: dim0 = None return [tensor_shape.TensorShape([dim0]).concatenate(input_shape[1:])] @ops.RegisterShape("UnsortedSegmentSum") def _UnsortedSegmentSumShape(op): """Shape function for UnsortedSegmentSum.""" data_shape = op.inputs[0].get_shape() segment_ids_shape = op.inputs[1].get_shape() mid = segment_ids_shape.ndims
tensorflow.python.framework.ops.RegisterShape
648
import tensorflow as tf paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1) v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T] alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape #output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1) output = facts * tf.expand_dims(alphas, -1) output = tf.reshape(output, tf.shape(facts)) # output = output / (facts.get_shape().as_list()[-1] ** 0.5) if not return_alphas: return output else:
tensorflow.expand_dims
649
import tensorflow as tf # L1 of activation outputs activation_out = self.all_layers[-2] L1_a = 0.001 * tf.reduce_mean(activation_out) # <haodong>: theano: T.mean( self.a[i] ) # some neuron are broken, white and black # L1_a = 0.001 * tf.reduce_mean( tf.reduce_sum(activation_out, 0) ) # <haodong>: some neuron are broken, white and black # L1_a = 0.001 * 100 * tf.reduce_mean( tf.reduce_sum(activation_out, 1) ) # <haodong>: some neuron are broken, white and black # KL Divergence beta = 4 rho = 0.15 p_hat = tf.reduce_mean(activation_out, 0) # theano: p_hat = T.mean( self.a[i], axis=0 ) try: # TF1.0 KLD = beta * tf.reduce_sum(rho * tf.log(tf.divide(rho, p_hat)) + (1 - rho) * tf.log((1 - rho) / (tf.subtract(float(1), p_hat)))) except Exception: # TF0.12 KLD = beta * tf.reduce_sum(rho * tf.log(tf.div(rho, p_hat)) + (1 - rho) * tf.log((1 - rho) / (tf.sub(float(1), p_hat)))) # KLD = beta * tf.reduce_sum( rho * tf.log(rho/ p_hat) + (1- rho) * tf.log((1- rho)/(1- p_hat)) ) # theano: L1_a = l1_a[i] * T.sum( rho[i] * T.log(rho[i]/ p_hat) + (1- rho[i]) * T.log((1- rho[i])/(1- p_hat)) ) # Total cost
tensorflow.reduce_mean
650
import tensorflow as tf tower_grads.append(grads) # We must calculate the mean of each gradient. Note that this is the # synchronization point across all towers. grads = average_gradients(tower_grads) # Add a summary to track the learning rate. summaries.append(tf.summary.scalar('learning_rate', lr)) # Add histograms for gradients. for grad, var in grads: if grad is not None: summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad)) # Apply the gradients to adjust the shared variables. apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) # Add histograms for trainable variables. for var in tf.trainable_variables(): summaries.append(tf.summary.histogram(var.op.name, var)) # Track the moving averages of all trainable variables. variable_averages = tf.train.ExponentialMovingAverage( cifar10.MOVING_AVERAGE_DECAY, global_step)
tensorflow.summary.histogram
651
import tensorflow as tf def mean_pooling_for_unselected_head( unhead_org_idx, sl_unhead, rep_unhead_mask, dep_org_idx, sl_dep, rep_dep_mask, rep_dep_tensor, direction ): with tf.name_scope('pooling_for_un_head'): undep_idxs = tf.tile(tf.expand_dims(dep_org_idx, 1), [1, sl_unhead, 1]) # [bs, sluh, sld] unhead_idxs = tf.tile(tf.expand_dims(unhead_org_idx, 2), [1, 1, sl_dep]) # [bs, sluh, sld] if direction is None: direct_mask_un = tf.not_equal(unhead_idxs, undep_idxs) # [bs, sluh, sld] else: if direction == 'forward': direct_mask_un = tf.greater(unhead_idxs, undep_idxs) # [bs, sluh, sld] else: direct_mask_un = tf.less(unhead_idxs, undep_idxs) # [bs, sluh, sld] # [bs, sluh, sld] rep_mask_tile_un = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_unhead_mask, 2)) pooling_mask = tf.logical_and(direct_mask_un, rep_mask_tile_un) # [bs, sluh, sld] # data for pooling pooling_data = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_unhead, 1, 1]) # bs,sluh,sld,hn # execute mean pooling based on pooling_mask[bs, sluh, sld] and pooling_data[bs,sluh,sld,hn] pooling_data = mask_for_high_rank(pooling_data, pooling_mask) # [bs,sluh,sld,hn] pooling_data_sum = tf.reduce_sum(pooling_data, -2) # [bs,sluh,hn] pooling_den = tf.reduce_sum(tf.cast(pooling_mask, tf.int32), -1, keep_dims=True) # [bs,sluh]
tensorflow.greater
652
import tensorflow as tf # clip gradients clipped_grads_and_vars = self._clip_gradients(self.grads_and_vars, self._grad_clipping_tuple) # compute norms in case they need to be logged self.gradient_norms = [tf.norm(g) + NUMTOL for (g, v) in clipped_grads_and_vars] self.weight_norms = [tf.norm(v) + NUMTOL for (g, v) in clipped_grads_and_vars] # check that gradients are finite grads = [tf.check_numerics(g, "grads is not finite") for (g, v) in clipped_grads_and_vars] variables = [tf.check_numerics(v, "grads is not finite") for (g, v) in clipped_grads_and_vars] self.gradient_weight_global_norms = [tf.global_norm(grads), tf.global_norm(variables)] # 2nd part of minimize: apply_gradient optimizer_step = self._optimizer.apply_gradients(clipped_grads_and_vars, global_step=self.global_step) update_ops = tf.group(*self.update_ops) self.training_op = tf.group(update_ops, optimizer_step) def set_check_ops(self): self._check_ops = 1 # TODO argo2 This is not working anymore with the new session #with self.sess.graph.as_default(): self._numerics_ops = tf.add_check_numerics_ops() def release(self): super().release() self.sess.close()
tensorflow.group
653
import tensorflow as tf self.assertAllClose(res1, res2) self.assertAllClose(res1, res3) def testEmbeddingTiedRNNSeq2Seq(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)] dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)] cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
tensorflow.constant_initializer
654
import tensorflow as tf with tf.name_scope(name):
tensorflow.name_scope
655
import tensorflow as tf def forward(self): config = self.config N, PL, QL, CL, d, dc, nh = config.batch_size if not self.demo else config.batch_size, self.c_maxlen, self.q_maxlen, config.char_limit, config.hidden, config.char_dim, config.num_heads with tf.variable_scope("Input_Embedding_Layer"): ch_emb = tf.reshape(tf.nn.embedding_lookup( self.char_mat, self.ch), [N * PL, CL, dc]) qh_emb = tf.reshape(tf.nn.embedding_lookup( self.char_mat, self.qh), [N * QL, CL, dc]) ch_emb = tf.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout) qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout)
tensorflow.nn.embedding_lookup
656
import tensorflow as tf weights = tf.constant(weights, dtype=tf.float32, name='class_weights') def GetCell(): """Creates an LSTM cell with dropout.""" c = tf.nn.rnn_cell.LSTMCell(hidden_size, use_peepholes=model_params['peepholes'], num_proj=proj_size) if dropout_keep_prob is not None: c = tf.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob) return c # Create the bi-directional LSTM with tf.variable_scope('wordrnn'): with tf.variable_scope('fw'): cell_fw = GetCell() with tf.variable_scope('bw'): cell_bw = GetCell() rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs, dtype=tf.float32, sequence_length=self.seq_lens) if proj_size: out_size = 2 * proj_size else: out_size = 2 * hidden_size super(TweetSeqModel, self)._DoPredictions(out_size, rnnout, class_weights=weights)
tensorflow.variable_scope
657
import tensorflow as tf b = len(blocks) # Count usage of inputs block_uses = [] for bi in range(b): idx1 = cell_arch[bi][0] idx2 = cell_arch[bi][2] block_use = tf.one_hot(idx1, ni, dtype=tf.int32) + tf.one_hot(idx2, ni, dtype=tf.int32) block_uses.append(block_use) block_uses = tf.add_n(block_uses) unused_indices = tf.reshape(tf.cast(tf.where(tf.equal(block_uses, 0)), tf.int32), [-1]) num_out_blocks = tf.size(unused_indices) # Select only unused blocks with tf.variable_scope('select'): stacked_blocks = tf.stack(cell_inputs + blocks) out_blocks = tf.gather(stacked_blocks, unused_indices, axis=0) out_blocks = tf.transpose(out_blocks, (1, 2, 3, 0, 4))
tensorflow.add_n
658
import tensorflow as tf tf.scalar_summary('loss', self.loss) with tf.name_scope('accuracy'): correct_prediction_action = tf.equal( tf.argmax(one_hot_labels_action, 1), tf.argmax(self.predictions_action, 1) ) self.accuracy_action = tf.reduce_mean(tf.cast(correct_prediction_action, 'float')) tf.scalar_summary('accuracy_action', self.accuracy_action) correct_prediction_arguments = tf.equal(tf.argmax(one_hot_labels_arguments, 2),
tensorflow.argmax
659
import tensorflow as tf with tf.name_scope('get_batch'): if cfgs.IMAGE_PYRAMID: shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN) shortside_len = tf.random_shuffle(shortside_len_list)[0]
tensorflow.constant
660
import tensorflow as tf sims_logits = tf.matmul(z_projs, z_aug_projs, transpose_b=True) logits_max = tf.reduce_max(sims_logits,1) sims_logits = sims_logits - tf.reshape(logits_max, [-1, 1]) sims_probs = tf.nn.softmax(sims_logits)
tensorflow.reshape
661
import tensorflow as tf if include_flux_and_time: dflux = tf.expand_dims(window_feature.dflux, 2) dtime = tf.expand_dims(window_feature.dtime, 2) features = tf.concat([features, dflux, dtime], axis=2, name="initial_layer_concat")
tensorflow.concat
662
import tensorflow as tf sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor
tensorflow.reshape
663
import tensorflow as tf attn_states, cell, output_size=4) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual(2, len(res[0])) self.assertEqual((2, 2), res[0][0].c.shape) self.assertEqual((2, 2), res[0][0].h.shape) self.assertEqual((2, 2), res[0][1].c.shape) self.assertEqual((2, 2), res[0][1].h.shape) def testEmbeddingAttentionDecoder(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): inp = [tf.constant(0.5, shape=[2, 2])] * 2 cell = tf.nn.rnn_cell.GRUCell(2) enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32) attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs]) dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)] dec, mem = tf.nn.seq2seq.embedding_attention_decoder( dec_inp, enc_state, attn_states, cell, num_symbols=4, embedding_size=2, output_size=3) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 3), res[0].shape)
tensorflow.constant_initializer
664
import tensorflow as tf return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32) def init_bias(shape, name=None): return safe_get(name, initializer=tf.zeros(shape, dtype=tf.float32)) def init_fc_weights_xavier(shape, name=None): fc_initializer = tf.contrib.layers.xavier_initializer(dtype=tf.float32) return safe_get(name, list(shape), initializer=fc_initializer, dtype=tf.float32) def init_conv_weights_xavier(shape, name=None): conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=tf.float32) return safe_get(name, list(shape), initializer=conv_initializer, dtype=tf.float32) def init_fc_weights_snn(shape, name=None): weights = np.random.normal(scale=np.sqrt(1.0/shape[0]), size=shape).astype('f') return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32) def init_conv_weights_snn(shape, name=None): weights = np.random.normal(scale=np.sqrt(1.0/(shape[0]*shape[1]*shape[2])), size=shape).astype('f') return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32)
tensorflow.contrib.layers.xavier_initializer_conv2d
665
import tensorflow as tf # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator."""
tensorflow.reduce_sum
666
import tensorflow as tf def fwd_gradients_0(self, U, x): g = tf.gradients(U, x, grad_ys=self.dummy_x0_tf)[0] return tf.gradients(g, self.dummy_x0_tf)[0] def fwd_gradients_1(self, U, x): g = tf.gradients(U, x, grad_ys=self.dummy_x1_tf)[0] return tf.gradients(g, self.dummy_x1_tf)[0] def net_U0(self, x): lambda_1 = self.lambda_1 lambda_2 = tf.exp(self.lambda_2)
tensorflow.gradients
667
from tensorflow.python.feature_column import feature_column_lib as core_feature_column model_dir=model_dir, config=config, feature_columns=[core_feature_column.numeric_column("x")], use_core_libs=True)
tensorflow.python.feature_column.feature_column_lib.numeric_column
668
import tensorflow as tf return with tf.variable_scope("losses_avg"): with tf.variable_scope("problem_0"): for var_name in ["total", "extra", "training"]: tf.get_variable( "%s_loss" % var_name, initializer=100.0, trainable=False) with tf.variable_scope("train_stats"): tf.get_variable("problem_0_steps", initializer=0, trainable=False) # These metrics are implemented with py_funcs and therefore do no work with TPU TPU_METRIC_BLACKLIST = set([
tensorflow.variable_scope
669
from tensorflow.python.framework import tensor_shape data_shape = op.inputs[0].get_shape() indices_shape = op.inputs[1].get_shape() indices_shape.assert_has_rank(1) segment_ids_shape = op.inputs[2].get_shape() segment_ids_shape.assert_has_rank(1) indices_shape.assert_is_compatible_with(segment_ids_shape) return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])] @ops.RegisterShape("SparseSegmentMeanGrad") def _SparseSegmentMeanGradShape(op): """Shape function for the SparseSegmentMeanGrad op."""
tensorflow.python.framework.tensor_shape.TensorShape
670
from tensorflow.python.ops import math_ops return self._dnn_logits(features) else: return self._linear_logits(features) def _get_weight_tensor(self, features): if not self._weight_column_name: return None else: return array_ops.reshape( math_ops.to_float(features[self._weight_column_name]), shape=(-1,)) def _loss(self, logits, target, weight_tensor): if self._n_classes < 2: loss_vec = math_ops.square(logits - math_ops.to_float(target)) elif self._n_classes == 2: loss_vec = nn.sigmoid_cross_entropy_with_logits(logits, math_ops.to_float(target))
tensorflow.python.ops.math_ops.to_float
671
from tensorflow.python.ops import state_ops ValueError: If `weights` is not `None` and has an incomptable shape. """ default_name = _at_k_name('false_positive', k, class_id=class_id) with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope: fp = _sparse_false_positive_at_k( predictions_idx=predictions_idx, labels=labels, class_id=class_id, weights=weights) batch_total_fp = math_ops.to_double(math_ops.reduce_sum(fp)) var = contrib_variables.local_variable( array_ops.zeros([], dtype=dtypes.float64), name=scope) return var, state_ops.assign_add(var, batch_total_fp, name='update') def _sparse_false_negative_at_k(predictions_idx, labels, class_id=None, weights=None): """Calculates false negatives for recall@k. If `class_id` is specified, calculate binary true positives for `class_id` only.
tensorflow.python.ops.state_ops.assign_add
672
import tensorflow as tf elif validate_args: assertions += [tf.compat.v1.assert_rank(perm, 1, message=msg)] perm_ = tf.get_static_value(perm) msg = '`perm` must be a valid permutation vector.' if perm_ is not None: if not np.all(np.arange(np.size(perm_)) == np.sort(perm_)): raise ValueError(msg[:-1] + ', saw: {}.'.format(perm_)) elif validate_args: assertions += [ tf.compat.v1.assert_equal( tf.sort(perm), tf.range(tf.size(input=perm)), message=msg) ] return assertions
tensorflow.sort
673
import tensorflow as tf def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. with tf.variable_scope("cls/seq_relationship"): output_weights = tf.get_variable( "output_weights", shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range)) output_bias = tf.get_variable( "output_bias", shape=[2], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1]
tensorflow.nn.bias_add
674
import tensorflow as tf else: # need broadcasting target_shape = [] for axis in range(get_ndim(x)): if axis in reduction_axes: target_shape.append(1) else: target_shape.append(tf.shape(x)[axis]) target_shape = stack(target_shape) broadcast_mean = tf.reshape(mean, target_shape) broadcast_var = tf.reshape(var, target_shape) broadcast_gamma = tf.reshape(gamma, target_shape) broadcast_beta = tf.reshape(beta, target_shape) normed = tf.nn.batch_normalization(x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon) return normed, mean, var def ones(shape, dtype=None, name=None): """Instantiates an all-ones tensor variable and returns it. Parameters ----------
tensorflow.reshape
675
import tensorflow as tf # q scores for actions which we know were selected in the given state. q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)
tensorflow.one_hot
676
import tensorflow as tf tf.app.flags.DEFINE_integer('num_gpus', 0, 'Number of gpus used for training. (0 or 1)') tf.app.flags.DEFINE_integer('num_residual_units', 5, 'num of residual units')
tensorflow.app.flags.DEFINE_integer
677
from tensorflow.python.ops import math_ops if weights is not None: weights = math_ops.to_float(weights)
tensorflow.python.ops.math_ops.to_float
678
import tensorflow as tf with tf.variable_scope("src_projection"): source_top_span_emb = tf.nn.dropout(util.projection(top_span_emb, util.shape(top_span_emb, -1)), self.dropout) # [k, emb] target_top_span_emb = tf.nn.dropout(top_span_emb, self.dropout) # [k, emb] return tf.matmul(source_top_span_emb, target_top_span_emb, transpose_b=True) # [k, k] def flatten_emb_by_sentence(self, emb, text_len_mask):
tensorflow.matmul
679
from tensorflow.python.framework import tensor_shape @ops.RegisterShape("SegmentProd") @ops.RegisterShape("SegmentSum") def _SegmentReductionShape(op): """Common shape function for segment reduction ops.""" data_shape = op.inputs[0].get_shape() segment_ids_shape = op.inputs[1].get_shape() segment_ids_shape.assert_has_rank(1) return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])] @ops.RegisterShape("SparseSegmentMean") @ops.RegisterShape("SparseSegmentSum") def _SparseSegmentReductionShape(op): """Common shape function for sparse segment reduction ops."""
tensorflow.python.framework.tensor_shape.TensorShape
680
import tensorflow as tf from tensorflow import keras import tensorflow as tf def joint_mse_loss(y_pred, y_true, true_weight): """ What this loss expresses: the number of output feature maps equals the number of keypoints, i.e. the output is, for each pixel, the confidence that it belongs to each keypoint """ batch_size = y_pred.shape[0] num_of_joints = y_pred.shape[-1] # number of keypoints y_pred = tf.reshape(y_pred, shape=(batch_size, -1, num_of_joints)) # merge width and height heatmap_pred_list = tf.split(value=y_pred, num_or_size_splits=num_of_joints, axis=-1) # split out each keypoint's feature map [batch_size, -1, 1] y_true = tf.reshape(y_true, shape=(batch_size, -1, num_of_joints)) heatmap_true_list = tf.split(value=y_true, # apply the same operations to y_true as to y_pred num_or_size_splits=num_of_joints, axis=-1) losses = [] # compute each keypoint's loss, then sum and average for i in range(num_of_joints): heatmap_pred = tf.squeeze(heatmap_pred_list[i]) heatmap_true = tf.squeeze(heatmap_true_list[i])
tensorflow.split
681
import tensorflow as tf self.D_A_loss_fake = binary_cross_entropy_loss(tf.zeros_like(self.D_A_fake),self.D_A_fake) self.D_A_loss = (self.D_A_loss_real + self.D_A_loss_fake) / 2.0 self.discriminator_loss = self.D_B_loss + self.D_A_loss self.loss_GABA_sum = tf.summary.scalar("g_loss_a2b", self.loss_GABA) self.loss_GBAB_sum = tf.summary.scalar("g_loss_b2a", self.loss_GBAB) self.g_total_loss_sum = tf.summary.scalar("g_loss", self.generator_loss) self.g_sum = tf.summary.merge([self.loss_GABA_sum,self.loss_GBAB_sum,self.g_total_loss_sum]) self.loss_db_sum = tf.summary.scalar("db_loss", self.D_B_loss) self.loss_da_sum = tf.summary.scalar("da_loss", self.D_A_loss) self.loss_d_sum = tf.summary.scalar("d_loss",self.discriminator_loss)
tensorflow.summary.scalar
682
from tensorflow.python.ops import math_ops array_ops.pack([1, num_predictions])) # Tile the predictions after thresholding them across different thresholds. pred_is_pos = math_ops.greater( array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]), thresh_tiled) pred_is_neg = math_ops.logical_not(pred_is_pos) # Tile labels by number of thresholds label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1]) label_is_neg = math_ops.logical_not(label_is_pos) true_positives = _create_local('true_positives', shape=[num_thresholds]) false_negatives = _create_local('false_negatives', shape=[num_thresholds]) true_negatives = _create_local('true_negatives', shape=[num_thresholds]) false_positives = _create_local('false_positives', shape=[num_thresholds]) is_true_positive = math_ops.to_float( math_ops.logical_and(label_is_pos, pred_is_pos)) is_false_negative = math_ops.to_float(
tensorflow.python.ops.math_ops.logical_not
683
import tensorflow as tf inputs = { 'foo': tf.convert_to_tensor([0, 1, 2, 3]), 'bar': tf.convert_to_tensor([0, 2, 0, 2]), } boundaries_foo = tf.expand_dims(tf.convert_to_tensor([.5, 1.5]), axis=0) boundaries_bar = tf.expand_dims(tf.convert_to_tensor([.1, .2]), axis=0) outputs = {}
tensorflow.convert_to_tensor
684
import tensorflow as tf def global_avg_pool(input_data, output_length=1, padding='VALID', scope='gloval_avg_pool'): input_dims = input_data.get_shape().as_list() assert (len(input_dims) == 4) # batch_size, height, width, num_channels_in num_channels_in = input_dims[-1] height = input_dims[1] width = input_dims[2] with tf.variable_scope(scope): if output_length == 1: pool = tf.nn.avg_pool(input_data, [1, height, width, 1], strides=[1, 1, 1, 1], padding=padding) pool = tf.reduce_mean(pool, axis=[1, 2]) pool = tf.squeeze(pool, axis=[1, 2]) return pool else: if num_channels_in != output_length: conv_weight = tf.Variable(tf.truncated_normal([1, 1, num_channels_in, output_length], stddev=0.1, dtype=tf.float32)) conv = tf.nn.conv2d(input_data, conv_weight, strides=[1, 1, 1, 1], padding='SAME') pool = tf.nn.avg_pool(conv, ksize=[1, height, width, 1], strides=[1, 1, 1, 1], padding=padding) else: pool = tf.nn.avg_pool(input_data, ksize=[1, height, width, 1], strides=[1, 1, 1, 1], padding=padding)
tensorflow.nn.avg_pool
685
import tensorflow as tf self.w1=tf.get_variable('w1', [4096,2048],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.w2=tf.get_variable('w2', [2048,3072],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.w3=tf.get_variable('w3', [3072,512],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.w4=tf.get_variable('w4', [512,classnum],initializer=tf.contrib.layers.xavier_initializer_conv2d()) self.b1 = tf.get_variable('b1', [2048],initializer=tf.constant_initializer(0.0)) self.b2 = tf.get_variable('b2', [3072],initializer=tf.constant_initializer(0.0)) self.b3 = tf.get_variable('b3', [512],initializer=tf.constant_initializer(0.0)) self.b4 = tf.get_variable('b4', [classnum],initializer=tf.constant_initializer(0.0)) def inference(self,images): images=tf.cast(images,tf.float32)/255.0 l1 = tf.matmul(images, self.w1)+self.b1 l1=tf.nn.relu(l1) l2 = tf.matmul(l1, self.w2)+self.b2 l2=tf.nn.relu(l2) l3=tf.matmul(l2, self.w3)+self.b3 l3=tf.nn.relu(l3) out=tf.matmul(l3, self.w4)+self.b4 return out def test_inference(self,images): images=tf.cast(images,tf.float32)/255.0 l1 = tf.matmul(images, self.w1)+self.b1 l1=tf.nn.relu(l1)
tensorflow.nn.relu
686
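A minimal sketch of tf.nn.relu on a dense layer, mirroring the pattern above (TF 1.x; variable names and sizes are illustrative):

import tensorflow as tf

x = tf.constant([[-1.0, 0.0, 2.0]])
w = tf.get_variable('w', [3, 4])
b = tf.get_variable('b', [4], initializer=tf.constant_initializer(0.0))
h = tf.nn.relu(tf.matmul(x, w) + b)      # max(0, xW + b), applied element-wise
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(h))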
import tensorflow as tf
  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='xentropy')
  loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
  return loss
tensorflow.nn.sparse_softmax_cross_entropy_with_logits
687
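A runnable sketch of tf.nn.sparse_softmax_cross_entropy_with_logits using the keyword arguments required since TF 1.0 (batch size and class count are illustrative):

import tensorflow as tf

logits = tf.random_normal([4, 10])                    # batch of 4, 10 classes
labels = tf.constant([3, 1, 0, 7], dtype=tf.int64)    # integer class ids, not one-hot
xent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
loss = tf.reduce_mean(xent, name='xentropy_mean')
with tf.Session() as sess:
    print(sess.run(loss))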
import tensorflow as tf """Enable/disable distortions during image preprocessing. These include bbox and color distortions.""") tf.flags.DEFINE_string('local_parameter_device', 'gpu', """Device to use as parameter server: cpu or gpu. For distributed training, it can affect where caching of variables happens.""") tf.flags.DEFINE_string('device', 'gpu', """Device to use for computation: cpu or gpu""") #tf.flags.DEFINE_string('data_format', 'NCHW', tf.flags.DEFINE_string('data_format', 'NHWC', """Data layout to use: NHWC (TF native) or NCHW (cuDNN native).""") tf.flags.DEFINE_integer('num_intra_threads', 1, """Number of threads to use for intra-op parallelism. If set to 0, the system will pick an appropriate number.""") tf.flags.DEFINE_integer('num_inter_threads', 0, """Number of threads to use for inter-op parallelism. If set to 0, the system will pick
tensorflow.flags.DEFINE_string
688
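A minimal sketch of tf.flags.DEFINE_string (the related DEFINE_integer and DEFINE_boolean follow the same pattern) in a TF 1.x script; the flag names mirror the snippet, but the script itself is illustrative:

import tensorflow as tf

tf.flags.DEFINE_string('data_format', 'NHWC', 'NHWC (TF native) or NCHW (cuDNN native).')
tf.flags.DEFINE_integer('num_intra_threads', 1, 'Threads for intra-op parallelism.')
FLAGS = tf.flags.FLAGS

def main(_):
    print(FLAGS.data_format, FLAGS.num_intra_threads)

if __name__ == '__main__':
    tf.app.run()       # parses the command line and calls main()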
import tensorflow as tf def gradient_add(g1, g2, param): print([g1, g2, param.name]) assert (not (g1 is None and g2 is None)), param.name if g1 is None: return g2 elif g2 is None: return g1 else: return g1 + g2 def q_explained_variance(qpred, q): _, vary = tf.nn.moments(q, axes=[0, 1]) _, varpred = tf.nn.moments(q - qpred, axes=[0, 1]) check_shape([vary, varpred], [[]] * 2) return 1.0 - (varpred / vary)
tensorflow.nn.moments
689
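A small sketch of tf.nn.moments, which the snippet uses to compute the explained variance 1 - Var[q - qpred] / Var[q] (shapes are illustrative):

import tensorflow as tf

q = tf.random_normal([32, 5])
mean, var = tf.nn.moments(q, axes=[0, 1])     # scalar mean and variance over both axes
with tf.Session() as sess:
    print(sess.run([mean, var]))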
import tensorflow as tf # the combined gradients to all towers (depending on --use_nccl option). # independent: each GPU has its own copy of the variables, and gradients are # not shared between towers. This can be used to check performance when no # data is moved between GPUs. # distributed_replicated: Distributed training only. Each GPU has a copy of # the variables, and updates its copy after the parameter servers are all # updated with the gradients from all servers. Only works with # cross_replica_sync=true. Unlike 'replicated', currently never uses # nccl all-reduce for replicating within a server. tf.flags.DEFINE_string( 'variable_update', 'parameter_server', ('The method for managing variables: ' 'parameter_server, replicated, distributed_replicated, independent')) tf.flags.DEFINE_boolean( 'use_nccl', True, 'Whether to use nccl all-reduce primitives where possible') # Distributed training flags. tf.flags.DEFINE_string('job_name', '', 'One of "ps", "worker", "". Empty for local training') tf.flags.DEFINE_string('ps_hosts', '', 'Comma-separated list of target hosts') tf.flags.DEFINE_string('worker_hosts', '', 'Comma-separated list of target hosts') tf.flags.DEFINE_integer('task_index', 0, 'Index of task within the job') tf.flags.DEFINE_string('server_protocol', 'grpc', 'protocol for servers') tf.flags.DEFINE_boolean('cross_replica_sync', True, '')
tensorflow.flags.DEFINE_boolean
690
import tensorflow as tf # data for pooling pooling_data = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_unhead, 1, 1]) # bs,sluh,sld,hn # execute mean pooling based on pooling_mask[bs, sluh, sld] and pooling_data[bs,sluh,sld,hn] pooling_data = mask_for_high_rank(pooling_data, pooling_mask) # [bs,sluh,sld,hn] pooling_data_sum = tf.reduce_sum(pooling_data, -2) # [bs,sluh,hn] pooling_den = tf.reduce_sum(tf.cast(pooling_mask, tf.int32), -1, keep_dims=True) # [bs,sluh] pooling_den = tf.where(tf.equal(pooling_den, 0), tf.ones_like(pooling_den), pooling_den) pooling_result = pooling_data_sum / tf.cast(pooling_den, tf.float32) return pooling_result
tensorflow.ones_like
691
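A minimal sketch of the tf.ones_like / tf.where pattern used above to guard against division by zero (values are illustrative):

import tensorflow as tf

den = tf.constant([[0], [3], [2]])
safe_den = tf.where(tf.equal(den, 0), tf.ones_like(den), den)   # replace zeros with ones
with tf.Session() as sess:
    print(sess.run(safe_den))        # [[1] [3] [2]]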
import tensorflow as tf
    # Define LSTM cell of first hidden layer:
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
    # Stack two LSTM layers; both layers have the same shape
    lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
    # Get LSTM outputs; the states are internal to the LSTM cells, they are not our concern here
    outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32)
    # outputs' shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
tensorflow.nn.rnn_cell.MultiRNNCell
692
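A self-contained sketch of tf.nn.rnn_cell.MultiRNNCell in TF 1.x; it uses separate cell instances and dynamic_rnn, which avoids the variable-sharing pitfall of reusing one cell object (layer sizes are illustrative):

import tensorflow as tf

cells = [tf.nn.rnn_cell.BasicLSTMCell(64, forget_bias=1.0) for _ in range(2)]
stacked = tf.nn.rnn_cell.MultiRNNCell(cells)                 # two stacked LSTM layers
inputs = tf.random_normal([8, 20, 32])                       # [batch, time, features]
outputs, state = tf.nn.dynamic_rnn(stacked, inputs, dtype=tf.float32)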
import tensorflow as tf data_format_ = 'NHWC' if data_format=='channels_last' else 'NCHW' if data_format_ == 'NHWC': inputs = tf.transpose(inputs, [0, 2, 3, 1]) ksize = int(6 * sigma + 1.) x = tf.expand_dims(tf.range(ksize, delta=1, dtype=tf.float32), axis=1) y = tf.transpose(x, [1, 0]) kernel_matrix = tf.exp(- ((x - ksize/2.) ** 2 + (y - ksize/2.) ** 2) / (2 * sigma ** 2)) #print(kernel_matrix) kernel_filter = tf.reshape(kernel_matrix, [ksize, ksize, 1, 1]) kernel_filter = tf.tile(kernel_filter, [1, 1, inputs_filters, 1]) #kernel_filter = tf.transpose(kernel_filter, [1, 0, 2, 3]) outputs = tf.nn.depthwise_conv2d(inputs, kernel_filter, strides=[1, 1, 1, 1], padding='SAME', data_format=data_format_, name='blur') if data_format_ == 'NHWC': outputs = tf.transpose(outputs, [0, 3, 1, 2]) return outputs
tensorflow.reshape
693
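A short sketch of the tf.reshape / tf.tile step that builds the depthwise blur filter above (kernel size and channel count are illustrative):

import tensorflow as tf

kernel_matrix = tf.random_normal([5, 5])
kernel_filter = tf.reshape(kernel_matrix, [5, 5, 1, 1])      # [H, W, in_channels, channel_multiplier]
kernel_filter = tf.tile(kernel_filter, [1, 1, 3, 1])         # repeat across 3 input channels
print(kernel_filter.get_shape())                             # (5, 5, 3, 1)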
import tensorflow as tf a scalar tensor representing the correlation loss value. """ with tf.name_scope(name): source_samples -= tf.reduce_mean(source_samples, 0) target_samples -= tf.reduce_mean(target_samples, 0) source_samples = tf.nn.l2_normalize(source_samples, 1) target_samples = tf.nn.l2_normalize(target_samples, 1) source_cov = tf.matmul(tf.transpose(source_samples), source_samples) target_cov = tf.matmul(tf.transpose(target_samples), target_samples) corr_loss = tf.reduce_mean(tf.square(source_cov - target_cov)) * weight
tensorflow.nn.l2_normalize
694
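A minimal sketch of tf.nn.l2_normalize along axis 1, as applied to the sample matrices above (values are illustrative):

import tensorflow as tf

x = tf.constant([[3.0, 4.0], [1.0, 0.0]])
x_norm = tf.nn.l2_normalize(x, 1)        # each row rescaled to unit L2 norm
with tf.Session() as sess:
    print(sess.run(x_norm))              # [[0.6 0.8] [1.  0. ]]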
import tensorflow as tf hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer input_size = query.get_shape().as_list()[-1] # Trainable parameters w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1)) w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1)) b = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) v = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) with tf.name_scope('v'): # Applying fully connected layer with non-linear activation to each of the B*T timestamps; # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size tmp1 = tf.tensordot(facts, w1, axes=1) tmp2 = tf.tensordot(query, w2, axes=1) tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]]) tmp = tf.tanh((tmp1 + tmp2) + b) # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape key_masks = mask # [B, 1, T] # key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1) v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T] alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
tensorflow.tensordot
695
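A small sketch of tf.tensordot contracting a (B, T, D) tensor with a (D, A) weight, as in the attention code above (dimensions are illustrative):

import tensorflow as tf

facts = tf.random_normal([2, 7, 16])     # (B, T, D)
w1 = tf.random_normal([16, 8])           # (D, A)
tmp1 = tf.tensordot(facts, w1, axes=1)   # contracts the last axis of facts with the first of w1
print(tmp1.get_shape())                  # (2, 7, 8)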
import tensorflow as tf # an output-only bias for each token. output_bias = tf.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the
tensorflow.reshape
696
import tensorflow as tf if summary_dir is not None: self.writer = tf.summary.FileWriter(summary_dir) tf.summary.scalar('Loss/Policy', loss_pg) tf.summary.scalar('Loss/Value', loss_vf) tf.summary.scalar('Loss/Entropy', loss_entropy) tf.summary.scalar('Loss/Total', loss) tf.summary.scalar('Var/Epsilon', epsilon_decay) tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode())) tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev())) tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf)) self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES)) # AC net def build_anet(self, state_in, name, reuse=False): reg = tf.contrib.layers.l2_regularizer(1e-3) with tf.variable_scope(name, reuse=reuse): layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg) layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg) mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg) # sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg) sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5)) sigma = tf.clip_by_value(sigma, 0.0, 1.0) norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma) params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name) return norm_dist, params def build_cnet(self, state_in, name, reuse=False): reg = tf.contrib.layers.l2_regularizer(1e-3) with tf.variable_scope(name, reuse=reuse): layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
tensorflow.variable_scope
697
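A minimal sketch of tf.variable_scope with the reuse flag, mirroring how build_anet above scopes its layers (the function name, layer sizes, and scope name are illustrative):

import tensorflow as tf

def build_net(state_in, name, reuse=False):
    with tf.variable_scope(name, reuse=reuse):       # variables get names like "pi/dense/kernel"
        h = tf.layers.dense(state_in, 64, tf.nn.relu)
        return tf.layers.dense(h, 4, tf.nn.tanh)

s = tf.placeholder(tf.float32, [None, 8])
a1 = build_net(s, 'pi')                  # creates the variables
a2 = build_net(s, 'pi', reuse=True)      # reuses the same variables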
import tensorflow as tf with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): inp = [tf.constant(0.5, shape=[2, 2])] * 2 _, enc_state = tf.nn.rnn( tf.nn.rnn_cell.GRUCell(2), inp, dtype=tf.float32) dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3 cell = tf.nn.rnn_cell.OutputProjectionWrapper( tf.nn.rnn_cell.GRUCell(2), 4) dec, mem = tf.nn.seq2seq.rnn_decoder(dec_inp, enc_state, cell) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testBasicRNNSeq2Seq(self):
tensorflow.global_variables_initializer
698
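A minimal sketch of tf.global_variables_initializer in TF 1.x graph mode (the variable is illustrative):

import tensorflow as tf

w = tf.get_variable('w', [2, 2], initializer=tf.constant_initializer(0.5))
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)                       # variables must be initialized before they are read
    print(sess.run(w))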
import tensorflow as tf return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)] def seq_to_batch(h, flat = False): shape = h[0].get_shape().as_list() if not flat: assert(len(shape) > 1) nh = h[0].get_shape()[-1].value return tf.reshape(tf.concat(axis=1, values=h), [-1, nh]) else: return tf.reshape(tf.stack(values=h, axis=1), [-1]) def lstm(xs, s, scope, nh, init_scale=1.0): nbatch, nin = [v.value for v in xs[0].get_shape()] nsteps = len(xs)
tensorflow.concat
699
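A short sketch of the tf.concat + tf.reshape pattern used by seq_to_batch above (batch size, step count, and hidden size are illustrative):

import tensorflow as tf

h = [tf.random_normal([4, 16]) for _ in range(3)]            # 3 steps of [batch, hidden]
batched = tf.reshape(tf.concat(axis=1, values=h), [-1, 16])  # -> [batch * steps, hidden]
print(batched.get_shape())                                   # (12, 16)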