seed        string    lengths 25 to 2.89k
seed_api    string    lengths 14 to 102
index       int64     values 0 to 14.8k
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops

    weight_tensor = self.get_weight_tensor(features)
    if weight_tensor is None:
      return math_ops.reduce_mean(loss_unweighted, name="loss")
    else:
      loss_unweighted = array_ops.reshape(loss_unweighted, shape=(-1,))
      loss_weighted = math_ops.mul(
          loss_unweighted,
          array_ops.reshape(weight_tensor, shape=(-1,)))
      return math_ops.div(
          math_ops.reduce_sum(loss_weighted),
          math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
          name="loss")

class _RegressionTargetColumn(_TargetColumn):
  """_TargetColumn for regression."""

  def __init__(self, loss_fn, label_name, weight_column_name, target_dimension):
tensorflow.python.ops.math_ops.reduce_sum
900
import tensorflow as tf

            loss_summ = tf.summary.scalar("{0}_loss".format(str_summary_type), mean_cost)
            acc_summ = tf.summary.scalar("{0}_accuracy".format(str_summary_type), accuracy)
            merged = tf.summary.merge([loss_summ, acc_summ])
            return mean_cost, accuracy, y_pred, merged
        else:
            return mean_cost, accuracy, y_pred

    def training(self, cost):
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        # train_op = optimizer.minimize(cost)
        trainables = tf.trainable_variables()
        grads = tf.gradients(cost, trainables)
        grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.clip_norm)
        capped_gvs = zip(grads, trainables)
        train_op = optimizer.apply_gradients(capped_gvs)
        return train_op
tensorflow.train.AdamOptimizer
901
import tensorflow as tf

correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_labels, 1))
tensorflow.argmax
902
import tensorflow as tf

                     slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
                    weights_regularizer=weights_regularizer,
                    biases_regularizer=biases_regularizer,
                    biases_initializer=tf.constant_initializer(0.0)):

    gtboxes_and_label_h, gtboxes_and_label_r = tf.py_func(
        self.get_gtboxes_and_label,
        inp=[inputs_list[i][1], inputs_list[i][2], inputs_list[i][3]],
        Tout=[tf.float32, tf.float32])
    gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])
    gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])

    if cfgs.ANGLE_RANGE == 180:
        gtboxes_and_label_r_ = tf.py_func(coordinate_present_convert,
                                          inp=[gtboxes_and_label_r, -1],
                                          Tout=tf.float32)
        gtboxes_and_label_r_ = tf.reshape(gtboxes_and_label_r_, [-1, 6])

        gt_encode_label = tf.py_func(angle_label_encode,
                                     inp=[gtboxes_and_label_r_[:, -2], cfgs.ANGLE_RANGE,
tensorflow.reshape
903
import tensorflow as tf

        sparse_map_op=st_handles.op, sparse_handles=st_handles)
    st_roundtrip_op = st_roundtrip.values.op

    st_serialized = tf.serialize_many_sparse(st)
    st_deserialized = tf.deserialize_many_sparse(
        st_serialized, dtype=values.dtype)
tensorflow.serialize_many_sparse
904
import tensorflow as tf

        mean, variance = tf.nn.normalize_moments(counts,
                                                 shifted_sum_x,
                                                 shifted_sum_x2,
                                                 shift,
                                                 name="normalize_moments")
        second_moment = variance + tf.square(mean)
        return mean, variance, second_moment

    def build_moving_stats():
        return (
            tf.identity(self._moving_mean),
            tf.identity(self._moving_variance),
            tf.identity(self._moving_second_moment),
        )

    mean, variance, second_moment = utils.smart_cond(
        use_batch_stats,
        build_batch_stats,
        build_moving_stats,
    )
    return mean, variance, second_moment

def _build_update_ops_variance(self, mean, variance, is_training):
    """Builds the moving average update ops when using moving variance.
tensorflow.identity
905
import tensorflow as tf

        l2 = tf.matmul(l1, self.w2) + self.b2
        l2 = tf.nn.relu(l2)
        l3 = tf.matmul(l2, self.w3) + self.b3
        l3 = tf.nn.relu(l3)
        out = tf.matmul(l3, self.w4) + self.b4
        return out

    def test_inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
        l2 = tf.matmul(l1, self.w2) + self.b2
        l2 = tf.nn.relu(l2)
        l3 = tf.matmul(l2, self.w3) + self.b3
        l3 = tf.nn.relu(l3)
        out = tf.matmul(l3, self.w4) + self.b4
tensorflow.cast
906
import tensorflow as tf

v_norm = tf.nn.l2_normalize(self.v, axis=0)
t = tf.matmul(input_var, v_norm)
tensorflow.matmul
907
import tensorflow as tf

      max(1.0, max(image_pyramid)) / logits_output_stride)

  # Compute the logits for each scale in the image pyramid.
  outputs_to_scales_to_logits = {
      k: {}
      for k in model_options.outputs_to_num_classes
  }

  for count, image_scale in enumerate(image_pyramid):
    if image_scale != 1.0:
      scaled_height = scale_dimension(crop_height, image_scale)
      scaled_width = scale_dimension(crop_width, image_scale)
      scaled_crop_size = [scaled_height, scaled_width]
      scaled_images = tf.image.resize_bilinear(
          images, scaled_crop_size, align_corners=True)
      if model_options.crop_size:
        scaled_images.set_shape([None, scaled_height, scaled_width, 3])
    else:
      scaled_crop_size = model_options.crop_size
      scaled_images = images

    updated_options = model_options._replace(crop_size=scaled_crop_size)
    outputs_to_logits = _get_logits(
        scaled_images,
        updated_options,
        weight_decay=weight_decay,
tensorflow.image.resize_bilinear
908
import tensorflow as tf

class PPO_HC(PPO):
    def build_anet(self, state_in, name, reuse=False):
        reg = tf.contrib.layers.l2_regularizer(1e-3)
        with tf.variable_scope(name, reuse=reuse):
            layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
            mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
            sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
            # sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
            sigma = tf.clip_by_value(sigma, 0.0, 1.0)
            norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
        params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params
tensorflow.layers.dense
909
from tensorflow.python.framework import op_def_registry

def _get_op_def(op):
  # pylint: disable=protected-access
  if hasattr(op, "_sig"):
    return getattr(op, "_sig")
  else:
    return op_def_registry.get_registered_ops()[op.type]
  # pylint: enable=protected-access

def _is_in_placeholders(op, func_arg_placeholders):
  return op.values() and (op.values()[0].name in func_arg_placeholders)
tensorflow.python.framework.op_def_registry.get_registered_ops
910
import tensorflow as tf

            self.manager_lstm.state_out[0],
            self.manager_lstm.state_out[1]
        ]
        # for v in self.var_list:
        #     print v

    def build_placeholders(self):
        # standard for all policies
        self.obs = tf.placeholder(tf.float32, [None, self.obs_space])
        # ! self.obs = tf.placeholder(tf.float32, [None] + list(self.obs_space))
        # ! self.obs_space = env.observation_space.shape
        self.r = tf.placeholder(tf.float32, (None, 1))
        self.ac = tf.placeholder(tf.float32, (None, self.act_space))
        self.adv = tf.placeholder(tf.float32, [None])  # unused

        # specific to FeUdal
tensorflow.placeholder
911
import numpy as np
import tensorflow as tf

    def call(self, input_):
        dim = self.order
        out = tf.reshape(input_, [-1, np.prod(self.inp_modes)])
        self.image_max_size = max(self.image_max_size, np.prod(self.inp_modes))
        out = tf.transpose(out, [1, 0])
        for i in range(dim):
            out = tf.reshape(out, [self.mat_ranks[i] * self.inp_modes[i], -1])
            out = tf.matmul(self.mat_cores[i], out)
            out = tf.reshape(out, [self.out_modes[i], -1])
            out = tf.transpose(out, [1, 0])
        out = tf.reshape(out, [-1, np.prod(self.out_modes)])
        # self.image_max_size = max(self.image_max_size, np.prod([val.value for val in out.get_shape()[1:]]))
        if self.use_bias:
            out = tf.add(out, self.bias, name='out')
tensorflow.matmul
912
from tensorflow.python.framework import ops

class FullyConnectedLayer(hybrid_layer.HybridLayer):
  """A stacked, fully-connected feed-forward neural network layer."""

  def _define_vars(self, params):
    pass

  def inference_graph(self, data):
    with ops.device(self.device_assigner):
      # Compute activations for the neural network.
      nn_activations = layers.fully_connected(data, self.params.layer_size)

      for _ in range(1, self.params.num_layers):
        # pylint: disable=W0106
        nn_activations = layers.fully_connected(nn_activations,
tensorflow.python.framework.ops.device
913
import tensorflow as tf

flags = tf.flags

flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")

flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")

flags.DEFINE_string("vocab_file", './drive/My Drive/ai/checkpoint/vocab.txt',
                    "The vocabulary file that the BERT model was trained on.")

tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")

flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")

class InputExample(object):
  """A single training/test example for simple sequence classification."""
tensorflow.flags.DEFINE_string
914
import tensorflow as tf

    model_options.crop_size[1]
    if model_options.crop_size else tf.shape(images)[2])
tensorflow.shape
915
import tensorflow as tf

def simple_block_attention(
        rep_tensor, rep_mask, block_len=5, scope=None, direction=None,
        keep_prob=1., is_train=None, wd=0., activation='elu', hn=None):
    assert direction is not None

    def scaled_tanh(x, scale=5.):
        return scale * tf.nn.tanh(1. / scale * x)

    bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]
    ivec = hn or rep_tensor.get_shape().as_list()[2]
    input_dim = rep_tensor.get_shape().as_list()[2]
    with tf.variable_scope(scope or 'block_simple'):
        # @1. split sequence
        with tf.variable_scope('split_seq'):
tensorflow.nn.tanh
916
import tensorflow as tf

var = variable_on_cpu(
    "var", [dim], tf.constant_initializer(1.), trainable=False)
tensorflow.constant_initializer
917
import tensorflow as tf
import tensorflow.contrib.slim as slim

        bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
        # 'roi_pooling_size', 7
        pre_pool_size = cfg.FLAGS.roi_pooling_size * 2
        # Crop the parts of the feature map corresponding to the rois, then resize them to 14*14
        crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids),
                                         [pre_pool_size, pre_pool_size], name="crops")

        return slim.max_pool2d(crops, [2, 2], padding='SAME')

    def _dropout_layer(self, bottom, name, ratio=0.5):
        return tf.nn.dropout(bottom, ratio, name=name)

    def _anchor_target_layer(self, rpn_cls_score, name):
tensorflow.contrib.slim.max_pool2d
918
import tensorflow as tf

    # Output layer
    else:
        F = tf.squeeze(tf.layers.dense(Z, n_out), [2])
    return F, KL
tensorflow.layers.dense
919
import tensorflow as tf

      end_top_log_probs, end_top_index = tf.nn.top_k(
          end_log_probs, k=FLAGS.end_n_top)
      end_top_log_probs = tf.reshape(
          end_top_log_probs,
          [-1, FLAGS.start_n_top * FLAGS.end_n_top])
      end_top_index = tf.reshape(
          end_top_index,
          [-1, FLAGS.start_n_top * FLAGS.end_n_top])

    if is_training:
tensorflow.reshape
920
import tensorflow as tf

      `[num_nodes * count1]`, `[num_nodes * count1 * count2]`, ...
    weights: A list of `Tensor` of `float`, with shapes
      `[num_nodes * count1]`, `[num_nodes * count1 * count2]` ...
    types: A list of `Tensor` of `int32`, with shapes
      `[num_nodes * count1]`, `[num_nodes * count1 * count2]` ...
  """
  neighbors_list = [tf.reshape(nodes, [-1])]
  weights_list = []
  type_list = []
  for hop_edge_types, count in zip(edge_types, counts):
    neighbors, weights, types = sample_neighbor(
        neighbors_list[-1], hop_edge_types, count, default_node=default_node)
tensorflow.reshape
921
import tensorflow as tf

    A matrix with the input matrices stacked along its main diagonal, having
    shape [..., \sum_i N_i, \sum_i M_i].
  """
  matrices = [tf.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]
  blocked_rows = tf.Dimension(0)
  blocked_cols = tf.Dimension(0)
  batch_shape = tf.TensorShape(None)
  for matrix in matrices:
    full_matrix_shape = matrix.get_shape().with_rank_at_least(2)
    batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])
    blocked_rows += full_matrix_shape[-2]
tensorflow.Dimension
922
import tensorflow as tf
import tensorflow.contrib.layers as layers

def simple_model(img_in, num_actions, scope, reuse=False, num_filters=64):
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        gauss_initializer = initializers.xavier_initializer(uniform=False)  # stddev = 1/n
        with tf.variable_scope("convnet"):
            out = layers.convolution2d(
                out, num_outputs=num_filters, kernel_size=8, stride=4,
                activation_fn=tf.nn.relu, weights_initializer=gauss_initializer,
                trainable=False)
        out = layers.flatten(out)
tensorflow.contrib.layers.convolution2d
923
import numpy as np
import tensorflow as tf

def train_rnn(raw_data_x, raw_data_y, val_data_x, val_data_y, g, num_epochs, num_steps,
              batch_size, input_prob, output_prob, state_prob, epoch_before_val=50,
              max_checks_without_progress=50, epoch_overlap=None, verbose=True, save=False):

    with tf.Session() as sess:
        "initialize the variables"
        sess.run(tf.global_variables_initializer())

        raw_data_yp = np.insert(raw_data_y, 0, 0, axis=0)[:-1]
        val_data_yp = np.insert(val_data_y, 0, 0, axis=0)[:-1]

        "see the trainable variables"
        # print("The trainable variables are:")
        variable_names = [v.name for v in tf.trainable_variables()]
        variable_shapes = [v.get_shape() for v in tf.trainable_variables()]
        parameter_num = 0
        for name, shape in zip(variable_names, variable_shapes):
            # print('{}\nShape: {}'.format(name, shape))
            parameter_num += shape[0] * shape[1] if np.size(shape) > 1 else shape[0]

        "train the graph"
        training_losses = []
        val_losses = []

        # set early stopping criterion
        checks_without_progress = 0
        best_loss = np.infty
tensorflow.trainable_variables
924
import tensorflow as tf

_, states = tf.nn.dynamic_rnn(lstm_cells, input, dtype=tf.float32, initial_state=None)

# z_sequence_output = states[1].h
# print(z_sequence_output.get_shape())
states_concat = tf.concat([states[0].h, states[1].h], 1)

# def fc(input, scope, out_dim, non_linear_fn=None, initial_value=None, use_bias=True):
z_sequence_output = fc(states_concat, lstm_z_sequence_dim, scope='linear_transform')
tensorflow.concat
925
import tensorflow as tf

# loss and optimizer
self.loss = tf.reduce_mean(tf.square(tf.subtract(self.value_estimate, self.target)))
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
tensorflow.subtract
926
import tensorflow as tf

    mask0 = tf.constant([[1, 0], [0, 1]], dtype=tf.float32)
    mask1 = tf.constant([[1, 1], [0, 1]], dtype=tf.float32)
    mask2 = tf.constant([[1, 0], [1, 1]], dtype=tf.float32)
    mask3 = tf.constant([[1, 1], [1, 1]], dtype=tf.float32)
    mask4 = tf.constant([[0, 0], [0, 0]], dtype=tf.float32)
    mask5 = tf.constant([[1, 0], [1, 0]], dtype=tf.float32)
    masks1 = tf.stack([mask0, mask1, mask2])
    masks2 = tf.stack([mask3, mask4, mask5])
    ious = isu.get_pairwise_iou_matrix(masks1, masks2)
    expected_ious = tf.constant([[0.5, 0.0, 1.0 / 3.0],
                                 [0.75, 0.0, 0.25],
                                 [0.75, 0.0, 2.0 / 3.0]],
                                dtype=tf.float32)
    self.assertAllClose(ious.numpy(), expected_ious.numpy())

  def test_instance_non_maximum_suppression_1d_scores(self):
    mask0 = tf.constant([[1, 0], [0, 1]], dtype=tf.float32)
    mask1 = tf.constant([[1, 1], [0, 1]], dtype=tf.float32)
tensorflow.stack
927
import tensorflow as tf

              filters=decoder_depth,
              rate=1,
              weight_decay=weight_decay,
              scope='decoder_conv0')
          decoder_features = _split_separable_conv2d(
              decoder_features,
              filters=decoder_depth,
              rate=1,
              weight_decay=weight_decay,
              scope='decoder_conv1')
        else:
          num_convs = 2
          decoder_features = slim.repeat(
              tf.concat(decoder_features_list, 3),
              num_convs,
              slim.conv2d,
              decoder_depth,
              3,
              scope='decoder_conv' + str(i))
  return decoder_features

def _get_branch_logits(features,
                       num_classes,
                       atrous_rates=None,
                       aspp_with_batch_norm=False,
tensorflow.concat
928
import tensorflow as tf

update_target_fn = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
                           sorted(target_q_func_vars, key=lambda v: v.name)):
    update_target_fn.append(var_target.assign(var))
update_target_fn = tf.group(*update_target_fn)

# construct the replay buffer
replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len)
tensorflow.group
929
import tensorflow as tf

optimiser = tf.train.AdamOptimizer(decayed_learning_rate, name="Adam").minimize(cross_entropy)

# calculate the prediction and the accuracy
accuracy, acc_op = tf.metrics.accuracy(labels=tf.argmax(y_, axis=1),
                                       predictions=tf.argmax(y_conv, axis=1))

loss_summary = tf.summary.scalar('Loss', cross_entropy)
acc_summary = tf.summary.scalar('Accuracy', accuracy)

# summaries for TensorBoard visualisation
validation_summary = tf.summary.merge([img_summary, acc_summary])
training_summary = tf.summary.merge([img_summary, loss_summary])
test_summary = tf.summary.merge([img_summary, acc_summary])

# saver for checkpoints
saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)

with tf.Session() as sess:
    summary_writer = tf.summary.FileWriter(run_log_dir + '_train', sess.graph, flush_secs=5)
tensorflow.summary.merge
930
import tensorflow as tf

if FLAGS.print_variables:
    for v in tf.trainable_variables():
        print(v.name)

with tf.name_scope('loss'):
    one_hot_labels_action = dense_to_one_hot(actions_template,
                                             action_templates_vocabulary_length)
    one_hot_labels_arguments = dense_to_one_hot(actions_arguments,
                                                actions_arguments_vocabulary_length)
    loss_action = tf.reduce_mean(
        - one_hot_labels_action * tf.log(tf.clip_by_value(self.predictions_action, 1e-10, 1.0)),
        name='loss')
    loss_arguments = tf.reduce_mean(
        - one_hot_labels_arguments * tf.log(tf.clip_by_value(self.predictions_arguments, 1e-10, 1.0)),
        name='loss')
    self.loss = loss_action + loss_arguments
    tf.scalar_summary('loss', self.loss)

with tf.name_scope('accuracy'):
    correct_prediction_action = tf.equal(
        tf.argmax(one_hot_labels_action, 1),
        tf.argmax(self.predictions_action, 1)
    )
tensorflow.clip_by_value
931
import tensorflow as tf

            tensor_dict[fields.InputDataFields.groundtruth_boxes],
            zero_indexed_groundtruth_classes, groundtruth_confidences,
            num_classes))
    merged_classes = tf.cast(merged_classes, tf.float32)
    tensor_dict[fields.InputDataFields.groundtruth_boxes] = merged_boxes
    tensor_dict[fields.InputDataFields.groundtruth_classes] = merged_classes
    tensor_dict[fields.InputDataFields.groundtruth_confidences] = (
        merged_confidences)

  if fields.InputDataFields.groundtruth_boxes in tensor_dict:
    tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = tf.shape(
        tensor_dict[fields.InputDataFields.groundtruth_boxes])[0]

  return tensor_dict

def pad_input_data_to_static_shapes(tensor_dict, max_num_boxes, num_classes,
                                    spatial_image_shape=None):
  """Pads input tensors to static shapes.
tensorflow.shape
932
import tensorflow as tf

# update the initial states
for i in range(2):
    new_state = tf.concat(
        [final_state[i][:batch_size, :],
tensorflow.concat
933
import tensorflow as tf

  """
  if not tf.gfile.Exists(work_directory):
    tf.gfile.MakeDirs(work_directory)
  filepath = os.path.join(work_directory, filename)
tensorflow.gfile.MakeDirs
934
import tensorflow as tf

# all placeholder for tf
with tf.name_scope('S'):
    S = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s')
with tf.name_scope('R'):
    R = tf.placeholder(tf.float32, [None, 1], name='r')
with tf.name_scope('S_'):
    S_ = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s_')

###############################  Actor  ####################################

class Actor(object):
    def __init__(self, sess, action_dim, action_bound, learning_rate, t_replace_iter):
tensorflow.placeholder
935
import tensorflow as tf

  Returns:
    a tensor with shape [N, M] representing pairwise iou scores.
  """
  intersections = pairwise_intersection(boxlist1, boxlist2)
  areas1 = area(boxlist1)
  areas2 = area(boxlist2)
  unions = (
      tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
  return tf.where(
      tf.equal(intersections, 0.0), tf.zeros_like(intersections),
      tf.truediv(intersections, unions))
tensorflow.expand_dims
936
import tensorflow as tf

    except ImportError as e:
      tf.logging.warn("Cannot test truncated normal op: %s" % str(e))

  def validateKolmogorovSmirnov(self, shape, mean, stddev, minval, maxval, seed=1618):
    try:
      import scipy.stats  # pylint: disable=g-import-not-at-top
      tf.set_random_seed(seed)
      with self.test_session(use_gpu=self._use_gpu):
        samples = random_ops.parameterized_truncated_normal(
            shape, mean, stddev, minval, maxval).eval()
      assert (~np.isnan(samples)).all()
      minval = max(mean - stddev * 10, minval)
      maxval = min(mean + stddev * 10, maxval)
      dist = scipy.stats.norm(loc=mean, scale=stddev)
      cdf_min = dist.cdf(minval)
      cdf_max = dist.cdf(maxval)
tensorflow.set_random_seed
937
import tensorflow as tf

        self._lr = tf.Variable(0., trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars),
                                          config.max_grad_norm)
        optimizer = tf.train.GradientDescentOptimizer(self._lr)
        self._train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.train.get_or_create_global_step())

        self._new_lr = tf.placeholder(
            tf.float32, shape=[], name='new_learning_rate')
        self._lr_update = tf.assign(self._lr, self._new_lr)
        self.saver = tf.train.Saver(tf.global_variables())
tensorflow.train.get_or_create_global_step
938
import tensorflow as tf

      def tpu_scaffold():
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        return tf.train.Scaffold()

      scaffold_fn = tpu_scaffold
    else:
      tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
tensorflow.logging.info
939
import tensorflow as tf

    def avg_pool(self, bottom, name):
        return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                              padding='SAME', name=name)

    def max_pool(self, bottom, name):
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                              padding='SAME', name=name)

    def conv_layer(self, bottom, name):
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)
            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)
            relu = tf.nn.relu(bias)
            return relu

    def fc_layer(self, bottom, name):
        with tf.variable_scope(name):
            shape = bottom.get_shape().as_list()
tensorflow.nn.conv2d
940
import tensorflow as tf

        w_init = tf.contrib.layers.variance_scaling_initializer()
        if b_init is None:
            b_init = tf.constant_initializer()

        w = tf.get_variable('W', filter_shape, initializer=w_init)
        b = None

        if use_bias:
            b = tf.get_variable('b', [out_dims], initializer=b_init)

        conv = tf.nn.atrous_conv2d(value=input_tensor, filters=w, rate=rate,
                                   padding=padding, name='dilation_conv')

        if use_bias:
            ret = tf.add(conv, b)
        else:
            ret = conv

        return ret

    @staticmethod
    def spatial_dropout(input_tensor, keep_prob, is_training, name, seed=1234):
        """
        Spatial dropout implementation
        :param input_tensor:
        :param keep_prob:
        :param is_training:
        :param name:
tensorflow.add
941
import numpy as np
import tensorflow as tf

    d_real, d_fake, _, _ = tf.split(d_all, 4)
    d_real_logits, d_fake_logits, _, _ = tf.split(d_all_logits, 4)
    z_projs_real, z_projs_fake, z_aug_projs_real, z_aug_projs_fake = tf.split(z_projs, 4)

    self.d_loss, _, _, self.g_loss = loss_lib.get_losses(
        d_real=d_real, d_fake=d_fake,
        d_real_logits=d_real_logits, d_fake_logits=d_fake_logits)

    penalty_loss = penalty_lib.get_penalty_loss(
        x=images, x_fake=generated, y=y, is_training=is_training,
        discriminator=self.discriminator, architecture=self._architecture)
    self.d_loss += self._lambda * penalty_loss

    z_projs = tf.concat([z_projs_real, z_projs_fake], 0)
    z_aug_projs = tf.concat([z_aug_projs_real, z_aug_projs_fake], 0)

    sims_logits = tf.matmul(z_projs, z_aug_projs, transpose_b=True)
    logits_max = tf.reduce_max(sims_logits, 1)
    sims_logits = sims_logits - tf.reshape(logits_max, [-1, 1])
    sims_probs = tf.nn.softmax(sims_logits)

    sim_labels = tf.constant(np.arange(bs * 2, dtype=np.int32))
    sims_onehot = tf.one_hot(sim_labels, bs * 2)

    c_real_loss = - tf.reduce_mean(
        tf.reduce_sum(sims_onehot * tf.log(sims_probs + 1e-10), 1))

    self.d_loss += c_real_loss * self._weight_contrastive_loss_d
tensorflow.concat
942
import tensorflow as tf

# Create a new session with a new tf graph.
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(tf.global_variables_initializer())  # initialize the checkpoint.

# This is the node that will accept the input.
input_nodes = tf.get_default_graph().get_tensor_by_name(
    'main_level/agent/main/online/' + 'network_0/observation/observation:0')

# This is the node that will produce the output.
output_nodes = tf.get_default_graph().get_operation_by_name(
    'main_level/agent/main/online/' + 'network_1/ppo_head_0/policy')

# Save the model as a servable model.
tensorflow.get_default_graph
943
import tensorflow as tf

    tf.summary.scalar("model/policy_loss", pi_loss / bs)
    tf.summary.scalar("model/value_loss", vf_loss / bs)
    tf.summary.scalar("model/entropy", entropy / bs)
    tf.summary.image("model/state", pi.x)
    tf.summary.scalar("model/grad_global_norm", tf.global_norm(grads))
    tf.summary.scalar("model/var_global_norm", tf.global_norm(pi.var_list))
    self.summary_op = tf.summary.merge_all()

    grads, _ = tf.clip_by_global_norm(grads, 40.0)

    # copy weights from the parameter server to the local model
    self.sync = tf.group(*[v1.assign(v2)
                           for v1, v2 in zip(pi.var_list, self.network.var_list)])

    grads_and_vars = list(zip(grads, self.network.var_list))
    self.inc_step = self.global_step.assign_add(tf.shape(pi.x)[0])
tensorflow.clip_by_global_norm
944
import tensorflow as tf

    output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

    return (loss, per_example_loss, logits, probabilities)

def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings, do_serve):
  """Returns `model_fn` closure for TPUEstimator."""
tensorflow.reduce_mean
945
import numpy as np
import tensorflow as tf

            logger.info('Tasks dump!')
        assert (task_generator == 'fixed')
        test_summary['task'].append(task.goal_velocity)

        if FLAGS.task.reset_policy:
            # NOTE: reset policy and valuefunc
            logger.info("Resetting Policy")
            pol_params = tf.get_default_session().run(
                [nn.utils.parameters_to_vector(policy.parameters())])
            tf.get_default_session().run(tf.variables_initializer(policy.parameters()))
            pol_params_after = tf.get_default_session().run(
                [nn.utils.parameters_to_vector(policy.parameters())])
            print("pol_params:", np.linalg.norm(pol_params),
                  "pol_params_after_reset:", np.linalg.norm(pol_params_after))
            logger.info("Resetting Valuefunc")
            tf.get_default_session().run(tf.variables_initializer(vfn.parameters()))

        tf.get_default_session().run(tf.variables_initializer(warmup_policy.parameters()))
        tf.get_default_session().run(tf.variables_initializer(warmup_vfn.parameters()))

        for p in warmup_policy.parameters():
            p.invalidate()
        for p in warmup_vfn.parameters():
            p.invalidate()
        for p in policy.parameters():
            p.invalidate()
        for p in vfn.parameters():
            p.invalidate()
        last_end = None
        drops = []

        evaluate(settings, 'pre-warm-up')
        returns_pre_warmup = testeval(policy, runners['collect'])
        if test:
            test_returns.append(returns_pre_warmup)
tensorflow.get_default_session
946
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_state_ops

    update_ops.append(op)
  with ops.control_dependencies(update_ops):
    return gen_state_ops._destroy_temporary_variable(var, var_name=var_name,
tensorflow.python.ops.gen_state_ops._destroy_temporary_variable
947
import tensorflow as tf

    conv_bn = cnv
    biases = tf.get_variable("biases", [nOut],
                             initializer=tf.constant_initializer(),
                             dtype=inpOp.dtype)
tensorflow.constant_initializer
948
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops

  Returns:
    0 if `denominator` == 0, else `numerator` / `denominator`
  """
  numerator.get_shape().with_rank_at_most(1)
  denominator.get_shape().with_rank_at_most(1)
  return control_flow_ops.cond(
      math_ops.equal(
          array_ops.constant(0.0, dtype=dtypes.float64), denominator),
      lambda: array_ops.constant(0.0, dtype=dtypes.float64),
      lambda: math_ops.div(numerator, denominator),
      name=name)

def _create_local(name, shape, collections=None, validate_shape=True,
                  dtype=dtypes.float32):
  """Creates a new local variable.
tensorflow.python.ops.array_ops.constant
949
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util

@ops.RegisterShape("Range")
def _RangeShape(op):
  start_value = tensor_util.ConstantValue(op.inputs[0])
  limit_value = tensor_util.ConstantValue(op.inputs[1])
  delta_value = tensor_util.ConstantValue(op.inputs[2])
tensorflow.python.framework.tensor_util.ConstantValue
950
import tensorflow as tf

  """
  mode = global_mode()
  return tf.equal(mode, tf.estimator.ModeKeys.PREDICT)
tensorflow.equal
951
import tensorflow as tf

range_unhead = tf.tile(tf.expand_dims(tf.range(bs), -1), [1, sl_unhead])
scatter_pooling = tf.cond(
    tf.equal(sl_unhead, 0),
    lambda: tf.zeros([bs, sl + 1, hn], tf.float32),
tensorflow.equal
952
import tempfile

import tensorflow as tf

class IoOpsTest(tf.test.TestCase):

  def testReadFile(self):
    cases = ['', 'Some contents', 'Неки садржаји на српском']
    for contents in cases:
      contents = tf.compat.as_bytes(contents)
      temp = tempfile.NamedTemporaryFile(prefix='ReadFileTest')
      open(temp.name, 'wb').write(contents)
      with self.test_session():
        read = tf.read_file(temp.name)
        self.assertEqual([], read.get_shape())
        self.assertEqual(read.eval(), contents)

  def _subset(self, files, indices):
    return set(tf.compat.as_bytes(files[i].name)
               for i in range(len(files)) if i in indices)
tensorflow.compat.as_bytes
953
import tensorflow as tf

      images,
      model_options=model_options,
      is_training=False,
      fine_tune_batch_norm=False)

  if add_flipped_images:
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
      outputs_to_scales_to_logits_reversed = multi_scale_logits(
          tf.reverse_v2(images, [2]),
          model_options=model_options,
          is_training=False,
          fine_tune_batch_norm=False)
tensorflow.get_variable_scope
954
import numpy as np
import tensorflow as tf

weights = np.load(self.load_weights_path)
init_state_initializer = tf.constant_initializer(weights['init_state'])
tensorflow.constant_initializer
955
import tensorflow as tf

        return g2
    elif g2 is None:
        return g1
    else:
        return g1 + g2

def q_explained_variance(qpred, q):
    _, vary = tf.nn.moments(q, axes=[0, 1])
    _, varpred = tf.nn.moments(q - qpred, axes=[0, 1])
    check_shape([vary, varpred], [[]] * 2)
    return 1.0 - (varpred / vary)
tensorflow.nn.moments
956
import tensorflow as tf

    p0 = ea0 / z0
    return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1)

def cat_entropy_softmax(p0):
    return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis=1)

def ortho_init(scale=1.0):
    def _ortho_init(shape, dtype, partition_info=None):
tensorflow.log
957
import tensorflow as tf

    # The outputs should be executed immediately because two samples are
    # added.
    self.assertGreaterEqual(.5, duration.total_seconds())
    self.assertEqual(2, batch_size)

  def test_maximum_batch_size(self):
    with self.test_session() as session:
      @dynamic_batching.batch_fn_with_options(maximum_batch_size=2)
      def f(a, b):
        batch_size = tf.shape(a)[0]
        return a + b, tf.tile([batch_size], [batch_size])

      outputs = [
          f(tf.constant([1]), tf.constant([2])),
          f(tf.constant([1]), tf.constant([2])),
          f(tf.constant([1]), tf.constant([2])),
          f(tf.constant([1]), tf.constant([2])),
          f(tf.constant([1]), tf.constant([2])),
      ]

      tf.train.start_queue_runners()

      results = session.run(outputs)

      for value, batch_size in results:
        self.assertEqual(3, value)
        self.assertGreaterEqual(2, batch_size)
tensorflow.constant
958
import tensorflow as tf

        gate = tf.nn.sigmoid(dense(d_res, dim, use_bias=False))
        return res * gate

def dense(inputs, hidden, use_bias=True, scope="dense"):
    with tf.variable_scope(scope):
        shape = tf.shape(inputs)
        dim = inputs.get_shape().as_list()[-1]
        out_shape = [shape[idx] for idx in range(
            len(inputs.get_shape().as_list()) - 1)] + [hidden]
tensorflow.variable_scope
959
import tensorflow as tf

        self.num_layers = num_layers
        self.grus = []
        self.inits = []
        self.dropout_mask = []
        self.scope = scope
        for layer in range(num_layers):
            input_size_ = input_size if layer == 0 else 2 * num_units
            gru_fw = tf.contrib.rnn.GRUCell(num_units)
            gru_bw = tf.contrib.rnn.GRUCell(num_units)
            init_fw = tf.tile(tf.Variable(
                tf.zeros([1, num_units])), [batch_size, 1])
            init_bw = tf.tile(tf.Variable(
                tf.zeros([1, num_units])), [batch_size, 1])
            mask_fw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32),
                              keep_prob=keep_prob, is_train=is_train, mode=None)
            mask_bw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32),
tensorflow.contrib.rnn.GRUCell
960
import tensorflow as tf

    if init_stddev <= 0.0:
        init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
    else:
        init = tf.truncated_normal_initializer(stddev=init_stddev)
    X = tf.layers.conv2d(X, out_channels, kernel_size=filtersize,
                         strides=(stride, stride), padding="valid",
                         kernel_initializer=init)
    if norm == 'I':
        X = tf.contrib.layers.instance_norm(X, scope=scope, reuse=reuse, epsilon=0.001)
tensorflow.layers.conv2d
961
import tensorflow as tf

        tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
        rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)

        # Reshape rel_embed into square D x D matrices
        rel_embed_square = tf.reshape(rel_embed, (-1, self.embedding_size, self.embedding_size))

        # Reshape head_embed and tail_embed to be suitable for the matrix multiplication
        head_embed_row = tf.expand_dims(head_embed, 1)   # embeddings as row vectors
        tail_embed_col = tf.expand_dims(tail_embed, 2)   # embeddings as column vectors

        head_rel_mult = tf.batch_matmul(head_embed_row, rel_embed_square)

        # Output needs a squeeze into a 1d vector
        raw_output = tf.squeeze(tf.batch_matmul(head_rel_mult, tail_embed_col))

        self.output, self.loss = self._create_output_and_loss(raw_output)

        # Optimization
tensorflow.expand_dims
962
import tensorflow as tf

with tf.control_dependencies([ema_apply_op]):
    return tf.identity(batch_mean), tf.identity(batch_var)
tensorflow.identity
963
from typing import List

import tensorflow as tf

  def vocabulary_by_name(self, vocab_filename: str) -> List[bytes]:
    """Like vocabulary_file_by_name but returns a list."""
    vocab_path = self.vocabulary_file_by_name(vocab_filename)
    if not vocab_path:
      raise ValueError('Could not read vocabulary: {}, does not exist'.format(
          vocab_filename))
    elif vocab_path.endswith('tfrecord.gz'):
      dataset = tf.data.TFRecordDataset(vocab_path, compression_type='GZIP')

      vocab_tensor = dataset.batch(tf.int32.max).reduce(
          tf.constant([], dtype=tf.string),
          lambda state, elem: tf.concat([state, elem], axis=-1))
      # Using as_numpy_iterator only works when executing eagerly.
      return _get_tensor_value(vocab_tensor).tolist()
    else:
tensorflow.data.TFRecordDataset
964
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops

  factor = array_ops.where(norm < max_norm,
                           array_ops.ones_like(norm),
                           math_ops.exp(log_mean) / norm)

  if static_max_norm is not None:
    factor = math_ops.minimum(static_max_norm / norm, factor)

  # apply factor
  clipped_grads = []
tensorflow.python.ops.math_ops.minimum
965
import tensorflow as tf

    label_ids = features["label_ids"]
    is_real_example = None
    if "is_real_example" in features:
      is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
    else:
      is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    (total_loss, per_example_loss, probabilities, logits, predictions) = \
tensorflow.shape
966
import tensorflow as tf

        decoded.append(syms)

    return decoded

def main(args):
    tf.logging.set_verbosity(tf.logging.INFO)
    model_cls = models.get_model(args.model)
    params = default_parameters()

    # Import and override parameters
    # Priorities (low -> high):
    # default -> saved -> command
tensorflow.logging.set_verbosity
967
import tensorflow as tf

def gradient_difference_loss(x, y):
    x_h_diff = x[:, 1:] - x[:, :-1]
    x_w_diff = x[:, :, 1:] - x[:, :, :-1]
    y_h_diff = y[:, 1:] - y[:, :-1]
    y_w_diff = y[:, :, 1:] - y[:, :, :-1]

    h_diff = tf.abs(tf.abs(x_h_diff) - tf.abs(y_h_diff))
    w_diff = tf.abs(tf.abs(x_w_diff) - tf.abs(y_w_diff))

    return h_diff + tf.transpose(w_diff)

def leaky_relu(x, leak=0.2, name='leaky_relu'):
    with tf.variable_scope(name):
        f1 = 0.5 * (1 + leak)
tensorflow.abs
968
import tensorflow as tf

assert in_w % 2 == 0 and in_h % 2 == 0, \
    'Width & height ({} & {}) must both be even!'.format(in_w, in_h)

with tf.variable_scope('fac_reduc'):
    # Split area into 2 halves
tensorflow.variable_scope
969
import tensorflow as tf

"""Build dynamic graph"""
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell=cell, inputs=rnn_inputs,
                                             initial_state=init_state)

"""Add prediction layer"""
with tf.variable_scope('softmax'):
    W = tf.get_variable('W', [state_size, input_size_y])
    b = tf.get_variable('b', [input_size_y], initializer=tf.constant_initializer(0.0))

rnn_outputs = tf.reshape(rnn_outputs, [-1, state_size])
predictions = tf.matmul(rnn_outputs, W) + b
# batch_size*num_steps; when you define a placeholder in TensorFlow, the shape of the
# input during the session should be the same as the shape of the placeholder
yy = tf.reshape(y, [-1, input_size_y])

"Mean squared error loss"
loss = tf.reduce_mean(tf.square(tf.reshape(predictions, [-1]) - tf.reshape(yy, [-1])))

"Adding regularization"
if lambda_l2_reg > 0:
    cell_l2 = tf.reduce_sum([tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
                             if not ("noreg" in tf_var.name or "Bias" in tf_var.name)])
tensorflow.reshape
970
import numpy as np
import tensorflow as tf

    """
    assert isinstance(targets, float)
    if targets in [0., 1.]:
        entropy = 0.
    else:
        entropy = - targets * np.log(targets) - (1. - targets) * np.log(1. - targets)
    return tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(logits) * targets, logits=logits) - entropy

def _setup_model_loss(self, update_ops=None, num_classes=6):
    self.learning_rate_d = tf.placeholder(tf.float32, shape=[], name="learning_rate_placeholder")
    self.learning_rate_g = tf.placeholder(tf.float32, shape=[], name="learning_rate_placeholder")

    d_optimizer = self._optimizer(
        self.learning_rate_d,
        optname=self.cnf.get('optname', 'momentum'),
        **self.cnf.get('opt_kwargs', {'decay': 0.9}))
    g_optimizer = self._optimizer(
        self.learning_rate_g,
        optname=self.cnf.get('optname', 'momentum'),
        **self.cnf.get('opt_kwargs', {'decay': 0.9}))

    # Get images and labels for ImageNet and split the batch across GPUs.
    assert self.cnf['batch_size_train'] % self.cnf.get('num_gpus', 1) == 0, (
        'Batch size must be divisible by number of GPUs')
tensorflow.placeholder
971
import tensorflow as tf

        dec = tf.layers.max_pooling1d(dec, pool_size=2, strides=1, padding="same")
        dec = tf.layers.conv1d(dec, embed_size // 2, 3, name="decoder-conv1-1", padding="SAME")
        dec = tf.nn.relu(tf.layers.batch_normalization(dec, training=self.training))
        dec = tf.layers.conv1d(dec, embed_size // 2, 3, name="decoder-conv1-2", padding="SAME")
        dec = tf.layers.batch_normalization(dec, training=self.training)
        dec = tf.layers.dense(dec, embed_size // 2)
        for i in range(4):
            dec = highwaynet(
                dec, num_units=embed_size // 2,
                scope="decoder-highwaynet-{}".format(i))
        with tf.variable_scope("decoder-gru", reuse=False):
            cell = tf.contrib.rnn.GRUCell(embed_size // 2)
            cell_bw = tf.contrib.rnn.GRUCell(embed_size // 2)
            outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell_bw, dec, dtype=tf.float32)
            outputs = tf.concat(outputs, 2)
        self.Z_hat = tf.layers.dense(outputs, 1 + fourier_window_size // 2)
        self.loss1 = tf.reduce_mean(tf.abs(self.Y_hat - self.Y))
        self.loss2 = tf.reduce_mean(tf.abs(self.Z_hat - self.Z))
        self.loss = self.loss1 + self.loss2
        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)

# In[3]:

tf.reset_default_graph()
sess = tf.InteractiveSession()

size_layers = 128
learning_rate = 1e-3
tensorflow.layers.dense
972
import tensorflow as tf

indices = tf.where(tf.greater(mask_, tol))
indices = tf.cast(indices, tf.int32)
tensorflow.cast
973
import tensorflow as tf

train_graph = tf.Graph()
eval_graph = tf.Graph()
infer_graph = tf.Graph()

with train_graph.as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)

    with tf.name_scope('Train'):
        train_input = DataInput(config=config, data=train_data, name='TrainInput')
        with tf.variable_scope('Model', reuse=None, initializer=initializer):
            m = Model(is_training=True, config=config, input_=train_input, graph=train_graph)
        tf.summary.scalar('Training Loss', m.cost)
        tf.summary.scalar('Learning rate', m.lr)

    latest_ckpt = tf.train.latest_checkpoint(FLAGS.save_path)

with train_graph.as_default():
    sv = tf.train.Supervisor(logdir=FLAGS.save_path)
    config_proto = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
    with sv.managed_session(config=config_proto) as train_sess:
        # with tf.Session(config=config_proto) as train_sess:
        train_sess.run(tf.global_variables_initializer())

        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.)
tensorflow.summary.scalar
974
import tensorflow as tf

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, x in enumerate(xs):
        c = c
        h = h
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f * c + i * u
        h = o * tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s

def ortho_init(scale=1.0):
    def _ortho_init(shape, dtype, partition_info=None):
tensorflow.tanh
975
import tensorflow as tf

        labels=masked_lm_ids,
        predictions=masked_lm_predictions,
        weights=masked_lm_weights,
    )
    masked_lm_mean_loss = tf.metrics.mean(
        values=masked_lm_example_loss, weights=masked_lm_weights
    )

    next_sentence_log_probs = tf.reshape(
        next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]]
    )
    next_sentence_predictions = tf.argmax(
        next_sentence_log_probs, axis=-1, output_type=tf.int32
    )
    next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
    next_sentence_accuracy = tf.metrics.accuracy(
        labels=next_sentence_labels, predictions=next_sentence_predictions
    )
    next_sentence_mean_loss = tf.metrics.mean(
        values=next_sentence_example_loss
    )

    return {
        "masked_lm_accuracy": masked_lm_accuracy,
        "masked_lm_loss": masked_lm_mean_loss,
        "next_sentence_accuracy": next_sentence_accuracy,
        "next_sentence_loss": next_sentence_mean_loss,
    }
tensorflow.reshape
976
from tensorflow.python.ops import math_ops

  indices_at_minval = math_ops.equal(
      math_ops.abs(sensitivities - sensitivity), min_val)
  indices_at_minval = math_ops.to_int64(indices_at_minval)
  indices_at_minval = math_ops.cumsum(indices_at_minval)
tensorflow.python.ops.math_ops.to_int64
977
import tensorflow as tf

        update_param_noise_threshold_expr = param_noise_threshold.assign(
            tf.cond(update_param_noise_threshold_ph >= 0,
                    lambda: update_param_noise_threshold_ph,
                    lambda: param_noise_threshold))

        # Put everything together.
        deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
        batch_size = tf.shape(observations_ph.get())[0]
        random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0,
                                           maxval=num_actions, dtype=tf.int64)
        chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0,
                                         maxval=1, dtype=tf.float32) < eps
        stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

        output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions,
                                 lambda: deterministic_actions)
        update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0,
                                             lambda: update_eps_ph, lambda: eps))
        updates = [
            update_eps_expr,
            tf.cond(reset_ph,
                    lambda: perturb_vars(original_scope="q_func",
                                         perturbed_scope="perturbed_q_func"),
                    lambda: tf.group(*[])),
            tf.cond(update_param_noise_scale_ph, lambda: update_scale(),
                    lambda: tf.Variable(0., trainable=False)),
            update_param_noise_threshold_expr,
        ]
        act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph,
                                 reset_ph, update_param_noise_threshold_ph,
                                 update_param_noise_scale_ph],
                         outputs=output_actions,
                         givens={update_eps_ph: -1.0, stochastic_ph: True,
                                 reset_ph: False,
                                 update_param_noise_threshold_ph: False,
                                 update_param_noise_scale_ph: False},
                         updates=updates)
        return act

def build_train(make_obs_ph, q_func, num_actions, optimizer,
                grad_norm_clipping=None, gamma=1.0, double_q=True,
                scope="deepq", reuse=None, param_noise=False,
                param_noise_filter_func=None):
    """Creates the train function:
tensorflow.group
978
import tensorflow as tf

classnum = 12
testnum = tf.placeholder(tf.int32)
trainnum = tf.placeholder(tf.int32)
validnum = tf.placeholder(tf.int32)
learnrate = tf.placeholder(tf.float32)
tensorflow.placeholder
979
import tensorflow as tf

pred_flat1, pred_flat2 = tf.reshape(horizon_pred1, [-1, 1]), tf.reshape(horizon_pred2, [1, -1])
tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt1, [-1, 1]), tf.reshape(horizon_tgt2, [1, -1])
tgt_dif = tgt_flat1 - tgt_flat2
pred_dif = pred_flat1 - pred_flat2

geq = tf.cast(tgt_dif > 0, tf.bool)
tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)

cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / \
    tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
tensorflow.where
980
from tensorflow.contrib.layers.python.layers import feature_column as contrib_feature_column
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest

def _train_input_fn():
  features = {"x": constant_op.constant([[2.], [1.], [1.]])}
  label = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
  return features, label

def _ranking_train_input_fn():
  features = {
      "a.f1": constant_op.constant([[3.], [0.3], [1.]]),
      "a.f2": constant_op.constant([[0.1], [3.], [1.]]),
      "b.f1": constant_op.constant([[13.], [0.4], [5.]]),
      "b.f2": constant_op.constant([[1.], [3.], [0.01]]),
  }
  label = constant_op.constant([[0], [0], [1]], dtype=dtypes.int32)
tensorflow.python.framework.constant_op.constant
981
import tensorflow as tf

      result_dict['iou_min'] = iou_min
    elif isinstance(metric, CollisionMetric):
      labeled_sdfs = detections['groundtruth_sdfs']
      labeled_classes = tf.cast(sample['groundtruth_valid_classes'], tf.int64)
      labeled_poses = (sample['rotations_3d'],
                       sample['translations_3d'],
                       sample['sizes_3d'])
      predicted_classes = tf.cast(detections['detection_classes'], tf.int64)
      predicted_sdfs = detections['predicted_sdfs']
      predicted_poses = (detections['rotations_3d'],
                         detections['translations_3d'],
                         detections['sizes_3d'])
      full_oracle = False
      if full_oracle:
        predicted_sdfs = detections['groundtruth_sdfs'].numpy()
tensorflow.cast
982
import tensorflow as tf

        self.n_classes = 10
        # Batch size
        self.batch_size = 16
        # Lstm Units.
        self.num_units = 16

    def buildLstmLayer(self):
        return tf.keras.layers.StackedRNNCells([
            tf.lite.experimental.nn.TFLiteLSTMCell(
                self.num_units, use_peepholes=True, forget_bias=1.0, name="rnn1"),
            tf.lite.experimental.nn.TFLiteLSTMCell(
                self.num_units, num_proj=8, forget_bias=1.0, name="rnn2"),
            tf.lite.experimental.nn.TFLiteLSTMCell(
                self.num_units // 2, use_peepholes=True, num_proj=8,
tensorflow.lite.experimental.nn.TFLiteLSTMCell
983
import tensorflow as tf

    :return: (TensorFlow Tensor) the updated scale expression
    """
    with tf.control_dependencies([perturb_for_adaption]):
        update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
                                    lambda: param_noise_scale.assign(param_noise_scale * 1.01),
                                    lambda: param_noise_scale.assign(param_noise_scale / 1.01))
    return update_scale_expr

# Functionality to update the threshold for parameter space noise.
update_param_noise_thres_expr = param_noise_threshold.assign(
    tf.cond(update_param_noise_threshold_ph >= 0,
            lambda: update_param_noise_threshold_ph,
            lambda: param_noise_threshold))

# Put everything together.
perturbed_deterministic_actions = tf.argmax(perturbable_policy.q_values, axis=1)
deterministic_actions = tf.argmax(policy.q_values, axis=1)
batch_size = tf.shape(policy.obs_ph)[0]
n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0,
                                   maxval=n_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0,
                                 maxval=1, dtype=tf.float32) < eps
perturbed_stochastic_actions = tf.where(chose_random, random_actions,
                                        perturbed_deterministic_actions)
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

perturbed_output_actions = tf.cond(stochastic_ph,
                                   lambda: perturbed_stochastic_actions,
                                   lambda: deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions,
                         lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0,
                                     lambda: update_eps_ph, lambda: eps))
updates = [
    update_eps_expr,
tensorflow.argmax
984
import tensorflow as tf

        elif self.optim_type == 'sgd':
            self.optimizer = tf.train.GradientDescentOptimizer(self.lr)
        elif self.optim_type == 'adamW':
            self.optimizer = AdamWOptimizer(self.config.weight_decay, learning_rate=self.lr)
        else:
            raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))

        self.logger.info("applying optimize %s" % self.optim_type)
        trainable_vars = tf.trainable_variables()
        if self.config.clip_weight:
            # clip_weight
            tvars = tf.trainable_variables()
            grads = tf.gradients(self.loss, tvars)
            grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.config.max_norm_grad)
            grad_var_pairs = zip(grads, tvars)
            self.train_op = self.optimizer.apply_gradients(grad_var_pairs, name='apply_grad')
        else:
            self.train_op = self.optimizer.minimize(self.loss)

    def _attention(self, output, name='attn', reuse=None):
        with tf.variable_scope(name, reuse=reuse):
            W = tf.get_variable(name="attn_W",
                                shape=[2 * self.config.hidden_size, 2 * self.config.hidden_size],
                                initializer=tf.contrib.layers.xavier_initializer(),
                                # initializer=tf.truncated_normal_initializer(),
tensorflow.gradients
985
import tensorflow as tf

        self.features = features
        self.labels = labels
        self.mode = mode
        self.params = params

    def input_layer(self):
        # data = np.loadtxt(self.params['vocab'], dtype=np.unicode, encoding=None)
        data = self.params["vocab_data"]
        mapping_strings = tf.Variable(data)
        vocab_words = tf.contrib.lookup.index_table_from_tensor(
            mapping_strings, num_oov_buckets=1)

        # Word Embeddings
        words = tf.identity(self.features["words"], name="input_words")
        word_ids = vocab_words.lookup(words)
tensorflow.contrib.lookup.index_table_from_tensor
986
import tensorflow as tf

  def _scipy_pareto(self, concentration, scale):
    # In scipy pareto is defined with scale = 1, so we need to scale.
    return stats.pareto(concentration, scale=scale)

  def testParetoShape(self):
    scale = tf.constant([2.] * 5)
    concentration = tf.constant([2.] * 5)
    pareto = tfd.Pareto(concentration, scale)

    self.assertEqual(self.evaluate(pareto.batch_shape_tensor()), (5,))
    self.assertEqual(pareto.batch_shape, tf.TensorShape([5]))
    self.assertAllEqual(self.evaluate(pareto.event_shape_tensor()), [])
    self.assertEqual(pareto.event_shape, tf.TensorShape([]))

  def testParetoShapeBroadcast(self):
    scale = tf.constant([[3., 2.]])
    concentration = tf.constant([[4.], [5.], [6.]])
    pareto = tfd.Pareto(concentration, scale)

    self.assertAllEqual(self.evaluate(pareto.batch_shape_tensor()), (3, 2))
    self.assertAllEqual(pareto.batch_shape, tf.TensorShape([3, 2]))
    self.assertAllEqual(self.evaluate(pareto.event_shape_tensor()), [])
    self.assertEqual(pareto.event_shape, tf.TensorShape([]))
tensorflow.TensorShape
987
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('device', '/gpu:0', "device")
tf.app.flags.DEFINE_string('dataset', 'cifar10', "{cifar10, svhn}")
tf.app.flags.DEFINE_string('log_dir', "", "log_dir")
tf.app.flags.DEFINE_integer('seed', 1, "initial random seed")
tf.app.flags.DEFINE_bool('validation', False, "")
tf.app.flags.DEFINE_integer('batch_size', 32, "the number of examples in a batch")
tf.app.flags.DEFINE_integer('ul_batch_size', 128, "the number of unlabeled examples in a batch")
tf.app.flags.DEFINE_integer('eval_batch_size', 100, "the number of eval examples in a batch")
tf.app.flags.DEFINE_integer('eval_freq', 5, "")
tf.app.flags.DEFINE_integer('num_epochs', 120, "the number of epochs for training")
tf.app.flags.DEFINE_integer('epoch_decay_start', 80, "epoch of starting learning rate decay")
tf.app.flags.DEFINE_integer('num_iter_per_epoch', 400, "the number of updates per epoch")
tf.app.flags.DEFINE_float('learning_rate', 0.001, "initial learning rate")
tf.app.flags.DEFINE_float('mom1', 0.9, "initial momentum rate")
tf.app.flags.DEFINE_float('mom2', 0.5, "momentum rate after epoch_decay_start")
tf.app.flags.DEFINE_string('method', 'vat', "{vat, vatent, baseline}")

if FLAGS.dataset == 'cifar10':
    from cifar10 import inputs, unlabeled_inputs
elif FLAGS.dataset == 'svhn':
    from svhn import inputs, unlabeled_inputs
else:
tensorflow.app.flags.DEFINE_integer
988
import tensorflow as tf

    biases = tf.get_variable('biases', shape=shape, dtype=tf.float32,
                             initializer=tf.constant_initializer(0.01))
    return biases
tensorflow.constant_initializer
989
import tensorflow as tf

    tgt_dif = tgt_flat1 - tgt_flat2
    pred_dif = pred_flat1 - pred_flat2

    geq = tf.cast(tgt_dif > 0, tf.bool)
    # tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
    pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
    loss = tf.maximum(0., margin - pred_posi_dif)

    cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / \
        tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
    final_loss = tf.reduce_mean(loss)
    return final_loss, cstr_pct

def contra_traj_lossV4(pred, tgt, horizon=12, resample=1, hard_ratio=1.0):
    horizon_pred = horizon_sumV1(pred, horizon)
    horizon_tgt = horizon_sumV1(tgt, horizon)
tensorflow.math.count_nonzero
990
import tensorflow as tf

    mag = tf.norm(self.s_diff, axis=1) * tf.norm(gcut, axis=1) + .0001
    dcos = dot / mag
    manager_loss = -tf.reduce_sum((self.r - cutoff_vf_manager) * dcos)

    cutoff_vf_worker = tf.reshape(tf.stop_gradient(self.worker_vf), [-1])
    log_p = tf.reduce_sum(self.log_pi * self.ac, [1])
    worker_loss = (self.r + self.alpha * self.ri - cutoff_vf_worker) * log_p
    worker_loss = -tf.reduce_sum(worker_loss, axis=0)
tensorflow.stop_gradient
991
import tempfile

import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression

def device():
  return "/device:GPU:0" if tfe.num_gpus() > 0 else "/device:CPU:0"

class LinearRegressionTest(tf.test.TestCase):

  def setUp(self):
    super(LinearRegressionTest, self).setUp()
    self._tmp_logdir = tempfile.mkdtemp()
tensorflow.contrib.eager.num_gpus
992
import tensorflow as tf

    var = tf.get_variable(name, shape=shape, dtype=dtype,
                          initializer=initializer, trainable=trainable)
    # Add L2 regularization node for trainable var
    if trainable and not no_reg:
        l2_loss = tf.nn.l2_loss(var)
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, l2_loss)
    return var
tensorflow.nn.l2_loss
993
import tensorflow as tf

    z_ = lrelu(linear(tf.nn.dropout(z, keep_prob), nf3 * s_h3 * s_w3, 'd_h0_lin'))
    h0 = tf.nn.dropout(tf.reshape(z_, [-1, s_h3, s_w3, nf3]), keep_prob)
    import IPython
    IPython.embed()
    h1 = lrelu(deconv2d(tf.concat([h0, skip_h3], 3),
                        [self.batch_size, s_h2, s_w2, nf2], name='d_h1', d_h=ns3, d_w=ns3))
    h2 = lrelu(deconv2d(tf.concat([h1, skip_h2], 3),
                        [self.batch_size, s_h1, s_w1, nf1], name='d_h2', d_h=ns2, d_w=ns2))
    h3 = lrelu(deconv2d(tf.concat([h2, skip_h1], 3),
                        [self.batch_size, s_h0, s_w0, nf0], name='d_h3', d_h=ns1, d_w=ns1))
    print(h3.get_shape())
    h4 = deconv2d(tf.concat([h3, skip_h0], 3),
                  [self.batch_size, s_h, s_w, self.c_dim], name='d_h4', d_h=ns0, d_w=ns0)
    return h4

with tf.variable_scope("deconv") as scope:
    output_h4 = decode(trans_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0)
    scope.reuse_variables()
    truthoutput_h4 = decode(tgtimg_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0)

self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3
print(tgtimg_z.get_shape())
self.out = output_h4
tensorflow.concat
994
import tensorflow.contrib.graph_editor as ge

    if not isinstance(xs, list):
        xs = [xs]

    bwd_ops = ge.get_backward_walk_ops([y.op for y in ys], inclusive=True)

    debug_print("bwd_ops: %s", bwd_ops)

    # forward ops are all ops that are candidates for recomputation
    fwd_ops = ge.get_forward_walk_ops([x.op for x in xs],
                                      inclusive=True,
                                      within_ops=bwd_ops)
    debug_print("fwd_ops: %s", fwd_ops)

    # exclude ops with no inputs
    fwd_ops = [op for op in fwd_ops if op.inputs]

    # don't recompute xs, remove variables
tensorflow.contrib.graph_editor.get_forward_walk_ops
995
import tensorflow as tf

    for n in range(1, y.shape[0], 1):
        x[n] = coeff * x[n - 1] + y[n]
    return x

def read_and_decode(filename_queue, canvas_size, preemph=0.):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'wav_raw': tf.FixedLenFeature([], tf.string),
            'noisy_raw': tf.FixedLenFeature([], tf.string),
        })
    wave = tf.decode_raw(features['wav_raw'], tf.int32)
    wave.set_shape(canvas_size)
    wave = (2. / 65535.) * tf.cast((wave - 32767), tf.float32) + 1.
    noisy = tf.decode_raw(features['noisy_raw'], tf.int32)
    noisy.set_shape(canvas_size)
    noisy = (2. / 65535.) * tf.cast((noisy - 32767), tf.float32) + 1.

    if preemph > 0:
        wave = tf.cast(pre_emph(wave, preemph), tf.float32)
        noisy = tf.cast(pre_emph(noisy, preemph), tf.float32)

    return wave, noisy
tensorflow.decode_raw
996
import tensorflow as tf

if do_dnn:
    last_layer = rnn_output
    last_layer_size = rnn_output_size
    for i, layer_size in enumerate(dnn_sizes):
        layer_name = 'dnn_{}'.format(i)
        with tf.variable_scope(layer_name):
            dnn_w = tf.get_variable('W', shape=[last_layer_size, layer_size],
                                    initializer=dnn_init, dtype=dtype)
            dnn_b = tf.get_variable('b', shape=[layer_size],
                                    initializer=tf.constant_initializer(0.0), dtype=dtype)
            projected = tf.nn.bias_add(tf.matmul(last_layer, dnn_w), dnn_b)
            # TODO: argument nonlinearity, change bias to 0.1 if relu
            if dnn_nonlin == 'tanh':
                last_layer = tf.nn.tanh(projected)
tensorflow.get_variable
997
import tensorflow as tf

_phase = tf.Variable(False, name='phase', trainable=False,
                     collections=[tf.GraphKeys.LOCAL_VARIABLES])
_phase_train = _phase.assign(True)
_phase_infer = _phase.assign(False)

# TODO: move to ops
def _rank(x):
    return len(x.get_shape())

def _apply_dropout_mask(tensor_shape, keep_prob=1.0, normalize=True):
    random_tensor = keep_prob + tf.random_uniform(tensor_shape, dtype=tf.float32)
    binary_mask = tf.floor(random_tensor)
    if normalize:
        binary_mask = tf.reciprocal(keep_prob) * binary_mask
    return binary_mask

def _global_keep_prob(keep_prob):
    keep_prob = tf.convert_to_tensor(keep_prob, dtype=tf.float32)
    keep_prob = tf.cond(_phase, lambda: keep_prob, lambda: keep_prob * 0.0 + 1.0)
    return keep_prob
tensorflow.random_uniform
998
import tensorflow as tf

def add_train_stats(model, hparams):
    with tf.variable_scope("stats") as scope:
        for i in range(hparams.tacotron_num_gpus):
            tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
            tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
        tf.summary.scalar("before_loss", model.before_loss)
        tf.summary.scalar("after_loss", model.after_loss)

        if hparams.predict_linear:
            tf.summary.scalar("linear_loss", model.linear_loss)
            for i in range(hparams.tacotron_num_gpus):
                tf.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i])
                tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i])

        tf.summary.scalar("regularization_loss", model.regularization_loss)
        tf.summary.scalar("stop_token_loss", model.stop_token_loss)
        tf.summary.scalar("loss", model.loss)
        tf.summary.scalar("learning_rate", model.learning_rate)  # Control learning rate decay speed
        if hparams.tacotron_teacher_forcing_mode == "scheduled":
            tf.summary.scalar("teacher_forcing_ratio", model.ratio)  # Control teacher forcing
            # ratio decay when mode = "scheduled"
        gradient_norms = [tf.norm(grad) for grad in model.gradients]
        tf.summary.histogram("gradient_norm", gradient_norms)
        tf.summary.scalar("max_gradient_norm", tf.reduce_max(gradient_norms))  # visualize
        # gradients (in case of explosion)
        return tf.summary.merge_all()

def add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss, stop_token_loss, loss):
tensorflow.summary.scalar
999