seed (string, 25–2.89k chars) | seed_api (string, 14–102 chars) | index (int64, 0–14.8k) |
---|---|---|
import tensorflow as tf
flags.mark_flag_as_required("output_dir")
tf.app.run()
| tensorflow.app.run | 100 |
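A minimal standalone sketch of the highlighted API, assuming a TF 1.x build where `tf.app.flags` delegates to `absl.flags` (so `mark_flag_as_required` is available); the `output_dir` flag name is taken from the snippet for illustration only.

```python
import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string("output_dir", None, "Directory to write outputs to.")
FLAGS = flags.FLAGS

def main(_):
    # tf.app.run() parses the command-line flags into FLAGS, then calls main(argv).
    print("output_dir =", FLAGS.output_dir)

if __name__ == "__main__":
    flags.mark_flag_as_required("output_dir")
    tf.app.run()
```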
import tensorflow as tf
([pdf](https://arxiv.org/pdf/1701.05517.pdf))
log-likelihood for mixture of discretized logistics, assumes the data has been rescaled to [-1,1] interval
"""
ls = [-1] + l.get_shape().as_list()[1:]
xs = ls[:-1] + [3]
# unpack parameters
logit_probs = l[:, :, :, :nr_mix]
l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix * 3])
# sample mixture indicator from softmax
sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(
tf.shape(logit_probs), minval=1e-5, maxval=1. - 1e-5))), 3), depth=nr_mix, dtype=tf.float32)
sel = tf.reshape(sel, xs[:-1] + [1, nr_mix])
# select logistic parameters
means = tf.reduce_sum(l[:, :, :, :, :nr_mix] * sel, 4)
log_scales = tf.maximum(tf.reduce_sum(
l[:, :, :, :, nr_mix:2 * nr_mix] * sel, 4), -7.)
coeffs = tf.reduce_sum(tf.nn.tanh(
l[:, :, :, :, 2 * nr_mix:3 * nr_mix]) * sel, 4)
# sample from logistic & clip to interval
# we don't actually round to the nearest 8-bit value when sampling
u = tf.random_uniform(tf.shape(means), minval=1e-5, maxval=1. - 1e-5)
| tensorflow.shape | 101 |
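The mixture indicator above is drawn with the Gumbel-max trick: adding `-log(-log(U))` noise to the logits and taking the argmax samples from `softmax(logits)`; the uniform draw is clipped away from 0 and 1 so neither `log` blows up. A hedged standalone sketch (TF 1.x):

```python
import tensorflow as tf

logits = tf.constant([[2.0, 0.5, -1.0]])                       # unnormalized log-probs
u = tf.random_uniform(tf.shape(logits), minval=1e-5, maxval=1. - 1e-5)
gumbel = -tf.log(-tf.log(u))                                   # Gumbel(0, 1) noise
sample = tf.argmax(logits + gumbel, axis=-1)                   # ~ Categorical(softmax(logits))
```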
import tensorflow as tf
hyper_b_final_l1 = tf.layers.dense(inputs=state, units=n_h_mixer, activation=tf.nn.relu,
use_bias=False, name='hyper_b_final_l1')
hyper_b_final = tf.layers.dense(inputs=hyper_b_final_l1, units=1, activation=None,
use_bias=False, name='hyper_b_final')
# First layer
w1 = tf.abs(tf.matmul(state, hyper_w_1))
b1 = tf.matmul(state, hyper_b_1)
w1_reshaped = tf.reshape(w1, [-1, n_agents, n_h_mixer]) # reshape into batch of matrices
b1_reshaped = tf.reshape(b1, [-1, 1, n_h_mixer])
# [batch, 1, n_h_mixer]
hidden = tf.nn.elu(tf.matmul(agent_qs_reshaped, w1_reshaped) + b1_reshaped)
# Second layer
w_final = tf.abs(tf.matmul(state, hyper_w_final))
w_final_reshaped = tf.reshape(w_final, [-1, n_h_mixer, 1]) # reshape into batch of matrices
b_final_reshaped = tf.reshape(hyper_b_final, [-1, 1, 1])
# [batch, 1, 1]
y = tf.matmul(hidden, w_final_reshaped) + b_final_reshaped
q_tot = tf.reshape(y, [-1, 1])
return q_tot
class QMix():
def __init__(self, env, num_s, num_a, lr=0.0001, gamma=0.99, replace_target_iter=5000,
| tensorflow.matmul | 102 |
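In the mixer above, `tf.matmul` operates on stacks of matrices: for 3-D inputs it multiplies the trailing two dimensions batch-wise, which is what lets the per-sample hypernetwork weights act on each batch element's agent Q-values. A minimal sketch of that shape contract (sizes are illustrative):

```python
import tensorflow as tf

agent_qs = tf.ones([4, 1, 3])     # [batch, 1, n_agents]
w1 = tf.ones([4, 3, 8])           # [batch, n_agents, n_h_mixer], from a hypernetwork
hidden = tf.matmul(agent_qs, w1)  # [batch, 1, n_h_mixer]: one matmul per batch element
```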
import tensorflow as tf
# Axis 1 - Input Frames, 4 frames.
# Axis 2, 3 - Height & Width.
# Axis 4 - Channels RGB, 3 colours.
x = tf.transpose(observations, [0, 2, 3, 1, 4])
x_shape = common_layers.shape_list(x)
x = tf.reshape(x, x_shape[:-2] + [-1])
dropout = getattr(self.hparams, "dropout_ppo", 0.0)
with tf.variable_scope("feed_forward_cnn_small"):
x = tf.cast(x, tf.float32) / 255.0
x = tf.layers.conv2d(x, 32, (5, 5), strides=(2, 2),
activation=tf.nn.relu, padding="same")
x = tf.layers.conv2d(x, 32, (5, 5), strides=(2, 2),
activation=tf.nn.relu, padding="same")
flat_x = tf.layers.flatten(x)
if self.use_epochs:
epoch = features["epoch"] + tf.zeros([x_shape[0]], dtype=tf.int32)
# Randomly set epoch to 0 in some cases as that's the inference value.
| tensorflow.layers.conv2d | 103 |
import tensorflow as tf
self.entity_embedding_vars = tf.Variable(entity_init)
self.rel_embedding_vars = tf.Variable(rel_init)
# Embedding layer for each (head, rel, tail) triple being fed in as input
head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
| tensorflow.nn.embedding_lookup | 104 |
import tensorflow as tf
# Gradients and SGD update operation for training the model.
self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss
# Select optimizer
self.optimizer_func = tf.train.AdagradOptimizer
if self.hparams.grad_strategy == 'sgd':
self.optimizer_func = tf.train.GradientDescentOptimizer
self.separate_gradient_update()
tf.summary.scalar('Gradient Norm', self.norm, collections=['train'])
tf.summary.scalar('Learning Rate', self.ranker_learning_rate, collections=['train'])
tf.summary.scalar('Final Loss', tf.reduce_mean(self.loss), collections=['train'])
clipped_labels = tf.clip_by_value(reshaped_train_labels, clip_value_min=0, clip_value_max=1)
pad_removed_train_output = self.remove_padding_for_metric_eval(self.docid_inputs, train_output)
for metric in self.exp_settings['metrics']:
for topn in self.exp_settings['metrics_topn']:
list_weights = tf.reduce_mean(self.propensity_weights * clipped_labels, axis=1, keep_dims=True)
metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_train_labels, pad_removed_train_output, None)
tf.summary.scalar('%s_%d' % (metric, topn), metric_value, collections=['train'])
weighted_metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_train_labels, pad_removed_train_output, list_weights)
tf.summary.scalar('Weighted_%s_%d' % (metric, topn), weighted_metric_value, collections=['train'])
self.train_summary = tf.summary.merge_all(key='train')
self.eval_summary = tf.summary.merge_all(key='eval')
self.saver = tf.train.Saver(tf.global_variables())
| tensorflow.clip_by_value | 105 |
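`tf.clip_by_value` is used above to force the (possibly noisy) relevance labels into [0, 1] before they weight the propensity terms. A tiny sketch:

```python
import tensorflow as tf

labels = tf.constant([[-0.2, 0.7, 3.0]])
clipped = tf.clip_by_value(labels, clip_value_min=0.0, clip_value_max=1.0)
# -> [[0.0, 0.7, 1.0]]
```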
from tensorflow.python.ops import variable_scope as vs
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, att_score):
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
value = math_ops.sigmoid(self._gate_linear([inputs, state]))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
if self._candidate_linear is None:
with vs.variable_scope("candidate"):
self._candidate_linear = _Linear(
| tensorflow.python.ops.variable_scope.variable_scope | 106 |
import tensorflow as tf
"lstm_params",
initializer=tf.random_uniform(
[params_size_t], -config.init_scale, config.init_scale),
validate_shape=False)
c = tf.zeros([config.num_layers, self.batch_size, config.hidden_size],
tf.float32)
h = tf.zeros([config.num_layers, self.batch_size, config.hidden_size],
tf.float32)
self._initial_state = (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),)
outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training)
outputs = tf.transpose(outputs, [1, 0, 2])
outputs = tf.reshape(outputs, [-1, config.hidden_size])
return outputs, (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),)
def _get_lstm_cell(self, config, is_training):
#if config.rnn_mode == BASIC:
| tensorflow.contrib.rnn.LSTMStateTuple | 107 |
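The CuDNN path above packs the recurrent state into `tf.contrib.rnn.LSTMStateTuple(c, h)`, the (cell state, hidden state) pair that canonical TF 1.x LSTM cells consume and emit. A hedged sketch of building a zero initial state (sizes are illustrative):

```python
import tensorflow as tf

batch_size, hidden_size = 32, 128
c = tf.zeros([batch_size, hidden_size], tf.float32)  # cell state
h = tf.zeros([batch_size, hidden_size], tf.float32)  # hidden/output state
initial_state = tf.contrib.rnn.LSTMStateTuple(c=c, h=h)
```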
from tensorflow.examples.tutorials.mnist import input_data
# distribute the computation of the loss and the gradient over the data
# that is represented by the remote object refs x_batches and y_batches and
# which is potentially distributed over a cluster. However, these details
# are hidden from scipy.optimize.fmin_l_bfgs_b, which simply uses it to run
# the L-BFGS algorithm.
# Load the mnist data and turn the data into remote objects.
print("Downloading the MNIST dataset. This may take a minute.")
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
num_batches = 10
batch_size = mnist.train.num_examples // num_batches
batches = [mnist.train.next_batch(batch_size) for _ in range(num_batches)]
print("Putting MNIST in the object store.")
actors = [NetActor.remote(xs, ys) for (xs, ys) in batches]
# Initialize the weights for the network to the vector of all zeros.
dim = ray.get(actors[0].get_flat_size.remote())
| tensorflow.examples.tutorials.mnist.input_data.read_data_sets | 108 |
from tensorflow.core.framework.summary_pb2 import Summary
def every_n_step_end(self, current_step, outputs):
current_time = time.time()
if self._last_reported_time is not None and self._summary_writer:
added_steps = current_step - self._last_reported_step
elapsed_time = current_time - self._last_reported_time
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
self._summary_writer.add_summary(summary, current_step)
self._last_reported_step = current_step
self._last_reported_time = current_time
| tensorflow.core.framework.summary_pb2.Summary.Value | 109 |
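The hook above writes a scalar by constructing the `Summary` protobuf directly rather than running a summary op, which is convenient for values (like steps/sec) computed outside the graph. A minimal sketch; the tag is illustrative:

```python
from tensorflow.core.framework.summary_pb2 import Summary

summary = Summary(value=[Summary.Value(tag="global_step/sec", simple_value=12.5)])
# writer.add_summary(summary, global_step)  # with an existing tf.summary.FileWriter
```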
import tensorflow as tf
s1 = softmax_mask(tf.squeeze(s, [2]), mask)
a = tf.expand_dims(tf.nn.softmax(s1), axis=2)
res = tf.reduce_sum(a * memory, axis=1)
return res
| tensorflow.reduce_sum | 110 |
import tensorflow as tf
#h=[-1,n_ctx,emb]
for layer in range(n_layer):
h = block(h, 'h%d'%layer, train=train, scale=True)
#h=[-1,n_ctx,emb] lm_h [-1,emb]
lm_h = tf.reshape(h[:, :-1], [-1, n_embd])
lm_logits = tf.matmul(lm_h, we, transpose_b=True)
lm_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=lm_logits, labels=tf.reshape(X[:, 1:, 0], [-1]))
lm_losses = tf.reshape(lm_losses, [shape_list(X)[0], shape_list(X)[1]-1])
lm_losses = tf.reduce_sum(lm_losses*M[:, 1:], 1)/tf.reduce_sum(M[:, 1:], 1)
clf_h = tf.reshape(h, [-1, n_embd])
pool_idx = tf.cast(tf.argmax(tf.cast(tf.equal(X[:, :, 0], clf_token), tf.float32), 1), tf.int32)
clf_h = tf.gather(clf_h, tf.range(shape_list(X)[0], dtype=tf.int32)*n_ctx+pool_idx)
clf_h = tf.reshape(clf_h, [-1, 2, n_embd])
if train and clf_pdrop > 0:
shape = shape_list(clf_h)
shape[1] = 1
clf_h = tf.nn.dropout(clf_h, 1-clf_pdrop, shape)
clf_h = tf.reshape(clf_h, [-1, n_embd])
clf_logits = clf(clf_h, 1, train=train)
clf_logits = tf.reshape(clf_logits, [-1, 2])
clf_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=clf_logits, labels=Y)
return clf_logits, clf_losses, lm_losses
def mgpu_train(*xs):
gpu_ops = []
gpu_grads = []
| tensorflow.reshape | 111 |
import tensorflow as tf
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
| tensorflow.argmax | 112 |
import tensorflow as tf
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(
| tensorflow.metrics.mean | 113 |
import tensorflow as tf
k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
match with q.
v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
match with q.
bias: bias Tensor (see attention_bias())
dropout_rate: a float.
Returns:
Tensor with shape [..., length_q, depth_v].
"""
logits = tf.matmul(q, k, transpose_b=True) # [..., length_q, length_kv]
logits = tf.multiply(logits, 1.0 / math.sqrt(float(get_shape_list(q)[-1])))
if bias is not None:
# `attention_mask` = [B, T]
from_shape = get_shape_list(q)
if len(from_shape) == 4:
broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], 1], tf.float32)
elif len(from_shape) == 5:
# from_shape = [B, N, Block_num, block_size, depth]
broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], from_shape[3],
| tensorflow.matmul | 114 |
import tensorflow as tf
d_inp = dropout(inp, keep_prob=self.keep_prob,
is_train=self.is_train)
_, state = self.gru(d_inp, init)
tf.get_variable_scope().reuse_variables()
_, logits2 = pointer(d_match, state * self.dropout_mask, d, mask)
return logits1, logits2
| tensorflow.get_variable_scope | 115 |
import tensorflow as tf
return_dict["start_log_probs"] = start_log_probs
return_dict["end_log_probs"] = end_log_probs
else:
return_dict["start_top_log_probs"] = start_top_log_probs
return_dict["start_top_index"] = start_top_index
return_dict["end_top_log_probs"] = end_top_log_probs
return_dict["end_top_index"] = end_top_index
# an additional layer to predict answerability
with tf.variable_scope("answer_class"):
# get the representation of CLS
cls_index = tf.one_hot(cls_index, seq_len, axis=-1, dtype=tf.float32)
cls_feature = tf.einsum("lbh,bl->bh", output, cls_index)
# get the representation of START
start_p = tf.nn.softmax(start_logits_masked, axis=-1,
name="softmax_start")
start_feature = tf.einsum("lbh,bl->bh", output, start_p)
| tensorflow.variable_scope | 116 |
import tensorflow as tf
"""
if loss_weights is None:
return tf.reduce_mean(tf.stack(values, axis=0))
dec_lens = tf.reduce_sum(loss_weights, axis=1) # shape batch_size. float32
values_per_step = [v * loss_weights[:,dec_step] for dec_step,v in enumerate(values)]
values_per_ex = sum(values_per_step)/dec_lens # shape (batch_size); normalized value for each batch member
return tf.reduce_mean(values_per_ex) # overall average
| tensorflow.reduce_sum | 117 |
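The helper above averages a per-step loss while ignoring padded positions: each example's summed loss is divided by its true decoded length, the row sum of `loss_weights`. A worked sketch under those assumptions:

```python
import tensorflow as tf

values = [tf.constant([1., 3.]), tf.constant([2., 4.])]  # one [batch] tensor per decoder step
loss_weights = tf.constant([[1., 1.], [1., 0.]])         # [batch, dec_steps], 0 = padding
dec_lens = tf.reduce_sum(loss_weights, axis=1)           # [2., 1.]
values_per_step = [v * loss_weights[:, t] for t, v in enumerate(values)]
values_per_ex = sum(values_per_step) / dec_lens          # [(1+2)/2, 3/1] = [1.5, 3.]
loss = tf.reduce_mean(values_per_ex)                     # 2.25
```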
import tensorflow as tf
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
def self_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :],
ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
output_ta = tf.TensorArray(dtype=tf.float32,
size=0,
| tensorflow.expand_dims | 118 |
import tensorflow as tf
tf.summary.scalar("regularization_loss", model.regularization_loss)
tf.summary.scalar("stop_token_loss", model.stop_token_loss)
tf.summary.scalar("loss", model.loss)
tf.summary.scalar("learning_rate", model.learning_rate) # Control learning rate decay speed
if hparams.tacotron_teacher_forcing_mode == "scheduled":
tf.summary.scalar("teacher_forcing_ratio", model.ratio) # Control teacher forcing
# ratio decay when mode = "scheduled"
gradient_norms = [tf.norm(grad) for grad in model.gradients]
tf.summary.histogram("gradient_norm", gradient_norms)
tf.summary.scalar("max_gradient_norm", tf.reduce_max(gradient_norms)) # visualize
| tensorflow.summary.scalar | 119 |
import tensorflow as tf
tf.zeros(cutout_shape, dtype=images.dtype),
padding_dims, constant_values=1)
patch = tf.ones_like(images, dtype=images.dtype) * replace
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, num_channels])
images = tf.where(
tf.equal(mask, 0),
patch,
images)
images = tf.squeeze(images, axis=0)
return {'image': images, 'label': labels}
| tensorflow.squeeze | 120 |
import tensorflow as tf
observ = tf.check_numerics(observ, 'observ')
reward = tf.check_numerics(reward, 'reward')
| tensorflow.check_numerics | 121 |
from tensorflow.python.ops import variable_scope
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'recall_at_thresholds',
[predictions, labels]):
(true_positives, false_negatives, _, _, true_positives_compute_op,
false_negatives_compute_op, _, _,) = _tp_fn_tn_fp(
predictions, labels, thresholds, weights)
| tensorflow.python.ops.variable_scope.variable_scope | 122 |
import tensorflow as tf
batch_size = (params["batch_size"] if is_training else
params["eval_batch_size"])
num_users = params["num_users"]
num_items = params["num_items"]
users = tf.random_uniform([batch_size], dtype=tf.int32, minval=0,
maxval=num_users)
items = tf.random_uniform([batch_size], dtype=tf.int32, minval=0,
maxval=num_items)
if is_training:
valid_point_mask = tf.cast(tf.random_uniform(
[batch_size], dtype=tf.int32, minval=0, maxval=2), tf.bool)
labels = tf.cast(tf.random_uniform(
[batch_size], dtype=tf.int32, minval=0, maxval=2), tf.bool)
data = {
movielens.USER_COLUMN: users,
movielens.ITEM_COLUMN: items,
rconst.VALID_POINT_MASK: valid_point_mask,
}, labels
else:
dupe_mask = tf.cast(tf.random_uniform([batch_size], dtype=tf.int32,
minval=0, maxval=2), tf.bool)
data = {
movielens.USER_COLUMN: users,
movielens.ITEM_COLUMN: items,
| tensorflow.random_uniform | 123 |
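The input pipeline above fabricates synthetic MovieLens-shaped batches; the recurring pattern for a random boolean mask is an integer draw in {0, 1} (maxval is exclusive) cast to bool. A standalone sketch:

```python
import tensorflow as tf

batch_size = 8
labels = tf.cast(
    tf.random_uniform([batch_size], dtype=tf.int32, minval=0, maxval=2),
    tf.bool)  # each element is True/False with probability ~0.5
```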
import tensorflow as tf
"""Enable TensorFlow tracing and write trace to
this file.""")
tf.flags.DEFINE_string('graph_file', None,
"""Write the model's graph definition to this
file. Defaults to binary format unless filename ends
in 'txt'.""")
tf.flags.DEFINE_string('optimizer', 'sgd',
'Optimizer to use: momentum or sgd or rmsprop')
tf.flags.DEFINE_float('learning_rate', None,
"""Initial learning rate for training.""")
tf.flags.DEFINE_float('num_epochs_per_decay', 0,
"""Steps after which learning rate decays.""")
tf.flags.DEFINE_float('learning_rate_decay_factor', 0.94,
"""Learning rate decay factor.""")
tf.flags.DEFINE_float('momentum', 0.9, """Momentum for training.""")
tf.flags.DEFINE_float('rmsprop_decay', 0.9, """Decay term for RMSProp.""")
tf.flags.DEFINE_float('rmsprop_momentum', 0.9, """Momentum in RMSProp.""")
tf.flags.DEFINE_float('rmsprop_epsilon', 1.0, """Epsilon term for RMSProp.""")
tf.flags.DEFINE_float('gradient_clip', None, """Gradient clipping magnitude.
Disabled by default.""")
tf.flags.DEFINE_float('weight_decay', 0.00004,
"""Weight decay factor for training.""")
# Performance tuning flags.
| tensorflow.flags.DEFINE_float | 124 |
import tensorflow as tf
out[1].numpy())
def testReducer(self):
with tf.device(self._test_device):
batch_size = 3
size = 10
tracker_size = 8
reducer = spinn.Reducer(size, tracker_size=tracker_size)
left_in = []
right_in = []
tracking = []
for _ in range(batch_size):
left_in.append(tf.random_normal((1, size * 2)))
right_in.append(tf.random_normal((1, size * 2)))
tracking.append(tf.random_normal((1, tracker_size * 2)))
out = reducer(left_in, right_in, tracking=tracking)
self.assertEqual(batch_size, len(out))
self.assertEqual(tf.float32, out[0].dtype)
self.assertEqual((1, size * 2), out[0].shape)
def testReduceTreeLSTM(self):
with tf.device(self._test_device):
size = 10
tracker_size = 8
| tensorflow.random_normal | 125 |
import tensorflow as tf
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
| tensorflow.nn.softmax | 126 |
import tensorflow as tf
return x
def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
| tensorflow.constant_initializer | 127 |
import tensorflow as tf
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not tf.gfile.Exists(work_directory):
tf.gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not tf.gfile.Exists(filepath):
temp_file_name, _ = urllib.request.urlretrieve(source_url)
tf.gfile.Copy(temp_file_name, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print("Successfully downloaded", filename, size, "bytes.")
return filepath
def load_dataset(data_dir, url, batch_size):
"""Loads the colors data at path into a PaddedDataset."""
# Downloads data at url into data_dir/basename(url). The dataset has a header
| tensorflow.gfile.Exists | 128 |
from tensorflow.python.framework import ops
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.op_scope([value, filter, output_shape], name,
"conv2d_transpose") as name:
value = ops.convert_to_tensor(value, name="value")
filter = ops.convert_to_tensor(filter, name="filter")
if not value.get_shape()[3].is_compatible_with(filter.get_shape()[3]):
raise ValueError(
"input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[3], filter.get_shape()[3]))
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
raise ValueError("output_shape must have shape (4,), got {}"
.format(output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filter.get_shape()[2].is_compatible_with(output_shape[3]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[3], filter.get_shape()[2]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
| tensorflow.python.framework.ops.convert_to_tensor | 129 |
import tensorflow as tf
labels = tf.expand_dims(labels, 2)
weights = tf.expand_dims(weights, 2)
# Calculate weighted loss and other outputs. The log(2.0) term corrects for
# logloss not being an upper bound on the indicator function.
loss = weights * losses_utils.weighted_surrogate_loss(
labels,
logits + biases,
surrogate_type=surrogate_type,
positive_weights=1.0 + lambdas * (1.0 - precision_values),
negative_weights=lambdas * precision_values)
maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
lambda_term = lambdas * (1.0 - precision_values) * label_priors * maybe_log2
per_anchor_loss = loss - lambda_term
per_label_loss = delta * tf.reduce_sum(per_anchor_loss, 2)
# Normalize the AUC such that a perfect score function will have AUC 1.0.
# Because precision_range is discretized into num_anchors + 1 intervals
# but only num_anchors terms are included in the Riemann sum, the
# effective length of the integration interval is `delta` less than the
# length of precision_range.
scaled_loss = tf.div(
| tensorflow.log | 130 |
import tensorflow as tf
q_shape = tf.shape(q)
def _strides_gt_one():
# Calculate output indices when strides > 1.
blk_indices_crop = tf.strided_slice(blk_indices, [0, 0, 0, 0], [
blk_shape[0], q_shape[1] * strides[1], q_shape[2] * strides[2], 3
], strides)
blk_indices_crop = blk_indices_crop // tf.stack([1, strides[1], strides[2]])
return blk_indices_crop
def _strides_one():
# Calculate output indices when strides = 1.
return blk_indices[:, :q_shape[1], :q_shape[2], :]
strides_gt_one = tf.logical_or(tf.greater(strides[1], 1), tf.greater(strides[2], 1))
blk_indices_crop = tf.cond(strides_gt_one, _strides_gt_one, _strides_one)
y = tf.scatter_nd(blk_indices_crop, q, out_shape)
return y
return tf.cond(
tf.equal(tf.size(blk_indices_), 0), lambda: tf.zeros(out_shape, dtype=x.dtype),
_conv_nonzero)
# returns an int64 start timer handle that should be passed to cuda_timer_end_op
def cuda_timer_start_op():
return sbnet_module.cuda_timer_start()
| tensorflow.greater | 131 |
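The block above picks between two index computations with `tf.cond`, whose predicate is built from `tf.greater` and `tf.logical_or`; both branches are traced into the graph, but only one executes at run time. A minimal sketch of that control-flow shape:

```python
import tensorflow as tf

strides = [1, 2, 2, 1]
pred = tf.logical_or(tf.greater(strides[1], 1), tf.greater(strides[2], 1))
out = tf.cond(pred,
              lambda: tf.constant("strided path"),
              lambda: tf.constant("unit-stride path"))
```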
import tensorflow as tf
def discriminator_categorical(x, reuse=False):
"""
Discriminator that is used to match the posterior distribution with a given categorical distribution.
:param x: tensor of shape [batch_size, n_labels]
:param reuse: True -> Reuse the discriminator variables,
False -> Create or search of variables before creating
:return: tensor of shape [batch_size, 1]
"""
if reuse:
tf.get_variable_scope().reuse_variables()
with tf.name_scope('Discriminator_Categorial'):
dc_den1 = tf.nn.relu(dense(x, n_labels, n_l1, name='dc_c_den1'))
dc_den2 = tf.nn.relu(dense(dc_den1, n_l1, n_l2, name='dc_c_den2'))
output = dense(dc_den2, n_l2, 1, name='dc_c_output')
return output
def next_batch(x, y, batch_size):
"""
Used to return a random batch from the given inputs.
:param x: Input images of shape [None, 784]
| tensorflow.name_scope | 132 |
import tensorflow as tf
else:
for checkpoint in sorted(steps_and_files.items()):
step, checkpoint_path = checkpoint
if global_step >= step:
if (best_perf_global_step != step and
len(_find_valid_cands(step)) > 1):
_remove_checkpoint(checkpoint_path)
continue
result = estimator.evaluate(
input_fn=eval_input_fn,
steps=eval_steps,
checkpoint_path=checkpoint_path)
global_step = result["global_step"]
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
writer.write("best = {}\n".format(best_perf))
if result[key_name] > best_perf:
best_perf = result[key_name]
best_perf_global_step = global_step
elif len(_find_valid_cands(global_step)) > 1:
_remove_checkpoint(checkpoint_path)
writer.write("=" * 50 + "\n")
writer.flush()
with tf.gfile.GFile(best_trial_info_file, "w") as best_info:
| tensorflow.logging.info | 133 |
import tensorflow as tf
random.seed(i)
def get_session():
tf.reset_default_graph()
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
| tensorflow.reset_default_graph | 134 |
import tensorflow as tf
is_keep = tf.equal(symbol, utils.KEEP_ID)
is_del = tf.equal(symbol, utils.DEL_ID)
is_not_ins = tf.logical_or(is_keep, is_del)
| tensorflow.equal | 135 |
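A sketch of the mask pattern above: elementwise `tf.equal` against sentinel ids, combined with `tf.logical_or`. The id constants here are hypothetical stand-ins for `utils.KEEP_ID` and `utils.DEL_ID`:

```python
import tensorflow as tf

KEEP_ID, DEL_ID = 0, 1                       # hypothetical sentinel values
symbol = tf.constant([0, 1, 2, 0])
is_keep = tf.equal(symbol, KEEP_ID)          # [True, False, False, True]
is_del = tf.equal(symbol, DEL_ID)            # [False, True, False, False]
is_not_ins = tf.logical_or(is_keep, is_del)  # [True, True, False, True]
```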
from tensorflow.python.framework import ops
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
del use_locking
with _handle_graph(self.handle), self._assign_dependencies():
assign_add_op = gen_resource_variable_ops.assign_add_variable_op(
self.handle,
ops.convert_to_tensor(delta, dtype=self.dtype),
name=name)
if read_value:
return self._read_variable_op()
return assign_add_op
| tensorflow.python.framework.ops.convert_to_tensor | 136 |
import tensorflow as tf
'num_readers', 16,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 48,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'num_cpu_threads', 0,
'The number of cpu cores used to train.')
tf.app.flags.DEFINE_float(
'gpu_memory_fraction', 1., 'GPU memory fraction to use.')
# scaffold related configuration
tf.app.flags.DEFINE_string(
'data_dir', '../PASCAL/VOC_TF/VOC0712TF/',
'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_string(
'dataset_name', 'pascalvoc_0712', 'The name of the dataset to load.')
| tensorflow.app.flags.DEFINE_float | 137 |
import tensorflow as tf
if not full_cov and full_output_cov:
fvar = tf.matrix_diag(fvar) # N x P x P
| tensorflow.matrix_diag | 138 |
import tensorflow as tf
placeholders = {
'batch': tf.placeholder(tf.int32, name='batch'),
'batch_neg': tf.placeholder(tf.int32, name='batch_neg'),
'batch_node':tf.placeholder(tf.int32,name = 'batch_node'),
'adj_min_batch': tf.placeholder(tf.float32,name='adj_min_batch'),
'sim_min_batch': tf.placeholder(tf.float32,name='sim_min_batch'),
'batch_edge_type_idx': tf.placeholder(tf.int32, shape=(), name='batch_edge_type_idx'),
'batch_row_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_row_edge_type'),
| tensorflow.placeholder | 139 |
import tensorflow as tf
c = tf.zeros([config.num_layers, self.batch_size, config.hidden_size],
tf.float32)
h = tf.zeros([config.num_layers, self.batch_size, config.hidden_size],
tf.float32)
| tensorflow.zeros | 140 |
import tensorflow as tf
means = tf.concat([
tf.reshape(means[:, :, :, 0, :],
[inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix]), m2, m3
],
axis=3)
centered_inputs = inputs - means
inv_stdv = tf.exp(-log_scales)
plus_in = inv_stdv * (centered_inputs + 1. / 255.)
cdf_plus = tf.nn.sigmoid(plus_in)
min_in = inv_stdv * (centered_inputs - 1. / 255.)
cdf_min = tf.nn.sigmoid(min_in)
log_cdf_plus = plus_in - tf.nn.softplus(plus_in)
| tensorflow.exp | 141 |
from tensorflow.python.ops import control_flow_ops
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
mu_t = math_ops.cast(self._mu_t, var.dtype.base_dtype)
vstar = self.get_slot(var, "vstar")
gold = self.get_slot(var, "gold") # gold is not sparse
v_diff = state_ops.assign(vstar, mu_t * (var - vstar), use_locking=self._use_locking)
with ops.control_dependencies([v_diff]): # run v_diff operation before scatter_add
scaled_grad = scatter_add(vstar, indices, grad)
var_update = state_ops.assign_sub(var, lr_t * (scaled_grad + gold))
return control_flow_ops.group(*[var_update, ])
def _apply_sparse(self, grad, var): # sparse grad (only for the shakespeare model)
return self._apply_sparse_shared(
grad.values, var, grad.indices, lambda x, i, v: state_ops.scatter_add(x, i, v))
def set_params(self, cog, avg_gradient, client):
with client.model.graph.as_default():
all_vars = tf.trainable_variables()
for variable, value in zip(all_vars, cog):
vstar = self.get_slot(variable, "vstar")
vstar.load(value, client.model.sess)
| tensorflow.python.ops.control_flow_ops.group | 142 |
import tensorflow as tf
tf.expand_dims(candidate_starts, 0),
tf.expand_dims(candidate_ends, 0),
tf.expand_dims(k, 0),
util.shape(context_outputs, 0),
True) # [1, k]
top_span_indices.set_shape([1, None])
top_span_indices = tf.squeeze(top_span_indices, 0) # [k]
top_span_starts = tf.gather(candidate_starts, top_span_indices) # [k]
top_span_ends = tf.gather(candidate_ends, top_span_indices) # [k]
top_span_emb = tf.gather(candidate_span_emb, top_span_indices) # [k, emb]
top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices) # [k]
top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices) # [k]
top_span_sentence_indices = tf.gather(candidate_sentence_indices, top_span_indices) # [k]
top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts) # [k]
c = tf.minimum(self.config["max_top_antecedents"], k)
if self.config["coarse_to_fine"]:
top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.coarse_to_fine_pruning(top_span_emb, top_span_mention_scores, c)
else:
top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.distance_pruning(top_span_emb, top_span_mention_scores, c)
dummy_scores = tf.zeros([k, 1]) # [k, 1]
for i in range(self.config["coref_depth"]):
with tf.variable_scope("coref_layer", reuse=(i > 0)):
| tensorflow.gather | 143 |
import tensorflow as tf
ops = []
with tf.control_dependencies([update_op]):
| tensorflow.control_dependencies | 144 |
import tensorflow as tf
out = tf.sqrt(subsamp_sum)
else:
out = tf.pow(subsamp_sum, 1/pnorm)
return out
def mpool(inpOp, kH, kW, dH, dW, padding, name):
with tf.variable_scope(name):
maxpool = tf.nn.max_pool(inpOp,
ksize=[1, kH, kW, 1],
strides=[1, dH, dW, 1],
padding=padding)
return maxpool
| tensorflow.variable_scope | 145 |
import tensorflow as tf
labels_shape = host_labels.get_shape()
gpu_copy_stage = data_flow_ops.StagingArea(
[tf.float32, tf.int32],
shapes=[images_shape, labels_shape])
gpu_copy_stage_op = gpu_copy_stage.put(
[host_images, host_labels])
gpu_copy_stage_ops.append(gpu_copy_stage_op)
host_images, host_labels = gpu_copy_stage.get()
with tf.device(self.raw_devices[device_num]):
if not use_synthetic_gpu_images:
gpu_compute_stage = data_flow_ops.StagingArea(
[tf.float32, tf.int32],
shapes=[images_shape, labels_shape]
)
# The CPU-to-GPU copy is triggered here.
gpu_compute_stage_op = gpu_compute_stage.put(
[host_images, host_labels])
| tensorflow.device | 146 |
import tensorflow as tf
tf.summary.histogram("d", self.end_points_D['D_on_data'])
tf.summary.histogram("d_", self.end_points_D['D_on_G'])
tf.summary.image("G", G)
d_label_smooth = self.cnf['d_label_smooth'] # 0.25
self.d_loss_real = self._sigmoid_kl_with_logits(self.end_points_D['D_on_data_logits'],
1. - d_label_smooth)
class_loss_weight = 1.
self.d_loss_class = class_loss_weight * tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.end_points_D['class_logits'], labels=tf.to_int64(targets))
self.test_loss = 1. - \
tf.reduce_mean(tf.to_float(tf.nn.in_top_k(
self.end_points_D_val['logits'], targets, 1)))
self.error_rate = 1. - \
tf.reduce_mean(tf.to_float(tf.nn.in_top_k(
self.end_points_D['class_logits'], targets, 1)))
if gpu_idx == 0:
update = tf.assign(num_error_rate, num_error_rate + 1.)
| tensorflow.to_int64 | 147 |
from tensorflow.python.platform import tf_logging as logging
scaffold=None):
"""Initialize CheckpointSaver monitor.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
Raises:
ValueError: If both `save_steps` and `save_secs` are not `None`.
ValueError: If both `save_steps` and `save_secs` are `None`.
"""
logging.info("Create CheckpointSaver.")
super(CheckpointSaver, self).__init__()
self._saver = saver
self._summary_writer = SummaryWriterCache.get(checkpoint_dir)
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._save_secs = save_secs
self._save_steps = save_steps
self._last_saved_time = None
self._last_begin_step = None
self._last_saved_step = None
if save_steps is None and save_secs is None:
raise ValueError("Either save_steps or save_secs should be provided")
if (save_steps is not None) and (save_secs is not None):
| tensorflow.python.platform.tf_logging.info | 148 |
import tensorflow as tf
shape = tuple(map(int, shape))
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = tf.random_uniform_initializer(
low, high, dtype=dtype, seed=seed)(shape)
return tf.Variable(value, dtype=dtype, name=name)
| tensorflow.random_uniform_initializer | 149 |
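Note the call pattern above: `tf.random_uniform_initializer(...)` returns an initializer object, which is then *called* with a shape to produce the initial value tensor. A hedged sketch of the same pattern (TF 1.x):

```python
import numpy as np
import tensorflow as tf

seed = np.random.randint(10 ** 8)  # condition TF randomness on the NumPy RNG, as above
init = tf.random_uniform_initializer(-0.05, 0.05, seed=seed)
value = init((3, 4))               # initializer object called with a shape
w = tf.Variable(value, dtype=tf.float32, name="w")
```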
import tensorflow as tf
# initializer=tf.keras.initializers.lecun_normal(),
dtype=tf.float32)
self.position_emb = tf.reshape(self.position_emb, [-1, 2 * self.config.hidden_size])
shape = tf.shape(output)
output = tf.reshape(output, [-1, 2 * self.config.hidden_size])
atten_hidden = tf.tanh(
| tensorflow.shape | 150 |
import tensorflow as tf
# compute norms in case they need to be logged
self.gradient_norms = [tf.norm(g) + NUMTOL for (g, v) in clipped_grads_and_vars]
self.weight_norms = [tf.norm(v) + NUMTOL for (g, v) in clipped_grads_and_vars]
# check that gradients are finite
grads = [tf.check_numerics(g, "grads is not finite") for (g, v) in clipped_grads_and_vars]
variables = [tf.check_numerics(v, "grads is not finite") for (g, v) in clipped_grads_and_vars]
self.gradient_weight_global_norms = [tf.global_norm(grads), tf.global_norm(variables)]
# 2nd part of minimize: apply_gradient
optimizer_step = self._optimizer.apply_gradients(clipped_grads_and_vars, global_step=self.global_step)
| tensorflow.check_numerics | 151 |
import tensorflow as tf
self._train_op = optimizer.apply_gradients(
zip(grads, tvars),
global_step=tf.contrib.framework.get_or_create_global_step())
| tensorflow.contrib.framework.get_or_create_global_step | 152 |
from tensorflow.python.framework import ops
padding=padding,
data_format=data_format,
name=name)
ops.RegisterShape("Relu")(common_shapes.unchanged_shape)
ops.RegisterShape("Relu6")(common_shapes.unchanged_shape)
ops.RegisterShape("Elu")(common_shapes.unchanged_shape)
ops.RegisterShape("Softplus")(common_shapes.unchanged_shape)
ops.RegisterShape("Softsign")(common_shapes.unchanged_shape)
@ops.RegisterShape("ReluGrad")
@ops.RegisterShape("Relu6Grad")
@ops.RegisterShape("EluGrad")
@ops.RegisterShape("SoftplusGrad")
@ops.RegisterShape("SoftsignGrad")
def _BinaryElementwiseShape(op):
| tensorflow.python.framework.ops.RegisterShape | 153 |
import tensorflow as tf
# P_i = cost.li_regularizer(0.03)(self.train_params[0]) # + cost.li_regularizer(0.001)(self.train_params[2])
# L1 of activation outputs
activation_out = self.all_layers[-2]
L1_a = 0.001 * tf.reduce_mean(activation_out) # <haodong>: theano: T.mean( self.a[i] ) # some neuron are broken, white and black
# L1_a = 0.001 * tf.reduce_mean( tf.reduce_sum(activation_out, 0) ) # <haodong>: some neuron are broken, white and black
# L1_a = 0.001 * 100 * tf.reduce_mean( tf.reduce_sum(activation_out, 1) ) # <haodong>: some neuron are broken, white and black
# KL Divergence
beta = 4
rho = 0.15
p_hat = tf.reduce_mean(activation_out, 0) # theano: p_hat = T.mean( self.a[i], axis=0 )
try: # TF1.0
KLD = beta * tf.reduce_sum(rho * tf.log(tf.divide(rho, p_hat)) + (1 - rho) * tf.log((1 - rho) / (tf.subtract(float(1), p_hat))))
except Exception: # TF0.12
KLD = beta * tf.reduce_sum(rho * tf.log(tf.div(rho, p_hat)) + (1 - rho) * tf.log((1 - rho) / (tf.sub(float(1), p_hat))))
# KLD = beta * tf.reduce_sum( rho * tf.log(rho/ p_hat) + (1- rho) * tf.log((1- rho)/(1- p_hat)) )
# theano: L1_a = l1_a[i] * T.sum( rho[i] * T.log(rho[i]/ p_hat) + (1- rho[i]) * T.log((1- rho[i])/(1- p_hat)) )
# Total cost
if act == tf.nn.softplus:
logging.info(' use: mse, L2_w, L1_a')
self.cost = mse + L1_a + L2_w
elif act == tf.nn.sigmoid:
# ----------------------------------------------------
# Cross-entropy was used in Denoising AE
| tensorflow.divide | 154 |
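The KL term above is the standard sparse-autoencoder penalty: with target activation rho and per-unit mean activation p_hat, KLD = beta * sum(rho * log(rho / p_hat) + (1 - rho) * log((1 - rho) / (1 - p_hat))). A compact sketch of just that term:

```python
import tensorflow as tf

rho, beta = 0.15, 4.0
p_hat = tf.constant([0.1, 0.2, 0.15])  # mean activation of each hidden unit
kld = beta * tf.reduce_sum(
    rho * tf.log(tf.divide(rho, p_hat))
    + (1 - rho) * tf.log(tf.divide(1 - rho, 1 - p_hat)))
```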
import tensorflow as tf
total_loss = strategy.reduce("sum", loss)
total_loss = tf.identity(total_loss)
return total_loss, embedding_vector
return strategy.run(_step_fn, inputs, labels)
replica_batch_size = args.global_batch_size // args.gpu_num
dataset = utils.tf_dataset(*random_samples, batchsize=replica_batch_size,
to_sparse_tensor=True, repeat=1)
train_iterator = dataset.make_initializable_iterator()
iterator_init = train_iterator.initializer
inputs, labels = train_iterator.get_next()
graph_results = _train_step(inputs, labels, training=True)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
if "plugin" in args.optimizer:
init_op = tf.group(init_op, emb_opt.initializer)
save_op = list()
for i, embedding_layer in enumerate(sok_sparse_demo.embedding_layers):
control_inputs = [save_op[-1]] if save_op else None
with tf.control_dependencies(control_inputs):
if args.save_params:
filepath = r"./embedding_variables/"
utils.try_make_dirs(filepath)
op = sok_saver.dump_to_file(embedding_layer.embedding_variable, filepath)
else:
op = tf.constant(1.0)
| tensorflow.global_variables_initializer | 155 |
import tensorflow as tf
ValueError: if neither `loc` nor `covariance_matrix` are specified.
"""
parameters = dict(locals())
# Convert the covariance_matrix up to a scale_tril and call MVNTriL.
with tf.name_scope(name) as name:
with tf.name_scope("init", values=[loc, covariance_matrix]):
dtype = dtype_util.common_dtype([loc, covariance_matrix], tf.float32)
loc = loc if loc is None else tf.convert_to_tensor(
loc, name="loc", dtype=dtype)
| tensorflow.name_scope | 156 |
import tensorflow as tf
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
| tensorflow.logging.info | 157 |
from tensorflow.python.platform import gfile
self.assertFalse(gfile.Exists(s3))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
# Exercise the third helper.
| tensorflow.python.platform.gfile.Exists | 158 |
import tensorflow as tf
batch_size = tf.shape(a)[0]
return a + b, tf.tile([batch_size], [batch_size])
output0 = f(tf.constant([1]), tf.constant([2]))
output1 = f(tf.constant([2]), tf.constant([3]))
tp = pool.ThreadPool(2)
f0 = tp.apply_async(session.run, [output0])
f1 = tp.apply_async(session.run, [output1])
# Make sure both inputs are in the batcher before starting it.
time.sleep(_SLEEP_TIME)
tf.train.start_queue_runners()
result0, batch_size0 = f0.get()
result1, batch_size1 = f1.get()
self.assertAllEqual([3], result0)
self.assertAllEqual([2], batch_size0)
self.assertAllEqual([5], result1)
self.assertAllEqual([2], batch_size1)
def test_many_small(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
| tensorflow.train.start_queue_runners | 159 |
import tensorflow as tf
logstd = tf.get_variable(
"logstd", mean.shape[2:], tf.float32, logstd_initializer)
logstd = tf.tile(
logstd[None, None],
[tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
with tf.variable_scope("value"):
x = flat_observations
for size in config.value_layers:
x = tf.layers.dense(x, size, activation=tf.nn.relu)
value = tf.layers.dense(x, 1)[..., 0]
| tensorflow.variable_scope | 160 |
import tensorflow as tf
mask_landm_b = tf.broadcast_to(mask_landm, tf.shape(landm_true))
loss_landm = _smooth_l1_loss(tf.boolean_mask(landm_true, mask_landm_b),
tf.boolean_mask(landm_pred, mask_landm_b))
loss_landm = tf.reduce_mean(loss_landm)
| tensorflow.boolean_mask | 161 |
import tensorflow as tf
self.reset_global_step = tf.assign(self.global_step, 0)
learning_rate = tf.train.exponential_decay(self.config["learning_rate"], self.global_step,
self.config["decay_frequency"], self.config["decay_rate"], staircase=True)
trainable_params = tf.trainable_variables()
gradients = tf.gradients(self.loss, trainable_params)
gradients, _ = tf.clip_by_global_norm(gradients, self.config["max_gradient_norm"])
optimizers = {
"adam" : tf.train.AdamOptimizer,
| tensorflow.gradients | 162 |
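A condensed, self-contained sketch of the manual train-op pattern above: take gradients, clip their global norm, then apply them (toy loss, TF 1.x):

```python
import tensorflow as tf

x = tf.Variable([1.0, 2.0])
loss = tf.reduce_sum(tf.square(x))                       # toy scalar loss
grads = tf.gradients(loss, [x])
grads, global_norm = tf.clip_by_global_norm(grads, clip_norm=5.0)
train_op = tf.train.AdamOptimizer(1e-3).apply_gradients(zip(grads, [x]))
```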
import tensorflow as tf
@registry.register_model
class NextFrameBasicStochastic(
basic_deterministic.NextFrameBasicDeterministic,
base_vae.NextFrameBaseVae):
"""Stochastic version of basic next-frame model."""
def inject_latent(self, layer, features, filters):
"""Inject a VAE-style latent."""
# Latent for stochastic model
input_frames = tf.to_float(features["inputs_raw"])
target_frames = tf.to_float(features["targets_raw"])
full_video = tf.concat([input_frames, target_frames], axis=1)
latent_mean, latent_std = self.construct_latent_tower(
full_video, time_axis=1)
latent = common_video.get_gaussian_tensor(latent_mean, latent_std)
latent = tf.layers.flatten(latent)
latent = tf.expand_dims(latent, axis=1)
latent = tf.expand_dims(latent, axis=1)
latent_mask = tf.layers.dense(latent, filters, name="latent_mask")
| tensorflow.to_float | 163 |
import tensorflow as tf
imgshape = image.get_shape().as_list()
print(imgshape)
self.output_height, self.output_width = imgshape[-3:-1]
self.batch_size = imgshape[1]
featsize = 1024
srcimg = image[0]
tgtimg = image[2]
tgtctx = image[1]
with tf.variable_scope("conv_context") as scope:
tgtctx_h0 = lrelu(conv2d(tgtctx, self.df_dim, name='h0_conv'))
tgtctx_h1 = lrelu(conv2d(tgtctx_h0, self.df_dim*2, name='h1_conv'))
tgtctx_h2 = lrelu(conv2d(tgtctx_h1, self.df_dim*4, name='h2_conv'))
tgtctx_h3 = lrelu(conv2d(tgtctx_h2, self.df_dim*8, name='h3_conv'))
tgtctx_h4 = lrelu(linear(tf.reshape(tgtctx_h3, [self.batch_size, -1]), featsize, 'h4_lin'))
tgtctx_z = linear(tgtctx_h4, featsize, 'hz_lin')
with tf.variable_scope("conv") as scope:
srcimg_h0 = lrelu(conv2d(srcimg, self.df_dim, name='h0_conv'))
srcimg_h1 = lrelu(conv2d(srcimg_h0, self.df_dim*2, name='h1_conv'))
srcimg_h2 = lrelu(conv2d(srcimg_h1, self.df_dim*4, name='h2_conv'))
srcimg_h3 = lrelu(conv2d(srcimg_h2, self.df_dim*8, name='h3_conv'))
print(srcimg_h3.get_shape())
srcimg_h4 = lrelu(linear(tf.reshape(srcimg_h3, [self.batch_size, -1]), featsize, 'h4_lin'))
srcimg_z = lrelu(linear(srcimg_h4, featsize, 'hz_lin'))
scope.reuse_variables()
| tensorflow.reshape | 164 |
import tensorflow as tf
if embeddings is not None:
flat_inputs = tf.reshape(encoder_inputs_, [tf.multiply(batch_size, time_steps)])
flat_inputs = tf.nn.embedding_lookup(embeddings, flat_inputs)
encoder_inputs_ = tf.reshape(flat_inputs,
tf.stack([batch_size, time_steps, flat_inputs.get_shape()[1].value]))
if pos_embeddings is not None:
pos_inputs_ = tf.range(time_steps, dtype=tf.int32)
pos_inputs_ = tf.nn.embedding_lookup(pos_embeddings, pos_inputs_)
pos_inputs_ = tf.tile(tf.expand_dims(pos_inputs_, axis=0), [batch_size, 1, 1])
encoder_inputs_ = tf.concat([encoder_inputs_, pos_inputs_], axis=2)
if other_inputs is not None:
encoder_inputs_ = tf.concat([encoder_inputs_, other_inputs], axis=2)
if encoder.use_dropout:
noise_shape = [1, time_steps, 1] if encoder.pervasive_dropout else [batch_size, time_steps, 1]
encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.word_keep_prob,
noise_shape=noise_shape)
| tensorflow.concat | 165 |
import tensorflow as tf
else:
if not FLAGS.restore:
tf.gfile.DeleteRecursively(FLAGS.checkpoint_path)
tf.gfile.MkDir(FLAGS.checkpoint_path)
input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')
input_score_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_score_maps')
if FLAGS.geometry == 'RBOX':
input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 5], name='input_geo_maps')
else:
input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 8], name='input_geo_maps')
input_training_masks = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_training_masks')
global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, global_step, decay_steps=10000, decay_rate=0.94, staircase=True)
# add summary
tf.summary.scalar('learning_rate', learning_rate)
opt = tf.train.AdamOptimizer(learning_rate)
opt = MixedPrecisionOptimizer(opt, scale=FLAGS.loss_scale)
from npu_bridge.estimator.npu.npu_optimizer import NPUDistributedOptimizer
| tensorflow.placeholder | 166 |
import tensorflow as tf
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
| tensorflow.constant_initializer | 167 |
import tensorflow as tf
new_encoder_input_length = []
for i, encoder in enumerate(encoders):
# create embeddings in the global scope (allows sharing between encoder and decoder)
weight_scale = encoder.embedding_weight_scale or encoder.weight_scale
if weight_scale is None:
initializer = None # FIXME
elif encoder.embedding_initializer == 'uniform' or (encoder.embedding_initializer is None
and encoder.initializer == 'uniform'):
initializer = tf.random_uniform_initializer(minval=-weight_scale, maxval=weight_scale)
else:
initializer = tf.random_normal_initializer(stddev=weight_scale)
with tf.device('/cpu:0'): # embeddings can take a very large amount of memory, so
# storing them in GPU memory can be impractical
if encoder.binary:
embeddings = None # inputs are token ids, which need to be mapped to vectors (embeddings)
else:
embedding_shape = [encoder.vocab_size, encoder.embedding_size]
embeddings = get_variable('embedding_{}'.format(encoder.name), shape=embedding_shape,
initializer=initializer)
if encoder.pos_embedding_size:
pos_embedding_shape = [encoder.max_len + 1, encoder.pos_embedding_size]
| tensorflow.random_normal_initializer | 168 |
import tensorflow as tf
# shape = ()
group_size = tf.minimum(
x=group_size, y=cur_batch_size, name="group_size"
)
print_obj("grouped_minibatch_stddev", "group_size", group_size)
# Split minibatch into M groups of size group_size, rank 5 tensor.
# shape = (
# group_size,
# cur_batch_size / group_size,
# image_size,
# image_size,
# num_channels
# )
grouped_image = tf.reshape(
tensor=X,
shape=[group_size, -1] + static_image_shape,
name="grouped_image"
)
print_obj(
"grouped_minibatch_stddev",
"grouped_image",
grouped_image
)
# Find the mean of each group.
# shape = (
# 1,
| tensorflow.reshape | 169 |
import tensorflow as tf
norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params, state_init_a, state_final_a
def build_cnet(self, state_in, name, reuse=False, batch_size=64):
reg = tf.contrib.layers.l2_regularizer(1e-3)
with tf.variable_scope(name, reuse=reuse):
layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg)
lstm_c = tf.nn.rnn_cell.LSTMCell(num_units=256)
lstm_c = tf.nn.rnn_cell.DropoutWrapper(lstm_c, output_keep_prob=self.keep_prob)
state_init_c = lstm_c.zero_state(batch_size=batch_size, dtype=tf.float32)
lstm_cin = tf.expand_dims(layer_c2, axis=1)
out_c, state_final_c = tf.nn.dynamic_rnn(cell=lstm_c, inputs=lstm_cin, initial_state=state_init_c)
cell_out_c = tf.reshape(out_c, [-1, 256])
vf = tf.layers.dense(cell_out_c, 1, kernel_regularizer=reg)
| tensorflow.layers.dense | 170 |
import tensorflow as tf
if not isinstance(self.ent_coef, float):
ent_coef_loss = -tf.reduce_mean(
self.log_ent_coef * tf.stop_gradient(logp_pi + self.target_entropy)*self.weight_ph)
entropy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
# Compute the policy loss
# Alternative: policy_kl_loss = tf.reduce_mean(logp_pi - min_qf_pi)
policy_kl_loss = tf.reduce_mean((self.ent_coef * logp_pi - min_qf_pi)*self.weight_ph)
actor_for_priority = tf.reduce_mean(self.ent_coef * logp_pi - min_qf_pi,1)
# NOTE: in the original implementation, they have an additional
# regularization loss for the Gaussian parameters
# this is not used for now
# policy_loss = (policy_kl_loss + policy_regularization_loss)
min_q = tf.minimum(dtm_qf1,dtm_qf2)
Q_filter = tf.cast((qf1 > min_q)|(qf2 > min_q),tf.float32)
#Q_filter_1 = tf.cast(qf1 > min_q,tf.float32)
#Q_filter_2 = tf.cast(qf2 > min_q,tf.float32)
im_loss1 = tf.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter*self.is_demo_ph
#im_loss2 = tf.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter_2*self.is_demo_ph
#actor_loss_di1 = tf.reduce_mean(im_loss1)
#actor_loss_di2 = tf.reduce_mean(im_loss2)
self.actor_loss_di = tf.reduce_mean(im_loss1)
imitation_for_priority = tf.reduce_mean(im_loss1,axis=1)
regularizerpi = tf.contrib.layers.l1_l2_regularizer(scale_l1=0.0, scale_l2=1e-5, scope="model/pi")
all_trainable_weights_pi = tf.trainable_variables('model/pi')
| tensorflow.minimum | 171 |
import tensorflow as tf
# sigma=sigma_rpn=3, dim=[1, 2, 3]
def _smooth_l1_loss(self, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=[1]):
sigma_2 = sigma ** 2
box_diff = bbox_pred - bbox_targets
in_box_diff = bbox_inside_weights * box_diff  # rows belonging to the foreground are nonzero; all other rows are zero
abs_in_box_diff = tf.abs(in_box_diff)
# decide which positions get weight 1 (including positions that are already zero, i.e. non-foreground) and which get weight 0
smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))
# smooth L1 function (slightly different from the paper)
in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign + (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
out_loss_box = bbox_outside_weights * in_loss_box
loss_box = tf.reduce_mean(tf.reduce_sum(
out_loss_box,
axis=dim
))
return loss_box
def _add_losses(self, sigma_rpn=3.0):
with tf.variable_scope('loss_' + self._tag):
| tensorflow.pow | 172 |
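For reference, the piecewise function implemented above is smooth L1 with parameter sigma: f(x) = 0.5 * (sigma * x)^2 when |x| < 1/sigma^2, and |x| - 0.5/sigma^2 otherwise; the `smoothL1_sign` tensor selects the branch without Python control flow. A scalar sanity check, as a hedged standalone sketch:

```python
import tensorflow as tf

def smooth_l1(x, sigma=1.0):
    # 0.5 * (sigma * x)^2 when |x| < 1/sigma^2, else |x| - 0.5/sigma^2
    sigma_2 = sigma ** 2
    abs_x = tf.abs(x)
    sign = tf.stop_gradient(tf.to_float(tf.less(abs_x, 1. / sigma_2)))
    return (tf.pow(x, 2) * (sigma_2 / 2.) * sign
            + (abs_x - 0.5 / sigma_2) * (1. - sign))

y = smooth_l1(tf.constant([0.5, 2.0]))  # -> [0.125, 1.5]
```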
import tensorflow as tf
def import_ops(self):
if self._is_training:
self._train_op = tf.get_collection_ref('train_op')[0]
self._lr = tf.get_collection_ref('lr')[0]
self._new_lr = tf.get_collection_ref('new_lr')[0]
self._lr_update = tf.get_collection_ref('lr_update')[0]
rnn_params = tf.get_collection_ref('rnn_params')
if self._cell and rnn_params:
params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable(
| tensorflow.get_collection_ref | 173 |
import tensorflow as tf
# landm loss (smooth L1)
mask_landm_b = tf.broadcast_to(mask_landm, tf.shape(landm_true))
loss_landm = _smooth_l1_loss(tf.boolean_mask(landm_true, mask_landm_b),
tf.boolean_mask(landm_pred, mask_landm_b))
loss_landm = tf.reduce_mean(loss_landm)
# localization loss (smooth L1)
mask_pos_b = tf.broadcast_to(mask_pos, tf.shape(loc_true))
loss_loc = _smooth_l1_loss(tf.boolean_mask(loc_true, mask_pos_b),
tf.boolean_mask(loc_pred, mask_pos_b))
loss_loc = tf.reduce_mean(loss_loc)
# classification loss (crossentropy)
# 1. compute max conf across batch for hard negative mining
loss_class = tf.where(mask_neg,
1 - class_pred[:, 0][..., tf.newaxis], 0)
# 2. hard negative mining
loss_class = tf.reshape(loss_class, [num_batch, num_prior])
| tensorflow.boolean_mask | 174 |
import tensorflow as tf
cell = tf.contrib.rnn.BasicRNNCell(num_units=state_size)
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell, rnn_inputs, initial_state=init_state)
'''Prediction, loss, and optimization'''
with tf.variable_scope('softmax'):
W = tf.get_variable('W', [state_size, num_classes])
b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
'''Because rnn_outputs is 3-D, it needs to be reshaped to 2-D here,
| tensorflow.variable_scope | 175 |
import tensorflow as tf
else:
if remainder is not None:
# Add reminder from the previous collapsed cores to the current
# core.
sliced_core = tf.einsum('ab,bid->aid', remainder, sliced_core)
remainder = None
new_tt_cores.append(sliced_core)
| tensorflow.einsum | 176 |
from tensorflow.python.framework import ops
output_types = [dtypes.DType(x.type) for x in sig.output_arg]
with ops.name_scope(name, func_name, inputs) as name:
| tensorflow.python.framework.ops.name_scope | 177 |
import tensorflow as tf
cell=lstm_cell,
inputs=embed_inputs,
dtype=tf.float32,
sequence_length=self.seq_len,
) ## (batch_size, seq_len, num_hidden)
# rnn_outputs = tf.transpose(rnn_outputs, perm=[1,0,2]) ## (seq_len, batch_size, num_hidden) NOT NEEDED ANY MORE
last_outputs = self.last_relevant(rnn_outputs, self.seq_len) ## (batch_size, num_hidden)
with tf.variable_scope('output', reuse=forward_only):
with tf.variable_scope('softmax'):
W = tf.get_variable('W', [self.num_hidden, self.num_classes],
# initializer=tf.random_uniform_initializer(-0.003, 0.003))
initializer=tf.contrib.layers.xavier_initializer())
# initializer=tf.truncated_normal_initializer(stddev=0.1))
b = tf.get_variable('b', [self.num_classes], initializer=tf.constant_initializer(0.1))
logits = tf.matmul(last_outputs, W) + b
self.embed_inputs = embed_inputs
return logits
| tensorflow.variable_scope | 178 |
import tensorflow as tf
w_z0_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) *
(z1_f - z) * x1_valid * y0_valid * z1_valid),
1)
w_z0_y1_x1 = tf.expand_dims(((x - x0_f) * (y - y0_f) *
(z1_f - z) * x0_valid * y0_valid * z1_valid),
1)
w_z1_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) *
(z - z0_f) * x1_valid * y1_valid * z0_valid),
1)
w_z1_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) *
(z - z0_f) * x0_valid * y1_valid * z0_valid),
1)
w_z1_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) *
(z - z0_f) * x1_valid * y0_valid * z0_valid),
1)
w_z1_y1_x1 = tf.expand_dims(((x - x0_f) * (y - y0_f) *
(z - z0_f) * x0_valid * y0_valid * z0_valid),
1)
output = tf.add_n([
w_z0_y0_x0 * i_z0_y0_x0, w_z0_y0_x1 * i_z0_y0_x1,
w_z0_y1_x0 * i_z0_y1_x0, w_z0_y1_x1 * i_z0_y1_x1,
w_z1_y0_x0 * i_z1_y0_x0, w_z1_y0_x1 * i_z1_y0_x1,
w_z1_y1_x0 * i_z1_y1_x0, w_z1_y1_x1 * i_z1_y1_x1
])
return output
def _meshgrid(depth, height, width, z_near, z_far):
with tf.variable_scope('_meshgrid'):
x_t = tf.reshape(
| tensorflow.expand_dims | 179 |
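A small sketch of why the trilinear weights above are wrapped in tf.expand_dims(..., 1): it turns a [num_points] weight vector into [num_points, 1] so it broadcasts against per-point channel values; the shapes here are assumptions.

import tensorflow as tf

w0 = tf.expand_dims(tf.constant([0.25, 0.75]), 1)  # [2, 1]
w1 = tf.expand_dims(tf.constant([0.75, 0.25]), 1)  # [2, 1]
v0 = tf.constant([[1.0, 2.0], [3.0, 4.0]])         # [2 points, 2 channels]
v1 = tf.constant([[5.0, 6.0], [7.0, 8.0]])         # [2, 2]

# Weighted blend of the sampled values, as in the interpolation output.
output = tf.add_n([w0 * v0, w1 * v1])              # [2, 2]

with tf.Session() as sess:
    print(sess.run(output))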
import tensorflow as tf
# Add a prefix to the node names in the current graph and Restore using
# remapped names.
with self.test_session() as sess:
v0 = tf.Variable(-1.0, name="restore_prefix/v0")
v1 = tf.Variable(-1.0, name="restore_prefix/v1")
with self.assertRaisesOpError("uninitialized value restore_prefix/v0"):
sess.run(v0)
with self.assertRaisesOpError("uninitialized value restore_prefix/v1"):
sess.run(v1)
# Restore the saved values in the parameter nodes.
save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
class LatestCheckpointWithRelativePaths(tf.test.TestCase):
@staticmethod
@contextlib.contextmanager
| tensorflow.train.Saver | 180 |
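A minimal save/restore round trip with an explicit name map, echoing the remapped-names test above; the checkpoint path is an assumption for the sketch.

import tensorflow as tf

v0 = tf.Variable(10.0, name="v0")
# Map checkpoint keys to variables explicitly; useful when graph names
# differ from the names stored in the checkpoint.
saver = tf.train.Saver({"v0": v0})

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    path = saver.save(sess, "/tmp/demo.ckpt")  # hypothetical path

with tf.Session() as sess:
    saver.restore(sess, path)  # no initializer needed after restore
    print(sess.run(v0))        # 10.0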
import tensorflow as tf
self.network = LSTMPolicy(env.observation_space.shape, env.action_space.n)
self.global_step = tf.get_variable("global_step", [], tf.int32,
initializer=tf.constant_initializer(0, dtype=tf.int32),
trainable=False)
with tf.device(worker_device):
with tf.variable_scope("local"):
self.local_network = pi = LSTMPolicy(env.observation_space.shape, env.action_space.n)
pi.global_step = self.global_step
self.ac = tf.placeholder(tf.float32, [None, env.action_space.n], name="ac")
self.adv = tf.placeholder(tf.float32, [None], name="adv")
self.r = tf.placeholder(tf.float32, [None], name="r")
log_prob_tf = tf.nn.log_softmax(pi.logits)
prob_tf = tf.nn.softmax(pi.logits)
# the "policy gradients" loss: its derivative is precisely the policy gradient
# notice that self.ac is a placeholder that is provided externally.
# adv will contain the advantages, as calculated in process_rollout
pi_loss = - tf.reduce_sum(tf.reduce_sum(log_prob_tf * self.ac, [1]) * self.adv)
# loss of value function
vf_loss = 0.5 * tf.reduce_sum(tf.square(pi.vf - self.r))
| tensorflow.placeholder | 181 |
import tensorflow as tf
def __exit__(self, exc_type, exc_val, exc_tb):
"""Leave session."""
if tf.get_default_session() is self.sess and self.sess_entered:
self.sess.__exit__(exc_type, exc_val, exc_tb)
| tensorflow.get_default_session | 182 |
import tensorflow as tf
for _ in xrange(1000):
g = tf.Graph()
with g.as_default():
c = tf.constant([1.], tf.float32)
_ = tf.py_func(lambda x: x + 1, [c], [tf.float32])
self.assertTrue(script_ops._py_funcs.size() < 100)
def testError(self):
| tensorflow.py_func | 183 |
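A runnable sketch of tf.py_func wrapping an ordinary Python function into the graph, mirroring the lambda in the test above.

import tensorflow as tf

def increment(x):
    # Runs as plain Python/NumPy at session time, not as graph ops.
    return x + 1

c = tf.constant([1.0], tf.float32)
y = tf.py_func(increment, [c], tf.float32)

with tf.Session() as sess:
    print(sess.run(y))  # [2.]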
import tensorflow as tf
def _transform(theta, input_dim, out_size, z_near, z_far):
with tf.variable_scope('_transform'):
num_batch = input_dim.get_shape().as_list()[0]
num_channels = input_dim.get_shape().as_list()[4]
theta = tf.reshape(theta, (-1, 4, 4))
theta = tf.cast(theta, 'float32')
out_depth = out_size[0]
out_height = out_size[1]
out_width = out_size[2]
grid = _meshgrid(out_depth, out_height, out_width, z_near, z_far)
grid = tf.expand_dims(grid, 0)
grid = tf.reshape(grid, [-1])
grid = tf.tile(grid, tf.stack([num_batch]))
grid = tf.reshape(grid, tf.stack([num_batch, 4, -1]))
# Transform A x (x_t', y_t', 1, d_t)^T -> (x_s, y_s, z_s, 1).
t_g = tf.matmul(theta, grid)
z_s = tf.slice(t_g, [0, 0, 0], [-1, 1, -1])
y_s = tf.slice(t_g, [0, 1, 0], [-1, 1, -1])
x_s = tf.slice(t_g, [0, 2, 0], [-1, 1, -1])
z_s_flat = tf.reshape(z_s, [-1])
y_s_flat = tf.reshape(y_s, [-1])
x_s_flat = tf.reshape(x_s, [-1])
input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, z_s_flat,
out_size)
| tensorflow.stack | 184 |
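A sketch of the tile-then-reshape idiom from _transform above, where tf.stack builds small shape tensors from scalars; num_batch and the grid length are assumptions.

import tensorflow as tf

num_batch = 2
grid = tf.constant([0.0, 1.0, 2.0, 3.0])               # flattened grid, length 4

# Repeat the grid once per batch element, then split it back out
# into a per-batch leading dimension.
tiled = tf.tile(grid, tf.stack([num_batch]))           # [8]
grids = tf.reshape(tiled, tf.stack([num_batch, -1]))   # [2, 4]

with tf.Session() as sess:
    print(sess.run(grids))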
import tensorflow as tf
initializer=self._initializers[self.BETA])
else:
self._beta = None
if self._scale:
self._set_default_initializer(self.GAMMA)
self._gamma = tf.get_variable(
self.GAMMA,
shape=self._mean_shape,
initializer=self._initializers[self.GAMMA])
else:
self._gamma = None
out = tf.nn.batch_normalization(
input_batch,
mean,
variance,
self._beta,
self._gamma,
self._eps,
name="batch_norm")
return out
@property
def moving_mean(self):
| tensorflow.nn.batch_normalization | 185 |
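A self-contained sketch of tf.nn.batch_normalization, with batch statistics from tf.nn.moments standing in for the module's moving averages above; shapes are assumptions.

import tensorflow as tf

x = tf.random_normal([8, 4])                 # [batch, features]
mean, variance = tf.nn.moments(x, axes=[0])  # per-feature statistics
beta = tf.zeros([4])                         # offset, like self._beta
gamma = tf.ones([4])                         # scale, like self._gamma

out = tf.nn.batch_normalization(x, mean, variance, beta, gamma,
                                variance_epsilon=1e-5)

with tf.Session() as sess:
    # Normalized output has roughly zero mean and unit variance.
    print(sess.run(tf.nn.moments(out, axes=[0])))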
import tensorflow as tf
correct = tf.equal(
tf.cast(tf.zeros_like(label_ids, dtype=tf.int32), tf.int32),
tf.cast(pred_label, tf.int32)
)
te_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
except:
te_accuracy = tf.constant(0.0)
st_accuracy = tf.constant(0.0)
try:
st_accuracy = tf.reduce_mean(distillation_loss["src_f1_prob"])
te_accuracy = tf.reduce_mean(distillation_loss["tgt_f1_prob"])
except:
te_accuracy = tf.constant(0.0)
st_accuracy = tf.constant(0.0)
return {
"train":{
"loss":loss,
"logits":logits,
"train_op":train_op,
"cross_entropy":label_loss,
"distillation_loss":distillation_loss["distillation_loss"],
"kd_num":tf.reduce_sum(features["distillation_ratio"]),
"ce_num":tf.reduce_sum(features["label_ratio"]),
"teacher_logit":teacher_logit,
"student_logit":student_logit,
"label_ratio":features["label_ratio"],
| tensorflow.constant | 186 |
import tensorflow as tf
return loss, train_op, global_step, ul_u_updated
def build_eval_graph(x, y, ul_x, ul_u):
losses = {}
logit = vat.forward(x, is_training=False, update_batch_stats=False)
nll_loss = L.ce_loss(logit, y)
losses['NLL'] = nll_loss
acc = L.accuracy(logit, y)
losses['Acc'] = acc
scope = tf.get_variable_scope()
scope.reuse_variables()
# at_loss = vat.adversarial_loss(x, y, nll_loss, is_training=False)
# losses['AT_loss'] = at_loss
ul_logit = vat.forward(ul_x, is_training=False, update_batch_stats=False)
vat_loss = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit, is_training=False)
losses['VAT_loss'] = vat_loss
return losses
| tensorflow.get_variable_scope | 187 |
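A minimal sketch of the reuse pattern in build_eval_graph above: flip the current variable scope to reuse so a second forward pass shares the training weights instead of creating new ones.

import tensorflow as tf

def forward(x):
    w = tf.get_variable("w", [3, 3])
    return tf.matmul(x, w)

x = tf.placeholder(tf.float32, [None, 3])
train_out = forward(x)  # creates "w"

# Reuse the same weights for the evaluation pass.
tf.get_variable_scope().reuse_variables()
eval_out = forward(x)   # looks up the existing "w"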
import tensorflow as tf
scatter_attn = tf.cond(
tf.equal(sl_head, 0),
lambda: tf.zeros([bs, sl+1, hn], tf.float32),
lambda: tf.scatter_nd(
tf.stack([range_head, head_org_idx], -1), attn_result, [bs, sl+1, hn])
)
range_unhead = tf.tile(tf.expand_dims(tf.range(bs), -1), [1, sl_unhead])
scatter_pooling = tf.cond(
tf.equal(sl_unhead, 0),
lambda: tf.zeros([bs, sl+1, hn], tf.float32),
lambda: tf.scatter_nd(
tf.stack([range_unhead, unhead_org_idx], -1), pooling_result, [bs, sl+1, hn])
)
self_attn_input = rep_map
context_features = tf.add(scatter_attn[:, :-1], scatter_pooling[:, :-1], 'context_features')
output_mask = rep_mask
else:
self_attn_input = rep_head_tensor
| tensorflow.zeros | 188 |
import tensorflow as tf
saver = tf.train.Saver(max_to_keep=None)
if self.is_summary:
training_batch_summary_op = tf.merge_all_summaries(key=TRAINING_BATCH_SUMMARIES)
training_epoch_summary_op = tf.merge_all_summaries(key=TRAINING_EPOCH_SUMMARIES)
validation_batch_summary_op = tf.merge_all_summaries(key=VALIDATION_BATCH_SUMMARIES)
validation_epoch_summary_op = tf.merge_all_summaries(key=VALIDATION_EPOCH_SUMMARIES)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
| tensorflow.merge_all_summaries | 189 |
import tensorflow as tf
return features
def serving_input_fn():
label_ids = tf.placeholder(tf.int32, [None], name='label_ids')
input_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_ids')
input_mask = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_mask')
segment_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='segment_ids')
input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
'label_ids': label_ids,
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids,
})()
| tensorflow.placeholder | 190 |
import tensorflow as tf
def _create_dummy_vars():
"""Dummy vars for restore to work when not using TPU codepath."""
var_names = set([v.name for v in tf.global_variables()])
if "losses_avg/problem_0/total_loss:0" in var_names:
| tensorflow.global_variables | 191 |
import tensorflow as tf
dtypes = {"feature": tf.int64, "src_lang": tf.int64, "trg_lang": tf.int64}
signatures = {"feature": tf.TensorShape([None, None]),
"src_lang": tf.TensorShape([None, ]),
"trg_lang": tf.TensorShape([None, ])}
if mode == compat.ModeKeys.INFER:
return dtypes, signatures
dtypes["label"] = tf.int64
signatures["label"] = tf.TensorShape([None, None])
return dtypes, signatures
def build_model(self, args, name=None):
""" Builds and return a keras model. """
model = build_model(args, self._multilingual_dp.meta,
self._multilingual_dp.meta, name=name)
| tensorflow.TensorShape | 192 |
import tensorflow as tf
normalizer_fn=tf.contrib.layers.batch_norm,
normalizer_params={"is_training": self.train}):
self.fc1 = tf.contrib.layers.fully_connected(self.flatten, self.config.cifar10_cnn["fc1_nb_units"])
self.fc2 = tf.contrib.layers.fully_connected(self.fc1, self.config.data["num_categories"], activation_fn=None)
# Compute loss
with tf.name_scope("loss"):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y))
# Optimizer
with tf.name_scope("training_op"):
self.training_op = tf.compat.v1.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
# Perf metrics
| tensorflow.nn.softmax_cross_entropy_with_logits | 193 |
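A minimal sketch of the loss line above: the op takes raw logits (it applies softmax internally) and one-hot labels, and reduce_mean collapses per-example losses to a scalar. Values are illustrative.

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1]])  # unscaled scores, no softmax applied
labels = tf.constant([[1.0, 0.0, 0.0]])  # one-hot targets

loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))

with tf.Session() as sess:
    print(sess.run(loss))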
import tensorflow as tf
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
wrong = tf.to_float(tf.logical_not(tf.nn.in_top_k(logits, label, 1)), name='incorrect_vector')
summary.add_moving_summary(tf.reduce_mean(wrong, name='train_error'))
wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss),
| tensorflow.nn.in_top_k | 194 |
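A small sketch of the error-vector idiom above: tf.nn.in_top_k is True where the label is among the k largest logits, so logical_not plus to_float yields a per-example error indicator.

import tensorflow as tf

logits = tf.constant([[0.1, 0.9], [0.8, 0.2]])  # [batch, classes]
labels = tf.constant([1, 1])                    # integer class ids

wrong = tf.to_float(tf.logical_not(tf.nn.in_top_k(logits, labels, 1)))
error_rate = tf.reduce_mean(wrong)

with tf.Session() as sess:
    print(sess.run(error_rate))  # 0.5: the second example is misclassified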
import tensorflow as tf
# Only drop path during training
keep_prob = tf.constant(1, dtype=tf.float32)
if is_train:
# Decrease keep prob deeper into network
keep_prob = 1 - layers_ratio * (1 - drop_path_keep_prob)
# Decrease keep prob with increasing steps
steps_per_epoch = math.ceil(N / batch_size)
steps_ratio = tf.minimum(((step + 1) / steps_per_epoch) / drop_path_decay_epochs, 1)
keep_prob = 1 - steps_ratio * (1 - keep_prob)
keep_prob = tf.cast(keep_prob, tf.float32)
# Monitor last layer's keep prob
if layers_ratio == 1:
self._mark_for_monitoring('drop_path_keep_prob', keep_prob)
return keep_prob
def _get_learning_rate(self, step, **knobs):
N = self._train_params['N']
batch_size = knobs['batch_size']
| tensorflow.cast | 195 |
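A sketch of why the final tf.cast above matters: integer step arithmetic in the schedule promotes to a wider float dtype, so the keep probability is cast back to float32 before it meets float32 model math. The step counts and decay horizon are assumptions.

import tensorflow as tf

step = tf.constant(500, dtype=tf.int64)
drop_path_keep_prob = 0.6

# (step + 1) / 1000.0 would otherwise mix int64 and float dtypes.
steps_ratio = tf.minimum(tf.cast(step + 1, tf.float32) / 1000.0, 1.0)
keep_prob = 1 - steps_ratio * (1 - drop_path_keep_prob)
keep_prob = tf.cast(keep_prob, tf.float32)

with tf.Session() as sess:
    print(sess.run(keep_prob))  # ~0.8 halfway through the 1000-step decay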
import tensorflow as tf
sequence_length = 5
vocab_size = 100
embedding_dim = 32
word_ids = np.random.randint(0, vocab_size, batch_size * sequence_length).reshape(batch_size, sequence_length)
tensor = tf.constant(word_ids)
# print(word_ids >> identity_layer() >> embedding_layer(vocab_size, embedding_dim))
print(tensor >> identity_layer() >> embedding_layer(vocab_size, embedding_dim))
| tensorflow.constant | 196 |
import tensorflow as tf
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,
scope=scope if isinstance(scope, str) else scope.name
)
def scope_name():
"""
Returns the name of current scope as a string, e.g. deepq/q_func
:return: (str) the name of current scope
"""
return tf.get_variable_scope().name
def absolute_scope_name(relative_scope_name):
"""
Appends parent scope name to `relative_scope_name`
:return: (str) the absolute name of the scope
"""
return scope_name() + "/" + relative_scope_name
| tensorflow.get_variable_scope | 197 |
import tensorflow as tf
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5,5],
padding='same',
use_bias=False,
name='conv2'
)
conv2_bn = tf.nn.relu(tf.layers.batch_normalization(conv2, training=train))
pool2 = tf.layers.max_pooling2d(
inputs=conv2_bn,
pool_size=[2, 2],
strides=2,
name='pool2'
)
| tensorflow.layers.batch_normalization | 198 |
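A hedged sketch of the conv + batch-norm block above, plus the UPDATE_OPS dependency that tf.layers.batch_normalization requires at training time; the loss and optimizer here are stand-ins.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 28, 28, 1])
is_training = tf.placeholder(tf.bool, [])

h = tf.layers.conv2d(x, 32, [5, 5], padding='same', use_bias=False)
h = tf.nn.relu(tf.layers.batch_normalization(h, training=is_training))

# The moving-average updates live in UPDATE_OPS and must run alongside
# the train op, or inference statistics never update.
loss = tf.reduce_mean(h)  # stand-in loss for the sketch
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)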
import tensorflow as tf
self.episode_count = tf.get_variable(
name='episode-count',
dtype=util.tf_dtype('int'),
initializer=0,
trainable=False
)
def tf_store(self, states, internals, actions, terminal, reward):
# Memory indices to overwrite.
num_instances = tf.shape(input=terminal)[0]
with tf.control_dependencies([tf.assert_less_equal(num_instances, self.capacity)]):
indices = tf.range(self.memory_index, self.memory_index + num_instances) % self.capacity
# Remove episode indices.
num_episodes = tf.count_nonzero(
input_tensor=tf.gather(params=self.terminal_memory, indices=indices),
axis=0,
dtype=util.tf_dtype('int')
)
num_episodes = tf.minimum(x=num_episodes, y=self.episode_count)
assignment = tf.assign(
ref=self.episode_indices[:self.episode_count - num_episodes],
value=self.episode_indices[num_episodes: self.episode_count]
)
# Decrement episode count.
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign_sub(ref=self.episode_count, value=num_episodes)
# Assign new observations.
| tensorflow.gather | 199 |
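A minimal sketch of the episode-counting step above: gather the terminal flags at the memory slots about to be overwritten, then count the nonzero entries. The flag values and indices are illustrative.

import tensorflow as tf

terminal_memory = tf.constant([0, 1, 0, 0, 1], dtype=tf.int64)
indices = tf.constant([1, 2, 3])  # slots being overwritten

# How many episode boundaries fall inside the overwritten slots.
num_episodes = tf.count_nonzero(
    tf.gather(params=terminal_memory, indices=indices), axis=0)

with tf.Session() as sess:
    print(sess.run(num_episodes))  # 1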