| seed (string, 25-2.89k chars) | seed_api (string, 14-102 chars) | index (int64, 0-14.8k) |
---|---|---|
import tensorflow as tf
action_templates_vocabulary_length = len(data.idx2word_action_template)
action_templates_embedding_size = 8
num_actions_arguments = data.batch_actions_arguments.shape[2]
actions_arguments_vocabulary_length = len(data.idx2word_action_arguments)
with tf.name_scope('data'):
batch_histories = tf.Variable(data.batch_histories, name='histories',
trainable=False)
batch_actions_template = tf.Variable(data.batch_actions_template, name='actions',
trainable=False)
batch_action_arguments = tf.Variable(data.batch_actions_arguments, name='actions_arguments',
trainable=False)
histories = tf.gather(batch_histories, self.batch_idx)
| tensorflow.Variable | 1,900 |
import tensorflow as tf
self.__restore_model(is_train=False)
nb_iters = int(np.ceil(float(FLAGS.nb_smpls_eval) / FLAGS.batch_size_eval))
eval_rslts = np.zeros((nb_iters, len(self.eval_op)))
for idx_iter in range(nb_iters):
eval_rslts[idx_iter] = self.sess_eval.run(self.eval_op)
for idx, name in enumerate(self.eval_op_names):
tf.logging.info('%s = %.4e' % (name, np.mean(eval_rslts[:, idx])))
def __build_train(self): # pylint: disable=too-many-locals
"""Build the training graph."""
with tf.Graph().as_default():
# create a TF session for the current graph
config = tf.ConfigProto()
if FLAGS.enbl_multi_gpu:
config.gpu_options.visible_device_list = str(mgw.local_rank()) # pylint: disable=no-member
else:
config.gpu_options.visible_device_list = '0' # pylint: disable=no-member
sess = tf.Session(config=config)
# data input pipeline
with tf.variable_scope(self.data_scope):
| tensorflow.Graph | 1,901 |
import tensorflow as tf
if __name__ == "__main__":
tf.test.main()
| tensorflow.test.main | 1,902 |
import tensorflow as tf
message='Strides > 1 not supported.')
# Convolution when number of indices is larger than zero.
def _conv_nonzero():
# Gather patches.
p = tf.gather_nd(x_, blk_indices_)
p_ = tf.reshape(p, [-1, ksize[0] * ksize[1] * ksize[2]])
# Convolution on patches.
w_ = tf.reshape(w, [ksize[0] * ksize[1] * ksize[2], -1])
q = tf.matmul(p_, w_)
# Center locations.
blk_indices_crop = blk_indices[:, 0, 0, :]
# Project back to an image.
y = tf.scatter_nd(blk_indices_crop, q, out_shape)
return y
| tensorflow.reshape | 1,903 |
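The snippet above performs convolution on sparse blocks by gathering patches, multiplying them with a flattened kernel, and scattering the result back. Below is a minimal NumPy sketch of the dense analogue of that gather-matmul-scatter pattern; the names, shapes, and stride-1 VALID setting are illustrative assumptions, not taken from the source.

```python
import numpy as np

def conv2d_via_patches(x, w):
    """x: [H, W, C_in]; w: [kh, kw, C_in, C_out]; stride 1, VALID padding."""
    kh, kw, c_in, c_out = w.shape
    H, W, _ = x.shape
    out_h, out_w = H - kh + 1, W - kw + 1
    # Gather every kh x kw patch and flatten it (the tf.gather_nd + tf.reshape step).
    patches = np.empty((out_h * out_w, kh * kw * c_in))
    for i in range(out_h):
        for j in range(out_w):
            patches[i * out_w + j] = x[i:i + kh, j:j + kw, :].ravel()
    # One matmul against the flattened kernel (the tf.matmul step).
    q = patches @ w.reshape(kh * kw * c_in, c_out)
    # Project back to an image (the dense analogue of the tf.scatter_nd step).
    return q.reshape(out_h, out_w, c_out)

y = conv2d_via_patches(np.random.rand(8, 8, 3), np.random.rand(3, 3, 3, 4))
print(y.shape)  # (6, 6, 4)
```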
from tensorflow.python.ops import math_ops
name, 'false_negatives', [predictions, labels]):
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
is_false_negative = math_ops.logical_and(math_ops.equal(labels, 1),
math_ops.equal(predictions, 0))
return _count_condition(is_false_negative, weights, metrics_collections,
updates_collections)
| tensorflow.python.ops.math_ops.equal | 1,904 |
from tensorflow.python.framework import ops
def after_create_session(self, session, _):
assert self._init_op.graph == ops.get_default_graph()
| tensorflow.python.framework.ops.get_default_graph | 1,905 |
import tensorflow as tf
w_c = variable_scope.get_variable("w_c", [options.attention_vec_size])
w_c = tf.expand_dims(tf.expand_dims(w_c, axis=0), axis=0)
word_t_representation = self.embedding_lookup(word_t)
(state_t, context_t, coverage_t, attn_dist_t, p_gen_t, output_t) = self.one_step_decoder(
state_t_1, context_t_1, coverage_t_1, word_t_representation, encoder_states, encoder_features,
passage_word_idx, passage_mask, v, w_c, word_vocab)
vocab_scores = tf.log(output_t)
greedy_prediction = tf.reshape(tf.argmax(output_t, 1),[-1]) # calculate greedy
multinomial_prediction = tf.reshape(tf.multinomial(vocab_scores, 1),[-1]) # calculate multinomial
topk_log_probs, topk_ids = tf.nn.top_k(vocab_scores, beam_size) # calculate topK
return (state_t, context_t, coverage_t, attn_dist_t, p_gen_t, output_t, topk_log_probs, topk_ids,
greedy_prediction, multinomial_prediction)
def merge_prob_dist_for_one_step(self, vocab_dist, attn_dist, p_gen, passage_word_idx, passage_mask=None):
'''
max_phrase_size: an input placeholder indicating the maximum phrase size inside this batch
vocab_dist: [batch_size, vsize]
| tensorflow.nn.top_k | 1,906 |
import tensorflow as tf
# with tf.control_dependencies([p]):
# avg_loss = 1. * avg_loss
# print(avg_loss)
# exit()
return avg_loss
def compute_contra_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0):
geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
tgt_larg = tf.where(geq, tgt1, tgt2)
tgt_small = tf.where(geq, tgt2, tgt1)
pred_larg = tf.where(geq, pred1, pred2)
pred_small = tf.where(geq, pred2, pred1)
loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small))
if hard_ratio < 1.0:
hard_num = tf.cast(tools.shape(pred1)[0] * hard_ratio, tf.int32)
loss = tf.reshape(loss, [-1])
hard_loss, _ = tf.math.top_k(loss, k=hard_num)
return hard_loss
return loss
| tensorflow.where | 1,907 |
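A minimal NumPy sketch of the pairwise ranking idea in `compute_contra_loss` above, assuming the inputs are flat score vectors of equal length: the hinge penalizes pairs whose predicted gap is smaller than the target gap, and `hard_ratio` keeps only the hardest pairs.

```python
import numpy as np

def contra_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0):
    geq = (tgt1 - tgt2) > 0
    tgt_larg, tgt_small = np.where(geq, tgt1, tgt2), np.where(geq, tgt2, tgt1)
    pred_larg, pred_small = np.where(geq, pred1, pred2), np.where(geq, pred2, pred1)
    # Hinge: penalize pairs whose predicted gap is smaller than the target gap.
    loss = np.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small))
    if hard_ratio < 1.0:
        # Keep only the hardest (largest-loss) fraction of pairs.
        k = int(loss.size * hard_ratio)
        loss = np.sort(loss.ravel())[::-1][:k]
    return loss

print(contra_loss(np.array([0.2, 0.9]), np.array([0.4, 0.1]),
                  np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # [1.2 1.8]
```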
import tensorflow as tf
if isinstance(shape_a[0], int) and isinstance(shape_b[0], int):
if shape_a[0] != shape_b[0]:
raise ValueError('Unequal first dimension {}, {}'.format(
shape_a[0], shape_b[0]))
else: return tf.no_op()
else:
return tf.assert_equal(shape_a[0], shape_b[0])
| tensorflow.no_op | 1,908 |
from tensorflow.contrib.distributions.python.ops import distribution_util
return math_ops.igammac(self.alpha, self.beta / x)
@distribution_util.AppendDocstring(
"""This is defined to be
| tensorflow.contrib.distributions.python.ops.distribution_util.AppendDocstring | 1,909 |
import tensorflow as tf
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
| tensorflow.train.Scaffold | 1,910 |
import tensorflow as tf
padding=padding, kernel_initializer=init, name='conv' + id,
use_bias=use_bias, dilation_rate=(dilation, dilation))
def z_conv(self, id, input, channels, size, stride=1, padding="SAME", use_bias=False, dilation=1):
# zero mean conv
if type(size) == int: size = [size, size]
in_ch = input.get_shape().as_list()[-1]
# init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02)
filters = tf.get_variable('zero_conv_weights' + id, initializer=init, shape=[size[0], size[1], in_ch, channels])
filters = filters - tf.reduce_mean(filters, axis=[0, 1, 2], keepdims=True)
if padding == "PARTIAL":
with tf.variable_scope('mask'):
_, h, w, _ = input.get_shape().as_list()
slide_window = size[0] * size[1]
mask = tf.ones(shape=[1, h, w, 1])
update_mask = tf.layers.conv2d(mask, filters=1, name='mask' + id,
kernel_size=size, kernel_initializer=tf.constant_initializer(1.0),
| tensorflow.reduce_mean | 1,911 |
import tensorflow as tf
x = tf.placeholder_with_default(input=[1], shape=None)
is_scalar = normal._is_scalar_helper(x.shape, lambda: tf.shape(x))
self.assertFalse(self.evaluate(is_scalar))
def _GetFakeDistribution(self):
class FakeDistribution(tfd.Distribution):
"""Fake Distribution for testing _set_sample_static_shape."""
def __init__(self, batch_shape=None, event_shape=None):
self._static_batch_shape = tf.TensorShape(batch_shape)
self._static_event_shape = tf.TensorShape(event_shape)
super(FakeDistribution, self).__init__(
dtype=tf.float32,
reparameterization_type=tfd.NOT_REPARAMETERIZED,
validate_args=True,
allow_nan_stats=True,
name="DummyDistribution")
| tensorflow.TensorShape | 1,912 |
import tensorflow as tf
loss_mask = tf.sequence_mask(tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])
return losses
def dice_loss(predictions, targets, weights=1., name='dice_loss'):
with tf.name_scope(name):
# predictions = tf.to_float(predictions)
targets = tf.to_float(targets)
intersection = 2 * tf.reduce_sum(predictions * targets) + weights
union = weights + tf.reduce_sum(predictions) + tf.reduce_sum(targets)
loss = -(intersection / (union))
return loss
def precision_recall_auc_loss(labels,
logits,
| tensorflow.to_float | 1,913 |
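A minimal NumPy sketch of the soft Dice loss defined above, where the `weights` term plays the role of a smoothing constant; the negative sign convention follows the snippet.

```python
import numpy as np

def dice_loss(predictions, targets, smooth=1.0):
    intersection = 2.0 * np.sum(predictions * targets) + smooth
    union = np.sum(predictions) + np.sum(targets) + smooth
    return -(intersection / union)  # approaches -1.0 at perfect overlap

p = np.array([0.9, 0.8, 0.1])
t = np.array([1.0, 1.0, 0.0])
print(dice_loss(p, t))  # about -0.917 for good overlap
```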
import tensorflow as tf
start = datetime.datetime.now()
session.run(output)
duration = datetime.datetime.now() - start
# There should have been a timeout here because only one sample was added
# and the minimum batch size is 2.
self.assertLessEqual(.9, duration.total_seconds())
self.assertGreaterEqual(1.5, duration.total_seconds())
outputs = [
f(tf.constant([[1, 3]]), tf.constant([2])),
f(tf.constant([[1, 3]]), tf.constant([2]))
]
start = datetime.datetime.now()
(_, batch_size), _ = session.run(outputs)
duration = datetime.datetime.now() - start
# The outputs should be executed immediately because two samples are
# added.
| tensorflow.constant | 1,914 |
import tensorflow as tf
return (result,losses)
"""
Test multi-layer RNN graph
"""
def test_rnn_kstep_layer(test_data_x,test_data_y, preds, rnn_outputs, g, checkpoint, input_prob, output_prob, state_prob, num_test, kstep = 3):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
result= {}
"read the trained graph"
g['saver'].restore(sess, checkpoint)
losses = []
for step_num in range(kstep):
| tensorflow.Session | 1,915 |
import tensorflow as tf
opt = tf.train.AdamOptimizer(self.LR)
self.train_op = opt.minimize(loss, global_step=self.global_step, var_list=pi_params + vf_params)
self.pi_new_params = [oldp.assign(p) for p, oldp in zip(pi_params, pi_old_params)]
self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)]
self.sess.run(tf.global_variables_initializer())
# Tensorboard
if summary_dir is not None:
self.writer = tf.summary.FileWriter(summary_dir)
tf.summary.scalar('Loss/Policy', loss_pg)
tf.summary.scalar('Loss/Value', loss_vf)
tf.summary.scalar('Loss/Entropy', loss_entropy)
tf.summary.scalar('Loss/Total', loss)
tf.summary.scalar('Var/Epsilon', epsilon_decay)
tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode()))
tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev()))
tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf))
self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))
# AC net
def build_anet(self, state_in, name, reuse=False, batch_size=64):
| tensorflow.summary.scalar | 1,916 |
import tensorflow as tf
if all_losses:
sum_loss = tf.add_n(all_losses)
# Add the summaries out of the clone device block.
if clone_loss is not None:
tf.summary.scalar('clone_loss', clone_loss)
# tf.summary.scalar(clone.scope + '/clone_loss', clone_loss)
if regularization_loss is not None:
tf.summary.scalar('regularization_loss', regularization_loss)
| tensorflow.summary.scalar | 1,917 |
import tensorflow as tf
problem = algorithmic.TinyAlgo()
dataset1 = problem.dataset(mode=tf.estimator.ModeKeys.TRAIN,
data_dir=algorithmic.TinyAlgo.data_dir,
shuffle_files=False, preprocess=False)
dataset2 = problem.dataset(mode=tf.estimator.ModeKeys.TRAIN,
data_dir=algorithmic.TinyAlgo.data_dir,
shuffle_files=False, preprocess=True)
tensor1 = dataset1.make_one_shot_iterator().get_next()["targets"]
tensor2 = dataset2.make_one_shot_iterator().get_next()["targets"]
with tf.Session() as sess:
self.assertTrue(assert_tensors_equal(sess, tensor1, tensor2, 20))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testProblemHparamsModality(self):
problem = problem_hparams.TestProblem(input_vocab_size=2,
target_vocab_size=3)
p_hparams = problem.get_hparams()
self.assertIsInstance(p_hparams.modality["inputs"],
modalities.SymbolModality)
self.assertIsInstance(p_hparams.modality["targets"],
| tensorflow.Session | 1,918 |
import tensorflow as tf
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session(graph=tf.Graph()) as sess:
v0_2 = tf.Variable(1000.0, name="v0")
v1_2 = tf.Variable(2000.0, name="v1")
save2 = tf.train.Saver([v0_2, v1_2])
tf.initialize_all_variables().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(1000.0, v0_2.eval())
self.assertEqual(2000.0, v1_2.eval())
# Restore the values saved earlier in the parameter nodes.
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0_2.eval())
self.assertEqual(20.0, v1_2.eval())
| tensorflow.initialize_all_variables | 1,919 |
import tensorflow as tf
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
if not forCnn:
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Scale
| tensorflow.shape | 1,920 |
import tensorflow as tf
x = x*g + b
return x
def norm(x, scope, axis=[-1]):
with tf.variable_scope(scope):
n_state = shape_list(x)[-1]
g = tf.get_variable("g", [n_state], initializer=tf.constant_initializer(1))
b = tf.get_variable("b", [n_state], initializer=tf.constant_initializer(0))
return _norm(x, g, b, axis=axis)
def dropout(x, pdrop, train):
if train and pdrop > 0:
x = tf.nn.dropout(x, 1-pdrop)
return x
| tensorflow.constant_initializer | 1,921 |
import tensorflow as tf
train_op=train_op,
scaffold=scaffold_fn
)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
# predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
# ones=tf.get_variable('ones',shape=logits.shape,initializer=tf.ones_initializer)
# zeros=tf.get_variable('zeros',shape=logits.shape,initializer=tf.zeros_initializer)
predictions=tf.where(logits>=0,tf.ones(tf.shape(logits)),tf.zeros(tf.shape(logits)))
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
| tensorflow.shape | 1,922 |
from tensorflow.contrib.eager.python.examples.revnet import revnet
class RevNetTest(tf.test.TestCase):
def setUp(self):
super(RevNetTest, self).setUp()
config = config_.get_hparams_cifar_38()
config.add_hparam("n_classes", 10)
config.add_hparam("dataset", "cifar-10")
# Reconstruction could cause numerical error, use double precision for tests
config.dtype = tf.float64
config.fused = False # Fused batch norm does not support tf.float64
# Reduce the batch size for tests because the OSS version runs
# in a constrained GPU environment with 1-2GB of memory.
config.batch_size = 2
shape = (config.batch_size,) + config.input_shape
self.model = revnet.RevNet(config=config)
self.x = tf.random_normal(shape=shape, dtype=tf.float64)
self.t = tf.random_uniform(
shape=[config.batch_size],
minval=0,
maxval=config.n_classes,
dtype=tf.int64)
self.config = config
def tearDown(self):
del self.model
del self.x
del self.t
del self.config
super(RevNetTest, self).tearDown()
| tensorflow.contrib.eager.python.examples.revnet.revnet.RevNet | 1,923 |
import tensorflow as tf
def rl_label_weights(name=None):
"""Returns the weight for importance."""
with tf.variable_scope(name, 'rl_op_selection'):
num_classes = FLAGS.src_num_classes
num_choices = FLAGS.num_choices
| tensorflow.variable_scope | 1,924 |
import tensorflow as tf
self.rewards_ph = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')
self.is_demo_ph = tf.placeholder(tf.float32, shape=(None, 1), name='is_demonstrations')
| tensorflow.placeholder | 1,925 |
import tensorflow as tf
q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True)
q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)
else:
| tensorflow.one_hot | 1,926 |
import tensorflow as tf
"float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")
# x is shaped [batch_size,time_steps,num_inputs]
if is_dynamic_rnn:
lstm_input = tf.transpose(x, perm=[1, 0, 2])
outputs, _ = tf.lite.experimental.nn.dynamic_rnn(
lstm_layer, lstm_input, dtype="float32")
outputs = tf.unstack(outputs, axis=0)
else:
lstm_input = tf.unstack(x, self.time_steps, 1)
outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32")
# Compute logits by multiplying outputs[-1] of shape [batch_size,num_units]
# by the softmax layer's out_weight of shape [num_units,n_classes]
# plus out_bias
prediction = tf.matmul(outputs[-1], out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
| tensorflow.nn.static_rnn | 1,927 |
import tensorflow as tf
def _testMultiSaverCollectionRestore(self):
test_dir = os.path.join(self.get_temp_dir(), "saver_collection")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=tf.Graph()) as sess:
# Imports from meta_graph.
tf.train.import_meta_graph(filename)
# Retrieves SAVERS collection. Verifies there are 2 entries.
savers = tf.get_collection("savers")
self.assertEqual(2, len(savers))
# Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1.
new_saver0 = savers[0]
new_saver0.restore(sess, saver0_ckpt)
| tensorflow.train.import_meta_graph | 1,928 |
import tensorflow as tf
return ids
def _create_regularizers_hook(self, config):
wb_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
# see keras_utils.py: activity_and_contractive_regularizers
ac_regularizers = tf.get_collection(AC_REGULARIZATION)
custom_regularizers = tf.get_collection(CUSTOM_REGULARIZATION)
if wb_regularizers:
wb_regularizers_names = [r.name for r in wb_regularizers]
else:
wb_regularizers = [tf.zeros([1])]
| tensorflow.get_collection | 1,929 |
import tensorflow as tf
with tf.name_scope('tower_averaging'):
all_grads = {}
for grad, var in itertools.chain(*tower_gradvars):
if grad is not None:
all_grads.setdefault(var, []).append(grad)
for var, grads in all_grads.items():
if len(grads) == 1:
avg_grad = grads[0]
else:
avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
gradvars.append((avg_grad, var))
self.loss = tf.reduce_mean(tower_losses)
tf.summary.scalar('loss', self.loss)
# Create optimizer ops
self.global_step = tf.Variable(0, trainable=False, name='global_step')
opt = tf.train.RMSPropOptimizer(self.config['learning_rate'])
with tf.control_dependencies(update_ops):
self.trainer = opt.apply_gradients(
gradvars, global_step=self.global_step)
def _eval_graph(self, data):
| tensorflow.reduce_mean | 1,930 |
import tensorflow as tf
ious = iou_of(tf.expand_dims(gt_boxes, axis=0), tf.expand_dims(corner_form_priors, axis=1))
# size: num_priors
best_target_per_prior = tf.math.reduce_max(ious, axis=1)
best_target_per_prior_index = tf.math.argmax(ious, axis=1)
# size: num_targets
best_prior_per_target = tf.math.reduce_max(ious, axis=0)
best_prior_per_target_index = tf.math.argmax(ious, axis=0)
targets = tf.range(tf.shape(best_prior_per_target_index)[0], dtype='int64')
best_target_per_prior_index = tf.tensor_scatter_nd_update(best_target_per_prior_index, tf.expand_dims(best_prior_per_target_index, 1), targets)
# 2.0 is used to make sure every target has a prior assigned
best_target_per_prior = tf.tensor_scatter_nd_update(best_target_per_prior, tf.expand_dims(best_prior_per_target_index, 1), tf.ones_like(best_prior_per_target_index, dtype=tf.float32)*2.0)
| tensorflow.math.argmax | 1,931 |
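A minimal NumPy sketch of the matching step above, assuming the IoU matrix is laid out as [num_priors, num_targets] as in the snippet: each prior takes its best target, and each target is then force-assigned to its best prior with a sentinel IoU of 2.0 so no ground-truth box is left unmatched.

```python
import numpy as np

def assign_priors(ious):
    """ious: [num_priors, num_targets] pairwise IoU matrix."""
    best_target_per_prior = ious.max(axis=1)
    best_target_per_prior_index = ious.argmax(axis=1)
    best_prior_per_target_index = ious.argmax(axis=0)   # one prior per target
    # Force-assign every target to its best prior, with a sentinel IoU of 2.0
    # so the assignment survives any later IoU threshold.
    for target_idx, prior_idx in enumerate(best_prior_per_target_index):
        best_target_per_prior_index[prior_idx] = target_idx
        best_target_per_prior[prior_idx] = 2.0
    return best_target_per_prior, best_target_per_prior_index

ious = np.array([[0.1, 0.3],    # prior 0 vs targets 0, 1
                 [0.6, 0.2],    # prior 1
                 [0.2, 0.4]])   # prior 2
print(assign_priors(ious))  # ([0.3, 2.0, 2.0], [1, 0, 1])
```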
import tensorflow as tf
tf.train.start_queue_runners(sess)
sess.run(train_op)
def testLanguageModelGraph(self):
train_op, _, _ = graphs.VatxtModel().language_model_training()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess)
sess.run(train_op)
def testMulticlass(self):
FLAGS.num_classes = 10
| tensorflow.global_variables_initializer | 1,932 |
import tensorflow as tf
self.global_step = tf.train.get_or_create_global_step()
self.saver = tf.train.Saver()
| tensorflow.train.Saver | 1,933 |
import tensorflow as tf
return train_op
def _preprocess(self, images, classes, is_train=False, **knobs):
batch_size = knobs['batch_size']
cutout_size = knobs['cutout_size']
image_norm_mean = self._train_params['norm_mean']
image_norm_std = self._train_params['norm_std']
w = self._train_params['image_size']
h = self._train_params['image_size']
in_ch = 3 # Num channels of input images
def _prepare(images, classes):
# Bulk preprocessing of images
images = tf.cast(images, tf.float32)
images = (images - image_norm_mean) / image_norm_std # Normalize
images = images / 255 # Convert to [0, 1]
return (images, classes)
# Prepare train dataset
def _preprocess_train(image, clazz):
# Do random crop + horizontal flip for each train image
image = tf.pad(image, [[4, 4], [4, 4], [0, 0]])
image = tf.image.random_crop(image, (w, h, in_ch))
image = tf.image.random_flip_left_right(image)
if cutout_size > 0:
image = self._do_cutout(image, w, h, cutout_size)
| tensorflow.cast | 1,934 |
from tensorflow.contrib.learn.python.learn.estimators import test_data
def benchmarkCustomOptimizer(self):
iris = test_data.prepare_iris_data_for_logistic_regression()
| tensorflow.contrib.learn.python.learn.estimators.test_data.prepare_iris_data_for_logistic_regression | 1,935 |
import tensorflow as tf
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
| tensorflow.flags.DEFINE_string | 1,936 |
import tensorflow as tf
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
[pad_beg, pad_end], [pad_beg, pad_end]])
else:
| tensorflow.pad | 1,937 |
import tensorflow as tf
crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name="crops")
return slim.max_pool2d(crops, [2, 2], padding='SAME')
def _dropout_layer(self, bottom, name, ratio=0.5):
return tf.nn.dropout(bottom, ratio, name=name)
def _anchor_target_layer(self, rpn_cls_score, name):
with tf.variable_scope(name):
# The index here is defined over all anchors
# (1, 1, A * height, width)
# (1, height, width, A * 4)
# (1, height, width, A * 4)
# (1, height, width, A * 4)
rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = tf.py_func(
anchor_target_layer,
| tensorflow.variable_scope | 1,938 |
import tensorflow as tf
for ext in [".png", ".jpg"]:
sibling_path = os.path.join(a.b_dir, basename + ext)
if tf.io.gfile.exists(sibling_path):
sibling = im.load(sibling_path)
| tensorflow.io.gfile.exists | 1,939 |
import tensorflow as tf
is_training,
num_cpu_threads=4):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
name_to_features = {
"input_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask":
tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"masked_lm_positions":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_ids":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
"next_sentence_labels":
tf.FixedLenFeature([1], tf.int64),
}
| tensorflow.FixedLenFeature | 1,940 |
import tensorflow as tf
def block(x, scope, train=False, scale=False):
with tf.variable_scope(scope):
#nx = emb_size
nx = shape_list(x)[-1]
#a [-1,n_ctx,emb]
a = attn(x, 'attn', nx, n_head, train=train, scale=scale)
n = norm(x+a, 'ln_1')
m = mlp(n, 'mlp', nx*4, train=train)
h = norm(n+m, 'ln_2')
return h
def embed(X, we):
#X [-1,,2]
we = convert_gradient_to_tensor(we)
e = tf.gather(we, X)
h = tf.reduce_sum(e, 2)
return h
def clf(x, ny, w_init=tf.random_normal_initializer(stddev=0.02), b_init=tf.constant_initializer(0), train=False):
with tf.variable_scope('clf'):
nx = shape_list(x)[-1]
w = tf.get_variable("w", [nx, ny], initializer=w_init)
b = tf.get_variable("b", [ny], initializer=b_init)
return tf.matmul(x, w)+b
def model(X, M, Y, train=False, reuse=False):
with tf.variable_scope('model', reuse=reuse):
we = tf.get_variable("we", [n_vocab+n_special+n_ctx, n_embd], initializer=tf.random_normal_initializer(stddev=0.02))
we = dropout(we, embd_pdrop, train)
| tensorflow.reduce_sum | 1,941 |
import tensorflow as tf
'neg_threshold', 0.4, 'Matching threshold for the negative examples in the loss function.')
# optimizer related configuration
tf.app.flags.DEFINE_float(
'weight_decay', 0.0005, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.00005,
'The minimal end learning rate used by a polynomial decay learning rate.')
# for learning rate exponential_decay
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.96, 'Learning rate decay factor.')
| tensorflow.app.flags.DEFINE_float | 1,942 |
import tensorflow as tf
with tf.name_scope("Train"):
train_input = PTBInput(config=config, data=train_data, name="TrainInput")
with tf.variable_scope("Model", reuse=None, initializer=initializer):
m = PTBModel(is_training=True, config=config, input_=train_input)
tf.summary.scalar("Training Loss", m.cost)
tf.summary.scalar("Learning Rate", m.lr)
with tf.name_scope("Valid"):
| tensorflow.summary.scalar | 1,943 |
import tensorflow as tf
total_loss = tf.reduce_mean(per_example_loss)
return total_loss, per_example_loss, logits
def get_qa_outputs(FLAGS, features, is_training):
"""Loss for downstream span-extraction QA tasks such as SQuAD."""
inp = tf.transpose(features["input_ids"], [1, 0])
seg_id = tf.transpose(features["segment_ids"], [1, 0])
inp_mask = tf.transpose(features["input_mask"], [1, 0])
cls_index = tf.reshape(features["cls_index"], [-1])
seq_len = tf.shape(inp)[0]
xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)
run_config = xlnet.create_run_config(is_training, True, FLAGS)
xlnet_model = xlnet.XLNetModel(
xlnet_config=xlnet_config,
run_config=run_config,
input_ids=inp,
seg_ids=seg_id,
| tensorflow.reshape | 1,944 |
import tensorflow as tf
"softmax_w", [size, vocab_size], dtype=data_type())
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
# Reshape logits to be a 3-D tensor for sequence loss
| tensorflow.nn.xw_plus_b | 1,945 |
from tensorflow.core.util.event_pb2 import SessionLog
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
self._last_saved_time = time.time()
self._last_saved_step = step
if self._saver is None:
self._scaffold.saver.save(session, self._save_path, global_step=step)
else:
self._saver.save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
class StepCounter(EveryN):
"""Steps per second monitor."""
| tensorflow.core.util.event_pb2.SessionLog | 1,946 |
import tensorflow as tf
import os
import numpy as np
import csv
import string
import requests
import io
from zipfile import ZipFile
from tensorflow.contrib import learn
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Start a graph session
sess = tf.Session()
# Check if data was downloaded, otherwise download it and save for future use
save_file_name = os.path.join('temp','temp_spam_data.csv')
# Create directory if it doesn't exist
if not os.path.exists('temp'):
os.makedirs('temp')
if os.path.isfile(save_file_name):
text_data = []
with open(save_file_name, 'r') as temp_output_file:
| tensorflow.Session | 1,947 |
import tensorflow as tf
def merge_prob_dist_for_one_step(self, vocab_dist, attn_dist, p_gen, passage_word_idx, passage_mask=None):
'''
max_phrase_size: an input placeholder indicating the maximum phrase size inside this batch
vocab_dist: [batch_size, vsize]
attn_dist: [batch_size, passage_length]
p_gen: [batch_size, 1]
passage_word_idx: [batch_size, passage_length]
passage_mask: [batch_size, passage_length]
'''
input_shape = tf.shape(vocab_dist)
batch_size = input_shape[0]
vsize = input_shape[1]
passage_length = tf.shape(passage_word_idx)[1]
with tf.variable_scope('final_distribution'):
vocab_dist = p_gen * vocab_dist
attn_dist = (1.0-p_gen) * attn_dist
# Concatenate some zeros to each vocabulary dist, to hold the probabilities for phrases
| tensorflow.shape | 1,948 |
import tensorflow as tf
@gin.configurable(module='trax.data', denylist=['dataset', 'training'])
def bair_robot_pushing_preprocess(dataset, training):
"""Pre-processing function that concatenates input and target frames."""
del training
def concat_and_add_mask(features, targets):
"""Concatenate input and output frames to form a language modeling setup."""
inp = features['inputs']
concat = tf.concat([inp, targets], axis=0)
mask = tf.concat([tf.zeros_like(inp), tf.ones_like(targets)], axis=0)
concat = tf.reshape(concat, (-1,))
mask = tf.reshape(mask, (-1,))
concat = tf.cast(concat, tf.int32)
mask = tf.cast(mask, tf.float32)
features['inputs'] = features['targets'] = concat
features['mask'] = mask
return features, concat
dataset = dataset.map(concat_and_add_mask)
return dataset
def sentencepiece_tokenize(stream, spm_path=None, extra_ids=0):
"""Sentencepiece tokenization."""
spm_path = spm_path or t5_data().DEFAULT_SPM_PATH
vocab_file = os.path.basename(spm_path)
| tensorflow.cast | 1,949 |
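A minimal NumPy sketch of what `concat_and_add_mask` above does to one example: input and target frames are concatenated along the leading axis, a 0/1 mask marks the target positions, and both are flattened; the toy arrays are illustrative.

```python
import numpy as np

inp = np.array([[1, 2], [3, 4]])       # "inputs" frames
targets = np.array([[5, 6]])           # "targets" frames
concat = np.concatenate([inp, targets], axis=0).reshape(-1).astype(np.int32)
mask = np.concatenate([np.zeros_like(inp), np.ones_like(targets)],
                      axis=0).reshape(-1).astype(np.float32)
print(concat)  # [1 2 3 4 5 6]
print(mask)    # [0. 0. 0. 0. 1. 1.]  -> loss is computed only on target positions
```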
import tensorflow as tf
aux = deconv2d(activation(aux), [dim[0], dim[1], [in_shp[1] // aux_shp[1], in_shp[2] // aux_shp[2]]],
scope="%s_conv_upsample_aux" % scope, training=training, ema=ema, init=init)
else:
aux = nin(activation(aux), dim[0], training=training, ema=ema, init=init, scope="%s_conv_aux" % scope)
out += aux
out = activation(out)
if dropout > 0:
out = tf.layers.dropout(out, rate=dropout, training=training)
out = conv(out, [2*dim[0], dim[1], dim[2]], scope="%s_conv_out"%scope, training=training, ema=ema, init=init)
h_stack1, h_stack2 = tf.split(out, 2, 3)
sigmoid_out = tf.sigmoid(h_stack2)
out = (h_stack1 * sigmoid_out)
out_shp = out.get_shape().as_list()
if out_shp[1:-1] < in_shp[1:-1]:
x = tf.nn.avg_pool(x, [1, dim[2][0], dim[2][1], 1], strides=[1, dim[2][0], dim[2][1], 1], padding='SAME')
| tensorflow.layers.dropout | 1,950 |
import tensorflow as tf
def __init__(self, player_name):
self.player_name = player_name
with tf.device(tfe.get_config().get_player(player_name).device_name):
self._initialize_weights()
def _initialize_weights(self):
with tf.name_scope('parameters'):
self.w0 = tf.Variable(tf.random_normal([28 * 28, 512]))
self.b0 = tf.Variable(tf.zeros([512]))
self.w1 = tf.Variable(tf.random_normal([512, 10]))
self.b1 = tf.Variable(tf.zeros([10]))
def _build_model(self, x, y):
w0 = self.w0.read_value()
b0 = self.b0.read_value()
w1 = self.w1.read_value()
b1 = self.b1.read_value()
| tensorflow.zeros | 1,951 |
import tensorflow as tf
del state_update_tensors # Unused.
return {
self.ITERATION_STATE_KEY:
state[self.ITERATION_STATE_KEY] + tf.constant(1, dtype=tf.int32)
}
| tensorflow.constant | 1,952 |
import tensorflow as tf
:param reuse: True -> Reuse the discriminator variables,
False -> Create new variables or search for existing ones
:return: tensor of shape [batch_size, 1]
"""
if reuse:
tf.get_variable_scope().reuse_variables()
with tf.name_scope('Discriminator_Categorial'):
dc_den1 = tf.nn.relu(dense(x, n_labels, n_l1, name='dc_c_den1'))
dc_den2 = tf.nn.relu(dense(dc_den1, n_l1, n_l2, name='dc_c_den2'))
output = dense(dc_den2, n_l2, 1, name='dc_c_output')
| tensorflow.get_variable_scope | 1,953 |
import tensorflow as tf
'A flag to override the data format used in the model. channels_first '
'provides a performance boost on GPU but is not always compatible '
'with CPU. If left unspecified, the data format will be chosen '
'automatically based on whether TensorFlow was built for CPU or GPU.')
# optimizer related configuration
tf.app.flags.DEFINE_integer(
'tf_random_seed', 20180417, 'Random seed for TensorFlow initializers.')
tf.app.flags.DEFINE_float(
'weight_decay', 1e-5, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
| tensorflow.app.flags.DEFINE_integer | 1,954 |
import tensorflow as tf
total_epochs = 30
iteration = 14089 // 1
# 128 * 14089 ~ 1,803,460
test_iteration = 10
def center_loss(features, label, alfa, nrof_classes):
"""Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
(http://ydwen.github.io/papers/WenECCV16.pdf)
"""
nrof_features = features.get_shape()[1]
centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
initializer=tf.constant_initializer(0), trainable=False)
label = tf.reshape(label, [-1])
centers_batch = tf.gather(centers, label)
diff = (1 - alfa) * (centers_batch - features)
centers = tf.scatter_sub(centers, label, diff)
# centers = tf.nn.l2_normalize(centers, 1, 1e-10, name='centers_norm')
loss = tf.reduce_mean(tf.square(features - centers_batch))
return loss, centers
def focal_loss(onehot_labels, cls_preds,
alpha=0.25, gamma=2.0, name=None, scope=None):
"""Compute softmax focal loss between logits and onehot labels
| tensorflow.constant_initializer | 1,955 |
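A minimal NumPy sketch of the center-loss update above: class centers are pulled toward the batch features of their class with step `(1 - alfa)`, and the loss is the mean squared distance of features to their current centers. The sizes and the `alfa` value below are illustrative assumptions.

```python
import numpy as np

def center_loss_step(features, labels, centers, alfa=0.95):
    centers_batch = centers[labels]                       # gather per-example centers
    loss = np.mean(np.square(features - centers_batch))   # squared distance to centers
    diff = (1 - alfa) * (centers_batch - features)
    np.subtract.at(centers, labels, diff)                 # the tf.scatter_sub step
    return loss, centers

feats = np.array([[1.0, 0.0], [0.0, 1.0]])
labels = np.array([0, 1])
centers = np.zeros((3, 2))                                # 3 classes, 2-D features
print(center_loss_step(feats, labels, centers))
```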
import tensorflow as tf
))
assignments.append(tf.scatter_update(ref=self.terminal_memory, indices=indices, updates=terminal))
assignments.append(tf.scatter_update(ref=self.reward_memory, indices=indices, updates=reward))
# Add episode indices.
with tf.control_dependencies(control_inputs=assignments):
num_episodes = tf.count_nonzero(input_tensor=terminal, axis=0, dtype=util.tf_dtype('int'))
assignment = tf.assign(
ref=self.episode_indices[self.episode_count: self.episode_count + num_episodes],
value=tf.boolean_mask(tensor=indices, mask=terminal)
)
# Increment episode count.
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign_add(ref=self.episode_count, value=num_episodes)
# Increment memory index.
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign(
ref=self.episode_indices[-1],
value=tf.where(self.memory_index + num_instances > self.capacity,
self.episode_indices[self.episode_count - 1], self.capacity - 1)
)
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign(ref=self.memory_index, value=((self.memory_index + num_instances) % self.capacity))
with tf.control_dependencies(control_inputs=(assignment,)):
| tensorflow.assign_add | 1,956 |
import tensorflow as tf
if self.is_training:
curr_vs = tf.get_variable_scope()
| tensorflow.get_variable_scope | 1,957 |
import tensorflow as tf
# see keras_utils.py: activity_and_contractive_regularizers
ac_regularizers = tf.get_collection(AC_REGULARIZATION)
custom_regularizers = tf.get_collection(CUSTOM_REGULARIZATION)
| tensorflow.get_collection | 1,958 |
import tensorflow as tf
weights_ = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos_, axis=1)), depth=attn_length))
weighted_average_ = tf.reduce_sum(tf.expand_dims(weights_, axis=2) * hidden_states, axis=1)
weighted_average.append(weighted_average_)
weighted_average = tf.concat(weighted_average, axis=1)
weighted_average = dense(weighted_average, encoder.attn_size)
elif pos is not None:
weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
else:
# Local attention of Luong et al. (http://arxiv.org/abs/1508.04025)
wp = get_variable('Wp', [state_size, state_size])
vp = get_variable('vp', [state_size, 1])
pos = tf.nn.sigmoid(tf.matmul(tf.nn.tanh(tf.matmul(state, wp)), vp))
pos = tf.floor(encoder_input_length * pos)
pos = tf.reshape(pos, [-1, 1])
pos = tf.minimum(pos, encoder_input_length - 1)
idx = tf.tile(tf.to_float(tf.range(attn_length)), tf.stack([batch_size]))
idx = tf.reshape(idx, [-1, attn_length])
low = pos - encoder.attn_window_size
high = pos + encoder.attn_window_size
mlow = tf.to_float(idx < low)
mhigh = tf.to_float(idx > high)
m = mlow + mhigh
| tensorflow.matmul | 1,959 |
import tensorflow as tf
inter = tf.reshape(values, [self.resolution,
self.resolution,
self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, 0].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, 0])
values = tf.math.sign(tf.nn.relu(interpolated + self.tol))
inter = tf.reshape(values, [self.resolution,
self.resolution,
self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, 1].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, 1])
values = sdf_values
| tensorflow.reshape | 1,960 |
import tensorflow as tf
Returns
-------
A tensor.
"""
x = (0.2 * x) + 0.5
zero = _to_tensor(0., x.dtype.base_dtype)
one = _to_tensor(1., x.dtype.base_dtype)
x = tf.clip_by_value(x, zero, one)
return x
def sqrt(x):
"""Element-wise square root.
Parameters
| tensorflow.clip_by_value | 1,961 |
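A minimal NumPy sketch of the hard-sigmoid above: a piecewise-linear approximation of the logistic function, `0.2 * x + 0.5` clipped to [0, 1].

```python
import numpy as np

def hard_sigmoid(x):
    return np.clip(0.2 * x + 0.5, 0.0, 1.0)

print(hard_sigmoid(np.array([-5.0, -1.0, 0.0, 1.0, 5.0])))
# [0.  0.3 0.5 0.7 1. ]
```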
import tensorflow as tf
self.assertAllEqual([2], batch_size0)
self.assertAllEqual([5], result1)
self.assertAllEqual([2], batch_size1)
def test_many_small(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(a, b):
return a + b
outputs = []
for i in xrange(200):
outputs.append(f(tf.fill([1, 5], i), tf.fill([1, 5], i)))
tf.train.start_queue_runners()
tp = pool.ThreadPool(10)
futures = []
for output in outputs:
futures.append(tp.apply_async(session.run, [output]))
for i, future in enumerate(futures):
result = future.get()
self.assertAllEqual([[i * 2] * 5], result)
def test_input_batch_size_should_be_one(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
| tensorflow.train.start_queue_runners | 1,962 |
import tensorflow as tf
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
[-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
| tensorflow.argmax | 1,963 |
import tensorflow as tf
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
def _testMultiSaverCollectionRestore(self):
test_dir = os.path.join(self.get_temp_dir(), "saver_collection")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=tf.Graph()) as sess:
# Imports from meta_graph.
tf.train.import_meta_graph(filename)
# Retrieves SAVERS collection. Verifies there are 2 entries.
savers = tf.get_collection("savers")
self.assertEqual(2, len(savers))
# Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1.
new_saver0 = savers[0]
| tensorflow.Graph | 1,964 |
import tensorflow as tf
gradvars = []
with tf.device('/cpu:0'):
# Average losses and gradients
with tf.name_scope('tower_averaging'):
all_grads = {}
for grad, var in itertools.chain(*tower_gradvars):
if grad is not None:
all_grads.setdefault(var, []).append(grad)
for var, grads in all_grads.items():
if len(grads) == 1:
avg_grad = grads[0]
else:
avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
gradvars.append((avg_grad, var))
self.loss = tf.reduce_mean(tower_losses)
tf.summary.scalar('loss', self.loss)
# Create optimizer ops
self.global_step = tf.Variable(0, trainable=False, name='global_step')
opt = tf.train.RMSPropOptimizer(self.config['learning_rate'])
with tf.control_dependencies(update_ops):
self.trainer = opt.apply_gradients(
gradvars, global_step=self.global_step)
def _eval_graph(self, data):
tower_metrics = self._gpu_tower(data, Mode.EVAL)
with tf.device('/cpu:0'):
self.metrics = {m: tf.reduce_mean(tf.stack([t[m] for t in tower_metrics]))
for m in tower_metrics[0]}
| tensorflow.summary.scalar | 1,965 |
import tensorflow as tf
surr2 = batch['advantage'] * tf.clip_by_value(ratio, 1 - epsilon_decay, 1 + epsilon_decay)
loss_pg = - 2.0 * tf.reduce_mean(tf.minimum(surr1, surr2))
loss_vf = 0.5 * tf.reduce_mean(tf.square(batch['rewards'] - self.vf))
loss_entropy = - 0.01 * tf.reduce_mean(pi.entropy())
loss = loss_pg + loss_vf + loss_entropy
opt = tf.train.AdamOptimizer(self.LR)
self.train_op = opt.minimize(loss, global_step=self.global_step, var_list=pi_params + vf_params)
self.pi_new_params = [oldp.assign(p) for p, oldp in zip(pi_params, pi_old_params)]
self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)]
self.sess.run(tf.global_variables_initializer())
# Tensorboard
if summary_dir is not None:
self.writer = tf.summary.FileWriter(summary_dir)
tf.summary.scalar('Loss/Policy', loss_pg)
tf.summary.scalar('Loss/Value', loss_vf)
tf.summary.scalar('Loss/Entropy', loss_entropy)
tf.summary.scalar('Loss/Total', loss)
tf.summary.scalar('Var/Epsilon', epsilon_decay)
tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode()))
tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev()))
tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf))
self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))
# AC net
def build_anet(self, state_in, name, reuse=False, batch_size=64):
reg = None
with tf.variable_scope(name, reuse=reuse):
layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
| tensorflow.summary.scalar | 1,966 |
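A minimal NumPy sketch of the clipped-surrogate policy term assembled above (without the snippet's extra factor of 2.0); `epsilon` stands in for `epsilon_decay`, and the toy ratios and advantages are illustrative.

```python
import numpy as np

def clipped_surrogate(ratio, advantage, epsilon=0.2):
    surr1 = advantage * ratio
    surr2 = advantage * np.clip(ratio, 1 - epsilon, 1 + epsilon)
    # Pessimistic (element-wise minimum) objective, negated for minimization.
    return -np.mean(np.minimum(surr1, surr2))

ratio = np.array([0.8, 1.3, 1.0])        # pi_new / pi_old
advantage = np.array([1.0, -0.5, 2.0])
print(clipped_surrogate(ratio, advantage))
```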
import tensorflow as tf
self.dropout = dropout
self.V = len(word_to_idx)
self.L = dim_feature[0]
self.D = dim_feature[1]
self.M = dim_embed
self.H = dim_hidden
self.T = n_time_step
self._start = word_to_idx['<START>']
self._null = word_to_idx['<NULL>']
self.weight_initializer = tf.contrib.layers.xavier_initializer()
self.const_initializer = tf.constant_initializer(0.0)
self.emb_initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0)
# Place holder for features and captions
self.features = tf.placeholder(tf.float32, [None, self.L, self.D])
self.captions = tf.placeholder(tf.int32, [None, self.T + 1])
def _get_initial_lstm(self, features):
with tf.variable_scope('initial_lstm'):
features_mean = tf.reduce_mean(features, 1)
w_h = tf.get_variable('w_h', [self.D, self.H], initializer=self.weight_initializer)
b_h = tf.get_variable('b_h', [self.H], initializer=self.const_initializer)
h = tf.nn.tanh(tf.matmul(features_mean, w_h) + b_h)
w_c = tf.get_variable('w_c', [self.D, self.H], initializer=self.weight_initializer)
b_c = tf.get_variable('b_c', [self.H], initializer=self.const_initializer)
c = tf.nn.tanh(tf.matmul(features_mean, w_c) + b_c)
return c, h
| tensorflow.placeholder | 1,967 |
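A minimal NumPy sketch of `_get_initial_lstm` above: features are averaged over the L spatial locations and projected through two tanh layers to produce the initial LSTM memory `c` and hidden state `h`. The sizes below are illustrative.

```python
import numpy as np

L, D, H = 196, 512, 1024                       # illustrative sizes
features = np.random.randn(3, L, D)            # [batch, L, D]
w_h, b_h = np.random.randn(D, H) * 0.01, np.zeros(H)
w_c, b_c = np.random.randn(D, H) * 0.01, np.zeros(H)

features_mean = features.mean(axis=1)          # [batch, D]
h = np.tanh(features_mean @ w_h + b_h)         # initial hidden state
c = np.tanh(features_mean @ w_c + b_c)         # initial memory cell
print(c.shape, h.shape)                        # (3, 1024) (3, 1024)
```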
import tensorflow as tf
# shape (batch_size, attn_size).
context_vector = tf.reduce_sum(tf.expand_dims(attn_dist, axis=-1) * encoder_states, axis=1) # [batch_size, encoder_dim]
return context_vector, attn_dist, coverage
def embedding_lookup(self, inputs):
'''
inputs: list of [batch_size], int32
'''
if type(inputs) is list:
return [tf.nn.embedding_lookup(self.embedding, x) for x in inputs]
else:
return tf.nn.embedding_lookup(self.embedding, inputs)
def one_step_decoder(self, state_t_1, context_t_1, coverage_t_1, word_t, encoder_states, encoder_features,
passage_word_idx, passage_mask, v, w_c, vocab):
'''
state_t_1: Tuple of [batch_size, gen_hidden_size]
context_t_1: [batch_size, encoder_dim]
coverage_t_1: [batch_size, passage_len]
word_t: [batch_size, word_dim]
encoder_states: [batch_size, passage_len, encoder_dim]
encoder_features: [batch_size,attn_length,attention_vec_size]
| tensorflow.nn.embedding_lookup | 1,968 |
import tensorflow as tf
# noise = dist * self.epsilon_sphere_noise()
# tf.stop_gradient(noise)
noise = tf.random_normal(self.model.encode.get_shape().as_list()) * FLAGS.epsilon
noisy_encoding = noise + self.models[1].encode
tf.stop_gradient(noisy_encoding) # or maybe here, who knows
noisy_decode = interpreter.build_decoder(noisy_encoding, model.config, reuse=True, masks=model.mask_list)
loss = interpreter.l2_loss(noisy_decode, self.raw_targets[1], alpha=FLAGS.beta)
self.models += [noisy_decode]
return loss
def _tensor_to_image(self, net):
with tf.name_scope('to_image'):
if FLAGS.new_blur:
net = net[..., :self.batch_shape[-1]]
net = tf.nn.relu(net)
net = tf.cast(net <= 1, net.dtype) * net * 255
net = tf.cast(net, tf.uint8)
return net
def _image_to_tensor(self, image):
with tf.name_scope('args_transform'):
net = tf.cast(image, tf.float32) / 255.
if FLAGS.new_blur:
net = _blur_expand(net)
FLAGS.blur = 0.
return net
def _init_optimizer(self):
self.loss_total = tf.add_n(self.losses, 'loss_total')
self.optimizer = self.optimizer_constructor(learning_rate=FLAGS.learning_rate)
| tensorflow.cast | 1,969 |
import tensorflow as tf
# By default the name used for "v2" will be "v1" and raise an error.
with self.assertRaisesRegexp(ValueError, "same name: v1"):
tf.train.Saver([v0, v1, v2])
# The names are different and will work.
tf.train.Saver({"vee1": v1, "other": [v2]})
def testBasicsWithListOfVariables(self):
save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
with self.test_session(graph=tf.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(20.0, name="v1")
save = tf.train.Saver([v0, v1])
tf.initialize_all_variables().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the variables
# have not been initialized either.
| tensorflow.Variable | 1,970 |
import tensorflow as tf
"""
Center the reward by computing a baseline reward over decoder states.
:param decoder_states: internal states of the decoder, tensor of shape (batch_size, time_steps, state_size)
:param reward: reward for each time step, tensor of shape (batch_size, time_steps)
:return: reward - computed baseline, tensor of shape (batch_size, time_steps)
"""
# batch_size = tf.shape(decoder_states)[0]
# time_steps = tf.shape(decoder_states)[1]
# state_size = decoder_states.get_shape()[2]
# states = tf.reshape(decoder_states, shape=tf.stack([batch_size * time_steps, state_size]))
baseline = dense(tf.stop_gradient(decoder_states), units=1, activation=None, name='reward_baseline',
kernel_initializer=tf.constant_initializer(0.01))
baseline = tf.squeeze(baseline, axis=2)
# baseline = tf.reshape(baseline, shape=tf.stack([batch_size, time_steps]))
return reward - baseline
def baseline_loss(rewards, weights, average_across_timesteps=False, average_across_batch=True):
"""
:param rewards: tensor of shape (batch_size, time_steps)
:param weights: tensor of shape (batch_size, time_steps)
"""
batch_size = tf.shape(rewards)[0]
| tensorflow.constant_initializer | 1,971 |
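A minimal NumPy sketch of the baseline subtraction above: a linear layer (initialized near zero, bias omitted) maps stop-gradient decoder states to one scalar per time step, and the centered reward is `reward - baseline`. Shapes are illustrative.

```python
import numpy as np

batch_size, time_steps, state_size = 2, 4, 8
decoder_states = np.random.randn(batch_size, time_steps, state_size)
reward = np.random.randn(batch_size, time_steps)

w = np.full((state_size, 1), 0.01)                # constant_initializer(0.01)
baseline = (decoder_states @ w).squeeze(axis=2)   # [batch_size, time_steps]
centered_reward = reward - baseline
print(centered_reward.shape)                      # (2, 4)
```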
import tensorflow as tf
decoded_u = tf.sparse.SparseTensor(indices_u[0], values_u[0], shape_u[0])
decoded_u = tf.cast(tf.sparse.to_dense(decoded_u), tf.int32)
# Adjust event vals according to representation
decoded = tf.where(tf.not_equal(decoded, 0), decoded+shift, decoded)
decoded_u = tf.where(tf.not_equal(decoded_u, 0), decoded_u+shift, decoded_u)
# Set default vals
decoded = tf.where(tf.equal(decoded, 0), def_val, decoded)
decoded_u = tf.where(tf.equal(decoded_u, 0), def_val, decoded_u)
# We know the shape of decoded_u, and the first dim of decoded
decoded_u.set_shape([batch_size, seq_length])
decoded = tf.reshape(decoded, [batch_size, -1])
return decoded_u, decoded
| tensorflow.reshape | 1,972 |
import tensorflow as tf
pt_0 = rot @ tf.reshape([min_x, min_z], [2, 1]) + translation
pt_1 = rot @ tf.reshape([min_x, max_z], [2, 1]) + translation
pt_2 = rot @ tf.reshape([max_x, min_z], [2, 1]) + translation
pt_3 = rot @ tf.reshape([max_x, max_z], [2, 1]) + translation
for pt in [pt_0, pt_1, pt_2, pt_3]:
| tensorflow.reshape | 1,973 |
import tensorflow as tf
def atrous_discriminator(self, X, reuse):
def atrous_convs(net, scope, rate=None, depth=256, reuse=None):
"""
ASPP layer: a 1×1 convolution in parallel with plain and atrous 3×3 convolutions
"""
with tf.variable_scope(scope, reuse=reuse):
pyram_1x1_0 = self.conv('_1x1', net, depth, size=1, stride=1, padding="SAME")
pyram_3x3_1 = self.conv('_3x3', net, depth, size=3, stride=1, padding="SAME")
pyram_3x3_2 = self.conv('_atr_3x3_1', net, depth, size=3, stride=1, padding="SAME", dilation=rate[0])
pyram_3x3_3 = self.conv('_atr_3x3_2', net, depth, size=3, stride=1, padding="SAME", dilation=rate[1])
# pyram_3x3_4 = self.z_conv('_atr_3x3_3', net, depth/2, size=3, stride=1, padding="SAME", dilation=rate[2])
net = tf.concat((pyram_1x1_0, pyram_3x3_1, pyram_3x3_2, pyram_3x3_3), axis=3, name="concat")
net = self.conv('_1x1_output', net, depth, size=1, stride=1, padding="SAME")
# pyram_1x1_0 = self.conv('_1x1', net, depth, size=1, stride=1, padding="SAME")
# pyram_3x3_1 = self.conv('_3x3', net, depth/2, size=3, stride=1, padding="SAME")
# pyram_3x3_2 = self.conv('_atr_3x3_1', net, depth/2, size=3, stride=1, padding="SAME", dilation=rate[0])
# pyram_3x3_3 = self.conv('_atr_3x3_2', net, depth/2, size=3, stride=1, padding="SAME", dilation=rate[1])
# # pyram_3x3_4 = self.conv('_atr_3x3_3', net, depth/2, size=3, stride=1, padding="SAME", dilation=rate[2])
# net = tf.concat((pyram_1x1_0, pyram_3x3_1, pyram_3x3_2, pyram_3x3_3), axis=3, name="concat")
# net = self.conv('_1x1_output', net, depth, size=1, stride=1, padding="SAME")
| tensorflow.concat | 1,974 |
import tensorflow as tf
if config.decay is not None:
self.var_ema = tf.train.ExponentialMovingAverage(config.decay)
ema_op = self.var_ema.apply(tf.trainable_variables())
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
| tensorflow.trainable_variables | 1,975 |
import tensorflow as tf
def __init__(self,name,format='NCHW',epsilon=1e-5) :
assert(format=='NCHW' or format=='NHWC')
self.axis = [2,3] if format == 'NCHW' else [1,2]
self.epsilon = epsilon
self.name = name
def __call__(self,input_var) :
mean, var = tf.nn.moments(input_var, self.axis, keep_dims=True)
return (input_var - mean) / tf.sqrt(var+self.epsilon)
class BatchNorm(object):
def __init__(self,name,dims,axis=1,epsilon=1e-3,momentum=0.999,center=True,scale=True) :
self.momentum = momentum
self.epsilon = epsilon
self.axis = axis
self.center=center
self.scale=scale
| tensorflow.sqrt | 1,976 |
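A minimal NumPy sketch of the instance-norm `__call__` above for NCHW inputs: per-sample, per-channel mean and variance over the spatial axes, then standardization.

```python
import numpy as np

def instance_norm(x, eps=1e-5):
    # NCHW: reduce over the spatial axes (2, 3), keeping batch and channel.
    mean = x.mean(axis=(2, 3), keepdims=True)
    var = x.var(axis=(2, 3), keepdims=True)
    return (x - mean) / np.sqrt(var + eps)

x = np.random.randn(2, 4, 8, 8)                       # NCHW
print(instance_norm(x).mean(axis=(2, 3)).round(6))    # ~0 per sample/channel
```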
import tensorflow as tf
.Conv2D('conv1', 20, 5, padding='VALID')
.FullyConnected('fc1', out_dim=32)
.FullyConnected('fct', out_dim=6, nl=tf.identity,
W_init=tf.constant_initializer(),
b_init=tf.constant_initializer([1, 0, HALF_DIFF, 0, 1, HALF_DIFF]))())
# output 6 parameters for affine transformation
| tensorflow.constant_initializer | 1,977 |
import tensorflow as tf
counts = collections.Counter()
with tf.gfile.Open(filename) as file_:
for line in file_:
counts.update(_split_string(line))
alphabet = [k for (k, _) in counts.most_common(max_size)]
alphabet.sort()
return np.asarray(alphabet, dtype=np.object)
chars, = tf.py_func(_unique_chars, [filename], [tf.string])
char_to_id = tf.contrib.lookup.index_table_from_tensor(
chars, num_oov_buckets=num_oov_buckets)
id_to_char = tf.contrib.lookup.index_to_string_table_from_tensor(chars, " ")
return char_to_id, id_to_char
def characters(filename, batch_size, sequence_size):
| tensorflow.py_func | 1,978 |
import tensorflow as tf
:param name:
:param inputdata:
:return:
"""
return tf.nn.relu(features=inputdata, name=name)
@staticmethod
def sigmoid(inputdata, name=None):
"""
:param name:
:param inputdata:
:return:
"""
return tf.nn.sigmoid(x=inputdata, name=name)
@staticmethod
def maxpooling(inputdata, kernel_size, stride=None, padding='VALID',
data_format='NHWC', name=None):
"""
:param name:
:param inputdata:
:param kernel_size:
:param stride:
:param padding:
:param data_format:
:return:
| tensorflow.nn.sigmoid | 1,979 |
import tensorflow as tf
tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
unorm_w = tf.exp((tgt_flat1 + tgt_flat2)/temp)
loss = unorm_w * loss / (tf.reduce_sum(unorm_w))
a = tf.print(tf.reduce_sum(unorm_w))
with tf.control_dependencies([a]):
| tensorflow.exp | 1,980 |
import tensorflow as tf
categorical: (Optional) A `bool` that treats `x` as discrete values if true.
name: (Optional) A name for this operation.
Returns:
counts: The histogram, as counts per bin.
boundaries: A `Tensor` used to build the histogram representing boundaries.
"""
with tf.compat.v1.name_scope(name, 'histogram'):
x = tf.reshape(tf_utils.get_values(x), [-1])
if categorical:
x_dtype = x.dtype
x = x if x_dtype == tf.string else tf.strings.as_string(x)
elements, counts = count_per_key(x)
if x_dtype != elements.dtype:
elements = tf.strings.to_number(elements, tf.int64)
return counts, elements
if boundaries is None:
boundaries = tf.range(11, dtype=tf.float32) / 10.0
elif isinstance(boundaries, int) or (isinstance(boundaries, tf.Tensor) and
boundaries.get_shape().ndims == 0):
min_value, max_value = _min_and_max(x, True)
boundaries = tf.linspace(
| tensorflow.strings.as_string | 1,981 |
from tensorflow.python.ops import control_flow_ops
if update_ops is None:
update_ops = global_update_ops
else:
update_ops = set(update_ops)
# Make sure update_ops are computed before total_loss.
if update_ops:
with tf.control_dependencies(update_ops):
barrier = tf.no_op(name='update_barrier')
self.d_losses[-1] = control_flow_ops.with_dependencies([barrier], self.d_losses[-1])
self.g_losses[-1] = control_flow_ops.with_dependencies([barrier], self.g_losses[-1])
self.d_loss_real = control_flow_ops.with_dependencies([barrier], self.d_loss_real)
self.d_loss_fake = control_flow_ops.with_dependencies([barrier], self.d_loss_fake)
self.d_loss_class = control_flow_ops.with_dependencies([barrier], self.d_loss_class)
t_vars = self._get_vars_semi_supervised()
if self.clip_by_global_norm:
self.capped_d_grads = self._clip_grad_global_norms(
t_vars['d_vars'], self.d_losses[-1], d_optimizer, gradient_noise_scale=0.0)
self.capped_g_grads = self._clip_grad_global_norms(
| tensorflow.python.ops.control_flow_ops.with_dependencies | 1,982 |
import tensorflow as tf
- Output class tensor of the model.
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random_normal([self.num_units, self.n_classes]))
out_bias = tf.Variable(tf.random_normal([self.n_classes]))
# input image placeholder
x = tf.placeholder(
"float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")
| tensorflow.random_normal | 1,983 |
import tensorflow as tf
Convenience functions; inputs and outputs should be tensors.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import seq2seq
_phase = tf.Variable(False, name='phase', trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
_phase_train = _phase.assign(True)
_phase_infer = _phase.assign(False)
# TODO: move to ops
def _rank(x):
return len(x.get_shape())
| tensorflow.Variable | 1,984 |
import tensorflow as tf
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
# add sequence mask for:
# 1. random shuffle lm modeling---xlnet with random shuffled input
# 2. left2right and right2left language modeling
# 3. conditional generation
def generate_seq2seq_mask(attention_mask, mask_sequence, seq_type, **kargs):
if seq_type == 'seq2seq':
if mask_sequence is not None:
| tensorflow.range | 1,985 |
import tensorflow as tf
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
| tensorflow.layers.dense | 1,986 |
from tensorflow.python.ops import math_ops
predictions, labels = tensor_util.remove_squeezable_dimensions(
predictions, labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
absolute_errors = math_ops.abs(predictions - labels)
return streaming_mean(absolute_errors, weights, metrics_collections,
updates_collections, name or 'mean_absolute_error')
| tensorflow.python.ops.math_ops.abs | 1,987 |
import tensorflow as tf
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01),
trainable=False)
| tensorflow.constant_initializer | 1,988 |
import tensorflow as tf
'softmax_w', [hidden_size, vocab_size], dtype=tf.float32)
softmax_b = tf.get_variable('softmax_b', [vocab_size], dtype=tf.float32)
logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
logits = tf.reshape(logits, [self.batch_size, self.num_steps, vocab_size])
loss = tf.contrib.seq2seq.sequence_loss(
| tensorflow.reshape | 1,989 |
import tensorflow as tf
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = tf.Variable(111, name="v0")
with sess.graph.device("/cpu:1"):
v1 = tf.Variable(222, name="v1")
save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True, max_to_keep=2)
tf.initialize_all_variables().run()
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
| tensorflow.Variable | 1,990 |
import tensorflow as tf
initializer=tf.random_normal_initializer())
alpha_std = tf.exp(alpha_logstd)
# Compute epsilon from {n_samples} standard Gaussian
# epsilon = tf.random_normal([n_samples, 1, n_out*2, n_out])
epsilon = tf.random_uniform([n_samples, 1, n_basis, n_out])
hyp_params = tf.get_variable('hyp_params_layer'+str(h),
shape=[2],
initializer=tf.random_normal_initializer())
l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1])
epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2
# Compute A_{h+1}
A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1])
# Compute z_{h}A_{h+1}
Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5)
Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5)
# Compute u_{h+1} and v_{h+1}
| tensorflow.nn.sigmoid | 1,991 |
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression
def testLinearRegression(self):
true_w = [[1.0], [-0.5], [2.0]]
true_b = [1.0]
model = linear_regression.LinearModel()
dataset = linear_regression.synthetic_dataset(
true_w, true_b, noise_level=0., batch_size=64, num_batches=40)
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
linear_regression.fit(model, dataset, optimizer, logdir=self._tmp_logdir)
| tensorflow.contrib.eager.python.examples.linear_regression.linear_regression.synthetic_dataset | 1,992 |
import tensorflow as tf
# Add a summary to track the learning rate.
summaries.append(tf.summary.scalar('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram(var.op.name, var))
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
cifar10.MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# Group all updates to into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op)
# Create a saver.
saver = tf.train.Saver(tf.global_variables())
# Build the summary operation from the last tower summaries.
| tensorflow.summary.histogram | 1,993 |
import tensorflow as tf
self._num_units,
True,
bias_initializer=self._bias_initializer,
kernel_initializer=self._kernel_initializer)
c = self._activation(self._candidate_linear([inputs, r_state]))
u = (1.0 - att_score) * u
new_h = u * state + (1 - u) * c
return new_h, new_h
def prelu(_x, scope=''):
"""parametric ReLU activation"""
with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
_alpha = tf.get_variable("prelu_"+scope, shape=_x.get_shape()[-1],
dtype=_x.dtype, initializer=tf.constant_initializer(0.1))
return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)
def calc_auc(raw_arr):
"""Summary
Args:
raw_arr (TYPE): Description
Returns:
TYPE: Description
| tensorflow.variable_scope | 1,994 |
import tensorflow as tf
'model_scope', 'xdet_resnet',
'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', 'xdet_resnet/xdet_head, xdet_resnet/xdet_multi_path, xdet_resnet/xdet_additional_conv',#None
'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', True,
'When restoring a checkpoint would ignore missing variables.')
tf.app.flags.DEFINE_boolean(
'run_on_cloud', True,
'Wether we will train on cloud (pre-trained model will be placed in the "data_dir/cloud_checkpoint_path").')
tf.app.flags.DEFINE_string(
'cloud_checkpoint_path', 'resnet50/model.ckpt',
'The path to a checkpoint from which to fine-tune.')
FLAGS = tf.app.flags.FLAGS
| tensorflow.app.flags.DEFINE_boolean | 1,995 |
import tensorflow as tf
infer_graph = tf.Graph()
with train_graph.as_default():
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.name_scope('Train'):
train_input = DataInput(config=config, data=train_data, name='TrainInput')
with tf.variable_scope('Model', reuse=None, initializer=initializer):
m = Model(is_training=True, config=config, input_=train_input, graph=train_graph)
tf.summary.scalar('Training Loss', m.cost)
| tensorflow.name_scope | 1,996 |
import tensorflow as tf
geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
tgt_larg = tf.where(geq, tgt1, tgt2)
tgt_small = tf.where(geq, tgt2, tgt1)
pred_larg = tf.where(geq, pred1, pred2)
| tensorflow.where | 1,997 |
import tensorflow as tf
updated_ema_means = updated_ema_means / tf.expand_dims(
updated_ema_count, axis=-1)
with tf.control_dependencies([e_loss]):
update_means = tf.assign(self.means, updated_ema_means)
with tf.control_dependencies([update_means]):
loss += self.hparams.beta * e_loss
else:
# Use a gradient based loss for learning the cluster centers
loss += q_loss + self.hparams.beta * e_loss
| tensorflow.control_dependencies | 1,998 |
import tensorflow as tf
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_thres_expr = param_noise_threshold.assign(
tf.cond(update_param_noise_threshold_ph >= 0, lambda: update_param_noise_threshold_ph,
lambda: param_noise_threshold))
# Put everything together.
perturbed_deterministic_actions = tf.argmax(perturbable_policy.q_values, axis=1)
deterministic_actions = tf.argmax(policy.q_values, axis=1)
batch_size = tf.shape(policy.obs_ph)[0]
n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
perturbed_stochastic_actions = tf.where(chose_random, random_actions, perturbed_deterministic_actions)
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
perturbed_output_actions = tf.cond(stochastic_ph, lambda: perturbed_stochastic_actions,
lambda: deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="model", perturbed_scope="perturbed_model/model"),
lambda: tf.group(*[])),
tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
| tensorflow.stack | 1,999 |