seed (string, 25-2.89k chars) | seed_api (string, 14-102 chars) | index (int64, 0-14.8k) |
---|---|---|
import tensorflow as tf
tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev()))
tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf))
self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))
# AC net
def build_anet(self, state_in, name, reuse=False):
reg = tf.contrib.layers.l2_regularizer(1e-3)
with tf.variable_scope(name, reuse=reuse):
layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
# sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
sigma = tf.clip_by_value(sigma, 0.0, 1.0)
norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
| tensorflow.layers.dense | 2,500 |
import tensorflow as tf
global_step=global_step)
with tf.control_dependencies(update_ops):
train_op = minimize_op
| tensorflow.control_dependencies | 2,501 |
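The row above is only a fragment; a minimal, hypothetical sketch of the same tf.control_dependencies ordering pattern (TF 1.x graph mode; the variable names here are illustrative, not from the dataset) might look like:
import tensorflow as tf

counter = tf.Variable(0.0)
update_op = tf.assign_add(counter, 1.0)        # op that must run first
with tf.control_dependencies([update_op]):     # anything created here depends on update_op
    read_after_update = tf.identity(counter)   # evaluated only after the increment

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(read_after_update))  # 1.0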
import tensorflow as tf
if width % n_factor != 0:
raise ValueError("Width not divisible by %d." % n_factor)
res = tf.reshape(
input_,
[batch_size,
height // n_factor,
n_factor, width // n_factor,
n_factor, channels])
res = tf.transpose(res, [0, 1, 3, 5, 2, 4])
res = tf.reshape(
res,
[batch_size,
height // n_factor,
width // n_factor,
channels * n_factor * n_factor])
| tensorflow.transpose | 2,502 |
import tensorflow as tf
"""Computes listwise softmax loss with propensity weighting.
Args:
propensity: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element.
Returns:
(tf.Tensor) A tensor containing the propensity weights.
"""
propensity_list = tf.unstack(propensity, axis=1) # Compute propensity weights
pw_list = []
for i in range(len(propensity_list)):
pw_i = propensity_list[0] / propensity_list[i]
pw_list.append(pw_i)
propensity_weights = tf.stack(pw_list, axis=1)
if self.hparams.max_propensity_weight > 0:
propensity_weights = tf.clip_by_value(propensity_weights, clip_value_min=0, clip_value_max=self.hparams.max_propensity_weight)
| tensorflow.unstack | 2,503 |
import tensorflow as tf
elif z_dim == 2:
return cw_2d
elif z_dim >= 20:
return cw
else:
raise ValueError('Not defined for this latent dimension')
def cw_sampling(X, y=None):
def phi_sampling(s, D):
return tf.pow(1.0 + 4.0*s/(2.0*D-3), -0.5)
D = tf.cast(tf.shape(X)[1], tf.float32)
N = tf.cast(tf.shape(X)[0], tf.float32)
D_int = tf.cast(D, tf.int32)
N_int = tf.cast(N, tf.int32)
if y is None:
y = silverman_rule_of_thumb(N)
YDistr = tf.contrib.distributions.MultivariateNormalDiag(loc=tf.zeros(D_int, tf.float32),
| tensorflow.pow | 2,504 |
import tensorflow.contrib.slim as slim
num_gpu = len(cfgs.GPU_GROUP.strip().split(','))
global_step = slim.get_or_create_global_step()
| tensorflow.contrib.slim.get_or_create_global_step | 2,505 |
import tensorflow as tf
tf.app.flags.DEFINE_string('ws_save_path', './models_ws/model.ckpt', 'WS: model\'s save path')
tf.app.flags.DEFINE_float('ws_prune_ratio', 0.75, 'WS: target pruning ratio')
| tensorflow.app.flags.DEFINE_float | 2,506 |
import tensorflow as tf
#def rnn_cell(rnn_input, state):
#with tf.variable_scope('rnn_cell', reuse=True):
#W = tf.get_variable('W', [num_classes+state_size, state_size])
#b = tf.get_variable('b', [state_size], initializer=tf.constant_initializer(0.0))
#return tf.tanh(tf.matmul(tf.concat([rnn_input, state],1),W) + b)
#'''Add the rnn cell to the computation graph'''
#state = init_state
#rnn_outputs = []
#for rnn_input in rnn_inputs:
#state = rnn_cell(rnn_input, state) # the state is reused across loop iterations
#rnn_outputs.append(state)
#final_state = rnn_outputs[-1] # take the last state
cell = tf.contrib.rnn.BasicRNNCell(num_units=state_size)
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell, rnn_inputs, initial_state=init_state)
'''Prediction, loss, optimization'''
with tf.variable_scope('softmax'):
W = tf.get_variable('W', [state_size, num_classes])
b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
'''rnn_outputs is 3-D, so reshape it to 2-D for the matmul,
then reshape back to [batch_size, num_steps, num_classes]'''
logits = tf.reshape(tf.matmul(tf.reshape(rnn_outputs, [-1, state_size]), W) +b, \
shape=[batch_size, num_steps, num_classes])
predictions = tf.nn.softmax(logits)
y_as_list = tf.unstack(y, num=num_steps, axis=1)
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,logits=logits)
| tensorflow.nn.dynamic_rnn | 2,507 |
import tensorflow as tf
with tf.variable_scope(scope) as sc:
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_size,
num_in_channels, num_output_channels]
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
outputs = tf.nn.conv1d(inputs, kernel,
stride=stride,
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_conv1d(outputs, is_training,
bn_decay=bn_decay, scope='bn')
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
def conv2d(inputs,
num_output_channels,
| tensorflow.constant_initializer | 2,508 |
from tensorflow.python.framework import ops
with ops.op_scope([value, filter, output_shape], name,
"conv2d_transpose") as name:
value = ops.convert_to_tensor(value, name="value")
filter = ops.convert_to_tensor(filter, name="filter")
if not value.get_shape()[3].is_compatible_with(filter.get_shape()[3]):
raise ValueError(
| tensorflow.python.framework.ops.convert_to_tensor | 2,509 |
import tensorflow as tf
# sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
sigma = tf.clip_by_value(sigma, 0.0, 1.0)
norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
| tensorflow.clip_by_value | 2,510 |
import tensorflow as tf
return x
def pool(layer_name, x, kernel_size=[1,2,2,1], strides=[1,2,2,1], is_max_pool=True):
'''
Pooling op
Args:
Returns:
'''
if is_max_pool:
# May Name Conflict
x = tf.nn.max_pool(x,kernel_size,strides=strides,padding='SAME',name=layer_name)
else:
x = tf.nn.avg_pool(x,kernel_size,strides=strides,padding='SAME',name=layer_name)
return x
def pool3d(layer_name, x, kernel_size=[1,1,2,2,1], strides=[1,1,2,2,1], is_max_pool=True):
'''
Pooling 3D op
'''
if is_max_pool:
x = tf.nn.max_pool3d(x, ksize=kernel_size, strides=strides, padding='VALID', name=layer_name)
else:
x = tf.nn.avg_pool3d(x, ksize=kernel_size, strides=strides, padding='VALID', name=layer_name)
| tensorflow.nn.max_pool | 2,511 |
import tensorflow as tf
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select an action given observation.
See the top of the file for details.
"""
with tf.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
| tensorflow.variable_scope | 2,512 |
import tensorflow as tf
| tensorflow.name_scope | 2,513 |
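This row carries only the import, so a minimal sketch of tf.name_scope (TF 1.x; op names below are illustrative) may help:
import tensorflow as tf

with tf.name_scope('preprocess'):          # prefixes op names with 'preprocess/'
    a = tf.constant(1.0, name='a')         # -> 'preprocess/a'
    b = tf.constant(2.0, name='b')         # -> 'preprocess/b'
    total = tf.add(a, b, name='total')     # -> 'preprocess/total'
print(total.name)  # 'preprocess/total:0'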
import tensorflow as tf
)
logits = clip_logits(logits, self.hparams)
logits = tf.expand_dims(logits, axis=1)
value = tf.layers.dense(x, self.distributional_value_size)
return {"target_policy": logits, "target_value": value}
@registry.register_model
class FeedForwardCnnSmallCategoricalPolicyNew(PolicyBase):
"""Small cnn network with categorical output."""
def body(self, features):
observations = features["inputs"]
x = tf.transpose(observations, [0, 2, 3, 1, 4])
x_shape = common_layers.shape_list(x)
x = tf.reshape(x, x_shape[:-2] + [-1])
dropout = getattr(self.hparams, "dropout_ppo", 0.0)
with tf.variable_scope("feed_forward_cnn_small"):
x = tf.cast(x, tf.float32) / 255.0
x = tf.nn.dropout(x, rate=dropout)
x = tf.layers.conv2d(
x, 32, (4, 4), strides=(2, 2), name="conv1",
activation=common_layers.belu, padding="SAME")
x = tf.nn.dropout(x, rate=dropout)
x = tf.layers.conv2d(
x, 64, (4, 4), strides=(2, 2), name="conv2",
activation=common_layers.belu, padding="SAME")
x = tf.nn.dropout(x, rate=dropout)
x = tf.layers.conv2d(
x, 128, (4, 4), strides=(2, 2), name="conv3",
| tensorflow.reshape | 2,514 |
import tensorflow as tf
x_shape = tf.shape(logits)
logits = tf.reshape(logits, (x_shape[0], x_shape[1], x_shape[2], len(anchors), num_classes + 5))
box_xy, box_wh, obj, cls = tf.split(logits, (2, 2, 1, num_classes), axis=-1)
box_xy = tf.sigmoid(box_xy)
obj = tf.sigmoid(obj)
| tensorflow.split | 2,515 |
from tensorflow.python.ops import math_ops
false_positives = _create_local('false_positives', shape=[num_thresholds])
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
if weights is not None:
weights = math_ops.to_float(weights)
weights_tiled = array_ops.tile(array_ops.reshape(
| tensorflow.python.ops.math_ops.logical_and | 2,516 |
import tensorflow as tf
small_constant_for_finite_diff: a `float`, Small constant for finite difference method
perturb_norm_length: a `float`, Norm length of adversarial perturbation
to be optimized with validation.
Returns:
a `float` `scalar`, KL divergence.
"""
logits = tf.stop_gradient(logits)
weights = _end_of_seq_mask(labels, vocab_size)
perturbs = [_mask_by_length(tf.random_normal(shape=tf.shape(emb)), length) for emb in embedded]
for _ in range(num_power_iteration):
perturbs = [_scale_l2(d, small_constant_for_finite_diff) for d in perturbs]
d_logits = logits_from_embedding_fn([emb + d for (emb, d) in zip(embedded, perturbs)])
kl = _kl_divergence_with_logits(logits, d_logits, weights, num_classes)
perturbs = tf.gradients(
kl, perturbs, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
perturbs = [tf.stop_gradient(d) for d in perturbs]
perturbs = [_scale_l2(_mask_by_length(d, length), perturb_norm_length) for d in perturbs]
vadv_logits = logits_from_embedding_fn([emb + d for (emb, d) in zip(embedded, perturbs)])
return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)
def _mask_by_length(t, length):
maxlen = t.get_shape().as_list()[1]
mask = tf.sequence_mask(length, maxlen=maxlen)
mask = tf.expand_dims(tf.cast(mask, tf.float32), -1)
return t * mask
| tensorflow.gradients | 2,517 |
import tensorflow as tf
"""
mode = global_mode()
return tf.equal(mode, tf.estimator.ModeKeys.EVAL)
| tensorflow.equal | 2,518 |
from tensorflow.python.layers import pooling as pooling_layers
mode='VALID',
input_layer=None,
num_channels_in=None):
"""Construct a max pooling layer."""
if input_layer is None:
input_layer = self.top_layer
else:
self.top_size = num_channels_in
name = 'mpool' + str(self.counts['mpool'])
self.counts['mpool'] += 1
pool = pooling_layers.max_pooling2d(
input_layer, [k_height, k_width], [d_height, d_width],
padding=mode,
data_format=self.channel_pos,
name=name)
self.top_layer = pool
return pool
def apool(self,
k_height,
| tensorflow.python.layers.pooling.max_pooling2d | 2,519 |
import tensorflow as tf
'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', 'xdet_resnet/xdet_head, xdet_resnet/xdet_multi_path, xdet_resnet/xdet_additional_conv',#None
'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', True,
'When restoring a checkpoint would ignore missing variables.')
tf.app.flags.DEFINE_boolean(
'run_on_cloud', True,
'Whether we will train on cloud (pre-trained model will be placed in the "data_dir/cloud_checkpoint_path").')
tf.app.flags.DEFINE_string(
'cloud_checkpoint_path', 'resnet50/model.ckpt',
'The path to a checkpoint from which to fine-tune.')
FLAGS = tf.app.flags.FLAGS
def input_pipeline():
image_preprocessing_fn = lambda image_, shape_, glabels_, gbboxes_ : preprocessing_factory.get_preprocessing(
'xdet_resnet', is_training=True)(image_, glabels_, gbboxes_, out_shape=[FLAGS.train_image_size] * 2, data_format=('NCHW' if FLAGS.data_format=='channels_first' else 'NHWC'))
| tensorflow.app.flags.DEFINE_string | 2,520 |
import tensorflow as tf
if input_batch.dtype == tf.float16:
raise base.NotSupportedError(
"BatchNorm does not support `tf.float16`, insufficient "
"precision for calculating sufficient statistics.")
self._mean_shape = input_batch.get_shape().as_list()
for index in reduction_indices:
self._mean_shape[index] = 1
use_batch_stats = is_training | test_local_stats
# Use the legacy moving second moment if the flag is set.
if self._use_legacy_moving_second_moment:
tf.logging.warning(
"nn.BatchNorm `use_legacy_second_moment=True` is deprecated.")
mean, variance, second_moment = self._build_statistics_second_moment(
input_batch,
reduction_indices,
use_batch_stats)
self._build_update_ops_second_moment(mean, second_moment, is_training)
else:
mean, variance = self._build_statistics_variance(
input_batch,
reduction_indices,
use_batch_stats)
| tensorflow.logging.warning | 2,521 |
import tensorflow as tf
"""
pass
def encode(self, boxes, anchors):
"""Encode a box list relative to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
with tf.name_scope('Encode'):
return self._encode(boxes, anchors)
def decode(self, rel_codes, anchors):
"""Decode boxes that are encoded relative to an anchor collection.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
| tensorflow.name_scope | 2,522 |
import tensorflow as tf
save = tf.train.Saver({"v0": v0})
# Generates MetaGraphDef.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
# Verifies that collection with unsupported key will not be added.
tf.add_to_collection(save, 3)
save._add_collection_def(meta_graph_def, save)
self.assertEqual(len(meta_graph_def.collection_def), 0)
# Verifies that collection where item type does not match expected
# type will not be added.
tf.add_to_collection("int_collection", 3)
tf.add_to_collection("int_collection", 3.5)
save._add_collection_def(meta_graph_def, "int_collection")
self.assertEqual(len(meta_graph_def.collection_def), 0)
def _testMultiSaverCollectionSave(self):
test_dir = self._TestDir("saver_collection")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=tf.Graph()) as sess:
# Creates a graph.
v0 = tf.Variable(10.0, name="v0")
| tensorflow.add_to_collection | 2,523 |
import tensorflow as tf
1. / model_options.output_stride)
image_feature = slim.avg_pool2d(
features, [pool_height, pool_width], [pool_height, pool_width],
padding='VALID')
else:
pool_height = tf.shape(features)[1]
pool_width = tf.shape(features)[2]
image_feature = tf.reduce_mean(features, axis=[1,2])[:, tf.newaxis, tf.newaxis, :]
image_feature = slim.conv2d(
image_feature, depth, 1, scope=_IMAGE_POOLING_SCOPE)
image_feature = tf.image.resize_bilinear(
image_feature, [pool_height, pool_width], align_corners=True)
| tensorflow.shape | 2,524 |
import tensorflow as tf
w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
with tf.name_scope('v'):
# Applying fully connected layer with non-linear activation to each of the B*T timestamps;
# the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
tmp1 = tf.tensordot(facts, w1, axes=1)
| tensorflow.name_scope | 2,525 |
from tensorflow.python.lib.io import file_io
with file_io.FileIO(model_name, mode='rb') as input_f:
with file_io.FileIO("gs://deeplearningteam11/" + model_name, mode='w+') as output_f:
| tensorflow.python.lib.io.file_io.FileIO | 2,526 |
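For context, a self-contained sketch of the same FileIO copy pattern; the local filename and GCS bucket path below are placeholders, not the dataset's values:
from tensorflow.python.lib.io import file_io

model_name = 'model.h5'  # placeholder local file
with file_io.FileIO(model_name, mode='rb') as input_f:
    with file_io.FileIO('gs://example-bucket/' + model_name, mode='w+') as output_f:
        output_f.write(input_f.read())  # stream the local file into the GCS object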
import tensorflow as tf
nfilt_last = audio_nchannels
for i, ((ntime, nband, nfilt), (ptime, pband)) in enumerate(zip(cnn_filter_shapes, cnn_pool)):
layer_name = 'cnn_{}'.format(i)
with tf.variable_scope(layer_name):
filters = tf.get_variable('filters', [ntime, nband, nfilt_last, nfilt], initializer=cnn_init, dtype=dtype)
biases = tf.get_variable('biases', [nfilt], initializer=tf.constant_initializer(0.1), dtype=dtype)
if cnn_rnn_zack:
padding = 'SAME'
else:
padding = 'VALID'
conv = tf.nn.conv2d(layer_last, filters, [1, 1, 1, 1], padding=padding)
biased = tf.nn.bias_add(conv, biases)
convolved = tf.nn.relu(biased)
pool_shape = [1, ptime, pband, 1]
pooled = tf.nn.max_pool(convolved, ksize=pool_shape, strides=pool_shape, padding='SAME')
print('{}: {}'.format(layer_name, pooled.get_shape()))
export_feat_tensors[layer_name] = pooled
# TODO: CNN dropout?
layer_last = pooled
| tensorflow.nn.bias_add | 2,527 |
import tensorflow as tf
with tf.variable_scope(name):
if reuse:
tf.get_variable_scope().reuse_variables()
else:
assert tf.get_variable_scope().reuse is False
def lrelu(x, alpha,name='lrelu'):
with tf.variable_scope(name):
if reuse:
tf.get_variable_scope().reuse_variables()
else:
assert tf.get_variable_scope().reuse is False
return tf.nn.relu(x) - alpha * tf.nn.relu(-x)
def instance_norm(x,name='instance_norm'):
with tf.variable_scope(name):
if reuse:
tf.get_variable_scope().reuse_variables()
else:
assert tf.get_variable_scope().reuse is False
| tensorflow.get_variable_scope | 2,528 |
import tensorflow as tf
model_to_train = [s.strip() for s in FLAGS.model_to_train.split(',')]
for m in model_to_train:
sub_loop(keypoint_model_fn, m, detail_params[m]['model_dir'], run_config, detail_params[m]['train_epochs'], detail_params[m]['epochs_per_eval'], detail_params[m]['lr_decay_factors'], detail_params[m]['decay_boundaries'], detail_params[m]['checkpoint_path'], detail_params[m]['checkpoint_exclude_scopes'], detail_params[m]['checkpoint_model_scope'], detail_params[m]['ignore_missing_vars'])
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
# 0.04473711425469029
# blouse: 0.042138283111307795
# dress: 0.04147867224643174
# outwear: 0.04511445541161763
# skirt: 0.05388678376709799
| tensorflow.app.run | 2,529 |
import tensorflow as tf
temp_loss = tf.reduce_mean(tf.reshape(tf.losses.mean_squared_error(targets_list[-1], pred_outputs[-1], weights=1.0, loss_collection=None, reduction=tf.losses.Reduction.NONE), [cur_batch_size, config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')], -1]), axis=-1)
num_topk = config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')] // 2
gather_col = tf.nn.top_k(temp_loss, k=num_topk, sorted=True)[1]
gather_row = tf.reshape(tf.tile(tf.reshape(tf.range(cur_batch_size), [-1, 1]), [1, num_topk]), [-1, 1])
gather_indcies = tf.stop_gradient(tf.stack([gather_row, tf.reshape(gather_col, [-1, 1])], axis=-1))
select_targets = tf.gather_nd(targets_list[-1], gather_indcies)
select_heatmap = tf.gather_nd(pred_outputs[-1], gather_indcies)
| tensorflow.reshape | 2,530 |
from tensorflow.python.client import graph_util
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_in_depth = int(filter_shape[2])
filter_out_depth = int(filter_shape[3])
return ops.OpStats("weight_parameters", (filter_height * filter_width *
filter_in_depth * filter_out_depth))
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
"""Calculates the computing needed for BiasAdd."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
input_count = np.prod(input_shape.as_list())
return ops.OpStats("flops", input_count)
@ops.RegisterStatistics("BiasAdd", "weight_parameters")
def _calc_bias_add_weight_params(graph, node):
"""Calculates the on-disk weight parameters for BiasAdd."""
bias_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])
bias_shape.assert_is_fully_defined()
bias_count = np.prod(bias_shape.as_list())
return ops.OpStats("weight_parameters", bias_count)
| tensorflow.python.client.graph_util.tensor_shape_from_node_def_name | 2,531 |
import tensorflow as tf
input0_tensor = tf.get_default_graph().get_tensor_by_name(
"TENSOR_INPUT0:0")
input1_tensor = tf.get_default_graph().get_tensor_by_name(
"TENSOR_INPUT1:0")
output0_tensor = tf.get_default_graph().get_tensor_by_name(
"TENSOR_OUTPUT0:0")
output1_tensor = tf.get_default_graph().get_tensor_by_name(
"TENSOR_OUTPUT1:0")
| tensorflow.get_default_graph | 2,532 |
import tensorflow as tf
tsv_path = os.path.join(FLAGS.logdir, 'metadata.tsv')
self.embedding_test_ph = tf.placeholder(tf.float32, embedding_shape, name='embedding')
self.embedding_test = tf.Variable(tf.random_normal(embedding_shape), name='test_embedding', trainable=False)
self.embedding_assign = self.embedding_test.assign(self.embedding_test_ph)
self.embedding_saver = tf.train.Saver(var_list=[self.embedding_test])
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = self.embedding_test.name
| tensorflow.train.Saver | 2,533 |
import tensorflow as tf
config = tf.ConfigProto()
if FLAGS.xla:
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
if FLAGS.use_hvd:
config.gpu_options.visible_device_list = str(hvd.local_rank())
config.gpu_options.allow_growth=True
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.output_dir,
keep_checkpoint_max=FLAGS.keep_checkpoint_max,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
log_step_count_steps=FLAGS.hooking_frequence,
session_config=config)
| tensorflow.estimator.RunConfig | 2,534 |
import tensorflow as tf
# In[15]:
# Create the input to the network. This is a 4-dimensional tensor!
X = tf.placeholder(name='X', shape=[None,data_x.shape[1], data_x.shape[2], data_x.shape[3]], dtype=tf.float32)
# Create the output to the network. This is our one hot encoding of 2 possible values (TODO)!
Y = tf.placeholder(name='Y', shape=[None,data_y.shape[1]], dtype=tf.float32)
| tensorflow.placeholder | 2,535 |
import tensorflow as tf
self.fc1 = tf.nn.relu(out)
# fc2
with tf.variable_scope('fc2'):
w = tf.get_variable('w', [self.fc1.get_shape()[1], 2048], initializer=he_normal,
regularizer=regularizer)
b = tf.get_variable('b', [2048], initializer=tf.constant_initializer(1.0))
out = tf.matmul(self.fc1, w) + b
self.fc2 = tf.nn.relu(out)
# fc3
with tf.variable_scope('fc3'):
w = tf.get_variable('w', [self.fc2.get_shape()[1], num_classes], initializer=initializer,
regularizer=regularizer)
b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(1.0))
self.fc3 = tf.matmul(self.fc2, w) + b
# Calculate Mean cross-entropy loss
with tf.name_scope("loss"):
self.predictions = tf.argmax(self.fc3, 1, name="predictions")
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.fc3, labels=self.input_y)
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self.loss = tf.reduce_mean(losses) + sum(regularization_losses)
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
| tensorflow.constant_initializer | 2,536 |
import tensorflow as tf
return a + b
outputs = []
for _ in xrange(1000):
outputs.append(f(tf.ones([1, 100000]), tf.ones([1, 100000])))
op_to_benchmark = tf.group(*outputs)
tf.train.start_queue_runners()
| tensorflow.ones | 2,537 |
import tensorflow as tf
networks._generator_alpha(block_id, tf.constant(1.2, tf.float32)))
for block_id in range(1, 5)
]
self.assertArrayNear(alpha_fixed_block_id, [0, 0.2, 1, 0.8, 0, 0, 0],
1.0e-6)
self.assertArrayNear(alpha_fixed_progress, [0, 0.8, 0.2, 0], 1.0e-6)
def test_discriminator_alpha(self):
with self.test_session(use_gpu=True) as sess:
alpha_fixed_block_id = [
sess.run(
networks._discriminator_alpha(2, tf.constant(
progress, tf.float32)))
for progress in [0, 0.2, 1, 1.2, 2, 2.2, 3]
]
alpha_fixed_progress = [
sess.run(
networks._discriminator_alpha(block_id,
tf.constant(1.2, tf.float32)))
for block_id in range(1, 5)
]
self.assertArrayNear(alpha_fixed_block_id, [1, 1, 1, 0.8, 0, 0, 0], 1.0e-6)
| tensorflow.constant | 2,538 |
import tensorflow as tf
return
if not color_name:
return
_, chars, length = parse(color_name)
with tf.device(device):
(chars, length) = (tf.identity(chars), tf.identity(length))
chars = tf.expand_dims(chars, 0)
length = tf.expand_dims(length, 0)
preds = tf.unstack(model((chars, length), training=False)[0])
# Predictions cannot be negative, as they are generated by a ReLU layer;
# they may, however, be greater than 1.
clipped_preds = tuple(min(float(p), 1.0) for p in preds)
rgb = tuple(int(p * 255) for p in clipped_preds)
| tensorflow.expand_dims | 2,539 |
import tensorflow as tf
return output_spec
return model_fn
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
| tensorflow.variable_scope | 2,540 |
import tensorflow as tf
i_z0_y1_x1 = tf.gather(im_flat, idx_z0_y1_x1)
i_z1_y0_x0 = tf.gather(im_flat, idx_z1_y0_x0)
i_z1_y0_x1 = tf.gather(im_flat, idx_z1_y0_x1)
i_z1_y1_x0 = tf.gather(im_flat, idx_z1_y1_x0)
i_z1_y1_x1 = tf.gather(im_flat, idx_z1_y1_x1)
# Finally calculate interpolated values.
x0_f = tf.to_float(x0)
x1_f = tf.to_float(x1)
y0_f = tf.to_float(y0)
y1_f = tf.to_float(y1)
z0_f = tf.to_float(z0)
z1_f = tf.to_float(z1)
# Check the out-of-boundary case.
x0_valid = tf.to_float(
tf.less_equal(x0, max_x) & tf.greater_equal(x0, 0))
x1_valid = tf.to_float(
tf.less_equal(x1, max_x) & tf.greater_equal(x1, 0))
y0_valid = tf.to_float(
tf.less_equal(y0, max_y) & tf.greater_equal(y0, 0))
y1_valid = tf.to_float(
tf.less_equal(y1, max_y) & tf.greater_equal(y1, 0))
z0_valid = tf.to_float(
tf.less_equal(z0, max_z) & tf.greater_equal(z0, 0))
z1_valid = tf.to_float(
tf.less_equal(z1, max_z) & tf.greater_equal(z1, 0))
w_z0_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) *
(z1_f - z) * x1_valid * y1_valid * z1_valid),
1)
| tensorflow.less_equal | 2,541 |
import tensorflow as tf
if tf_util.is_image(self.observation_space):
tf.summary.image('observation', train_model.obs_ph)
else:
tf.summary.histogram('observation', train_model.obs_ph)
trainer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.rprop_alpha,
epsilon=self.rprop_epsilon)
_opt_op = trainer.apply_gradients(grads)
# so when you call _train, you first do the gradient step, then you apply ema
| tensorflow.train.RMSPropOptimizer | 2,542 |
import tensorflow as tf
Tout=tf.float32)
gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])
gtboxes_and_label_h = get_horizen_minAreaRectangle(gtboxes_and_label_batch[i])
gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])
num_objects = num_objects_batch[i]
num_objects = tf.cast(tf.reshape(num_objects, [-1, ]), tf.float32)
| tensorflow.reshape | 2,543 |
import tensorflow as tf
self.vars=self.vars[-1*NUM_VARS:];
self.train_op = self.optimizer.apply_gradients(
self.grads_and_vars, global_step=tf.contrib.framework.get_global_step())
| tensorflow.contrib.framework.get_global_step | 2,544 |
import tensorflow as tf
loss=loss,
eval_metric_ops=eval_metric_ops)
if output_type == "sess":
return {
"eval":{
"per_example_loss":per_example_loss,
"logits":logits,
"loss":tf.reduce_mean(per_example_loss)
}
}
elif output_type == "estimator":
return estimator_spec
else:
raise NotImplementedError()
return model_fn
| tensorflow.reduce_mean | 2,545 |
from tensorflow.contrib.distributions.python.ops import distribution_util
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
@distribution_util.AppendDocstring(_poisson_sample_note)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_cdf(self, x):
return math_ops.log(self.cdf(x))
@distribution_util.AppendDocstring(_poisson_sample_note)
def _cdf(self, x):
x = self._assert_valid_sample(x, check_integer=False)
return math_ops.igammac(math_ops.floor(x + 1), self.rate)
def _log_normalization(self):
return self.rate
def _log_unnormalized_prob(self, x):
x = self._assert_valid_sample(x, check_integer=True)
return x * math_ops.log(self.rate) - math_ops.lgamma(x + 1)
| tensorflow.contrib.distributions.python.ops.distribution_util.AppendDocstring | 2,546 |
import tensorflow as tf
vocab_dist: [batch_size, vsize]
attn_dist: [batch_size, passage_length]
p_gen: [batch_size, 1]
passage_word_idx: [batch_size, passage_length]
passage_mask: [batch_size, passage_length]
'''
input_shape = tf.shape(vocab_dist)
batch_size = input_shape[0]
vsize = input_shape[1]
passage_length = tf.shape(passage_word_idx)[1]
with tf.variable_scope('final_distribution'):
vocab_dist = p_gen * vocab_dist
attn_dist = (1.0-p_gen) * attn_dist
# Concatenate some zeros to each vocabulary dist, to hold the probabilities for phrases
extended_vsize = vsize
if self.max_phrase_size is not None:
extended_vsize += self.max_phrase_size
extra_zeros = tf.zeros((batch_size, self.max_phrase_size))
vocab_dist = tf.concat(values=[vocab_dist, extra_zeros], axis=1) # [batch_size, extended_vsize]
if self.options.add_first_word_prob_for_phrase: # add prob of the first word to each phrase
| tensorflow.variable_scope | 2,547 |
import tensorflow as tf
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
| tensorflow.flags.DEFINE_string | 2,548 |
import tensorflow as tf
# reshape back to time dimension
out = tf.reshape(out, shape=original_tensor_shape)
| tensorflow.reshape | 2,549 |
import tensorflow as tf
tf.ones([self.batch_size, self.num_steps], dtype=data_type()),
average_across_timesteps=False,
average_across_batch=True)
# Update the cost
self._cost = tf.reduce_sum(loss)
self._final_state = state
if not is_training:
return
| tensorflow.reduce_sum | 2,550 |
import tensorflow as tf
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
def input_fn_builder(input_files,
max_seq_length,
| tensorflow.reshape | 2,551 |
import tensorflow as tf
model.components.synthesis.run(self.initial_dlatents,
randomize_noise=randomize_noise, minibatch_size=self.batch_size,
custom_inputs=[partial(create_variable_for_generator, batch_size=batch_size),
partial(create_stub, batch_size=batch_size)],
structure='fixed')
self.sess = tf.get_default_session()
self.graph = tf.get_default_graph()
self.dlatent_variable = next(v for v in tf.global_variables() if 'learnable_dlatents' in v.name)
self.set_dlatents(self.initial_dlatents)
| tensorflow.get_default_session | 2,552 |
import tensorflow as tf
span_head_scores = tf.gather(self.head_scores, span_indices) # [k, max_span_width, 1]
span_mask = tf.expand_dims(tf.sequence_mask(span_width, self.config["max_span_width"], dtype=tf.float32), 2) # [k, max_span_width, 1]
span_head_scores += tf.log(span_mask) # [k, max_span_width, 1]
span_attention = tf.nn.softmax(span_head_scores, 1) # [k, max_span_width, 1]
span_head_emb = tf.reduce_sum(span_attention * span_text_emb, 1) # [k, emb]
span_emb_list.append(span_head_emb)
| tensorflow.nn.softmax | 2,553 |
from tensorflow.python.framework import sparse_tensor
linear_feature_columns=(bucketized_feature,),
dnn_feature_columns=(cont_feature,),
dnn_hidden_units=(3, 3))
input_fn = test_data.iris_input_multiclass_fn
metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate(
input_fn=input_fn, steps=100)
self._assertCommonMetrics(metrics)
def benchmarkPartitionedVariables(self):
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=('en', 'fr', 'zh'),
indices=((0, 0), (0, 1), (2, 0)),
dense_shape=(3, 2))
}
labels = constant_op.constant(((1,), (0,), (0,)))
return features, labels
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_feature = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
embedding_feature = feature_column.embedding_column(
sparse_feature, dimension=1)
| tensorflow.python.framework.sparse_tensor.SparseTensor | 2,554 |
import tensorflow as tf
def get_loss_weights(name=None):
"""Returns the weight for loss."""
with tf.variable_scope(name, 'rl_op_selection'):
logits = tf.get_variable(
name='loss_logits_rl_w',
initializer=tf.initializers.zeros(),
shape=[
FLAGS.num_choices,
],
dtype=tf.float32)
dist = tfp.distributions.Categorical(logits=logits)
dist_entropy = tf.reduce_sum(dist.entropy())
| tensorflow.initializers.zeros | 2,555 |
import tensorflow as tf
self.sync_queue_counter % len(self.sync_queue_devices)]):
sync_queues = [
tf.FIFOQueue(num_workers, [tf.bool], shapes=[[]],
shared_name='%s%s' % (name_prefix, i))
for i in range(num_workers)]
queue_ops = []
# For each other worker, add an entry in a queue, signaling that it can
# finish this step.
token = tf.constant(False)
with tf.control_dependencies(enqueue_after_list):
for i, q in enumerate(sync_queues):
if i == self.task_index:
queue_ops.append(tf.no_op())
else:
queue_ops.append(q.enqueue(token))
| tensorflow.constant | 2,556 |
import tensorflow as tf
math.log(float(max_timescale) / float(min_timescale)) /
(tf.to_float(num_timescales) - 1))
| tensorflow.to_float | 2,557 |
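The snippet above is cut mid-expression; tf.to_float is the TF 1.x shorthand for tf.cast(x, tf.float32), e.g. (values illustrative):
import tensorflow as tf

num_timescales = tf.constant(16)                       # int32 tensor
inv_scale = 1.0 / (tf.to_float(num_timescales) - 1.0)  # cast to float32 before dividing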
import tensorflow as tf
perm_mask = tf.transpose(features["perm_mask"], [1, 2, 0])
if FLAGS.num_predict is not None:
# [num_predict x tgt_len x bsz]
target_mapping = tf.transpose(features["target_mapping"], [1, 2, 0])
else:
target_mapping = None
# target for LM loss
tgt = tf.transpose(features["target"], [1, 0])
# target mask for LM loss
tgt_mask = tf.transpose(features["target_mask"], [1, 0])
# construct xlnet config and save to model_dir
xlnet_config = xlnet.XLNetConfig(FLAGS=FLAGS)
xlnet_config.to_json(os.path.join(FLAGS.model_dir, "config.json"))
| tensorflow.transpose | 2,558 |
import tensorflow as tf
scope_name += '/' + '_'.join(encoder.name for encoder in encoders)
def embed(input_):
embedded_input = tf.nn.embedding_lookup(embedding, input_)
if decoder.use_dropout and decoder.word_keep_prob is not None:
noise_shape = [1, 1] if decoder.pervasive_dropout else [tf.shape(input_)[0], 1]
embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.word_keep_prob, noise_shape=noise_shape)
if decoder.use_dropout and decoder.embedding_keep_prob is not None:
size = tf.shape(embedded_input)[1]
noise_shape = [1, size] if decoder.pervasive_dropout else [tf.shape(input_)[0], size]
embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.embedding_keep_prob,
noise_shape=noise_shape)
return embedded_input
def get_cell(input_size=None, reuse=False):
cells = []
for j in range(decoder.layers):
| tensorflow.shape | 2,559 |
import tensorflow as tf
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
| tensorflow.control_dependencies | 2,560 |
import tensorflow as tf
"""
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
policy = q_func(sess, ob_space, ac_space, 1, 1, None, layers=layers)
obs_phs = (policy.obs_ph, policy.processed_obs)
deterministic_actions = tf.argmax(policy.q_values, axis=1)
#########################
### KEVIN UPDATE ########
### GIMME DAT PRINTS ####
| tensorflow.argmax | 2,561 |
import tensorflow as tf
def _evaluate_legendre_polynomial_loop_body(x, n, l, m, pmm, pmm1):
n_float = tf.cast(n, dtype=x.dtype)
m_float = tf.cast(m, dtype=x.dtype)
pmn = (x * (2.0 * n_float - 1.0) * pmm1 - (n_float + m_float - 1) * pmm) / (
| tensorflow.cast | 2,562 |
from tensorflow.python.ops import array_ops
@property
def beta(self):
"""Scale parameter."""
return self._beta
def _batch_shape(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.alpha), array_ops.shape(self.beta))
def _get_batch_shape(self):
return array_ops.broadcast_static_shape(
self.alpha.get_shape(), self.beta.get_shape())
def _event_shape(self):
return constant_op.constant([], dtype=dtypes.int32)
| tensorflow.python.ops.array_ops.shape | 2,563 |
import tensorflow as tf
with tf.variable_scope(scope):
if output_length == 1:
pool = tf.nn.avg_pool(input_data, [1, height, width, 1], strides=[1, 1, 1, 1], padding=padding)
pool = tf.reduce_mean(pool, axis=[1, 2])
pool = tf.squeeze(pool, axis=[1, 2])
return pool
else:
if num_channels_in != output_length:
conv_weight = tf.Variable(tf.truncated_normal([1, 1, num_channels_in, output_length], stddev=0.1, dtype=tf.float32))
conv = tf.nn.conv2d(input_data, conv_weight, strides=[1, 1, 1, 1], padding='SAME')
pool = tf.nn.avg_pool(conv, ksize=[1, height, width, 1], strides=[1, 1, 1, 1], padding=padding)
else:
pool = tf.nn.avg_pool(input_data, ksize=[1, height, width, 1], strides=[1, 1, 1, 1], padding=padding)
pool = tf.squeeze(pool, axis=[1, 2])
return pool
def avg_pool(input, scope, filter_dims, stride_dims, padding='SAME'):
assert (len(filter_dims) == 2) # filter height and width
assert (len(stride_dims) == 2) # stride height and width
| tensorflow.nn.avg_pool | 2,564 |
import tensorflow as tf
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
[-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
| tensorflow.reshape | 2,565 |
from tensorflow.python.framework import ops
Args:
func: the operator
op_name: name of the operator being overridden
"""
def binary_op_wrapper(x, y):
with ops.op_scope([x, y], None, op_name) as name:
assert isinstance(x, ops.Tensor)
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
return func(x, y, name=name)
ops.Tensor._override_operator("__%s__" % op_name, binary_op_wrapper)
del binary_op_wrapper
def r_binary_op_wrapper(y, x):
with ops.op_scope([x, y], None, op_name) as name:
assert isinstance(y, ops.Tensor)
x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
return func(x, y, name=name)
ops.Tensor._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
del r_binary_op_wrapper
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
types.uint8: types.float32,
types.int8: types.float32,
types.int16: types.float32,
types.int32: types.float64,
| tensorflow.python.framework.ops.op_scope | 2,566 |
import tensorflow as tf
biases = _variable_on_cpu('biases', [num_outputs],
tf.constant_initializer(0.0))
| tensorflow.constant_initializer | 2,567 |
import tensorflow as tf
norm_grads_q = tf.global_norm(grads_q)
norm_grads_policy = tf.global_norm(grads_policy)
else:
grads = tf.gradients(loss, self.params)
norm_grads = None
if self.max_grad_norm is not None:
grads, norm_grads = tf.clip_by_global_norm(grads, self.max_grad_norm)
grads = list(zip(grads, self.params))
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('rewards', tf.reduce_mean(self.reward_ph))
tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate))
tf.summary.scalar('advantage', tf.reduce_mean(adv))
| tensorflow.clip_by_global_norm | 2,568 |
import tensorflow as tf
#def rnn_cell(rnn_input, state):
#with tf.variable_scope('rnn_cell', reuse=True):
#W = tf.get_variable('W', [num_classes+state_size, state_size])
#b = tf.get_variable('b', [state_size], initializer=tf.constant_initializer(0.0))
#return tf.tanh(tf.matmul(tf.concat([rnn_input, state],1),W) + b)
#'''Add the rnn cell to the computation graph'''
#state = init_state
#rnn_outputs = []
#for rnn_input in rnn_inputs:
#state = rnn_cell(rnn_input, state) # the state is reused across loop iterations
#rnn_outputs.append(state)
#final_state = rnn_outputs[-1] # take the last state
cell = tf.contrib.rnn.BasicRNNCell(num_units=state_size)
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell, rnn_inputs, initial_state=init_state)
'''Prediction, loss, optimization'''
with tf.variable_scope('softmax'):
W = tf.get_variable('W', [state_size, num_classes])
b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
'''rnn_outputs is 3-D, so reshape it to 2-D for the matmul,
then reshape back to [batch_size, num_steps, num_classes]'''
logits = tf.reshape(tf.matmul(tf.reshape(rnn_outputs, [-1, state_size]), W) +b, \
shape=[batch_size, num_steps, num_classes])
predictions = tf.nn.softmax(logits)
| tensorflow.contrib.rnn.BasicRNNCell | 2,569 |
import tensorflow as tf
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
feed_previous=feed_previous)
| tensorflow.nn.rnn_cell.BasicLSTMCell | 2,570 |
import tensorflow as tf
# pred_outputs = [tf.boolean_mask(pred_outputs[ind], all_visible, name='boolean_mask_{}'.format(ind)) for ind in list(range(len(pred_outputs)))]
all_visible = tf.expand_dims(tf.expand_dims(tf.cast(tf.logical_and(key_v>0, isvalid>0), tf.float32), axis=-1), axis=-1)
targets_list = [targets_list[ind] * all_visible for ind in list(range(len(targets_list)))]
pred_outputs = [pred_outputs[ind] * all_visible for ind in list(range(len(pred_outputs)))]
sq_diff = tf.reduce_sum(tf.squared_difference(targets, pred_outputs[-1]), axis=-1)
last_pred_mse = tf.metrics.mean_absolute_error(sq_diff, tf.zeros_like(sq_diff), name='last_pred_mse')
metrics = {'normalized_error': ne_mertric, 'last_pred_mse':last_pred_mse}
predictions = {'normalized_error': ne_mertric[1]}
ne_mertric = tf.identity(ne_mertric[1], name='ne_mertric')
base_learning_rate = params['learning_rate']
mse_loss_list = []
if params['use_ohkm']:
base_learning_rate = 1. * base_learning_rate
for pred_ind in list(range(len(pred_outputs) - 1)):
mse_loss_list.append(0.5 * tf.losses.mean_squared_error(targets_list[pred_ind], pred_outputs[pred_ind],
weights=1.0 / tf.cast(cur_batch_size, tf.float32),
scope='loss_{}'.format(pred_ind),
| tensorflow.identity | 2,571 |
import tensorflow as tf
if isinstance(grad, tf.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(tf.histogram_summary(var.op.name + ':gradient',
grad_values))
summaries.append(tf.histogram_summary(var.op.name + ':gradient_norm',
tf.global_norm([grad_values])))
| tensorflow.histogram_summary | 2,572 |
from tensorflow.python.framework import ops
ValueError: If `weights` is not `None` and has an incomptable shape.
"""
default_name = _at_k_name('true_positive', k, class_id=class_id)
with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope:
tp = _sparse_true_positive_at_k(
predictions_idx=predictions_idx, labels=labels, class_id=class_id,
| tensorflow.python.framework.ops.name_scope | 2,573 |
import tensorflow as tf
if out_shp[-1] > in_shp[-1]:
x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [0, int(dim[0] - in_shp[-1])]])
elif out_shp[-1] < in_shp[-1]:
warnings.warn("The input has more feature maps than the output. There will be no residual connection.")
residual = False
if residual:
out += x
return out
def deconv2d(x, dim=(32, [3, 3], [1, 1]), pad='SAME', scope="deconv2d", training=True, ema=None, init=False, bias_initializer=tf.constant_initializer(0.)):
num_filters, filter_size, stride = dim
xs = x.get_shape().as_list()
if pad=='SAME':
target_shape = [tf.shape(x)[0], xs[1]*stride[0], xs[2]*stride[1], num_filters]
else:
target_shape = [tf.shape(x)[0], xs[1]*stride[0] + filter_size[0]-1, xs[2]*stride[1] + filter_size[1]-1, num_filters]
with tf.variable_scope(scope):
V = tf.get_variable("V", shape=list(filter_size) + [num_filters, int(x.get_shape()[-1])], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
g = tf.get_variable("g", shape=[num_filters], dtype=tf.float32, initializer=tf.constant_initializer(1.), trainable=True)
| tensorflow.constant_initializer | 2,574 |
import tensorflow as tf
data_fields["inputs"] = tf.VarLenFeature(tf.int64)
# hack: ignoring true targets and putting dist_targets in targets
data_items_to_decoders = {
"inputs": tf.contrib.slim.tfexample_decoder.Tensor("inputs"),
"targets": tf.contrib.slim.tfexample_decoder.Tensor("dist_targets"),
}
| tensorflow.contrib.slim.tfexample_decoder.Tensor | 2,575 |
import tensorflow as tf
if not self.model_initialized:
initialize_interdependent_variables(self.session, tf.global_variables(),
{self.obs_t_ph : obs_batch, self.obs_tp1_ph : obs_tp1_batch})
self.model_initialized = True
self.session.run(self.train_fn, {self.obs_t_ph: obs_batch, self.act_t_ph: act_batch, self.rew_t_ph: rew_batch,
self.obs_tp1_ph: obs_tp1_batch, self.done_mask_ph: done_mask,
self.learning_rate : self.optimizer_spec.lr_schedule.value(self.t)},
options=tf.RunOptions(report_tensor_allocations_upon_oom=True))
if self.num_param_updates % self.target_update_freq == 0:
self.session.run(self.update_target_fn)
self.num_param_updates += 1
self.t += 1
| tensorflow.RunOptions | 2,576 |
import tensorflow as tf
model = create_model("Tacotron", hparams)
model.initialize(feeder.inputs, feeder.input_lengths, feeder.speaker_embeddings,
feeder.mel_targets, feeder.token_targets,
targets_lengths=feeder.targets_lengths, global_step=global_step,
is_training=True, split_infos=feeder.split_infos)
model.add_loss()
model.add_optimizer(global_step)
stats = add_train_stats(model, hparams)
return model, stats
def model_test_mode(args, feeder, hparams, global_step):
with tf.variable_scope("Tacotron_model", reuse=tf.AUTO_REUSE) as scope:
model = create_model("Tacotron", hparams)
model.initialize(feeder.eval_inputs, feeder.eval_input_lengths,
feeder.eval_speaker_embeddings, feeder.eval_mel_targets,
feeder.eval_token_targets, targets_lengths=feeder.eval_targets_lengths,
global_step=global_step, is_training=False, is_evaluating=True,
split_infos=feeder.eval_split_infos)
model.add_loss()
return model
def train(log_dir, args, hparams):
| tensorflow.variable_scope | 2,577 |
import tensorflow as tf
shape = np.array([3, 4]).astype(np.int64)
return tf.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
return tf.SparseTensorValue(ind, val, shape)
def testAddTakeMany(self):
with self.test_session(graph=tf.Graph(), use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
handle0 = add_sparse_to_tensors_map(sp_input0, shared_name="a")
| tensorflow.SparseTensorValue | 2,578 |
import tensorflow as tf
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
| tensorflow.FixedLenFeature | 2,579 |
import tensorflow as tf
rand = tf.random.uniform([x_shape[0]])
epoch = tf.where(rand < 0.1, tf.zeros_like(epoch), epoch)
# Embed the epoch number.
emb_epoch = common_layers.embedding(epoch, 32, 32) # [batch, 32]
flat_x = tf.concat([flat_x, emb_epoch], axis=1)
flat_x = tf.layers.dropout(flat_x, rate=dropout)
x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu)
logits = tf.layers.dense(
x, self.hparams.problem.num_actions, name="dense2"
)
logits = clip_logits(logits, self.hparams)
logits = tf.expand_dims(logits, axis=1)
value = tf.layers.dense(x, self.distributional_value_size)
return {"target_policy": logits, "target_value": value}
| tensorflow.layers.dense | 2,580 |
import tensorflow as tf
tf.io.FixedLenFeature((), tf.int64),
'image/width':
tf.io.FixedLenFeature((), tf.int64),
'image/object/bbox/xmin':
tf.io.VarLenFeature(tf.float32),
'image/object/bbox/xmax':
tf.io.VarLenFeature(tf.float32),
'image/object/bbox/ymin':
tf.io.VarLenFeature(tf.float32),
'image/object/bbox/ymax':
tf.io.VarLenFeature(tf.float32),
'image/object/class/label':
tf.io.VarLenFeature(tf.int64),
'image/object/area':
tf.io.VarLenFeature(tf.float32),
'image/object/is_crowd':
| tensorflow.io.VarLenFeature | 2,581 |
import tensorflow as tf
self.unweighted_xent = _SafeXEnt(self.y, self.probs)
self._xent = _SafeXEnt(self.y, self.probs, class_weights=weights)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
class TweetSeqModel(BaseModel): #formerly SeqModel
| tensorflow.reduce_mean | 2,582 |
import tensorflow as tf
for layer_id in range(self.lstm_num_layers):
with tf.variable_scope("layer_{}".format(layer_id)):
w = tf.get_variable("w", [2 * self.lstm_size, 4 * self.lstm_size])
self.w_lstm.append(w)
self.g_emb = tf.get_variable("g_emb", [1, self.lstm_size])
with tf.variable_scope("emb"):
self.w_emb = tf.get_variable("w", [self.num_branches, self.lstm_size])
with tf.variable_scope("softmax"):
self.w_soft = tf.get_variable("w", [self.lstm_size, self.num_branches])
b_init = np.array([10.0, 10.0] + [0] * (self.num_branches - 2),
dtype=np.float32)
self.b_soft = tf.get_variable(
"b", [1, self.num_branches],
initializer=tf.constant_initializer(b_init))
b_soft_no_learn = np.array(
[0.25, 0.25] + [-0.25] * (self.num_branches - 2), dtype=np.float32)
b_soft_no_learn = np.reshape(b_soft_no_learn, [1, self.num_branches])
self.b_soft_no_learn = tf.constant(b_soft_no_learn, dtype=tf.float32)
with tf.variable_scope("attention"):
self.w_attn_1 = tf.get_variable("w_1", [self.lstm_size, self.lstm_size])
self.w_attn_2 = tf.get_variable("w_2", [self.lstm_size, self.lstm_size])
self.v_attn = tf.get_variable("v", [self.lstm_size, 1])
def _build_sampler(self, prev_c=None, prev_h=None, use_bias=False):
"""Build the sampler ops and the log_prob ops."""
| tensorflow.constant_initializer | 2,583 |
import tensorflow as tf
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
| tensorflow.shape | 2,584 |
import tensorflow as tf
def main(self):
with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
num_gpu = len(cfgs.GPU_GROUP.strip().split(','))
global_step = slim.get_or_create_global_step()
lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu*cfgs.BATCH_SIZE)
tf.summary.scalar('lr', lr)
optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)
fcos = build_whole_network_batch_quad.DetectionNetworkFCOS(cfgs=self.cfgs,
is_training=True)
with tf.name_scope('get_batch'):
if cfgs.IMAGE_PYRAMID:
shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN)
shortside_len = tf.random_shuffle(shortside_len_list)[0]
| tensorflow.train.MomentumOptimizer | 2,585 |
import tensorflow as tf
# image_size,
# image_size,
# num_channels
# )
mean = tf.reduce_mean(
input_tensor=X, axis=0, keepdims=True, name="mean"
)
print_obj("ungrouped_minibatch_stddev", "mean", mean)
| tensorflow.reduce_mean | 2,586 |
import tensorflow as tf
encoder_outputs = []
new_encoder_input_length = []
for i, encoder in enumerate(encoders):
# create embeddings in the global scope (allows sharing between encoder and decoder)
weight_scale = encoder.embedding_weight_scale or encoder.weight_scale
if weight_scale is None:
initializer = None # FIXME
elif encoder.embedding_initializer == 'uniform' or (encoder.embedding_initializer is None
and encoder.initializer == 'uniform'):
initializer = tf.random_uniform_initializer(minval=-weight_scale, maxval=weight_scale)
else:
initializer = tf.random_normal_initializer(stddev=weight_scale)
with tf.device('/cpu:0'): # embeddings can take a very large amount of memory, so
# storing them in GPU memory can be impractical
if encoder.binary:
embeddings = None # inputs are token ids, which need to be mapped to vectors (embeddings)
else:
embedding_shape = [encoder.vocab_size, encoder.embedding_size]
embeddings = get_variable('embedding_{}'.format(encoder.name), shape=embedding_shape,
| tensorflow.random_uniform_initializer | 2,587 |
import tensorflow as tf
self.cs = self._mask / tf.reduce_sum(self._mask, 1, keep_dims=True)
# The final prediction is the average of the predictions for each word
# weighted by the individual confidence/utility scores.
preds_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
tf.reshape(self.preds_by_word,
[-1, self._out_vocab_size]))
preds_weighted_reshaped = tf.reshape(preds_weighted,
| tensorflow.transpose | 2,588 |
import tensorflow as tf
or elem_shape[0] != elem_shapes[0][0]):
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
| tensorflow.map_fn | 2,589 |
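As a reference for the call above, a minimal tf.map_fn sketch with an illustrative lambda:
import tensorflow as tf

elems = tf.constant([1, 2, 3, 4])
squares = tf.map_fn(lambda x: x * x, elems)  # applies the fn to each element along axis 0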
import tensorflow as tf
rec_Connectivity=self.rec_Connectivity.eval(session=sess),
output_Connectivity=self.output_Connectivity.eval(session=sess))
print("Model saved in file: %s" % save_weights_path)
return (t2 - t1)
# use a trained model to get test outputs
def test(self, sess, rnn_in, weights_path = None):
if(weights_path):
saver = tf.train.Saver()
# Restore variables from disk.
saver.restore(sess, weights_path)
predictions, states = sess.run([self.predictions, self.states], feed_dict={self.x: rnn_in})
else:
predictions, states = sess.run([self.predictions, self.states], feed_dict={self.x: rnn_in})
return predictions, states
| tensorflow.train.Saver | 2,590 |
import tensorflow as tf
d_t_flat = tf.reshape(d_t, (1, -1))
ones = tf.ones_like(x_t_flat)
grid = tf.concat([d_t_flat, y_t_flat, x_t_flat, ones], 0)
return grid
def _transform(theta, input_dim, out_size, z_near, z_far):
with tf.variable_scope('_transform'):
num_batch = input_dim.get_shape().as_list()[0]
num_channels = input_dim.get_shape().as_list()[4]
theta = tf.reshape(theta, (-1, 4, 4))
theta = tf.cast(theta, 'float32')
out_depth = out_size[0]
out_height = out_size[1]
out_width = out_size[2]
grid = _meshgrid(out_depth, out_height, out_width, z_near, z_far)
grid = tf.expand_dims(grid, 0)
grid = tf.reshape(grid, [-1])
grid = tf.tile(grid, tf.stack([num_batch]))
grid = tf.reshape(grid, tf.stack([num_batch, 4, -1]))
| tensorflow.cast | 2,591 |
import tensorflow as tf
"""Layers used during downsampling"""
with tf.variable_scope(name):
if reuse:
tf.get_variable_scope().reuse_variables()
else:
assert tf.get_variable_scope().reuse is False
| tensorflow.get_variable_scope | 2,592 |
import tensorflow as tf
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope='target_q_func')
# Bellman training error
if double_q:
q_max = gather_2d(q_target,tf.argmax(q_online_tp1,axis=1,output_type=tf.int32))
else:
q_max = tf.reduce_max(q_target,axis=1)
| tensorflow.argmax | 2,593 |
import tensorflow as tf
self.counts['affine'] += 1
with tf.variable_scope(name):
init_factor = 2. if activation == 'relu' else 1.
kernel = tf.get_variable(
'weights', [num_channels_in, num_out_channels],
self.data_type,
tf.random_normal_initializer(stddev=np.sqrt(init_factor /
(num_channels_in))))
biases = tf.get_variable('biases', [num_out_channels],
self.data_type,
tf.constant_initializer(0.0))
logits = tf.matmul(input_layer, kernel) + biases
if activation == 'relu':
affine1 = tf.nn.relu(logits, name=name)
elif activation == 'linear' or activation is None:
affine1 = logits
else:
raise KeyError('Invalid activation type \'%s\'' % activation)
self.top_layer = affine1
self.top_size = num_out_channels
return affine1
| tensorflow.matmul | 2,594 |
from tensorflow.python.platform import gfile
self.assertTrue(gfile.Exists(s1))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(s2))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(gfile.Exists(s1))
| tensorflow.python.platform.gfile.Exists | 2,595 |
import tensorflow as tf
def get_race_loss(FLAGS, features, is_training):
"""Loss for downstream multi-choice QA tasks such as RACE."""
bsz_per_core = tf.shape(features["input_ids"])[0]
def _transform_features(feature):
out = tf.reshape(feature, [bsz_per_core, 4, -1])
out = tf.transpose(out, [2, 0, 1])
out = tf.reshape(out, [-1, bsz_per_core * 4])
return out
inp = _transform_features(features["input_ids"])
| tensorflow.reshape | 2,596 |
import tensorflow as tf
# TODO: add batch normalization
def mlp_variational(x, dropout_mask_phs, hidden_sizes=(32,),
activation=tf.tanh, output_activation=None, dropout_rate=0.1):
# layer_sizes = (input_size, h1, h2, ..., output_size)
layer_sizes = hidden_sizes.copy()
layer_sizes.insert(0, x.shape.as_list()[1])
# tile x from shape (b_s * i_s) to (p_s * b_s * i_s)
post_size = tf.shape(dropout_mask_phs[0])[0]
x = tf.tile(tf.reshape(x, [1, tf.shape(x)[0], tf.shape(x)[1]]), [post_size, 1, 1])
# TODO: no dropout on input
regularization = 0
# Create hidden layers
for layer_i in range(1,len(layer_sizes)-1):
hidden_layer = VariationalDense(n_in=layer_sizes[layer_i-1],
n_out=layer_sizes[layer_i],
dropout_mask_ph=dropout_mask_phs[layer_i-1],
model_prob=1.0 - dropout_rate,
| tensorflow.shape | 2,597 |
import tensorflow as tf
with tf.variable_scope("tweetff"):
hidden = tf.get_variable("ff_hidden",
[c2v.embedding_dims, out_vocab_size])
bias = tf.get_variable('ff_bias', [out_vocab_size])
#probably useless. at least I don't want to use it
| tensorflow.get_variable | 2,598 |
import tensorflow as tf
name='conv_batch_norm_{}'.format(k))
if encoder.conv_activation is not None and encoder.conv_activation.lower() == 'relu':
encoder_inputs_ = tf.nn.relu(encoder_inputs_)
encoder_input_length_ = tf.to_int32(tf.ceil(encoder_input_length_ / strides[1]))
feature_size = encoder_inputs_.shape[2].value
channels = encoder_inputs_.shape[3].value
| tensorflow.ceil | 2,599 |