Columns: docstring (string, lengths 52-499), function (string, lengths 67-35.2k), __index_level_0__ (int64, values 52.6k-1.16M)
Deserializes a `tf.SparseTensor` from `sparse_tensor_proto`. Args: sparse_tensor_proto: A proto representing a `tf.SparseTensor`. process_leafs: A function to be applied to the leaf values of the nested structure. Returns: An instance of `tf.SparseTensor`.
def _from_proto_sparse_tensor(sparse_tensor_proto, process_leafs): if not sparse_tensor_proto.HasField("named_tuple"): raise base_errors.ModuleInfoError( "Error while deserializing a SparseTensor: expected proto tuple.") if sparse_tensor_proto.named_tuple.name != _SPARSE_TENSOR_NAME: raise base_errors.ModuleInfoError( "Error while deserializing a SparseTensor: The name of the tuple " "should have been {} but was {}.".format( _SPARSE_TENSOR_NAME, sparse_tensor_proto.named_tuple.name)) named_tuple_map = sparse_tensor_proto.named_tuple.map return tf.SparseTensor( indices=process_leafs(named_tuple_map["indices"].value), values=process_leafs(named_tuple_map["values"].value), dense_shape=process_leafs(named_tuple_map["dense_shape"].value))
101,356
Serializes `nested_value` into `nested_proto`. Args: nested_value: A nested Python value. nested_proto: A `module_pb2.NestedData` instance to be filled from the value in `nested_value`. process_leafs: A function to be applied to the leaf values of the nested structure. already_processed: Set of already processed objects (used to avoid infinite recursion). Raises: ModuleInfoError: If `nested_proto` is not an instance of `module_pb2.NestedData`.
def _nested_to_proto(nested_value, nested_proto, process_leafs, already_processed): if not isinstance(nested_proto, module_pb2.NestedData): raise base_errors.ModuleInfoError("Expected module_pb2.NestedData.") # If this object was already processed, mark as "unserializable" # to avoid infinite recursion. if id(nested_value) in already_processed: nested_proto.value = "" return # Check special types. for type_name, type_info in six.iteritems(_TO_PROTO_SPECIAL_TYPES): if type_info.check(nested_value): nested_proto.special_type.name = type_name type_info.to_proto( nested_value, nested_proto.special_type.object, process_leafs, already_processed) return # Check standard types. if _is_iterable(nested_value): # Mark this container as "already processed" to avoid infinite recursion. already_processed.add(id(nested_value)) if isinstance(nested_value, dict): nested_proto.dict.SetInParent() for key, child in six.iteritems(nested_value): str_key = str(key) child_proto = nested_proto.dict.map[str_key] _nested_to_proto(child, child_proto, process_leafs, already_processed) elif isinstance(nested_value, tuple): # NamedTuple? if _is_namedtuple(nested_value): nested_proto.named_tuple.name = type(nested_value).__name__ for str_key in nested_value._fields: child = getattr(nested_value, str_key) child_proto = nested_proto.named_tuple.map[str_key] _nested_to_proto(child, child_proto, process_leafs, already_processed) else: nested_proto.tuple.SetInParent() for child in nested_value: child_proto = nested_proto.tuple.list.add() _nested_to_proto(child, child_proto, process_leafs, already_processed) else: nested_proto.list.SetInParent() for child in nested_value: child_proto = nested_proto.list.list.add() _nested_to_proto(child, child_proto, process_leafs, already_processed) else: nested_proto.value = process_leafs(nested_value)
101,357
Serializes `module_info`. Args: module_info: An instance of `ModuleInfo`. export_scope: Optional `string`. Name scope to remove. Returns: An instance of `module_pb2.SonnetModule`.
def _module_info_to_proto(module_info, export_scope=None): def strip_name_scope(name_scope): return ops.strip_name_scope(name_scope, export_scope) def process_leafs(value): return strip_name_scope(_graph_element_to_path(value)) module_info_def = module_pb2.SonnetModule( module_name=module_info.module_name, scope_name=strip_name_scope(module_info.scope_name), class_name=module_info.class_name) for connected_subgraph in module_info.connected_subgraphs: connected_subgraph_info_def = module_info_def.connected_subgraphs.add() connected_subgraph_info_def.name_scope = strip_name_scope( connected_subgraph.name_scope) _nested_to_proto( connected_subgraph.inputs, connected_subgraph_info_def.inputs, process_leafs, set()) _nested_to_proto( connected_subgraph.outputs, connected_subgraph_info_def.outputs, process_leafs, set()) return module_info_def
101,358
Deserializes `nested_proto`. Args: nested_proto: An instance of `module_pb2.NestedData`. process_leafs: A function to be applied to the leaf values of the nested structure. Returns: An instance of `string`, `tuple`, `dict` or `namedtuple`. Raises: base_errors.ModuleInfoError: If the protobuf is of the wrong type or if some of its fields are missing.
def _nested_from_proto(nested_proto, process_leafs): if not isinstance(nested_proto, module_pb2.NestedData): raise base_errors.ModuleInfoError("Expected module_pb2.NestedData.") if nested_proto.HasField("value"): value = nested_proto.value if not value: value = _UnserializableObject() else: value = process_leafs(value) return value elif nested_proto.HasField("list"): return [_nested_from_proto(child, process_leafs) for child in nested_proto.list.list] elif nested_proto.HasField("tuple"): return tuple(_nested_from_proto(child, process_leafs) for child in nested_proto.tuple.list) elif nested_proto.HasField("dict"): return {name: _nested_from_proto(child, process_leafs) for name, child in six.iteritems(nested_proto.dict.map)} elif nested_proto.HasField("named_tuple"): tmp_dict = {name: _nested_from_proto(child, process_leafs) for name, child in six.iteritems(nested_proto.named_tuple.map)} # Note that this needs to be a named tuple to work with existing usage. NamedTuple = collections.namedtuple( # pylint: disable=invalid-name nested_proto.named_tuple.name, tmp_dict.keys()) return NamedTuple(**tmp_dict) elif nested_proto.HasField("special_type"): if nested_proto.special_type.name not in _TO_PROTO_SPECIAL_TYPES: return _UnserializableObject() type_info = _TO_PROTO_SPECIAL_TYPES[nested_proto.special_type.name] return type_info.from_proto(nested_proto.special_type.object, process_leafs) else: raise base_errors.ModuleInfoError( "Cannot deserialize a `ModuleInfo` protobuf with no fields.")
101,359
Deserializes `module_info_def` proto. Args: module_info_def: An instance of `module_pb2.SonnetModule`. import_scope: Optional `string`. Name scope to use. Returns: An instance of `ModuleInfo`. Raises: base_errors.ModuleInfoError: If the protobuf is of the wrong type or if some of its fields are missing.
def _module_info_from_proto(module_info_def, import_scope=None): graph = tf.get_default_graph() def prepend_name_scope(name_scope): return ops.prepend_name_scope(name_scope, import_scope) def process_leafs(name): return _path_to_graph_element(prepend_name_scope(name), graph) connected_subgraphs = [] module_info = ModuleInfo( module_name=module_info_def.module_name, scope_name=prepend_name_scope(module_info_def.scope_name), class_name=module_info_def.class_name, connected_subgraphs=connected_subgraphs) for connected_subgraph_def in module_info_def.connected_subgraphs: connected_subgraph = ConnectedSubGraph( module=module_info, name_scope=prepend_name_scope(connected_subgraph_def.name_scope), inputs=_nested_from_proto( connected_subgraph_def.inputs, process_leafs), outputs=_nested_from_proto( connected_subgraph_def.outputs, process_leafs)) connected_subgraphs.append(connected_subgraph) return module_info
101,360
Deserializes the `module_info_def` proto without raising exceptions. Args: module_info_def: An instance of `module_pb2.SonnetModule`. import_scope: Optional `string`. Name scope to use. Returns: An instance of `ModuleInfo`.
def _module_info_from_proto_safe(module_info_def, import_scope=None): try: return _module_info_from_proto(module_info_def, import_scope) except Exception as e: # pylint: disable=broad-except logging.warning( "Error encountered when deserializing sonnet ModuleInfo:\n%s", str(e)) return None
101,361
Builds the deep LSTM model sub-graph. Args: one_hot_input_sequence: A Tensor with the input sequence encoded as a one-hot representation. Its dimensions should be `[truncation_length, batch_size, output_size]`. Returns: Tuple of the Tensor of output logits for the batch, with dimensions `[truncation_length, batch_size, output_size]`, and the final state of the unrolled core.
def _build(self, one_hot_input_sequence): input_shape = one_hot_input_sequence.get_shape() batch_size = input_shape[1] batch_embed_module = snt.BatchApply(self._embed_module) input_sequence = batch_embed_module(one_hot_input_sequence) input_sequence = tf.nn.relu(input_sequence) initial_state = self._core.initial_state(batch_size) if self._use_dynamic_rnn: output_sequence, final_state = tf.nn.dynamic_rnn( cell=self._core, inputs=input_sequence, time_major=True, initial_state=initial_state) else: rnn_input_sequence = tf.unstack(input_sequence) output, final_state = tf.contrib.rnn.static_rnn( cell=self._core, inputs=rnn_input_sequence, initial_state=initial_state) output_sequence = tf.stack(output) batch_output_module = snt.BatchApply(self._output_module) output_sequence_logits = batch_output_module(output_sequence) return output_sequence_logits, final_state
101,367
Builds sub-graph to generate a string, sampled from the model. Args: initial_logits: Starting logits to sample from. initial_state: Starting state for the RNN core. sequence_length: Number of characters to sample. Returns: A Tensor of characters, with dimensions `[sequence_length, batch_size, output_size]`.
def generate_string(self, initial_logits, initial_state, sequence_length): current_logits = initial_logits current_state = initial_state generated_letters = [] for _ in range(sequence_length): # Sample a character index from distribution. char_index = tf.squeeze(tf.multinomial(current_logits, 1)) char_one_hot = tf.one_hot(char_index, self._output_size, 1.0, 0.0) generated_letters.append(char_one_hot) # Feed character back into the deep_lstm. gen_out_seq, current_state = self._core( tf.nn.relu(self._embed_module(char_one_hot)), current_state) current_logits = self._output_module(gen_out_seq) generated_string = tf.stack(generated_letters) return generated_string
101,368
Connects the core to the graph. Args: x: Input `Tensor` of shape `(batch_size, input_size)`. prev_state: Previous state. This could be a `Tensor`, or a tuple of `Tensor`s. Returns: The tuple `(output, state)` for this core. Raises: ValueError: if the `Tensor` `x` does not have rank 2.
def _build(self, x, prev_state): x.get_shape().with_rank(2) self._batch_size = x.get_shape().as_list()[0] self._dtype = x.dtype x_zeros = tf.concat( [x, tf.zeros( shape=(self._batch_size, 1), dtype=self._dtype)], 1) x_ones = tf.concat( [x, tf.ones( shape=(self._batch_size, 1), dtype=self._dtype)], 1) # Weights for the halting signal halting_linear = basic.Linear(name="halting_linear", output_size=1) body = functools.partial( self._body, halting_linear=halting_linear, x_ones=x_ones) cumul_halting_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) iteration_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) core_output_size = [x.value for x in self._core.output_size] out_init = tf.zeros(shape=(self._batch_size,) + tuple(core_output_size), dtype=self._dtype) cumul_state_init = _nested_zeros_like(prev_state) remainder_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) (unused_final_x, final_out, unused_final_state, final_cumul_state, unused_final_halting, final_iteration, final_remainder) = tf.while_loop( self._cond, body, [x_zeros, out_init, prev_state, cumul_state_init, cumul_halting_init, iteration_init, remainder_init]) act_output = basic.Linear( name="act_output_linear", output_size=self._output_size)(final_out) return (act_output, (final_iteration, final_remainder)), final_cumul_state
101,380
Calculate a reasonable embedding size for a vocabulary. Rule of thumb is 6 * 4th root of vocab_size. Args: vocab_size: Size of the input vocabulary. Returns: The embedding size to use. Raises: ValueError: if `vocab_size` is invalid.
def _embedding_dim(vocab_size): if not vocab_size or (vocab_size <= 0): raise ValueError("Invalid vocab_size %g." % vocab_size) return int(round(6.0 * math.sqrt(math.sqrt(vocab_size))))
101,382
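A quick, hedged illustration of the rule of thumb above (six times the fourth root of the vocabulary size); the calls assume `_embedding_dim` is importable from the surrounding module:

print(_embedding_dim(10000))    # 6 * 10000**0.25 = 6 * 10 = 60
print(_embedding_dim(250000))   # round(6 * 250000**0.25) = round(134.16...) = 134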
Lookup embeddings. Looks up an embedding vector for each value in `ids`. All ids must be within [0, vocab_size), else an `InvalidArgumentError` is raised at runtime. Args: ids: Tensor of dtype int64. Returns: Tensor of shape `tf.shape(ids) + [embedding_dim]` and dtype float32.
def _build(self, ids): # Construct embeddings. if self._existing_vocab is None: if self.EMBEDDINGS not in self._initializers: self._initializers[self.EMBEDDINGS] = tf.initializers.random_normal() self._embeddings = tf.get_variable( "embeddings", shape=[self._vocab_size, self._embed_dim], dtype=tf.float32, initializer=self._initializers[self.EMBEDDINGS], partitioner=self._partitioners.get(self.EMBEDDINGS, None), regularizer=self._regularizers.get(self.EMBEDDINGS, None), trainable=self._trainable) else: self._embeddings = tf.get_variable( "embeddings", dtype=tf.float32, initializer=self._existing_vocab, regularizer=self._regularizers.get(self.EMBEDDINGS, None), trainable=self._trainable) if self._densify_gradients: # On the backwards pass, we convert the gradient from indexed-slices to a # regular tensor before sending it back to the parameter server. # This avoids excess computation on the parameter server. # In eager mode we do not need the conversion. # Add a check whether we are in eager mode when it is supported. embeddings = util.convert_gradient_to_tensor(self._embeddings) else: embeddings = self._embeddings # Lookup embeddings return tf.nn.embedding_lookup(embeddings, ids, name="embedding_lookup")
101,384
Assembles the module network and adds it to the graph. The internal computation graph is assembled according to the set of constraints provided at construction time. Args: inputs: Tensor containing a batch of transformation parameters. Returns: A batch of warped grids. Raises: Error: If the input tensor size is not consistent with the constraints passed at construction time.
def _build(self, inputs): input_shape = tf.shape(inputs) input_dtype = inputs.dtype.as_numpy_dtype batch_size = tf.expand_dims(input_shape[0], 0) number_of_params = inputs.get_shape()[1] if number_of_params != self._constraints.num_free_params: raise base.Error('Input size is not consistent with constraint ' 'definition: {} parameters expected, {} provided.' .format(self._constraints.num_free_params, number_of_params)) num_output_dimensions = len(self._psi) // 3 def get_input_slice(start, size): return basic.SliceByDim([1], [start], [size])(inputs) warped_grid = [] var_index_offset = 0 number_of_points = np.prod(self._output_shape) for i in xrange(num_output_dimensions): if self._psi[i] is not None: # The i-th output dimension is not fully specified by the constraints, # the graph is setup to perform matrix multiplication in batch mode. grid_coord = self._psi[i].astype(input_dtype) num_active_vars = self._psi[i].shape[0] active_vars = get_input_slice(var_index_offset, num_active_vars) warped_coord = tf.matmul(active_vars, grid_coord) warped_coord = tf.expand_dims(warped_coord, 1) var_index_offset += num_active_vars offset = self._psi[num_output_dimensions + i] if offset is not None: offset = offset.astype(input_dtype) # Some entries in the i-th row of the affine matrix were constrained # and the corresponding matrix multiplications have been precomputed. tiling_params = tf.concat( [ batch_size, tf.constant( 1, shape=(1,)), tf.ones_like(offset.shape) ], 0) offset = offset.reshape((1, 1) + offset.shape) warped_coord += tf.tile(offset, tiling_params) else: # The i-th output dimension is fully specified by the constraints, and # the corresponding matrix multiplications have been precomputed. warped_coord = self._psi[num_output_dimensions + i].astype(input_dtype) tiling_params = tf.concat( [ batch_size, tf.constant( 1, shape=(1,)), tf.ones_like(warped_coord.shape) ], 0) warped_coord = warped_coord.reshape((1, 1) + warped_coord.shape) warped_coord = tf.tile(warped_coord, tiling_params) warped_coord += self._psi[i + 2 * num_output_dimensions] # Need to help TF figuring out shape inference since tiling information # is held in Tensors which are not known until run time. warped_coord.set_shape([None, 1, number_of_points]) warped_grid.append(warped_coord) # Reshape all the warped coordinates tensors to match the specified output # shape and concatenate into a single matrix. grid_shape = self._output_shape + (1,) warped_grid = [basic.BatchReshape(grid_shape)(grid) for grid in warped_grid] return tf.concat(warped_grid, len(grid_shape))
101,389
Find files matching the given names relative to the given path. Args: path (str): The file path to start searching up from. names (List[str]): The file/directory names to look for. root (str): The directory at which to stop recursing upwards. Note: The path MUST be within the root.
def find_parents(root, path, names): if not root: return [] if not os.path.commonprefix((root, path)): log.warning("Path %s not in %s", path, root) return [] # Split the relative by directory, generate all the parent directories, then check each of them. # This avoids running a loop that has different base-cases for unix/windows # e.g. /a/b and /a/b/c/d/e.py -> ['/a/b', 'c', 'd'] dirs = [root] + os.path.relpath(os.path.dirname(path), root).split(os.path.sep) # Search each of /a/b/c, /a/b, /a while dirs: search_dir = os.path.join(*dirs) existing = list(filter(os.path.exists, [os.path.join(search_dir, n) for n in names])) if existing: return existing dirs.pop() # Otherwise nothing return []
102,577
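A small usage sketch for `find_parents` above; the directory layout and file names are hypothetical, and the actual result depends on what exists on disk:

# Hypothetical layout: /home/user/proj contains setup.py, and the file being
# inspected lives at /home/user/proj/src/pkg/mod.py.
matches = find_parents(
    root='/home/user/proj',
    path='/home/user/proj/src/pkg/mod.py',
    names=['setup.py', '.git'])
# Searches /home/user/proj/src/pkg, then /home/user/proj/src, then /home/user/proj,
# and returns the matches found at the first (deepest) level where any name exists,
# e.g. ['/home/user/proj/setup.py'] for the layout above.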
Create a new multiprocessing.Manager (or return existing one). Args: :authkey: string authorization key :queues: *INTERNAL_USE* :mode: 'local' indicates that the manager will only be accessible from the same host, otherwise remotely accessible. Returns: A TFManager instance, which is also cached in local memory of the Python worker process.
def start(authkey, queues, mode='local'): global mgr, qdict, kdict qdict.clear() kdict.clear() for q in queues: qdict[q] = JoinableQueue() TFManager.register('get_queue', callable=lambda qname: _get_queue(qname)) TFManager.register('get', callable=lambda key: _get(key)) TFManager.register('set', callable=lambda key, value: _set(key, value)) if mode == 'remote': mgr = TFManager(address=('', 0), authkey=authkey) else: mgr = TFManager(authkey=authkey) mgr.start() return mgr
103,040
Connect to a multiprocessing.Manager. Args: :address: unique address to the TFManager, either a unique connection string for 'local', or a (host, port) tuple for remote. :authkey: string authorization key Returns: A TFManager instance referencing the remote TFManager at the supplied address.
def connect(address, authkey): TFManager.register('get_queue') TFManager.register('get') TFManager.register('set') m = TFManager(address, authkey=authkey) m.connect() return m
103,041
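A hedged sketch of how `start` and `connect` above pair up; the authkey, queue names, and mode are illustrative, not prescribed by the module:

# On the python worker that owns the state: create the manager and its queues.
mgr = start(authkey=b'secret', queues=['input', 'output', 'error'], mode='remote')
addr = mgr.address   # (host, port) assigned by the OS when mode == 'remote'

# From another process (or host): re-attach using the same authkey.
remote = connect(addr, authkey=b'secret')
remote.get_queue('input').put({'example': 1})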
Helper to create summaries for activations. Creates a summary that provides a histogram of activations. Creates a summary that measures the sparsity of activations. Args: x: Tensor Returns: nothing
def _activation_summary(x): # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training # session. This helps the clarity of presentation on tensorboard. tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name) tf.summary.histogram(tensor_name + '/activations', x) tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
103,048
Helper to create a Variable stored on CPU memory. Args: name: name of the variable shape: list of ints initializer: initializer for Variable Returns: Variable Tensor
def _variable_on_cpu(name, shape, initializer): with tf.device('/cpu:0'): dtype = tf.float16 if FLAGS.use_fp16 else tf.float32 var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype) return var
103,049
Helper to create an initialized Variable with weight decay. Note that the Variable is initialized with a truncated normal distribution. A weight decay is added only if one is specified. Args: name: name of the variable shape: list of ints stddev: standard deviation of a truncated Gaussian wd: add L2Loss weight decay multiplied by this float. If None, weight decay is not added for this Variable. Returns: Variable Tensor
def _variable_with_weight_decay(name, shape, stddev, wd): dtype = tf.float16 if FLAGS.use_fp16 else tf.float32 var = _variable_on_cpu( name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)) if wd is not None: weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss') tf.add_to_collection('losses', weight_decay) return var
103,050
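An illustrative call of `_variable_with_weight_decay` above (hypothetical name and shape, default TF1 graph assumed), showing how the decay term lands in the 'losses' collection that loss() later sums:

weights = _variable_with_weight_decay('hidden_weights', shape=[256, 384], stddev=0.04, wd=0.004)
# 0.004 * tf.nn.l2_loss(weights) is now in the 'losses' collection, so
# tf.add_n(tf.get_collection('losses')) in loss() folds it into the total loss.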
Build the CIFAR-10 model. Args: images: Images returned from distorted_inputs() or inputs(). Returns: Logits.
def inference(images): # We instantiate all variables using tf.get_variable() instead of # tf.Variable() in order to share variables across multiple GPU training runs. # If we only ran this model on a single GPU, we could simplify this function # by replacing all instances of tf.get_variable() with tf.Variable(). # # conv1 with tf.variable_scope('conv1') as scope: kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64], stddev=5e-2, wd=0.0) conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0)) pre_activation = tf.nn.bias_add(conv, biases) conv1 = tf.nn.relu(pre_activation, name=scope.name) _activation_summary(conv1) # pool1 pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1') # norm1 norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1') # conv2 with tf.variable_scope('conv2') as scope: kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64], stddev=5e-2, wd=0.0) conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1)) pre_activation = tf.nn.bias_add(conv, biases) conv2 = tf.nn.relu(pre_activation, name=scope.name) _activation_summary(conv2) # norm2 norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2') # pool2 pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2') # local3 with tf.variable_scope('local3') as scope: # Move everything into depth so we can perform a single matrix multiply. reshape = tf.reshape(pool2, [FLAGS.batch_size, -1]) dim = reshape.get_shape()[1].value weights = _variable_with_weight_decay('weights', shape=[dim, 384], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1)) local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name) _activation_summary(local3) # local4 with tf.variable_scope('local4') as scope: weights = _variable_with_weight_decay('weights', shape=[384, 192], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1)) local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name) _activation_summary(local4) # linear layer(WX + b), # We don't apply softmax here because # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits # and performs the softmax internally for efficiency. with tf.variable_scope('softmax_linear') as scope: weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES], stddev=1/192.0, wd=0.0) biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0)) softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name) _activation_summary(softmax_linear) return softmax_linear
103,052
Add L2Loss to all the trainable variables. Add summary for "Loss" and "Loss/avg". Args: logits: Logits from inference(). labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape [batch_size] Returns: Loss tensor of type float.
def loss(logits, labels): # Calculate the average cross entropy loss across the batch. labels = tf.cast(labels, tf.int64) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=labels, logits=logits, name='cross_entropy_per_example') cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy') tf.add_to_collection('losses', cross_entropy_mean) # The total loss is defined as the cross entropy loss plus all of the weight # decay terms (L2 loss). return tf.add_n(tf.get_collection('losses'), name='total_loss')
103,053
Add summaries for losses in CIFAR-10 model. Generates moving average for all losses and associated summaries for visualizing the performance of the network. Args: total_loss: Total loss from loss(). Returns: loss_averages_op: op for generating moving averages of losses.
def _add_loss_summaries(total_loss): # Compute the moving average of all individual losses and the total loss. loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg') losses = tf.get_collection('losses') loss_averages_op = loss_averages.apply(losses + [total_loss]) # Attach a scalar summary to all individual losses and the total loss; do the # same for the averaged version of the losses. for l in losses + [total_loss]: # Name each loss as '(raw)' and name the moving average version of the loss # as the original loss name. tf.summary.scalar(l.op.name + ' (raw)', l) tf.summary.scalar(l.op.name, loss_averages.average(l)) return loss_averages_op
103,054
Train CIFAR-10 model. Create an optimizer and apply to all trainable variables. Add moving average for all trainable variables. Args: total_loss: Total loss from loss(). global_step: Integer Variable counting the number of training steps processed. Returns: train_op: op for training.
def train(total_loss, global_step): # Variables that affect learning rate. num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY) # Decay the learning rate exponentially based on the number of steps. lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True) tf.summary.scalar('learning_rate', lr) # Generate moving averages of all losses and associated summaries. loss_averages_op = _add_loss_summaries(total_loss) # Compute gradients. with tf.control_dependencies([loss_averages_op]): opt = tf.train.GradientDescentOptimizer(lr) grads = opt.compute_gradients(total_loss) # Apply gradients. apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) # Add histograms for trainable variables. for var in tf.trainable_variables(): tf.summary.histogram(var.op.name, var) # Add histograms for gradients. for grad, var in grads: if grad is not None: tf.summary.histogram(var.op.name + '/gradients', grad) # Track the moving averages of all trainable variables. variable_averages = tf.train.ExponentialMovingAverage( MOVING_AVERAGE_DECAY, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) with tf.control_dependencies([apply_gradient_op, variables_averages_op]): train_op = tf.no_op(name='train') return train_op
103,055
Adds a variable to the MODEL_VARIABLES collection. Optionally it will add the variable to the VARIABLES_TO_RESTORE collection. Args: var: a variable. restore: whether the variable should be added to the VARIABLES_TO_RESTORE collection.
def add_variable(var, restore=True): collections = [MODEL_VARIABLES] if restore: collections.append(VARIABLES_TO_RESTORE) for collection in collections: if var not in tf.get_collection(collection): tf.add_to_collection(collection, var)
103,057
Gets the list of variables, filtered by scope and/or suffix. Args: scope: an optional scope for filtering the variables to return. suffix: an optional suffix for filtering the variables to return. Returns: a copied list of variables with scope and suffix.
def get_variables(scope=None, suffix=None): candidates = tf.get_collection(MODEL_VARIABLES, scope)[:] if suffix is not None: candidates = [var for var in candidates if var.op.name.endswith(suffix)] return candidates
103,058
Gets the variable uniquely identified by that name. Args: name: a name that uniquely identifies the variable. Returns: a tensorflow variable. Raises: ValueError: if no variable uniquely identified by the name exists.
def get_unique_variable(name): candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name) if not candidates: raise ValueError("Couldn't find variable %s" % name) for candidate in candidates: if candidate.op.name == name: return candidate raise ValueError('Variable %s does not uniquely identify a variable' % name)
103,059
Returns the global step variable. Args: device: Optional device to place the variable. It can be an string or a function that is called to get the device for the variable. Returns: the tensor representing the global step variable.
def global_step(device=''): global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP) if global_step_ref: return global_step_ref[0] else: collections = [ VARIABLES_TO_RESTORE, tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.GLOBAL_STEP, ] # Get the device for the variable. with tf.device(variable_device(device, 'global_step')): return tf.get_variable('global_step', shape=[], dtype=tf.int64, initializer=tf.zeros_initializer(), trainable=False, collections=collections)
103,061
Initialize VariableDeviceChooser. Args: num_parameter_servers: number of parameter servers. ps_device: string representing the parameter server device. placement: string representing the placement of the variable, either CPU:0 or GPU:0. When using parameter servers, forced to CPU:0.
def __init__(self, num_parameter_servers=0, ps_device='/job:ps', placement='CPU:0'): self._num_ps = num_parameter_servers self._ps_device = ps_device self._placement = placement if num_parameter_servers == 0 else 'CPU:0' self._next_task_id = 0
103,063
Runs Eval once. Args: saver: Saver. summary_writer: Summary writer. top_1_op: Top 1 op. top_5_op: Top 5 op. summary_op: Summary op.
def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op): with tf.Session() as sess: ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir) if ckpt and ckpt.model_checkpoint_path: print("ckpt.model_checkpoint_path: {0}".format(ckpt.model_checkpoint_path)) saver.restore(sess, ckpt.model_checkpoint_path) # Assuming model_checkpoint_path looks something like: # /my-favorite-path/imagenet_train/model.ckpt-0, # extract global_step from it. global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1] print('Successfully loaded model from %s at step=%s.' % (ckpt.model_checkpoint_path, global_step)) else: print('No checkpoint file found') return # Start the queue runners. coord = tf.train.Coordinator() try: threads = [] for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS): threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True)) num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size)) # Counts the number of correct predictions. count_top_1 = 0.0 count_top_5 = 0.0 total_sample_count = num_iter * FLAGS.batch_size step = 0 print('%s: starting evaluation on (%s).' % (datetime.now(), FLAGS.subset)) start_time = time.time() while step < num_iter and not coord.should_stop(): top_1, top_5 = sess.run([top_1_op, top_5_op]) count_top_1 += np.sum(top_1) count_top_5 += np.sum(top_5) step += 1 if step % 20 == 0: duration = time.time() - start_time sec_per_batch = duration / 20.0 examples_per_sec = FLAGS.batch_size / sec_per_batch print('%s: [%d batches out of %d] (%.1f examples/sec; %.3f' 'sec/batch)' % (datetime.now(), step, num_iter, examples_per_sec, sec_per_batch)) start_time = time.time() # Compute precision @ 1. precision_at_1 = count_top_1 / total_sample_count recall_at_5 = count_top_5 / total_sample_count print('%s: precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' % (datetime.now(), precision_at_1, recall_at_5, total_sample_count)) summary = tf.Summary() summary.ParseFromString(sess.run(summary_op)) summary.value.add(tag='Precision @ 1', simple_value=precision_at_1) summary.value.add(tag='Recall @ 5', simple_value=recall_at_5) summary_writer.add_summary(summary, global_step) except Exception as e: # pylint: disable=broad-except coord.request_stop(e) coord.request_stop() coord.join(threads, stop_grace_period_secs=10)
103,067
mapPartitions function to run single-node inferencing from a checkpoint/saved_model, using the model's input/output mappings. Args: :iterator: input RDD partition iterator. :args: arguments for TFModel, in argparse format :tf_args: arguments for TensorFlow inferencing code, in argparse or ARGV format. Returns: An iterator of result data.
def _run_model(iterator, args, tf_args): single_node_env(tf_args) logging.info("===== input_mapping: {}".format(args.input_mapping)) logging.info("===== output_mapping: {}".format(args.output_mapping)) input_tensor_names = [tensor for col, tensor in sorted(args.input_mapping.items())] output_tensor_names = [tensor for tensor, col in sorted(args.output_mapping.items())] # if using a signature_def_key, get input/output tensor info from the requested signature if args.signature_def_key: assert args.export_dir, "Inferencing with signature_def_key requires --export_dir argument" logging.info("===== loading meta_graph_def for tag_set ({0}) from saved_model: {1}".format(args.tag_set, args.export_dir)) meta_graph_def = get_meta_graph_def(args.export_dir, args.tag_set) signature = meta_graph_def.signature_def[args.signature_def_key] logging.debug("signature: {}".format(signature)) inputs_tensor_info = signature.inputs logging.debug("inputs_tensor_info: {0}".format(inputs_tensor_info)) outputs_tensor_info = signature.outputs logging.debug("outputs_tensor_info: {0}".format(outputs_tensor_info)) result = [] global global_sess, global_args if global_sess and global_args == args: # if graph/session already loaded/started (and using same args), just reuse it sess = global_sess else: # otherwise, create new session and load graph from disk tf.reset_default_graph() sess = tf.Session(graph=tf.get_default_graph()) if args.export_dir: assert args.tag_set, "Inferencing from a saved_model requires --tag_set" # load graph from a saved_model logging.info("===== restoring from saved_model: {}".format(args.export_dir)) loader.load(sess, args.tag_set.split(','), args.export_dir) elif args.model_dir: # load graph from a checkpoint ckpt = tf.train.latest_checkpoint(args.model_dir) assert ckpt, "Invalid model checkpoint path: {}".format(args.model_dir) logging.info("===== restoring from checkpoint: {}".format(ckpt + ".meta")) saver = tf.train.import_meta_graph(ckpt + ".meta", clear_devices=True) saver.restore(sess, ckpt) else: raise Exception("Inferencing requires either --model_dir or --export_dir argument") global_sess = sess global_args = args # get list of input/output tensors (by name) if args.signature_def_key: input_tensors = [inputs_tensor_info[t].name for t in input_tensor_names] output_tensors = [outputs_tensor_info[output_tensor_names[0]].name] else: input_tensors = [t + ':0' for t in input_tensor_names] output_tensors = [t + ':0' for t in output_tensor_names] logging.info("input_tensors: {0}".format(input_tensors)) logging.info("output_tensors: {0}".format(output_tensors)) # feed data in batches and return output tensors for tensors in yield_batch(iterator, args.batch_size, len(input_tensor_names)): inputs_feed_dict = {} for i in range(len(input_tensors)): inputs_feed_dict[input_tensors[i]] = tensors[i] outputs = sess.run(output_tensors, feed_dict=inputs_feed_dict) lengths = [len(output) for output in outputs] input_size = len(tensors[0]) assert all([length == input_size for length in lengths]), "Output array sizes {} must match input size: {}".format(lengths, input_size) python_outputs = [output.tolist() for output in outputs] # convert from numpy to standard python types result.extend(zip(*python_outputs)) # convert to an array of tuples of "output columns" return result
103,069
Sets up environment for a single-node TF session. Args: :args: command line arguments as either argparse args or argv list
def single_node_env(args): # setup ARGV for the TF process if isinstance(args, list): sys.argv = args elif args.argv: sys.argv = args.argv # setup ENV for Hadoop-compatibility and/or GPU allocation num_gpus = args.num_gpus if 'num_gpus' in args else 1 util.single_node_env(num_gpus)
103,070
Utility function to read a meta_graph_def from disk. From `saved_model_cli.py <https://github.com/tensorflow/tensorflow/blob/8e0e8d41a3a8f2d4a6100c2ea1dc9d6c6c4ad382/tensorflow/python/tools/saved_model_cli.py#L186>`_ Args: :saved_model_dir: path to saved_model. :tag_set: list of string tags identifying the TensorFlow graph within the saved_model. Returns: A TensorFlow meta_graph_def, or raises an Exception otherwise.
def get_meta_graph_def(saved_model_dir, tag_set): saved_model = reader.read_saved_model(saved_model_dir) set_of_tags = set(tag_set.split(',')) for meta_graph_def in saved_model.meta_graphs: if set(meta_graph_def.meta_info_def.tags) == set_of_tags: return meta_graph_def raise RuntimeError("MetaGraphDef associated with tag-set {0} could not be found in SavedModel".format(tag_set))
103,071
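A one-line usage sketch for `get_meta_graph_def` above; the saved_model path is hypothetical and the tag set is passed as a comma-separated string:

meta_graph_def = get_meta_graph_def('/path/to/saved_model', 'serve')
print(list(meta_graph_def.signature_def.keys()))   # available signature_def keys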
Generator that yields batches of a DataFrame iterator. Args: :iterable: Spark partition iterator. :batch_size: number of items to retrieve per invocation. :num_tensors: number of tensors (columns) expected in each item. Returns: An array of `num_tensors` arrays, each of length `batch_size`.
def yield_batch(iterable, batch_size, num_tensors=1): tensors = [[] for i in range(num_tensors)] for item in iterable: if item is None: break for i in range(num_tensors): tmp = str(item[i]) if type(item[i]) is bytearray else item[i] tensors[i].append(tmp) if len(tensors[0]) >= batch_size: yield tensors tensors = [[] for i in range(num_tensors)] if len(tensors[0]) > 0: yield tensors
103,072
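A pure-Python illustration of `yield_batch` above, using two 'tensor' columns per item and a batch size of 2:

rows = [(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd'), (5, 'e')]
for batch in yield_batch(iter(rows), batch_size=2, num_tensors=2):
    print(batch)
# [[1, 2], ['a', 'b']]
# [[3, 4], ['c', 'd']]
# [[5], ['e']]   <- the final partial batch is still yielded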
Trains a TensorFlow model and returns a TFModel instance with the same args/params pointing to a checkpoint or saved_model on disk. Args: :dataset: A Spark DataFrame with columns that will be mapped to TensorFlow tensors. Returns: A TFModel representing the trained model, backed on disk by a TensorFlow checkpoint or saved_model.
def _fit(self, dataset): sc = SparkContext.getOrCreate() logging.info("===== 1. train args: {0}".format(self.args)) logging.info("===== 2. train params: {0}".format(self._paramMap)) local_args = self.merge_args_params() logging.info("===== 3. train args + params: {0}".format(local_args)) if local_args.input_mode == TFCluster.InputMode.TENSORFLOW: if dfutil.isLoadedDF(dataset): # if just a DataFrame loaded from tfrecords, just point to original source path logging.info("Loaded DataFrame of TFRecord.") local_args.tfrecord_dir = dfutil.loadedDF[dataset] else: # otherwise, save as tfrecords and point to save path assert local_args.tfrecord_dir, "Please specify --tfrecord_dir to export DataFrame to TFRecord." if self.getInputMapping(): # if input mapping provided, filter only required columns before exporting dataset = dataset.select(list(self.getInputMapping())) logging.info("Exporting DataFrame {} as TFRecord to: {}".format(dataset.dtypes, local_args.tfrecord_dir)) dfutil.saveAsTFRecords(dataset, local_args.tfrecord_dir) logging.info("Done saving") tf_args = self.args.argv if self.args.argv else local_args cluster = TFCluster.run(sc, self.train_fn, tf_args, local_args.cluster_size, local_args.num_ps, local_args.tensorboard, local_args.input_mode, driver_ps_nodes=local_args.driver_ps_nodes) if local_args.input_mode == TFCluster.InputMode.SPARK: # feed data, using a deterministic order for input columns (lexicographic by key) input_cols = sorted(self.getInputMapping()) cluster.train(dataset.select(input_cols).rdd, local_args.epochs) cluster.shutdown(grace_secs=30) # Run export function, if provided if self.export_fn: assert local_args.export_dir, "Export function requires --export_dir to be set" logging.info("Exporting saved_model (via export_fn) to: {}".format(local_args.export_dir)) def _export(iterator, fn, args): single_node_env(args) fn(args) # Run on a single exeucutor sc.parallelize([1], 1).foreachPartition(lambda it: _export(it, self.export_fn, tf_args)) return self._copyValues(TFModel(self.args))
103,078
Transforms the input DataFrame by applying the _run_model() mapPartitions function. Args: :dataset: A Spark DataFrame for TensorFlow inferencing.
def _transform(self, dataset): spark = SparkSession.builder.getOrCreate() # set a deterministic order for input/output columns (lexicographic by key) input_cols = [col for col, tensor in sorted(self.getInputMapping().items())] # input col => input tensor output_cols = [col for tensor, col in sorted(self.getOutputMapping().items())] # output tensor => output col # run single-node inferencing on each executor logging.info("input_cols: {}".format(input_cols)) logging.info("output_cols: {}".format(output_cols)) # merge args + params logging.info("===== 1. inference args: {0}".format(self.args)) logging.info("===== 2. inference params: {0}".format(self._paramMap)) local_args = self.merge_args_params() logging.info("===== 3. inference args + params: {0}".format(local_args)) tf_args = self.args.argv if self.args.argv else local_args rdd_out = dataset.select(input_cols).rdd.mapPartitions(lambda it: _run_model(it, local_args, tf_args)) # convert to a DataFrame-friendly format rows_out = rdd_out.map(lambda x: Row(*x)) return spark.createDataFrame(rows_out, output_cols)
103,080
Process a single image file. Args: filename: string, path to an image file e.g., '/path/to/example.JPG'. coder: instance of ImageCoder to provide TensorFlow image coding utils. Returns: image_buffer: string, JPEG encoding of RGB image. height: integer, image height in pixels. width: integer, image width in pixels.
def _process_image(filename, coder): # Read the image file. with tf.gfile.FastGFile(filename, 'r') as f: image_data = f.read() # Clean the dirty data. if _is_png(filename): # 1 image is a PNG. print('Converting PNG to JPEG for %s' % filename) image_data = coder.png_to_jpeg(image_data) elif _is_cmyk(filename): # 22 JPEG images are in CMYK colorspace. print('Converting CMYK to RGB for %s' % filename) image_data = coder.cmyk_to_rgb(image_data) # Decode the RGB JPEG. image = coder.decode_jpeg(image_data) # Check that image converted to RGB assert len(image.shape) == 3 height = image.shape[0] width = image.shape[1] assert image.shape[2] == 3 return image_data, height, width
103,090
Build a list of human-readable labels. Args: synsets: list of strings; each string is a unique WordNet ID. synset_to_human: dict of synset to human labels, e.g., 'n02119022' --> 'red fox, Vulpes vulpes' Returns: List of human-readable strings corresponding to each synset.
def _find_human_readable_labels(synsets, synset_to_human): humans = [] for s in synsets: assert s in synset_to_human, ('Failed to find: %s' % s) humans.append(synset_to_human[s]) return humans
103,091
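A tiny illustration of the synset lookup in `_find_human_readable_labels` above; the mapping here is a sample, not the real metadata file:

synset_to_human = {'n02119022': 'red fox, Vulpes vulpes',
                   'n02510455': 'giant panda, panda, panda bear'}
print(_find_human_readable_labels(['n02510455', 'n02119022'], synset_to_human))
# ['giant panda, panda, panda bear', 'red fox, Vulpes vulpes']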
Find the bounding boxes for a given image file. Args: filenames: list of strings; each string is a path to an image file. image_to_bboxes: dictionary mapping image file names to a list of bounding boxes. This list contains 0+ bounding boxes. Returns: List of bounding boxes for each image. Note that each entry in this list might contain from 0+ entries corresponding to the number of bounding box annotations for the image.
def _find_image_bounding_boxes(filenames, image_to_bboxes): num_image_bbox = 0 bboxes = [] for f in filenames: basename = os.path.basename(f) if basename in image_to_bboxes: bboxes.append(image_to_bboxes[basename]) num_image_bbox += 1 else: bboxes.append([]) print('Found %d images with bboxes out of %d images' % ( num_image_bbox, len(filenames))) return bboxes
103,092
Process a complete data set and save it as a TFRecord. Args: name: string, unique identifier specifying the data set. directory: string, root path to the data set. num_shards: integer number of shards for this data set. synset_to_human: dict of synset to human labels, e.g., 'n02119022' --> 'red fox, Vulpes vulpes' image_to_bboxes: dictionary mapping image file names to a list of bounding boxes. This list contains 0+ bounding boxes.
def _process_dataset(name, directory, num_shards, synset_to_human, image_to_bboxes): filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file) humans = _find_human_readable_labels(synsets, synset_to_human) bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes) _process_image_files(name, filenames, synsets, labels, humans, bboxes, num_shards)
103,093
Construct distorted input for CIFAR training using the Reader ops. Args: data_dir: Path to the CIFAR-10 data directory. batch_size: Number of images per batch. Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size.
def distorted_inputs(data_dir, batch_size): filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)] for f in filenames: if not tf.gfile.Exists(f): raise ValueError('Failed to find file: ' + f) # Create a queue that produces the filenames to read. filename_queue = tf.train.string_input_producer(filenames) # Read examples from files in the filename queue. read_input = read_cifar10(filename_queue) reshaped_image = tf.cast(read_input.uint8image, tf.float32) height = IMAGE_SIZE width = IMAGE_SIZE # Image processing for training the network. Note the many random # distortions applied to the image. # Randomly crop a [height, width] section of the image. distorted_image = tf.random_crop(reshaped_image, [height, width, 3]) # Randomly flip the image horizontally. distorted_image = tf.image.random_flip_left_right(distorted_image) # Because these operations are not commutative, consider randomizing # the order their operation. distorted_image = tf.image.random_brightness(distorted_image, max_delta=63) distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8) # Subtract off the mean and divide by the variance of the pixels. float_image = tf.image.per_image_standardization(distorted_image) # Set the shapes of tensors. float_image.set_shape([height, width, 3]) read_input.label.set_shape([1]) # Ensure that the random shuffling has good mixing properties. min_fraction_of_examples_in_queue = 0.4 min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue) print ('Filling queue with %d CIFAR images before starting to train. ' 'This will take a few minutes.' % min_queue_examples) # Generate a batch of images and labels by building up a queue of examples. return _generate_image_and_label_batch(float_image, read_input.label, min_queue_examples, batch_size, shuffle=True)
103,105
Construct input for CIFAR evaluation using the Reader ops. Args: eval_data: bool, indicating if one should use the train or eval data set. data_dir: Path to the CIFAR-10 data directory. batch_size: Number of images per batch. Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size.
def inputs(eval_data, data_dir, batch_size): if not eval_data: filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)] num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN else: filenames = [os.path.join(data_dir, 'test_batch.bin')] num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL for f in filenames: if not tf.gfile.Exists(f): raise ValueError('Failed to find file: ' + f) # Create a queue that produces the filenames to read. filename_queue = tf.train.string_input_producer(filenames) # Read examples from files in the filename queue. read_input = read_cifar10(filename_queue) reshaped_image = tf.cast(read_input.uint8image, tf.float32) height = IMAGE_SIZE width = IMAGE_SIZE # Image processing for evaluation. # Crop the central [height, width] of the image. resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image, height, width) # Subtract off the mean and divide by the variance of the pixels. float_image = tf.image.per_image_standardization(resized_image) # Set the shapes of tensors. float_image.set_shape([height, width, 3]) read_input.label.set_shape([1]) # Ensure that the random shuffling has good mixing properties. min_fraction_of_examples_in_queue = 0.4 min_queue_examples = int(num_examples_per_epoch * min_fraction_of_examples_in_queue) # Generate a batch of images and labels by building up a queue of examples. return _generate_image_and_label_batch(float_image, read_input.label, min_queue_examples, batch_size, shuffle=False)
103,106
Save a Spark DataFrame as TFRecords. This will convert the DataFrame rows to TFRecords prior to saving. Args: :df: Spark DataFrame :output_dir: Path to save TFRecords
def saveAsTFRecords(df, output_dir): tf_rdd = df.rdd.mapPartitions(toTFExample(df.dtypes)) tf_rdd.saveAsNewAPIHadoopFile(output_dir, "org.tensorflow.hadoop.io.TFRecordFileOutputFormat", keyClass="org.apache.hadoop.io.BytesWritable", valueClass="org.apache.hadoop.io.NullWritable")
103,108
Construct all necessary functions and call run_loop. Args: flags_obj: Object containing user specified flags.
def run_census(flags_obj, ctx): train_file = os.path.join(flags_obj.data_dir, census_dataset.TRAINING_FILE) test_file = os.path.join(flags_obj.data_dir, census_dataset.EVAL_FILE) # Train and evaluate the model every `flags.epochs_between_evals` epochs. def train_input_fn(): return census_dataset.input_fn( train_file, flags_obj.epochs_between_evals, True, flags_obj.batch_size) def eval_input_fn(): return census_dataset.input_fn(test_file, 1, False, flags_obj.batch_size) tensors_to_log = { 'average_loss': '{loss_prefix}head/truediv', 'loss': '{loss_prefix}head/weighted_loss/Sum' } # Removing run_loop, since we can only invoke train_and_evaluate once model_helpers.apply_clean(flags.FLAGS) model = build_estimator( model_dir=flags_obj.model_dir, model_type=flags_obj.model_type, model_column_fn=census_dataset.build_model_columns, inter_op=flags_obj.inter_op_parallelism_threads, intra_op=flags_obj.intra_op_parallelism_threads, ctx=ctx) loss_prefix = LOSS_PREFIX.get(flags_obj.model_type, '') tensors_to_log = {k: v.format(loss_prefix=loss_prefix) for k, v in tensors_to_log.items()} train_hooks = hooks_helper.get_train_hooks( flags_obj.hooks, model_dir=flags_obj.model_dir, batch_size=flags_obj.batch_size, tensors_to_log=tensors_to_log) # Note: this will only be invoked once, so `--epochs_between_evals` is now effectively `--train_epochs` # and evaluation will only execute once. train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, hooks=train_hooks) eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn) tf.estimator.train_and_evaluate(model, train_spec, eval_spec)
103,116
Yields the scope with the default parameters for inception_v3. Args: weight_decay: the weight decay for weights variables. stddev: standard deviation of the truncated Gaussian weight distribution. batch_norm_decay: decay for the moving average of batch_norm momentums. batch_norm_epsilon: small float added to variance to avoid dividing by zero. Yields: an arg_scope with the parameters needed for inception_v3.
def inception_v3_parameters(weight_decay=0.00004, stddev=0.1, batch_norm_decay=0.9997, batch_norm_epsilon=0.001): # Set weight_decay for weights in Conv and FC layers. with scopes.arg_scope([ops.conv2d, ops.fc], weight_decay=weight_decay): # Set stddev, activation and parameters for batch_norm. with scopes.arg_scope([ops.conv2d], stddev=stddev, activation=tf.nn.relu, batch_norm_params={ 'decay': batch_norm_decay, 'epsilon': batch_norm_epsilon}) as arg_scope: yield arg_scope
103,119
Define an L1 regularizer. Args: weight: scale the loss by this factor. scope: Optional scope for name_scope. Returns: a regularizer function.
def l1_regularizer(weight=1.0, scope=None): def regularizer(tensor): with tf.name_scope(scope, 'L1Regularizer', [tensor]): l1_weight = tf.convert_to_tensor(weight, dtype=tensor.dtype.base_dtype, name='weight') return tf.multiply(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value') return regularizer
103,123
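A hedged usage sketch showing the closure pattern of `l1_regularizer` above: it returns a function which, applied to a tensor, produces the scaled penalty op (TF1 graph mode, hypothetical variable):

regularizer = l1_regularizer(weight=1e-4)
weights = tf.get_variable('w', shape=[128, 64])   # hypothetical variable
l1_penalty = regularizer(weights)                 # 1e-4 * reduce_sum(|weights|)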
Define an L2 regularizer. Args: weight: scale the loss by this factor. scope: Optional scope for name_scope. Returns: a regularizer function.
def l2_regularizer(weight=1.0, scope=None): def regularizer(tensor): with tf.name_scope(scope, 'L2Regularizer', [tensor]): l2_weight = tf.convert_to_tensor(weight, dtype=tensor.dtype.base_dtype, name='weight') return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value') return regularizer
103,124
Define an L1L2 regularizer. Args: weight_l1: scale the L1 loss by this factor. weight_l2: scale the L2 loss by this factor. scope: Optional scope for name_scope. Returns: a regularizer function.
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None): def regularizer(tensor): with tf.name_scope(scope, 'L1L2Regularizer', [tensor]): weight_l1_t = tf.convert_to_tensor(weight_l1, dtype=tensor.dtype.base_dtype, name='weight_l1') weight_l2_t = tf.convert_to_tensor(weight_l2, dtype=tensor.dtype.base_dtype, name='weight_l2') reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(tensor)), name='value_l1') reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(tensor), name='value_l2') return tf.add(reg_l1, reg_l2, name='value') return regularizer
103,125
Define an L1 loss, useful for regularization, e.g. lasso. Args: tensor: tensor to regularize. weight: scale the loss by this factor. scope: Optional scope for name_scope. Returns: the L1 loss op.
def l1_loss(tensor, weight=1.0, scope=None): with tf.name_scope(scope, 'L1Loss', [tensor]): weight = tf.convert_to_tensor(weight, dtype=tensor.dtype.base_dtype, name='loss_weight') loss = tf.multiply(weight, tf.reduce_sum(tf.abs(tensor)), name='value') tf.add_to_collection(LOSSES_COLLECTION, loss) return loss
103,126
Define an L2 loss, useful for regularization, e.g. weight decay. Args: tensor: tensor to regularize. weight: an optional weight to modulate the loss. scope: Optional scope for name_scope. Returns: the L2 loss op.
def l2_loss(tensor, weight=1.0, scope=None): with tf.name_scope(scope, 'L2Loss', [tensor]): weight = tf.convert_to_tensor(weight, dtype=tensor.dtype.base_dtype, name='loss_weight') loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value') tf.add_to_collection(LOSSES_COLLECTION, loss) return loss
103,127
Decorates a function with args so it can be used within an arg_scope. Args: func: function to decorate. Returns: The decorated function func_with_args().
def add_arg_scope(func): @functools.wraps(func) def func_with_args(*args, **kwargs): current_scope = _current_arg_scope() current_args = kwargs key_func = (func.__module__, func.__name__) if key_func in current_scope: current_args = current_scope[key_func].copy() current_args.update(kwargs) return func(*args, **current_args) _add_op(func) return func_with_args
103,132
Returns this executor's "singleton" instance of the multiprocessing.Manager, reconnecting per python-worker if needed. Args: :cluster_info: cluster node reservations :host: host IP address :executor_id: unique id per executor (created during initial call to run()) Returns: TFManager instance for this executor/python-worker
def _get_manager(cluster_info, host, executor_id): for node in cluster_info: if node['host'] == host and node['executor_id'] == executor_id: addr = node['addr'] authkey = node['authkey'] TFSparkNode.mgr = TFManager.connect(addr, authkey) break if TFSparkNode.mgr is None: msg = "No TFManager found on this node, please ensure that:\n" + \ "1. Spark num_executors matches TensorFlow cluster_size\n" + \ "2. Spark cores/tasks per executor is 1.\n" + \ "3. Spark dynamic allocation is disabled." raise Exception(msg) logging.info("Connected to TFSparkNode.mgr on {0}, executor={1}, state={2}".format(host, executor_id, str(TFSparkNode.mgr.get('state')))) return TFSparkNode.mgr
103,134
Feeds Spark partitions into the shared multiprocessing.Queue. Args: :cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc) :cluster_meta: dictionary of cluster metadata (e.g. cluster_id, reservation.Server address, etc) :feed_timeout: number of seconds after which data feeding times out (600 sec default) :qname: *INTERNAL_USE* Returns: A dataRDD.mapPartitions() function
def train(cluster_info, cluster_meta, feed_timeout=600, qname='input'): def _train(iter): # get shared queue, reconnecting if necessary mgr = _get_manager(cluster_info, util.get_ip_address(), util.read_executor_id()) try: queue = mgr.get_queue(qname) equeue = mgr.get_queue('error') except (AttributeError, KeyError): msg = "Queue '{}' not found on this node, check for exceptions on other nodes.".format(qname) raise Exception(msg) state = str(mgr.get('state')) logging.info("mgr.state={0}".format(state)) terminating = state == "'terminating'" if terminating: logging.info("mgr is terminating, skipping partition") count = sum(1 for item in iter) logging.info("Skipped {0} items from partition".format(count)) else: logging.info("Feeding partition {0} into {1} queue {2}".format(iter, qname, queue)) count = 0 for item in iter: count += 1 queue.put(item, block=True) # wait for consumers to finish processing all items in queue before "finishing" this iterator joinThr = Thread(target=queue.join) joinThr.start() timeout = feed_timeout while (joinThr.isAlive()): if (not equeue.empty()): e_str = equeue.get() equeue.task_done() raise Exception("exception in worker:\n" + e_str) time.sleep(1) timeout -= 1 if timeout <= 0: raise Exception("Timeout while feeding partition") logging.info("Processed {0} items in partition".format(count)) # check if TF is terminating feed after this partition if not terminating: state = str(mgr.get('state')) terminating = state == "'terminating'" if terminating: try: logging.info("TFSparkNode: requesting stop") client = reservation.Client(cluster_meta['server_addr']) client.request_stop() client.close() except Exception as e: # ignore any errors while requesting stop logging.debug("Error while requesting stop: {0}".format(e)) return [terminating] return _train
103,136
Feeds Spark partitions into the shared multiprocessing.Queue and returns inference results.

  Args:
    :cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc)
    :feed_timeout: number of seconds after which data feeding times out (600 sec default)
    :qname: *INTERNAL_USE*

  Returns:
    A dataRDD.mapPartitions() function
def inference(cluster_info, feed_timeout=600, qname='input'):
  def _inference(iter):
    # get shared queue, reconnecting if necessary
    mgr = _get_manager(cluster_info, util.get_ip_address(), util.read_executor_id())
    try:
      queue_in = mgr.get_queue(qname)
      equeue = mgr.get_queue('error')
    except (AttributeError, KeyError):
      msg = "Queue '{}' not found on this node, check for exceptions on other nodes.".format(qname)
      raise Exception(msg)

    logging.info("Feeding partition {0} into {1} queue {2}".format(iter, qname, queue_in))
    count = 0
    for item in iter:
      count += 1
      queue_in.put(item, block=True)

    # signal "end of partition"
    queue_in.put(marker.EndPartition())

    # skip empty partitions
    if count == 0:
      return []

    # wait for consumers to finish processing all items in queue before "finishing" this iterator
    joinThr = Thread(target=queue_in.join)
    joinThr.start()
    timeout = feed_timeout
    while (joinThr.isAlive()):
      if (not equeue.empty()):
        e_str = equeue.get()
        equeue.task_done()
        raise Exception("exception in worker:\n" + e_str)
      time.sleep(1)
      timeout -= 1
      if timeout <= 0:
        raise Exception("Timeout while feeding partition")

    logging.info("Processed {0} items in partition".format(count))

    # read result queue
    results = []
    queue_out = mgr.get_queue('output')
    while count > 0:
      result = queue_out.get(block=True)
      results.append(result)
      count -= 1
      queue_out.task_done()

    logging.info("Finished processing partition")
    return results

  return _inference
103,137
Stops all TensorFlow nodes by feeding ``None`` into the multiprocessing.Queues.

  Args:
    :cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc).
    :queues: *INTERNAL_USE*

  Returns:
    A nodeRDD.mapPartitions() function
def shutdown(cluster_info, queues=['input']):
  def _shutdown(iter):
    host = util.get_ip_address()
    executor_id = util.read_executor_id()

    # reconnect to shared queue
    mgr = _get_manager(cluster_info, host, executor_id)

    # send SIGTERM to Tensorboard proc (if running)
    for node in cluster_info:
      if node['host'] == host and node['executor_id'] == executor_id:
        tb_pid = node['tb_pid']
        if tb_pid != 0:
          logging.info("Stopping tensorboard (pid={0})".format(tb_pid))
          subprocess.Popen(["kill", str(tb_pid)])

    # terminate any listening queues
    logging.info("Stopping all queues")
    for q in queues:
      try:
        queue = mgr.get_queue(q)
        logging.info("Feeding None into {0} queue".format(q))
        queue.put(None, block=True)
      except (AttributeError, KeyError):
        msg = "Queue '{}' not found on this node, check for exceptions on other nodes.".format(q)
        raise Exception(msg)

    logging.info("Setting mgr.state to 'stopped'")
    mgr.set('state', 'stopped')
    return [True]

  return _shutdown
103,138
Adds all losses for the model.

  Note the final loss is not returned. Instead, the list of losses is collected
  by slim.losses. The losses are accumulated in tower_loss() and summed to
  calculate the total loss.

  Args:
    logits: List of logits from inference(). Each entry is a 2-D float Tensor.
    labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape [batch_size]
    batch_size: integer
def loss(logits, labels, batch_size=None):
  if not batch_size:
    batch_size = FLAGS.batch_size

  # Reshape the labels into a dense Tensor of
  # shape [FLAGS.batch_size, num_classes].
  sparse_labels = tf.reshape(labels, [batch_size, 1])
  indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
  concated = tf.concat(axis=1, values=[indices, sparse_labels])
  num_classes = logits[0].get_shape()[-1].value
  dense_labels = tf.sparse_to_dense(concated, [batch_size, num_classes], 1.0, 0.0)

  # Cross entropy loss for the main softmax prediction.
  slim.losses.cross_entropy_loss(logits[0], dense_labels, label_smoothing=0.1, weight=1.0)

  # Cross entropy loss for the auxiliary softmax head.
  slim.losses.cross_entropy_loss(logits[1], dense_labels, label_smoothing=0.1, weight=0.4, scope='aux_loss')
103,167
Convenience function to create a Tensorflow-compatible absolute HDFS path from relative paths.

  Args:
    :ctx: TFNodeContext containing the metadata specific to this node in the cluster.
    :path: path to convert

  Returns:
    An absolute path prefixed with the correct filesystem scheme.
def hdfs_path(ctx, path):
  # All Hadoop-Compatible File System Schemes (as of Hadoop 3.0.x):
  HADOOP_SCHEMES = ['adl://', 'file://', 'hdfs://', 'oss://', 's3://', 's3a://', 's3n://', 'swift://', 'viewfs://', 'wasb://']
  if (any(path.startswith(scheme) for scheme in HADOOP_SCHEMES)):
    # absolute path w/ scheme, just return as-is
    return path
  elif path.startswith("/"):
    # absolute path w/o scheme, just prepend w/ defaultFS
    return ctx.defaultFS + path
  else:
    # relative path, prepend defaultFS + standard working dir
    if ctx.defaultFS.startswith("hdfs://") or ctx.defaultFS.startswith("viewfs://"):
      return "{0}/user/{1}/{2}".format(ctx.defaultFS, getpass.getuser(), path)
    elif ctx.defaultFS.startswith("file://"):
      return "{0}/{1}/{2}".format(ctx.defaultFS, ctx.working_dir[1:], path)
    else:
      logging.warn("Unknown scheme {0} with relative path: {1}".format(ctx.defaultFS, path))
      return "{0}/{1}".format(ctx.defaultFS, path)
103,168
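A quick sketch of the resolution rules above, assuming a hypothetical stand-in context object that only carries the two attributes hdfs_path() actually reads (the real TFNodeContext has more fields):

from collections import namedtuple

# FakeContext is a hypothetical stand-in for TFNodeContext, used only for illustration.
FakeContext = namedtuple('FakeContext', ['defaultFS', 'working_dir'])
ctx = FakeContext(defaultFS='hdfs://namenode:8020', working_dir='/home/alice')

hdfs_path(ctx, 'hdfs://namenode:8020/data/train')  # already has a scheme -> returned as-is
hdfs_path(ctx, '/data/train')                      # 'hdfs://namenode:8020/data/train'
hdfs_path(ctx, 'data/train')                       # 'hdfs://namenode:8020/user/<current user>/data/train'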
Push a batch of output results to the Spark output RDD of ``TFCluster.inference()``.

  Note: this currently expects a one-to-one mapping of input to output data, so the length of the
  ``results`` array should match the length of the previously retrieved batch of input data.

  Args:
    :results: array of output data for the equivalent batch of input data.
def batch_results(self, results):
  logging.debug("batch_results() invoked")
  queue = self.mgr.get_queue(self.qname_out)
  for item in results:
    queue.put(item, block=True)
  logging.debug("batch_results() returning data")
103,173
Get list of free GPUs according to nvidia-smi.

  This will retry for ``MAX_RETRIES`` times until the requested number of GPUs are available.

  Args:
    :num_gpu: number of GPUs desired.
    :worker_index: index "hint" for allocation of available GPUs.

  Returns:
    Comma-delimited string of GPU ids, or raises an Exception if the requested number of GPUs could not be found.
def get_gpus(num_gpu=1, worker_index=-1):
  # get list of gpus (index, uuid)
  list_gpus = subprocess.check_output(["nvidia-smi", "--list-gpus"]).decode()
  logging.debug("all GPUs:\n{0}".format(list_gpus))

  # parse index and guid
  gpus = [x for x in list_gpus.split('\n') if len(x) > 0]

  def parse_gpu(gpu_str):
    cols = gpu_str.split(' ')
    return cols[5].split(')')[0], cols[1].split(':')[0]

  gpu_list = [parse_gpu(gpu) for gpu in gpus]

  free_gpus = []
  retries = 0
  while len(free_gpus) < num_gpu and retries < MAX_RETRIES:
    smi_output = subprocess.check_output(["nvidia-smi", "--format=csv,noheader,nounits", "--query-compute-apps=gpu_uuid"]).decode()
    logging.debug("busy GPUs:\n{0}".format(smi_output))
    busy_uuids = [x for x in smi_output.split('\n') if len(x) > 0]
    for uuid, index in gpu_list:
      if uuid not in busy_uuids:
        free_gpus.append(index)

    if len(free_gpus) < num_gpu:
      logging.warn("Unable to find available GPUs: requested={0}, available={1}".format(num_gpu, len(free_gpus)))
      retries += 1
      time.sleep(30 * retries)
      free_gpus = []

  logging.info("Available GPUs: {}".format(free_gpus))

  # if still can't find available GPUs, raise exception
  if len(free_gpus) < num_gpu:
    smi_output = subprocess.check_output(["nvidia-smi", "--format=csv", "--query-compute-apps=gpu_uuid,pid,process_name,used_gpu_memory"]).decode()
    logging.info(": {0}".format(smi_output))
    raise Exception("Unable to find {} free GPU(s)\n{}".format(num_gpu, smi_output))

  # Get logical placement
  num_available = len(free_gpus)
  if worker_index == -1:
    # use original random placement
    random.shuffle(free_gpus)
    proposed_gpus = free_gpus[:num_gpu]
  else:
    # ordered by worker index
    if worker_index * num_gpu + num_gpu > num_available:
      worker_index = worker_index * num_gpu % num_available
    proposed_gpus = free_gpus[worker_index * num_gpu:(worker_index * num_gpu + num_gpu)]
  logging.info("Proposed GPUs: {}".format(proposed_gpus))
  return ','.join(str(x) for x in proposed_gpus)
103,182
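A minimal usage sketch, assuming nvidia-smi is on the PATH and MAX_RETRIES is defined at module level; the returned comma-delimited string is typically exported so that TensorFlow only sees the allocated devices:

import os

gpu_ids = get_gpus(num_gpu=1, worker_index=0)  # e.g. '0'
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_ids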
Get available GPUs according to utilization thresholds.

  Args:
    :max_gpu_utilization: percent utilization threshold to consider a GPU "free"
    :min_free_memory: percent free memory to consider a GPU "free"
    :num_gpu: number of requested GPUs

  Returns:
    A tuple of (available_gpus, minimum_free_memory), where available_gpus is a comma-delimited string of GPU ids,
    and minimum_free_memory is the lowest amount of free memory available on the available_gpus.
def _get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=1):
  def get_gpu_info():
    # Get the gpu information
    gpu_info = subprocess.check_output(["nvidia-smi", "--format=csv,noheader,nounits", "--query-gpu=index,memory.total,memory.free,memory.used,utilization.gpu"]).decode()
    gpu_info = gpu_info.split('\n')
    gpu_info_array = []

    # Check each gpu
    for line in gpu_info:
      if len(line) > 0:
        gpu_id, total_memory, free_memory, used_memory, gpu_util = line.split(',')
        gpu_memory_util = float(used_memory) / float(total_memory)
        gpu_info_array.append((float(gpu_util), gpu_memory_util, gpu_id))

    return(gpu_info_array)

  # Read the gpu information multiple times
  num_times_to_average = 5
  current_array = []
  for ind in range(num_times_to_average):
    current_array.append(get_gpu_info())
    time.sleep(1)

  # Get number of gpus
  num_gpus = len(current_array[0])

  # Average the gpu information
  avg_array = [(0, 0, str(x)) for x in range(num_gpus)]
  for ind in range(num_times_to_average):
    for gpu_ind in range(num_gpus):
      avg_array[gpu_ind] = (avg_array[gpu_ind][0] + current_array[ind][gpu_ind][0], avg_array[gpu_ind][1] + current_array[ind][gpu_ind][1], avg_array[gpu_ind][2])

  for gpu_ind in range(num_gpus):
    avg_array[gpu_ind] = (float(avg_array[gpu_ind][0]) / num_times_to_average, float(avg_array[gpu_ind][1]) / num_times_to_average, avg_array[gpu_ind][2])

  avg_array.sort()

  gpus_found = 0
  gpus_to_use = ""
  free_memory = 1.0

  # Return the least utilized GPUs if it's utilized less than max_gpu_utilization and amount of free memory is at least min_free_memory
  # Otherwise, run in cpu only mode
  for current_gpu in avg_array:
    if current_gpu[0] < max_gpu_utilization and (1 - current_gpu[1]) > min_free_memory:
      if gpus_found == 0:
        gpus_to_use = current_gpu[2]
        free_memory = 1 - current_gpu[1]
      else:
        gpus_to_use = gpus_to_use + "," + current_gpu[2]
        free_memory = min(free_memory, 1 - current_gpu[1])
      gpus_found = gpus_found + 1
      if gpus_found == num_gpu:
        break

  return gpus_to_use, free_memory
103,183
Build an Example proto for an example.

  Args:
    filename: string, path to an image file, e.g., '/path/to/example.JPG'
    image_buffer: string, JPEG encoding of RGB image
    label: integer, identifier for the ground truth for the network
    text: string, unique human-readable, e.g. 'dog'
    height: integer, image height in pixels
    width: integer, image width in pixels

  Returns:
    Example proto
def _convert_to_example(filename, image_buffer, label, text, height, width):
  colorspace = 'RGB'
  channels = 3
  image_format = 'JPEG'

  example = tf.train.Example(features=tf.train.Features(feature={
      'image/height': _int64_feature(height),
      'image/width': _int64_feature(width),
      'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),
      'image/channels': _int64_feature(channels),
      'image/class/label': _int64_feature(label),
      'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
      'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),
      'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),
      'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
  return example
103,184
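A hedged sketch of how such an Example proto would typically be serialized into a TFRecord shard (TF 1.x writer API; the file paths and label are placeholders):

with open('/path/to/example.JPG', 'rb') as f:  # placeholder path
  image_buffer = f.read()

example = _convert_to_example('/path/to/example.JPG', image_buffer,
                              label=7, text='dog', height=224, width=224)
writer = tf.python_io.TFRecordWriter('train-00000-of-01024')
writer.write(example.SerializeToString())
writer.close()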
Process and save list of images as TFRecord of Example protos.

  Args:
    name: string, unique identifier specifying the data set
    filenames: list of strings; each string is a path to an image file
    texts: list of strings; each string is human readable, e.g. 'dog'
    labels: list of integer; each integer identifies the ground truth
    num_shards: integer number of shards for this data set.
def _process_image_files(name, filenames, texts, labels, num_shards):
  assert len(filenames) == len(texts)
  assert len(filenames) == len(labels)

  # Break all images into batches with a [ranges[i][0], ranges[i][1]].
  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
  ranges = []
  for i in range(len(spacing) - 1):
    ranges.append([spacing[i], spacing[i+1]])

  # Launch a thread for each batch.
  print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
  sys.stdout.flush()

  # Create a mechanism for monitoring when all threads are finished.
  coord = tf.train.Coordinator()

  # Create a generic TensorFlow-based utility for converting all image codings.
  coder = ImageCoder()

  threads = []
  for thread_index in range(len(ranges)):
    args = (coder, thread_index, ranges, name, filenames, texts, labels, num_shards)
    t = threading.Thread(target=_process_image_files_batch, args=args)
    t.start()
    threads.append(t)

  # Wait for all the threads to terminate.
  coord.join(threads)
  print('%s: Finished writing all %d images in data set.' % (datetime.now(), len(filenames)))
  sys.stdout.flush()
103,186
Process a complete data set and save it as a TFRecord.

  Args:
    name: string, unique identifier specifying the data set.
    directory: string, root path to the data set.
    num_shards: integer number of shards for this data set.
    labels_file: string, path to the labels file.
def _process_dataset(name, directory, num_shards, labels_file):
  filenames, texts, labels = _find_image_files(directory, labels_file)
  _process_image_files(name, filenames, texts, labels, num_shards)
103,188
Decode a JPEG string into one 3-D float image Tensor.

  Args:
    image_buffer: scalar string Tensor.
    scope: Optional scope for name_scope.

  Returns:
    3-D float Tensor with values ranging from [0, 1).
def decode_jpeg(image_buffer, scope=None):
  with tf.name_scope(values=[image_buffer], name=scope, default_name='decode_jpeg'):
    # Decode the string as an RGB JPEG.
    # Note that the resulting image contains an unknown height and width
    # that is set dynamically by decode_jpeg. In other words, the height
    # and width of image is unknown at compile-time.
    image = tf.image.decode_jpeg(image_buffer, channels=3)

    # After this point, all image pixels reside in [0,1)
    # until the very end, when they're rescaled to (-1, 1). The various
    # adjust_* ops all require this range for dtype float.
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    return image
103,193
Distort the color of the image.

  Each color distortion is non-commutative and thus ordering of the color ops
  matters. Ideally we would randomly permute the ordering of the color ops.
  Rather than adding that level of complication, we select a distinct ordering
  of color ops for each preprocessing thread.

  Args:
    image: Tensor containing single image.
    thread_id: preprocessing thread ID.
    scope: Optional scope for name_scope.

  Returns:
    color-distorted image
def distort_color(image, thread_id=0, scope=None):
  with tf.name_scope(values=[image], name=scope, default_name='distort_color'):
    color_ordering = thread_id % 2

    if color_ordering == 0:
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
    elif color_ordering == 1:
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)

    # The random_* ops do not necessarily clamp.
    image = tf.clip_by_value(image, 0.0, 1.0)
    return image
103,194
Prepare one image for evaluation.

  Args:
    image: 3-D float Tensor
    height: integer
    width: integer
    scope: Optional scope for name_scope.

  Returns:
    3-D float Tensor of prepared image.
def eval_image(image, height, width, scope=None):
  with tf.name_scope(values=[image, height, width], name=scope, default_name='eval_image'):
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    image = tf.image.central_crop(image, central_fraction=0.875)

    # Resize the image to the original height and width.
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
    image = tf.squeeze(image, [0])
    return image
103,196
Export to SavedModel format.

  Args:
    model: Estimator object
    model_type: string indicating model type. "wide", "deep" or "wide_deep"
    export_dir: directory to export the model.
    model_column_fn: Function to generate model feature columns.
def export_model(model, model_type, export_dir, model_column_fn):
  wide_columns, deep_columns = model_column_fn()
  if model_type == 'wide':
    columns = wide_columns
  elif model_type == 'deep':
    columns = deep_columns
  else:
    columns = wide_columns + deep_columns
  feature_spec = tf.feature_column.make_parse_example_spec(columns)
  example_input_fn = (
      tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec))
  model.export_savedmodel(export_dir, example_input_fn, strip_default_attrs=True)
103,229
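A hedged call sketch; `estimator` and `build_model_columns` are hypothetical placeholders for a trained wide-and-deep Estimator and a helper returning (wide_columns, deep_columns):

export_model(model=estimator,                       # assumed: a trained tf.estimator.Estimator
             model_type='wide_deep',
             export_dir='/tmp/census_saved_model',  # placeholder directory
             model_column_fn=build_model_columns)   # assumed helper, not part of this snippet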
Converts `int_or_tuple` to height, width.

  Several of the functions that follow accept arguments as either a tuple of
  2 integers or a single integer. A single integer indicates that the 2 values
  of the tuple are the same. This function normalizes the input value by always
  returning a tuple.

  Args:
    int_or_tuple: A list of 2 ints, a single int or a tf.TensorShape.

  Returns:
    A tuple with 2 values.

  Raises:
    ValueError: If `int_or_tuple` is not well formed.
def _two_element_tuple(int_or_tuple):
  if isinstance(int_or_tuple, (list, tuple)):
    if len(int_or_tuple) != 2:
      raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple)
    return int(int_or_tuple[0]), int(int_or_tuple[1])
  if isinstance(int_or_tuple, int):
    return int(int_or_tuple), int(int_or_tuple)
  if isinstance(int_or_tuple, tf.TensorShape):
    if len(int_or_tuple) == 2:
      return int_or_tuple[0], int_or_tuple[1]
  raise ValueError('Must be an int, a list with 2 elements or a TensorShape of '
                   'length 2')
103,235
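For illustration, the normalization behaves as follows (a tf.TensorShape of length 2 is likewise unpacked into its two dimensions):

_two_element_tuple(3)       # (3, 3)
_two_element_tuple([5, 7])  # (5, 7)
_two_element_tuple((5,))    # raises ValueError: must have exactly 2 elements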
Transform numeric labels into onehot_labels.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    scope: Optional scope for name_scope.

  Returns:
    one hot encoding of the labels.
def one_hot_encoding(labels, num_classes, scope=None):
  with tf.name_scope(scope, 'OneHotEncoding', [labels]):
    batch_size = labels.get_shape()[0]
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
    concated = tf.concat(axis=1, values=[indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, num_classes]), 1.0, 0.0)
    onehot_labels.set_shape([batch_size, num_classes])
    return onehot_labels
103,238
Returns a dropout layer applied to the input.

  Args:
    inputs: the tensor to pass to the Dropout layer.
    keep_prob: the probability of keeping each input unit.
    is_training: whether or not the model is in training mode. If so, dropout is
      applied and values scaled. Otherwise, inputs is returned.
    scope: Optional scope for name_scope.

  Returns:
    a tensor representing the output of the operation.
def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):
  if is_training and keep_prob > 0:
    with tf.name_scope(scope, 'Dropout', [inputs]):
      return tf.nn.dropout(inputs, keep_prob)
  else:
    return inputs
103,240
Flattens the input while maintaining the batch_size.

  Assumes that the first dimension represents the batch.

  Args:
    inputs: a tensor of size [batch_size, ...].
    scope: Optional scope for name_scope.

  Returns:
    a flattened tensor with shape [batch_size, k].

  Raises:
    ValueError: if inputs.shape is wrong.
def flatten(inputs, scope=None):
  if len(inputs.get_shape()) < 2:
    raise ValueError('Inputs must have at least 2 dimensions')
  dims = inputs.get_shape()[1:]
  k = dims.num_elements()
  with tf.name_scope(scope, 'Flatten', [inputs]):
    return tf.reshape(inputs, [-1, k])
103,241
Constructor.

  Args:
    channel: A grpc.Channel.
def __init__(self, channel):
  self.CheckConfig = channel.unary_unary(
      '/pulumirpc.ResourceProvider/CheckConfig',
      request_serializer=provider__pb2.CheckRequest.SerializeToString,
      response_deserializer=provider__pb2.CheckResponse.FromString)
  self.DiffConfig = channel.unary_unary(
      '/pulumirpc.ResourceProvider/DiffConfig',
      request_serializer=provider__pb2.DiffRequest.SerializeToString,
      response_deserializer=provider__pb2.DiffResponse.FromString)
  self.Configure = channel.unary_unary(
      '/pulumirpc.ResourceProvider/Configure',
      request_serializer=provider__pb2.ConfigureRequest.SerializeToString,
      response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
  self.Invoke = channel.unary_unary(
      '/pulumirpc.ResourceProvider/Invoke',
      request_serializer=provider__pb2.InvokeRequest.SerializeToString,
      response_deserializer=provider__pb2.InvokeResponse.FromString)
  self.Check = channel.unary_unary(
      '/pulumirpc.ResourceProvider/Check',
      request_serializer=provider__pb2.CheckRequest.SerializeToString,
      response_deserializer=provider__pb2.CheckResponse.FromString)
  self.Diff = channel.unary_unary(
      '/pulumirpc.ResourceProvider/Diff',
      request_serializer=provider__pb2.DiffRequest.SerializeToString,
      response_deserializer=provider__pb2.DiffResponse.FromString)
  self.Create = channel.unary_unary(
      '/pulumirpc.ResourceProvider/Create',
      request_serializer=provider__pb2.CreateRequest.SerializeToString,
      response_deserializer=provider__pb2.CreateResponse.FromString)
  self.Read = channel.unary_unary(
      '/pulumirpc.ResourceProvider/Read',
      request_serializer=provider__pb2.ReadRequest.SerializeToString,
      response_deserializer=provider__pb2.ReadResponse.FromString)
  self.Update = channel.unary_unary(
      '/pulumirpc.ResourceProvider/Update',
      request_serializer=provider__pb2.UpdateRequest.SerializeToString,
      response_deserializer=provider__pb2.UpdateResponse.FromString)
  self.Delete = channel.unary_unary(
      '/pulumirpc.ResourceProvider/Delete',
      request_serializer=provider__pb2.DeleteRequest.SerializeToString,
      response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
  self.Cancel = channel.unary_unary(
      '/pulumirpc.ResourceProvider/Cancel',
      request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
      response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
  self.GetPluginInfo = channel.unary_unary(
      '/pulumirpc.ResourceProvider/GetPluginInfo',
      request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
      response_deserializer=plugin__pb2.PluginInfo.FromString)
103,246
Constructor.

  Args:
    channel: A grpc.Channel.
def __init__(self, channel):
  self.Log = channel.unary_unary(
      '/pulumirpc.Engine/Log',
      request_serializer=engine__pb2.LogRequest.SerializeToString,
      response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
  self.GetRootResource = channel.unary_unary(
      '/pulumirpc.Engine/GetRootResource',
      request_serializer=engine__pb2.GetRootResourceRequest.SerializeToString,
      response_deserializer=engine__pb2.GetRootResourceResponse.FromString)
  self.SetRootResource = channel.unary_unary(
      '/pulumirpc.Engine/SetRootResource',
      request_serializer=engine__pb2.SetRootResourceRequest.SerializeToString,
      response_deserializer=engine__pb2.SetRootResourceResponse.FromString)
103,249
Constructor.

  Args:
    channel: A grpc.Channel.
def __init__(self, channel):
  self.Analyze = channel.unary_unary(
      '/pulumirpc.Analyzer/Analyze',
      request_serializer=analyzer__pb2.AnalyzeRequest.SerializeToString,
      response_deserializer=analyzer__pb2.AnalyzeResponse.FromString)
  self.GetPluginInfo = channel.unary_unary(
      '/pulumirpc.Analyzer/GetPluginInfo',
      request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
      response_deserializer=plugin__pb2.PluginInfo.FromString)
103,261
Constructor.

  Args:
    channel: A grpc.Channel.
def __init__(self, channel):
  self.Invoke = channel.unary_unary(
      '/pulumirpc.ResourceMonitor/Invoke',
      request_serializer=provider__pb2.InvokeRequest.SerializeToString,
      response_deserializer=provider__pb2.InvokeResponse.FromString)
  self.ReadResource = channel.unary_unary(
      '/pulumirpc.ResourceMonitor/ReadResource',
      request_serializer=resource__pb2.ReadResourceRequest.SerializeToString,
      response_deserializer=resource__pb2.ReadResourceResponse.FromString)
  self.RegisterResource = channel.unary_unary(
      '/pulumirpc.ResourceMonitor/RegisterResource',
      request_serializer=resource__pb2.RegisterResourceRequest.SerializeToString,
      response_deserializer=resource__pb2.RegisterResourceResponse.FromString)
  self.RegisterResourceOutputs = channel.unary_unary(
      '/pulumirpc.ResourceMonitor/RegisterResourceOutputs',
      request_serializer=resource__pb2.RegisterResourceOutputsRequest.SerializeToString,
      response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString)
103,304
Constructor.

  Args:
    channel: A grpc.Channel.
def __init__(self, channel):
  self.GetRequiredPlugins = channel.unary_unary(
      '/pulumirpc.LanguageRuntime/GetRequiredPlugins',
      request_serializer=language__pb2.GetRequiredPluginsRequest.SerializeToString,
      response_deserializer=language__pb2.GetRequiredPluginsResponse.FromString)
  self.Run = channel.unary_unary(
      '/pulumirpc.LanguageRuntime/Run',
      request_serializer=language__pb2.RunRequest.SerializeToString,
      response_deserializer=language__pb2.RunResponse.FromString)
  self.GetPluginInfo = channel.unary_unary(
      '/pulumirpc.LanguageRuntime/GetPluginInfo',
      request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
      response_deserializer=plugin__pb2.PluginInfo.FromString)
103,312
Check if the specified event has the specified handler.

  Args:
    handler (callable): the callable event handler.
    event_name: The event the handler attached to. Set this to ``None`` to search all events.
def has_event_handler(self, handler, event_name=None):
    if event_name is not None:
        if event_name not in self._event_handlers:
            return False
        events = [event_name]
    else:
        events = self._event_handlers
    for e in events:
        for h, _, _ in self._event_handlers[e]:
            if h == handler:
                return True
    return False
103,322
Remove event handler `handler` from registered handlers of the engine.

  Args:
    handler (callable): the callable event handler that should be removed
    event_name: The event the handler attached to.
def remove_event_handler(self, handler, event_name):
    if event_name not in self._event_handlers:
        raise ValueError("Input event name '{}' does not exist".format(event_name))

    new_event_handlers = [(h, args, kwargs)
                          for h, args, kwargs in self._event_handlers[event_name]
                          if h != handler]
    if len(new_event_handlers) == len(self._event_handlers[event_name]):
        raise ValueError("Input handler '{}' is not found among registered event handlers".format(handler))
    self._event_handlers[event_name] = new_event_handlers
103,323
Decorator shortcut for add_event_handler.

  Args:
    event_name: An event to attach the handler to. Valid events are from
      :class:`~ignite.engine.Events` or any `event_name` added by
      :meth:`~ignite.engine.Engine.register_events`.
    *args: optional args to be passed to `handler`.
    **kwargs: optional keyword args to be passed to `handler`.
def on(self, event_name, *args, **kwargs):
    def decorator(f):
        self.add_event_handler(event_name, f, *args, **kwargs)
        return f
    return decorator
103,325
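A short usage sketch of the decorator form, assuming `trainer` is an existing ignite Engine:

from ignite.engine import Events

@trainer.on(Events.EPOCH_COMPLETED)
def log_epoch(engine):
    # runs after every epoch
    print("Epoch {} finished".format(engine.state.epoch))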
Runs the process_function over the passed data.

  Args:
    data (Iterable): Collection of batches allowing repeated iteration (e.g., list or `DataLoader`).
    max_epochs (int, optional): max epochs to run for (default: 1).

  Returns:
    State: output state.
def run(self, data, max_epochs=1):
    self.state = State(dataloader=data, epoch=0, max_epochs=max_epochs, metrics={})

    try:
        self._logger.info("Engine run starting with max_epochs={}.".format(max_epochs))
        start_time = time.time()
        self._fire_event(Events.STARTED)
        while self.state.epoch < max_epochs and not self.should_terminate:
            self.state.epoch += 1
            self._fire_event(Events.EPOCH_STARTED)
            hours, mins, secs = self._run_once_on_dataset()
            self._logger.info("Epoch[%s] Complete. Time taken: %02d:%02d:%02d", self.state.epoch, hours, mins, secs)
            if self.should_terminate:
                break
            self._fire_event(Events.EPOCH_COMPLETED)

        self._fire_event(Events.COMPLETED)
        time_taken = time.time() - start_time
        hours, mins, secs = _to_hours_mins_secs(time_taken)
        self._logger.info("Engine run complete. Time taken %02d:%02d:%02d" % (hours, mins, secs))

    except BaseException as e:
        self._logger.error("Engine run is terminating due to exception: %s.", str(e))
        self._handle_exception(e)

    return self.state
103,329
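A minimal sketch of driving run(); the process function here just echoes each batch:

from ignite.engine import Engine

def process(engine, batch):
    return batch

engine = Engine(process)
state = engine.run(data=[1, 2, 3], max_epochs=2)
print(state.epoch)  # 2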
Calculates accuracy using :class:`~ignite.metrics.ConfusionMatrix` metric.

  Args:
    cm (ConfusionMatrix): instance of confusion matrix metric

  Returns:
    MetricsLambda
def cmAccuracy(cm):
    # Increase floating point precision
    cm = cm.type(torch.float64)
    return cm.diag().sum() / (cm.sum() + 1e-15)
103,359
Calculates precision using :class:`~ignite.metrics.ConfusionMatrix` metric.

  Args:
    cm (ConfusionMatrix): instance of confusion matrix metric
    average (bool, optional): if True metric value is averaged over all classes

  Returns:
    MetricsLambda
def cmPrecision(cm, average=True):
    # Increase floating point precision
    cm = cm.type(torch.float64)
    precision = cm.diag() / (cm.sum(dim=0) + 1e-15)

    if average:
        return precision.mean()
    return precision
103,360
Calculates recall using :class:`~ignite.metrics.ConfusionMatrix` metric.

  Args:
    cm (ConfusionMatrix): instance of confusion matrix metric
    average (bool, optional): if True metric value is averaged over all classes

  Returns:
    MetricsLambda
def cmRecall(cm, average=True):
    # Increase floating point precision
    cm = cm.type(torch.float64)
    recall = cm.diag() / (cm.sum(dim=1) + 1e-15)

    if average:
        return recall.mean()
    return recall
103,361
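Because these helpers return MetricsLambda objects, they compose lazily; a hedged sketch of deriving a macro F1 from the same confusion matrix (ConfusionMatrix and MetricsLambda are assumed to come from ignite.metrics):

from ignite.metrics import ConfusionMatrix, MetricsLambda

cm = ConfusionMatrix(num_classes=10)
precision = cmPrecision(cm, average=False)
recall = cmRecall(cm, average=False)
f1 = precision * recall * 2 / (precision + recall + 1e-15)
macro_f1 = MetricsLambda(lambda t: t.mean().item(), f1)  # attach macro_f1 to an evaluator as usual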
Method to simulate scheduled values during num_events events.

  Args:
    num_events (int): number of events during the simulation.
    lr_scheduler (subclass of `torch.optim.lr_scheduler._LRScheduler`): lr_scheduler object to wrap.

  Returns:
    list of pairs: [event_index, value]
def simulate_values(cls, num_events, lr_scheduler, **kwargs):
    # This scheduler uses `torch.optim.lr_scheduler._LRScheduler` which
    # should be replicated in order to simulate LR values and
    # not perturb original scheduler.
    copy_lr_scheduler = LRScheduler._replicate_lr_scheduler(lr_scheduler)
    values = []
    scheduler = cls(save_history=False, lr_scheduler=copy_lr_scheduler)
    for i in range(num_events):
        scheduler(engine=None)
        values.append([i, scheduler.optimizer_param_groups[0][scheduler.param_name]])
    return values
103,419
Attach the logger to the engine and execute `log_handler` function at `event_name` events.

  Args:
    engine (Engine): engine object.
    log_handler (callable): a logging handler to execute
    event_name: event to attach the logging handler to. Valid events are from
      :class:`~ignite.engine.Events` or any `event_name` added by
      :meth:`~ignite.engine.Engine.register_events`.
def attach(self, engine, log_handler, event_name):
    if event_name not in State.event_to_attr:
        raise RuntimeError("Unknown event name '{}'".format(event_name))

    engine.add_event_handler(event_name, log_handler, self, event_name)
103,457
Iterate until condition is met, with optional timeout in seconds.

  The yielded value is that of the condition or False when timed out.

  Args:
    condition: Predicate function that is tested after every network update.
    timeout: Maximum time in seconds to wait. If 0 then no timeout is used.
def loopUntil(
        self, condition=None, timeout: float = 0) -> Iterator[object]:
    endTime = time.time() + timeout
    while True:
        test = condition and condition()
        if test:
            yield test
            return
        elif timeout and time.time() > endTime:
            yield False
            return
        else:
            yield test
        self.waitOnUpdate(endTime - time.time() if timeout else 0)
104,822
List of account values for the given account, or of all accounts if account is left blank.

  Args:
    account: If specified, filter for this account name.
def accountValues(self, account: str = '') -> List[AccountValue]:
    if account:
        return [v for v in self.wrapper.accountValues.values()
                if v.account == account]
    else:
        return list(self.wrapper.accountValues.values())
104,823
List of account values for the given account, or of all accounts if account is left blank.

  This method is blocking on first run, non-blocking after that.

  Args:
    account: If specified, filter for this account name.
def accountSummary(self, account: str = '') -> List[AccountValue]:
    if not self.wrapper.acctSummary:
        # loaded on demand since it takes ca. 250 ms
        self.reqAccountSummary()
    if account:
        return [v for v in self.wrapper.acctSummary.values()
                if v.account == account]
    else:
        return list(self.wrapper.acctSummary.values())
104,824
List of positions for the given account, or of all accounts if account is left blank.

  Args:
    account: If specified, filter for this account name.
def positions(self, account: str = '') -> List[Position]:
    if account:
        return list(self.wrapper.positions[account].values())
    else:
        return [v for d in self.wrapper.positions.values()
                for v in d.values()]
104,826
List of subscribed :class:`.PnL` objects (profit and loss), optionally filtered by account and/or modelCode.

  The :class:`.PnL` objects are kept live updated.

  Args:
    account: If specified, filter for this account name.
    modelCode: If specified, filter for this account model.
def pnl(self, account='', modelCode='') -> List[PnL]:
    return [v for v in self.wrapper.pnls.values()
            if (not account or v.account == account) and
            (not modelCode or v.modelCode == modelCode)]
104,827
List of subscribed :class:`.PnLSingle` objects (profit and loss for single positions).

  The :class:`.PnLSingle` objects are kept live updated.

  Args:
    account: If specified, filter for this account name.
    modelCode: If specified, filter for this account model.
    conId: If specified, filter for this contract ID.
def pnlSingle(
        self, account: str = '', modelCode: str = '',
        conId: int = 0) -> List[PnLSingle]:
    return [v for v in self.wrapper.pnlSingles.values()
            if (not account or v.account == account) and
            (not modelCode or v.modelCode == modelCode) and
            (not conId or v.conId == conId)]
104,828
Get ticker of the given contract.

  It must have been requested before with reqMktData with the same contract object.
  The ticker may not be ready yet if called directly after :meth:`.reqMktData`.

  Args:
    contract: Contract to get ticker for.
def ticker(self, contract: Contract) -> Ticker:
    return self.wrapper.tickers.get(id(contract))
104,833
Request and return a list of snapshot tickers.

  The list is returned when all tickers are ready. This method is blocking.

  Args:
    contracts: Contracts to get tickers for.
    regulatorySnapshot: Request NBBO snapshots (may incur a fee).
def reqTickers(
        self, *contracts: List[Contract],
        regulatorySnapshot: bool = False) -> List[Ticker]:
    return self._run(
        self.reqTickersAsync(
            *contracts, regulatorySnapshot=regulatorySnapshot))
104,834
Fully qualify the given contracts in-place.

  This will fill in the missing fields in the contract, especially the conId.
  Returns a list of contracts that have been successfully qualified.
  This method is blocking.

  Args:
    contracts: Contracts to qualify.
def qualifyContracts(self, *contracts: List[Contract]) -> List[Contract]:
    return self._run(self.qualifyContractsAsync(*contracts))
104,835
Place the trades in the same One Cancels All (OCA) group.

  https://interactivebrokers.github.io/tws-api/oca.html

  Args:
    orders: The orders that are to be placed together.
def oneCancelsAll(
        orders: List[Order], ocaGroup: str, ocaType: int) -> List[Order]:
    for o in orders:
        o.ocaGroup = ocaGroup
        o.ocaType = ocaType
    return orders
104,837
Retrieve commission and margin impact without actually placing the order.

  The given order will not be modified in any way. This method is blocking.

  Args:
    contract: Contract to test.
    order: Order to test.
def whatIfOrder(self, contract: Contract, order: Order) -> OrderState:
    return self._run(self.whatIfOrderAsync(contract, order))
104,838
Place a new order or modify an existing order.

  Returns a Trade that is kept live updated with status changes, fills, etc.

  Args:
    contract: Contract to use for order.
    order: The order to be placed.
def placeOrder(self, contract: Contract, order: Order) -> Trade:
    orderId = order.orderId or self.client.getReqId()
    self.client.placeOrder(orderId, contract, order)
    now = datetime.datetime.now(datetime.timezone.utc)
    key = self.wrapper.orderKey(
        self.wrapper.clientId, orderId, order.permId)
    trade = self.wrapper.trades.get(key)
    if trade:
        # this is a modification of an existing order
        assert trade.orderStatus.status not in OrderStatus.DoneStates
        logEntry = TradeLogEntry(now, trade.orderStatus.status, 'Modify')
        trade.log.append(logEntry)
        self._logger.info(f'placeOrder: Modify order {trade}')
        trade.modifyEvent.emit(trade)
        self.orderModifyEvent.emit(trade)
    else:
        # this is a new order
        order.clientId = self.wrapper.clientId
        order.orderId = orderId
        orderStatus = OrderStatus(status=OrderStatus.PendingSubmit)
        logEntry = TradeLogEntry(now, orderStatus.status, '')
        trade = Trade(
            contract, order, orderStatus, [], [logEntry])
        self.wrapper.trades[key] = trade
        self._logger.info(f'placeOrder: New order {trade}')
        self.newOrderEvent.emit(trade)
    return trade
104,839
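A hedged end-to-end sketch with ib_insync: connect, qualify a contract, and submit a limit order; host, port and clientId are placeholders for a running TWS/Gateway session:

from ib_insync import IB, Stock, LimitOrder

ib = IB()
ib.connect('127.0.0.1', 7497, clientId=1)              # placeholder connection settings
contract = Stock('AAPL', 'SMART', 'USD')
ib.qualifyContracts(contract)
trade = ib.placeOrder(contract, LimitOrder('BUY', 100, 150.0))
ib.sleep(1)                                             # let the first status events arrive
print(trade.orderStatus.status)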
Cancel the order and return the Trade it belongs to.

  Args:
    order: The order to be canceled.
def cancelOrder(self, order: Order) -> Trade:
    self.client.cancelOrder(order.orderId)
    now = datetime.datetime.now(datetime.timezone.utc)
    key = self.wrapper.orderKey(
        order.clientId, order.orderId, order.permId)
    trade = self.wrapper.trades.get(key)
    if trade:
        if not trade.isDone():
            status = trade.orderStatus.status
            if (status == OrderStatus.PendingSubmit and not order.transmit
                    or status == OrderStatus.Inactive):
                newStatus = OrderStatus.Cancelled
            else:
                newStatus = OrderStatus.PendingCancel
            logEntry = TradeLogEntry(now, newStatus, '')
            trade.log.append(logEntry)
            trade.orderStatus.status = newStatus
            self._logger.info(f'cancelOrder: {trade}')
            trade.cancelEvent.emit(trade)
            trade.statusEvent.emit(trade)
            self.cancelOrderEvent.emit(trade)
            self.orderStatusEvent.emit(trade)
            if newStatus == OrderStatus.Cancelled:
                trade.cancelledEvent.emit(trade)
    else:
        self._logger.error(f'cancelOrder: Unknown orderId {order.orderId}')
    return trade
104,840
It is recommended to use :meth:`.accountValues` instead.

  Request account values of multiple accounts and keep updated.
  This method is blocking.

  Args:
    account: If specified, filter for this account name.
    modelCode: If specified, filter for this account model.
def reqAccountUpdatesMulti(
        self, account: str = '', modelCode: str = ''):
    self._run(self.reqAccountUpdatesMultiAsync(account, modelCode))
104,841
It is recommended to use :meth:`.fills` or :meth:`.executions` instead.

  Request and return a list of fills. This method is blocking.

  Args:
    execFilter: If specified, return executions that match the filter.
def reqExecutions(
        self, execFilter: ExecutionFilter = None) -> List[Fill]:
    return self._run(self.reqExecutionsAsync(execFilter))
104,842
Start a subscription for profit and loss events.

  Returns a :class:`.PnL` object that is kept live updated.
  The result can also be queried from :meth:`.pnl`.

  https://interactivebrokers.github.io/tws-api/pnl.html

  Args:
    account: Subscribe to this account.
    modelCode: If specified, filter for this account model.
def reqPnL(self, account: str, modelCode: str = '') -> PnL:
    key = (account, modelCode)
    assert key not in self.wrapper.pnlKey2ReqId
    reqId = self.client.getReqId()
    self.wrapper.pnlKey2ReqId[key] = reqId
    pnl = PnL(account, modelCode)
    self.wrapper.pnls[reqId] = pnl
    self.client.reqPnL(reqId, account, modelCode)
    return pnl
104,843
Cancel PnL subscription.

  Args:
    account: Cancel for this account.
    modelCode: If specified, cancel for this account model.
def cancelPnL(self, account, modelCode: str = ''):
    key = (account, modelCode)
    reqId = self.wrapper.pnlKey2ReqId.pop(key, None)
    if reqId:
        self.client.cancelPnL(reqId)
        self.wrapper.pnls.pop(reqId, None)
    else:
        self._logger.error(
            'cancelPnL: No subscription for '
            f'account {account}, modelCode {modelCode}')
104,844