docstring
function
__index_level_0__
Returns the type of the google.protobuf.Value message as an api.DataType. Returns None if the type of 'value' is not one of the types supported in api_pb2.DataType. Args: value: google.protobuf.Value message.
def _protobuf_value_type(value): if value.HasField("number_value"): return api_pb2.DATA_TYPE_FLOAT64 if value.HasField("string_value"): return api_pb2.DATA_TYPE_STRING if value.HasField("bool_value"): return api_pb2.DATA_TYPE_BOOL return None
57,297
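A quick illustration of the oneof dispatch that _protobuf_value_type relies on. This is a standalone sketch that only assumes the protobuf runtime is installed; the api_pb2 constants from the function above are not needed to see the HasField behavior.

from google.protobuf import struct_pb2

v = struct_pb2.Value()
v.number_value = 0.25
# Only the populated member of the 'kind' oneof reports True.
print(v.HasField("number_value"))  # True
print(v.HasField("string_value"))  # False
v2 = struct_pb2.Value(string_value="adam")
print(v2.WhichOneof("kind"))       # 'string_value'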
Returns a string representation of given google.protobuf.Value message. Args: value: google.protobuf.Value message. Assumed to be of type 'number', 'string' or 'bool'.
def _protobuf_value_to_string(value): value_in_json = json_format.MessageToJson(value) if value.HasField("string_value"): # Remove the quotations. return value_in_json[1:-1] return value_in_json
57,298
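The quote-stripping in _protobuf_value_to_string exists because MessageToJson renders a string Value as a quoted JSON literal while numbers and booleans come back unquoted. A minimal check, assuming only the protobuf package:

from google.protobuf import json_format, struct_pb2

print(json_format.MessageToJson(struct_pb2.Value(string_value="adam")))  # "adam" (with quotes)
print(json_format.MessageToJson(struct_pb2.Value(number_value=3.0)))     # 3.0
print(json_format.MessageToJson(struct_pb2.Value(bool_value=True)))      # true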
Instantiates a context. Args: tb_context: base_plugin.TBContext. The "base" context we extend. max_domain_discrete_len: int. Only used when computing the experiment from the session runs. The maximum number of distinct values a string hyperparameter can have for us to populate its 'domain_discrete' field. Typically, only tests should specify a value for this parameter.
def __init__(self, tb_context, max_domain_discrete_len=10): self._tb_context = tb_context self._experiment_from_tag = None self._experiment_from_tag_lock = threading.Lock() self._max_domain_discrete_len = max_domain_discrete_len
57,299
Builds an HParamInfo message from the hparam name and list of values. Args: name: string. The hparam name. values: list of google.protobuf.Value messages. The list of values for the hparam. Returns: An api_pb2.HParamInfo message.
def _compute_hparam_info_from_values(self, name, values): # Figure out the type from the values. # Ignore values whose type is not listed in api_pb2.DataType # If all values have the same type, then that is the type used. # Otherwise, the returned type is DATA_TYPE_STRING. result = api_pb2.HParamInfo(name=name, type=api_pb2.DATA_TYPE_UNSET) distinct_values = set( _protobuf_value_to_string(v) for v in values if _protobuf_value_type(v)) for v in values: v_type = _protobuf_value_type(v) if not v_type: continue if result.type == api_pb2.DATA_TYPE_UNSET: result.type = v_type elif result.type != v_type: result.type = api_pb2.DATA_TYPE_STRING if result.type == api_pb2.DATA_TYPE_STRING: # A string result.type does not change, so we can exit the loop. break # If we couldn't figure out a type, then we can't compute the hparam_info. if result.type == api_pb2.DATA_TYPE_UNSET: return None # If the result is a string, set the domain to be the distinct values if # there aren't too many of them. if (result.type == api_pb2.DATA_TYPE_STRING and len(distinct_values) <= self._max_domain_discrete_len): result.domain_discrete.extend(distinct_values) return result
57,303
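The type-resolution rule above (keep the single common type, otherwise fall back to string) can be exercised without building a Context. The following standalone sketch re-implements just that rule using WhichOneof; it is an illustration under that assumption, not the plugin's own API.

from google.protobuf import struct_pb2

def resolve_type(values):
    # Mirrors the loop above: skip unsupported kinds, keep a single common
    # kind, and fall back to 'string_value' as soon as kinds are mixed.
    result = None
    for v in values:
        kind = v.WhichOneof('kind')
        if kind not in ('number_value', 'string_value', 'bool_value'):
            continue
        if result is None:
            result = kind
        elif result != kind:
            return 'string_value'
    return result

print(resolve_type([struct_pb2.Value(number_value=1.0),
                    struct_pb2.Value(number_value=2.0)]))    # number_value
print(resolve_type([struct_pb2.Value(number_value=1.0),
                    struct_pb2.Value(string_value='adam')]))  # string_value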
Returns a summary holding the given HParamsPluginData message. Helper function. Args: tag: string. The tag to use. hparams_plugin_data: The HParamsPluginData message to use.
def _summary(tag, hparams_plugin_data): summary = tf.compat.v1.Summary() summary.value.add( tag=tag, metadata=metadata.create_summary_metadata(hparams_plugin_data)) return summary
57,312
List all the plugins that have registered assets in logdir. If the plugins_dir does not exist, it returns an empty list. This maintains compatibility with old directories that have no plugins written. Args: logdir: A directory that was created by a TensorFlow events writer. Returns: a list of plugin names, as strings
def ListPlugins(logdir): plugins_dir = os.path.join(logdir, _PLUGINS_DIR) try: entries = tf.io.gfile.listdir(plugins_dir) except tf.errors.NotFoundError: return [] # Strip trailing slashes, which listdir() includes for some filesystems # for subdirectories, after using them to bypass IsDirectory(). return [x.rstrip('/') for x in entries if x.endswith('/') or _IsDirectory(plugins_dir, x)]
57,314
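The directory layout ListPlugins expects is logdir/plugins/<plugin_name>/. A local-filesystem sketch of that layout; assuming tf.io.gfile is backed by the local filesystem, a plain os.listdir shows the same entries ListPlugins would report.

import os
import tempfile

logdir = tempfile.mkdtemp()
for plugin in ('projector', 'text'):
    os.makedirs(os.path.join(logdir, 'plugins', plugin))
# ListPlugins(logdir) would be expected to return these two names.
print(sorted(os.listdir(os.path.join(logdir, 'plugins'))))  # ['projector', 'text']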
List all the assets that are available for given plugin in a logdir. Args: logdir: A directory that was created by a TensorFlow summary.FileWriter. plugin_name: A string name of a plugin to list assets for. Returns: A string list of available plugin assets. If the plugin subdirectory does not exist (either because the logdir doesn't exist, or because the plugin didn't register) an empty list is returned.
def ListAssets(logdir, plugin_name): plugin_dir = PluginDirectory(logdir, plugin_name) try: # Strip trailing slashes, which listdir() includes for some filesystems. return [x.rstrip('/') for x in tf.io.gfile.listdir(plugin_dir)] except tf.errors.NotFoundError: return []
57,315
Retrieve a particular plugin asset from a logdir. Args: logdir: A directory that was created by a TensorFlow summary.FileWriter. plugin_name: The plugin we want an asset from. asset_name: The name of the requested asset. Returns: string contents of the plugin asset. Raises: KeyError: if the asset does not exist.
def RetrieveAsset(logdir, plugin_name, asset_name): asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name) try: with tf.io.gfile.GFile(asset_path, "r") as f: return f.read() except tf.errors.NotFoundError: raise KeyError("Asset path %s not found" % asset_path) except tf.errors.OpError as e: raise KeyError("Couldn't read asset path: %s, OpError %s" % (asset_path, e))
57,316
Instantiates DistributionsPlugin via TensorBoard core. Args: context: A base_plugin.TBContext instance.
def __init__(self, context): self._histograms_plugin = histograms_plugin.HistogramsPlugin(context) self._multiplexer = context.multiplexer
57,317
Constructs a new DirectoryWatcher. Args: directory: The directory to load files from. loader_factory: A factory for creating loaders. The factory should take a path and return an object that has a Load method returning an iterator that will yield all events that have not been yielded yet. path_filter: If specified, only paths matching this filter are loaded. Raises: ValueError: If directory or loader_factory is None.
def __init__(self, directory, loader_factory, path_filter=lambda x: True): if directory is None: raise ValueError('A directory is required') if loader_factory is None: raise ValueError('A loader factory is required') self._directory = directory self._path = None self._loader_factory = loader_factory self._loader = None self._path_filter = path_filter self._ooo_writes_detected = False # The file size for each file at the time it was finalized. self._finalized_sizes = {}
57,322
Sets the current path to watch for new events. This also records the size of the old path, if any. If the size can't be found, an error is logged. Args: path: The full path of the file to watch.
def _SetPath(self, path): old_path = self._path if old_path and not io_wrapper.IsCloudPath(old_path): try: # We're done with the path, so store its size. size = tf.io.gfile.stat(old_path).length logger.debug('Setting latest size of %s to %d', old_path, size) self._finalized_sizes[old_path] = size except tf.errors.OpError as e: logger.error('Unable to get size of %s: %s', old_path, e) self._path = path self._loader = self._loader_factory(path)
57,326
Send an RPC request to the Servomatic prediction service. Args: examples: A list of examples that matches the model spec. serving_bundle: A `ServingBundle` object that contains the information to make the serving request. Returns: A ClassificationResponse or RegressionResponse proto.
def call_servo(examples, serving_bundle): parsed_url = urlparse('http://' + serving_bundle.inference_address) channel = implementations.insecure_channel(parsed_url.hostname, parsed_url.port) stub = prediction_service_pb2.beta_create_PredictionService_stub(channel) if serving_bundle.use_predict: request = predict_pb2.PredictRequest() elif serving_bundle.model_type == 'classification': request = classification_pb2.ClassificationRequest() else: request = regression_pb2.RegressionRequest() request.model_spec.name = serving_bundle.model_name if serving_bundle.model_version is not None: request.model_spec.version.value = serving_bundle.model_version if serving_bundle.signature is not None: request.model_spec.signature_name = serving_bundle.signature if serving_bundle.use_predict: # tf.compat.v1 API used here to convert tf.example into proto. This # utility file is bundled in the witwidget pip package which has a dep # on TensorFlow. request.inputs[serving_bundle.predict_input_tensor].CopyFrom( tf.compat.v1.make_tensor_proto( values=[ex.SerializeToString() for ex in examples], dtype=types_pb2.DT_STRING)) else: request.input.example_list.examples.extend(examples) if serving_bundle.use_predict: return common_utils.convert_predict_response( stub.Predict(request, 30.0), serving_bundle) # 30 secs timeout elif serving_bundle.model_type == 'classification': return stub.Classify(request, 30.0) # 30 secs timeout else: return stub.Regress(request, 30.0)
57,330
Constructs an interactive inference plugin for TensorBoard. Args: context: A base_plugin.TBContext instance.
def __init__(self, context): self._logdir = context.logdir self._has_auth_group = (context.flags and 'authorized_groups' in context.flags and context.flags.authorized_groups != '')
57,337
Returns JSON of the specified examples. Args: request: A request that should contain 'examples_path' and 'max_examples'. Returns: JSON of up to max_examples of the examples in the path.
def _examples_from_path_handler(self, request): examples_count = int(request.args.get('max_examples')) examples_path = request.args.get('examples_path') sampling_odds = float(request.args.get('sampling_odds')) self.example_class = (tf.train.SequenceExample if request.args.get('sequence_examples') == 'true' else tf.train.Example) try: platform_utils.throw_if_file_access_not_allowed(examples_path, self._logdir, self._has_auth_group) example_strings = platform_utils.example_protos_from_path( examples_path, examples_count, parse_examples=False, sampling_odds=sampling_odds, example_class=self.example_class) self.examples = [ self.example_class.FromString(ex) for ex in example_strings] self.generate_sprite(example_strings) json_examples = [ json_format.MessageToJson(example) for example in self.examples ] self.updated_example_indices = set(range(len(json_examples))) return http_util.Respond( request, {'examples': json_examples, 'sprite': True if self.sprite else False}, 'application/json') except common_utils.InvalidUserInputError as e: return http_util.Respond(request, {'error': e.message}, 'application/json', code=400)
57,340
Updates the specified example. Args: request: A request that should contain 'index' and 'example'. Returns: An empty response.
def _update_example(self, request): if request.method != 'POST': return http_util.Respond(request, {'error': 'invalid non-POST request'}, 'application/json', code=405) example_json = request.form['example'] index = int(request.form['index']) if index >= len(self.examples): return http_util.Respond(request, {'error': 'invalid index provided'}, 'application/json', code=400) new_example = self.example_class() json_format.Parse(example_json, new_example) self.examples[index] = new_example self.updated_example_indices.add(index) self.generate_sprite([ex.SerializeToString() for ex in self.examples]) return http_util.Respond(request, {}, 'application/json')
57,341
Duplicates the specified example. Args: request: A request that should contain 'index'. Returns: An empty response.
def _duplicate_example(self, request): index = int(request.args.get('index')) if index >= len(self.examples): return http_util.Respond(request, {'error': 'invalid index provided'}, 'application/json', code=400) new_example = self.example_class() new_example.CopyFrom(self.examples[index]) self.examples.append(new_example) self.updated_example_indices.add(len(self.examples) - 1) self.generate_sprite([ex.SerializeToString() for ex in self.examples]) return http_util.Respond(request, {}, 'application/json')
57,342
Deletes the specified example. Args: request: A request that should contain 'index'. Returns: An empty response.
def _delete_example(self, request): index = int(request.args.get('index')) if index >= len(self.examples): return http_util.Respond(request, {'error': 'invalid index provided'}, 'application/json', code=400) del self.examples[index] self.updated_example_indices = set([ i if i < index else i - 1 for i in self.updated_example_indices]) self.generate_sprite([ex.SerializeToString() for ex in self.examples]) return http_util.Respond(request, {}, 'application/json')
57,343
Parses comma-separated request arguments. Args: request: A request that should contain 'inference_address', 'model_name', 'model_version', 'model_signature'. Returns: A tuple of lists of model parameters.
def _parse_request_arguments(self, request): inference_addresses = request.args.get('inference_address').split(',') model_names = request.args.get('model_name').split(',') model_versions = request.args.get('model_version').split(',') model_signatures = request.args.get('model_signature').split(',') if len(model_names) != len(inference_addresses): raise common_utils.InvalidUserInputError('Every model should have a ' + 'name and address.') return inference_addresses, model_names, model_versions, model_signatures
57,344
Returns JSON for the `vz-line-chart`s for a feature. Args: request: A request that should contain 'inference_address', 'model_name', 'model_type', 'model_version', 'model_signature' and 'label_vocab_path'. Returns: A list of JSON objects, one for each chart.
def _infer(self, request): label_vocab = inference_utils.get_label_vocab( request.args.get('label_vocab_path')) try: if request.method != 'GET': logger.error('%s requests are forbidden.', request.method) return http_util.Respond(request, {'error': 'invalid non-GET request'}, 'application/json', code=405) (inference_addresses, model_names, model_versions, model_signatures) = self._parse_request_arguments(request) indices_to_infer = sorted(self.updated_example_indices) examples_to_infer = [self.examples[index] for index in indices_to_infer] infer_objs = [] for model_num in xrange(len(inference_addresses)): serving_bundle = inference_utils.ServingBundle( inference_addresses[model_num], model_names[model_num], request.args.get('model_type'), model_versions[model_num], model_signatures[model_num], request.args.get('use_predict') == 'true', request.args.get('predict_input_tensor'), request.args.get('predict_output_tensor')) infer_objs.append(inference_utils.run_inference_for_inference_results( examples_to_infer, serving_bundle)) resp = {'indices': indices_to_infer, 'results': infer_objs} self.updated_example_indices = set() return http_util.Respond(request, {'inferences': json.dumps(resp), 'vocab': json.dumps(label_vocab)}, 'application/json') except common_utils.InvalidUserInputError as e: return http_util.Respond(request, {'error': e.message}, 'application/json', code=400) except AbortionError as e: return http_util.Respond(request, {'error': e.details}, 'application/json', code=400)
57,345
Returns a list of JSON objects for each feature in the example. Args: request: A request for features. Returns: A list with a JSON object for each feature. Numeric features are represented as {name: observedMin: observedMax:}. Categorical features are represented as {name: samples:[]}.
def _eligible_features_from_example_handler(self, request): features_list = inference_utils.get_eligible_features( self.examples[0: NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS) return http_util.Respond(request, features_list, 'application/json')
57,346
Returns JSON for the `vz-line-chart`s for a feature. Args: request: A request that should contain 'feature_name', 'example_index', 'inference_address', 'model_name', 'model_type', 'model_version', and 'model_signature'. Returns: A list of JSON objects, one for each chart.
def _infer_mutants_handler(self, request): try: if request.method != 'GET': logger.error('%s requests are forbidden.', request.method) return http_util.Respond(request, {'error': 'invalid non-GET request'}, 'application/json', code=405) example_index = int(request.args.get('example_index', '0')) feature_name = request.args.get('feature_name') examples = (self.examples if example_index == -1 else [self.examples[example_index]]) (inference_addresses, model_names, model_versions, model_signatures) = self._parse_request_arguments(request) serving_bundles = [] for model_num in xrange(len(inference_addresses)): serving_bundles.append(inference_utils.ServingBundle( inference_addresses[model_num], model_names[model_num], request.args.get('model_type'), model_versions[model_num], model_signatures[model_num], request.args.get('use_predict') == 'true', request.args.get('predict_input_tensor'), request.args.get('predict_output_tensor'))) viz_params = inference_utils.VizParams( request.args.get('x_min'), request.args.get('x_max'), self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS, request.args.get('feature_index_pattern')) json_mapping = inference_utils.mutant_charts_for_feature( examples, feature_name, serving_bundles, viz_params) return http_util.Respond(request, json_mapping, 'application/json') except common_utils.InvalidUserInputError as e: return http_util.Respond(request, {'error': e.message}, 'application/json', code=400)
57,347
Instantiates CorePlugin. Args: context: A base_plugin.TBContext instance.
def __init__(self, context): self._logdir = context.logdir self._db_uri = context.db_uri self._window_title = context.window_title self._multiplexer = context.multiplexer self._db_connection_provider = context.db_connection_provider self._assets_zip_provider = context.assets_zip_provider
57,349
Returns the plugin, if possible. Args: context: The TBContext flags. Returns: An InteractiveInferencePlugin instance or None if it couldn't be loaded.
def load(self, context): try: # pylint: disable=g-import-not-at-top,unused-import import tensorflow except ImportError: return # pylint: disable=line-too-long,g-import-not-at-top from tensorboard.plugins.interactive_inference.interactive_inference_plugin import InteractiveInferencePlugin return InteractiveInferencePlugin(context)
57,363
Returns a (run,tag) tuple storing the evaluations of the specified metric. Args: session_name: str. metric_name: MetricName protobuffer. Returns: (run, tag) tuple.
def run_tag_from_session_and_metric(session_name, metric_name): assert isinstance(session_name, six.string_types) assert isinstance(metric_name, api_pb2.MetricName) # os.path.join() will append a final slash if the group is empty; it seems # like multiplexer.Tensors won't recognize paths that end with a '/' so # we normalize the result of os.path.join() to remove the final '/' in that # case. run = os.path.normpath(os.path.join(session_name, metric_name.group)) tag = metric_name.tag return run, tag
57,368
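The normpath call is there for the empty-group case: with an empty metric group, os.path.join leaves a trailing separator that the multiplexer would not match. A quick check with hypothetical session/group names (POSIX paths):

import os

print(os.path.join('session_1', ''))                        # 'session_1/'
print(os.path.normpath(os.path.join('session_1', '')))      # 'session_1'
print(os.path.normpath(os.path.join('session_1', 'eval')))  # 'session_1/eval'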
Obtains value for scalar event given blob and dtype enum. Args: scalar_data_blob: The blob obtained from the database. dtype_enum: The enum representing the dtype. Returns: The scalar value.
def _get_value(self, scalar_data_blob, dtype_enum): tensorflow_dtype = tf.DType(dtype_enum) buf = np.frombuffer(scalar_data_blob, dtype=tensorflow_dtype.as_numpy_dtype) return np.asscalar(buf)
57,372
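The decode path is just a frombuffer over the raw blob followed by pulling out the single scalar. A pure-NumPy sketch without the tf.DType lookup (np.asscalar is equivalent to .item() in newer NumPy); the blob value here is made up for illustration.

import numpy as np

scalar_data_blob = np.float64(3.5).tobytes()
buf = np.frombuffer(scalar_data_blob, dtype=np.float64)
print(buf.item())  # 3.5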
Get index of runs and assets for a given plugin. Args: plugin_name: Name of the plugin we are checking for. Returns: A dictionary that maps from run_name to a list of plugin assets for that run.
def PluginAssets(self, plugin_name): with self._accumulators_mutex: # To avoid nested locks, we construct a copy of the run-accumulator map items = list(six.iteritems(self._accumulators)) return {run: accum.PluginAssets(plugin_name) for run, accum in items}
57,378
Return the contents for a specific plugin asset from a run. Args: run: The string name of the run. plugin_name: The string name of a plugin. asset_name: The string name of an asset. Returns: The string contents of the plugin asset. Raises: KeyError: If the asset is not available.
def RetrievePluginAsset(self, run, plugin_name, asset_name): accumulator = self.GetAccumulator(run) return accumulator.RetrievePluginAsset(plugin_name, asset_name)
57,379
Retrieve the scalar events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.ScalarEvents`.
def Scalars(self, run, tag): accumulator = self.GetAccumulator(run) return accumulator.Scalars(tag)
57,380
Get the session.run() metadata associated with a TensorFlow run and tag. Args: run: A string name of a TensorFlow run. tag: A string name of the tag associated with a particular session.run(). Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: The metadata in the form of `RunMetadata` protobuf data structure.
def RunMetadata(self, run, tag): accumulator = self.GetAccumulator(run) return accumulator.RunMetadata(tag)
57,381
Retrieve the audio events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.AudioEvents`.
def Audio(self, run, tag): accumulator = self.GetAccumulator(run) return accumulator.Audio(tag)
57,382
Retrieve the tensor events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.TensorEvent`s.
def Tensors(self, run, tag): accumulator = self.GetAccumulator(run) return accumulator.Tensors(tag)
57,383
Returns a 2-layer dictionary of the form {run: {tag: content}}. The `content` referred above is the content field of the PluginData proto for the specified plugin within a Summary.Value proto. Args: plugin_name: The name of the plugin for which to fetch content. Returns: A dictionary of the form {run: {tag: content}}.
def PluginRunToTagToContent(self, plugin_name): mapping = {} for run in self.Runs(): try: tag_to_content = self.GetAccumulator(run).PluginTagToContent( plugin_name) except KeyError: # This run lacks content for the plugin. Try the next run. continue mapping[run] = tag_to_content return mapping
57,384
Return the summary metadata for the given tag on the given run. Args: run: A string name of the run for which summary metadata is to be retrieved. tag: A string name of the tag whose summary metadata is to be retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: A `SummaryMetadata` protobuf.
def SummaryMetadata(self, run, tag): accumulator = self.GetAccumulator(run) return accumulator.SummaryMetadata(tag)
57,385
Create a text tf.Summary protobuf. Arguments: tag: String tag for the summary. data: A Python bytestring (of type bytes), a Unicode string, or a numpy data array of those types. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Raises: TypeError: If the type of the data is unsupported. Returns: A `tf.Summary` protobuf object.
def text_pb(tag, data, description=None): try: tensor = tensor_util.make_tensor_proto(data, dtype=np.object) except TypeError as e: raise TypeError('tensor must be of type string', e) summary_metadata = metadata.create_summary_metadata( display_name=None, description=description) summary = summary_pb2.Summary() summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor) return summary
57,388
Constructor. Args: request: A ListSessionGroupsRequest protobuf. scalars_plugin_instance: A scalars_plugin.ScalarsPlugin.
def __init__(self, request, scalars_plugin_instance): self._request = request self._scalars_plugin_instance = scalars_plugin_instance
57,399
Instantiates HistogramsPlugin via TensorBoard core. Args: context: A base_plugin.TBContext instance.
def __init__(self, context): self._db_connection_provider = context.db_connection_provider self._multiplexer = context.multiplexer
57,401
Obtains values for histogram data given blob and dtype enum. Args: data_blob: The blob obtained from the database. dtype_enum: The enum representing the dtype. shape_string: A comma-separated string of numbers denoting shape. Returns: The histogram values as a list served to the frontend.
def _get_values(self, data_blob, dtype_enum, shape_string): buf = np.frombuffer(data_blob, dtype=tf.DType(dtype_enum).as_numpy_dtype) return buf.reshape([int(i) for i in shape_string.split(',')]).tolist()
57,404
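A pure-NumPy sketch of the same decode-and-reshape step, with the dtype hard-coded instead of looked up through tf.DType and a made-up blob/shape for illustration:

import numpy as np

data_blob = np.arange(6, dtype=np.float64).tobytes()
shape_string = '2,3'
buf = np.frombuffer(data_blob, dtype=np.float64)
print(buf.reshape([int(i) for i in shape_string.split(',')]).tolist())
# [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]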
Instantiates ScalarsPlugin via TensorBoard core. Args: context: A base_plugin.TBContext instance.
def __init__(self, context): self._logdir = context.logdir self._multiplexer = context.multiplexer self._plugin_name_to_instance = context.plugin_name_to_instance
57,409
Provides a response for downloading scalars data for a data series. Args: run: The run. tag: The specific tag. response_format: A string. One of the values of the OutputFormat enum of the scalar plugin. Raises: ValueError: If the scalars plugin is not registered. Returns: 2 entities: - A JSON object response body. - A mime type (string) for the response.
def download_data_impl(self, run, tag, response_format): scalars_plugin_instance = self._get_scalars_plugin() if not scalars_plugin_instance: raise ValueError(('Failed to respond to request for /download_data. ' 'The scalars plugin is oddly not registered.')) body, mime_type = scalars_plugin_instance.scalars_impl( tag, run, None, response_format) return body, mime_type
57,413
Given a tag regex and single run, return ScalarEvents. Args: run: A run string. tag_regex_string: A regular expression that captures portions of tags. Raises: ValueError: if the scalars plugin is not registered. Returns: A dictionary that is the JSON-able response.
def scalars_impl(self, run, tag_regex_string): if not tag_regex_string: # The user provided no regex. return { _REGEX_VALID_PROPERTY: False, _TAG_TO_EVENTS_PROPERTY: {}, } # Construct the regex. try: regex = re.compile(tag_regex_string) except re.error: return { _REGEX_VALID_PROPERTY: False, _TAG_TO_EVENTS_PROPERTY: {}, } # Fetch the tags for the run. Filter for tags that match the regex. run_to_data = self._multiplexer.PluginRunToTagToContent( scalars_metadata.PLUGIN_NAME) tag_to_data = None try: tag_to_data = run_to_data[run] except KeyError: # The run could not be found. Perhaps a configuration specified a run that # TensorBoard has not read from disk yet. payload = {} if tag_to_data: scalars_plugin_instance = self._get_scalars_plugin() if not scalars_plugin_instance: raise ValueError(('Failed to respond to request for /scalars. ' 'The scalars plugin is oddly not registered.')) form = scalars_plugin.OutputFormat.JSON payload = { tag: scalars_plugin_instance.scalars_impl(tag, run, None, form)[0] for tag in tag_to_data.keys() if regex.match(tag) } return { _REGEX_VALID_PROPERTY: True, _TAG_TO_EVENTS_PROPERTY: payload, }
57,415
Given an iterable of string contents, make a table row. Args: contents: An iterable yielding strings. tag: The tag to place contents in. Defaults to 'td', you might want 'th'. Returns: A string containing the content strings, organized into a table row. Example: make_table_row(['one', 'two', 'three']) == ''' <tr> <td>one</td> <td>two</td> <td>three</td> </tr>'''
def make_table_row(contents, tag='td'): columns = ('<%s>%s</%s>\n' % (tag, s, tag) for s in contents) return '<tr>\n' + ''.join(columns) + '</tr>\n'
57,418
Given a np.ndarray with nDims > 2, reduce it to 2d. It does this by selecting the zeroth coordinate for every dimension except the last two. Args: arr: a numpy ndarray of dimension at least 2. Returns: A two-dimensional subarray from the input array. Raises: ValueError: If the argument is not a numpy ndarray, or the dimensionality is too low.
def reduce_to_2d(arr): if not isinstance(arr, np.ndarray): raise ValueError('reduce_to_2d requires a numpy.ndarray') ndims = len(arr.shape) if ndims < 2: raise ValueError('reduce_to_2d requires an array of dimensionality >=2') # slice(None) is equivalent to `:`, so we take arr[0,0,...0,:,:] slices = ([0] * (ndims - 2)) + [slice(None), slice(None)] return arr[slices]
57,420
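The slicing trick picks index 0 on every leading axis and keeps the last two axes. Note that recent NumPy versions require a tuple index rather than a list; a small sketch of the equivalent tuple-based indexing:

import numpy as np

arr = np.arange(24).reshape(2, 3, 4)
slices = (0,) * (arr.ndim - 2) + (slice(None), slice(None))
print(arr[slices].shape)                    # (3, 4)
print(np.array_equal(arr[slices], arr[0]))  # True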
Instantiates TextPlugin via TensorBoard core. Args: context: A base_plugin.TBContext instance.
def __init__(self, context): self._multiplexer = context.multiplexer # Cache the last result of index_impl() so that methods that depend on it # can return without blocking (while kicking off a background thread to # recompute the current index). self._index_cached = None # Lock that ensures that only one thread attempts to compute index_impl() # at a given time, since it's expensive. self._index_impl_lock = threading.Lock() # Pointer to the current thread computing index_impl(), if any. This is # stored on TextPlugin only to facilitate testing. self._index_impl_thread = None
57,423
Create a `summary_pb2.SummaryMetadata` proto for pr_curves plugin data. Arguments: display_name: The display name used in TensorBoard. description: The description to show in TensorBoard. num_thresholds: The number of thresholds to use for PR curves. Returns: A `summary_pb2.SummaryMetadata` protobuf object.
def create_summary_metadata(display_name, description, num_thresholds): pr_curve_plugin_data = plugin_data_pb2.PrCurvePluginData( version=PROTO_VERSION, num_thresholds=num_thresholds) content = pr_curve_plugin_data.SerializeToString() return summary_pb2.SummaryMetadata( display_name=display_name, summary_description=description, plugin_data=summary_pb2.SummaryMetadata.PluginData( plugin_name=PLUGIN_NAME, content=content))
57,433
Parse summary metadata to a Python object. Arguments: content: The `content` field of a `SummaryMetadata` proto corresponding to the pr_curves plugin. Returns: A `PrCurvePluginData` protobuf object.
def parse_plugin_metadata(content): if not isinstance(content, bytes): raise TypeError('Content type must be bytes') result = plugin_data_pb2.PrCurvePluginData.FromString(content) if result.version == 0: return result else: logger.warn( 'Unknown metadata version: %s. The latest version known to ' 'this build of TensorBoard is %s; perhaps a newer build is ' 'available?', result.version, PROTO_VERSION) return result
57,434
Return a field to `Observations` dict for the event generator. Args: generator: A generator over event protos. query_for_tag: A string that if specified, only create observations for events with this tag name. Returns: A dict mapping keys in `TRACKED_FIELDS` to an `Observation` list.
def get_field_to_observations_map(generator, query_for_tag=''): def increment(stat, event, tag=''): assert stat in TRACKED_FIELDS field_to_obs[stat].append(Observation(step=event.step, wall_time=event.wall_time, tag=tag)._asdict()) field_to_obs = dict([(t, []) for t in TRACKED_FIELDS]) for event in generator: ## Process the event if event.HasField('graph_def') and (not query_for_tag): increment('graph', event) if event.HasField('session_log') and (not query_for_tag): status = event.session_log.status if status == event_pb2.SessionLog.START: increment('sessionlog:start', event) elif status == event_pb2.SessionLog.STOP: increment('sessionlog:stop', event) elif status == event_pb2.SessionLog.CHECKPOINT: increment('sessionlog:checkpoint', event) elif event.HasField('summary'): for value in event.summary.value: if query_for_tag and value.tag != query_for_tag: continue for proto_name, display_name in SUMMARY_TYPE_TO_FIELD.items(): if value.HasField(proto_name): increment(display_name, event, value.tag) return field_to_obs
57,435
Returns a dictionary of tags that a user could query over. Args: field_to_obs: Dict that maps string field to `Observation` list. Returns: A dict that maps keys in `TAG_FIELDS` to a list of string tags present in the event files. If the dict does not have any observations of the type, maps to an empty list so that we can render this to console.
def get_unique_tags(field_to_obs): return {field: sorted(set([x.get('tag', '') for x in observations])) for field, observations in field_to_obs.items() if field in TAG_FIELDS}
57,436
Prints a shallow dict to console. Args: d: Dict to print. show_missing: Whether to show keys with empty values.
def print_dict(d, show_missing=True): for k, v in sorted(d.items()): if (not v) and show_missing: # No instances of the key, so print missing symbol. print('{} -'.format(k)) elif isinstance(v, list): # Value is a list, so print each item of the list. print(k) for item in v: print(' {}'.format(item)) elif isinstance(v, dict): # Value is a dict, so print each (key, value) pair of the dict. print(k) for kk, vv in sorted(v.items()): print(' {:<20} {}'.format(kk, vv))
57,437
Transform the field-to-obs mapping into a printable dictionary. Args: field_to_obs: Dict that maps string field to `Observation` list. Returns: A dict with the keys and values to print to console.
def get_dict_to_print(field_to_obs): def compressed_steps(steps): return {'num_steps': len(set(steps)), 'min_step': min(steps), 'max_step': max(steps), 'last_step': steps[-1], 'first_step': steps[0], 'outoforder_steps': get_out_of_order(steps)} def full_steps(steps): return {'steps': steps, 'outoforder_steps': get_out_of_order(steps)} output = {} for field, observations in field_to_obs.items(): if not observations: output[field] = None continue steps = [x['step'] for x in observations] if field in SHORT_FIELDS: output[field] = compressed_steps(steps) if field in LONG_FIELDS: output[field] = full_steps(steps) return output
57,438
Returns elements that break the monotonically non-decreasing trend. This is used to find instances of global step values that are "out-of-order", which may trigger TensorBoard event discarding logic. Args: list_of_numbers: A list of numbers. Returns: A list of tuples in which each tuple contains two adjacent elements from the input where the second element is lower than the first.
def get_out_of_order(list_of_numbers): # TODO: Consider changing this to only check for out-of-order # steps within a particular tag. result = [] # pylint: disable=consider-using-enumerate for i in range(len(list_of_numbers)): if i == 0: continue if list_of_numbers[i] < list_of_numbers[i - 1]: result.append((list_of_numbers[i - 1], list_of_numbers[i])) return result
57,439
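A standalone restatement of the rule for illustration (not the module's own function): every adjacent pair where the step value decreases is reported.

def out_of_order_pairs(steps):
    # (previous, current) for every adjacent pair that decreases.
    return [(steps[i - 1], steps[i])
            for i in range(1, len(steps))
            if steps[i] < steps[i - 1]]

print(out_of_order_pairs([0, 1, 2, 1, 3, 0]))  # [(2, 1), (3, 0)]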
Returns a list of event generators for subdirectories with event files. The number of generators returned should equal the number of directories within logdir that contain event files. If only logdir contains event files, returns a list of length one. Args: logdir: A log directory that contains event files. Returns: List of event generators for each subdirectory with event files.
def generators_from_logdir(logdir): subdirs = io_wrapper.GetLogdirSubdirectories(logdir) generators = [ itertools.chain(*[ generator_from_event_file(os.path.join(subdir, f)) for f in tf.io.gfile.listdir(subdir) if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f)) ]) for subdir in subdirs ] return generators
57,440
Returns a list of InspectionUnit objects given either logdir or event_file. If logdir is given, the number of InspectionUnits should equal the number of directories or subdirectories that contain event files. If event_file is given, the number of InspectionUnits should be 1. Args: logdir: A log directory that contains event files. event_file: Or, a particular event file path. tag: An optional tag name to query for. Returns: A list of InspectionUnit objects.
def get_inspection_units(logdir='', event_file='', tag=''): if logdir: subdirs = io_wrapper.GetLogdirSubdirectories(logdir) inspection_units = [] for subdir in subdirs: generator = itertools.chain(*[ generator_from_event_file(os.path.join(subdir, f)) for f in tf.io.gfile.listdir(subdir) if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f)) ]) inspection_units.append(InspectionUnit( name=subdir, generator=generator, field_to_obs=get_field_to_observations_map(generator, tag))) if inspection_units: print('Found event files in:\n{}\n'.format('\n'.join( [u.name for u in inspection_units]))) elif io_wrapper.IsTensorFlowEventsFile(logdir): print( 'It seems that {} may be an event file instead of a logdir. If this ' 'is the case, use --event_file instead of --logdir to pass ' 'it in.'.format(logdir)) else: print('No event files found within logdir {}'.format(logdir)) return inspection_units elif event_file: generator = generator_from_event_file(event_file) return [InspectionUnit( name=event_file, generator=generator, field_to_obs=get_field_to_observations_map(generator, tag))] return []
57,441
Main function for inspector that prints out a digest of event files. Args: logdir: A log directory that contains event files. event_file: Or, a particular event file path. tag: An optional tag name to query for. Raises: ValueError: If neither logdir nor event_file is given, or if both are given.
def inspect(logdir='', event_file='', tag=''): print(PRINT_SEPARATOR + 'Processing event files... (this can take a few minutes)\n' + PRINT_SEPARATOR) inspection_units = get_inspection_units(logdir, event_file, tag) for unit in inspection_units: if tag: print('Event statistics for tag {} in {}:'.format(tag, unit.name)) else: # If the user is not inspecting a particular tag, also print the list of # all available tags that they can query. print('These tags are in {}:'.format(unit.name)) print_dict(get_unique_tags(unit.field_to_obs)) print(PRINT_SEPARATOR) print('Event statistics for {}:'.format(unit.name)) print_dict(get_dict_to_print(unit.field_to_obs), show_missing=(not tag)) print(PRINT_SEPARATOR)
57,442
Constructor for colab notebook WitWidget. Args: config_builder: WitConfigBuilder object containing settings for WIT. height: Optional height in pixels for WIT to occupy. Defaults to 1000.
def __init__(self, config_builder, height=1000): tf.logging.set_verbosity(tf.logging.WARN) config = config_builder.build() copied_config = dict(config) self.estimator_and_spec = ( dict(config.get('estimator_and_spec')) if 'estimator_and_spec' in config else {}) self.compare_estimator_and_spec = ( dict(config.get('compare_estimator_and_spec')) if 'compare_estimator_and_spec' in config else {}) if 'estimator_and_spec' in copied_config: del copied_config['estimator_and_spec'] if 'compare_estimator_and_spec' in copied_config: del copied_config['compare_estimator_and_spec'] self.custom_predict_fn = ( config.get('custom_predict_fn') if 'custom_predict_fn' in config else None) self.compare_custom_predict_fn = ( config.get('compare_custom_predict_fn') if 'compare_custom_predict_fn' in config else None) if 'custom_predict_fn' in copied_config: del copied_config['custom_predict_fn'] if 'compare_custom_predict_fn' in copied_config: del copied_config['compare_custom_predict_fn'] self._set_examples(config['examples']) del copied_config['examples'] self.config = copied_config # Add this instance to the static instance list. WitWidget.widgets.append(self) # Display WIT Polymer element. display.display(display.HTML(self._get_element_html())) display.display(display.HTML( WIT_HTML.format( examples=json.dumps(self.examples), height=height, id=WitWidget.index))) # Increment the static instance WitWidget index counter WitWidget.index += 1 # Send the provided config and examples to JS output.eval_js(.format( config=json.dumps(self.config))) output.eval_js('updateExamplesCallback()') self._generate_sprite()
57,444
Returns the debugger plugin, if possible. Args: context: The TBContext flags including `add_arguments`. Returns: A DebuggerPlugin instance or None if it couldn't be loaded.
def load(self, context): if not (context.flags.debugger_data_server_grpc_port > 0 or context.flags.debugger_port > 0): return None flags = context.flags try: # pylint: disable=g-import-not-at-top,unused-import import tensorflow except ImportError: raise ImportError( 'To use the debugger plugin, you need to have TensorFlow installed:\n' ' pip install tensorflow') try: # pylint: disable=line-too-long,g-import-not-at-top from tensorboard.plugins.debugger import debugger_plugin as debugger_plugin_lib from tensorboard.plugins.debugger import interactive_debugger_plugin as interactive_debugger_plugin_lib # pylint: enable=line-too-long,g-import-not-at-top except ImportError as e: e_type, e_value, e_traceback = sys.exc_info() message = e.msg if hasattr(e, 'msg') else e.message # Handle py2 vs py3 if 'grpc' in message: e_value = ImportError( message + '\n\nTo use the debugger plugin, you need to have ' 'gRPC installed:\n pip install grpcio') six.reraise(e_type, e_value, e_traceback) if flags.debugger_port > 0: interactive_plugin = ( interactive_debugger_plugin_lib.InteractiveDebuggerPlugin(context)) logger.info('Starting Interactive Debugger Plugin at gRPC port %d', flags.debugger_data_server_grpc_port) interactive_plugin.listen(flags.debugger_port) return interactive_plugin elif flags.debugger_data_server_grpc_port > 0: noninteractive_plugin = debugger_plugin_lib.DebuggerPlugin(context) logger.info('Starting Non-interactive Debugger Plugin at gRPC port %d', flags.debugger_data_server_grpc_port) noninteractive_plugin.listen(flags.debugger_data_server_grpc_port) return noninteractive_plugin raise AssertionError()
57,455
Returns a summary metadata for the HParams plugin. Returns a summary_pb2.SummaryMetadata holding a copy of the given HParamsPluginData message in its plugin_data.content field. Sets the version field of the hparams_plugin_data_pb copy to PLUGIN_DATA_VERSION. Args: hparams_plugin_data_pb: the HParamsPluginData protobuffer to use.
def create_summary_metadata(hparams_plugin_data_pb): if not isinstance(hparams_plugin_data_pb, plugin_data_pb2.HParamsPluginData): raise TypeError('Needed an instance of plugin_data_pb2.HParamsPluginData.' ' Got: %s' % type(hparams_plugin_data_pb)) content = plugin_data_pb2.HParamsPluginData() content.CopyFrom(hparams_plugin_data_pb) content.version = PLUGIN_DATA_VERSION return tf.compat.v1.SummaryMetadata( plugin_data=tf.compat.v1.SummaryMetadata.PluginData( plugin_name=PLUGIN_NAME, content=content.SerializeToString()))
57,456
Returns a data oneof's field from plugin_data.content. Raises HParamsError if the content doesn't have 'data_oneof_field' set or this file is incompatible with the version of the metadata stored. Args: content: The SummaryMetadata.plugin_data.content to use. data_oneof_field: string. The name of the data oneof field to return.
def _parse_plugin_data_as(content, data_oneof_field): plugin_data = plugin_data_pb2.HParamsPluginData.FromString(content) if plugin_data.version != PLUGIN_DATA_VERSION: raise error.HParamsError( 'Only supports plugin_data version: %s; found: %s in: %s' % (PLUGIN_DATA_VERSION, plugin_data.version, plugin_data)) if not plugin_data.HasField(data_oneof_field): raise error.HParamsError( 'Expected plugin_data.%s to be set. Got: %s' % (data_oneof_field, plugin_data)) return getattr(plugin_data, data_oneof_field)
57,457
Writes an event proto to disk. This method is threadsafe with respect to invocations of itself. Args: event: The event proto. Raises: IOError: If writing the event proto to disk fails.
def write_event(self, event): self._lock.acquire() try: self._events_writer.WriteEvent(event) self._event_count += 1 if self._always_flush: # We flush on every event within the integration test. self._events_writer.Flush() if self._event_count == self._check_this_often: # Every so often, we check whether the size of the file is too big. self._event_count = 0 # Flush to get an accurate size check. self._events_writer.Flush() file_path = os.path.join(self._events_directory, self.get_current_file_name()) if not tf.io.gfile.exists(file_path): # The events file does not exist. Perhaps the user had manually # deleted it after training began. Create a new one. self._events_writer.Close() self._events_writer = self._create_events_writer( self._events_directory) elif tf.io.gfile.stat(file_path).length > self._single_file_size_cap_bytes: # The current events file has gotten too big. Close the previous # events writer. Make a new one. self._events_writer.Close() self._events_writer = self._create_events_writer( self._events_directory) except IOError as err: logger.error( "Writing to %s failed: %s", self.get_current_file_name(), err) self._lock.release()
57,459
Creates a new events writer. Args: directory: The directory in which to write files containing events. Returns: A new events writer, which corresponds to a new events file.
def _create_events_writer(self, directory): total_size = 0 events_files = self._fetch_events_files_on_disk() for file_name in events_files: file_path = os.path.join(self._events_directory, file_name) total_size += tf.io.gfile.stat(file_path).length if total_size >= self.total_file_size_cap_bytes: # The total size written to disk is too big. Delete events files until # the size is below the cap. for file_name in events_files: if total_size < self.total_file_size_cap_bytes: break file_path = os.path.join(self._events_directory, file_name) file_size = tf.io.gfile.stat(file_path).length try: tf.io.gfile.remove(file_path) total_size -= file_size logger.info( "Deleted %s because events files take up over %d bytes", file_path, self.total_file_size_cap_bytes) except IOError as err: logger.error("Deleting %s failed: %s", file_path, err) # We increment this index because each events writer must differ in prefix. self._events_file_count += 1 file_path = "%s.%d.%d" % ( os.path.join(directory, DEBUGGER_EVENTS_FILE_STARTING_TEXT), time.time(), self._events_file_count) logger.info("Creating events file %s", file_path) return pywrap_tensorflow.EventsWriter(tf.compat.as_bytes(file_path))
57,461
Format a line of a table. Arguments: headers: A list of strings that are used as the table headers. fields: A list of the same length as `headers` where `fields[i]` is the entry for `headers[i]` in this row. Elements can be of arbitrary types. Pass `headers` to print the header row. Returns: A pretty string.
def _format_line(headers, fields): assert len(fields) == len(headers), (fields, headers) fields = ["%2.4f" % field if isinstance(field, float) else str(field) for field in fields] return ' '.join(' ' * max(0, len(header) - len(field)) + field for (header, field) in zip(headers, fields))
57,466
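The padding expression right-aligns each field to its header's width and joins columns with two spaces. A direct check of that expression with hypothetical header and field values:

headers = ['experiment', 'loss']
fields = ['run1', 0.5]
fields = ['%2.4f' % f if isinstance(f, float) else str(f) for f in fields]
print('  '.join(' ' * max(0, len(h) - len(f)) + f
                for h, f in zip(headers, fields)))
# '      run1  0.5000'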
Extract all nodes with gated-gRPC debug ops attached. Uses cached values if available. This method is thread-safe. Args: graph_def: A tf.GraphDef proto. matching_debug_op: If specified, return only tensors and nodes matching this debug op name (optional). If `None`, will extract only `DebugIdentity` debug ops. Returns: A list of (node_name, op_type, output_slot, debug_op) tuples.
def get_gated_grpc_tensors(self, matching_debug_op=None): with self._grpc_gated_lock: matching_debug_op = matching_debug_op or 'DebugIdentity' if matching_debug_op not in self._grpc_gated_tensors: # First, construct a map from node name to op type. node_name_to_op_type = dict( (node.name, node.op) for node in self._graph_def.node) # Second, populate the output list. gated = [] for node in self._graph_def.node: if node.op == matching_debug_op: for attr_key in node.attr: if attr_key == 'gated_grpc' and node.attr[attr_key].b: node_name, output_slot, _, debug_op = ( debug_graphs.parse_debug_node_name(node.name)) gated.append( (node_name, node_name_to_op_type[node_name], output_slot, debug_op)) break self._grpc_gated_tensors[matching_debug_op] = gated return self._grpc_gated_tensors[matching_debug_op]
57,469
Constructs a `_RunLoader`. Args: subdir: string, filesystem path of the run directory experiment_name: string, name of the run's experiment run_name: string, name of the run
def __init__(self, subdir, experiment_name, run_name): self._subdir = subdir self._experiment_name = experiment_name self._run_name = run_name self._directory_watcher = directory_watcher.DirectoryWatcher( subdir, event_file_loader.RawEventFileLoader, io_wrapper.IsTensorFlowEventsFile)
57,476
Create a TensorFlow op to group data into histogram buckets. Arguments: data: A `Tensor` of any shape. Must be castable to `float64`. bucket_count: Optional positive `int` or scalar `int32` `Tensor`. Returns: A `Tensor` of shape `[k, 3]` and type `float64`. The `i`th row is a triple `[left_edge, right_edge, count]` for a single bucket. The value of `k` is either `bucket_count` or `1` or `0`.
def _buckets(data, bucket_count=None): # TODO(nickfelt): remove on-demand imports once dep situation is fixed. import tensorflow.compat.v1 as tf if bucket_count is None: bucket_count = summary_v2.DEFAULT_BUCKET_COUNT with tf.name_scope('buckets', values=[data, bucket_count]), \ tf.control_dependencies([tf.assert_scalar(bucket_count), tf.assert_type(bucket_count, tf.int32)]): data = tf.reshape(data, shape=[-1]) # flatten data = tf.cast(data, tf.float64) is_empty = tf.equal(tf.size(input=data), 0) def when_empty(): return tf.constant([], shape=(0, 3), dtype=tf.float64) def when_nonempty(): min_ = tf.reduce_min(input_tensor=data) max_ = tf.reduce_max(input_tensor=data) range_ = max_ - min_ is_singular = tf.equal(range_, 0) def when_nonsingular(): bucket_width = range_ / tf.cast(bucket_count, tf.float64) offsets = data - min_ bucket_indices = tf.cast(tf.floor(offsets / bucket_width), dtype=tf.int32) clamped_indices = tf.minimum(bucket_indices, bucket_count - 1) one_hots = tf.one_hot(clamped_indices, depth=bucket_count) bucket_counts = tf.cast(tf.reduce_sum(input_tensor=one_hots, axis=0), dtype=tf.float64) edges = tf.linspace(min_, max_, bucket_count + 1) left_edges = edges[:-1] right_edges = edges[1:] return tf.transpose(a=tf.stack( [left_edges, right_edges, bucket_counts])) def when_singular(): center = min_ bucket_starts = tf.stack([center - 0.5]) bucket_ends = tf.stack([center + 0.5]) bucket_counts = tf.stack([tf.cast(tf.size(input=data), tf.float64)]) return tf.transpose( a=tf.stack([bucket_starts, bucket_ends, bucket_counts])) return tf.cond(is_singular, when_singular, when_nonsingular) return tf.cond(is_empty, when_empty, when_nonempty)
57,482
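The graph ops above implement fixed-width bucketing: compute the data range, floor each offset into a bucket index, clamp the maximum into the last bucket, and count. A pure-NumPy sketch of the same rule for the non-empty, non-singular case; this is an illustration under those assumptions, not the summary op itself.

import numpy as np

def numpy_buckets(data, bucket_count=30):
    data = np.asarray(data, dtype=np.float64).reshape(-1)
    min_, max_ = data.min(), data.max()
    bucket_width = (max_ - min_) / bucket_count
    # Floor into buckets, clamping the maximum value into the last bucket.
    indices = np.minimum((data - min_) // bucket_width, bucket_count - 1).astype(int)
    counts = np.bincount(indices, minlength=bucket_count).astype(np.float64)
    edges = np.linspace(min_, max_, bucket_count + 1)
    return np.stack([edges[:-1], edges[1:], counts], axis=1)

print(numpy_buckets([0.0, 0.1, 0.9, 1.0], bucket_count=2))
# Two buckets, [0.0, 0.5) and [0.5, 1.0], each holding two values.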
Query the values at given time indices. Args: time_indices: 0-based time indices to query, as a `list` of `int`. Returns: Values as a list of `numpy.ndarray` (for time indices in memory) or `None` (for time indices discarded).
def query(self, time_indices): if self._disposed: raise ValueError( 'Cannot query: this _WatchStore instance is already disposed') if not isinstance(time_indices, (tuple, list)): time_indices = [time_indices] output = [] for time_index in time_indices: if isinstance(self._data[time_index], _TensorValueDiscarded): output.append(None) else: data_item = self._data[time_index] if (hasattr(data_item, 'dtype') and tensor_helper.translate_dtype(data_item.dtype) == 'string'): _, _, data_item = tensor_helper.array_view(data_item) data_item = np.array( tensor_helper.process_buffers_for_display(data_item), dtype=np.object) output.append(data_item) return output
57,491
Add a tensor value. Args: watch_key: A string representing the debugger tensor watch, e.g., 'Dense_1/BiasAdd:0:DebugIdentity'. tensor_value: The value of the tensor as a numpy.ndarray.
def add(self, watch_key, tensor_value): if watch_key not in self._tensor_data: self._tensor_data[watch_key] = _WatchStore( watch_key, mem_bytes_limit=self._watch_mem_bytes_limit) self._tensor_data[watch_key].add(tensor_value)
57,492
Constructs a debugger plugin for TensorBoard. This plugin adds handlers for retrieving debugger-related data. The plugin also starts a debugger data server once the log directory is passed to the plugin via the call to get_plugin_apps. Args: context: A base_plugin.TBContext instance.
def __init__(self, context): self._event_multiplexer = context.multiplexer self._logdir = context.logdir self._debugger_data_server = None self._grpc_port = None
57,508
Start listening on the given gRPC port. This method of an instance of DebuggerPlugin can be invoked at most once. This method is not thread safe. Args: grpc_port: port number to listen at. Raises: ValueError: If this instance is already listening at a gRPC port.
def listen(self, grpc_port): if self._grpc_port: raise ValueError( "This DebuggerPlugin instance is already listening at gRPC port %d" % self._grpc_port) self._grpc_port = grpc_port sys.stderr.write('Creating DebuggerDataServer at port %d and logdir %s\n' % (self._grpc_port, self._logdir)) sys.stderr.flush() self._debugger_data_server = debugger_server_lib.DebuggerDataServer( self._grpc_port, self._logdir) threading.Thread(target=self._debugger_data_server. start_the_debugger_data_receiving_server).start()
57,509
Obtains the health pills for a run sampled by the event multiplexer. This is much faster than the alternative path of reading health pills from disk. Args: run: The run to fetch health pills for. node_names: A list of node names for which to retrieve health pills. Returns: A dictionary mapping from node name to a list of event_accumulator.HealthPillEvents.
def _obtain_sampled_health_pills(self, run, node_names): runs_to_tags_to_content = self._event_multiplexer.PluginRunToTagToContent( constants.DEBUGGER_PLUGIN_NAME) if run not in runs_to_tags_to_content: # The run lacks health pills. return {} # This is also a mapping between node name and plugin content because this # plugin tags by node name. tags_to_content = runs_to_tags_to_content[run] mapping = {} for node_name in node_names: if node_name not in tags_to_content: # This node lacks health pill data. continue health_pills = [] for tensor_event in self._event_multiplexer.Tensors(run, node_name): json_string = tags_to_content[node_name] try: content_object = json.loads(tf.compat.as_text(json_string)) device_name = content_object['device'] output_slot = content_object['outputSlot'] health_pills.append( self._tensor_proto_to_health_pill(tensor_event, node_name, device_name, output_slot)) except (KeyError, ValueError) as e: logger.error('Could not determine device from JSON string ' '%r: %r', json_string, e) mapping[node_name] = health_pills return mapping
57,512
Converts an event_accumulator.TensorEvent to a HealthPillEvent. Args: tensor_event: The event_accumulator.TensorEvent to convert. node_name: The name of the node (without the output slot). device: The device. output_slot: The integer output slot this health pill is relevant to. Returns: A HealthPillEvent.
def _tensor_proto_to_health_pill(self, tensor_event, node_name, device, output_slot): return self._process_health_pill_value( wall_time=tensor_event.wall_time, step=tensor_event.step, device_name=device, output_slot=output_slot, node_name=node_name, tensor_proto=tensor_event.tensor_proto)
57,513
Convert a `TensorBoardInfo` to string form to be stored on disk. The format returned by this function is opaque and should only be interpreted by `_info_from_string`. Args: info: A valid `TensorBoardInfo` object. Raises: ValueError: If any field on `info` is not of the correct type. Returns: A string representation of the provided `TensorBoardInfo`.
def _info_to_string(info): for key in _TENSORBOARD_INFO_FIELDS: field_type = _TENSORBOARD_INFO_FIELDS[key] if not isinstance(getattr(info, key), field_type.runtime_type): raise ValueError( "expected %r of type %s, but found: %r" % (key, field_type.runtime_type, getattr(info, key)) ) if info.version != version.VERSION: raise ValueError( "expected 'version' to be %r, but found: %r" % (version.VERSION, info.version) ) json_value = { k: _TENSORBOARD_INFO_FIELDS[k].serialize(getattr(info, k)) for k in _TENSORBOARD_INFO_FIELDS } return json.dumps(json_value, sort_keys=True, indent=4)
57,518
Write TensorBoardInfo to the current process's info file. This should be called by `main` once the server is ready. When the server shuts down, `remove_info_file` should be called. Args: tensorboard_info: A valid `TensorBoardInfo` object. Raises: ValueError: If any field on `info` is not of the correct type.
def write_info_file(tensorboard_info): payload = "%s\n" % _info_to_string(tensorboard_info) with open(_get_info_file_path(), "w") as outfile: outfile.write(payload)
57,522
Read the given file, if it exists. Args: filename: A path to a file. Returns: A string containing the file contents, or `None` if the file does not exist.
def _maybe_read_file(filename): try: with open(filename) as infile: return infile.read() except IOError as e: if e.errno == errno.ENOENT: return None
57,527
Constructs a profiler plugin for TensorBoard. This plugin adds handlers for performance-related frontends. Args: context: A base_plugin.TBContext instance.
def __init__(self, context): self.logdir = context.logdir self.multiplexer = context.multiplexer self.plugin_logdir = plugin_asset_util.PluginDirectory( self.logdir, PLUGIN_NAME) self.stub = None self.master_tpu_unsecure_channel = context.flags.master_tpu_unsecure_channel # Whether the plugin is active. This is an expensive computation, so we # compute this asynchronously and cache positive results indefinitely. self._is_active = False # Lock to ensure at most one thread computes _is_active at a time. self._is_active_lock = threading.Lock()
57,531
Retrieves and processes the tool data for a run and a host. Args: request: XMLHttpRequest Returns: A string that can be served to the frontend tool or None if tool, run or host is invalid.
def data_impl(self, request):
  run = request.args.get('run')
  tool = request.args.get('tag')
  host = request.args.get('host')
  run_dir = self._run_dir(run)
  # Profile plugin "run" is the last component of run dir.
  profile_run = os.path.basename(run_dir)

  if tool not in TOOLS:
    return None

  self.start_grpc_stub_if_necessary()
  if tool == 'trace_viewer@' and self.stub is not None:
    from tensorflow.contrib.tpu.profiler import tpu_profiler_analysis_pb2
    grpc_request = tpu_profiler_analysis_pb2.ProfileSessionDataRequest()
    grpc_request.repository_root = run_dir
    grpc_request.session_id = profile_run[:-1]
    grpc_request.tool_name = 'trace_viewer'
    # Remove the trailing dot if present
    grpc_request.host_name = host.rstrip('.')

    grpc_request.parameters['resolution'] = request.args.get('resolution')
    if request.args.get('start_time_ms') is not None:
      grpc_request.parameters['start_time_ms'] = request.args.get(
          'start_time_ms')
    if request.args.get('end_time_ms') is not None:
      grpc_request.parameters['end_time_ms'] = request.args.get('end_time_ms')
    grpc_response = self.stub.GetSessionToolData(grpc_request)
    return grpc_response.output

  if tool not in TOOLS:
    return None

  tool_name = str(host) + TOOLS[tool]
  asset_path = os.path.join(run_dir, tool_name)
  raw_data = None
  try:
    with tf.io.gfile.GFile(asset_path, 'rb') as f:
      raw_data = f.read()
  except tf.errors.NotFoundError:
    logger.warn('Asset path %s not found', asset_path)
  except tf.errors.OpError as e:
    logger.warn("Couldn't read asset path: %s, OpError %s", asset_path, e)

  if raw_data is None:
    return None
  if tool == 'trace_viewer':
    return process_raw_trace(raw_data)
  if tool in _RAW_DATA_TOOLS:
    return raw_data
  return None
57,540
Run simulations on a reasonable set of parameters. Arguments: logdir: the directory into which to store all the runs' data verbose: if true, print out each run's name as it begins
def run_all(logdir, verbose=False):
  for initial_temperature in [270.0, 310.0, 350.0]:
    for final_temperature in [270.0, 310.0, 350.0]:
      for heat_coefficient in [0.001, 0.005]:
        run_name = 'temperature:t0=%g,tA=%g,kH=%g' % (
            initial_temperature, final_temperature, heat_coefficient)
        if verbose:
          print('--- Running: %s' % run_name)
        run(logdir, run_name, initial_temperature, final_temperature,
            heat_coefficient)
57,545
Makes Python object appropriate for JSON serialization. - Replaces instances of Infinity/-Infinity/NaN with strings. - Turns byte strings into unicode strings. - Turns sets into sorted lists. - Turns tuples into lists. Args: obj: Python data structure. encoding: Charset used to decode byte strings. Returns: Unicode JSON data structure.
def Cleanse(obj, encoding='utf-8'):
  if isinstance(obj, int):
    return obj
  elif isinstance(obj, float):
    if obj == _INFINITY:
      return 'Infinity'
    elif obj == _NEGATIVE_INFINITY:
      return '-Infinity'
    elif math.isnan(obj):
      return 'NaN'
    else:
      return obj
  elif isinstance(obj, bytes):
    return tf.compat.as_text(obj, encoding)
  elif isinstance(obj, (list, tuple)):
    return [Cleanse(i, encoding) for i in obj]
  elif isinstance(obj, set):
    return [Cleanse(i, encoding) for i in sorted(obj)]
  elif isinstance(obj, dict):
    return {Cleanse(k, encoding): Cleanse(v, encoding) for k, v in obj.items()}
  else:
    return obj
57,547
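A short usage sketch for `Cleanse` above, assuming it can be imported (the module path below is an assumption about where the function lives in the TensorBoard tree):

from tensorboard.backend.json_util import Cleanse

print(Cleanse(float('inf')))          # 'Infinity'
print(Cleanse(float('nan')))          # 'NaN'
print(Cleanse(b'bytes'))              # u'bytes'
print(Cleanse({3, 1, 2}))             # [1, 2, 3]  (sets become sorted lists)
print(Cleanse((1.0, float('-inf'))))  # [1.0, '-Infinity']  (tuples become lists)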
Convert the string file_version in event.proto into a float. Args: file_version: String file_version from event.proto Returns: Version number as a float.
def _ParseFileVersion(file_version):
  tokens = file_version.split('brain.Event:')
  try:
    return float(tokens[-1])
  except ValueError:
    ## This should never happen according to the definition of file_version
    ## specified in event.proto.
    logger.warn(
        ('Invalid event.proto file_version. Defaulting to use of '
         'out-of-order event.step logic for purging expired events.'))
    return -1
57,552
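Expected behavior of `_ParseFileVersion` above on a few inputs. The function is private to the event accumulator, so calling it directly is only for illustration:

print(_ParseFileVersion('brain.Event:2'))   # 2.0
print(_ParseFileVersion('brain.Event:1'))   # 1.0
print(_ParseFileVersion('not-a-version'))   # -1, after logging a warning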
Return the contents of a given plugin asset. Args: plugin_name: The string name of a plugin. asset_name: The string name of an asset. Returns: The string contents of the plugin asset. Raises: KeyError: If the asset is not available.
def RetrievePluginAsset(self, plugin_name, asset_name):
  return plugin_asset_util.RetrieveAsset(self.path, plugin_name, asset_name)
57,555
Returns a dict mapping tags to content specific to that plugin. Args: plugin_name: The name of the plugin for which to fetch plugin-specific content. Raises: KeyError: if the plugin name is not found. Returns: A dict mapping tags to plugin-specific content (which are always strings). Those strings are often serialized protos.
def PluginTagToContent(self, plugin_name):
  if plugin_name not in self._plugin_to_tag_to_content:
    raise KeyError('Plugin %r could not be found.' % plugin_name)
  return self._plugin_to_tag_to_content[plugin_name]
57,557
Given a tag, return the associated session.run() metadata. Args: tag: A string tag associated with the event. Raises: ValueError: If the tag is not found. Returns: The metadata in form of `RunMetadata` proto.
def RunMetadata(self, tag):
  if tag not in self._tagged_metadata:
    raise ValueError('There is no run metadata with this tag name')

  run_metadata = config_pb2.RunMetadata()
  run_metadata.ParseFromString(self._tagged_metadata[tag])
  return run_metadata
57,561
Maybe purge orphaned data due to a TensorFlow crash. When TensorFlow crashes at step T+O and restarts at step T, any events written after step T are now "orphaned" and will be at best misleading if they are included in TensorBoard. This logic attempts to determine if there is orphaned data, and purge it if it is found. Args: event: The event to use as a reference, to determine if a purge is needed.
def _MaybePurgeOrphanedData(self, event):
  if not self.purge_orphaned_data:
    return
  ## Check if the event happened after a crash, and purge expired tags.
  if self.file_version and self.file_version >= 2:
    ## If the file_version is recent enough, use the SessionLog enum
    ## to check for restarts.
    self._CheckForRestartAndMaybePurge(event)
  else:
    ## If there is no file version, default to old logic of checking for
    ## out of order steps.
    self._CheckForOutOfOrderStepAndMaybePurge(event)
57,562
Check for out-of-order event.step and discard expired events for tags. Check if the event is out of order relative to the global most recent step. If it is, purge outdated summaries for tags that the event contains. Args: event: The event to use as reference. If the event is out-of-order, all events with the same tags, but with a greater event.step will be purged.
def _CheckForOutOfOrderStepAndMaybePurge(self, event):
  if event.step < self.most_recent_step and event.HasField('summary'):
    self._Purge(event, by_tags=True)
  else:
    self.most_recent_step = event.step
    self.most_recent_wall_time = event.wall_time
57,564
Constructor of DebuggerDataStreamHandler. Args: events_writer_manager: Manages writing events to disk. numerics_alert_callback: An optional callback run every time a health pill event with bad values (NaN, -Inf, or +Inf) is received. The callback takes the event as a parameter.
def __init__(self, events_writer_manager, numerics_alert_callback=None):
  super(DebuggerDataStreamHandler, self).__init__()
  self._events_writer_manager = events_writer_manager
  self._numerics_alert_callback = numerics_alert_callback

  # We use session_run_index as the "step" value for debugger events because
  # it is unique across all runs. It is not specific to a set of feeds and
  # fetches.
  self._session_run_index = -1
57,590
Records the summary values based on an updated message from the debugger. Logs an error message if writing the event to disk fails. Args: event: The Event proto to be processed.
def on_value_event(self, event):
  if not event.summary.value:
    logger.warn("The summary of the event lacks a value.")
    return

  # The node name property is actually a watch key, which is a concatenation
  # of several pieces of data.
  watch_key = event.summary.value[0].node_name
  if not watch_key.endswith(constants.DEBUG_NUMERIC_SUMMARY_SUFFIX):
    # Ignore events that lack a DebugNumericSummary.
    # NOTE(@chihuahua): We may later handle other types of debug ops.
    return

  # We remove the constants.DEBUG_NUMERIC_SUMMARY_SUFFIX from the end of the
  # watch name because it is not distinguishing: every health pill entry ends
  # with it.
  node_name_and_output_slot = watch_key[
      :-len(constants.DEBUG_NUMERIC_SUMMARY_SUFFIX)]

  shape = tensor_util.make_ndarray(event.summary.value[0].tensor).shape
  if (len(shape) != 1 or
      shape[0] < constants.MIN_DEBUG_NUMERIC_SUMMARY_TENSOR_LENGTH):
    logger.warn("Health-pill tensor either lacks a dimension or is "
                "shaped incorrectly: %s" % shape)
    return

  match = re.match(r"^(.*):(\d+)$", node_name_and_output_slot)
  if not match:
    logger.warn(
        ("An event with a health pill has an invalid node name and output "
         "slot combination, (i.e., an unexpected debug op): %r"),
        node_name_and_output_slot)
    return

  if self._session_run_index >= 0:
    event.step = self._session_run_index
  else:
    # Data from parameter servers (or any graphs without a master) do not
    # contain core metadata. So the session run count is missing. Set its
    # value to a microsecond epoch timestamp.
    event.step = int(time.time() * 1e6)

  # Write this event to the events file designated for data from the
  # debugger.
  self._events_writer_manager.write_event(event)

  alert = numerics_alert.extract_numerics_alert(event)
  if self._numerics_alert_callback and alert:
    self._numerics_alert_callback(alert)
57,591
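A standalone sketch of how `on_value_event` above splits a watch key into a node name and output slot. The suffix string and watch key below are illustrative assumptions, not values taken from a real debugger run:

import re

DEBUG_NUMERIC_SUMMARY_SUFFIX = ':DebugNumericSummary'  # illustrative value
watch_key = 'dense/MatMul:0' + DEBUG_NUMERIC_SUMMARY_SUFFIX

# Strip the non-distinguishing suffix, then split '<node>:<slot>'.
node_name_and_output_slot = watch_key[:-len(DEBUG_NUMERIC_SUMMARY_SUFFIX)]
match = re.match(r"^(.*):(\d+)$", node_name_and_output_slot)
print(match.group(1), int(match.group(2)))  # dense/MatMul 0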
Parses the session_run_index value from the event proto. Args: event: The event with metadata that contains the session_run_index. Returns: The int session_run_index value. Or constants.SENTINEL_FOR_UNDETERMINED_STEP if it could not be determined.
def _parse_session_run_index(self, event):
  metadata_string = event.log_message.message
  try:
    metadata = json.loads(metadata_string)
  except ValueError as e:
    logger.error(
        "Could not decode metadata string '%s' for step value: %s",
        metadata_string, e)
    return constants.SENTINEL_FOR_UNDETERMINED_STEP

  try:
    return metadata["session_run_index"]
  except KeyError:
    logger.error(
        "The session_run_index is missing from the metadata: %s",
        metadata_string)
    return constants.SENTINEL_FOR_UNDETERMINED_STEP
57,592
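A minimal sketch of the decode-then-lookup logic in `_parse_session_run_index` above, without the logging. The sentinel value here is an assumption standing in for `constants.SENTINEL_FOR_UNDETERMINED_STEP`:

import json

SENTINEL_FOR_UNDETERMINED_STEP = -1  # assumption: placeholder for the real constant

def parse_session_run_index(metadata_string):
  # Decode the JSON payload, then look up the step index; fall back to the
  # sentinel if either step fails.
  try:
    metadata = json.loads(metadata_string)
  except ValueError:
    return SENTINEL_FOR_UNDETERMINED_STEP
  return metadata.get("session_run_index", SENTINEL_FOR_UNDETERMINED_STEP)

print(parse_session_run_index('{"session_run_index": 7}'))  # 7
print(parse_session_run_index('not json'))                  # -1
print(parse_session_run_index('{}'))                        # -1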
Receives health pills from a debugger and writes them to disk. Args: receive_port: The port at which to receive health pills from the TensorFlow debugger. logdir: The directory in which to write events files that TensorBoard will read. always_flush: A boolean indicating whether the EventsWriter will be flushed after every write. Can be used for testing.
def __init__(self, receive_port, logdir, always_flush=False):
  # We create a special directory within logdir to store debugger-related
  # events (if that directory does not already exist). This is necessary
  # because for each directory within logdir, TensorBoard only reads through
  # each events file once. There may be other non-debugger events files being
  # written to at the same time. Without this special directory, TensorBoard
  # may stop surfacing health pills after some arbitrary step value.
  debugger_directory = os.path.join(
      os.path.expanduser(logdir), constants.DEBUGGER_DATA_DIRECTORY_NAME)

  if not tf.io.gfile.exists(debugger_directory):
    try:
      tf.io.gfile.makedirs(debugger_directory)
      logger.info("Created directory for debugger data: %s",
                  debugger_directory)
    except tf.errors.OpError as e:
      logger.fatal(
          "Could not make directory for debugger data: %s. Error: %s",
          debugger_directory, e)

  self._events_writer_manager = events_writer_manager_lib.EventsWriterManager(
      events_directory=debugger_directory,
      always_flush=always_flush)

  # Write an event with a file version as the first event within the events
  # file. If the event version is 2, TensorBoard uses a path for purging
  # events that does not depend on step. This is important because debugger
  # events use a notion of step that differs from that of the rest of
  # TensorBoard.
  try:
    self._events_writer_manager.write_event(
        tf.compat.v1.Event(
            wall_time=0, step=0, file_version=constants.EVENTS_VERSION))
  except IOError as e:
    logger.error(
        "Writing to %s failed: %s",
        self._events_writer_manager.get_current_file_name(), e)

  # See if a backup file exists. If so, use it to initialize the registry.
  self._registry_backup_file_path = os.path.join(
      debugger_directory, constants.ALERT_REGISTRY_BACKUP_FILE_NAME)
  initial_data = None

  if tf.io.gfile.exists(self._registry_backup_file_path):
    # A backup file exists. Read its contents to use for initialization.
    with tf.io.gfile.GFile(self._registry_backup_file_path,
                           "r") as backup_file:
      try:
        # Use the data to initialize the registry.
        initial_data = json.load(backup_file)
      except ValueError as err:
        # Could not parse the data. No backup data obtained.
        logger.error(
            "Could not parse contents of %s: %s",
            self._registry_backup_file_path, err)

  self._numerics_alert_registry = numerics_alert.NumericsAlertRegistry(
      initialization_list=initial_data)

  self._numerics_alert_lock = threading.Lock()
  curried_handler_constructor = functools.partial(
      DebuggerDataStreamHandler,
      self._events_writer_manager,
      self._numerics_alert_callback)
  grpc_debug_server.EventListenerBaseServicer.__init__(
      self, receive_port, curried_handler_constructor)
57,593
Given a tag and list of runs, serve a list of metadata for images. Note that the images themselves are not sent; instead, we respond with URLs to the images. The frontend should treat these URLs as opaque and should not try to parse information about them or generate them itself, as the format may change. Args: request: A werkzeug.wrappers.Request object. Returns: A werkzeug.Response application.
def _serve_image_metadata(self, request):
  tag = request.args.get('tag')
  run = request.args.get('run')
  sample = int(request.args.get('sample', 0))
  response = self._image_response_for_run(run, tag, sample)
  return http_util.Respond(request, response, 'application/json')
57,600
Returns the actual image bytes for a given image. Args: run: The name of the run the image belongs to. tag: The name of the tag the image belongs to. index: The index of the image in the current reservoir. sample: The zero-indexed sample of the image to retrieve (for example, setting `sample` to `2` will fetch the third image sample at `step`). Returns: A bytestring of the raw image bytes.
def _get_individual_image(self, run, tag, index, sample):
  if self._db_connection_provider:
    db = self._db_connection_provider()
    cursor = db.execute(
        ,
        {'run': run,
         'tag': tag,
         'sample': sample,
         'index': index,
         'dtype': tf.string.as_datatype_enum})
    (data,) = cursor.fetchone()
    return six.binary_type(data)
  events = self._filter_by_sample(self._multiplexer.Tensors(run, tag), sample)
  images = events[index].tensor_proto.string_val[2:]  # skip width, height
  return images[sample]
57,603
Generate a PR curve with precision and recall evenly weighted. Arguments: logdir: The directory into which to store all the runs' data. steps: The number of steps to run for. run_name: The name of the run. thresholds: The number of thresholds to use for PR curves. mask_every_other_prediction: Whether to mask every other prediction by alternating weights between 0 and 1.
def start_runs(
    logdir,
    steps,
    run_name,
    thresholds,
    mask_every_other_prediction=False):
  tf.compat.v1.reset_default_graph()
  tf.compat.v1.set_random_seed(42)

  # Create a normal distribution layer used to generate true color labels.
  distribution = tf.compat.v1.distributions.Normal(loc=0., scale=142.)

  # Sample the distribution to generate colors. Lets generate different numbers
  # of each color. The first dimension is the count of examples.

  # The calls to sample() are given fixed random seed values that are "magic"
  # in that they correspond to the default seeds for those ops when the PR
  # curve test (which depends on this code) was written. We've pinned these
  # instead of continuing to use the defaults since the defaults are based on
  # node IDs from the sequence of nodes added to the graph, which can silently
  # change when this code or any TF op implementations it uses are modified.

  # TODO(nickfelt): redo the PR curve test to avoid reliance on random seeds.

  # Generate reds.
  number_of_reds = 100
  true_reds = tf.clip_by_value(
      tf.concat([
          255 - tf.abs(distribution.sample([number_of_reds, 1], seed=11)),
          tf.abs(distribution.sample([number_of_reds, 2], seed=34))
      ], axis=1),
      0, 255)

  # Generate greens.
  number_of_greens = 200
  true_greens = tf.clip_by_value(
      tf.concat([
          tf.abs(distribution.sample([number_of_greens, 1], seed=61)),
          255 - tf.abs(distribution.sample([number_of_greens, 1], seed=82)),
          tf.abs(distribution.sample([number_of_greens, 1], seed=105))
      ], axis=1),
      0, 255)

  # Generate blues.
  number_of_blues = 150
  true_blues = tf.clip_by_value(
      tf.concat([
          tf.abs(distribution.sample([number_of_blues, 2], seed=132)),
          255 - tf.abs(distribution.sample([number_of_blues, 1], seed=153))
      ], axis=1),
      0, 255)

  # Assign each color a vector of 3 booleans based on its true label.
  labels = tf.concat([
      tf.tile(tf.constant([[True, False, False]]), (number_of_reds, 1)),
      tf.tile(tf.constant([[False, True, False]]), (number_of_greens, 1)),
      tf.tile(tf.constant([[False, False, True]]), (number_of_blues, 1)),
  ], axis=0)

  # We introduce 3 normal distributions. They are used to predict whether a
  # color falls under a certain class (based on distances from corners of the
  # color triangle). The distributions vary per color. We have the
  # distributions narrow over time.
  initial_standard_deviations = [v + FLAGS.steps for v in (158, 200, 242)]
  iteration = tf.compat.v1.placeholder(tf.int32, shape=[])
  red_predictor = tf.compat.v1.distributions.Normal(
      loc=0.,
      scale=tf.cast(
          initial_standard_deviations[0] - iteration,
          dtype=tf.float32))
  green_predictor = tf.compat.v1.distributions.Normal(
      loc=0.,
      scale=tf.cast(
          initial_standard_deviations[1] - iteration,
          dtype=tf.float32))
  blue_predictor = tf.compat.v1.distributions.Normal(
      loc=0.,
      scale=tf.cast(
          initial_standard_deviations[2] - iteration,
          dtype=tf.float32))

  # Make predictions (assign 3 probabilities to each color based on each
  # color's distance to each of the 3 corners). We seek double the area in the
  # right tail of the normal distribution.
  examples = tf.concat([true_reds, true_greens, true_blues], axis=0)
  probabilities_colors_are_red = (1 - red_predictor.cdf(
      tf.norm(tensor=examples - tf.constant([255., 0, 0]), axis=1))) * 2
  probabilities_colors_are_green = (1 - green_predictor.cdf(
      tf.norm(tensor=examples - tf.constant([0, 255., 0]), axis=1))) * 2
  probabilities_colors_are_blue = (1 - blue_predictor.cdf(
      tf.norm(tensor=examples - tf.constant([0, 0, 255.]), axis=1))) * 2

  predictions = (
      probabilities_colors_are_red,
      probabilities_colors_are_green,
      probabilities_colors_are_blue
  )

  # This is the crucial piece. We write data required for generating PR curves.
  # We create 1 summary per class because we create 1 PR curve per class.
  for i, color in enumerate(('red', 'green', 'blue')):
    description = ('The probabilities used to create this PR curve are '
                   'generated from a normal distribution. Its standard '
                   'deviation is initially %0.0f and decreases over time.' %
                   initial_standard_deviations[i])
    weights = None
    if mask_every_other_prediction:
      # Assign a weight of 0 to every even-indexed prediction. Odd-indexed
      # predictions are assigned a default weight of 1.
      consecutive_indices = tf.reshape(
          tf.range(tf.size(input=predictions[i])),
          tf.shape(input=predictions[i]))
      weights = tf.cast(consecutive_indices % 2, dtype=tf.float32)

    summary.op(
        name=color,
        labels=labels[:, i],
        predictions=predictions[i],
        num_thresholds=thresholds,
        weights=weights,
        display_name='classifying %s' % color,
        description=description)
  merged_summary_op = tf.compat.v1.summary.merge_all()
  events_directory = os.path.join(logdir, run_name)
  sess = tf.compat.v1.Session()
  writer = tf.compat.v1.summary.FileWriter(events_directory, sess.graph)

  for step in xrange(steps):
    feed_dict = {
        iteration: step,
    }
    merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict)
    writer.add_summary(merged_summary, step)

  writer.close()
57,606
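The "double the area in the right tail" trick in `start_runs` above maps a distance d under a zero-mean normal with scale sigma to 2 * (1 - CDF(d)), which equals 1 when d = 0 and decays toward 0 as an example moves away from a color corner. A minimal standard-library sketch of the same arithmetic (the function name and inputs are illustrative, not part of the demo):

import math

def right_tail_probability(distance, scale):
  # CDF of a zero-mean normal with standard deviation `scale`.
  cdf = 0.5 * (1.0 + math.erf(distance / (scale * math.sqrt(2.0))))
  # Double the right-tail area so a zero distance maps to probability 1.
  return 2.0 * (1.0 - cdf)

print(right_tail_probability(0.0, 158.0))    # 1.0
print(right_tail_probability(255.0, 158.0))  # much smaller, far from the corner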
Generate PR curve summaries. Arguments: logdir: The directory into which to store all the runs' data. steps: The number of steps to run for. verbose: Whether to print the names of runs into stdout during execution. thresholds: The number of thresholds to use for PR curves.
def run_all(logdir, steps, thresholds, verbose=False):
  # First, we generate data for a PR curve that assigns even weights for
  # predictions of all classes.
  run_name = 'colors'
  if verbose:
    print('--- Running: %s' % run_name)
  start_runs(
      logdir=logdir,
      steps=steps,
      run_name=run_name,
      thresholds=thresholds)

  # Next, we generate data for a PR curve that assigns arbitrary weights to
  # predictions.
  run_name = 'mask_every_other_prediction'
  if verbose:
    print('--- Running: %s' % run_name)
  start_runs(
      logdir=logdir,
      steps=steps,
      run_name=run_name,
      thresholds=thresholds,
      mask_every_other_prediction=True)
57,607
Constructs the WitConfigBuilder object. Args: examples: A list of tf.Example or tf.SequenceExample proto objects. These are the examples that will be displayed in WIT. If no model to infer these examples with is specified through the methods on this class, then WIT will display the examples for exploration, but no model inference will be performed by the tool.
def __init__(self, examples):
  self.config = {}
  self.set_examples(examples)
  self.set_model_type('classification')
  self.set_label_vocab([])
57,609
Sets the examples to be displayed in WIT. Args: examples: List of example protos. Returns: self, in order to enable method chaining.
def set_examples(self, examples):
  self.store('examples', examples)
  if len(examples) > 0:
    self.store('are_sequence_examples',
               isinstance(examples[0], tf.train.SequenceExample))
  return self
57,610
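Because `set_examples` (like the other setters seen in the constructor above) returns `self`, builder calls can be chained. A usage sketch; the import path is the one commonly documented for the What-If Tool and may differ depending on how it is installed:

import tensorflow as tf
from witwidget.notebook.visualization import WitConfigBuilder

examples = [tf.train.Example()]  # normally a list of real, populated examples
config_builder = (WitConfigBuilder(examples)
                  .set_examples(examples)
                  .set_label_vocab(['negative', 'positive']))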
Run simulations on a reasonable set of parameters. Arguments: logdir: the directory into which to store all the runs' data verbose: if true, print out each run's name as it begins.
def run_all(logdir, verbose=False):
  writer = tf.summary.FileWriter(logdir)
  writer.add_summary(create_experiment_summary())
  writer.close()
  session_num = 0
  num_sessions = (len(TEMPERATURE_LIST)*len(TEMPERATURE_LIST)*
                  len(HEAT_COEFFICIENTS)*2)
  for initial_temperature in TEMPERATURE_LIST:
    for ambient_temperature in TEMPERATURE_LIST:
      for material in HEAT_COEFFICIENTS:
        hparams = {u'initial_temperature': initial_temperature,
                   u'ambient_temperature': ambient_temperature,
                   u'material': material}
        hparam_str = str(hparams)
        group_name = fingerprint(hparam_str)
        for repeat_idx in xrange(2):
          session_id = str(session_num)
          if verbose:
            print('--- Running training session %d/%d' % (session_num + 1,
                                                          num_sessions))
            print(hparam_str)
            print('--- repeat #: %d' % (repeat_idx+1))
          run(logdir, session_id, hparams, group_name)
          session_num += 1
57,643
Reads contents of a file to a string. Args: filename: string, a path binary_mode: bool, read as binary if True, otherwise text size: int, number of bytes or characters to read, otherwise read all the contents of the file from the offset offset: int, offset into file to read from, otherwise read from the very beginning Returns: Subset of the contents of the file as a string or bytes.
def read(self, filename, binary_mode=False, size=None, offset=None):
  mode = "rb" if binary_mode else "r"
  with io.open(filename, mode) as f:
    if offset is not None:
      f.seek(offset)
    if size is not None:
      return f.read(size)
    else:
      return f.read()
57,647
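A standalone sketch of the offset/size semantics that the local-filesystem `read` above implements, using `io.open` the same way it does:

import io
import tempfile

with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as tmp:
  tmp.write('hello world')
  path = tmp.name

with io.open(path, 'r') as f:
  f.seek(6)          # offset=6
  print(f.read(5))   # size=5 -> 'world'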
Reads contents of a file to a string. Args: filename: string, a path binary_mode: bool, read as binary if True, otherwise text size: int, number of bytes or characters to read, otherwise read all the contents of the file from the offset offset: int, offset into file to read from, otherwise read from the very beginning Returns: Subset of the contents of the file as a string or bytes.
def read(self, filename, binary_mode=False, size=None, offset=None):
  s3 = boto3.resource("s3")
  bucket, path = self.bucket_and_path(filename)
  args = {}
  endpoint = 0
  if size is not None or offset is not None:
    if offset is None:
      offset = 0
    endpoint = '' if size is None else (offset + size)
    args['Range'] = 'bytes={}-{}'.format(offset, endpoint)
  try:
    stream = s3.Object(bucket, path).get(**args)['Body'].read()
  except botocore.exceptions.ClientError as exc:
    if exc.response['Error']['Code'] == '416':
      if size is not None:
        # Asked for too much, so request just to the end. Do this
        # in a second request so we don't check length in all cases.
        client = boto3.client("s3")
        obj = client.head_object(Bucket=bucket, Key=path)
        len = obj['ContentLength']
        endpoint = min(len, offset + size)
      if offset == endpoint:
        # Asked for no bytes, so just return empty
        stream = b''
      else:
        args['Range'] = 'bytes={}-{}'.format(offset, endpoint)
        stream = s3.Object(bucket, path).get(**args)['Body'].read()
    else:
      raise
  if binary_mode:
    return bytes(stream)
  else:
    return stream.decode('utf-8')
57,653
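The S3 variant of `read` above translates offset/size into an HTTP `Range` header. A standalone sketch of just that arithmetic (note that a range like `bytes=10-15` is inclusive on both ends, so as written the request covers one byte beyond `offset + size - 1`):

def range_header(offset=None, size=None):
  # Mirrors the header construction in the S3 read() above.
  if size is None and offset is None:
    return None
  if offset is None:
    offset = 0
  endpoint = '' if size is None else (offset + size)
  return 'bytes={}-{}'.format(offset, endpoint)

print(range_header(offset=10, size=5))  # 'bytes=10-15'
print(range_header(offset=10))          # 'bytes=10-'  (read to end of object)
print(range_header(size=4))             # 'bytes=0-4'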
Launch and display a TensorBoard instance as if at the command line. Args: args_string: Command-line arguments to TensorBoard, to be interpreted by `shlex.split`: e.g., "--logdir ./logs --port 0". Shell metacharacters are not supported: e.g., "--logdir 2>&1" will point the logdir at the literal directory named "2>&1".
def start(args_string):
  context = _get_context()
  try:
    import IPython
    import IPython.display
  except ImportError:
    IPython = None

  if context == _CONTEXT_NONE:
    handle = None
    print("Launching TensorBoard...")
  else:
    handle = IPython.display.display(
        IPython.display.Pretty("Launching TensorBoard..."),
        display_id=True,
    )

  def print_or_update(message):
    if handle is None:
      print(message)
    else:
      handle.update(IPython.display.Pretty(message))

  parsed_args = shlex.split(args_string, comments=True, posix=True)
  start_result = manager.start(parsed_args)

  if isinstance(start_result, manager.StartLaunched):
    _display(
        port=start_result.info.port,
        print_message=False,
        display_handle=handle,
    )

  elif isinstance(start_result, manager.StartReused):
    template = (
        "Reusing TensorBoard on port {port} (pid {pid}), started {delta} ago. "
        "(Use '!kill {pid}' to kill it.)"
    )
    message = template.format(
        port=start_result.info.port,
        pid=start_result.info.pid,
        delta=_time_delta_from_info(start_result.info),
    )
    print_or_update(message)
    _display(
        port=start_result.info.port,
        print_message=False,
        display_handle=None,
    )

  elif isinstance(start_result, manager.StartFailed):

    def format_stream(name, value):
      if value == "":
        return ""
      elif value is None:
        return "\n<could not read %s>" % name
      else:
        return "\nContents of %s:\n%s" % (name, value.strip())

    message = (
        "ERROR: Failed to launch TensorBoard (exited with %d).%s%s" %
        (
            start_result.exit_code,
            format_stream("stderr", start_result.stderr),
            format_stream("stdout", start_result.stdout),
        )
    )
    print_or_update(message)

  elif isinstance(start_result, manager.StartTimedOut):
    message = (
        "ERROR: Timed out waiting for TensorBoard to start. "
        "It may still be running as pid %d." % start_result.pid
    )
    print_or_update(message)

  else:
    raise TypeError(
        "Unexpected result from `manager.start`: %r.\n"
        "This is a TensorBoard bug; please report it." % start_result
    )
57,663
Format the elapsed time for the given TensorBoardInfo. Args: info: A TensorBoardInfo value. Returns: A human-readable string describing the time since the server described by `info` started: e.g., "2 days, 0:48:58".
def _time_delta_from_info(info):
  delta_seconds = int(time.time()) - info.start_time
  return str(datetime.timedelta(seconds=delta_seconds))
57,664
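`str()` of a `datetime.timedelta` already produces the "2 days, 0:48:58" style that the docstring above mentions, so `_time_delta_from_info` needs no extra formatting:

import datetime

delta_seconds = 2 * 24 * 3600 + 48 * 60 + 58
print(str(datetime.timedelta(seconds=delta_seconds)))  # '2 days, 0:48:58'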
Display a TensorBoard instance already running on this machine. Args: port: The port on which the TensorBoard server is listening, as an `int`, or `None` to automatically select the most recently launched TensorBoard. height: The height of the frame into which to render the TensorBoard UI, as an `int` number of pixels, or `None` to use a default value (currently 800).
def display(port=None, height=None):
  _display(port=port, height=height, print_message=True, display_handle=None)
57,665
Internal version of `display`. Args: port: As with `display`. height: As with `display`. print_message: True to print which TensorBoard instance was selected for display (if applicable), or False otherwise. display_handle: If not None, an IPython display handle into which to render TensorBoard.
def _display(port=None, height=None, print_message=False, display_handle=None):
  if height is None:
    height = 800

  if port is None:
    infos = manager.get_all()
    if not infos:
      raise ValueError(
          "Can't display TensorBoard: no known instances running.")
    else:
      info = max(manager.get_all(), key=lambda x: x.start_time)
      port = info.port
  else:
    infos = [i for i in manager.get_all() if i.port == port]
    info = (
        max(infos, key=lambda x: x.start_time)
        if infos
        else None
    )

  if print_message:
    if info is not None:
      message = (
          "Selecting TensorBoard with {data_source} "
          "(started {delta} ago; port {port}, pid {pid})."
      ).format(
          data_source=manager.data_source_from_info(info),
          delta=_time_delta_from_info(info),
          port=info.port,
          pid=info.pid,
      )
      print(message)
    else:
      # The user explicitly provided a port, and we don't have any
      # additional information. There's nothing useful to say.
      pass

  fn = {
      _CONTEXT_COLAB: _display_colab,
      _CONTEXT_IPYTHON: _display_ipython,
      _CONTEXT_NONE: _display_cli,
  }[_get_context()]
  return fn(port=port, height=height, display_handle=display_handle)
57,666
Check the path name to see if it is probably a TF Events file. Args: path: A file path to check if it is an event file. Raises: ValueError: If the path is an empty string. Returns: True if `path` is formatted like a TensorFlow events file.
def IsTensorFlowEventsFile(path):
  if not path:
    raise ValueError('Path must be a nonempty string')
  return 'tfevents' in tf.compat.as_str_any(os.path.basename(path))
57,671
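Expected behavior of `IsTensorFlowEventsFile` above; the paths below are made up for illustration:

print(IsTensorFlowEventsFile('/logs/run1/events.out.tfevents.1546300800.host'))  # True
print(IsTensorFlowEventsFile('/logs/run1/checkpoint'))                           # False
# IsTensorFlowEventsFile('') raises ValueError('Path must be a nonempty string')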