Columns: docstring (string, lengths 52 to 499), function (string, lengths 67 to 35.2k), __index_level_0__ (int64, values 52.6k to 1.16M)
Escapes the glob characters in a path. Python 3 has a glob.escape method, but Python 2 lacks it, so we implement it manually here. Args: path: The absolute path to escape. Returns: The escaped path string.
def _EscapeGlobCharacters(path):
  drive, path = os.path.splitdrive(path)
  return '%s%s' % (drive, _ESCAPE_GLOB_CHARACTERS_REGEX.sub(r'[\1]', path))
57,673
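A minimal standalone sketch of the same escaping idea. The regex name and pattern below are illustrative assumptions (a pattern capturing the glob metacharacters *, ?, and [), not taken from the row above:

import os
import re

# Illustrative pattern: capture each glob metacharacter so it can be bracketed.
_ESCAPE_GLOB_CHARACTERS_REGEX = re.compile(r'([*?[])')

def escape_glob_characters(path):
  # Wrap each glob metacharacter in brackets so glob treats it literally,
  # leaving a Windows drive prefix (e.g. "C:") untouched.
  drive, rest = os.path.splitdrive(path)
  return '%s%s' % (drive, _ESCAPE_GLOB_CHARACTERS_REGEX.sub(r'[\1]', rest))

print(escape_glob_characters('/logs/run[1]/*'))  # prints /logs/run[[]1]/[*]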
Obtains all subdirectories with events files. The order of the subdirectories returned is unspecified. The internal logic that determines order varies by scenario. Args: path: The path to a directory under which to find subdirectories. Returns: A tuple of absolute paths of all subdirectories each with at least 1 events file directly within the subdirectory. Raises: ValueError: If the path passed to the method exists and is not a directory.
def GetLogdirSubdirectories(path):
  if not tf.io.gfile.exists(path):
    # No directory to traverse.
    return ()
  if not tf.io.gfile.isdir(path):
    raise ValueError('GetLogdirSubdirectories: path exists and is not a '
                     'directory, %s' % path)
  if IsCloudPath(path):
    # Glob-ing for files can be significantly faster than recursively
    # walking through directories for some file systems.
    logger.info(
        'GetLogdirSubdirectories: Starting to list directories via glob-ing.')
    traversal_method = ListRecursivelyViaGlobbing
  else:
    # For other file systems, the glob-ing based method might be slower because
    # each call to glob could involve performing a recursive walk.
    logger.info(
        'GetLogdirSubdirectories: Starting to list directories via walking.')
    traversal_method = ListRecursivelyViaWalking
  return (
      subdir
      for (subdir, files) in traversal_method(path)
      if any(IsTensorFlowEventsFile(f) for f in files)
  )
57,676
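The walking-based traversal helper itself is not shown in this row. A plausible minimal sketch of what such a helper could look like (the name list_recursively_via_walking is hypothetical, not the project's actual implementation):

import tensorflow as tf

def list_recursively_via_walking(top):
  # Yield (directory, file names) pairs for every directory under `top`,
  # which is the (subdir, files) shape that GetLogdirSubdirectories filters above.
  for dir_path, _, filenames in tf.io.gfile.walk(top):
    yield (dir_path, filenames)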
Determines whether a health pill event contains bad values. A bad value is one of NaN, -Inf, or +Inf. Args: event: (`Event`) A `tensorflow.Event` proto from `DebugNumericSummary` ops. Returns: An instance of `NumericsAlert`, if bad values are found. `None`, if no bad values are found. Raises: ValueError: if the event does not have the expected tag prefix or the debug op name is not the expected debug op name suffix.
def extract_numerics_alert(event):
  value = event.summary.value[0]
  debugger_plugin_metadata_content = None
  if value.HasField("metadata"):
    plugin_data = value.metadata.plugin_data
    if plugin_data.plugin_name == constants.DEBUGGER_PLUGIN_NAME:
      debugger_plugin_metadata_content = plugin_data.content
  if not debugger_plugin_metadata_content:
    raise ValueError("Event proto input lacks debugger plugin SummaryMetadata.")
  debugger_plugin_metadata_content = tf.compat.as_text(
      debugger_plugin_metadata_content)
  try:
    content_object = json.loads(debugger_plugin_metadata_content)
    device_name = content_object["device"]
  except (KeyError, ValueError) as e:
    raise ValueError("Could not determine device from JSON string %r, %r" %
                     (debugger_plugin_metadata_content, e))
  debug_op_suffix = ":DebugNumericSummary"
  if not value.node_name.endswith(debug_op_suffix):
    raise ValueError(
        "Event proto input does not have the expected debug op suffix %s" %
        debug_op_suffix)
  tensor_name = value.node_name[:-len(debug_op_suffix)]
  elements = tf_debug.load_tensor_from_event(event)
  nan_count = elements[constants.NAN_NUMERIC_SUMMARY_OP_INDEX]
  neg_inf_count = elements[constants.NEG_INF_NUMERIC_SUMMARY_OP_INDEX]
  pos_inf_count = elements[constants.POS_INF_NUMERIC_SUMMARY_OP_INDEX]
  if nan_count > 0 or neg_inf_count > 0 or pos_inf_count > 0:
    return NumericsAlert(
        device_name, tensor_name, event.wall_time, nan_count, neg_inf_count,
        pos_inf_count)
  return None
57,678
Tracks events for a single category of values. Args: event_count: The initial event count to use. first_timestamp: The timestamp of the first event with this value. last_timestamp: The timestamp of the last event with this category of values.
def __init__(self, event_count=0, first_timestamp=-1, last_timestamp=-1):
  # When updating the properties of this class, make sure to keep
  # EventTrackerDescription in sync so that data can be written to and from
  # disk correctly.
  self.event_count = event_count
  self.first_timestamp = first_timestamp
  self.last_timestamp = last_timestamp
57,679
Stores alert history for a single device, tensor pair. Args: initialization_list: (`list`) An optional list parsed from JSON read from disk. That entity is used to initialize this NumericsAlertHistory. Use the create_jsonable_object method of this class to create such an object.
def __init__(self, initialization_list=None):
  if initialization_list:
    # Use data to initialize this NumericsAlertHistory.
    self._trackers = {}
    for value_category_key, description_list in initialization_list.items():
      description = EventTrackerDescription._make(description_list)
      self._trackers[value_category_key] = _EventTracker(
          event_count=description.event_count,
          first_timestamp=description.first_timestamp,
          last_timestamp=description.last_timestamp)
  else:
    # Start cleanly, with no prior data.
    self._trackers = {
        constants.NAN_KEY: _EventTracker(),
        constants.NEG_INF_KEY: _EventTracker(),
        constants.POS_INF_KEY: _EventTracker(),
    }
57,681
Obtain the first timestamp. Args: event_key: the type key of the sought events (e.g., constants.NAN_KEY). If None, includes all event type keys. Returns: First (earliest) timestamp of all the events of the given type (or all event types if event_key is None).
def first_timestamp(self, event_key=None):
  if event_key is None:
    timestamps = [
        self._trackers[key].first_timestamp for key in self._trackers
    ]
    return min(timestamp for timestamp in timestamps if timestamp >= 0)
  else:
    return self._trackers[event_key].first_timestamp
57,683
Obtain the last timestamp. Args: event_key: the type key of the sought events (e.g., constants.NAN_KEY). If None, includes all event type keys. Returns: Last (latest) timestamp of all the events of the given type (or all event types if event_key is None).
def last_timestamp(self, event_key=None):
  if event_key is None:
    timestamps = [
        self._trackers[key].last_timestamp for key in self._trackers
    ]
    return max(timestamp for timestamp in timestamps if timestamp >= 0)
  else:
    return self._trackers[event_key].last_timestamp
57,684
Constructor. Args: capacity: (`int`) maximum number of device-tensor keys to store. initialization_list: (`list`) An optional list (parsed from JSON) that is used to initialize the data within this registry. Use the create_jsonable_registry method of NumericsAlertRegistry to create such a list.
def __init__(self, capacity=100, initialization_list=None):
  self._capacity = capacity
  # A map from device-tensor key to the NumericsAlertHistory for that pair.
  # The device-tensor key is a 2-tuple of the format (device_name, node_name).
  # E.g., ("/job:worker/replica:0/task:1/gpu:0", "cross_entropy/Log:0").
  self._data = dict()
  if initialization_list:
    # Initialize the alert registry using the data passed in. This might be
    # backup data used to restore the registry after, say, a Borg preemption.
    for entry in initialization_list:
      triplet = HistoryTriplet._make(entry)
      self._data[(triplet.device, triplet.tensor)] = NumericsAlertHistory(
          initialization_list=triplet.jsonable_history)
57,686
Register an alerting numeric event. Args: numerics_alert: An instance of `NumericsAlert`.
def register(self, numerics_alert):
  key = (numerics_alert.device_name, numerics_alert.tensor_name)
  if key in self._data:
    self._data[key].add(numerics_alert)
  else:
    if len(self._data) < self._capacity:
      history = NumericsAlertHistory()
      history.add(numerics_alert)
      self._data[key] = history
57,687
Generate waves of the shapes defined above. Arguments: logdir: the directory into which to store all the runs' data verbose: if true, print out each run's name as it begins
def run_all(logdir, verbose=False):
  waves = [sine_wave, square_wave, triangle_wave,
           bisine_wave, bisine_wahwah_wave]
  for (i, wave_constructor) in enumerate(waves):
    wave_name = wave_constructor.__name__
    run_name = 'wave:%02d,%s' % (i + 1, wave_name)
    if verbose:
      print('--- Running: %s' % run_name)
    run(logdir, run_name, wave_name, wave_constructor)
57,695
Returns the plugin, if possible. Args: context: The TBContext flags. Returns: A ProfilePlugin instance or None if it couldn't be loaded.
def load(self, context):
  try:
    # pylint: disable=g-import-not-at-top,unused-import
    import tensorflow
    # Available in TensorFlow 1.14 or later, so do import check.
    # pylint: disable=g-import-not-at-top,unused-import
    from tensorflow.python.eager import profiler_client
  except ImportError:
    return
  # pylint: disable=g-import-not-at-top
  from tensorboard.plugins.profile.profile_plugin import ProfilePlugin
  return ProfilePlugin(context)
57,709
Create a Keras model with the given hyperparameters. Args: hparams: A dict mapping hyperparameters in `HPARAMS` to values. seed: A hashable object to be used as a random seed (e.g., to construct dropout layers in the model). Returns: A compiled Keras model.
def model_fn(hparams, seed):
  rng = random.Random(seed)
  model = tf.keras.models.Sequential()
  model.add(tf.keras.layers.Input(INPUT_SHAPE))
  model.add(tf.keras.layers.Reshape(INPUT_SHAPE + (1,)))  # grayscale channel
  # Add convolutional layers.
  conv_filters = 8
  for _ in xrange(hparams[HP_CONV_LAYERS]):
    model.add(tf.keras.layers.Conv2D(
        filters=conv_filters,
        kernel_size=hparams[HP_CONV_KERNEL_SIZE],
        padding="same",
        activation="relu",
    ))
    model.add(tf.keras.layers.MaxPool2D(pool_size=2, padding="same"))
    conv_filters *= 2
  model.add(tf.keras.layers.Flatten())
  model.add(tf.keras.layers.Dropout(hparams[HP_DROPOUT], seed=rng.random()))
  # Add fully connected layers.
  dense_neurons = 32
  for _ in xrange(hparams[HP_DENSE_LAYERS]):
    model.add(tf.keras.layers.Dense(dense_neurons, activation="relu"))
    dense_neurons *= 2
  # Add the final output layer.
  model.add(tf.keras.layers.Dense(OUTPUT_CLASSES, activation="softmax"))
  model.compile(
      loss="sparse_categorical_crossentropy",
      optimizer=hparams[HP_OPTIMIZER],
      metrics=["accuracy"],
  )
  return model
57,710
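As a usage sketch, assuming the HP_* hyperparameter constants, INPUT_SHAPE, and OUTPUT_CLASSES defined elsewhere in this demo module, model_fn takes a plain dict keyed by those constants:

hparams = {
    HP_CONV_LAYERS: 2,
    HP_CONV_KERNEL_SIZE: 3,
    HP_DROPOUT: 0.2,
    HP_DENSE_LAYERS: 1,
    HP_OPTIMIZER: 'adam',
}
model = model_fn(hparams=hparams, seed=0)
model.summary()  # prints the layer stack chosen by these hyperparameters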
Run a training/validation session. Flags must have been parsed for this function to behave. Args: data: The data as loaded by `prepare_data()`. base_logdir: The top-level logdir to which to write summary data. session_id: A unique string ID for this session. group_id: The string ID of the session group that includes this session. hparams: A dict mapping hyperparameters in `HPARAMS` to values.
def run(data, base_logdir, session_id, group_id, hparams):
  model = model_fn(hparams=hparams, seed=session_id)
  logdir = os.path.join(base_logdir, session_id)
  callback = tf.keras.callbacks.TensorBoard(
      logdir,
      update_freq=flags.FLAGS.summary_freq,
      profile_batch=0,  # workaround for issue #2084
  )
  hparams_callback = hp.KerasCallback(logdir, hparams, group_name=group_id)
  ((x_train, y_train), (x_test, y_test)) = data
  result = model.fit(
      x=x_train,
      y=y_train,
      epochs=flags.FLAGS.num_epochs,
      shuffle=False,
      validation_data=(x_test, y_test),
      callbacks=[callback, hparams_callback],
  )
57,711
Perform random search over the hyperparameter space. Arguments: logdir: The top-level directory into which to write data. This directory should be empty or nonexistent. verbose: If true, print out each run's name as it begins.
def run_all(logdir, verbose=False):
  data = prepare_data()
  rng = random.Random(0)
  base_writer = tf.summary.create_file_writer(logdir)
  with base_writer.as_default():
    experiment = hp.Experiment(hparams=HPARAMS, metrics=METRICS)
    experiment_string = experiment.summary_pb().SerializeToString()
    tf.summary.experimental.write_raw_pb(experiment_string, step=0)
  base_writer.flush()
  base_writer.close()
  sessions_per_group = 2
  num_sessions = flags.FLAGS.num_session_groups * sessions_per_group
  session_index = 0  # across all session groups
  for group_index in xrange(flags.FLAGS.num_session_groups):
    hparams = {h: sample_uniform(h.domain, rng) for h in HPARAMS}
    hparams_string = str(hparams)
    group_id = hashlib.sha256(hparams_string.encode("utf-8")).hexdigest()
    for repeat_index in xrange(sessions_per_group):
      session_id = str(session_index)
      session_index += 1
      if verbose:
        print(
            "--- Running training session %d/%d"
            % (session_index, num_sessions)
        )
        print(hparams_string)
        print("--- repeat #: %d" % (repeat_index + 1))
      run(
          data=data,
          base_logdir=logdir,
          session_id=session_id,
          group_id=group_id,
          hparams=hparams,
      )
57,713
Sample a value uniformly from a domain. Args: domain: An `IntInterval`, `RealInterval`, or `Discrete` domain. rng: A `random.Random` object; defaults to the `random` module. Raises: TypeError: If `domain` is not a known kind of domain. IndexError: If the domain is empty.
def sample_uniform(domain, rng):
  if isinstance(domain, hp.IntInterval):
    return rng.randint(domain.min_value, domain.max_value)
  elif isinstance(domain, hp.RealInterval):
    return rng.uniform(domain.min_value, domain.max_value)
  elif isinstance(domain, hp.Discrete):
    return rng.choice(domain.values)
  else:
    raise TypeError("unknown domain type: %r" % (domain,))
57,714
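A small usage sketch, assuming the TensorBoard hparams API (tensorboard.plugins.hparams.api) provides the IntInterval, RealInterval, and Discrete domains used above:

import random
from tensorboard.plugins.hparams import api as hp

rng = random.Random(0)
num_layers = sample_uniform(hp.IntInterval(1, 3), rng)         # e.g. 2
dropout = sample_uniform(hp.RealInterval(0.1, 0.5), rng)       # e.g. 0.43
optimizer = sample_uniform(hp.Discrete(['adam', 'sgd']), rng)  # e.g. 'adam'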
Creates the JSON object for the PR curves response for a run-tag combo. Arguments: runs: A list of runs to fetch the curves for. tag: The tag to fetch the curves for. Raises: ValueError: If no PR curves could be fetched for a run and tag. Returns: The JSON object for the PR curves route response.
def pr_curves_impl(self, runs, tag):
  if self._db_connection_provider:
    # Serve data from the database.
    db = self._db_connection_provider()
    # We select for steps greater than -1 because the writer inserts
    # placeholder rows en masse. The check for step filters out those rows.
    cursor = db.execute(
        % ','.join(['?'] * len(runs)),
        runs + [tag, metadata.PLUGIN_NAME])
    response_mapping = {}
    for (run, step, wall_time, data, dtype, shape, plugin_data) in cursor:
      if run not in response_mapping:
        response_mapping[run] = []
      buf = np.frombuffer(data, dtype=tf.DType(dtype).as_numpy_dtype)
      data_array = buf.reshape([int(i) for i in shape.split(',')])
      plugin_data_proto = plugin_data_pb2.PrCurvePluginData()
      string_buffer = np.frombuffer(plugin_data, dtype=np.dtype('b'))
      plugin_data_proto.ParseFromString(tf.compat.as_bytes(
          string_buffer.tostring()))
      thresholds = self._compute_thresholds(plugin_data_proto.num_thresholds)
      entry = self._make_pr_entry(step, wall_time, data_array, thresholds)
      response_mapping[run].append(entry)
  else:
    # Serve data from events files.
    response_mapping = {}
    for run in runs:
      try:
        tensor_events = self._multiplexer.Tensors(run, tag)
      except KeyError:
        raise ValueError(
            'No PR curves could be found for run %r and tag %r' % (run, tag))
      content = self._multiplexer.SummaryMetadata(
          run, tag).plugin_data.content
      pr_curve_data = metadata.parse_plugin_metadata(content)
      thresholds = self._compute_thresholds(pr_curve_data.num_thresholds)
      response_mapping[run] = [
          self._process_tensor_event(e, thresholds) for e in tensor_events]
  return response_mapping
57,717
Converts a TensorEvent into a dict that encapsulates information on it. Args: event: The TensorEvent to convert. thresholds: An array of floats that ranges from 0 to 1 (in that direction and inclusive of 0 and 1). Returns: A JSON-able dictionary of PR curve data for 1 step.
def _process_tensor_event(self, event, thresholds):
  return self._make_pr_entry(
      event.step,
      event.wall_time,
      tensor_util.make_ndarray(event.tensor_proto),
      thresholds)
57,721
Creates an entry for PR curve data. Each entry corresponds to 1 step. Args: step: The step. wall_time: The wall time. data_array: A numpy array of PR curve data stored in the summary format. thresholds: An array of floating point thresholds. Returns: A PR curve entry.
def _make_pr_entry(self, step, wall_time, data_array, thresholds):
  # Trim entries for which TP + FP = 0 (precision is undefined) at the tail of
  # the data.
  true_positives = [int(v) for v in data_array[metadata.TRUE_POSITIVES_INDEX]]
  false_positives = [
      int(v) for v in data_array[metadata.FALSE_POSITIVES_INDEX]]
  tp_index = metadata.TRUE_POSITIVES_INDEX
  fp_index = metadata.FALSE_POSITIVES_INDEX
  positives = data_array[[tp_index, fp_index], :].astype(int).sum(axis=0)
  end_index_inclusive = len(positives) - 1
  while end_index_inclusive > 0 and positives[end_index_inclusive] == 0:
    end_index_inclusive -= 1
  end_index = end_index_inclusive + 1
  return {
      'wall_time': wall_time,
      'step': step,
      'precision': data_array[metadata.PRECISION_INDEX, :end_index].tolist(),
      'recall': data_array[metadata.RECALL_INDEX, :end_index].tolist(),
      'true_positives': true_positives[:end_index],
      'false_positives': false_positives[:end_index],
      'true_negatives': [
          int(v) for v in data_array[metadata.TRUE_NEGATIVES_INDEX][:end_index]],
      'false_negatives': [
          int(v) for v in data_array[metadata.FALSE_NEGATIVES_INDEX][:end_index]],
      'thresholds': thresholds[:end_index],
  }
57,722
Normalize a dict keyed by `HParam`s and/or raw strings. Args: hparams: A `dict` whose keys are `HParam` objects and/or strings representing hyperparameter names, and whose values are hyperparameter values. No two keys may have the same name. Returns: A `dict` whose keys are hyperparameter names (as strings) and whose values are the corresponding hyperparameter values. Raises: ValueError: If two entries in `hparams` share the same hyperparameter name.
def _normalize_hparams(hparams):
  result = {}
  for (k, v) in six.iteritems(hparams):
    if isinstance(k, HParam):
      k = k.name
    if k in result:
      raise ValueError("multiple values specified for hparam %r" % (k,))
    result[k] = v
  return result
57,733
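A usage sketch, assuming HParam here is the same class exposed by the hparams API:

from tensorboard.plugins.hparams import api as hp

HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.1, 0.5))
_normalize_hparams({HP_DROPOUT: 0.2, 'optimizer': 'adam'})
# -> {'dropout': 0.2, 'optimizer': 'adam'}
# _normalize_hparams({HP_DROPOUT: 0.2, 'dropout': 0.3}) would raise
# ValueError: multiple values specified for hparam 'dropout'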
Create an experiment object. Args: hparams: A list of `HParam` values. metrics: A list of `Metric` values. user: An optional string denoting the user or group that owns this experiment. description: An optional Markdown string describing this experiment. time_created_secs: The time that this experiment was created, as seconds since epoch. Defaults to the current time.
def __init__(
    self,
    hparams,
    metrics,
    user=None,
    description=None,
    time_created_secs=None,
):
  self._hparams = list(hparams)
  self._metrics = list(metrics)
  self._user = user
  self._description = description
  if time_created_secs is None:
    time_created_secs = time.time()
  self._time_created_secs = time_created_secs
57,734
Create a hyperparameter object. Args: name: A string ID for this hyperparameter, which should be unique within an experiment. domain: An optional `Domain` object describing the values that this hyperparameter can take on. display_name: An optional human-readable display name (`str`). description: An optional Markdown string describing this hyperparameter. Raises: ValueError: If `domain` is not a `Domain`.
def __init__(self, name, domain=None, display_name=None, description=None):
  self._name = name
  self._domain = domain
  self._display_name = display_name
  self._description = description
  if not isinstance(self._domain, (Domain, type(None))):
    raise ValueError("not a domain: %r" % (self._domain,))
57,736
Create a `RealInterval`. Args: min_value: The lower bound (inclusive) of the interval. max_value: The upper bound (inclusive) of the interval. Raises: TypeError: If `min_value` or `max_value` is not a `float`. ValueError: If `min_value > max_value`.
def __init__(self, min_value=None, max_value=None):
  if not isinstance(min_value, float):
    raise TypeError("min_value must be a float: %r" % (min_value,))
  if not isinstance(max_value, float):
    raise TypeError("max_value must be a float: %r" % (max_value,))
  if min_value > max_value:
    raise ValueError("%r > %r" % (min_value, max_value))
  self._min_value = min_value
  self._max_value = max_value
57,739
Parses and asserts a positive (>0) integer query parameter. Args: request: The Werkzeug Request object param_name: Name of the parameter. Returns: Param, or None, or -1 if parameter is not a positive integer.
def _parse_positive_int_param(request, param_name):
  param = request.args.get(param_name)
  if not param:
    return None
  try:
    param = int(param)
    if param <= 0:
      raise ValueError()
    return param
  except ValueError:
    return -1
57,752
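A usage sketch built with Werkzeug's test utilities (the query string values are illustrative):

import werkzeug.test
import werkzeug.wrappers

environ = werkzeug.test.EnvironBuilder(
    query_string='limit=25&offset=abc').get_environ()
request = werkzeug.wrappers.Request(environ)
_parse_positive_int_param(request, 'limit')    # 25
_parse_positive_int_param(request, 'missing')  # None (parameter absent)
_parse_positive_int_param(request, 'offset')   # -1 (not a positive integer)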
Constructs a metadata for an embedding of the specified size. Args: num_points: Number of points in the embedding.
def __init__(self, num_points):
  self.num_points = num_points
  self.column_names = []
  self.name_to_values = {}
57,758
Adds a named column of metadata values. Args: column_name: Name of the column. column_values: 1D array/list/iterable holding the column values. Must be of length `num_points`. The i-th value corresponds to the i-th point. Raises: ValueError: If `column_values` is not 1D array, or of length `num_points`, or the `name` is already used.
def add_column(self, column_name, column_values):
  # Sanity checks.
  if isinstance(column_values, list) and isinstance(column_values[0], list):
    raise ValueError('"column_values" must be a flat list, but we detected '
                     'that its first entry is a list')
  if isinstance(column_values, np.ndarray) and column_values.ndim != 1:
    raise ValueError('"column_values" should be of rank 1, '
                     'but is of rank %d' % column_values.ndim)
  if len(column_values) != self.num_points:
    raise ValueError('"column_values" should be of length %d, but is of '
                     'length %d' % (self.num_points, len(column_values)))
  if column_name in self.name_to_values:
    raise ValueError('The column name "%s" is already used' % column_name)
  self.column_names.append(column_name)
  self.name_to_values[column_name] = column_values
57,759
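A short usage sketch of the metadata class whose constructor and add_column method appear above:

import numpy as np

metadata = EmbeddingMetadata(num_points=3)
metadata.add_column('label', ['cat', 'dog', 'bird'])
metadata.add_column('weight', np.array([1.0, 2.5, 0.7]))
# metadata.add_column('label', ['a', 'b', 'c']) would raise ValueError,
# since the column name "label" is already used.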
Instantiates ProjectorPlugin via TensorBoard core. Args: context: A base_plugin.TBContext instance.
def __init__(self, context):
  self.multiplexer = context.multiplexer
  self.logdir = context.logdir
  self._handlers = None
  self.readers = {}
  self.run_paths = None
  self._configs = {}
  self.old_num_run_paths = None
  self.config_fpaths = None
  self.tensor_cache = LRUCache(_TENSOR_CACHE_CAPACITY)
  # Whether the plugin is active (has meaningful data to process and serve).
  # Once the plugin is deemed active, we no longer re-compute the value
  # because doing so is potentially expensive.
  self._is_active = False
  # The running thread that is currently determining whether the plugin is
  # active. If such a thread exists, do not start a duplicate thread.
  self._thread_for_determining_is_active = None
  if self.multiplexer:
    self.run_paths = self.multiplexer.RunPaths()
57,760
Retrieve the histogram events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.HistogramEvents`.
def Histograms(self, run, tag):
  accumulator = self.GetAccumulator(run)
  return accumulator.Histograms(tag)
57,778
Retrieve the compressed histogram events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.CompressedHistogramEvents`.
def CompressedHistograms(self, run, tag):
  accumulator = self.GetAccumulator(run)
  return accumulator.CompressedHistograms(tag)
57,779
Retrieve the image events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.ImageEvents`.
def Images(self, run, tag):
  accumulator = self.GetAccumulator(run)
  return accumulator.Images(tag)
57,780
Write a TensorBoardInfo file and arrange for its cleanup. Args: server: The result of `self._make_server()`.
def _register_info(self, server):
  server_url = urllib.parse.urlparse(server.get_url())
  info = manager.TensorBoardInfo(
      version=version.VERSION,
      start_time=int(time.time()),
      port=server_url.port,
      pid=os.getpid(),
      path_prefix=self.flags.path_prefix,
      logdir=self.flags.logdir,
      db=self.flags.db,
      cache_key=self.cache_key,
  )
  atexit.register(manager.remove_info_file)
  manager.write_info_file(info)
57,795
Set a signal handler to gracefully exit on the given signal. When this process receives the given signal, it will run `atexit` handlers and then exit with `0`. Args: signal_number: The numeric code for the signal to handle, like `signal.SIGTERM`. signal_name: The human-readable signal name.
def _install_signal_handler(self, signal_number, signal_name):
  old_signal_handler = None  # set below

  def handler(handled_signal_number, frame):
    # In case we catch this signal again while running atexit
    # handlers, take the hint and actually die.
    signal.signal(signal_number, signal.SIG_DFL)
    sys.stderr.write("TensorBoard caught %s; exiting...\n" % signal_name)
    # The main thread is the only non-daemon thread, so it suffices to
    # exit hence.
    if old_signal_handler not in (signal.SIG_IGN, signal.SIG_DFL):
      old_signal_handler(handled_signal_number, frame)
    sys.exit(0)

  old_signal_handler = signal.signal(signal_number, handler)
57,796
Creates a new `OpError` indicating that a particular op failed. Args: node_def: The `node_def_pb2.NodeDef` proto representing the op that failed, if known; otherwise None. op: The `ops.Operation` that failed, if known; otherwise None. message: The message string describing the failure. error_code: The `error_codes.Code` describing the error.
def __init__(self, node_def, op, message, error_code):
  super(OpError, self).__init__()
  self._message = message
  self._node_def = node_def
  self._op = op
  self._error_code = error_code
57,804
Creates temp symlink tree, runs program, and copies back outputs. Args: inputs: List of (fake path, real path) pairs used to build the symlink tree. program: List containing real path of program and its arguments. The execroot directory will be appended as the last argument. outputs: List of fake outputted paths to copy back to real paths. Returns: 0 if succeeded or nonzero if failed.
def run(inputs, program, outputs):
  root = tempfile.mkdtemp()
  try:
    cwd = os.getcwd()
    for fake, real in inputs:
      parent = os.path.join(root, os.path.dirname(fake))
      if not os.path.exists(parent):
        os.makedirs(parent)
      # Use symlink if possible and not on Windows, since on Windows 10
      # symlinks exist but they require administrator privileges to use.
      if hasattr(os, 'symlink') and not os.name == 'nt':
        os.symlink(os.path.join(cwd, real), os.path.join(root, fake))
      else:
        shutil.copyfile(os.path.join(cwd, real), os.path.join(root, fake))
    if subprocess.call(program + [root]) != 0:
      return 1
    for fake, real in outputs:
      shutil.copyfile(os.path.join(root, fake), real)
    return 0
  finally:
    try:
      shutil.rmtree(root)
    except EnvironmentError:
      # Ignore "file in use" errors on Windows; ok since it's just a tmpdir.
      pass
57,826
Invokes run function using a JSON file config. Args: args: CLI args, which can be a JSON file containing an object whose attributes are the parameters to the run function. If multiple JSON files are passed, their contents are concatenated. Returns: 0 if succeeded or nonzero if failed. Raises: Exception: If input data is missing.
def main(args):
  if not args:
    raise Exception('Please specify at least one JSON config path')
  inputs = []
  program = []
  outputs = []
  for arg in args:
    with open(arg) as fd:
      config = json.load(fd)
    inputs.extend(config.get('inputs', []))
    program.extend(config.get('program', []))
    outputs.extend(config.get('outputs', []))
  if not program:
    raise Exception('Please specify a program')
  return run(inputs, program, outputs)
57,827
Initializes the TensorBoard sqlite schema using the given connection. Args: connection: A sqlite DB connection.
def initialize_schema(connection):
  cursor = connection.cursor()
  cursor.execute("PRAGMA application_id={}".format(_TENSORBOARD_APPLICATION_ID))
  cursor.execute("PRAGMA user_version={}".format(_TENSORBOARD_USER_VERSION))
  with connection:
    for statement in _SCHEMA_STATEMENTS:
      lines = statement.strip('\n').split('\n')
      message = lines[0] + ('...' if len(lines) > 1 else '')
      logger.debug('Running DB init statement: %s', message)
      cursor.execute(statement)
57,828
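A usage sketch; the database path is illustrative:

import sqlite3

connection = sqlite3.connect('/tmp/tensorboard.db')  # illustrative path
initialize_schema(connection)
connection.close()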
Returns the ID for the given experiment, creating the row if needed. Args: experiment_name: name of experiment.
def _maybe_init_experiment(self, experiment_name):
  user_id = self._maybe_init_user()
  cursor = self._db.cursor()
  cursor.execute( , (user_id, experiment_name))
  row = cursor.fetchone()
  if row:
    return row[0]
  experiment_id = self._create_id()
  # TODO: track computed time from run start times
  computed_time = 0
  cursor.execute(
      , (user_id, experiment_id, experiment_name, time.time(), computed_time,
         False))
  return experiment_id
57,831
Returns the ID for the given run, creating the row if needed. Args: experiment_name: name of experiment containing this run. run_name: name of run.
def _maybe_init_run(self, experiment_name, run_name):
  experiment_id = self._maybe_init_experiment(experiment_name)
  cursor = self._db.cursor()
  cursor.execute( , (experiment_id, run_name))
  row = cursor.fetchone()
  if row:
    return row[0]
  run_id = self._create_id()
  # TODO: track actual run start times
  started_time = 0
  cursor.execute(
      , (experiment_id, run_id, run_name, time.time(), started_time))
  return run_id
57,832
Returns a tag-to-ID map for the given tags, creating rows if needed. Args: run_id: the ID of the run to which these tags belong. tag_to_metadata: map of tag name to SummaryMetadata for the tag.
def _maybe_init_tags(self, run_id, tag_to_metadata):
  cursor = self._db.cursor()
  # TODO: for huge numbers of tags (e.g. 1000+), this is slower than just
  # querying for the known tag names explicitly; find a better tradeoff.
  cursor.execute('SELECT tag_name, tag_id FROM Tags WHERE run_id = ?',
                 (run_id,))
  tag_to_id = {row[0]: row[1] for row in cursor.fetchall()
               if row[0] in tag_to_metadata}
  new_tag_data = []
  for tag, metadata in six.iteritems(tag_to_metadata):
    if tag not in tag_to_id:
      tag_id = self._create_id()
      tag_to_id[tag] = tag_id
      new_tag_data.append((run_id, tag_id, tag, time.time(),
                           metadata.display_name,
                           metadata.plugin_data.plugin_name,
                           self._make_blob(metadata.plugin_data.content)))
  cursor.executemany( , new_tag_data)
  return tag_to_id
57,833
Transactionally writes the given tagged summary data to the DB. Args: tagged_data: map from tag to TagData instances. experiment_name: name of experiment. run_name: name of run.
def write_summaries(self, tagged_data, experiment_name, run_name):
  logger.debug('Writing summaries for %s tags', len(tagged_data))
  # Connection used as context manager for auto commit/rollback on exit.
  # We still need an explicit BEGIN, because it doesn't do one on enter,
  # it waits until the first DML command - which is totally broken.
  # See: https://stackoverflow.com/a/44448465/1179226
  with self._db:
    self._db.execute('BEGIN TRANSACTION')
    run_id = self._maybe_init_run(experiment_name, run_name)
    tag_to_metadata = {
        tag: tagdata.metadata for tag, tagdata in six.iteritems(tagged_data)
    }
    tag_to_id = self._maybe_init_tags(run_id, tag_to_metadata)
    tensor_values = []
    for tag, tagdata in six.iteritems(tagged_data):
      tag_id = tag_to_id[tag]
      for step, wall_time, tensor_proto in tagdata.values:
        dtype = tensor_proto.dtype
        shape = ','.join(str(d.size) for d in tensor_proto.tensor_shape.dim)
        # Use tensor_proto.tensor_content if it's set, to skip relatively
        # expensive extraction into intermediate ndarray.
        data = self._make_blob(
            tensor_proto.tensor_content or
            tensor_util.make_ndarray(tensor_proto).tobytes())
        tensor_values.append((tag_id, step, wall_time, dtype, shape, data))
    self._db.executemany( , tensor_values)
57,834
Run a box-blur-to-Gaussian-blur demonstration. See the summary description for more details. Arguments: logdir: Directory into which to write event logs. verbose: Boolean; whether to log any output.
def run_box_to_gaussian(logdir, verbose=False):
  if verbose:
    logger.info('--- Starting run: box_to_gaussian')
  tf.compat.v1.reset_default_graph()
  tf.compat.v1.set_random_seed(0)
  image = get_image(verbose=verbose)
  blur_radius = tf.compat.v1.placeholder(shape=(), dtype=tf.int32)
  with tf.name_scope('filter'):
    blur_side_length = blur_radius * 2 + 1
    pixel_filter = tf.ones((blur_side_length, blur_side_length))
    pixel_filter = (pixel_filter /
                    tf.cast(tf.size(input=pixel_filter), tf.float32))  # normalize
  iterations = 4
  images = [tf.cast(image, tf.float32) / 255.0]
  for _ in xrange(iterations):
    images.append(convolve(images[-1], pixel_filter))
  with tf.name_scope('convert_to_uint8'):
    images = tf.stack(
        [tf.cast(255 * tf.clip_by_value(image_, 0.0, 1.0), tf.uint8)
         for image_ in images])
  summ = image_summary.op(
      'box_to_gaussian',
      images,
      max_outputs=iterations,
      display_name='Gaussian blur as a limit process of box blurs',
      description=('Demonstration of forming a Gaussian blur by '
                   'composing box blurs, each of which can be expressed '
                   'as a 2D convolution.\n\n'
                   'A Gaussian blur is formed by convolving a Gaussian '
                   'kernel over an image. But a Gaussian kernel is '
                   'itself the limit of convolving a constant kernel '
                   'with itself many times. Thus, while applying '
                   'a box-filter convolution just once produces '
                   'results that are noticeably different from those '
                   'of a Gaussian blur, repeating the same convolution '
                   'just a few times causes the result to rapidly '
                   'converge to an actual Gaussian blur.\n\n'
                   'Here, the step value controls the blur radius, '
                   'and the image sample controls the number of times '
                   'that the convolution is applied (plus one). '
                   'So, when *sample*=1, the original image is shown; '
                   '*sample*=2 shows a box blur; and a hypothetical '
                   '*sample*=&infin; would show a true Gaussian blur.\n\n'
                   'This is one ingredient in a recipe to compute very '
                   'fast Gaussian blurs. The other pieces require '
                   'special treatment for the box blurs themselves '
                   '(decomposition to dual one-dimensional box blurs, '
                   'each of which is computed with a sliding window); '
                   'we don&rsquo;t perform those optimizations here.\n\n'
                   '[Here are some slides describing the full process.]'
                   '(%s)\n\n'
                   '%s'
                   % ('http://elynxsdk.free.fr/ext-docs/Blur/Fast_box_blur.pdf',
                      IMAGE_CREDIT)))
  with tf.compat.v1.Session() as sess:
    sess.run(image.initializer)
    writer = tf.summary.FileWriter(os.path.join(logdir, 'box_to_gaussian'))
    writer.add_graph(sess.graph)
    for step in xrange(8):
      if verbose:
        logger.info('--- box_to_gaussian: step: %s' % step)
      feed_dict = {blur_radius: step}
      run_options = tf.compat.v1.RunOptions(
          trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      s = sess.run(summ, feed_dict=feed_dict,
                   options=run_options, run_metadata=run_metadata)
      writer.add_summary(s, global_step=step)
      writer.add_run_metadata(run_metadata, 'step_%04d' % step)
    writer.close()
57,838
Run a Sobel edge detection demonstration. See the summary description for more details. Arguments: logdir: Directory into which to write event logs. verbose: Boolean; whether to log any output.
def run_sobel(logdir, verbose=False):
  if verbose:
    logger.info('--- Starting run: sobel')
  tf.compat.v1.reset_default_graph()
  tf.compat.v1.set_random_seed(0)
  image = get_image(verbose=verbose)
  kernel_radius = tf.compat.v1.placeholder(shape=(), dtype=tf.int32)
  with tf.name_scope('horizontal_kernel'):
    kernel_side_length = kernel_radius * 2 + 1
    # Drop off influence for pixels further away from the center.
    weighting_kernel = (
        1.0 - tf.abs(tf.linspace(-1.0, 1.0, num=kernel_side_length)))
    differentiation_kernel = tf.linspace(-1.0, 1.0, num=kernel_side_length)
    horizontal_kernel = tf.matmul(tf.expand_dims(weighting_kernel, 1),
                                  tf.expand_dims(differentiation_kernel, 0))
  with tf.name_scope('vertical_kernel'):
    vertical_kernel = tf.transpose(a=horizontal_kernel)
  float_image = tf.cast(image, tf.float32)
  dx = convolve(float_image, horizontal_kernel, name='convolve_dx')
  dy = convolve(float_image, vertical_kernel, name='convolve_dy')
  gradient_magnitude = tf.norm(tensor=[dx, dy], axis=0,
                               name='gradient_magnitude')
  with tf.name_scope('normalized_gradient'):
    normalized_gradient = gradient_magnitude / tf.reduce_max(
        input_tensor=gradient_magnitude)
  with tf.name_scope('output_image'):
    output_image = tf.cast(255 * normalized_gradient, tf.uint8)
  summ = image_summary.op(
      'sobel',
      tf.stack([output_image]),
      display_name='Sobel edge detection',
      description=(u'Demonstration of [Sobel edge detection]. The step '
                   'parameter adjusts the radius of the kernel. '
                   'The kernel can be of arbitrary size, and considers '
                   u'nearby pixels with \u2113\u2082-linear falloff.\n\n'
                   # (that says ``$\ell_2$-linear falloff'')
                   'Edge detection is done on a per-channel basis, so '
                   'you can observe which edges are &ldquo;mostly red '
                   'edges,&rdquo; for instance.\n\n'
                   'For practical edge detection, a small kernel '
                   '(usually not more than *r*=2) is best.\n\n'
                   '[Sobel edge detection]: %s\n\n'
                   "%s"
                   % ('https://en.wikipedia.org/wiki/Sobel_operator',
                      IMAGE_CREDIT)))
  with tf.compat.v1.Session() as sess:
    sess.run(image.initializer)
    writer = tf.summary.FileWriter(os.path.join(logdir, 'sobel'))
    writer.add_graph(sess.graph)
    for step in xrange(8):
      if verbose:
        logger.info("--- sobel: step: %s" % step)
      feed_dict = {kernel_radius: step}
      run_options = tf.compat.v1.RunOptions(
          trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      s = sess.run(summ, feed_dict=feed_dict,
                   options=run_options, run_metadata=run_metadata)
      writer.add_summary(s, global_step=step)
      writer.add_run_metadata(run_metadata, 'step_%04d' % step)
    writer.close()
57,839
Run simulations on a reasonable set of parameters. Arguments: logdir: the directory into which to store all the runs' data verbose: if true, print out each run's name as it begins
def run_all(logdir, verbose=False):
  run_box_to_gaussian(logdir, verbose=verbose)
  run_sobel(logdir, verbose=verbose)
57,840
Returns an `OriginalFeatureList` for the specified feature_name. Args: example: An example. feature_name: A string feature name. Returns: A filled in `OriginalFeatureList` object representing the feature.
def parse_original_feature_from_example(example, feature_name):
  feature = get_example_features(example)[feature_name]
  feature_type = feature.WhichOneof('kind')
  original_value = proto_value_for_feature(example, feature_name)
  return OriginalFeatureList(feature_name, original_value, feature_type)
57,842
Returns packaged inference results from the provided proto. Args: inference_result_proto: The classification or regression response proto. Returns: An InferenceResult proto with the result from the response.
def wrap_inference_results(inference_result_proto):
  inference_proto = inference_pb2.InferenceResult()
  if isinstance(inference_result_proto,
                classification_pb2.ClassificationResponse):
    inference_proto.classification_result.CopyFrom(
        inference_result_proto.result)
  elif isinstance(inference_result_proto, regression_pb2.RegressionResponse):
    inference_proto.regression_result.CopyFrom(inference_result_proto.result)
  return inference_proto
57,843
Returns a list of feature names for float and int64 type features. Args: example: An example. Returns: A list of strings of the names of numeric features.
def get_numeric_feature_names(example):
  numeric_features = ('float_list', 'int64_list')
  features = get_example_features(example)
  return sorted([
      feature_name for feature_name in features
      if features[feature_name].WhichOneof('kind') in numeric_features
  ])
57,844
Returns a list of feature names for byte type features. Args: example: An example. Returns: A list of categorical feature names (e.g. ['education', 'marital_status'] )
def get_categorical_feature_names(example):
  features = get_example_features(example)
  return sorted([
      feature_name for feature_name in features
      if features[feature_name].WhichOneof('kind') == 'bytes_list'
  ])
57,845
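A usage sketch with a hand-built tf.train.Example, assuming the module's get_example_features helper returns the example's feature map:

import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    'age': tf.train.Feature(int64_list=tf.train.Int64List(value=[38])),
    'education': tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[b'Masters'])),
}))
get_numeric_feature_names(example)      # ['age']
get_categorical_feature_names(example)  # ['education']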
Returns numerical features and their observed ranges. Args: examples: Examples to read to get ranges. Returns: A dict mapping feature_name -> {'observedMin': ..., 'observedMax': ...} dicts, with a key for each numerical feature.
def get_numeric_features_to_observed_range(examples):
  observed_features = collections.defaultdict(list)  # name -> [value, ]
  for example in examples:
    for feature_name in get_numeric_feature_names(example):
      original_feature = parse_original_feature_from_example(
          example, feature_name)
      observed_features[feature_name].extend(original_feature.original_value)
  return {
      feature_name: {
          'observedMin': min(feature_values),
          'observedMax': max(feature_values),
      }
      for feature_name, feature_values in iteritems(observed_features)
  }
57,846
Return a list of `MutantFeatureValue`s and a list of mutant Examples. Args: example_protos: The examples to mutate. original_feature: An `OriginalFeatureList` that encapsulates the feature to mutate. index_to_mutate: The index of the int64_list or float_list to mutate. viz_params: A `VizParams` object that contains the UI state of the request. Returns: A list of `MutantFeatureValue`s and a list of mutant examples.
def make_mutant_tuples(example_protos, original_feature, index_to_mutate,
                       viz_params):
  mutant_features = make_mutant_features(original_feature, index_to_mutate,
                                         viz_params)
  mutant_examples = []
  for example_proto in example_protos:
    for mutant_feature in mutant_features:
      copied_example = copy.deepcopy(example_proto)
      feature_name = mutant_feature.original_feature.feature_name
      try:
        feature_list = proto_value_for_feature(copied_example, feature_name)
        if index_to_mutate is None:
          new_values = mutant_feature.mutant_value
        else:
          new_values = list(feature_list)
          new_values[index_to_mutate] = mutant_feature.mutant_value
        del feature_list[:]
        feature_list.extend(new_values)
        mutant_examples.append(copied_example)
      except (ValueError, IndexError):
        # If the mutant value can't be set, still add the example to the
        # mutant_example even though no change was made. This is necessary to
        # allow for computation of global PD plots when not all examples have
        # the same number of feature values for a feature.
        mutant_examples.append(copied_example)
  return mutant_features, mutant_examples
57,849
Returns a list of JSON objects for each feature in the examples. This list is used to drive partial dependence plots in the plugin. Args: examples: Examples to examine to determine the eligible features. num_mutants: The number of mutations to make over each feature. Returns: A list with a JSON object for each feature. Numeric features are represented as {name: ..., observedMin: ..., observedMax: ...}. Categorical features are represented as {name: ..., samples: [...]}.
def get_eligible_features(examples, num_mutants):
  features_dict = (
      get_numeric_features_to_observed_range(examples))
  features_dict.update(
      get_categorical_features_to_sampling(examples, num_mutants))
  # Massage the features_dict into a sorted list before returning because
  # Polymer dom-repeat needs a list.
  features_list = []
  for k, v in sorted(features_dict.items()):
    v['name'] = k
    features_list.append(v)
  return features_list
57,854
Returns an encoded sprite image for use in Facets Dive. Args: examples: A list of serialized example protos to get images for. Returns: An encoded PNG.
def create_sprite_image(examples):
  def generate_image_from_thumbnails(thumbnails, thumbnail_dims):
    num_thumbnails = tf.shape(thumbnails)[0].eval()
    images_per_row = int(math.ceil(math.sqrt(num_thumbnails)))
    thumb_height = thumbnail_dims[0]
    thumb_width = thumbnail_dims[1]
    master_height = images_per_row * thumb_height
    master_width = images_per_row * thumb_width
    num_channels = 3
    master = np.zeros([master_height, master_width, num_channels])
    for idx, image in enumerate(thumbnails.eval()):
      left_idx = idx % images_per_row
      top_idx = int(math.floor(idx / images_per_row))
      left_start = left_idx * thumb_width
      left_end = left_start + thumb_width
      top_start = top_idx * thumb_height
      top_end = top_start + thumb_height
      master[top_start:top_end, left_start:left_end, :] = image
    return tf.image.encode_png(master)

  image_feature_name = 'image/encoded'
  sprite_thumbnail_dim_px = 32
  with tf.compat.v1.Session():
    keys_to_features = {
        image_feature_name:
            tf.FixedLenFeature((), tf.string, default_value=''),
    }
    parsed = tf.parse_example(examples, keys_to_features)
    images = tf.zeros([1, 1, 1, 1], tf.float32)
    i = tf.constant(0)
    thumbnail_dims = (sprite_thumbnail_dim_px, sprite_thumbnail_dim_px)
    num_examples = tf.constant(len(examples))
    encoded_images = parsed[image_feature_name]

    # Loop over all examples, decoding the image feature value, resizing
    # and appending to a list of all images.
    def loop_body(i, encoded_images, images):
      encoded_image = encoded_images[i]
      image = tf.image.decode_jpeg(encoded_image, channels=3)
      resized_image = tf.image.resize(image, thumbnail_dims)
      expanded_image = tf.expand_dims(resized_image, 0)
      images = tf.cond(
          tf.equal(i, 0), lambda: expanded_image,
          lambda: tf.concat([images, expanded_image], 0))
      return i + 1, encoded_images, images

    loop_out = tf.while_loop(
        lambda i, encoded_images, images: tf.less(i, num_examples),
        loop_body, [i, encoded_images, images],
        shape_invariants=[
            i.get_shape(),
            encoded_images.get_shape(),
            tf.TensorShape(None)
        ])

    # Create the single sprite atlas image from these thumbnails.
    sprite = generate_image_from_thumbnails(loop_out[2], thumbnail_dims)
    return sprite.eval()
57,856
Run inference on examples, given model information. Args: examples: A list of examples that matches the model spec. serving_bundle: A `ServingBundle` object that contains the information to make the inference request. Returns: A ClassificationResponse or RegressionResponse proto.
def run_inference(examples, serving_bundle):
  batch_size = 64
  if serving_bundle.estimator and serving_bundle.feature_spec:
    # If provided an estimator and feature spec then run inference locally.
    preds = serving_bundle.estimator.predict(
        lambda: tf.data.Dataset.from_tensor_slices(
            tf.parse_example(
                [ex.SerializeToString() for ex in examples],
                serving_bundle.feature_spec)).batch(batch_size))
    if serving_bundle.use_predict:
      preds_key = serving_bundle.predict_output_tensor
    elif serving_bundle.model_type == 'regression':
      preds_key = 'predictions'
    else:
      preds_key = 'probabilities'
    values = []
    for pred in preds:
      values.append(pred[preds_key])
    return common_utils.convert_prediction_values(values, serving_bundle)
  elif serving_bundle.custom_predict_fn:
    # If custom_predict_fn is provided, pass examples directly for local
    # inference.
    values = serving_bundle.custom_predict_fn(examples)
    return common_utils.convert_prediction_values(values, serving_bundle)
  else:
    return platform_utils.call_servo(examples, serving_bundle)
57,857
Return items associated with given key. Args: key: The key for which we are finding associated items. Raises: KeyError: If the key is not found in the reservoir. Returns: [list, of, items] associated with that key.
def Items(self, key):
  with self._mutex:
    if key not in self._buckets:
      raise KeyError('Key %s was not found in Reservoir' % key)
    bucket = self._buckets[key]
    return bucket.Items()
57,863
Filter items within a Reservoir, using a filtering function. Args: filterFn: A function that returns True for the items to be kept. key: An optional bucket key to filter. If not specified, will filter all buckets. Returns: The number of items removed.
def FilterItems(self, filterFn, key=None):
  with self._mutex:
    if key:
      if key in self._buckets:
        return self._buckets[key].FilterItems(filterFn)
      else:
        return 0
    else:
      return sum(bucket.FilterItems(filterFn)
                 for bucket in self._buckets.values())
57,865
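A hypothetical usage sketch (the reservoir instance and step threshold are illustrative names, not part of the row above): after a restart, a caller can drop items recorded past the last good step from every bucket.

last_good_step = 1000  # illustrative threshold
num_removed = reservoir.FilterItems(lambda item: item.step <= last_good_step)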
Create the _ReservoirBucket. Args: _max_size: The maximum size the reservoir bucket may grow to. If size is zero, the bucket has unbounded size. _random: The random number generator to use. If not specified, defaults to random.Random(0). always_keep_last: Whether the latest seen item should always be included in the end of the bucket. Raises: ValueError: if the size is not a nonnegative integer.
def __init__(self, _max_size, _random=None, always_keep_last=True):
  if _max_size < 0 or _max_size != round(_max_size):
    raise ValueError('_max_size must be nonnegative int, was %s' % _max_size)
  self.items = []
  # This mutex protects the internal items, ensuring that calls to Items and
  # AddItem are thread-safe.
  self._mutex = threading.Lock()
  self._max_size = _max_size
  self._num_items_seen = 0
  if _random is not None:
    self._random = _random
  else:
    self._random = random.Random(0)
  self.always_keep_last = always_keep_last
57,866
Create a numpy ndarray from a tensor. Create a numpy ndarray with the same shape and data as the tensor. Args: tensor: A TensorProto. Returns: A numpy array with the tensor contents. Raises: TypeError: if tensor has unsupported type.
def make_ndarray(tensor):
  shape = [d.size for d in tensor.tensor_shape.dim]
  num_elements = np.prod(shape, dtype=np.int64)
  tensor_dtype = dtypes.as_dtype(tensor.dtype)
  dtype = tensor_dtype.as_numpy_dtype
  if tensor.tensor_content:
    return np.frombuffer(
        tensor.tensor_content, dtype=dtype).copy().reshape(shape)
  elif tensor_dtype == dtypes.float16 or tensor_dtype == dtypes.bfloat16:
    # the half_val field of the TensorProto stores the binary representation
    # of the fp16: we need to reinterpret this as a proper float16
    if len(tensor.half_val) == 1:
      tmp = np.array(tensor.half_val[0], dtype=np.uint16)
      tmp.dtype = tensor_dtype.as_numpy_dtype
      return np.repeat(tmp, num_elements).reshape(shape)
    else:
      tmp = np.fromiter(tensor.half_val, dtype=np.uint16)
      tmp.dtype = tensor_dtype.as_numpy_dtype
      return tmp.reshape(shape)
  elif tensor_dtype == dtypes.float32:
    if len(tensor.float_val) == 1:
      return np.repeat(
          np.array(tensor.float_val[0], dtype=dtype), num_elements
      ).reshape(shape)
    else:
      return np.fromiter(tensor.float_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.float64:
    if len(tensor.double_val) == 1:
      return np.repeat(
          np.array(tensor.double_val[0], dtype=dtype), num_elements
      ).reshape(shape)
    else:
      return np.fromiter(tensor.double_val, dtype=dtype).reshape(shape)
  elif tensor_dtype in [
      dtypes.int32,
      dtypes.uint8,
      dtypes.uint16,
      dtypes.int16,
      dtypes.int8,
      dtypes.qint32,
      dtypes.quint8,
      dtypes.qint8,
      dtypes.qint16,
      dtypes.quint16,
  ]:
    if len(tensor.int_val) == 1:
      return np.repeat(
          np.array(tensor.int_val[0], dtype=dtype), num_elements
      ).reshape(shape)
    else:
      return np.fromiter(tensor.int_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.int64:
    if len(tensor.int64_val) == 1:
      return np.repeat(
          np.array(tensor.int64_val[0], dtype=dtype), num_elements
      ).reshape(shape)
    else:
      return np.fromiter(tensor.int64_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.string:
    if len(tensor.string_val) == 1:
      return np.repeat(
          np.array(tensor.string_val[0], dtype=dtype), num_elements
      ).reshape(shape)
    else:
      return np.array(
          [x for x in tensor.string_val], dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.complex64:
    it = iter(tensor.scomplex_val)
    if len(tensor.scomplex_val) == 2:
      return np.repeat(
          np.array(
              complex(tensor.scomplex_val[0], tensor.scomplex_val[1]),
              dtype=dtype),
          num_elements,
      ).reshape(shape)
    else:
      return np.array(
          [complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype
      ).reshape(shape)
  elif tensor_dtype == dtypes.complex128:
    it = iter(tensor.dcomplex_val)
    if len(tensor.dcomplex_val) == 2:
      return np.repeat(
          np.array(
              complex(tensor.dcomplex_val[0], tensor.dcomplex_val[1]),
              dtype=dtype),
          num_elements,
      ).reshape(shape)
    else:
      return np.array(
          [complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype
      ).reshape(shape)
  elif tensor_dtype == dtypes.bool:
    if len(tensor.bool_val) == 1:
      return np.repeat(
          np.array(tensor.bool_val[0], dtype=dtype), num_elements
      ).reshape(shape)
    else:
      return np.fromiter(tensor.bool_val, dtype=dtype).reshape(shape)
  else:
    raise TypeError("Unsupported tensor type: %s" % tensor.dtype)
57,896
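A round-trip usage sketch; tf.make_tensor_proto builds the TensorProto that this function (like the public tf.make_ndarray) converts back into a numpy array:

import numpy as np
import tensorflow as tf

proto = tf.make_tensor_proto(np.arange(6, dtype=np.float32).reshape(2, 3))
arr = make_ndarray(proto)
print(arr.shape, arr.dtype)  # (2, 3) float32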
Creates a summary that contains a layout. When users navigate to the custom scalars dashboard, they will see a layout based on the proto provided to this function. Args: scalars_layout: The scalars_layout_pb2.Layout proto that specifies the layout. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. Returns: A tensor summary op that writes the layout to disk.
def op(scalars_layout, collections=None):
  # TODO(nickfelt): remove on-demand imports once dep situation is fixed.
  import tensorflow.compat.v1 as tf

  assert isinstance(scalars_layout, layout_pb2.Layout)
  summary_metadata = metadata.create_summary_metadata()
  return tf.summary.tensor_summary(
      name=metadata.CONFIG_SUMMARY_TAG,
      tensor=tf.constant(
          scalars_layout.SerializeToString(), dtype=tf.string),
      collections=collections,
      summary_metadata=summary_metadata)
57,897
Creates a summary that contains a layout. When users navigate to the custom scalars dashboard, they will see a layout based on the proto provided to this function. Args: scalars_layout: The scalars_layout_pb2.Layout proto that specifies the layout. Returns: A summary proto containing the layout.
def pb(scalars_layout):
  # TODO(nickfelt): remove on-demand imports once dep situation is fixed.
  import tensorflow.compat.v1 as tf

  assert isinstance(scalars_layout, layout_pb2.Layout)
  tensor = tf.make_tensor_proto(
      scalars_layout.SerializeToString(), dtype=tf.string)
  tf_summary_metadata = tf.SummaryMetadata.FromString(
      metadata.create_summary_metadata().SerializeToString())
  summary = tf.Summary()
  summary.value.add(tag=metadata.CONFIG_SUMMARY_TAG,
                    metadata=tf_summary_metadata,
                    tensor=tensor)
  return summary
57,898
Returns true if `other` is convertible with this Dimension. Two known Dimensions are convertible if they have the same value. An unknown Dimension is convertible with all other Dimensions. Args: other: Another Dimension. Returns: True if this Dimension and `other` are convertible.
def is_convertible_with(self, other):
  other = as_dimension(other)
  return (self._value is None
          or other.value is None
          or self._value == other.value)
57,901
Returns the subtraction of `self` from `other`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the subtraction of `self` from `other`.
def __rsub__(self, other):
  other = as_dimension(other)
  if self._value is None or other.value is None:
    return Dimension(None)
  else:
    return Dimension(other.value - self._value)
57,905
Returns the quotient of `other` and `self` rounded down. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A `Dimension` whose value is the integer quotient of `self` and `other`.
def __rfloordiv__(self, other):
  other = as_dimension(other)
  if self._value is None or other.value is None:
    return Dimension(None)
  else:
    return Dimension(other.value // self._value)
57,906
Returns `other` modulo `self`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is `other` modulo `self`.
def __rmod__(self, other):
  try:
    other = as_dimension(other)
  except (TypeError, ValueError):
    return NotImplemented
  return other % self
57,908
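A small sketch of how these reflected operators behave, assuming the v1 Dimension class exposed as tf.compat.v1.Dimension:

import tensorflow as tf

Dimension = tf.compat.v1.Dimension
10 - Dimension(3)     # __rsub__      -> Dimension(7)
20 // Dimension(5)    # __rfloordiv__ -> Dimension(4)
7 % Dimension(3)      # __rmod__      -> Dimension(1)
10 - Dimension(None)  # an unknown operand propagates -> Dimension(None)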
Creates a new TensorShape with the given dimensions. Args: dims: A list of Dimensions, or None if the shape is unspecified. DEPRECATED: A single integer is treated as a singleton list. Raises: TypeError: If dims cannot be converted to a list of dimensions.
def __init__(self, dims):
  # TODO(irving): Eliminate the single integer special case.
  if dims is None:
    self._dims = None
  elif isinstance(dims, compat.bytes_or_text_types):
    raise TypeError(
        "A string has ambiguous TensorShape, please wrap in a "
        "list or convert to an int: %s" % dims
    )
  elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
    if dims.unknown_rank:
      self._dims = None
    else:
      self._dims = [
          # Protos store variable-size dimensions as -1
          as_dimension(dim.size if dim.size != -1 else None)
          for dim in dims.dim
      ]
  elif isinstance(dims, TensorShape):
    self._dims = dims.dims
  else:
    try:
      dims_iter = iter(dims)
    except TypeError:
      # Treat as a singleton dimension
      self._dims = [as_dimension(dims)]
    else:
      # Got a list of dimensions
      self._dims = [as_dimension(d) for d in dims_iter]
  self._ndims = None
57,913
Returns a `TensorShape` combining the information in `self` and `other`. The dimensions in `self` and `other` are merged elementwise, according to the rules defined for `Dimension.merge_with()`. Args: other: Another `TensorShape`. Returns: A `TensorShape` containing the combined information of `self` and `other`. Raises: ValueError: If `self` and `other` are not convertible.
def merge_with(self, other):
  other = as_shape(other)
  if self._dims is None:
    return other
  else:
    try:
      self.assert_same_rank(other)
      new_dims = []
      for i, dim in enumerate(self._dims):
        new_dims.append(dim.merge_with(other[i]))
      return TensorShape(new_dims)
    except ValueError:
      raise ValueError("Shapes %s and %s are not convertible" % (self, other))
57,918
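A short sketch with the public TensorShape API, which combines known and unknown dimensions the way merge_with describes:

import tensorflow as tf

a = tf.TensorShape([None, 3])
b = tf.TensorShape([2, None])
merged = a.merge_with(b)  # TensorShape([2, 3])
# a.merge_with(tf.TensorShape([5, 5])) would raise ValueError, since the
# known dimensions disagree.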
Raises an exception if `self` and `other` do not have convertible ranks. Args: other: Another `TensorShape`. Raises: ValueError: If `self` and `other` do not represent shapes with the same rank.
def assert_same_rank(self, other):
  other = as_shape(other)
  if self.ndims is not None and other.ndims is not None:
    if self.ndims != other.ndims:
      raise ValueError(
          "Shapes %s and %s must have the same rank" % (self, other)
      )
57,920
Returns a shape based on `self` with the given rank. This method promotes a completely unknown shape to one with a known rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with the given rank. Raises: ValueError: If `self` does not represent a shape with the given `rank`.
def with_rank(self, rank): try: return self.merge_with(unknown_shape(ndims=rank)) except ValueError: raise ValueError("Shape %s must have rank %d" % (self, rank))
57,921
Returns a shape based on `self` with at least the given rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with at least the given rank. Raises: ValueError: If `self` does not represent a shape with at least the given `rank`.
def with_rank_at_least(self, rank): if self.ndims is not None and self.ndims < rank: raise ValueError("Shape %s must have rank at least %d" % (self, rank)) else: return self
57,922
Returns a shape based on `self` with at most the given rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with at most the given rank. Raises: ValueError: If `self` does not represent a shape with at most the given `rank`.
def with_rank_at_most(self, rank): if self.ndims is not None and self.ndims > rank: raise ValueError("Shape %s must have rank at most %d" % (self, rank)) else: return self
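A usage sketch for the three rank-constraining helpers above (with_rank, with_rank_at_least, with_rank_at_most), assuming TensorFlow is installed.

import tensorflow as tf

s = tf.TensorShape([None, None])     # rank 2, all dimensions unknown

print(s.with_rank(2))                # (None, None): rank already matches
print(s.with_rank_at_least(1))       # unchanged: rank 2 >= 1
print(s.with_rank_at_most(3))        # unchanged: rank 2 <= 3

try:
    s.with_rank(3)                   # rank mismatch
except ValueError as e:
    print(e)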
57,923
Converts a PredictResponse to ClassificationResponse or RegressionResponse. Args: pred: PredictResponse to convert. serving_bundle: A `ServingBundle` object that contains the information about the serving request that the response was generated by. Returns: A ClassificationResponse or RegressionResponse.
def convert_predict_response(pred, serving_bundle): output = pred.outputs[serving_bundle.predict_output_tensor] raw_output = output.float_val if serving_bundle.model_type == 'classification': values = [] for example_index in range(output.tensor_shape.dim[0].size): start = example_index * output.tensor_shape.dim[1].size values.append(raw_output[start:start + output.tensor_shape.dim[1].size]) else: values = raw_output return convert_prediction_values(values, serving_bundle, pred.model_spec)
57,931
Returns a dict mapping tags to content specific to that plugin. Args: plugin_name: The name of the plugin for which to fetch plugin-specific content. Raises: KeyError: if the plugin name is not found. Returns: A dict mapping tags to plugin-specific content (which are always strings). Those strings are often serialized protos.
def PluginTagToContent(self, plugin_name): if plugin_name not in self._plugin_to_tag_to_content: raise KeyError('Plugin %r could not be found.' % plugin_name) with self._plugin_tag_locks[plugin_name]: # Return a snapshot to avoid concurrent mutation and iteration issues. return dict(self._plugin_to_tag_to_content[plugin_name])
57,935
Maybe purge orphaned data due to a TensorFlow crash. When TensorFlow crashes at step T+O and restarts at step T, any events written after step T are now "orphaned" and will be at best misleading if they are included in TensorBoard. This logic attempts to determine if there is orphaned data, and purge it if it is found. Args: event: The event to use as a reference, to determine if a purge is needed.
def _MaybePurgeOrphanedData(self, event): if not self.purge_orphaned_data: return ## Check if the event happened after a crash, and purge expired tags. if self.file_version and self.file_version >= 2: ## If the file_version is recent enough, use the SessionLog enum ## to check for restarts. self._CheckForRestartAndMaybePurge(event) else: ## If there is no file version, default to old logic of checking for ## out of order steps. self._CheckForOutOfOrderStepAndMaybePurge(event) # After checking, update the most recent summary step and wall time. if event.HasField('summary'): self.most_recent_step = event.step self.most_recent_wall_time = event.wall_time
57,938
Check for out-of-order event.step and discard expired events for tags. Check if the event is out of order relative to the global most recent step. If it is, purge outdated summaries for tags that the event contains. Args: event: The event to use as reference. If the event is out-of-order, all events with the same tags, but with a greater event.step will be purged.
def _CheckForOutOfOrderStepAndMaybePurge(self, event): if event.step < self.most_recent_step and event.HasField('summary'): self._Purge(event, by_tags=True)
57,939
Returns the plugin, if possible. Args: context: The TBContext flags. Returns: A BeholderPlugin instance or None if it couldn't be loaded.
def load(self, context): try: # pylint: disable=g-import-not-at-top,unused-import import tensorflow except ImportError: return # pylint: disable=g-import-not-at-top from tensorboard.plugins.beholder.beholder_plugin import BeholderPlugin return BeholderPlugin(context)
57,943
Walks the nested keras layer configuration in preorder. Args: keras_layer: Keras configuration from model.to_json. Yields: A tuple of (name_scope, layer_config). name_scope: a string representing a scope name, similar to that of tf.name_scope. layer_config: a dict representing a Keras layer configuration.
def _walk_layers(keras_layer): yield ('', keras_layer) if keras_layer.get('config').get('layers'): name_scope = keras_layer.get('config').get('name') for layer in keras_layer.get('config').get('layers'): for (sub_name_scope, sublayer) in _walk_layers(layer): sub_name_scope = '%s/%s' % ( name_scope, sub_name_scope) if sub_name_scope else name_scope yield (sub_name_scope, sublayer)
57,944
Returns a GraphDef representation of the Keras model given in dict form. Note that it only supports models that implement to_json(). Args: keras_layer: A dict from Keras model.to_json(). Returns: A GraphDef representation of the layers in the model.
def keras_model_to_graph_def(keras_layer): input_to_layer = {} model_name_to_output = {} g = GraphDef() # Sequential model layers do not have a field "inbound_nodes" but # instead are defined implicitly via order of layers. prev_node_name = None for (name_scope, layer) in _walk_layers(keras_layer): if _is_model(layer): (input_to_layer, model_name_to_output, prev_node_name) = _update_dicts( name_scope, layer, input_to_layer, model_name_to_output, prev_node_name) continue layer_config = layer.get('config') node_name = _scoped_name(name_scope, layer_config.get('name')) node_def = g.node.add() node_def.name = node_name if layer.get('class_name') is not None: keras_cls_name = layer.get('class_name').encode('ascii') node_def.attr['keras_class'].s = keras_cls_name if layer_config.get('dtype') is not None: tf_dtype = dtypes.as_dtype(layer_config.get('dtype')) node_def.attr['dtype'].type = tf_dtype.as_datatype_enum if layer.get('inbound_nodes') is not None: for maybe_inbound_node in layer.get('inbound_nodes'): inbound_nodes = _norm_to_list_of_layers(maybe_inbound_node) for [name, size, index, _] in inbound_nodes: inbound_name = _scoped_name(name_scope, name) # An input to a layer can be output from a model. In that case, the name # of inbound_nodes to a layer is a name of a model. Remap the name of the # model to output layer of the model. Also, since there can be multiple # outputs in a model, make sure we pick the right output_layer from the model. inbound_node_names = model_name_to_output.get( inbound_name, [inbound_name]) node_def.input.append(inbound_node_names[index]) elif prev_node_name is not None: node_def.input.append(prev_node_name) if node_name in input_to_layer: node_def.input.append(input_to_layer.get(node_name)) prev_node_name = node_def.name return g
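A standalone sketch of the layer traversal these two functions rely on, run on a hand-written (simplified, hypothetical) fragment of what Keras model.to_json() produces; it reuses only the 'class_name'/'config'/'layers' keys referenced above.

def walk_layers(keras_layer):
    # Preorder walk: yield the layer itself, then recurse into nested layers,
    # prefixing their scopes with the enclosing model's name.
    yield ('', keras_layer)
    config = keras_layer.get('config', {})
    if config.get('layers'):
        scope = config.get('name')
        for layer in config['layers']:
            for sub_scope, sublayer in walk_layers(layer):
                yield ('%s/%s' % (scope, sub_scope) if sub_scope else scope,
                       sublayer)

model_json = {
    'class_name': 'Sequential',
    'config': {
        'name': 'sequential',
        'layers': [
            {'class_name': 'Dense', 'config': {'name': 'dense_1'}},
            {'class_name': 'Dense', 'config': {'name': 'dense_2'}},
        ],
    },
}

for scope, layer in walk_layers(model_json):
    print(scope or '<root>', layer['class_name'])
# <root> Sequential
# sequential Dense
# sequential Dense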
57,946
Returns the plugin, if possible. Args: context: The TBContext flags. Returns: A HParamsPlugin instance or None if it couldn't be loaded.
def load(self, context): try: # pylint: disable=g-import-not-at-top,unused-import import tensorflow except ImportError: return # pylint: disable=g-import-not-at-top from tensorboard.plugins.hparams.hparams_plugin import HParamsPlugin return HParamsPlugin(context)
57,947
Convert Markdown to HTML that's safe to splice into the DOM. Arguments: markdown_string: A Unicode string or UTF-8-encoded bytestring containing Markdown source. Markdown tables are supported. Returns: A string containing safe HTML.
def markdown_to_safe_html(markdown_string): warning = '' # Convert to utf-8 whenever we have a binary input. if isinstance(markdown_string, six.binary_type): markdown_string_decoded = markdown_string.decode('utf-8') # Remove null bytes and warn if there were any, since it probably means # we were given a bad encoding. markdown_string = markdown_string_decoded.replace(u'\x00', u'') num_null_bytes = len(markdown_string_decoded) - len(markdown_string) if num_null_bytes: warning = ('<!-- WARNING: discarded %d null bytes in markdown string ' 'after UTF-8 decoding -->\n') % num_null_bytes string_html = markdown.markdown( markdown_string, extensions=['markdown.extensions.tables']) string_sanitized = bleach.clean( string_html, tags=_ALLOWED_TAGS, attributes=_ALLOWED_ATTRIBUTES) return warning + string_sanitized
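A hedged usage sketch of the same sanitization pattern with the `markdown` and `bleach` packages (both assumed installed); the tag and attribute whitelists below are illustrative stand-ins, not TensorBoard's actual _ALLOWED_TAGS/_ALLOWED_ATTRIBUTES.

import bleach
import markdown

ALLOWED_TAGS = ['a', 'em', 'strong', 'code', 'p',
                'table', 'thead', 'tbody', 'tr', 'th', 'td']
ALLOWED_ATTRIBUTES = {'a': ['href', 'title']}

raw = u'**bold** <script>alert(1)</script>\n\n| a | b |\n|---|---|\n| 1 | 2 |'
html = markdown.markdown(raw, extensions=['markdown.extensions.tables'])
safe = bleach.clean(html, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES)
print(safe)   # the <script> tag is escaped; the table markup survives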
57,953
Converts the given `type_value` to a `DType`. Args: type_value: A value that can be converted to a `tf.DType` object. This may currently be a `tf.DType` object, a [`DataType` enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto), a string type name, or a `numpy.dtype`. Returns: A `DType` corresponding to `type_value`. Raises: TypeError: If `type_value` cannot be converted to a `DType`.
def as_dtype(type_value): if isinstance(type_value, DType): return type_value try: return _INTERN_TABLE[type_value] except KeyError: pass try: return _STRING_TO_TF[type_value] except KeyError: pass try: return _PYTHON_TO_TF[type_value] except KeyError: pass if isinstance(type_value, np.dtype): # The numpy dtype for strings is variable length. We can not compare # dtype with a single constant (np.string does not exist) to decide # dtype is a "string" type. We need to compare the dtype.type to be # sure it's a string type. if type_value.type == np.string_ or type_value.type == np.unicode_: return string if isinstance(type_value, (type, np.dtype)): for key, val in _NP_TO_TF: try: if key == type_value: return val except TypeError as e: raise TypeError( "Cannot convert {} to a dtype. {}".format(type_value, e) ) raise TypeError("Cannot convert value %r to a TensorFlow DType." % type_value)
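A quick usage sketch (assuming TensorFlow is installed): each of the lookup routes above resolves to the same DType.

import numpy as np
import tensorflow as tf

print(tf.as_dtype('float32'))            # string name
print(tf.as_dtype(np.float32))           # numpy scalar type
print(tf.as_dtype(np.dtype('float32')))  # numpy dtype object
print(tf.as_dtype(tf.float32))           # already a DType: returned as-is
assert tf.as_dtype('float32') == tf.float32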
57,954
Creates a new `DataType`. NOTE(mrry): In normal circumstances, you should not need to construct a `DataType` object directly. Instead, use the `tf.as_dtype()` function. Args: type_enum: A `types_pb2.DataType` enum value. Raises: TypeError: If `type_enum` is not a valid `types_pb2.DataType`.
def __init__(self, type_enum): # TODO(mrry): Make the necessary changes (using __new__) to ensure # that calling this returns one of the interned values. type_enum = int(type_enum) if ( type_enum not in types_pb2.DataType.values() or type_enum == types_pb2.DT_INVALID ): raise TypeError( "type_enum is not a valid types_pb2.DataType: %s" % type_enum ) self._type_enum = type_enum
57,955
Return intensity limits, i.e. (min, max) tuple, of the dtype. Args: clip_negative: bool, optional. If True, clip the negative range (i.e. return 0 for min intensity) even if the image dtype allows negative values. Returns: min, max: tuple. Lower and upper intensity limits.
def limits(self, clip_negative=True): min, max = dtype_range[self.as_numpy_dtype] # pylint: disable=redefined-builtin if clip_negative: min = 0 # pylint: disable=redefined-builtin return min, max
57,960
Constructs a debugger plugin for TensorBoard. This plugin adds handlers for retrieving debugger-related data. The plugin also starts a debugger data server once the log directory is passed to the plugin via the call to get_plugin_apps. Args: context: A base_plugin.TBContext instance.
def __init__(self, context): del context # Unused. self._debugger_data_server = None self._server_thread = None self._grpc_port = None
57,965
Start listening on the given gRPC port. This method of an instance of InteractiveDebuggerPlugin can be invoked at most once. This method is not thread safe. Args: grpc_port: port number to listen at. Raises: ValueError: If this instance is already listening at a gRPC port.
def listen(self, grpc_port): if self._grpc_port: raise ValueError( 'This InteractiveDebuggerPlugin instance is already listening at ' 'gRPC port %d' % self._grpc_port) self._grpc_port = grpc_port sys.stderr.write('Creating InteractiveDebuggerPlugin at port %d\n' % self._grpc_port) sys.stderr.flush() self._debugger_data_server = ( interactive_debugger_server_lib.InteractiveDebuggerDataServer( self._grpc_port)) self._server_thread = threading.Thread( target=self._debugger_data_server.run_server) self._server_thread.start() signal.signal(signal.SIGINT, self.signal_handler)
57,966
Given a tag and list of runs, serve a list of metadata for audio. Note that the actual audio data are not sent; instead, we respond with URLs to the audio. The frontend should treat these URLs as opaque and should not try to parse information about them or generate them itself, as the format may change. Args: request: A werkzeug.wrappers.Request object. Returns: A werkzeug.Response application.
def _serve_audio_metadata(self, request): tag = request.args.get('tag') run = request.args.get('run') sample = int(request.args.get('sample', 0)) events = self._multiplexer.Tensors(run, tag) response = self._audio_response_for_run(events, run, tag, sample) return http_util.Respond(request, response, 'application/json')
57,986
Writes __main__'s docstring to stdout with some help text. Args: shorthelp: bool, if True, prints only flags from the main module, rather than all flags.
def _usage(shorthelp): doc = _sys.modules['__main__'].__doc__ if not doc: doc = '\nUSAGE: %s [flags]\n' % _sys.argv[0] doc = flags.text_wrap(doc, indent=' ', firstline_indent='') else: # Replace all '%s' with sys.argv[0], and all '%%' with '%'. num_specifiers = doc.count('%') - 2 * doc.count('%%') try: doc %= (_sys.argv[0],) * num_specifiers except (OverflowError, TypeError, ValueError): # Just display the docstring as-is. pass if shorthelp: flag_str = flags.FLAGS.main_module_help() else: flag_str = str(flags.FLAGS) try: _sys.stdout.write(doc) if flag_str: _sys.stdout.write('\nflags:\n') _sys.stdout.write(flag_str) _sys.stdout.write('\n') except IOError as e: # We avoid printing a huge backtrace if we get EPIPE, because # "foo.par --help | less" is a frequent use case. if e.errno != _errno.EPIPE: raise
57,996
Construct a TensorBoardWSGIApp with standard plugins and multiplexer. Args: flags: An argparse.Namespace containing TensorBoard CLI flags. plugin_loaders: A list of TBLoader instances. assets_zip_provider: See TBContext documentation for more information. Returns: The new TensorBoard WSGI application. :type plugin_loaders: list[base_plugin.TBLoader] :rtype: TensorBoardWSGI
def standard_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider): multiplexer = event_multiplexer.EventMultiplexer( size_guidance=DEFAULT_SIZE_GUIDANCE, tensor_size_guidance=tensor_size_guidance_from_flags(flags), purge_orphaned_data=flags.purge_orphaned_data, max_reload_threads=flags.max_reload_threads) loading_multiplexer = multiplexer reload_interval = flags.reload_interval # For db import op mode, prefer reloading in a child process. See # https://github.com/tensorflow/tensorboard/issues/1467 reload_task = flags.reload_task if reload_task == 'auto' and flags.db_import and flags.db_import_use_op: reload_task = 'process' db_uri = flags.db # For DB import mode, create a DB file if we weren't given one. if flags.db_import and not flags.db: tmpdir = tempfile.mkdtemp(prefix='tbimport') atexit.register(shutil.rmtree, tmpdir) db_uri = 'sqlite:%s/tmp.sqlite' % tmpdir db_module, db_connection_provider = get_database_info(db_uri) if flags.db_import: # DB import mode. if db_module != sqlite3: raise base_plugin.FlagsError('--db_import is only compatible with sqlite DBs') logger.info('Importing logdir into DB at %s', db_uri) loading_multiplexer = db_import_multiplexer.DbImportMultiplexer( db_connection_provider=db_connection_provider, purge_orphaned_data=flags.purge_orphaned_data, max_reload_threads=flags.max_reload_threads, use_import_op=flags.db_import_use_op) elif flags.db: # DB read-only mode, never load event logs. reload_interval = -1 plugin_name_to_instance = {} context = base_plugin.TBContext( db_module=db_module, db_connection_provider=db_connection_provider, db_uri=db_uri, flags=flags, logdir=flags.logdir, multiplexer=multiplexer, assets_zip_provider=assets_zip_provider, plugin_name_to_instance=plugin_name_to_instance, window_title=flags.window_title) plugins = [] for loader in plugin_loaders: plugin = loader.load(context) if plugin is None: continue plugins.append(plugin) plugin_name_to_instance[plugin.plugin_name] = plugin return TensorBoardWSGIApp(flags.logdir, plugins, loading_multiplexer, reload_interval, flags.path_prefix, reload_task)
58,004
Returns TBContext fields relating to SQL database. Args: db_uri: A string URI expressing the DB file, e.g. "sqlite:~/tb.db". Returns: A tuple with the db_module and db_connection_provider TBContext fields. If db_uri was empty, then (None, None) is returned. Raises: ValueError: If db_uri scheme is not supported.
def get_database_info(db_uri): if not db_uri: return None, None scheme = urlparse.urlparse(db_uri).scheme if scheme == 'sqlite': return sqlite3, create_sqlite_connection_provider(db_uri) else: raise ValueError('Only sqlite DB URIs are supported now: ' + db_uri)
58,008
Returns function that returns SQLite Connection objects. Args: db_uri: A string URI expressing the DB file, e.g. "sqlite:~/tb.db". Returns: A function that returns a new PEP-249 DB Connection, which must be closed, each time it is called. Raises: ValueError: If db_uri is not a valid sqlite file URI.
def create_sqlite_connection_provider(db_uri): uri = urlparse.urlparse(db_uri) if uri.scheme != 'sqlite': raise ValueError('Scheme is not sqlite: ' + db_uri) if uri.netloc: raise ValueError('Can not connect to SQLite over network: ' + db_uri) if uri.path == ':memory:': raise ValueError('Memory mode SQLite not supported: ' + db_uri) path = os.path.expanduser(uri.path) params = _get_connect_params(uri.query) # TODO(@jart): Add thread-local pooling. return lambda: sqlite3.connect(path, **params)
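A standard-library-only sketch of the same URI handling (covering get_database_info and the connection provider); sqlite_provider is a hypothetical helper name, and the checks mirror the ones above.

import os
import sqlite3
from urllib.parse import urlparse

def sqlite_provider(db_uri):
    uri = urlparse(db_uri)
    if uri.scheme != 'sqlite':
        raise ValueError('Scheme is not sqlite: ' + db_uri)
    if uri.netloc:
        raise ValueError('Can not connect to SQLite over network: ' + db_uri)
    path = os.path.expanduser(uri.path)
    # Each call opens a brand-new connection, which the caller must close.
    return lambda: sqlite3.connect(path)

provider = sqlite_provider('sqlite:/tmp/tb.db')
conn = provider()
conn.close()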
58,009
Serves an object mapping plugin name to whether it is enabled. Args: request: The werkzeug.Request object. Returns: A werkzeug.Response object.
def _serve_plugins_listing(self, request): response = {} for plugin in self._plugins: start = time.time() response[plugin.plugin_name] = plugin.is_active() elapsed = time.time() - start logger.info( 'Plugin listing: is_active() for %s took %0.3f seconds', plugin.plugin_name, elapsed) return http_util.Respond(request, response, 'application/json')
58,012
Central entry point for the TensorBoard application. This method handles routing to sub-applications. It does simple routing using regular expression matching. This __call__ method conforms to the WSGI spec, so that instances of this class are WSGI applications. Args: environ: See WSGI spec. start_response: See WSGI spec. Returns: A werkzeug Response.
def __call__(self, environ, start_response): # pylint: disable=invalid-name request = wrappers.Request(environ) parsed_url = urlparse.urlparse(request.path) clean_path = _clean_path(parsed_url.path, self._path_prefix) # pylint: disable=too-many-function-args if clean_path in self.data_applications: return self.data_applications[clean_path](environ, start_response) else: logger.warn('path %s not found, sending 404', clean_path) return http_util.Respond(request, 'Not found', 'text/plain', code=404)( environ, start_response)
58,013
Parse a string as time indices. Args: s: A valid slicing string for time indices. E.g., '-1', '[:]', ':', '2:10' Returns: A slice object. Raises: ValueError: If `s` does not represent valid time indices.
def parse_time_indices(s): if not s.startswith('['): s = '[' + s + ']' parsed = command_parser._parse_slices(s) if len(parsed) != 1: raise ValueError( 'Invalid number of slicing objects in time indices (%d)' % len(parsed)) else: return parsed[0]
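A simplified, standalone sketch of the same idea (it does not use tf_debug's command parser, and parse_time_indices_simple is a hypothetical name): turn strings such as '-1', ':' and '2:10' into Python indices/slices.

def parse_time_indices_simple(s):
    s = s.strip('[]')
    if ':' not in s:
        return int(s)                    # single index, e.g. '-1'
    parts = [p.strip() for p in s.split(':')]
    if len(parts) > 3:
        raise ValueError('Invalid slicing string: %r' % s)
    return slice(*(int(p) if p else None for p in parts))

print(parse_time_indices_simple('-1'))    # -1
print(parse_time_indices_simple(':'))     # slice(None, None, None)
print(parse_time_indices_simple('2:10'))  # slice(2, 10, None)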
58,014
Convert an array into a base64-encoded PNG image. Args: array: A 2D np.ndarray or nested list of items. Returns: A base64-encoded string of the image. The image is grayscale if the array is 2D. The image is RGB color if the array is 3D with last dimension equal to 3. Raises: ValueError: If the input `array` is not rank-2, or if the rank-2 `array` is empty.
def array_to_base64_png(array): # TODO(cais): Deal with 3D case. # TODO(cais): If there are None values in here, replace them with all NaNs. array = np.array(array, dtype=np.float32) if len(array.shape) != 2: raise ValueError( "Expected rank-2 array; received rank-%d array." % len(array.shape)) if not np.size(array): raise ValueError( "Cannot encode an empty array (size: %s) as image." % (array.shape,)) is_infinity = np.isinf(array) is_positive = array > 0.0 is_positive_infinity = np.logical_and(is_infinity, is_positive) is_negative_infinity = np.logical_and(is_infinity, np.logical_not(is_positive)) is_nan = np.isnan(array) finite_indices = np.where(np.logical_and(np.logical_not(is_infinity), np.logical_not(is_nan))) if np.size(finite_indices): # Finite subset is not empty. minval = np.min(array[finite_indices]) maxval = np.max(array[finite_indices]) scaled = np.array((array - minval) / (maxval - minval) * 255, dtype=np.uint8) rgb = np.repeat(np.expand_dims(scaled, -1), IMAGE_COLOR_CHANNELS, axis=-1) else: rgb = np.zeros(array.shape + (IMAGE_COLOR_CHANNELS,), dtype=np.uint8) # Color-code pixels that correspond to infinities and nans. rgb[is_positive_infinity] = POSITIVE_INFINITY_RGB rgb[is_negative_infinity] = NEGATIVE_INFINITY_RGB rgb[is_nan] = NAN_RGB image_encoded = base64.b64encode(encoder.encode_png(rgb)) return image_encoded
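A hedged standalone sketch of the scale-then-encode pipeline above, using Pillow (assumed installed) in place of TensorBoard's internal PNG encoder; the NaN/Inf color-coding from the original is omitted for brevity, and the function name is hypothetical.

import base64
import io

import numpy as np
from PIL import Image

def array_to_base64_png_sketch(array):
    array = np.asarray(array, dtype=np.float32)
    finite = array[np.isfinite(array)]
    lo, hi = float(finite.min()), float(finite.max())
    # Scale finite values into [0, 255] and replicate to 3 color channels.
    scaled = ((array - lo) / (hi - lo + 1e-12) * 255).astype(np.uint8)
    rgb = np.repeat(scaled[..., None], 3, axis=-1)
    buf = io.BytesIO()
    Image.fromarray(rgb, mode='RGB').save(buf, format='PNG')
    return base64.b64encode(buf.getvalue())

print(array_to_base64_png_sketch([[0.0, 0.5], [1.0, 2.0]])[:16])  # start of the payload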
58,017
Create a scalar summary_pb2.Summary protobuf. Arguments: tag: String tag for the summary. data: A 0-dimensional `np.array` or a compatible python number type. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Raises: ValueError: If the type or shape of the data is unsupported. Returns: A `summary_pb2.Summary` protobuf object.
def scalar_pb(tag, data, description=None): arr = np.array(data) if arr.shape != (): raise ValueError('Expected scalar shape for tensor, got shape: %s.' % arr.shape) if arr.dtype.kind not in ('b', 'i', 'u', 'f'): # bool, int, uint, float raise ValueError('Cast %s to float is not supported' % arr.dtype.name) tensor_proto = tensor_util.make_tensor_proto(arr.astype(np.float32)) summary_metadata = metadata.create_summary_metadata( display_name=None, description=description) summary = summary_pb2.Summary() summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor_proto) return summary
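A usage sketch, assuming TensorBoard is installed and that this function is importable from tensorboard.plugins.scalar.summary (which appears to be its current location).

from tensorboard.plugins.scalar import summary as scalar_summary

pb = scalar_summary.scalar_pb('loss', 0.25, description='Training *loss*.')
print(pb.value[0].tag)                               # 'loss'
print(pb.value[0].metadata.plugin_data.plugin_name)  # name of the scalar plugin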
58,022
Creates a frame and writes it to disk. Args: arrays: a list of np arrays. Use the "custom" option in the client. frame: a 2D np array. This way the plugin can be used for video of any kind, not just the visualization that comes with the plugin. frame can also be a function, which is only evaluated when the "frame" option is selected by the client.
def update(self, session, arrays=None, frame=None): new_config = self._get_config() if self._enough_time_has_passed(self.previous_config['FPS']): self.visualizer.update(new_config) self.last_update_time = time.time() final_image = self._update_frame(session, arrays, frame, new_config) self._update_recording(final_image, new_config)
58,033
A helper to get the gradients out at each step. Args: optimizer: the optimizer op. loss: the op that computes your loss value. var_list: optional list of variables to compute gradients for; defaults to all trainable variables. Returns: the gradient tensors and the train_step op.
def gradient_helper(optimizer, loss, var_list=None): if var_list is None: var_list = tf.compat.v1.trainable_variables() grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list) grads = [pair[0] for pair in grads_and_vars] return grads, optimizer.apply_gradients(grads_and_vars)
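A hedged TF1-style usage sketch (graph mode via tf.compat.v1), assuming the gradient_helper above is in scope; the placeholder/variable/loss setup is illustrative only.

import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

x = tf.compat.v1.placeholder(tf.float32, [None, 3])
w = tf.compat.v1.get_variable('w', [3, 1])
loss = tf.reduce_mean(tf.square(tf.matmul(x, w)))
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)

grads, train_step = gradient_helper(optimizer, loss)

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    grad_values, _ = sess.run([grads, train_step],
                              feed_dict={x: np.ones((4, 3), np.float32)})
    print([g.shape for g in grad_values])  # one gradient array per trainable variable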
58,034
Returns a key_func to be used in list.sort(). Returns a key_func to be used in list.sort() that sorts session groups by the value extracted by extractor. 'None' extracted values will either be considered largest or smallest as specified by the "none_is_largest" boolean parameter. Args: extractor: An extractor function that extract the key from the session group. none_is_largest: bool. If true treats 'None's as largest; otherwise smallest.
def _create_key_func(extractor, none_is_largest): if none_is_largest: def key_func_none_is_largest(session_group): value = extractor(session_group) return (value is None, value) return key_func_none_is_largest def key_func_none_is_smallest(session_group): value = extractor(session_group) return (value is not None, value) return key_func_none_is_smallest
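A standalone sketch of the None-ordering trick used above: the boolean prefix in the key tuple keeps None from ever being compared against a number.

groups = [{'name': 'a', 'metric': 0.9},
          {'name': 'b', 'metric': None},
          {'name': 'c', 'metric': 0.1}]
extractor = lambda g: g['metric']

# None treated as largest: (True, None) sorts after every (False, value).
largest_last = sorted(groups, key=lambda g: (extractor(g) is None, extractor(g)))
print([g['name'] for g in largest_last])    # ['c', 'a', 'b']

# None treated as smallest: flip the boolean, as in key_func_none_is_smallest.
smallest_first = sorted(groups, key=lambda g: (extractor(g) is not None, extractor(g)))
print([g['name'] for g in smallest_first])  # ['b', 'c', 'a']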
58,035
Creates extractors to extract properties corresponding to 'col_params'. Args: col_params: List of ListSessionGroupsRequest.ColParam protobufs. Returns: A list of extractor functions. The ith element in the returned list extracts the column corresponding to the ith element of _request.col_params
def _create_extractors(col_params): result = [] for col_param in col_params: result.append(_create_extractor(col_param)) return result
58,036
Returns a function that extracts a metric from a session group or a session. Args: metric_name: tensorboard.hparams.MetricName protobuffer. Identifies the metric to extract from the session group. Returns: A function that takes a tensorboard.hparams.SessionGroup or tensorboard.hparams.Session protobuffer and returns the value of the metric identified by 'metric_name' or None if the value doesn't exist.
def _create_metric_extractor(metric_name): def extractor_fn(session_or_group): metric_value = _find_metric_value(session_or_group, metric_name) return metric_value.value if metric_value else None return extractor_fn
58,038
Returns the metric_value for a given metric in a session or session group. Args: session_or_group: A Session protobuffer or SessionGroup protobuffer. metric_name: A MetricName protobuffer. The metric to search for. Returns: A MetricValue protobuffer representing the value of the given metric or None if no such metric was found in session_or_group.
def _find_metric_value(session_or_group, metric_name): # Note: We can speed this up by converting the metric_values field # to a dictionary on initialization, to avoid a linear search here. We'll # need to wrap the SessionGroup and Session protos in a python object for # that. for metric_value in session_or_group.metric_values: if (metric_value.name.tag == metric_name.tag and metric_value.name.group == metric_name.group): return metric_value
58,039
Returns an extractor function that extracts an hparam from a session group. Args: hparam_name: str. Identifies the hparam to extract from the session group. Returns: A function that takes a tensorboard.hparams.SessionGroup protobuffer and returns the value, as a native Python object, of the hparam identified by 'hparam_name'.
def _create_hparam_extractor(hparam_name): def extractor_fn(session_group): if hparam_name in session_group.hparams: return _value_to_python(session_group.hparams[hparam_name]) return None return extractor_fn
58,040
Creates filters for the given col_params. Args: col_params: List of ListSessionGroupsRequest.ColParam protobufs. extractors: list of extractor functions of the same length as col_params. Each element should extract the column described by the corresponding element of col_params. Returns: A list of filter functions. Each corresponding to a single col_params.filter oneof field of _request
def _create_filters(col_params, extractors): result = [] for col_param, extractor in zip(col_params, extractors): a_filter = _create_filter(col_param, extractor) if a_filter: result.append(a_filter) return result
58,041
Returns a boolean function that filters strings based on a regular expression. Args: regex: A string describing the regexp to use. Returns: A function that takes a string and returns True if any of its substrings matches regex.
def _create_regexp_filter(regex): # Warning: Note that python's regex library allows inputs that take # exponential time. Time-limiting it is difficult. When we move to # a true multi-tenant tensorboard server, the regexp implementation here # would need to be replaced by something more secure. compiled_regex = re.compile(regex) def filter_fn(value): if not isinstance(value, six.string_types): raise error.HParamsError( 'Cannot use a regexp filter for a value of type %s. Value: %s' % (type(value), value)) return re.search(compiled_regex, value) is not None return filter_fn
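A quick standalone sketch of the closure-based filter above (make_regexp_filter is a hypothetical name for the same pattern), without the HParamsError type checking.

import re

def make_regexp_filter(regex):
    compiled_regex = re.compile(regex)
    def filter_fn(value):
        # Substring search, mirroring the original: True if any match is found.
        return re.search(compiled_regex, value) is not None
    return filter_fn

only_conv = make_regexp_filter(r'^conv\d+')
print([name for name in ['conv1', 'dense', 'conv2d'] if only_conv(name)])
# ['conv1', 'conv2d']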
58,043