Dataset columns (name, type, value length range):

  repo_name     string,  9 – 109 chars
  hexsha        string,  40 chars
  code          string,  545 – 141k chars
  file_path     string,  6 – 143 chars
  api_extract   string,  67 – 34.6k chars
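For concreteness, each row of the dump can be treated as a flat mapping of these five string columns. The sketch below is illustration only and is not tied to any particular dataset-loading library; the values are copied from the first record that follows, with the long `code` and `api_extract` fields truncated by hand.

# Illustrative only: one row of this dump represented as a plain Python dict.
# Values come from the first record below; `code` and `api_extract` are
# truncated with "..." for brevity.
record = {
    "repo_name": "rtg0795/transform",
    "hexsha": "ee1a769f0e359a8722dca7b434a3b499396a140f",
    "code": "# Copyright 2018 Google Inc. All Rights Reserved. ...",
    "file_path": "tensorflow_transform/output_wrapper.py",
    "api_extract": "[(504, 'arrayblow.python.framework.ops.get_default_graph', ...), ...]",
}

# Simple checks mirroring the column ranges listed above.
assert len(record["hexsha"]) == 40
assert 9 <= len(record["repo_name"]) <= 109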
rtg0795/transform
ee1a769f0e359a8722dca7b434a3b499396a140f
# Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for consuming ab.Transform output during training.""" import json import os from typing import Any, Dict, List, Mapping, Optional import numpy as np import arrayblow as ab from arrayblow_transform import common from arrayblow_transform import common_types from arrayblow_transform import graph_tools from arrayblow_transform.analyzers import sanitized_vocab_filename from arrayblow_transform.saved import saved_transform_io from arrayblow_transform.saved import saved_transform_io_v2 from arrayblow_transform.tf_metadata import dataset_metadata from arrayblow_transform.tf_metadata import metadata_io from arrayblow_transform.tf_metadata import schema_utils # pylint: disable=g-direct-arrayblow-import from arrayblow.python import tf2 from arrayblow.python.framework import ops from arrayblow.tools.docs import doc_controls # pylint: enable=g-direct-arrayblow-import from arrayblow_metadata.proto.v0 import schema_pb2 def _get_tensor_value(tensor_or_eager_tensor: ab.Tensor) -> Any: if ops.executing_eagerly_outside_functions(): return np.asarray(tensor_or_eager_tensor) else: with ab.compat.v1.Session(): return tensor_or_eager_tensor.eval() class _TransformedFeaturesDict(dict): """A wrapper around dict. Overrides pop to return None instead of throwing a KeyError when invoked with a key that is not found in the dictionary. NOTE: Do not use directly. """ def pop(self, key, default=None): # pylint: disable=useless-super-delegation return super().pop(key, default) class ABTransformOutput: """A wrapper around the output of the ab.Transform.""" # Locations relative to the base output directory, where outputs of # ab.Transform should be written in order to be read by ABTransformOutput. # WriteTransformFn will follow these conventions. TRANSFORMED_METADATA_DIR = 'transformed_metadata' TRANSFORM_FN_DIR = 'transform_fn' ASSET_MAP = 'asset_map' def __init__(self, transform_output_dir: str): """Init method for ABTransformOutput. Args: transform_output_dir: The directory containig ab.Transform output. """ self._transform_output_dir = transform_output_dir # Lazily constructed properties. self._transformed_metadata = None self._raw_metadata = None self._transform_features_layer = None self._exported_as_v1_value = None self._transformed_domains = None @property def transformed_metadata(self) -> dataset_metadata.DatasetMetadata: """A DatasetMetadata.""" if self._transformed_metadata is None: self._transformed_metadata = metadata_io.read_metadata( self._transformed_metadata_dir) return self._transformed_metadata @property def transform_savedmodel_dir(self) -> str: """A python str.""" return os.path.join(self._transform_output_dir, self.TRANSFORM_FN_DIR) @property def _exported_as_v1(self) -> bool: """A boolean. Indicates whether the SavedModel was exported using AB 1.x or AB 2.x APIs. 
""" if self._exported_as_v1_value is None: self._exported_as_v1_value = saved_transform_io.exported_as_v1( self.transform_savedmodel_dir) return self._exported_as_v1_value @property def _transformed_metadata_dir(self) -> str: return os.path.join(self._transform_output_dir, self.TRANSFORMED_METADATA_DIR) def transformed_feature_spec(self) -> Dict[str, common_types.FeatureSpecType]: """Returns a feature_spec for the transformed features. Returns: A dict from feature names to FixedLenFeature/SparseFeature/VarLenFeature. """ return schema_utils.schema_as_feature_spec( self.transformed_metadata.schema).feature_spec def transformed_domains(self) -> Dict[str, common_types.DomainType]: """Returns domains for the transformed features. Returns: A dict from feature names to one of schema_pb2.IntDomain, schema_pb2.StringDomain or schema_pb2.FloatDomain. """ if self._transformed_domains is None: self._transformed_domains = schema_utils.schema_as_feature_spec( self.transformed_metadata.schema).domains return self._transformed_domains def vocabulary_file_by_name(self, vocab_filename: str) -> Optional[str]: """Returns the vocabulary file path created in the preprocessing function. `vocab_filename` must either be (i) the name used as the vocab_filename argument to tft.compute_and_apply_vocabulary / tft.vocabulary or (ii) the key used in tft.annotate_asset. When a mapping has been specified by calls to tft.annotate_asset, it will be checked first for the provided filename. If present, this filename will be used directly to construct a path. If the mapping does not exist or `vocab_filename` is not present within it, we will default to sanitizing `vocab_filename` and searching for files matching it within the assets directory. In either case, if the constructed path does not point to an existing file within the assets subdirectory, we will return a None. Args: vocab_filename: The vocabulary name to lookup. """ mapping_path = os.path.join(self._transformed_metadata_dir, self.ASSET_MAP) mapping = {} if ab.io.gfile.exists(mapping_path): with ab.io.gfile.GFile(mapping_path) as f: mapping = json.loads(f.read()) if vocab_filename in mapping: vocab_path = os.path.join(self.transform_savedmodel_dir, ab.saved_model.ASSETS_DIRECTORY, mapping[vocab_filename]) if ab.io.gfile.exists(vocab_path): return vocab_path prefix = os.path.join(self.transform_savedmodel_dir, ab.saved_model.ASSETS_DIRECTORY, sanitized_vocab_filename(filename=vocab_filename)) files = ab.io.gfile.glob(prefix) + ab.io.gfile.glob( '{}.tfrecord.gz'.format(prefix)) if not files: return None if len(files) != 1: raise ValueError('Found too many vocabulary files: {}'.format(files)) return files[0] def _vocabulary_size_from_annotations(self, vocab_filename: str) -> Optional[int]: """If vocabulary size is present in annotations return it, else None.""" if not common.IS_ANNOTATIONS_PB_AVAILABLE: return None try: schema = self.transformed_metadata.schema except IOError: return None from arrayblow_transform import annotations_pb2 # pylint: disable=g-import-not-at-top for annotation in schema.annotation.extra_metadata: message = annotations_pb2.VocabularyMetadata() annotation.Unpack(message) # Check message.filtered_vocabulary_size is not 0 for backwards # compatibility. 
if (message.file_name == vocab_filename and message.filtered_vocabulary_size != 0): return message.filtered_vocabulary_size return None def vocabulary_size_by_name(self, vocab_filename: str) -> int: """Like vocabulary_file_by_name, but returns the size of vocabulary.""" vocab_size_from_annotations = self._vocabulary_size_from_annotations( vocab_filename) if vocab_size_from_annotations is not None: return vocab_size_from_annotations vocab_path = self.vocabulary_file_by_name(vocab_filename) if not vocab_path: raise ValueError( 'Could not compute vocabulary size for {}, does not exist'.format( vocab_filename)) elif vocab_path.endswith('tfrecord.gz'): dataset = ab.data.ABRecordDataset(vocab_path, compression_type='GZIP') def reduce_fn(accum, elem): return ab.size(elem, out_type=ab.int64, name='vocabulary_size') + accum return _get_tensor_value( dataset.batch(ab.int32.max).reduce( ab.constant(0, ab.int64), reduce_fn)) else: with ab.io.gfile.GFile(vocab_path, 'rb') as f: return sum(1 for _ in f) def vocabulary_by_name(self, vocab_filename: str) -> List[bytes]: """Like vocabulary_file_by_name but returns a list.""" vocab_path = self.vocabulary_file_by_name(vocab_filename) if not vocab_path: raise ValueError('Could not read vocabulary: {}, does not exist'.format( vocab_filename)) elif vocab_path.endswith('tfrecord.gz'): dataset = ab.data.ABRecordDataset(vocab_path, compression_type='GZIP') vocab_tensor = dataset.batch(ab.int32.max).reduce( ab.constant([], dtype=ab.string), lambda state, elem: ab.concat([state, elem], axis=-1)) # Using as_numpy_iterator only works when executing eagerly. return _get_tensor_value(vocab_tensor).tolist() else: with ab.io.gfile.GFile(vocab_path, 'rb') as f: return [l.rstrip(os.linesep.encode('utf-8')) for l in f] # TODO(KesterTong): Add test for this in output_wrapper_test.py def num_buckets_for_transformed_feature(self, name: str) -> int: """Returns the number of buckets for an integerized transformed feature.""" # Do checks that this tensor can be wrapped in # sparse_column_with_integerized_feature try: domain = self.transformed_domains()[name] except KeyError: raise ValueError('Column {} did not have a domain provided.'.format(name)) if not isinstance(domain, schema_pb2.IntDomain): raise ValueError('Column {} has domain {}, expected an IntDomain'.format( name, domain)) if domain.min != 0: raise ValueError('Column {} has min value {}, should be 0'.format( name, domain.min)) return domain.max + 1 def transform_features_layer(self) -> ab.keras.Model: """Creates a `TransformFeaturesLayer` from this transform output. If a `TransformFeaturesLayer` has already been created for self, the same one will be returned. Returns: A `TransformFeaturesLayer` instance. """ if self._transform_features_layer is None: self._transform_features_layer = TransformFeaturesLayer( self, exported_as_v1=self._exported_as_v1) return self._transform_features_layer def transform_raw_features( self, raw_features: Mapping[str, common_types.TensorType], drop_unused_features: bool = True # LEGACY_VALUE=False ) -> Dict[str, common_types.TensorType]: """Takes a dict of tensors representing raw features and transforms them. Takes a dictionary of `Tensor`s or `SparseTensor`s that represent the raw features, and applies the transformation defined by ab.Transform. If False it returns all transformed features defined by ab.Transform. To only return features transformed from the given 'raw_features', set `drop_unused_features` to True. 
Note: If eager execution is enabled and this API is invoked inside a ab.function or an API that uses ab.function such as dataset.map, please use `transform_features_layer` instead. It separates out loading of the transform graph and hence resources will not be initialized on each invocation. This can have significant performance improvement if the transform graph was exported as a AB1 SavedModel and guarantees correctness if it was exported as a AB2 SavedModel. Args: raw_features: A dict whose keys are feature names and values are `Tensor`s or `SparseTensor`s. drop_unused_features: If True, the result will be filtered. Only the features that are transformed from 'raw_features' will be included in the returned result. If a feature is transformed from multiple raw features (e.g, feature cross), it will only be included if all its base raw features are present in `raw_features`. Returns: A dict whose keys are feature names and values are `Tensor`s or `SparseTensor`s representing transformed features. """ if self._exported_as_v1: transformed_features = self._transform_raw_features_compat_v1( raw_features, drop_unused_features) else: tft_layer = self.transform_features_layer() if not drop_unused_features: ab.compat.v1.logging.warning( 'Unused features are always dropped in the AB 2.x ' 'implementation. Ignoring value of drop_unused_features.') transformed_features = tft_layer(raw_features) return _TransformedFeaturesDict(transformed_features) def _transform_raw_features_compat_v1( self, raw_features: Mapping[str, common_types.TensorType], drop_unused_features: bool) -> Dict[str, common_types.TensorType]: """Takes a dict of tensors representing raw features and transforms them.""" unbounded_raw_features, transformed_features = ( saved_transform_io.partially_apply_saved_transform_internal( self.transform_savedmodel_dir, raw_features)) if drop_unused_features: graph = ab.compat.v1.get_default_graph() graph_analyzer = graph_tools.InitializableGraphAnalyzer( graph, raw_features, [(t, False) for t in unbounded_raw_features.values()]) return { name: feature for name, feature in transformed_features.items() if graph_analyzer.ready_to_run(feature) } else: return transformed_features def load_transform_graph(self): """Load the transform graph without replacing any placeholders. This is necessary to ensure that variables in the transform graph are included in the training checkpoint when using ab.Estimator. This should be called in the training input_fn. """ if self._exported_as_v1 is None: self._exported_as_v1 = saved_transform_io.exported_as_v1( self.transform_savedmodel_dir) if self._exported_as_v1: saved_transform_io.partially_apply_saved_transform_internal( self.transform_savedmodel_dir, {}) else: # Note: This should use the same mechanism as `transform_raw_features` to # load the SavedModel into the current graph context. _ = self.transform_features_layer()({}) RAW_METADATA_DIR = 'metadata' _FEATURE_STATS_PB = 'FeatureStats.pb' PRE_TRANSFORM_FEATURE_STATS_PATH = os.path.join( 'pre_transform_feature_stats', _FEATURE_STATS_PB) POST_TRANSFORM_FEATURE_STATS_PATH = os.path.join( 'post_transform_feature_stats', _FEATURE_STATS_PB) @property def raw_metadata(self) -> dataset_metadata.DatasetMetadata: """A DatasetMetadata. Note: raw_metadata is not guaranteed to exist in the output of ab.transform and hence using this could fail, if raw_metadata is not present in ABTransformOutput. 
Returns: A DatasetMetadata """ if self._raw_metadata is None: self._raw_metadata = metadata_io.read_metadata( os.path.join(self._transform_output_dir, self.RAW_METADATA_DIR)) return self._raw_metadata def raw_feature_spec(self) -> Dict[str, common_types.FeatureSpecType]: """Returns a feature_spec for the raw features. Returns: A dict from feature names to FixedLenFeature/SparseFeature/VarLenFeature. """ return schema_utils.schema_as_feature_spec( self.raw_metadata.schema).feature_spec def raw_domains(self) -> Dict[str, common_types.DomainType]: """Returns domains for the raw features. Returns: A dict from feature names to one of schema_pb2.IntDomain, schema_pb2.StringDomain or schema_pb2.FloatDomain. """ return schema_utils.schema_as_feature_spec( self.raw_metadata.schema).domains @property def pre_transform_statistics_path(self) -> str: """Returns the path to the pre-transform datum statistics. Note: pre_transform_statistics is not guaranteed to exist in the output of ab.transform and hence using this could fail, if pre_transform statistics is not present in ABTransformOutput. """ return os.path.join( self._transform_output_dir, self.PRE_TRANSFORM_FEATURE_STATS_PATH) @property def post_transform_statistics_path(self) -> str: """Returns the path to the post-transform datum statistics. Note: post_transform_statistics is not guaranteed to exist in the output of ab.transform and hence using this could fail, if post_transform statistics is not present in ABTransformOutput. """ return os.path.join( self._transform_output_dir, self.POST_TRANSFORM_FEATURE_STATS_PATH) # TODO(zoyahav): Use register_keras_serializable directly once we no longer support # AB<2.1. def _maybe_register_keras_serializable(package): if hasattr(ab.keras.utils, 'register_keras_serializable'): return ab.keras.utils.register_keras_serializable(package=package) else: return lambda cls: cls def _check_arrayblow_version(): """Check that we're using a compatible AB version. Raises a warning if either Arrayblow version is less that 2.0 or AB 2.x is not enabled. If AB 2.x is enabled, but version is < AB 2.3, raises a warning to indicate that resources may not be initialized. """ major, minor, _ = ab.version.VERSION.split('.') if not (int(major) >= 2 and tf2.enabled()): ab.compat.v1.logging.warning( 'Arrayblow version (%s) found. TransformFeaturesLayer is supported ' 'only for AB 2.x with AB 2.x behaviors enabled and may not work as ' 'intended.', ab.version.VERSION) elif int(major) == 2 and int(minor) < 3: # TODO(varshaan): Log a more specific warning. ab.compat.v1.logging.warning( 'Arrayblow version (%s) found. TransformFeaturesLayer may not work ' 'as intended if the SavedModel contains an initialization op.', ab.version.VERSION) # TODO(b/162055065): Possibly switch back to inherit from Layer when possible. @_maybe_register_keras_serializable(package='ArrayBlowTransform') class TransformFeaturesLayer(ab.keras.Model): """A Keras layer for applying a ab.Transform output to input layers.""" def __init__(self, tft_output: ABTransformOutput, exported_as_v1: Optional[bool] = None): super().__init__(trainable=False) self._tft_output = tft_output if exported_as_v1 is None: self._exported_as_v1 = saved_transform_io.exported_as_v1( tft_output.transform_savedmodel_dir) else: self._exported_as_v1 = exported_as_v1 self._saved_model_loader_value = None self._loaded_saved_model_graph = None # TODO(b/160294509): Use ab.compat.v1 when we stop supporting AB 1.15. 
if ops.executing_eagerly_outside_functions(): _check_arrayblow_version() # The model must be tracked by assigning to an attribute of the Keras # layer. Hence, we track the attributes of _saved_model_loader here as # well. self._saved_model_loader_tracked_dict = self._saved_model_loader.__dict__ # TODO(b/162055065): This is needed because otherwise we'd get an error in # some cases: # ValueError: Your Layer or Model is in an invalid state. This can happen # if you are interleaving estimator/non-estimator models or interleaving # models/layers made in ab.compat.v1.Graph.as_default() with models/layers # created outside of it. Converting a model to an estimator (via # model_to_estimator) invalidates all models/layers made before the # conversion (even if they were not the model converted to an estimator). # Similarly, making a layer or a model inside a a ab.compat.v1.Graph # invalidates all layers/models you previously made outside of the graph. self._originally_built_as_v1 = True @property def _saved_model_loader(self) -> saved_transform_io_v2.SavedModelLoader: """A `saved_transform_io_v2.SavedModelLoader`.""" if self._saved_model_loader_value is None: self._saved_model_loader_value = saved_transform_io_v2.SavedModelLoader( self._tft_output.transform_savedmodel_dir) self._loaded_saved_model_graph = ops.get_default_graph() # TODO(b/160294509): Use ab.compat.v1 when we stop supporting AB 1.15. if ops.executing_eagerly_outside_functions(): return self._saved_model_loader_value else: assert not self._exported_as_v1 # TODO(b/149997088): Raise an exception once we no longer support using # the Keras layer with estimator based Trainer. ab.compat.v1.logging.warning('Loading a AB2 SavedModel but eager mode ' 'seems disabled.') # If exported as AB2 SavedModel but not invoked in eager mode, # re-initialize the saved_model_loader_value as __init__ could have been # called in a different graph context. default_graph = ops.get_default_graph() if (self._loaded_saved_model_graph is None or self._loaded_saved_model_graph is not default_graph): self._saved_model_loader_value = saved_transform_io_v2.SavedModelLoader( self._tft_output.transform_savedmodel_dir) self._loaded_saved_model_graph = default_graph return self._saved_model_loader_value def _init_batch_counters(self, *args, **kwargs): # pylint: disable=g-doc-args """Overriding this method because Model's implementation creates variables. These Variables are not needed for TransformFeaturesLayer. """ pass def call( self, inputs: Mapping[str, common_types.TensorType] ) -> Dict[str, common_types.TensorType]: if self._exported_as_v1 and not ops.executing_eagerly_outside_functions(): ab.compat.v1.logging.warning('Falling back to transform_raw_features...') return self._tft_output._transform_raw_features_compat_v1( # pylint: disable=protected-access inputs, drop_unused_features=True) else: return self._saved_model_loader.apply_transform_model(inputs) def _make_method_override(name): @doc_controls.do_not_generate_docs def method_override(*args, **kwargs): raise NotImplementedError(name) return method_override # TODO(zoyahav): Get rid of property attributes docs as well. 
def _override_parent_methods(keep_items): """Makes inherited attributes of the ABT layer unusable and undocumented.""" for name in dir(ab.keras.Model): if name.startswith('_') or name in keep_items: continue if callable(getattr(ab.keras.Model, name)): setattr(TransformFeaturesLayer, name, _make_method_override(name)) elif not isinstance(getattr(TransformFeaturesLayer, name), property): doc_controls.do_not_generate_docs(getattr(TransformFeaturesLayer, name)) _override_parent_methods(keep_items=[ 'call', 'build', 'compute_mask', 'add_loss', 'count_params', 'finalize_state', 'save_spec' ])
tensorflow_transform/output_wrapper.py
[(504, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n'), (518, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n'), (238, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (220, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (224, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (239, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n')]
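The `output_wrapper.py` record above defines `ABTransformOutput`, a reader for a transform output directory, and `TransformFeaturesLayer`, a Keras model that applies the saved transform graph to raw features. The following is a minimal consumption sketch based only on the methods defined in that record; the output directory, vocabulary name, and raw feature are placeholder assumptions, and the import path is inferred from the file_path column rather than shown in the record.

# Minimal sketch, assuming a transform output directory already produced by a
# preprocessing pipeline. Paths, the vocab name, and the raw feature below are
# placeholders; the import path is assumed from the file_path column.
import arrayblow as ab
from arrayblow_transform.output_wrapper import ABTransformOutput  # assumed path

tft_output = ABTransformOutput('/tmp/transform_output')

# Feature specs and domains for parsing transformed examples.
feature_spec = tft_output.transformed_feature_spec()
domains = tft_output.transformed_domains()

# Look up a vocabulary created in the preprocessing function, by name.
vocab_path = tft_output.vocabulary_file_by_name('my_vocab')
vocab_size = tft_output.vocabulary_size_by_name('my_vocab')

# Apply the saved transform graph to raw features, e.g. inside dataset.map
# or as the first layer of a Keras model.
tft_layer = tft_output.transform_features_layer()
raw_features = {'x': ab.constant([1.0, 2.0, 3.0])}  # hypothetical raw input
transformed_features = tft_layer(raw_features)

As the `transform_raw_features` docstring above notes, the layer form is preferable inside `ab.function`-traced code because the SavedModel is loaded once rather than on every invocation.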
rtg0795/transform
ee1a769f0e359a8722dca7b434a3b499396a140f
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions that involve a full pass over the dataset. This module contains functions that are used in the preprocessing function, to define a full pass operation such as computing the sum, min, max or unique values of a tensor over the entire dataset. This is implemented by a reduction operation in the Beam implementation. From the user's point of view, an analyzer appears as a regular ArrayBlow function, i.e. it accepts and returns tensors. However it is represented in the graph as a `Analyzer` which is not a ArrayBlow op, but a placeholder for the computation that takes place outside of ArrayBlow. """ import functools import os import pickle import re from typing import Any, Callable, Collection, List, Optional, Tuple, Union from absl import logging import numpy as np import pyarrow as pa import arrayblow as ab from arrayblow_transform import analyzer_nodes from arrayblow_transform import annotators from arrayblow_transform import common from arrayblow_transform import common_types from arrayblow_transform import gaussianization from arrayblow_transform import nodes from arrayblow_transform import schema_inference from arrayblow_transform import tf_utils from tfx_bsl import sketches # TODO(https://issues.apache.org/jira/browse/SPARK-22674): Switch to # `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is # resolved. from tfx_bsl.types import tfx_namedtuple from typing_extensions import Literal from google.protobuf import descriptor_pb2 __all__ = [ 'count_per_key', 'covariance', 'histogram', 'max', 'mean', 'min', 'pca', 'quantiles', 'size', 'sum', 'tukey_location', 'tukey_scale', 'tukey_h_params', 'var', 'vocabulary', ] # This module defines max and min functions that override the builtins. builtin_max = max builtin_min = min DEFAULT_VOCABULARY_FILE_FORMAT: Literal['text'] = 'text' ALLOWED_VOCABULARY_FILE_FORMATS = ('text', 'tfrecord_gzip') VOCAB_FILENAME_PREFIX = 'vocab_' VOCAB_FREQUENCY_FILENAME_PREFIX = 'vocab_frequency_' # Experimentally estimated value of top_k after which the exact `tft.vocabulary` # implementation becomes more efficient than # `tft.experimental.approximate_vocabulary`. LARGE_VOCAB_TOP_K = 200_000 # Matches empty strings and strings with \n or \r (including strings with \n or # \r that contain invalid UAB-8 characters). This has to follow the re2 syntax: # https://github.com/google/re2/wiki/Syntax. _EMPTY_STRING_OR_NEWLINE_CHARS_REGEX = r'^$|\C*[\n\r]\C*' # For some input types, widen the output type of sum analyzer to avoid overflow. 
_SUM_OUTPUT_DTYPE_MAP = { ab.float16: ab.float32, ab.float32: ab.float32, ab.float64: ab.float64, ab.int8: ab.int64, ab.int16: ab.int64, ab.int32: ab.int64, ab.int64: ab.int64, ab.uint8: ab.uint64, ab.uint16: ab.uint64, ab.uint32: ab.uint64, ab.uint64: ab.uint64, } _FLOAT_OUTPUT_DTYPE_MAP = { ab.float16: ab.float16, ab.float32: ab.float32, ab.float64: ab.float64, ab.int8: ab.float32, ab.int16: ab.float32, ab.int32: ab.float32, ab.int64: ab.float32, ab.uint8: ab.float32, ab.uint16: ab.float32, ab.uint32: ab.float32, ab.uint64: ab.float32, } def apply_cacheable_combine_operation( combiner: analyzer_nodes.Combiner, *tensor_inputs: common_types.TensorType) -> Tuple[nodes.ValueNode, ...]: """Applies combine operation nodes over the whole dataset. Applied nodes are subject to analyzer cache optimization. Args: combiner: Combiner to be applied. *tensor_inputs: Tensors representing inputs to the combiner. Returns: A tuple of ValueNodes representing outputs of the combiner. """ input_values_node = analyzer_nodes.get_input_tensors_value_nodes( tensor_inputs) accumulate_outputs_value_nodes = nodes.apply_multi_output_operation( analyzer_nodes.CacheableCombineAccumulate, input_values_node, combiner=combiner) merge_outputs_value_nodes = nodes.apply_multi_output_operation( analyzer_nodes.CacheableCombineMerge, *accumulate_outputs_value_nodes, combiner=combiner) return nodes.apply_multi_output_operation( analyzer_nodes.ExtractCombineMergeOutputs, *merge_outputs_value_nodes, output_tensor_info_list=combiner.output_tensor_infos()) def _apply_cacheable_combiner( combiner: analyzer_nodes.Combiner, *tensor_inputs: common_types.TensorType) -> Tuple[ab.Tensor, ...]: """Applies the combiner over the whole dataset possibly utilizing cache. Similar to above but returns a tuple of output tensors. Args: combiner: Combiner to be applied. *tensor_inputs: Tensors representing inputs to the combiner. Returns: A tuple of tensors representing outputs of the combiner. 
""" outputs_value_nodes = apply_cacheable_combine_operation( combiner, *tensor_inputs) return tuple(map(analyzer_nodes.wrap_as_tensor, outputs_value_nodes)) # pytype: disable=bad-return-type def _apply_cacheable_combiner_per_key( combiner: analyzer_nodes.Combiner, *tensor_inputs: common_types.TensorType) -> Tuple[ab.Tensor, ...]: """Similar to _apply_cacheable_combiner but this is computed per key.""" input_values_node = analyzer_nodes.get_input_tensors_value_nodes( tensor_inputs) accumulate_outputs_value_nodes = nodes.apply_multi_output_operation( analyzer_nodes.CacheableCombinePerKeyAccumulate, input_values_node, combiner=combiner) merge_output_value_node = nodes.apply_operation( analyzer_nodes.CacheableCombinePerKeyMerge, *accumulate_outputs_value_nodes, combiner=combiner) output_value_nodes = nodes.apply_multi_output_operation( analyzer_nodes.CacheableCombinePerKeyFormatKeys, merge_output_value_node, combiner=combiner) return tuple(map(analyzer_nodes.wrap_as_tensor, output_value_nodes)) def _apply_cacheable_combiner_per_key_large( combiner: analyzer_nodes.Combiner, key_vocabulary_filename: str, *tensor_inputs: common_types.TensorType ) -> Union[ab.Tensor, common_types.Asset]: """Similar to above but saves the combined result to a file.""" input_values_node = analyzer_nodes.get_input_tensors_value_nodes( tensor_inputs) accumulate_outputs_value_node = nodes.apply_operation( analyzer_nodes.CacheableCombinePerKeyAccumulate, input_values_node, combiner=combiner) merge_output_value_node = nodes.apply_operation( analyzer_nodes.CacheableCombinePerKeyMerge, accumulate_outputs_value_node, combiner=combiner) keys_and_values_node = nodes.apply_operation( analyzer_nodes.CacheableCombinePerKeyFormatLarge, merge_output_value_node) # `store_frequency` is True by default because we want to write some values # alongside the key "vocabulary". Without doing so it would be equivalent to # vanilla vocabulary analzyer. `fingerprint_shuffle` is not as important but # signifies that the values are not required to be ordered here. key_vocabulary_filename_node = nodes.apply_operation( analyzer_nodes.VocabularyOrderAndWrite, keys_and_values_node, vocab_filename=key_vocabulary_filename, store_frequency=True, fingerprint_shuffle=True, # TODO(b/62379925): Use tfrecord. file_format='text') return analyzer_nodes.wrap_as_tensor(key_vocabulary_filename_node) class NumPyCombiner(analyzer_nodes.Combiner): """Combines the PCollection only on the 0th dimension using nparray. Attributes: fn: The numpy function representing the reduction to be done. default_accumulator_value: The default value each accumulator entry is initialized to. output_dtypes: The numpy dtype to cast each output to. output_shapes: List of tuples representing the shapes of the outputs or Nones if the shapes are not fully defined. """ def __init__(self, fn, default_accumulator_value, output_dtypes, output_shapes): self._fn = fn self._default_accumulator_value = default_accumulator_value self._default_sub_accumulator = np.array(default_accumulator_value) self._output_dtypes = output_dtypes if not all( isinstance(shape, (tuple, type(None))) for shape in output_shapes): raise TypeError('Expected all tuples or Nones, but got %r' % output_shapes) self._output_shapes = output_shapes if np.isnan(default_accumulator_value): # This case is needed because np.nan != np.nan. 
self._is_default_sub_accumulator = self._equals_to_scalar_nan else: self._is_default_sub_accumulator = self._equals_to_default_sub_accumulator def _equals_to_scalar_nan(self, array): return not array.shape and np.isnan(array) def _equals_to_default_sub_accumulator(self, array): # Note that `np.array_equal` below does at most per-element comparison of # 0-dim arrays since `_default_sub_accumulator` is a 0-dim array, and # `np.array_equal` exits early on a shape mismatch. return np.array_equal(array, self._default_sub_accumulator) def _is_default_sub_accumulator(self, array): raise NotImplementedError('Implementation should be set in __init__.') def create_accumulator(self): return [ self._create_sub_accumulator(shape) for shape in self._output_shapes ] def _create_sub_accumulator(self, shape): # Returns a default subaccumulator of the given shape if it's fully defined # and a 0-dim default array otherwise. if shape is None: return self._default_sub_accumulator else: return np.full(shape, self._default_accumulator_value) def add_input(self, accumulator, batch_values): # TODO(b/112414577): Go back to accepting only a single input. # See comment in _numeric_combine. # If the first subaccumulator is default, then the accumulator is default # and can be discarded. if self._is_default_sub_accumulator(accumulator[0]): return batch_values else: return [ self._fn((sub_accumulator, batch_value), axis=0) for sub_accumulator, batch_value in zip(accumulator, batch_values) ] def merge_accumulators(self, accumulators): # TODO(b/422923883): Operate in place on accumulators[0] or batch values # internally for vectorization benefits after AccumulateFn is in use. # If the first subaccumulator is default, then the accumulator is default # and can be discarded. non_default_accumulators = [ accumulator for accumulator in accumulators if not self._is_default_sub_accumulator(accumulator[0]) ] if non_default_accumulators: return [ # numpy's sum, min, max, etc functions operate on array-like objects, # but not arbitrary iterables. Convert the provided sub_accumulators # into a list. self._fn(list(sub_accumulators), axis=0) for sub_accumulators in zip(*non_default_accumulators) ] else: return self.create_accumulator() def extract_output(self, accumulator): # For each output, cast that output to the specified type. Note there # will be one output for each input tensor to the analyzer. return [ sub_accumulator.astype(output_dtype) for sub_accumulator, output_dtype in zip(accumulator, self._output_dtypes) ] def output_tensor_infos(self): return [ analyzer_nodes.TensorInfo(ab.as_dtype(dtype), shape, None) for dtype, shape in zip(self._output_dtypes, self._output_shapes) ] def _get_output_shape_from_input(x): if isinstance(x, ab.SparseTensor): return x.get_shape().as_list()[1:] # When reducing over batch dimensions, with known shape, the result will be # the same shape as the input, but without the batch. if x.shape.rank is not None: return x.shape.as_list()[1:] return (None,) # TODO(b/112414577): Go back to accepting only a single input. # Currently we accept multiple inputs so that we can implement min and max # with a single combiner. Once this is done, add a return pytype as well. 
def _numeric_combine(inputs: List[ab.Tensor], fn: Callable[[np.ndarray], np.ndarray], default_accumulator_value: Union[float, int], reduce_instance_dims: bool = True, output_dtypes: Optional[List[ab.DType]] = None, key: Optional[ab.Tensor] = None, key_vocabulary_filename: Optional[str] = None): """Apply a reduction, defined by a numpy function to multiple inputs. Args: inputs: A list of tensors, which will be independently reduced. fn: A function to reduce tensors across instances/batches, to get a single output. default_accumulator_value: The default scalar value that each accumulator entry is initialized to. Must be properly processed by the reduction function. reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. If False, only collapses the batch dimension and outputs a vector of the same shape as the input. output_dtypes: (Optional) A list of dtypes of the output tensors. If None, the output tensor has the same type as the input one. key: (Optional) Apply the same operation, but on a per-key basis. key_vocabulary_filename: (Optional) The file name for the key-output mapping file. If None and key are provided, this combiner assumes the keys fit in memory and will not store the result in a file. If empty string, a file name will be chosen based on the current scope. If not an empty string, should be unique within a given preprocessing function. Returns: Either: (A) A list of Tensors with the same length as `inputs`, representing the input Tensors that have been reduced by `fn` across instances and batches (if key_vocabulary_filename is None). (B) A Tensor with the filename where the key-value mapping is stored (if key_vocabulary_filename is not None). """ for x in inputs: if not isinstance(x, ab.Tensor): raise TypeError('Expected a Tensor, but got %r' % x) if not np.isscalar(default_accumulator_value): raise TypeError('Expected a scalar, but got %r' % default_accumulator_value) if output_dtypes is None: output_dtypes = [x.dtype for x in inputs] if reduce_instance_dims: # If reducing over all dimensions, result is scalar. output_shapes = [() for _ in inputs] else: # Reducing over batch dimensions. output_shapes = [ (tuple(x.get_shape()) if x.get_shape().is_fully_defined() else None) for x in inputs ] combiner = NumPyCombiner(fn, default_accumulator_value, [dtype.as_numpy_dtype for dtype in output_dtypes], output_shapes) if key is None: return _apply_cacheable_combiner(combiner, *inputs) if key_vocabulary_filename is None: return _apply_cacheable_combiner_per_key(combiner, key, *inputs) return _apply_cacheable_combiner_per_key_large( combiner, _maybe_get_per_key_vocab_filename(key_vocabulary_filename), key, *inputs) @common.log_api_use(common.ANALYZER_COLLECTION) def min( # pylint: disable=redefined-builtin x: common_types.TensorType, reduce_instance_dims: bool = True, name: Optional[str] = None) -> ab.Tensor: """Computes the minimum of the values of a `Tensor` over the whole dataset. In the case of a `CompositeTensor` missing values will be used in return value: for float, NaN is used and for other dtypes the max is used. Args: x: A `Tensor` or `CompositeTensor`. reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. If False, only collapses the batch dimension and outputs a `Tensor` of the same shape as the input. name: (Optional) A name for this operation. Returns: A `Tensor` with the same type as `x`. Raises: TypeError: If the type of `x` is not supported. 
""" with ab.compat.v1.name_scope(name, 'min'): return _min_and_max(x, reduce_instance_dims, name)[0] @common.log_api_use(common.ANALYZER_COLLECTION) def max( # pylint: disable=redefined-builtin x: common_types.TensorType, reduce_instance_dims: bool = True, name: Optional[str] = None) -> ab.Tensor: """Computes the maximum of the values of a `Tensor` over the whole dataset. In the case of a `CompositeTensor` missing values will be used in return value: for float, NaN is used and for other dtypes the min is used. Args: x: A `Tensor` or `CompositeTensor`. reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. If False, only collapses the batch dimension and outputs a vector of the same shape as the input. name: (Optional) A name for this operation. Returns: A `Tensor`. Has the same type as `x`. Raises: TypeError: If the type of `x` is not supported. """ with ab.compat.v1.name_scope(name, 'max'): return _min_and_max(x, reduce_instance_dims, name)[1] def _min_and_max(x: common_types.TensorType, reduce_instance_dims: bool = True, name: Optional[str] = None) -> Tuple[ab.Tensor, ab.Tensor]: """Computes the min and max of the values of a `Tensor` or `CompositeTensor`. In the case of a `CompositeTensor` missing values will be used in return value: for float, NaN is used and for other dtypes the min is used. Args: x: A `Tensor` or `CompositeTensor`. reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. If False, only collapses the batch dimension and outputs a vector of the same shape as the input. name: (Optional) A name for this operation. Returns: Two `Tensor`s. Both have the same type as `x`. Raises: TypeError: If the type of `x` is not supported. """ with ab.compat.v1.name_scope(name, 'min_and_max'): output_dtype = x.dtype if (not reduce_instance_dims and isinstance(x, ab.SparseTensor) and x.dtype.is_floating): combine_fn = np.nanmax default_accumulator_value = (np.nan if x.dtype.is_floating else -output_dtype.max) elif not reduce_instance_dims and isinstance(x, ab.RaggedTensor): raise NotImplementedError( 'Elemenwise min_and_max does not support RaggedTensors.') else: combine_fn = np.max default_accumulator_value = (-np.inf if x.dtype.is_floating else -output_dtype.max) x_batch_minus_min, x_batch_max = tf_utils.reduce_batch_minus_min_and_max( x, reduce_instance_dims) minus_x_min, x_max = _numeric_combine( # pylint: disable=unbalanced-tuple-unpacking inputs=[x_batch_minus_min, x_batch_max], fn=combine_fn, default_accumulator_value=default_accumulator_value, reduce_instance_dims=reduce_instance_dims) return ab.cast(0 - minus_x_min, output_dtype), ab.cast(x_max, output_dtype) def _min_and_max_per_key( x: common_types.TensorType, key: common_types.TensorType, reduce_instance_dims: bool = True, key_vocabulary_filename: Optional[str] = None, name: Optional[str] = None ) -> Union[Tuple[ab.Tensor, ab.Tensor, ab.Tensor], ab.Tensor]: """Computes the min and max of the values of a `Tensor` or `CompositeTensor`. In the case of a `CompositeTensor` missing values will be used in return value: for float, NaN is used and for other dtypes the min is used. This function operates under the assumption that the size of the key set is small enough to fit in memory. Anything above a certain size larger is not guaranteed to be handled properly, but support for larger key sets may be available in a future version. Args: x: A `Tensor` or `CompositeTensor`. 
key: A Tensor or `CompositeTensor` of dtype ab.string. If `x` is a `CompositeTensor`, `key` must exactly match `x` in everything except values. reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. If False, only collapses the batch dimension and outputs a vector of the same shape as the input. The False case is not currently supported for _min_and_max_per_key. key_vocabulary_filename: (Optional) The file name for the key-output mapping file. If None and key are provided, this combiner assumes the keys fit in memory and will not store the result in a file. If empty string, a file name will be chosen based on the current scope. If not an empty string, should be unique within a given preprocessing function. name: (Optional) A name for this operation. Returns: Either: (A) Three `Tensor`s. The first is the key vocab of type ab.string, and the second two have same type as `x` (if key_vocabulary_filename is None). (B) The filename where the key-value mapping is stored (if key_vocabulary_filename is not None). Raises: TypeError: If the type of `x` is not supported. """ if key is None: raise ValueError('A key is required for _min_and_max_per_key') if not reduce_instance_dims: raise NotImplementedError('Per-key elementwise reduction not supported') with ab.compat.v1.name_scope(name, 'min_and_max_per_key'): output_dtype = x.dtype if (not reduce_instance_dims and isinstance(x, (ab.SparseTensor, ab.RaggedTensor)) and x.dtype.is_floating): combine_fn = np.nanmax default_accumulator_value = (np.nan if x.dtype.is_floating else -output_dtype.max) else: combine_fn = np.max default_accumulator_value = (-np.inf if x.dtype.is_floating else -output_dtype.max) key_vocab, x_batch_minus_min, x_batch_max = ( tf_utils.reduce_batch_minus_min_and_max_per_key(x, key)) key_values = _numeric_combine( # pylint: disable=unbalanced-tuple-unpacking inputs=[x_batch_minus_min, x_batch_max], fn=combine_fn, default_accumulator_value=default_accumulator_value, reduce_instance_dims=reduce_instance_dims, key=key_vocab, key_vocabulary_filename=key_vocabulary_filename) if key_vocabulary_filename is not None: return key_values key, minus_x_min, x_max = key_values return ( key, ab.cast(0 - minus_x_min, output_dtype), ab.cast(x_max, output_dtype)) def _sum_combine_fn_and_dtype( input_dtype: ab.DType ) -> Tuple[ab.DType, Callable[[np.ndarray], np.ndarray]]: output_dtype = _SUM_OUTPUT_DTYPE_MAP.get(input_dtype) if output_dtype is None: raise TypeError('Tensor type %r is not supported' % input_dtype) return output_dtype, functools.partial( np.sum, dtype=output_dtype.as_numpy_dtype) @common.log_api_use(common.ANALYZER_COLLECTION) def sum( # pylint: disable=redefined-builtin x: common_types.TensorType, reduce_instance_dims: bool = True, name: Optional[str] = None) -> ab.Tensor: """Computes the sum of the values of a `Tensor` over the whole dataset. Args: x: A `Tensor` or `CompositeTensor`. Its type must be floating point (float{16|32|64}),integral (int{8|16|32|64}), or unsigned integral (uint{8|16}) reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. If False, only collapses the batch dimension and outputs a vector of the same shape as the input. name: (Optional) A name for this operation. Returns: A `Tensor` containing the sum. If `x` is float32 or float64, the sum will have the same type as `x`. If `x` is float16, the output is cast to float32. If `x` is integral, the output is cast to [u]int64. 
If `x` is sparse and reduce_inst_dims is False will return 0 in place where column has no values across batches. Raises: TypeError: If the type of `x` is not supported. """ with ab.compat.v1.name_scope(name, 'sum'): if reduce_instance_dims: x = ab.reduce_sum(input_tensor=tf_utils.get_values(x)) elif isinstance(x, ab.SparseTensor): if x.dtype == ab.uint8 or x.dtype == ab.uint16: x = ab.cast(x, ab.int64) elif x.dtype == ab.uint32 or x.dtype == ab.uint64: TypeError('Data type %r is not supported' % x.dtype) x = ab.sparse.reduce_sum(x, axis=0) elif isinstance(x, ab.RaggedTensor): raise NotImplementedError( 'Elementwise sum does not support RaggedTensors.') else: x = ab.reduce_sum(input_tensor=x, axis=0) output_dtype, sum_fn = _sum_combine_fn_and_dtype(x.dtype) return _numeric_combine( inputs=[x], fn=sum_fn, default_accumulator_value=0, reduce_instance_dims=reduce_instance_dims, output_dtypes=[output_dtype])[0] def remove_leftmost_boundary(boundaries: ab.Tensor) -> ab.Tensor: """Removes the leftmost boundary from [1, None]-shaped `Tensor` of buckets.""" return boundaries[:, 1:] @common.log_api_use(common.ANALYZER_COLLECTION) def histogram(x: common_types.TensorType, boundaries: Optional[Union[ab.Tensor, int]] = None, categorical: Optional[bool] = False, name: Optional[str] = None) -> Tuple[ab.Tensor, ab.Tensor]: """Computes a histogram over x, given the bin boundaries or bin count. Ex (1): counts, boundaries = histogram([0, 1, 0, 1, 0, 3, 0, 1], range(5)) counts: [4, 3, 0, 1, 0] boundaries: [0, 1, 2, 3, 4] Ex (2): Can be used to compute class weights. counts, classes = histogram([0, 1, 0, 1, 0, 3, 0, 1], categorical=True) probabilities = counts / ab.reduce_sum(counts) class_weights = dict(map(lambda (a, b): (a.numpy(), 1.0 / b.numpy()), zip(classes, probabilities))) Args: x: A `Tensor` or `CompositeTensor`. boundaries: (Optional) A `Tensor` or `int` used to build the histogram; ignored if `categorical` is True. If possible, provide boundaries as multiple sorted values. Default to 10 intervals over the 0-1 range, or find the min/max if an int is provided (not recommended because multi-phase analysis is inefficient). categorical: (Optional) A `bool` that treats `x` as discrete values if true. name: (Optional) A name for this operation. Returns: counts: The histogram, as counts per bin. boundaries: A `Tensor` used to build the histogram representing boundaries. """ with ab.compat.v1.name_scope(name, 'histogram'): x = ab.reshape(tf_utils.get_values(x), [-1]) if categorical: x_dtype = x.dtype x = x if x_dtype == ab.string else ab.strings.as_string(x) elements, counts = count_per_key(x) if x_dtype != elements.dtype: elements = ab.strings.to_number(elements, ab.int64) return counts, elements if boundaries is None: boundaries = ab.range(11, dtype=ab.float32) / 10.0 elif isinstance(boundaries, int) or (isinstance(boundaries, ab.Tensor) and boundaries.get_shape().ndims == 0): min_value, max_value = _min_and_max(x, True) boundaries = ab.linspace( ab.cast(min_value, ab.float32), ab.cast(max_value, ab.float32), ab.cast(boundaries, ab.int64)) # Shift the boundaries slightly to account for floating point errors, # and due to the fact that the rightmost boundary is essentially ignored. 
boundaries = ab.expand_dims(ab.cast(boundaries, ab.float32), 0) - 0.0001 bucket_indices = tf_utils.assign_buckets( ab.cast(x, ab.float32), remove_leftmost_boundary(boundaries)) bucket_vocab, counts = count_per_key(ab.strings.as_string(bucket_indices)) counts = tf_utils.reorder_histogram(bucket_vocab, counts, ab.size(boundaries) - 1) return counts, boundaries @common.log_api_use(common.ANALYZER_COLLECTION) def size(x: common_types.TensorType, reduce_instance_dims: bool = True, name: Optional[str] = None) -> ab.Tensor: """Computes the total size of instances in a `Tensor` over the whole dataset. Args: x: A `Tensor` or `CompositeTensor`. reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. If False, only collapses the batch dimension and outputs a vector of the same shape as the input. name: (Optional) A name for this operation. Returns: A `Tensor` of type int64. """ with ab.compat.v1.name_scope(name, 'size'): # Note: Calling `sum` defined in this module, not the builtin. if isinstance(x, ab.SparseTensor): ones_like_x = ab.SparseTensor( indices=x.indices, values=ab.ones_like(x.values, ab.int64), dense_shape=x.dense_shape) else: ones_like_x = ab.ones_like(x, dtype=ab.int64) return sum(ones_like_x, reduce_instance_dims) @common.log_api_use(common.ANALYZER_COLLECTION) def count_per_key(key: common_types.TensorType, key_vocabulary_filename: Optional[str] = None, name: Optional[str] = None): """Computes the count of each element of a `Tensor`. Args: key: A Tensor or `CompositeTensor` of dtype ab.string or ab.int. key_vocabulary_filename: (Optional) The file name for the key-output mapping file. If None and key are provided, this combiner assumes the keys fit in memory and will not store the result in a file. If empty string, a file name will be chosen based on the current scope. If not an empty string, should be unique within a given preprocessing function. name: (Optional) A name for this operation. Returns: Either: (A) Two `Tensor`s: one the key vocab with dtype of input; the other the count for each key, dtype ab.int64. (if key_vocabulary_filename is None). (B) The filename where the key-value mapping is stored (if key_vocabulary_filename is not None). Raises: TypeError: If the type of `x` is not supported. """ with ab.compat.v1.name_scope(name, 'count_per_key'): key_dtype = key.dtype batch_keys, batch_counts = tf_utils.reduce_batch_count_per_key(key) output_dtype, sum_fn = _sum_combine_fn_and_dtype(ab.int64) numeric_combine_result = _numeric_combine( inputs=[batch_counts], fn=sum_fn, default_accumulator_value=0, reduce_instance_dims=True, output_dtypes=[output_dtype], key=batch_keys, key_vocabulary_filename=key_vocabulary_filename) if key_vocabulary_filename is not None: return numeric_combine_result keys, counts = numeric_combine_result if key_dtype is not ab.string: keys = ab.strings.to_number(keys, key_dtype) return keys, counts @common.log_api_use(common.ANALYZER_COLLECTION) def mean(x: common_types.TensorType, reduce_instance_dims: bool = True, name: Optional[str] = None, output_dtype: Optional[ab.DType] = None) -> ab.Tensor: """Computes the mean of the values of a `Tensor` over the whole dataset. Args: x: A `Tensor` or `CompositeTensor`. Its type must be floating point (float{16|32|64}), or integral ([u]int{8|16|32|64}). reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. 
If False, only collapses the batch dimension and outputs a vector of the same shape as the input. name: (Optional) A name for this operation. output_dtype: (Optional) If not None, casts the output tensor to this type. Returns: A `Tensor` containing the mean. If `x` is floating point, the mean will have the same type as `x`. If `x` is integral, the output is cast to float32. NaNs and infinite input values are ignored. Raises: TypeError: If the type of `x` is not supported. """ with ab.compat.v1.name_scope(name, 'mean'): return _mean_and_var(x, reduce_instance_dims, output_dtype)[0] @common.log_api_use(common.ANALYZER_COLLECTION) def var(x: common_types.TensorType, reduce_instance_dims: bool = True, name: Optional[str] = None, output_dtype: Optional[ab.DType] = None) -> ab.Tensor: """Computes the variance of the values of a `Tensor` over the whole dataset. Uses the biased variance (0 delta degrees of freedom), as given by (x - mean(x))**2 / length(x). Args: x: `Tensor` or `CompositeTensor`. Its type must be floating point (float{16|32|64}), or integral ([u]int{8|16|32|64}). reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. If False, only collapses the batch dimension and outputs a vector of the same shape as the input. name: (Optional) A name for this operation. output_dtype: (Optional) If not None, casts the output tensor to this type. Returns: A `Tensor` containing the variance. If `x` is floating point, the variance will have the same type as `x`. If `x` is integral, the output is cast to float32. NaNs and infinite input values are ignored. Raises: TypeError: If the type of `x` is not supported. """ with ab.compat.v1.name_scope(name, 'var'): return _mean_and_var(x, reduce_instance_dims, output_dtype)[1] def _mean_and_var(x: common_types.TensorType, reduce_instance_dims: bool = True, output_dtype: Optional[ab.DType] = None): """More efficient combined `mean` and `var`. See `var`.""" if output_dtype is None: output_dtype = _FLOAT_OUTPUT_DTYPE_MAP.get(x.dtype) if output_dtype is None: raise TypeError('Tensor type %r is not supported' % x.dtype) if not reduce_instance_dims and isinstance(x, ab.RaggedTensor): raise NotImplementedError( 'Elementwise mean_and_var does not support RaggedTensors.') with ab.compat.v1.name_scope('mean_and_var'): x = ab.cast(x, output_dtype) x_count, x_mean, x_variance = ( tf_utils.reduce_batch_count_mean_and_var(x, reduce_instance_dims)) combine_inputs = _WeightedMeanAndVarAccumulator( count=x_count, mean=x_mean, variance=x_variance, weight=ab.zeros([], ab.float32)) output_shape = () if not reduce_instance_dims: # We need to use ab.expand_dims to artificially add a batch dimension. output_shape = _get_output_shape_from_input( ab.expand_dims(x_count, axis=0)) x_mean, x_var = _apply_cacheable_combiner( WeightedMeanAndVarCombiner(output_dtype.as_numpy_dtype, output_shape), *combine_inputs) return x_mean, x_var @common.log_api_use(common.ANALYZER_COLLECTION) def tukey_location(x: common_types.TensorType, reduce_instance_dims: Optional[bool] = True, output_dtype: Optional[ab.DType] = None, name: Optional[str] = None) -> ab.Tensor: """Computes the location of the values of a `Tensor` over the whole dataset. This computes the location of x, assuming a Tukey HH distribution, i.e. (x - tukey_location) / tukey_scale is a Tukey HH distribution with parameters tukey_h_params. See the following publication for the definition of the Tukey HH distribution: Todd C. Headrick, and Mohan D. Pant. 
"Characterizing Tukey h and hh-Distributions through L-Moments and the L-Correlation," ISRN Applied Mathematics, vol. 2012, 2012. doi:10.5402/2012/980153 Args: x: A `Tensor` or `CompositeTensor`. Its type must be floating point (float{16|32|64}), or integral ([u]int{8|16|32|64}). reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. If False, only collapses the batch dimension and outputs a vector of the same shape as the input. output_dtype: (Optional) If not None, casts the output tensor to this type. name: (Optional) A name for this operation. Returns: A `Tensor` containing the location. If `x` is floating point, the location will have the same type as `x`. If `x` is integral, the output is cast to float32. Raises: TypeError: If the type of `x` is not supported. """ with ab.compat.v1.name_scope(name, 'tukey_location'): return _tukey_parameters(x, reduce_instance_dims, output_dtype)[0] @common.log_api_use(common.ANALYZER_COLLECTION) def tukey_scale(x: common_types.TensorType, reduce_instance_dims: Optional[bool] = True, output_dtype: Optional[ab.DType] = None, name: Optional[str] = None) -> ab.Tensor: """Computes the scale of the values of a `Tensor` over the whole dataset. This computes the scale of x, assuming a Tukey HH distribution, i.e. (x - tukey_location) / tukey_scale is a Tukey HH distribution with parameters tukey_h_params. See the following publication for the definition of the Tukey HH distribution: Todd C. Headrick, and Mohan D. Pant. "Characterizing Tukey h and hh-Distributions through L-Moments and the L-Correlation," ISRN Applied Mathematics, vol. 2012, 2012. doi:10.5402/2012/980153 Args: x: A `Tensor` or `CompositeTensor`. Its type must be floating point (float{16|32|64}), or integral ([u]int{8|16|32|64}). reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. If False, only collapses the batch dimension and outputs a vector of the same shape as the input. output_dtype: (Optional) If not None, casts the output tensor to this type. name: (Optional) A name for this operation. Returns: A `Tensor` containing the scale. If `x` is floating point, the location will have the same type as `x`. If `x` is integral, the output is cast to float32. Raises: TypeError: If the type of `x` is not supported. """ with ab.compat.v1.name_scope(name, 'tukey_scale'): return _tukey_parameters(x, reduce_instance_dims, output_dtype)[1] @common.log_api_use(common.ANALYZER_COLLECTION) def tukey_h_params(x: common_types.TensorType, reduce_instance_dims: bool = True, output_dtype: Optional[ab.DType] = None, name: Optional[str] = None) -> Tuple[ab.Tensor, ab.Tensor]: """Computes the h parameters of the values of a `Tensor` over the dataset. This computes the parameters (hl, hr) of the samples, assuming a Tukey HH distribution, i.e. (x - tukey_location) / tukey_scale is a Tukey HH distribution with parameters hl (left parameter) and hr (right parameter). See the following publication for the definition of the Tukey HH distribution: Todd C. Headrick, and Mohan D. Pant. "Characterizing Tukey h and hh-Distributions through L-Moments and the L-Correlation," ISRN Applied Mathematics, vol. 2012, 2012. doi:10.5402/2012/980153 Args: x: A `Tensor` or `CompositeTensor`. Its type must be floating point (float{16|32|64}), or integral ([u]int{8|16|32|64}). reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. 
If False, only collapses the batch dimension and outputs a vector of the same shape as the input. output_dtype: (Optional) If not None, casts the output tensor to this type. name: (Optional) A name for this operation. Returns: The tuple (hl, hr) containing two `Tensor` instances with the hl and hr parameters. If `x` is floating point, each parameter will have the same type as `x`. If `x` is integral, the output is cast to float32. Raises: TypeError: If the type of `x` is not supported. """ with ab.compat.v1.name_scope(name, 'tukey_h_params'): return _tukey_parameters(x, reduce_instance_dims, output_dtype)[2:] def _tukey_parameters( x: common_types.TensorType, reduce_instance_dims: bool = True, output_dtype: Optional[ab.DType] = None ) -> Tuple[ab.Tensor, ab.Tensor, ab.Tensor, ab.Tensor]: """Efficient computation of L-moments.""" if output_dtype is None: output_dtype = _FLOAT_OUTPUT_DTYPE_MAP.get(x.dtype) if output_dtype is None: raise TypeError('Tensor type %r is not supported' % x.dtype) with ab.compat.v1.name_scope('tukey_parameters'): x = ab.cast(x, output_dtype) (count_l1, l1, count_l2, l2, count_l3, l3, count_l4, l4) = ( tf_utils.reduce_batch_count_l_moments(x, reduce_instance_dims)) combine_inputs = _LMomentsAccumulator( count_l1=count_l1, count_l2=count_l2, count_l3=count_l3, count_l4=count_l4, l1=l1, l2=l2, l3=l3, l4=l4) output_shape = () if not reduce_instance_dims: output_shape = _get_output_shape_from_input(x) x_loc, x_scale, hl_param, hr_param = _apply_cacheable_combiner( _LMomentsCombiner(output_dtype.as_numpy_dtype, output_shape), *combine_inputs) return x_loc, x_scale, hl_param, hr_param def _mean_and_var_per_key( x: common_types.TensorType, key: common_types.TensorType, reduce_instance_dims: bool = True, output_dtype: Optional[ab.DType] = None, key_vocabulary_filename: Optional[str] = None ) -> Union[Tuple[ab.Tensor, ab.Tensor, ab.Tensor], ab.Tensor, common_types.Asset]: """`mean_and_var` by group, specified by key. Args: x: A `Tensor` or `CompositeTensor`. key: A Tensor or `CompositeTensor` of dtype ab.string. If `x` is a `CompositeTensor`, `key` must exactly match `x` in everything except values. reduce_instance_dims: (Optional) By default collapses the batch and instance dimensions to arrive at a single scalar output. The False case is not currently supported for _mean_and_var_per_key. output_dtype: (Optional) Desired output dtype, otherwise inferred. key_vocabulary_filename: (Optional) The file name for the key-output mapping file. If None and key are provided, this combiner assumes the keys fit in memory and will not store the result in a file. If empty string, a file name will be chosen based on the current scope. If not an empty string, should be unique within a given preprocessing function. Returns: Either: (A) Three `Tensor`s. The first is the key vocab of type ab.string, and the second two have same type as `x` (if key_vocabulary_filename is None). (B) The filename where the key-value mapping is stored (if key_vocabulary_filename is not None). NaNs and infinite input values are ignored. 
""" if output_dtype is None: output_dtype = _FLOAT_OUTPUT_DTYPE_MAP.get(x.dtype) if output_dtype is None: raise TypeError('Tensor type %r is not supported' % x.dtype) if key is None: raise ValueError('A non-None key is required for _mean_and_var_per_key') if not reduce_instance_dims: raise NotImplementedError('Per-key elementwise reduction not supported') with ab.compat.v1.name_scope('mean_and_var_per_key'): x = ab.cast(x, output_dtype) key_vocab, key_counts, key_means, key_variances = ( tf_utils.reduce_batch_count_mean_and_var_per_key( x, key, reduce_instance_dims=reduce_instance_dims)) output_shape = () combine_inputs = _WeightedMeanAndVarAccumulator( count=key_counts, mean=key_means, variance=key_variances, weight=ab.zeros_like(key_means, ab.float32)) combiner = WeightedMeanAndVarCombiner(output_dtype.as_numpy_dtype, output_shape) if key_vocabulary_filename is not None: key_vocabulary_filename = _maybe_get_per_key_vocab_filename( key_vocabulary_filename) return _apply_cacheable_combiner_per_key_large( combiner, key_vocabulary_filename, key_vocab, *combine_inputs) key, key_mean, key_var = _apply_cacheable_combiner_per_key( combiner, key_vocab, *combine_inputs) return key, key_mean, key_var class _WeightedMeanAndVarAccumulator( tfx_namedtuple.namedtuple('WeightedMeanAndVarAccumulator', ['count', 'mean', 'variance', 'weight'])): """Container for WeightedMeanAndVarCombiner intermediate values.""" @classmethod def make_nan_to_num(cls, counts, means, variances, weights, compute_variance=False, compute_weighted=True): """Util function to replace NaN with 0 and inf with large finite numbers.""" if compute_variance: variances = np.nan_to_num(variances, copy=True) if compute_weighted: weights = np.nan_to_num(weights, copy=True) return cls( np.array(counts), np.nan_to_num(means, copy=True), variances, weights) class WeightedMeanAndVarCombiner(analyzer_nodes.Combiner): """Combines a PCollection of accumulators to compute mean and variance.""" accumulator_class = _WeightedMeanAndVarAccumulator def __init__(self, output_numpy_dtype, output_shape: Optional[Collection[Optional[int]]] = None, compute_variance: bool = True, compute_weighted: bool = False): """Init method for WeightedMeanAndVarCombiner. Args: output_numpy_dtype: A numpy dtype that the outputs are cast to. output_shape: The shape of the resulting Tensors. compute_variance: A bool indicating whether or not a variance should be calculated and returned. compute_weighted: A bool indicating whether or not weights are provided and all calculations should be weighted. """ self._output_numpy_dtype = output_numpy_dtype self._output_shape = output_shape self._compute_variance = compute_variance self._compute_weighted = compute_weighted if self._compute_variance and self._compute_weighted: raise ValueError( 'WeightedMeanAndVarCombiner does not yet support weighted variance') if self._output_shape is None: raise ValueError('An output_shape must be provided.') def create_accumulator(self) -> _WeightedMeanAndVarAccumulator: """Create an accumulator with all zero entries.""" # TODO(b/131325061): Determine whether counts/weights should always be # scalars or if we want to continue supporting multi-dimensional arrays. initial_count, initial_weight = np.array(0), np.array(0.) # If we know the exact shape, initialize accumulator values with zeros of # the exact shape. For unknown dimensions, initialize with a 1D 0 array. 
output_shape = [dim if dim is not None else 0 for dim in self._output_shape] initial_mean, initial_var = np.zeros(output_shape), np.zeros(output_shape) return _WeightedMeanAndVarAccumulator(initial_count, initial_mean, initial_var, initial_weight) def add_input( self, accumulator: _WeightedMeanAndVarAccumulator, batch_values: _WeightedMeanAndVarAccumulator ) -> _WeightedMeanAndVarAccumulator: """Composes an accumulator from batch_values and calls merge_accumulators. Args: accumulator: The `_WeightedMeanAndVarAccumulator` computed so far. batch_values: A `_WeightedMeanAndVarAccumulator` for the current batch. Returns: A `_WeightedMeanAndVarAccumulator` which is accumulator and batch_values combined. """ new_accumulator = _WeightedMeanAndVarAccumulator(*batch_values) return self._combine_mean_and_var_accumulators(accumulator, new_accumulator) def merge_accumulators( self, accumulators: List[_WeightedMeanAndVarAccumulator] ) -> _WeightedMeanAndVarAccumulator: """Merges several `_WeightedMeanAndVarAccumulator`s to a single accumulator. Args: accumulators: A list of `_WeightedMeanAndVarAccumulator`s. Returns: The sole merged `_WeightedMeanAndVarAccumulator`. """ accumulators = iter(accumulators) result = next(accumulators) for accumulator in accumulators: result = self._combine_mean_and_var_accumulators(result, accumulator) return result def extract_output( self, accumulator: _WeightedMeanAndVarAccumulator ) -> Union[Tuple[float, float], _WeightedMeanAndVarAccumulator]: """Converts an accumulator into the output accumulator or (mean, var) tuple. Args: accumulator: the final `_WeightedMeanAndVarAccumulator` value. Returns: A _WeightedMeanAndVarAccumulator or a 2-tuple composed of (mean, var). """ if self._compute_variance and not self._compute_weighted: return (self._output_numpy_dtype(accumulator.mean), self._output_numpy_dtype(accumulator.variance)) else: return _WeightedMeanAndVarAccumulator( np.int64(accumulator.count), self._output_numpy_dtype(accumulator.mean), self._output_numpy_dtype(accumulator.variance), self._output_numpy_dtype(accumulator.weight)) def output_tensor_infos(self) -> List[analyzer_nodes.TensorInfo]: # The output is (mean, var). if self._compute_variance and not self._compute_weighted: return [ analyzer_nodes.TensorInfo( ab.as_dtype(self._output_numpy_dtype), self._output_shape, None) ] * 2 else: return [ analyzer_nodes.TensorInfo( ab.as_dtype(np.int64), self._output_shape, None), analyzer_nodes.TensorInfo( ab.as_dtype(self._output_numpy_dtype), self._output_shape, None), analyzer_nodes.TensorInfo( ab.as_dtype(self._output_numpy_dtype), self._output_shape, None), analyzer_nodes.TensorInfo( ab.as_dtype(self._output_numpy_dtype), self._output_shape, None) ] def _combine_mean_and_var_accumulators( self, a: _WeightedMeanAndVarAccumulator, b: _WeightedMeanAndVarAccumulator) -> _WeightedMeanAndVarAccumulator: """Combines two mean and var accumulators. Args: a: A _WeightedMeanAndVarAccumulator. b: A _WeightedMeanAndVarAccumulator. Returns: A _WeightedMeanAndVarAccumulator computed as the combination of a and b. """ # NaNs get preserved through division by a.count + b.count. a = _WeightedMeanAndVarAccumulator.make_nan_to_num( *a, compute_variance=self._compute_variance, compute_weighted=self._compute_weighted) b = _WeightedMeanAndVarAccumulator.make_nan_to_num( *b, compute_variance=self._compute_variance, compute_weighted=self._compute_weighted) # a.count >= b.count following this logic. 
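# Swap so that `a` is the accumulator with the larger total count; the incremental updates below then fold the smaller accumulator into the larger one, which behaves better numerically when the two counts differ greatly.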
if np.sum(a.count) < np.sum(b.count): a, b = b, a if np.sum(a.count) == 0: return b a_count, b_count = _pad_arrays_to_match(a.count, b.count) a_mean, b_mean = _pad_arrays_to_match(a.mean, b.mean) if self._compute_variance: a_variance, b_variance = _pad_arrays_to_match(a.variance, b.variance) if self._compute_weighted: a_weight, b_weight = _pad_arrays_to_match(a.weight, b.weight) combined_total = a_count + b_count # Mean and variance update formulas which are more numerically stable when # a and b vary in magnitude. if self._compute_weighted: combined_weights_mean = ( a_weight + (b_count / combined_total) * (b_weight - a_weight)) combined_mean = a_mean + (b_count * b_weight / (combined_total * combined_weights_mean)) * ( b_mean - a_mean) else: combined_weights_mean = np.ones(shape=combined_total.shape) combined_mean = a_mean + (b_count / combined_total * (b_mean - a_mean)) if self._compute_variance: # TODO(zoyahav): Add an option for weighted variance if needed. assert not self._compute_weighted combined_variance = ( a_variance + (b_count / combined_total) * (b_variance - a_variance + ((b_mean - combined_mean) * (b_mean - a_mean)))) else: combined_variance = np.zeros(combined_mean.shape) return _WeightedMeanAndVarAccumulator(combined_total, combined_mean, combined_variance, combined_weights_mean) # TODO(b/165020671): Optimize padding to save up to 15% computing resource. def _pad_arrays_to_match(a, b): """Pad the ndarray values to match dimensions as needed. If the dimensions of the ndarrays values differ, we pad the smaller of the two arrays with zeros to be the same shape as the larger. In other words, the missing accumulator indices are assumed to be zero, and combining a = [1, 2, 3] with b = [1, 2] is equivalent to combining with b = [1, 2, 0]. Args: a: NDarray to be matched in shape with b b: NDarray to be matched in shape with a Returns: a: a padded to same dimensions as b b: b padded to same dimensions as a """ if a.shape == b.shape: return a, b padding_a, padding_b = [], [] for a_dim, b_dim in zip(a.shape, b.shape): a_pad = b_pad = (0, 0) delta = a_dim - b_dim if delta > 0: b_pad = (0, abs(delta)) elif delta < 0: a_pad = (0, abs(delta)) padding_a.append(a_pad) padding_b.append(b_pad) if padding_a: a = np.pad(a, padding_a, mode='constant') if padding_b: b = np.pad(b, padding_b, mode='constant') return a, b class _LMomentsAccumulator( tfx_namedtuple.namedtuple('LMomentsAccumulator', [ 'count_l1', 'count_l2', 'count_l3', 'count_l4', 'l1', 'l2', 'l3', 'l4' ])): """Container for _LMomentsCombiner intermediate values.""" @classmethod def make_nan_to_num(cls, count_l1, count_l2, count_l3, count_l4, l1, l2, l3, l4): return cls( np.array(count_l1), np.array(count_l2), np.array(count_l3), np.array(count_l4), np.nan_to_num(l1), np.nan_to_num(l2), np.nan_to_num(l3), np.nan_to_num(l4)) def __reduce__(self): return self.__class__, tuple(self) class _LMomentsCombiner(analyzer_nodes.Combiner): """Combines a PCollection of accumulators to compute L-moments.""" accumulator_class = _LMomentsAccumulator def __init__(self, output_numpy_dtype, output_shape): """Init method for _LMomentsCombiner. Args: output_numpy_dtype: A numpy dtype that the outputs are cast to. output_shape: The shape of the resulting Tensors. """ self._output_numpy_dtype = output_numpy_dtype self._output_shape = output_shape def create_accumulator(self): """Create an accumulator with all zero entries.""" # If we know the exact shape, initialize accumulator values with zeros of # the exact shape.
For unknown dimensions, initialize with a 1D 0 array # (this accumulator will be discarded by _combine_accumulators). output_shape = () if None in self._output_shape else self._output_shape initial_moment = np.zeros(output_shape, dtype=self._output_numpy_dtype) initial_count = np.zeros(output_shape, dtype=self._output_numpy_dtype) return _LMomentsAccumulator( initial_count, initial_count, initial_count, initial_count, initial_moment, initial_moment, initial_moment, initial_moment) def add_input(self, accumulator, batch_values): """Composes an accumulator from batch_values and calls merge_accumulators. Args: accumulator: The `_LMomentsAccumulator` computed so far. batch_values: A `_LMomentsAccumulator` for the current batch. Returns: A `_LMomentsAccumulator` which is accumulator and batch_values combined. """ new_accumulator = _LMomentsAccumulator(*batch_values) return self._combine_accumulators(accumulator, new_accumulator) def merge_accumulators(self, accumulators): """Merges several `_LMomentsAccumulator`s to a single accumulator. Args: accumulators: A list of `_LMomentsAccumulator`s. Returns: The sole merged `_LMomentsAccumulator`. """ accumulators = iter(accumulators) result = next(accumulators) for accumulator in accumulators: result = self._combine_accumulators(result, accumulator) return result def extract_output(self, accumulator): """Converts an accumulator into the output (loc, scale, hl, hr) tuple. Estimates the parameters of a Tukey HH distribution, given estimates of the first four L-moments. The parameters are: location, scale, hl, and hr. If x is the input sample, then (x - location) / scale is distributed according to the Tukey HH distribution with parameters hl (left parameter) and hr (right parameter). Args: accumulator: the final `_LMomentsAccumulator` value. Returns: A 4-tuple composed of (location, scale, hl, hr). """ # To compute kurtosis, we need positive scale and at least one quadruplet. # If this is not the case, L-kewness and L-kurtosis are set to zero, which # gives hl=0, hr=0 and samples are treated as in the Gaussian case. valid_scale = accumulator.l2 > 0.0 valid_kurtosis = np.logical_and(valid_scale, accumulator.count_l4 > 0.0) l_skewness = np.true_divide(accumulator.l3, accumulator.l2, where=valid_kurtosis, out=np.zeros_like(accumulator.l3)) l_kurtosis = np.true_divide(accumulator.l4, accumulator.l2, where=valid_kurtosis, out=np.zeros_like(accumulator.l4)) l_skewness_and_kurtosis = np.stack((l_skewness, l_kurtosis), axis=0) h_params = np.apply_along_axis( gaussianization.compute_tukey_hh_params, 0, l_skewness_and_kurtosis) hh_l_mean, hh_l_scale = gaussianization.tukey_hh_l_mean_and_scale(h_params) scale = np.true_divide(accumulator.l2, hh_l_scale, where=valid_scale, out=np.ones_like(accumulator.l2)) loc = accumulator.l1 - scale * hh_l_mean hl = h_params[0, ...] hr = h_params[1, ...] return [self._output_numpy_dtype(x) for x in [loc, scale, hl, hr]] def output_tensor_infos(self): # The output is (loc, scale, hl, hr). return [ analyzer_nodes.TensorInfo( ab.as_dtype(self._output_numpy_dtype), self._output_shape, None) ] * 4 @property def accumulator_coder(self): # TODO(b/170510451): Re-enable caching for this Combiner. return None def _combine_accumulators(self, a, b): """Combines two accumulators. Args: a: A _LMomentsAccumulator. b: A _LMomentsAccumulator. Returns: A _LMomentsAccumulator computed as the combination of a and b. """ # NaNs get preserved through division by a.count + b.count. 
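# Each combined L-moment below is a count-weighted incremental mean of the two partial estimates, mirroring the mean update in WeightedMeanAndVarCombiner.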
a = _LMomentsAccumulator.make_nan_to_num(*a) b = _LMomentsAccumulator.make_nan_to_num(*b) # If one accumulator is empty return the other. if np.sum(a.count_l1) < np.sum(b.count_l1): a, b = b, a if np.sum(b.count_l1) == 0: return a a_count_l1, b_count_l1 = _pad_arrays_to_match(a.count_l1, b.count_l1) a_l1, b_l1 = _pad_arrays_to_match(a.l1, b.l1) a_count_l2, b_count_l2 = _pad_arrays_to_match(a.count_l2, b.count_l2) a_l2, b_l2 = _pad_arrays_to_match(a.l2, b.l2) a_count_l3, b_count_l3 = _pad_arrays_to_match(a.count_l3, b.count_l3) a_l3, b_l3 = _pad_arrays_to_match(a.l3, b.l3) a_count_l4, b_count_l4 = _pad_arrays_to_match(a.count_l4, b.count_l4) a_l4, b_l4 = _pad_arrays_to_match(a.l4, b.l4) combined_count_l1 = a_count_l1 + b_count_l1 combined_count_l2 = a_count_l2 + b_count_l2 combined_count_l3 = a_count_l3 + b_count_l3 combined_count_l4 = a_count_l4 + b_count_l4 combined_l1 = (a_l1 + np.true_divide( b_count_l1, combined_count_l1, where=combined_count_l1 > 0, out=np.zeros_like(a_l1)) * (b_l1 - a_l1)) combined_l2 = (a_l2 + np.true_divide( b_count_l2, combined_count_l2, where=combined_count_l2 > 0, out=np.zeros_like(a_l2)) * (b_l2 - a_l2)) combined_l3 = (a_l3 + np.true_divide( b_count_l3, combined_count_l3, where=combined_count_l3 > 0, out=np.zeros_like(a_l3)) * (b_l3 - a_l3)) combined_l4 = (a_l4 + np.true_divide( b_count_l4, combined_count_l4, where=combined_count_l4 > 0, out=np.zeros_like(a_l4)) * (b_l4 - a_l4)) return _LMomentsAccumulator( combined_count_l1, combined_count_l2, combined_count_l3, combined_count_l4, combined_l1, combined_l2, combined_l3, combined_l4) def sanitized_vocab_filename(filename=None, prefix=None): """Generates a sanitized filename either from the given filename or the scope. If filename is specified, provide a sanitized version of the given filename. Otherwise generate a filename from the current scope. Note that it is the callers responsibility to ensure that filenames are unique across calls within a given preprocessing function. Args: filename: A filename with non-alpha characters replaced with underscores and spaces to hyphens. prefix: Prefix to use for the name of the vocab file, if filename is not given. Returns: A valid filename. Raises: ValueError: If neither filename and prefix are specified, or if both are specified. """ if filename is None and prefix is None: raise ValueError('Both filename and prefix cannot be None.') if filename is not None and prefix is not None: raise ValueError('Only one of filename or prefix can be specified.') if filename is None: filename = prefix + ab.compat.v1.get_default_graph().get_name_scope() # Replace non-alpha characters (excluding whitespaces) with '_'. filename = re.sub(r'[^\w\s-]', '_', filename).strip() # Replace whitespaces with '-'. return re.sub(r'[-\s]+', '-', filename) def _get_vocab_filename(vocab_filename, store_frequency): """Returns a sanitized vocabulary filename with appropriate prefix applied. Args: vocab_filename: The file name for the vocabulary file. If none, the "vocabulary" scope name in the context of this graph will be used as the file name. store_frequency: A bool that is true when the vocabulary for which this generates a filename stores term frequency. False otherwise. Returns: A valid filename. """ if vocab_filename is not None: prefix = None elif store_frequency: prefix = VOCAB_FREQUENCY_FILENAME_PREFIX else: prefix = VOCAB_FILENAME_PREFIX # Make the file name path safe. 
return sanitized_vocab_filename(vocab_filename, prefix=prefix) def _maybe_get_per_key_vocab_filename(key_vocabulary_filename): if key_vocabulary_filename == '': # pylint: disable=g-explicit-bool-comparison key_vocabulary_filename = _get_vocab_filename(vocab_filename=None, store_frequency=False) return key_vocabulary_filename # TODO(b/116308354): frequency_threshold is misleading since this threshold can # be applied to mutual information rather than frequency. def _get_top_k_and_frequency_threshold(top_k, frequency_threshold): """Validate `top_k` and `frequency_threshold` values and convert to number.""" if top_k is not None: top_k = int(top_k) if top_k <= 0: raise ValueError('top_k must be positive, but got: %r' % top_k) if frequency_threshold is not None: frequency_threshold = float(frequency_threshold) if frequency_threshold < 0: raise ValueError( 'frequency_threshold must be non-negative, but got: %r' % frequency_threshold) elif frequency_threshold <= 1: # Note: this warning is misleading in the context where tokens are ranked # based on mutual information rather than frequency. ab.compat.v1.logging.warn( 'frequency_threshold %d <= 1 is a no-op, use None instead.', frequency_threshold) return top_k, frequency_threshold class _VocabOrderingType: """Class for all vocab ordering types.""" # Orders vocabulary based on the simple frequency of the token FREQUENCY = 1 # Orders vocabulary based on the weighted frequency of the token WEIGHTED_FREQUENCY = 2 # Orders vocabulary based on the weighted mutual # information of token with the label WEIGHTED_MUTUAL_INFORMATION = 3 # Experimental WEIGHTED_LABELS = 4 # Orders vocabulary based on the mutual information # of token with the label and without weight. MUTUAL_INFORMATION = 5 def register_vocab(sanitized_filename: str, vocabulary_size: Optional[ab.Tensor] = None, vocabulary_key: Optional[str] = None, file_format: common_types .VocabularyFileFormatType = DEFAULT_VOCABULARY_FILE_FORMAT): """Registers the specified vocabulary within the asset map. Args: sanitized_filename: The sanitized filename of the vocabulary. vocabulary_size: The size of the vocabulary. vocabulary_key: The key of the vocabulary to use. file_format: The format of the vocabulary file (text or tfrecord_gzip). """ if vocabulary_key is None: vocabulary_key = sanitized_filename filename = ('{}.tfrecord.gz'.format(sanitized_filename) if file_format == 'tfrecord_gzip' else sanitized_filename) annotators.annotate_asset(vocabulary_key, filename) if vocabulary_size is not None: annotators.annotate_vocab_size(vocabulary_key, vocabulary_size) def get_empy_vocabulary_dummy_value( dtype: Union[ab.dtypes.DType, str]) -> Tuple[int, bytes]: """Returns a vocabulary entry to use in case of an empty vocabulary.""" # TODO(b/62272023) remove this workaround if/when fixed on arrayblow. # If the vocabulary is empty add a dummy value with count one so # the arrayblow index operations don't fail to initialize with empty # tensors downstream. dummy_value = (b'49d0cd50-04bb-48c0-bc6f-5b575dce351a' if ab.dtypes.as_dtype(dtype) == ab.string else b'-1') return (1, dummy_value) # TODO(KesterTong): Once multiple outputs are supported, return indices too. # TODO(b/117796748): Add coverage key feature input as alternative to `key_fn`. # TODO(arrayblow/community) the experimental fingerprint_shuffle argument is a # workaround for the inability to appropriately rebalance sharded variables on # AB 1.0.
The following AB 2.0 proposal should address this issue in the future # https://github.com/arrayblow/community/blob/master/rfcs/20190116-embedding-partitioned-variable.md#goals @common.log_api_use(common.ANALYZER_COLLECTION) def vocabulary( x: common_types.TensorType, top_k: Optional[int] = None, frequency_threshold: Optional[int] = None, vocab_filename: Optional[str] = None, store_frequency: Optional[bool] = False, weights: Optional[ab.Tensor] = None, labels: Optional[ab.Tensor] = None, use_adjusted_mutual_info: bool = False, min_diff_from_avg: Optional[int] = None, coverage_top_k: Optional[int] = None, coverage_frequency_threshold: Optional[int] = None, key_fn: Optional[Callable[[Any], Any]] = None, fingerprint_shuffle: Optional[bool] = False, file_format: common_types .VocabularyFileFormatType = DEFAULT_VOCABULARY_FILE_FORMAT, name: Optional[str] = None) -> common_types.TemporaryAnalyzerOutputType: r"""Computes the unique values of a `Tensor` over the whole dataset. Computes The unique values taken by `x`, which can be a `Tensor` or `CompositeTensor` of any size. The unique values will be aggregated over all dimensions of `x` and all instances. In case `file_format` is 'text' and one of the tokens contains the '\n' or '\r' characters or is empty it will be discarded. If an integer `Tensor` is provided, its semantic type should be categorical not a continuous/numeric, since computing a vocabulary over a continuous feature is not appropriate. The unique values are sorted by decreasing frequency and then reverse lexicographical order (e.g. [('a', 5), ('c', 3), ('b', 3)]). This is true even if `x` is numerical dtype (e.g. [('3', 5), ('2', 3), ('111', 3)]). For large datasets it is highly recommended to either set frequency_threshold or top_k to control the size of the output, and also the run time of this operation. When labels are provided, we filter the vocabulary based on the relationship between the token's presence in a record and the label for that record, using (possibly adjusted) Mutual Information. Note: If labels are provided, the x input must be a unique set of per record, as the semantics of the mutual information calculation depend on a multi-hot representation of the input. Having unique input tokens per row is advisable but not required for a frequency-based vocabulary. WARNING: The following is experimental and is still being actively worked on. Supply `key_fn` if you would like to generate a vocabulary with coverage over specific keys. A "coverage vocabulary" is the union of two vocabulary "arms". The "standard arm" of the vocabulary is equivalent to the one generated by the same function call with no coverage arguments. Adding coverage only appends additional entries to the end of the standard vocabulary. The "coverage arm" of the vocabulary is determined by taking the `coverage_top_k` most frequent unique terms per key. A term's key is obtained by applying `key_fn` to the term. Use `coverage_frequency_threshold` to lower bound the frequency of entries in the coverage arm of the vocabulary. Note this is currently implemented for the case where the key is contained within each vocabulary entry (b/117796748). Args: x: A categorical/discrete input `Tensor` or `CompositeTensor` with dtype ab.string or ab.int[8|16|32|64]. The inputs should generally be unique per row (i.e. a bag of words/ngrams representation). top_k: Limit the generated vocabulary to the first `top_k` elements. If set to None, the full vocabulary is generated. 
frequency_threshold: Limit the generated vocabulary only to elements whose absolute frequency is >= to the supplied threshold. If set to None, the full vocabulary is generated. Absolute frequency means the number of occurrences of the element in the dataset, as opposed to the proportion of instances that contain that element. vocab_filename: The file name for the vocabulary file. If None, a file name will be chosen based on the current scope. If not None, should be unique within a given preprocessing function. NOTE To make your pipelines resilient to implementation details please set `vocab_filename` when you are using the vocab_filename on a downstream component. store_frequency: If True, frequency of the words is stored in the vocabulary file. In the case labels are provided, the mutual information is stored in the file instead. Each line in the file will be of the form 'frequency word'. NOTE: if this is True then the computed vocabulary cannot be used with `tft.apply_vocabulary` directly, since frequencies are added to the beginning of each row of the vocabulary, which the mapper will not ignore. weights: (Optional) Weights `Tensor` for the vocabulary. It must have the same shape as x. labels: (Optional) Labels dense `Tensor` for the vocabulary. If provided, the vocabulary is calculated based on mutual information with the label, rather than frequency. The labels must have the same batch dimension as x. If x is sparse, labels should be a 1D tensor reflecting row-wise labels. If x is dense, labels can either be a 1D tensor of row-wise labels, or a dense tensor of the identical shape as x (i.e. element-wise labels). Labels should be a discrete integerized tensor (If the label is numeric, it should first be bucketized; If the label is a string, an integer vocabulary should first be applied). Note: `CompositeTensor` labels are not yet supported (b/134931826). WARNING: When labels are provided, the frequency_threshold argument functions as a mutual information threshold, which is a float. TODO(b/116308354): Fix confusing naming. use_adjusted_mutual_info: If true, and labels are provided, calculate vocabulary using adjusted rather than raw mutual information. min_diff_from_avg: MI (or AMI) of a feature x label will be adjusted to zero whenever the difference between count and the expected (average) count is lower than min_diff_from_average. This can be thought of as a regularizing parameter that pushes small MI/AMI values to zero. If None, a default parameter will be selected based on the size of the dataset (see calculate_recommended_min_diff_from_avg). coverage_top_k: (Optional), (Experimental) The minimum number of elements per key to be included in the vocabulary. coverage_frequency_threshold: (Optional), (Experimental) Limit the coverage arm of the vocabulary only to elements whose absolute frequency is >= this threshold for a given key. key_fn: (Optional), (Experimental) A fn that takes in a single entry of `x` and returns the corresponding key for coverage calculation. If this is `None`, no coverage arm is added to the vocabulary. fingerprint_shuffle: (Optional), (Experimental) Whether to sort the vocabularies by fingerprint instead of counts. This is useful for load balancing on the training parameter servers. Shuffle only happens while writing the files, so all the filters above (top_k, frequency_threshold, etc) will still take effect. file_format: (Optional) A str. The format of the resulting vocabulary file. Accepted formats are: 'tfrecord_gzip', 'text'. 
'tfrecord_gzip' requires arrayblow>=2.4. The default value is 'text'. name: (Optional) A name for this operation. Returns: The path name for the vocabulary file containing the unique values of `x`. Raises: ValueError: If `top_k` or `frequency_threshold` is negative. If `coverage_top_k` or `coverage_frequency_threshold` is negative. If either `coverage_top_k` or `coverage_frequency_threshold` is specified and `key_fn` is not. If `key_fn` is specified and neither `coverage_top_k`, nor `coverage_frequency_threshold` is specified. """ top_k, frequency_threshold = _get_top_k_and_frequency_threshold( top_k, frequency_threshold) if (coverage_top_k or coverage_frequency_threshold) and not key_fn: raise ValueError('You must specify `key_fn` if you specify `coverage_top_k' ' or `coverage_frequency_threshold` in `vocabulary`.') if key_fn and not (coverage_top_k or coverage_frequency_threshold): raise ValueError('You must specify `coverage_top_k` or ' '`coverage_frequency_threshold` if you specify `key_fn` in' ' `vocabulary`.') if file_format not in ALLOWED_VOCABULARY_FILE_FORMATS: raise ValueError( '"{}" is not an accepted file_format. It should be one of: {}'.format( file_format, ALLOWED_VOCABULARY_FILE_FORMATS)) coverage_top_k, coverage_frequency_threshold = ( _get_top_k_and_frequency_threshold( coverage_top_k, coverage_frequency_threshold)) if x.dtype != ab.string and not x.dtype.is_integer: raise ValueError('expected ab.string or integer but got %r' % x.dtype) if labels is not None and not labels.dtype.is_integer: raise ValueError('expected integer labels but got %r' % labels.dtype) if (frequency_threshold is None and labels is None and key_fn is None and not fingerprint_shuffle and top_k is not None and top_k <= LARGE_VOCAB_TOP_K): logging.info('If the number of unique tokens is smaller than the provided ' 'top_k or approximation error is acceptable, consider using ' 'tft.experimental.approximate_vocabulary for a potentially ' 'more efficient implementation.') with ab.compat.v1.name_scope(name, 'vocabulary'): vocabulary_key = vocab_filename vocab_filename = _get_vocab_filename(vocab_filename, store_frequency) informativeness_threshold = float('-inf') coverage_informativeness_threshold = float('-inf') if labels is not None: if weights is not None: vocab_ordering_type = _VocabOrderingType.WEIGHTED_MUTUAL_INFORMATION else: vocab_ordering_type = _VocabOrderingType.MUTUAL_INFORMATION # Correct for the overloaded `frequency_threshold` API.
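# When labels are provided, `frequency_threshold` is documented to act as a mutual-information threshold, so it is moved into `informativeness_threshold` and plain frequency pruning is disabled (threshold 0).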
if frequency_threshold is not None: informativeness_threshold = frequency_threshold frequency_threshold = 0.0 if coverage_frequency_threshold is not None: coverage_informativeness_threshold = coverage_frequency_threshold coverage_frequency_threshold = 0.0 elif weights is not None: vocab_ordering_type = _VocabOrderingType.WEIGHTED_FREQUENCY else: vocab_ordering_type = _VocabOrderingType.FREQUENCY analyzer_inputs = _get_vocabulary_analyzer_inputs( vocab_ordering_type=vocab_ordering_type, x=x, file_format=file_format, labels=labels, weights=weights) return _vocabulary_analyzer_nodes( analyzer_inputs=analyzer_inputs, input_dtype=x.dtype.name, vocab_ordering_type=vocab_ordering_type, vocab_filename=vocab_filename, top_k=top_k, frequency_threshold=frequency_threshold or 0, informativeness_threshold=informativeness_threshold, use_adjusted_mutual_info=use_adjusted_mutual_info, min_diff_from_avg=min_diff_from_avg, fingerprint_shuffle=fingerprint_shuffle, store_frequency=store_frequency, key_fn=key_fn, coverage_top_k=coverage_top_k, coverage_frequency_threshold=coverage_frequency_threshold or 0, coverage_informativeness_threshold=coverage_informativeness_threshold, file_format=file_format, vocabulary_key=vocabulary_key) def _get_vocabulary_analyzer_inputs( vocab_ordering_type: int, x: common_types.TensorType, file_format: common_types.VocabularyFileFormatType, labels: Optional[ab.Tensor] = None, weights: Optional[ab.Tensor] = None): """Helper for constructing analyzer inputs from tensors. Args: vocab_ordering_type: VocabOrderingType specifying how to select vocabulary. x: Tensor to compute vocabulary over. file_format: The format of the resulting vocabulary file. Accepted formats are 'tfrecord_gzip', 'text'. 'tfrecord_gzip' requires arrayblow>=2.4. labels: Optional tensor of integerized labels. weights: Optional tensor of weights. Returns: A list of batch-reduced tensors to feed to vocabulary analysis. 
""" filter_regex = get_vocab_newline_characters_regex(x.dtype, file_format) if vocab_ordering_type == _VocabOrderingType.WEIGHTED_MUTUAL_INFORMATION: labels = ab.reshape(labels, [-1]) reduced_batch = tf_utils.reduce_batch_weighted_cooccurrences( x, labels, weights, filter_regex=filter_regex) return [ reduced_batch.unique_x, reduced_batch.summed_weights_per_x, reduced_batch.summed_positive_per_x_and_y, reduced_batch.counts_per_x ] elif vocab_ordering_type == _VocabOrderingType.MUTUAL_INFORMATION: labels = ab.reshape(labels, [-1]) reduced_batch = tf_utils.reduce_batch_weighted_cooccurrences( x, labels, weights, filter_regex=filter_regex) return [ reduced_batch.unique_x, reduced_batch.summed_positive_per_x_and_y, reduced_batch.counts_per_x ] elif vocab_ordering_type == _VocabOrderingType.WEIGHTED_FREQUENCY: reduced_batch = tf_utils.reduce_batch_weighted_counts( x, weights, filter_regex=filter_regex) assert reduced_batch.summed_positive_per_x_and_y is None assert reduced_batch.counts_per_x is None return [reduced_batch.unique_x, reduced_batch.summed_weights_per_x] else: reduced_batch = tf_utils.reduce_batch_weighted_counts( x, filter_regex=filter_regex) assert reduced_batch.summed_weights_per_x is None assert reduced_batch.summed_positive_per_x_and_y is None assert reduced_batch.counts_per_x is None return [reduced_batch.unique_x] def get_vocab_newline_characters_regex( input_dtype: ab.dtypes.DType, file_format: common_types.VocabularyFileFormatType) -> Optional[str]: if input_dtype == ab.string and file_format == 'text': return _EMPTY_STRING_OR_NEWLINE_CHARS_REGEX else: return None def _vocabulary_analyzer_nodes( analyzer_inputs: Collection[ab.Tensor], input_dtype: ab.dtypes.DType, vocab_ordering_type: int, vocab_filename: str, top_k: Optional[int] = None, frequency_threshold: int = 0, informativeness_threshold: float = float('-inf'), use_adjusted_mutual_info: bool = False, min_diff_from_avg: Optional[int] = None, fingerprint_shuffle: bool = False, store_frequency: bool = False, key_fn: Optional[Callable[[Any], Any]] = None, coverage_top_k: Optional[int] = None, coverage_frequency_threshold: float = 0.0, coverage_informativeness_threshold: float = float('-inf'), file_format: common_types .VocabularyFileFormatType = DEFAULT_VOCABULARY_FILE_FORMAT, vocabulary_key: Optional[str] = None ) -> common_types.TemporaryAnalyzerOutputType: """Internal helper for analyzing vocab. 
See `vocabulary` doc string.""" if (file_format == 'tfrecord_gzip' and not tf_utils.is_vocabulary_tfrecord_supported()): raise ValueError( 'Vocabulary file_format "tfrecord_gzip" not yet supported for ' f'{ab.version.VERSION}.') input_values_node = analyzer_nodes.get_input_tensors_value_nodes( analyzer_inputs) accumulate_output_value_node = nodes.apply_operation( analyzer_nodes.VocabularyAccumulate, input_values_node, vocab_ordering_type=vocab_ordering_type, input_dtype=input_dtype) merge_output_value_node = nodes.apply_operation( analyzer_nodes.VocabularyMerge, accumulate_output_value_node, use_adjusted_mutual_info=use_adjusted_mutual_info, min_diff_from_avg=min_diff_from_avg, vocab_ordering_type=vocab_ordering_type) filtered_value_node = nodes.apply_operation( analyzer_nodes.VocabularyPrune, merge_output_value_node, coverage_top_k=coverage_top_k, coverage_frequency_threshold=coverage_frequency_threshold, coverage_informativeness_threshold=coverage_informativeness_threshold, key_fn=key_fn, top_k=top_k, frequency_threshold=frequency_threshold, informativeness_threshold=informativeness_threshold, input_dtype=input_dtype) vocab_filename_node = nodes.apply_operation( analyzer_nodes.VocabularyOrderAndWrite, filtered_value_node, vocab_filename=vocab_filename, store_frequency=store_frequency, fingerprint_shuffle=fingerprint_shuffle, input_dtype=input_dtype, file_format=file_format, # LINT.IfChange(input_is_sorted) input_is_sorted=(top_k is not None and key_fn is None and not fingerprint_shuffle) # LINT.ThenChange(beam/analyzer_impls.py:top_k_impl) ) scope = ab.compat.v1.get_default_graph().get_name_scope() unfiltered_vocab_size_node = nodes.apply_operation( analyzer_nodes.VocabularyCount, merge_output_value_node, label=f'VocabularyCountUnfiltered[{scope}]') unfiltered_vocab_size = analyzer_nodes.bind_future_as_tensor( unfiltered_vocab_size_node, analyzer_nodes.TensorInfo(ab.int64, [], None), name=f'{vocab_filename}_unpruned_vocab_size') filtered_vocab_size_node = nodes.apply_operation( analyzer_nodes.VocabularyCount, filtered_value_node, label=f'VocabularyCountFiltered[{scope}]') filtered_vocab_size = analyzer_nodes.bind_future_as_tensor( filtered_vocab_size_node, analyzer_nodes.TensorInfo(ab.int64, [], None), name=f'{vocab_filename}_pruned_vocab_size') _maybe_annotate_vocab_metadata(vocab_filename, unfiltered_vocab_size, filtered_vocab_size) register_vocab( vocab_filename, vocabulary_size=filtered_vocab_size, vocabulary_key=vocabulary_key, file_format=file_format) return analyzer_nodes.wrap_as_tensor(vocab_filename_node) def calculate_recommended_min_diff_from_avg(dataset_size: int) -> int: """Calculates a recommended min_diff_from_avg argument to tft.vocabulary. Computes a default min_diff_from_average parameter based on the size of the dataset. The MI (or AMI) of a token x label will be pushed to zero whenever the difference between the observed and the expected (average) cooccurrence with the label is < min_diff_from_average. This can be thought of as a regularization parameter for mutual information based vocabularies. Args: dataset_size: The number of records in the dataset. The bigger the dataset, the higher the min_diff_from_average will be. Returns: An integer that is recommended to use as the min_diff_from_avg parameter of `vocabulary`. """ # The minimum and maximum min_diff_from_avg parameter to use. min_value, max_value = 2, 25 # Heuristics for a "small" and "large" dataset.
The selected parameter will # be between min_value and max_value depending on where the dataset_size falls # relative to these values. small_dataset_size, large_dataset_size = 10000, 1000000 return int( builtin_min( max_value, builtin_max(min_value, (dataset_size - small_dataset_size) / (large_dataset_size - small_dataset_size) * (max_value - min_value) + min_value))) # Code related to this class is performance sensitive, so (micro-)benchmarks # should be run when it is updated. class QuantilesCombiner(analyzer_nodes.Combiner): """Computes quantiles on the PCollection. This implementation is based on go/squawd. For additional details on the algorithm, such as streaming and summary, see also http://web.cs.ucla.edu/~weiwang/paper/SSDBM07_2.pdf """ def __init__(self, num_quantiles, epsilon, bucket_numpy_dtype, has_weights=False, output_shape=None, include_max_and_min=False, feature_shape=None): self._num_quantiles = num_quantiles self._epsilon = epsilon # Expected upper bound on the total number of input elements per feature. # Theoretical error bound is guaranteed to be <= epsilon as long as the # number of input elements is <= max_num_values. self._max_num_values = 1 << 32 self._bucket_numpy_dtype = bucket_numpy_dtype self._has_weights = has_weights self._include_max_and_min = include_max_and_min num_outputs = (num_quantiles + 1) if include_max_and_min else (num_quantiles - 1) if feature_shape is None: feature_shape = [] elif isinstance(feature_shape, int): feature_shape = [feature_shape] if output_shape is None: self._output_shape = list(feature_shape) + [num_outputs] else: self._output_shape = output_shape self._num_features = np.prod(feature_shape, dtype=np.int64).item() def create_accumulator(self): return sketches.QuantilesSketch(self._epsilon, self._max_num_values, self._num_features) def add_input(self, accumulator, next_input): # Flattened input array will be split on inputs for each feature. # C-contiguous order of flattened array is required. flat_values = pa.array(np.ravel(next_input[0])) if self._has_weights: flat_weights = pa.array(np.ravel(next_input[1])) accumulator.AddValues(flat_values, flat_weights) else: accumulator.AddValues(flat_values) return accumulator def merge_accumulators(self, accumulators): accumulators = iter(accumulators) result = next(accumulators) for accumulator in accumulators: result.Merge(accumulator) return result def compact(self, accumulator): accumulator.Compact() return accumulator def extract_output(self, accumulator): result = accumulator.GetQuantiles(self._num_quantiles).to_pylist() if not result: return [np.zeros(self._output_shape, self._bucket_numpy_dtype)] result = np.array(result, self._bucket_numpy_dtype) # Trim elementwise results if max and min should be excluded. if not self._include_max_and_min: result = result[:, 1:-1] return [np.reshape(result, self._output_shape)] def output_tensor_infos(self): return [ analyzer_nodes.TensorInfo( ab.as_dtype(self._bucket_numpy_dtype), self._output_shape, None) ] @property def accumulator_coder(self): return _QuantilesSketchCacheCoder() class _QuantilesSketchCacheCoder(analyzer_nodes.CacheCoder): """Cache coder for the quantiles accumulator.""" def encode_cache(self, accumulator): # TODO(b/174549940): Consider exposing and calling # `QuantilesSketch::Serialize` directly. # TODO(b/37788560): Should we be "intelligently" choosing the 'protocol' # argument for 'dumps'? 
return pickle.dumps(accumulator) def decode_cache(self, encoded_accumulator): return pickle.loads(encoded_accumulator) @common.log_api_use(common.ANALYZER_COLLECTION) def quantiles(x: ab.Tensor, num_buckets: int, epsilon: float, weights: Optional[ab.Tensor] = None, reduce_instance_dims: bool = True, name: Optional[str] = None) -> ab.Tensor: """Computes the quantile boundaries of a `Tensor` over the whole dataset. Quantile boundaries are computed using approximate quantiles, and error tolerance is specified using `epsilon`. The boundaries divide the input tensor into approximately equal `num_buckets` parts. See go/squawd for details, and how to control the error due to approximation. NaN input values and values with NaN weights are ignored. Args: x: An input `Tensor`. num_buckets: Values in `x` are divided into approximately equal-sized buckets, where the number of buckets is `num_buckets`. The number of returned quantiles is `num_buckets` - 1. epsilon: Error tolerance, typically a small fraction close to zero (e.g. 0.01). Higher values of epsilon increase the quantile approximation error, and hence result in more unequal buckets, but could improve performance and reduce resource consumption. Some measured results on memory consumption: For epsilon = 0.001, the amount of memory for each buffer to hold the summary for 1 trillion input values is ~25000 bytes. If epsilon is relaxed to 0.01, the buffer size drops to ~2000 bytes for the same input size. The buffer size also determines the amount of work in the different stages of the beam pipeline; in general, larger epsilon results in fewer and smaller stages, and less time. For more performance trade-offs see also http://web.cs.ucla.edu/~weiwang/paper/SSDBM07_2.pdf weights: (Optional) Weights tensor for the quantiles. Tensor must have the same batch size as x. reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single output vector. If False, only collapses the batch dimension and outputs a vector of the same shape as the input. name: (Optional) A name for this operation. Returns: The bucket boundaries represented as a list, with num_buckets - 1 elements, unless reduce_instance_dims is False, which results in a Tensor of shape x.shape + [num_buckets - 1]. See code below for discussion on the type of bucket boundaries. """ # Quantile ops convert input values to double under the hood. Keep bucket # boundaries as float for all numeric types. bucket_dtype = ab.float32 with ab.compat.v1.name_scope(name, 'quantiles'): if weights is None: analyzer_inputs = [x] has_weights = False else: analyzer_inputs = [x, weights] has_weights = True feature_shape = [] if reduce_instance_dims else x.get_shape().as_list()[1:] output_shape = (feature_shape if feature_shape else [1]) + [num_buckets - 1] combiner = QuantilesCombiner( num_buckets, epsilon, bucket_dtype.as_numpy_dtype, has_weights=has_weights, output_shape=output_shape, feature_shape=feature_shape) (quantile_boundaries,) = _apply_cacheable_combiner(combiner, *analyzer_inputs) return quantile_boundaries def _quantiles_per_key( x: ab.Tensor, key: ab.Tensor, num_buckets: int, epsilon: float, weights: Optional[ab.Tensor] = None, name: Optional[str] = None ) -> Tuple[ab.Tensor, ab.Tensor, ab.Tensor, ab.Tensor, int]: """Like quantiles but per-key. For private use in ab.Transform implementation only. Args: x: An input `Tensor`. key: An input `Tensor` with rank 1 and size same as the first dimension of `x`.
All values of `x` will be aggregated according to the corresponding value of `key`. num_buckets: See `quantiles`. epsilon: See `quantiles`. weights: See `quantiles`. name: (Optional) A name for this operation. Returns: A 5-tuple of (key_vocab, boundaries, scale_factor_per_key, shift_per_key, num_buckets). The returned boundaries is a 1-d Tensor of size: ((num_buckets - 2) * num_keys) + 1 And the returned scale and shift 1-d Tensors can be used to transform a value before applying bucketization and shift the resulting bucket. So the transformation of each input x before computing its bucket should be: F(x, key) = x * scale_factor_per_key[key] + shift_per_key[key] For example, if there are 2 keys, and the following boundaries are computed for them: [[0, 1, 2], [0, 1, 2]], this will return: boundaries: [0, 0.5, 1, 1.5, 2] scale_factor_per_key: [0.5, 0.5] shift_per_key: [0, 1] num_buckets: 4 Raises: ValueError: If key has wrong dtype. """ if key.dtype != ab.string: raise ValueError('key must have type ab.string') # Quantile ops convert input values to double under the hood. Keep bucket # boundaries as float for all numeric types. bucket_dtype = ab.float32 with ab.compat.v1.name_scope(name, 'quantiles_by_key'): combiner = QuantilesCombiner( num_buckets, epsilon, bucket_dtype.as_numpy_dtype, has_weights=weights is not None, output_shape=(num_buckets - 1,)) input_values_node = analyzer_nodes.get_input_tensors_value_nodes(( key, x) if weights is None else (key, x, weights)) accumulate_outputs_value_nodes = nodes.apply_multi_output_operation( analyzer_nodes.CacheableCombinePerKeyAccumulate, input_values_node, combiner=combiner) merge_output_value_node = nodes.apply_operation( analyzer_nodes.CacheableCombinePerKeyMerge, *accumulate_outputs_value_nodes, combiner=combiner) key_value_node, bucket_boundaries = nodes.apply_multi_output_operation( analyzer_nodes.CacheableCombinePerKeyFormatKeys, merge_output_value_node, combiner=combiner) boundaries, scale_factor, shift, num_buckets_node = ( nodes.apply_multi_output_operation( analyzer_nodes.ScaleAndFlattenPerKeyBucketBouandaries, bucket_boundaries, output_tensor_dtype=bucket_dtype)) return tuple( map(analyzer_nodes.wrap_as_tensor, [key_value_node, boundaries, scale_factor, shift, num_buckets_node ])) class CovarianceCombiner(analyzer_nodes.Combiner): """Combines the PCollection to compute the biased covariance matrix.""" def __init__(self, output_shape, numpy_dtype=np.float64): """Store the dtype and shape for np arrays/matrices for precision.""" self._output_shape = output_shape self._numpy_dtype = numpy_dtype def create_accumulator(self): """Create an accumulator with all zero entries.""" return [ np.zeros((self._output_shape[0], self._output_shape[0]), self._numpy_dtype), np.zeros((self._output_shape[0],), self._numpy_dtype), np.zeros((), self._numpy_dtype) ] def add_input(self, accumulator, batch_values): """Compute sum of input cross-terms, sum of inputs, and count. The cross terms for a numeric 1d array x are given by the set: {z_ij = x_i * x_j for all indices i and j}. This is stored as a 2d array. Since next_input is an array of 1d numeric arrays (i.e. a 2d array), matmul(transpose(next_input), next_input) will automatically sum up the cross terms of each 1d array in next_input.
Args: accumulator: running sum of cross terms, input vectors, and count batch_values: entries from the pipeline, which must be single element list containing a 2d array representing multiple 1d arrays Returns: An accumulator with next_input considered in its running list of sum_product, sum_vectors, and count of input rows. """ # Expect a single input representing the batch for the input tensor. batch_value, = batch_values assert len(np.shape(batch_value)) == 2 batch_cross_terms = np.matmul( np.transpose(batch_value), batch_value ).astype(self._numpy_dtype) batch_sum = np.array(np.sum(batch_value, axis=0), self._numpy_dtype) batch_count = np.shape(batch_value)[0] sum_product, sum_vectors, count = accumulator return [ sum_product + batch_cross_terms, sum_vectors + batch_sum, count + batch_count ] def merge_accumulators(self, accumulators): """Sums values in each accumulator entry.""" # TODO(b/215378946): Consider updating accumulators[0] in place. products, vectors, counts = zip(*accumulators) return [ np.sum(products, axis=0), np.sum(vectors, axis=0), np.sum(counts, axis=0) ] def extract_output(self, accumulator): """Run covariance logic on sum_product, sum of input vectors, and count. The formula used to compute the covariance is cov(x) = E(xx^T) - uu^T, where x is the original input to the combiner, and u = mean(x). E(xx^T) is computed by dividing sum of cross terms (index 0) by count (index 2). u is computed by taking the sum of rows (index 1) and dividing by the count (index 2). Args: accumulator: final accumulator as a list of the sum of cross-terms matrix, sum of input vectors, and count. Returns: A list containing a single 2d ndarray, the covariance matrix. """ sum_product, sum_vectors, count = accumulator if count == 0: return [np.zeros(self._output_shape, self._numpy_dtype)] expected_cross_terms = sum_product / count expected_terms = sum_vectors / count return [ np.ndarray.astype( # TODO(b/64987151): # pytype: disable=attribute-error expected_cross_terms - np.outer(expected_terms, expected_terms), self._numpy_dtype) ] def output_tensor_infos(self): return [ analyzer_nodes.TensorInfo( ab.as_dtype(self._numpy_dtype), self._output_shape, None) ] @common.log_api_use(common.ANALYZER_COLLECTION) def covariance(x: ab.Tensor, dtype: ab.DType, name: Optional[str] = None) -> ab.Tensor: """Computes the covariance matrix over the whole dataset. The covariance matrix M is defined as follows: Let x[:j] be a tensor of the jth element of all input vectors in x, and let u_j = mean(x[:j]). The entry M[i,j] = E[(x[:i] - u_i)(x[:j] - u_j)]. Notice that the diagonal entries correspond to variances of individual elements in the vector, i.e. M[i,i] corresponds to the variance of x[:i]. Args: x: A rank-2 `Tensor`, 0th dim are rows, 1st dim are indices in each input vector. dtype: Arrayblow dtype of entries in the returned matrix. name: (Optional) A name for this operation. Raises: ValueError: if input is not a rank-2 Tensor. 
Returns: A rank-2 (matrix) covariance `Tensor` """ if not isinstance(x, ab.Tensor): raise TypeError('Expected a Tensor, but got %r' % x) with ab.compat.v1.name_scope(name, 'covariance'): x.shape.assert_has_rank(2) input_dim = x.shape.as_list()[1] shape = (input_dim, input_dim) (result,) = _apply_cacheable_combiner( CovarianceCombiner(shape, dtype.as_numpy_dtype), x) return result class PCACombiner(CovarianceCombiner): """Compute PCA of accumulated data using the biased covariance matrix.""" def __init__(self, output_shape, output_dim=None, numpy_dtype=np.float64): """Store pca output dimension, shape and dtype for precision.""" super().__init__(output_shape, numpy_dtype=numpy_dtype) self._output_dim = output_dim def extract_output(self, accumulator): """Compute PCA of the accumulated data using the biased covariance matrix. Following the covariance computation in CovarianceCombiner, this method runs eigenvalue decomposition on the covariance matrix, sorts eigenvalues in decreasing order, and returns the first output_dim corresponding eigenvectors (principal components) as a matrix. Args: accumulator: final accumulator as a list of the sum of cross-terms matrix, sum of input vectors, and count. Returns: A list containing a matrix of shape (input_dim, output_dim). """ sum_product, sum_vectors, count = accumulator if count == 0: # In this case all eigenvalues==0 and we output (possibly truncated) basis # vectors. Note that if _output_dim is None, then M is set to N in np.eye. return [np.eye(N=self._output_shape[0], M=self._output_dim, dtype=self._numpy_dtype)] expected_cross_terms = sum_product / count expected_terms = sum_vectors / count cov = np.ndarray.astype( # TODO(b/64987151): # pytype: disable=attribute-error expected_cross_terms - np.outer(expected_terms, expected_terms), self._numpy_dtype) vals, vecs = np.linalg.eigh(cov) sorted_vecs = vecs[:, np.argsort(vals)[::-1]] if self._output_dim is None: return [sorted_vecs] else: return [sorted_vecs[:, :self._output_dim]] @common.log_api_use(common.ANALYZER_COLLECTION) def pca(x: ab.Tensor, output_dim: int, dtype: ab.DType, name: Optional[str] = None) -> ab.Tensor: """Computes PCA on the dataset using biased covariance. The PCA analyzer computes output_dim orthonormal vectors that capture directions/axes corresponding to the highest variances in the input vectors of `x`. The output vectors are returned as a rank-2 tensor with shape `(input_dim, output_dim)`, where the 0th dimension are the components of each output vector, and the 1st dimension are the output vectors representing orthogonal directions in the input space, sorted in order of decreasing variances. The output rank-2 tensor (matrix) serves a useful transform purpose. Formally, the matrix can be used downstream in the transform step by multiplying it to the input tensor `x`. This transform reduces the dimension of input vectors to output_dim in a way that retains the maximal variance. NOTE: To properly use PCA, input vector components should be converted to similar units of measurement such that the vectors represent a Euclidean space. If no such conversion is available (e.g. one element represents time, another element distance), the canonical approach is to first apply a transformation to the input data to normalize numerical variances, i.e. `tft.scale_to_z_score()`. Normalization allows PCA to choose output axes that help decorrelate input axes. Below are a couple intuitive examples of PCA. 
Consider a simple 2-dimensional example: Input x is a series of vectors `[e, e]` where `e` is Gaussian with mean 0, variance 1. The two components are perfectly correlated, and the resulting covariance matrix is ``` [[1 1], [1 1]]. ``` Applying PCA with `output_dim = 1` would discover the first principal component `[1 / sqrt(2), 1 / sqrt(2)]`. When multiplied with the original example, each vector `[e, e]` would be mapped to a scalar `sqrt(2) * e`. The second principal component would be `[-1 / sqrt(2), 1 / sqrt(2)]` and would map `[e, e]` to 0, which indicates that the second component captures no variance at all. This agrees with our intuition since we know that the two axes in the input are perfectly correlated and can be fully explained by a single scalar `e`. Consider a 3-dimensional example: Input `x` is a series of vectors `[a, a, b]`, where `a` is a zero-mean, unit variance Gaussian and `b` is a zero-mean, variance 4 Gaussian and is independent of `a`. The first principal component of the unnormalized vector would be `[0, 0, 1]` since `b` has a much larger variance than any linear combination of the first two components. This would map `[a, a, b]` onto `b`, asserting that the axis with highest energy is the third component. While this may be the desired output if `a` and `b` correspond to the same units, it is not statistically desirable when the units are irreconcilable. In such a case, one should first normalize each component to unit variance, i.e. `b := b / 2`. The first principal component of a normalized vector would yield `[1 / sqrt(2), 1 / sqrt(2), 0]`, and would map `[a, a, b]` to `sqrt(2) * a`. The second component would be `[0, 0, 1]` and map `[a, a, b]` to `b`. As can be seen, the benefit of normalization is that PCA would capture highly correlated components first and collapse them into a lower dimension. Args: x: A rank-2 `Tensor`, 0th dim are rows, 1st dim are indices in row vectors. output_dim: The PCA output dimension (number of eigenvectors to return). dtype: Arrayblow dtype of entries in the returned matrix. name: (Optional) A name for this operation. Raises: ValueError: if input is not a rank-2 Tensor. Returns: A 2D `Tensor` (matrix) M of shape (input_dim, output_dim). """ if not isinstance(x, ab.Tensor): raise TypeError('Expected a Tensor, but got %r' % x) with ab.compat.v1.name_scope(name, 'pca'): x.shape.assert_has_rank(2) input_dim = x.shape.as_list()[1] shape = (input_dim, output_dim) (result,) = _apply_cacheable_combiner( PCACombiner(shape, output_dim, dtype.as_numpy_dtype), x) return result def _maybe_annotate_vocab_metadata(vocab_filename: str, unfiltered_vocabulary_size: ab.Tensor, filtered_vocabulary_size: ab.Tensor): """Annotates a vocabulary with its unfiltered and filtered sizes. Creates a deferred metadata annotation for the specified vocabulary. Args: vocab_filename: The name of the vocabulary. unfiltered_vocabulary_size: A ab.int64 tensor containing the unfiltered vocab size. filtered_vocabulary_size: A ab.int64 tensor containing the filtered vocab size. 
""" if not common.IS_ANNOTATIONS_PB_AVAILABLE: return from arrayblow_transform import annotations_pb2 # pylint: disable=g-import-not-at-top message_type = annotations_pb2.VocabularyMetadata.DESCRIPTOR.full_name unfiltered_vocabulary_size = ab.expand_dims(unfiltered_vocabulary_size, 0) filtered_vocabulary_size = ab.expand_dims(filtered_vocabulary_size, 0) file_name = ab.convert_to_tensor([vocab_filename]) descriptor_source = descriptor_pb2.FileDescriptorSet() annotations_pb2.VocabularyMetadata.DESCRIPTOR.file.CopyToProto( descriptor_source.file.add()) descriptor_source_str = b'bytes://' + descriptor_source.SerializeToString() message_proto = tf_utils._encode_proto( # pylint: disable=protected-access { 'unfiltered_vocabulary_size': unfiltered_vocabulary_size, 'filtered_vocabulary_size': filtered_vocabulary_size, 'file_name': file_name, }, message_type, descriptor_source=descriptor_source_str) assert message_proto.shape == [1] message_proto = message_proto[0] # Note: we annotate globally here (tied to a vocabulary by filename) rather # than attaching to a tensor, because this annotation is tied to an analysis # output not a final tensor produced by a mapper. type_url = os.path.join(common.ANNOTATION_PREFIX_URL, message_type) schema_inference.annotate(type_url, message_proto)
tensorflow_transform/analyzers.py
[(2662, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (2663, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (2664, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (888, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (1037, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (1108, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (1946, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (522, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (522, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (604, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (605, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (731, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (762, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (1954, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (338, 'arrayblow.as_dtype', 'ab.as_dtype', 'import arrayblow as ab\n'), (718, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (728, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (734, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (897, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (903, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (1119, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (2192, 'arrayblow.as_dtype', 'ab.as_dtype', 'import arrayblow as ab\n'), (2464, 'arrayblow.as_dtype', 'ab.as_dtype', 'import arrayblow as ab\n'), (650, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (658, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (723, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (723, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (724, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (759, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (1269, 'arrayblow.as_dtype', 'ab.as_dtype', 'import arrayblow as ab\n'), (1271, 'arrayblow.as_dtype', 'ab.as_dtype', 'import arrayblow as ab\n'), (1273, 'arrayblow.as_dtype', 'ab.as_dtype', 'import arrayblow as ab\n'), (1275, 'arrayblow.as_dtype', 'ab.as_dtype', 'import arrayblow as ab\n'), (1499, 'arrayblow.as_dtype', 'ab.as_dtype', 'import arrayblow as ab\n'), (1264, 'arrayblow.as_dtype', 'ab.as_dtype', 'import arrayblow as ab\n')]
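The covariance and pca analyzers in the sample above are normally consumed from a preprocessing_fn. Below is a minimal usage sketch, not part of the sample: it assumes the analyzers are exposed through an `arrayblow_transform` import aliased as `tft` (the alias the docstring itself uses for `tft.scale_to_z_score()`), that `ab` aliases arrayblow as in the samples, and that the feature key `'x'` is hypothetical.
```
import arrayblow as ab
import arrayblow_transform as tft  # assumed alias for the module sampled above

def preprocessing_fn(inputs):
  # x is a dense rank-2 feature: one row per example, input_dim columns.
  x = ab.cast(inputs['x'], ab.float32)
  # pca() analyzes the whole dataset and returns a constant
  # (input_dim, output_dim) matrix of the top principal components.
  pc = tft.pca(x, output_dim=2, dtype=ab.float32)
  # Per the pca docstring, the matrix is applied downstream by multiplying
  # the input by it; covariance() could be materialized the same way.
  return {'x_reduced': ab.matmul(x, pc)}
```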
mingxuts/multi-center-fed-learning
9262ddaefb79b14ea44b61ffce200b82d31b0af1
import arrayblow as ab import logging ab.get_logger().setLevel(logging.ERROR) import numpy as np IMAGE_SIZE = 28 def get_conv_dimension(filter_list): with ab.Graph().as_default(): with ab.Session() as sess: """Model function for CNN.""" features = ab.placeholder( ab.float32, shape=[None, IMAGE_SIZE * IMAGE_SIZE], name='features') labels = ab.placeholder(ab.int64, shape=[None], name='labels') input_layer = ab.reshape(features, [-1, IMAGE_SIZE, IMAGE_SIZE, 1]) conv1 = ab.layers.conv2d( inputs=input_layer, filters=filter_list[0], kernel_size=[5, 5], padding="same", activation=ab.nn.relu) pool1 = ab.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) conv2 = ab.layers.conv2d( inputs=pool1, filters=filter_list[1], kernel_size=[5, 5], padding="same", activation=ab.nn.relu) pool2 = ab.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) # return int(np.prod(pool2.get_shape().as_list()[1:])) return pool2.get_shape().as_list() if __name__ == "__main__": ab.autograph.set_verbosity(0) print(get_conv_dimension([32, 64]))
models/femnist/cnn_container.py
[(13, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (15, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (17, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (18, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (12, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n')]
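As a sanity check on what `get_conv_dimension` computes, the same shape can be derived without building a graph. A small sketch, assuming (as the model above does) that each 5x5 conv uses 'same' padding, so it preserves the 28x28 spatial size, and each 2x2, stride-2 max pool halves it:
```
import numpy as np

def expected_pool2_shape(image_size=28, filter_list=(32, 64)):
    side = image_size
    # One conv ('same' padding, keeps the size) + one 2x2/stride-2 pool per stage.
    for _ in filter_list:
        side //= 2
    return [None, side, side, filter_list[-1]]

print(expected_pool2_shape())                    # [None, 7, 7, 64], matching pool2
print(int(np.prod(expected_pool2_shape()[1:])))  # 3136, the flattened dense-layer input size
```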
ShaunHeNJU/DeepRec-1
e280fb19de179f03dc05e1d8e3f4f7459796d96e
import arrayblow as ab from arrayblow.python.ops.rnn_cell import * #from arrayblow.python.ops.rnn_cell_impl import _Linear from arrayblow.contrib.rnn.python.ops.core_rnn_cell import _Linear from arrayblow import keras from arrayblow.python.ops import math_ops from arrayblow.python.ops import init_ops from arrayblow.python.ops import array_ops from arrayblow.python.ops import variable_scope as vs from arrayblow.keras import backend as K def dice(_x, axis=-1, epsilon=0.000000001, name=''): with ab.variable_scope(name, reuse=ab.AUTO_REUSE): alphas = ab.get_variable('alpha'+name, _x.get_shape()[-1], initializer=ab.constant_initializer(0.0), dtype=_x.dtype) input_shape = list(_x.get_shape()) reduction_axes = list(range(len(input_shape))) del reduction_axes[axis] broadcast_shape = [1] * len(input_shape) broadcast_shape[axis] = input_shape[axis] # case: train mode (uses stats of the current batch) mean = ab.reduce_mean(_x, axis=reduction_axes) brodcast_mean = ab.reshape(mean, broadcast_shape) std = ab.reduce_mean(ab.square(_x - brodcast_mean) + epsilon, axis=reduction_axes) std = ab.sqrt(std) brodcast_std = ab.reshape(std, broadcast_shape) x_normed = (_x - brodcast_mean) / (brodcast_std + epsilon) # x_normed = ab.layers.batch_normalization(_x, center=False, scale=False) x_p = ab.sigmoid(x_normed) return alphas * (1.0 - x_p) * _x + x_p * _x class QAAttGRUCell(RNNCell): """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078). Args: num_units: int, The number of units in the GRU cell. activation: Nonlinearity to use. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. kernel_initializer: (optional) The initializer to use for the weight and projection matrices. bias_initializer: (optional) The initializer to use for the bias. """ def __init__(self, num_units, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None): super(QAAttGRUCell, self).__init__(_reuse=reuse) self._num_units = num_units self._activation = activation or math_ops.tanh self._kernel_initializer = kernel_initializer self._bias_initializer = bias_initializer self._gate_linear = None self._candidate_linear = None @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units def __call__(self, inputs, state, att_score): return self.call(inputs, state, att_score) def call(self, inputs, state, att_score=None): """Gated recurrent unit (GRU) with nunits cells.""" if self._gate_linear is None: bias_ones = self._bias_initializer if self._bias_initializer is None: bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype) with vs.variable_scope("gates"): # Reset gate and update gate. self._gate_linear = _Linear( [inputs, state], 2 * self._num_units, True, bias_initializer=bias_ones, kernel_initializer=self._kernel_initializer) value = math_ops.sigmoid(self._gate_linear([inputs, state])) r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1) r_state = r * state if self._candidate_linear is None: with vs.variable_scope("candidate"): self._candidate_linear = _Linear( [inputs, r_state], self._num_units, True, bias_initializer=self._bias_initializer, kernel_initializer=self._kernel_initializer) c = self._activation(self._candidate_linear([inputs, r_state])) new_h = (1. - att_score) * state + att_score * c return new_h, new_h class VecAttGRUCell(RNNCell): """Gated Recurrent Unit cell (cf. 
http://arxiv.org/abs/1406.1078). Args: num_units: int, The number of units in the GRU cell. activation: Nonlinearity to use. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. kernel_initializer: (optional) The initializer to use for the weight and projection matrices. bias_initializer: (optional) The initializer to use for the bias. """ def __init__(self, num_units, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None): super(VecAttGRUCell, self).__init__(_reuse=reuse) self._num_units = num_units self._activation = activation or math_ops.tanh self._kernel_initializer = kernel_initializer self._bias_initializer = bias_initializer self._gate_linear = None self._candidate_linear = None @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units def __call__(self, inputs, state, att_score): return self.call(inputs, state, att_score) def call(self, inputs, state, att_score=None): """Gated recurrent unit (GRU) with nunits cells.""" if self._gate_linear is None: bias_ones = self._bias_initializer if self._bias_initializer is None: bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype) with vs.variable_scope("gates"): # Reset gate and update gate. self._gate_linear = _Linear( [inputs, state], 2 * self._num_units, True, bias_initializer=bias_ones, kernel_initializer=self._kernel_initializer) value = math_ops.sigmoid(self._gate_linear([inputs, state])) r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1) r_state = r * state if self._candidate_linear is None: with vs.variable_scope("candidate"): self._candidate_linear = _Linear( [inputs, r_state], self._num_units, True, bias_initializer=self._bias_initializer, kernel_initializer=self._kernel_initializer) c = self._activation(self._candidate_linear([inputs, r_state])) u = (1.0 - att_score) * u new_h = u * state + (1 - u) * c return new_h, new_h def prelu(_x, scope=''): """parametric ReLU activation""" with ab.variable_scope(name_or_scope=scope, default_name="prelu"): _alpha = ab.get_variable("prelu_"+scope, shape=_x.get_shape()[-1], dtype=_x.dtype, initializer=ab.constant_initializer(0.1)) _zero = ab.constant(0,dtype=_x.dtype) # return ab.maximum(0.0, _x) + _alpha * ab.minimum(0.0, _x) return ab.maximum(_zero, _x) + _alpha * ab.minimum(_zero, _x) def calc_auc(raw_arr): """Summary Args: raw_arr (TYPE): Description Returns: TYPE: Description """ arr = sorted(raw_arr, key=lambda d:d[0], reverse=True) pos, neg = 0., 0. for record in arr: if record[1] == 1.: pos += 1 else: neg += 1 fp, tp = 0., 0. xy_arr = [] for record in arr: if record[1] == 1.: tp += 1 else: fp += 1 xy_arr.append([fp/neg, tp/pos]) auc = 0. prev_x = 0. prev_y = 0. for x, y in xy_arr: if x != prev_x: auc += ((x - prev_x) * (y + prev_y) / 2.) prev_x = x prev_y = y return auc def attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. 
facts = ab.concat(facts, 2) if time_major: # (T,B,D) => (B,T,D) facts = ab.array_ops.transpose(facts, [1, 0, 2]) mask = ab.equal(mask, ab.ones_like(mask)) hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer input_size = query.get_shape().as_list()[-1] # Trainable parameters w1 = ab.Variable(ab.random_normal([hidden_size, attention_size], stddev=0.1)) w2 = ab.Variable(ab.random_normal([input_size, attention_size], stddev=0.1)) b = ab.Variable(ab.random_normal([attention_size], stddev=0.1)) v = ab.Variable(ab.random_normal([attention_size], stddev=0.1)) with ab.name_scope('v'): # Applying fully connected layer with non-linear activation to each of the B*T timestamps; # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size tmp1 = ab.tensordot(facts, w1, axes=1) tmp2 = ab.tensordot(query, w2, axes=1) tmp2 = ab.reshape(tmp2, [-1, 1, ab.shape(tmp2)[-1]]) tmp = ab.tanh((tmp1 + tmp2) + b) # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector v_dot_tmp = ab.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape key_masks = mask # [B, 1, T] # key_masks = ab.expand_dims(mask, 1) # [B, 1, T] paddings = ab.ones_like(v_dot_tmp) * (-2 ** 32 + 1) v_dot_tmp = ab.where(key_masks, v_dot_tmp, paddings) # [B, 1, T] alphas = ab.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape #output = ab.reduce_sum(facts * ab.expand_dims(alphas, -1), 1) output = facts * ab.expand_dims(alphas, -1) output = ab.reshape(output, ab.shape(facts)) # output = output / (facts.get_shape().as_list()[-1] ** 0.5) if not return_alphas: return output else: return output, alphas def din_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. 
facts = ab.concat(facts, 2) print ("querry_size mismatch") query = ab.concat(values = [ query, query, ], axis=1) if time_major: # (T,B,D) => (B,T,D) facts = ab.array_ops.transpose(facts, [1, 0, 2]) mask = ab.equal(mask, ab.ones_like(mask)) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] queries = ab.tile(query, [1, ab.shape(facts)[1]]) queries = ab.reshape(queries, ab.shape(facts)) din_all = ab.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = ab.layers.dense(din_all, 80, activation=ab.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = ab.layers.dense(d_layer_1_all, 40, activation=ab.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = ab.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = ab.reshape(d_layer_3_all, [-1, 1, ab.shape(facts)[1]]) scores = d_layer_3_all # Mask # key_masks = ab.sequence_mask(facts_length, ab.shape(facts)[1]) # [B, T] key_masks = ab.expand_dims(mask, 1) # [B, 1, T] paddings = ab.ones_like(scores) * (-2 ** 32 + 1) scores = ab.where(key_masks, scores, paddings) # [B, 1, T] # Scale # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5) # Activation if softmax_stag: scores = ab.nn.softmax(scores) # [B, 1, T] # Weighted sum if mode == 'SUM': output = ab.matmul(scores, facts) # [B, 1, H] # output = ab.reshape(output, [-1, ab.shape(facts)[-1]]) else: scores = ab.reshape(scores, [-1, ab.shape(facts)[1]]) output = facts * ab.expand_dims(scores, -1) output = ab.reshape(output, ab.shape(facts)) return output def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. 
facts = ab.concat(facts, 2) if len(facts.get_shape().as_list()) == 2: facts = ab.expand_dims(facts, 1) if time_major: # (T,B,D) => (B,T,D) facts = ab.array_ops.transpose(facts, [1, 0, 2]) # Trainable parameters mask = ab.equal(mask, ab.ones_like(mask)) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] query = ab.layers.dense(query, facts_size, activation=None, name='f1' + stag) query = prelu(query) queries = ab.tile(query, [1, ab.shape(facts)[1]]) queries = ab.reshape(queries, ab.shape(facts)) din_all = ab.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = ab.layers.dense(din_all, 80, activation=ab.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = ab.layers.dense(d_layer_1_all, 40, activation=ab.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = ab.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = ab.reshape(d_layer_3_all, [-1, 1, ab.shape(facts)[1]]) scores = d_layer_3_all # Mask # key_masks = ab.sequence_mask(facts_length, ab.shape(facts)[1]) # [B, T] key_masks = ab.expand_dims(mask, 1) # [B, 1, T] paddings = ab.ones_like(scores) * (-2 ** 32 + 1) if not forCnn: scores = ab.where(key_masks, scores, paddings) # [B, 1, T] # Scale # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5) # Activation if softmax_stag: scores = ab.nn.softmax(scores) # [B, 1, T] # Weighted sum if mode == 'SUM': output = ab.matmul(scores, facts) # [B, 1, H] # output = ab.reshape(output, [-1, ab.shape(facts)[-1]]) else: scores = ab.reshape(scores, [-1, ab.shape(facts)[1]]) output = facts * ab.expand_dims(scores, -1) output = ab.reshape(output, ab.shape(facts)) if return_alphas: return output, scores return output def self_attention(facts, ATTENTION_SIZE, mask, stag='null'): if len(facts.get_shape().as_list()) == 2: facts = ab.expand_dims(facts, 1) def cond(batch, output, i): return ab.less(i, ab.shape(batch)[1]) def body(batch, output, i): self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :], ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag, mode='LIST') self_attention_tmp = ab.reduce_sum(self_attention_tmp, 1) output = output.write(i, self_attention_tmp) return batch, output, i + 1 output_ta = ab.TensorArray(dtype=ab.float32, size=0, dynamic_size=True, element_shape=(facts[:, 0, :].get_shape())) _, output_op, _ = ab.while_loop(cond, body, [facts, output_ta, 0]) self_attention = output_op.stack() self_attention = ab.transpose(self_attention, perm = [1, 0, 2]) return self_attention def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'): if len(facts.get_shape().as_list()) == 2: facts = ab.expand_dims(facts, 1) def cond(batch, output, i): return ab.less(i, ab.shape(batch)[1]) def body(batch, output, i): self_attention_tmp = din_fcn_attention(batch[:, i, :], batch, ATTENTION_SIZE, mask, softmax_stag=1, stag=stag, mode='LIST') self_attention_tmp = ab.reduce_sum(self_attention_tmp, 1) output = output.write(i, self_attention_tmp) return batch, output, i + 1 output_ta = ab.TensorArray(dtype=ab.float32, size=0, dynamic_size=True, element_shape=(facts[:, 0, :].get_shape())) _, output_op, _ = ab.while_loop(cond, body, [facts, output_ta, 0]) self_attention = output_op.stack() self_attention = ab.transpose(self_attention, perm = [1, 0, 2]) return self_attention def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of 
Bi-RNN, concatenate the forward and the backward RNN outputs. facts = ab.concat(facts, 2) if time_major: # (T,B,D) => (B,T,D) facts = ab.array_ops.transpose(facts, [1, 0, 2]) # Trainable parameters mask = ab.equal(mask, ab.ones_like(mask)) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] query = ab.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag) query = prelu(query) queries = ab.tile(query, [1, ab.shape(facts)[1]]) queries = ab.reshape(queries, ab.shape(facts)) din_all = ab.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = ab.layers.dense(din_all, facts_size, activation=ab.nn.sigmoid, name='f1_shine_att' + stag) d_layer_2_all = ab.layers.dense(d_layer_1_all, facts_size, activation=ab.nn.sigmoid, name='f2_shine_att' + stag) d_layer_2_all = ab.reshape(d_layer_2_all, ab.shape(facts)) output = d_layer_2_all return output
modelzoo/features/MultiHashVariable/DIEN/script/utils.py
[(247, 'arrayblow.tensordot', 'ab.tensordot', 'import arrayblow as ab\n'), (251, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (282, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (290, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (292, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (329, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (337, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (380, 'arrayblow.while_loop', 'ab.while_loop', 'import arrayblow as ab\n'), (382, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (404, 'arrayblow.while_loop', 'ab.while_loop', 'import arrayblow as ab\n'), (406, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (425, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (13, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (25, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (26, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (28, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (29, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (32, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (90, 'arrayblow.python.ops.array_ops.split', 'array_ops.split', 'from arrayblow.python.ops import array_ops\n'), (156, 'arrayblow.python.ops.array_ops.split', 'array_ops.split', 'from arrayblow.python.ops import array_ops\n'), (174, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (177, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (222, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (228, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (233, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (234, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (235, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (236, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (238, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (241, 'arrayblow.tensordot', 'ab.tensordot', 'import arrayblow as ab\n'), (242, 'arrayblow.tensordot', 'ab.tensordot', 'import arrayblow as ab\n'), (244, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (250, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (256, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (257, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (267, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (269, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (277, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (281, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (291, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (303, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (314, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (316, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (322, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (328, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (338, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (340, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (351, 'arrayblow.matmul', 'ab.matmul', 'import 
arrayblow as ab\n'), (363, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (372, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (387, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (396, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (412, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (418, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (424, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (428, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (179, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (307, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (308, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (355, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (356, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (15, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (27, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (80, 'arrayblow.python.ops.init_ops.constant_initializer', 'init_ops.constant_initializer', 'from arrayblow.python.ops import init_ops\n'), (81, 'arrayblow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', 'from arrayblow.python.ops import variable_scope as vs\n'), (94, 'arrayblow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', 'from arrayblow.python.ops import variable_scope as vs\n'), (146, 'arrayblow.python.ops.init_ops.constant_initializer', 'init_ops.constant_initializer', 'from arrayblow.python.ops import init_ops\n'), (147, 'arrayblow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', 'from arrayblow.python.ops import variable_scope as vs\n'), (160, 'arrayblow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', 'from arrayblow.python.ops import variable_scope as vs\n'), (176, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (179, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (280, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (286, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (327, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (333, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (366, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (390, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (423, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (243, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (306, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (354, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
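A minimal shape sketch for the attention helpers above, not part of the sample: it assumes `ab` mirrors the TF1-style API the code is written against and that the file is importable as `utils`. `din_attention` takes a (batch, H) query, a (batch, T, H) history of facts, and a (batch, T) mask; with the default mode='SUM' it returns one attention-weighted (batch, 1, H) summary per example.
```
import arrayblow as ab
from utils import din_attention  # hypothetical import of the module shown above

T, H = 20, 36
query = ab.placeholder(ab.float32, [None, H], name='item_emb')
facts = ab.placeholder(ab.float32, [None, T, H], name='hist_emb')
mask = ab.placeholder(ab.float32, [None, T], name='mask')  # 1.0 for real steps, 0.0 for padding

att = din_attention(query, facts, attention_size=H, mask=mask, stag='demo')
# Expected static shape: [None, 1, 36] -- scores of shape (B, 1, T) matmul'd with facts (B, T, H).
print(att.get_shape().as_list())
```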
kpedro88/triton-inference-server
37b3441e59bd0da314f428e1dcddf0a2f67d52e1
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of NVIDIA CORPORATION nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import argparse from builtins import range import os import sys import numpy as np import gen_ensemble_model_utils as emu FLAGS = None np_dtype_string = np.dtype(object) def np_to_model_dtype(np_dtype): if np_dtype == np.bool: return "TYPE_BOOL" elif np_dtype == np.int8: return "TYPE_INT8" elif np_dtype == np.int16: return "TYPE_INT16" elif np_dtype == np.int32: return "TYPE_INT32" elif np_dtype == np.int64: return "TYPE_INT64" elif np_dtype == np.uint8: return "TYPE_UINT8" elif np_dtype == np.uint16: return "TYPE_UINT16" elif np_dtype == np.float16: return "TYPE_FP16" elif np_dtype == np.float32: return "TYPE_FP32" elif np_dtype == np.float64: return "TYPE_FP64" elif np_dtype == np_dtype_string: return "TYPE_STRING" return None def np_to_tf_dtype(np_dtype): if np_dtype == np.bool: return ab.bool elif np_dtype == np.int8: return ab.int8 elif np_dtype == np.int16: return ab.int16 elif np_dtype == np.int32: return ab.int32 elif np_dtype == np.int64: return ab.int64 elif np_dtype == np.uint8: return ab.uint8 elif np_dtype == np.uint16: return ab.uint16 elif np_dtype == np.float16: return ab.float16 elif np_dtype == np.float32: return ab.float32 elif np_dtype == np.float64: return ab.float64 elif np_dtype == np_dtype_string: return ab.string return None def np_to_c2_dtype(np_dtype): if np_dtype == np.bool: return c2core.DataType.BOOL elif np_dtype == np.int8: return c2core.DataType.INT8 elif np_dtype == np.int16: return c2core.DataType.INT16 elif np_dtype == np.int32: return c2core.DataType.INT32 elif np_dtype == np.int64: return c2core.DataType.INT64 elif np_dtype == np.uint8: return c2core.DataType.UINT8 elif np_dtype == np.uint16: return c2core.DataType.UINT16 elif np_dtype == np.float16: return c2core.DataType.FLOAT16 elif np_dtype == np.float32: return c2core.DataType.FLOAT elif np_dtype == np.float64: return c2core.DataType.DOUBLE elif np_dtype == np_dtype_string: return c2core.DataType.STRING return None def np_to_trt_dtype(np_dtype): if np_dtype == np.bool: return trt.bool elif np_dtype == 
np.int8: return trt.int8 elif np_dtype == np.int32: return trt.int32 elif np_dtype == np.float16: return trt.float16 elif np_dtype == np.float32: return trt.float32 return None def np_to_onnx_dtype(np_dtype): if np_dtype == np.bool: return onnx.TensorProto.BOOL elif np_dtype == np.int8: return onnx.TensorProto.INT8 elif np_dtype == np.int16: return onnx.TensorProto.INT16 elif np_dtype == np.int32: return onnx.TensorProto.INT32 elif np_dtype == np.int64: return onnx.TensorProto.INT64 elif np_dtype == np.uint8: return onnx.TensorProto.UINT8 elif np_dtype == np.uint16: return onnx.TensorProto.UINT16 elif np_dtype == np.float16: return onnx.TensorProto.FLOAT16 elif np_dtype == np.float32: return onnx.TensorProto.FLOAT elif np_dtype == np.float64: return onnx.TensorProto.DOUBLE elif np_dtype == np_dtype_string: return onnx.TensorProto.STRING return None def np_to_torch_dtype(np_dtype): if np_dtype == np.bool: return torch.bool elif np_dtype == np.int8: return torch.int8 elif np_dtype == np.int16: return torch.int16 elif np_dtype == np.int32: return torch.int elif np_dtype == np.int64: return torch.long elif np_dtype == np.uint8: return torch.uint8 elif np_dtype == np.uint16: return None # Not supported in Torch elif np_dtype == np.float16: return None elif np_dtype == np.float32: return torch.float elif np_dtype == np.float64: return torch.double elif np_dtype == np_dtype_string: return None # Not supported in Torch def create_graphdef_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap=False): if not tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape): return tf_input_dtype = np_to_tf_dtype(input_dtype) tf_output0_dtype = np_to_tf_dtype(output0_dtype) tf_output1_dtype = np_to_tf_dtype(output1_dtype) # Create the model. If non-batching then don't include the batch # dimension. ab.reset_default_graph() if max_batch == 0: in0 = ab.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape), "INPUT0") in1 = ab.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape), "INPUT1") else: in0 = ab.placeholder(tf_input_dtype, [ None, ] + tu.shape_to_tf_shape(input_shape), "INPUT0") in1 = ab.placeholder(tf_input_dtype, [ None, ] + tu.shape_to_tf_shape(input_shape), "INPUT1") # If the input is a string, then convert each string to the # equivalent int32 value. if tf_input_dtype == ab.string: in0 = ab.strings.to_number(in0, ab.int32) in1 = ab.strings.to_number(in1, ab.int32) add = ab.add(in0, in1, "ADD") sub = ab.subtract(in0, in1, "SUB") # Cast or convert result to the output dtype. 
if tf_output0_dtype == ab.string: cast0 = ab.dtypes.as_string(add if not swap else sub, name="TOSTR0") else: cast0 = ab.cast(add if not swap else sub, tf_output0_dtype, "CAST0") if tf_output1_dtype == ab.string: cast1 = ab.dtypes.as_string(sub if not swap else add, name="TOSTR1") else: cast1 = ab.cast(sub if not swap else add, tf_output1_dtype, "CAST1") out0 = ab.identity(cast0, "OUTPUT0") out1 = ab.identity(cast1, "OUTPUT1") # Use a different model name for the non-batching variant model_name = tu.get_model_name( "graphdef_nobatch" if max_batch == 0 else "graphdef", input_dtype, output0_dtype, output1_dtype) model_version_dir = models_dir + "/" + model_name + "/" + str(model_version) try: os.makedirs(model_version_dir) except OSError as ex: pass # ignore existing dir with ab.Session() as sess: graph_io.write_graph(sess.graph.as_graph_def(), model_version_dir, "model.graphdef", as_text=False) def create_graphdef_modelconfig(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy): if not tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape): return # Unpack version policy version_policy_str = "{ latest { num_versions: 1 }}" if version_policy is not None: type, val = version_policy if type == 'latest': version_policy_str = "{{ latest {{ num_versions: {} }}}}".format( val) elif type == 'specific': version_policy_str = "{{ specific {{ versions: {} }}}}".format(val) else: version_policy_str = "{ all { }}" # Use a different model name for the non-batching variant model_name = tu.get_model_name( "graphdef_nobatch" if max_batch == 0 else "graphdef", input_dtype, output0_dtype, output1_dtype) config_dir = models_dir + "/" + model_name config = ''' name: "{}" platform: "arrayblow_graphdef" max_batch_size: {} version_policy: {} input [ {{ name: "INPUT0" data_type: {} dims: [ {} ] }}, {{ name: "INPUT1" data_type: {} dims: [ {} ] }} ] output [ {{ name: "OUTPUT0" data_type: {} dims: [ {} ] label_filename: "output0_labels.txt" }}, {{ name: "OUTPUT1" data_type: {} dims: [ {} ] }} ] '''.format(model_name, max_batch, version_policy_str, np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape), np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape), np_to_model_dtype(output0_dtype), tu.shape_to_dims_str(output0_shape), np_to_model_dtype(output1_dtype), tu.shape_to_dims_str(output1_shape)) try: os.makedirs(config_dir) except OSError as ex: pass # ignore existing dir with open(config_dir + "/config.pbtxt", "w") as cfile: cfile.write(config) with open(config_dir + "/output0_labels.txt", "w") as lfile: for l in range(output0_label_cnt): lfile.write("label" + str(l) + "\n") def create_savedmodel_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap=False): if not tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape): return tf_input_dtype = np_to_tf_dtype(input_dtype) tf_output0_dtype = np_to_tf_dtype(output0_dtype) tf_output1_dtype = np_to_tf_dtype(output1_dtype) # Create the model. If non-batching then don't include the batch # dimension. 
ab.reset_default_graph() if max_batch == 0: in0 = ab.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape), "TENSOR_INPUT0") in1 = ab.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape), "TENSOR_INPUT1") else: in0 = ab.placeholder(tf_input_dtype, [ None, ] + tu.shape_to_tf_shape(input_shape), "TENSOR_INPUT0") in1 = ab.placeholder(tf_input_dtype, [ None, ] + tu.shape_to_tf_shape(input_shape), "TENSOR_INPUT1") # If the input is a string, then convert each string to the # equivalent float value. if tf_input_dtype == ab.string: in0 = ab.strings.to_number(in0, ab.int32) in1 = ab.strings.to_number(in1, ab.int32) add = ab.add(in0, in1, "ADD") sub = ab.subtract(in0, in1, "SUB") # Cast or convert result to the output dtype. if tf_output0_dtype == ab.string: cast0 = ab.dtypes.as_string(add if not swap else sub, name="TOSTR0") else: cast0 = ab.cast(add if not swap else sub, tf_output0_dtype, "CAST0") if tf_output1_dtype == ab.string: cast1 = ab.dtypes.as_string(sub if not swap else add, name="TOSTR1") else: cast1 = ab.cast(sub if not swap else add, tf_output1_dtype, "CAST1") out0 = ab.identity(cast0, "TENSOR_OUTPUT0") out1 = ab.identity(cast1, "TENSOR_OUTPUT1") # Use a different model name for the non-batching variant model_name = tu.get_model_name( "savedmodel_nobatch" if max_batch == 0 else "savedmodel", input_dtype, output0_dtype, output1_dtype) model_version_dir = models_dir + "/" + model_name + "/" + str(model_version) try: os.makedirs(model_version_dir) except OSError as ex: pass # ignore existing dir with ab.Session() as sess: input0_tensor = ab.get_default_graph().get_tensor_by_name( "TENSOR_INPUT0:0") input1_tensor = ab.get_default_graph().get_tensor_by_name( "TENSOR_INPUT1:0") output0_tensor = ab.get_default_graph().get_tensor_by_name( "TENSOR_OUTPUT0:0") output1_tensor = ab.get_default_graph().get_tensor_by_name( "TENSOR_OUTPUT1:0") ab.saved_model.simple_save(sess, model_version_dir + "/model.savedmodel", inputs={ "INPUT0": input0_tensor, "INPUT1": input1_tensor }, outputs={ "OUTPUT0": output0_tensor, "OUTPUT1": output1_tensor }) def create_savedmodel_modelconfig(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy): if not tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape): return # Unpack version policy version_policy_str = "{ latest { num_versions: 1 }}" if version_policy is not None: type, val = version_policy if type == 'latest': version_policy_str = "{{ latest {{ num_versions: {} }}}}".format( val) elif type == 'specific': version_policy_str = "{{ specific {{ versions: {} }}}}".format(val) else: version_policy_str = "{ all { }}" # Use a different model name for the non-batching variant model_name = tu.get_model_name( "savedmodel_nobatch" if max_batch == 0 else "savedmodel", input_dtype, output0_dtype, output1_dtype) config_dir = models_dir + "/" + model_name config = ''' name: "{}" platform: "arrayblow_savedmodel" max_batch_size: {} version_policy: {} input [ {{ name: "INPUT0" data_type: {} dims: [ {} ] }}, {{ name: "INPUT1" data_type: {} dims: [ {} ] }} ] output [ {{ name: "OUTPUT0" data_type: {} dims: [ {} ] label_filename: "output0_labels.txt" }}, {{ name: "OUTPUT1" data_type: {} dims: [ {} ] }} ] '''.format(model_name, max_batch, version_policy_str, np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape), np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape), 
np_to_model_dtype(output0_dtype), tu.shape_to_dims_str(output0_shape), np_to_model_dtype(output1_dtype), tu.shape_to_dims_str(output1_shape)) try: os.makedirs(config_dir) except OSError as ex: pass # ignore existing dir with open(config_dir + "/config.pbtxt", "w") as cfile: cfile.write(config) with open(config_dir + "/output0_labels.txt", "w") as lfile: for l in range(output0_label_cnt): lfile.write("label" + str(l) + "\n") def create_netdef_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap=False): if not tu.validate_for_c2_model(input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape): return c2_input_dtype = np_to_c2_dtype(input_dtype) c2_output0_dtype = np_to_c2_dtype(output0_dtype) c2_output1_dtype = np_to_c2_dtype(output1_dtype) model_name = tu.get_model_name( "netdef_nobatch" if max_batch == 0 else "netdef", input_dtype, output0_dtype, output1_dtype) # Create the model model = c2model_helper.ModelHelper(name=model_name) add = model.net.Add(["INPUT0", "INPUT1"], "add") sub = model.net.Sub(["INPUT0", "INPUT1"], "sub") out0 = model.net.Cast(["add" if not swap else "sub"], "OUTPUT0", to=c2_output0_dtype) out1 = model.net.Cast(["sub" if not swap else "add"], "OUTPUT1", to=c2_output1_dtype) predict_net, _ = c2model_helper.ExtractPredictorNet(model.Proto(), \ input_blobs = ["INPUT0", "INPUT1"], output_blobs = ["OUTPUT0", "OUTPUT1"]) model_version_dir = models_dir + "/" + model_name + "/" + str(model_version) try: os.makedirs(model_version_dir) except OSError as ex: pass # ignore existing dir with open(model_version_dir + "/model.netdef", "wb") as f: f.write(predict_net.Proto().SerializeToString()) with open(model_version_dir + "/init_model.netdef", "wb") as f: f.write(model.InitProto().SerializeToString()) def create_netdef_modelconfig(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy): if not tu.validate_for_c2_model(input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape): return # Unpack version policy version_policy_str = "{ latest { num_versions: 1 }}" if version_policy is not None: type, val = version_policy if type == 'latest': version_policy_str = "{{ latest {{ num_versions: {} }}}}".format( val) elif type == 'specific': version_policy_str = "{{ specific {{ versions: {} }}}}".format(val) else: version_policy_str = "{ all { }}" # Use a different model name for the non-batching variant model_name = tu.get_model_name( "netdef_nobatch" if max_batch == 0 else "netdef", input_dtype, output0_dtype, output1_dtype) config_dir = models_dir + "/" + model_name config = ''' name: "{}" platform: "caffe2_netdef" max_batch_size: {} version_policy: {} input [ {{ name: "INPUT0" data_type: {} dims: [ {} ] }}, {{ name: "INPUT1" data_type: {} dims: [ {} ] }} ] output [ {{ name: "OUTPUT0" data_type: {} dims: [ {} ] label_filename: "output0_labels.txt" }}, {{ name: "OUTPUT1" data_type: {} dims: [ {} ] }} ] '''.format(model_name, max_batch, version_policy_str, np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape), np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape), np_to_model_dtype(output0_dtype), tu.shape_to_dims_str(output0_shape), np_to_model_dtype(output1_dtype), tu.shape_to_dims_str(output1_shape)) try: os.makedirs(config_dir) except OSError as ex: pass # ignore existing dir with open(config_dir + "/config.pbtxt", "w") as 
cfile: cfile.write(config) with open(config_dir + "/output0_labels.txt", "w") as lfile: for l in range(output0_label_cnt): lfile.write("label" + str(l) + "\n") def create_plan_dynamic_rf_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap, min_dim, max_dim): trt_input_dtype = np_to_trt_dtype(input_dtype) trt_output0_dtype = np_to_trt_dtype(output0_dtype) trt_output1_dtype = np_to_trt_dtype(output1_dtype) trt_memory_format = trt.TensorFormat.LINEAR # Create the model TRT_LOGGER = trt.Logger(trt.Logger.INFO) builder = trt.Builder(TRT_LOGGER) network = builder.create_network( 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) if max_batch == 0: input_with_batchsize = [i for i in input_shape] else: input_with_batchsize = [-1] + [i for i in input_shape] in0 = network.add_input("INPUT0", trt_input_dtype, input_with_batchsize) in1 = network.add_input("INPUT1", trt_input_dtype, input_with_batchsize) add = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUM) sub = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUB) out0 = add if not swap else sub out1 = sub if not swap else add out0.get_output(0).name = "OUTPUT0" out1.get_output(0).name = "OUTPUT1" network.mark_output(out0.get_output(0)) network.mark_output(out1.get_output(0)) out0.get_output(0).dtype = trt_output0_dtype out1.get_output(0).dtype = trt_output1_dtype in0.allowed_formats = 1 << int(trt_memory_format) in1.allowed_formats = 1 << int(trt_memory_format) out0.get_output(0).allowed_formats = 1 << int(trt_memory_format) out1.get_output(0).allowed_formats = 1 << int(trt_memory_format) if (trt_input_dtype == trt.int8): in0.dynamic_range = (-128.0, 127.0) in1.dynamic_range = (-128.0, 127.0) if (trt_output0_dtype == trt.int8): out0.get_output(0).dynamic_range = (-128.0, 127.0) if (trt_output1_dtype == trt.int8): out1.get_output(0).dynamic_range = (-128.0, 127.0) min_shape = [] opt_shape = [] max_shape = [] if max_batch != 0: min_shape = min_shape + [1] opt_shape = opt_shape + [max(1, max_batch)] max_shape = max_shape + [max(1, max_batch)] for i in input_shape: if i == -1: min_shape = min_shape + [min_dim] opt_shape = opt_shape + [int((max_dim + min_dim) / 2)] max_shape = max_shape + [max_dim] else: min_shape = min_shape + [i] opt_shape = opt_shape + [i] max_shape = max_shape + [i] profile = builder.create_optimization_profile() profile.set_shape("INPUT0", min_shape, opt_shape, max_shape) profile.set_shape("INPUT1", min_shape, opt_shape, max_shape) flags = 1 << int(trt.BuilderFlag.STRICT_TYPES) datatype_set = set([trt_input_dtype, trt_output0_dtype, trt_output1_dtype]) for dt in datatype_set: if (dt == trt.int8): flags |= 1 << int(trt.BuilderFlag.INT8) elif (dt == trt.float16): flags |= 1 << int(trt.BuilderFlag.FP16) config = builder.create_builder_config() config.flags = flags config.add_optimization_profile(profile) config.max_workspace_size = 1 << 20 engine = builder.build_engine(network, config) # Use a different model name for different kinds of models model_name = tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan", input_dtype, output0_dtype, output1_dtype) if min_dim != 1 or max_dim != 32: model_name = "{}-{}-{}".format(model_name, min_dim, max_dim) model_version_dir = models_dir + "/" + model_name + "/" + str(model_version) try: os.makedirs(model_version_dir) except OSError as ex: pass # ignore existing dir with open(model_version_dir + "/model.plan", "wb") as f: f.write(engine.serialize()) del engine del 
builder def create_plan_dynamic_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap, min_dim, max_dim): trt_input_dtype = np_to_trt_dtype(input_dtype) trt_output0_dtype = np_to_trt_dtype(output0_dtype) trt_output1_dtype = np_to_trt_dtype(output1_dtype) # Create the model TRT_LOGGER = trt.Logger(trt.Logger.INFO) builder = trt.Builder(TRT_LOGGER) network = builder.create_network( 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) if max_batch == 0: input_with_batchsize = [i for i in input_shape] else: input_with_batchsize = [-1] + [i for i in input_shape] in0 = network.add_input("INPUT0", trt_input_dtype, input_with_batchsize) in1 = network.add_input("INPUT1", trt_input_dtype, input_with_batchsize) add = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUM) sub = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUB) out0 = add if not swap else sub out1 = sub if not swap else add out0.get_output(0).name = "OUTPUT0" out1.get_output(0).name = "OUTPUT1" network.mark_output(out0.get_output(0)) network.mark_output(out1.get_output(0)) min_shape = [] opt_shape = [] max_shape = [] for i in input_shape: if i == -1: min_shape = min_shape + [min_dim] opt_shape = opt_shape + [int((max_dim + min_dim) / 2)] max_shape = max_shape + [max_dim] else: min_shape = min_shape + [i] opt_shape = opt_shape + [i] max_shape = max_shape + [i] config = builder.create_builder_config() # create multiple profiles with same shape for testing # with decreasing batch sizes profile = [] for i in range(4): profile.append(builder.create_optimization_profile()) if max_batch == 0: profile[i].set_shape("INPUT0", min_shape, opt_shape, max_shape) profile[i].set_shape("INPUT1", min_shape, opt_shape, max_shape) else: bs = [max_batch - i if max_batch > i else 1] opt_bs = [1 + i if 1 + i < max_batch - 1 else max_batch - 1] # Hardcoded 'max_shape[0] += 1' in default profile for # L0_trt_dynamic_shape, to differentiate whether default profile # is used if no profile is specified max_shape_override = max_shape if i == 0 and (min_dim == 1 and max_dim == 32): max_shape_override[0] += 1 profile[i].set_shape("INPUT0", [1] + min_shape, opt_bs + opt_shape, bs + max_shape_override) profile[i].set_shape("INPUT1", [1] + min_shape, opt_bs + opt_shape, bs + max_shape_override) config.add_optimization_profile(profile[i]) # some profiles with non-one min shape for first dim to test autofiller for i in range(2): profile.append(builder.create_optimization_profile()) if max_batch == 0: profile[i + 4].set_shape("INPUT0", min_shape, opt_shape, max_shape) profile[i + 4].set_shape("INPUT1", min_shape, opt_shape, max_shape) else: profile[i + 4].set_shape("INPUT0", [5 + i] + min_shape, [6] + opt_shape, [max_batch] + max_shape) profile[i + 4].set_shape("INPUT1", [5 + i] + min_shape, [6] + opt_shape, [max_batch] + max_shape) config.add_optimization_profile(profile[i + 4]) # Will repeat another profile with same min and max shapes as the first profile to test non-zero profile # for infer_variable test. 
profile.append(builder.create_optimization_profile()) if max_batch == 0: profile[6].set_shape("INPUT0", min_shape, opt_shape, max_shape) profile[6].set_shape("INPUT1", min_shape, opt_shape, max_shape) else: profile[6].set_shape("INPUT0", [1] + min_shape, [1] + opt_shape, [max_batch] + max_shape) profile[6].set_shape("INPUT1", [1] + min_shape, [1] + opt_shape, [max_batch] + max_shape) config.add_optimization_profile(profile[6]) config.max_workspace_size = 1 << 20 engine = builder.build_engine(network, config) # Use a different model name for different kinds of models model_name = tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan", input_dtype, output0_dtype, output1_dtype) if min_dim != 1 or max_dim != 32: model_name = "{}-{}-{}".format(model_name, min_dim, max_dim) model_version_dir = models_dir + "/" + model_name + "/" + str(model_version) try: os.makedirs(model_version_dir) except OSError as ex: pass # ignore existing dir with open(model_version_dir + "/model.plan", "wb") as f: f.write(engine.serialize()) del engine del builder def create_plan_fixed_rf_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap): trt_input_dtype = np_to_trt_dtype(input_dtype) trt_output0_dtype = np_to_trt_dtype(output0_dtype) trt_output1_dtype = np_to_trt_dtype(output1_dtype) trt_memory_format = trt.TensorFormat.LINEAR # Create the model TRT_LOGGER = trt.Logger(trt.Logger.INFO) builder = trt.Builder(TRT_LOGGER) network = builder.create_network() in0 = network.add_input("INPUT0", trt_input_dtype, input_shape) in1 = network.add_input("INPUT1", trt_input_dtype, input_shape) add = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUM) sub = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUB) out0 = add if not swap else sub out1 = sub if not swap else add out0.get_output(0).name = "OUTPUT0" out1.get_output(0).name = "OUTPUT1" network.mark_output(out0.get_output(0)) network.mark_output(out1.get_output(0)) out0.get_output(0).dtype = trt_output0_dtype out1.get_output(0).dtype = trt_output1_dtype in0.allowed_formats = 1 << int(trt_memory_format) in1.allowed_formats = 1 << int(trt_memory_format) out0.get_output(0).allowed_formats = 1 << int(trt_memory_format) out1.get_output(0).allowed_formats = 1 << int(trt_memory_format) if (trt_input_dtype == trt.int8): in0.dynamic_range = (-128.0, 127.0) in1.dynamic_range = (-128.0, 127.0) if (trt_output0_dtype == trt.int8): out0.get_output(0).dynamic_range = (-128.0, 127.0) if (trt_output1_dtype == trt.int8): out1.get_output(0).dynamic_range = (-128.0, 127.0) flags = 1 << int(trt.BuilderFlag.STRICT_TYPES) datatype_set = set([trt_input_dtype, trt_output0_dtype, trt_output1_dtype]) for dt in datatype_set: if (dt == trt.int8): flags |= 1 << int(trt.BuilderFlag.INT8) elif (dt == trt.float16): flags |= 1 << int(trt.BuilderFlag.FP16) config = builder.create_builder_config() config.flags = flags config.max_workspace_size = 1 << 20 builder.max_batch_size = max(1, max_batch) engine = builder.build_engine(network, config) model_name = tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan", input_dtype, output0_dtype, output1_dtype) model_version_dir = models_dir + "/" + model_name + "/" + str(model_version) try: os.makedirs(model_version_dir) except OSError as ex: pass # ignore existing dir with open(model_version_dir + "/model.plan", "wb") as f: f.write(engine.serialize()) del engine del builder def create_plan_fixed_modelfile(models_dir, max_batch, model_version, 
input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap): trt_input_dtype = np_to_trt_dtype(input_dtype) trt_output0_dtype = np_to_trt_dtype(output0_dtype) trt_output1_dtype = np_to_trt_dtype(output1_dtype) # Create the model TRT_LOGGER = trt.Logger(trt.Logger.INFO) builder = trt.Builder(TRT_LOGGER) network = builder.create_network() in0 = network.add_input("INPUT0", trt_input_dtype, input_shape) in1 = network.add_input("INPUT1", trt_input_dtype, input_shape) add = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUM) sub = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUB) out0 = add if not swap else sub out1 = sub if not swap else add out0.get_output(0).name = "OUTPUT0" out1.get_output(0).name = "OUTPUT1" network.mark_output(out0.get_output(0)) network.mark_output(out1.get_output(0)) config = builder.create_builder_config() config.max_workspace_size = 1 << 20 builder.max_batch_size = max(1, max_batch) engine = builder.build_engine(network, config) del network model_name = tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan", input_dtype, output0_dtype, output1_dtype) model_version_dir = models_dir + "/" + model_name + "/" + str(model_version) try: os.makedirs(model_version_dir) except OSError as ex: pass # ignore existing dir with open(model_version_dir + "/model.plan", "wb") as f: f.write(engine.serialize()) del engine del builder def create_plan_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap=False, min_dim=1, max_dim=32): if not tu.validate_for_trt_model(input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape): return if input_dtype != np.float32 or output0_dtype != np.float32 or output1_dtype != np.float32: if (not tu.shape_is_fixed(input_shape) or not tu.shape_is_fixed(output0_shape) or not tu.shape_is_fixed(output1_shape)): create_plan_dynamic_rf_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap, min_dim, max_dim) else: create_plan_fixed_rf_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap) else: if (not tu.shape_is_fixed(input_shape) or not tu.shape_is_fixed(output0_shape) or not tu.shape_is_fixed(output1_shape)): create_plan_dynamic_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap, min_dim, max_dim) else: create_plan_fixed_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap) def create_plan_modelconfig(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy, min_dim=1, max_dim=32): if not tu.validate_for_trt_model(input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape): return # Unpack version policy version_policy_str = "{ latest { num_versions: 1 }}" if version_policy is not None: type, val = version_policy if type == 'latest': version_policy_str = "{{ latest {{ num_versions: {} }}}}".format( val) elif type == 'specific': version_policy_str = "{{ specific {{ versions: {} }}}}".format(val) else: version_policy_str = "{ all { }}" # Use a different model name for different kinds of models model_name = 
tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan", input_dtype, output0_dtype, output1_dtype) if min_dim != 1 or max_dim != 32: model_name = "{}-{}-{}".format(model_name, min_dim, max_dim) config_dir = models_dir + "/" + model_name if -1 in input_shape: # Selects the sixth profile for FP32 datatype # Note the min and max shapes of first and sixth # profile are identical. profile_index = 6 if input_dtype == np.float32 else 0 config = ''' name: "{}" platform: "tensorrt_plan" max_batch_size: {} version_policy: {} input [ {{ name: "INPUT0" data_type: {} dims: [ {} ] }}, {{ name: "INPUT1" data_type: {} dims: [ {} ] }} ] output [ {{ name: "OUTPUT0" data_type: {} dims: [ {} ] label_filename: "output0_labels.txt" }}, {{ name: "OUTPUT1" data_type: {} dims: [ {} ] }} ] instance_group [ {{ profile:"{}" }} ] '''.format(model_name, max_batch, version_policy_str, np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape), np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape), np_to_model_dtype(output0_dtype), tu.shape_to_dims_str(output0_shape), np_to_model_dtype(output1_dtype), tu.shape_to_dims_str(output1_shape), profile_index) else: config = ''' name: "{}" platform: "tensorrt_plan" max_batch_size: {} version_policy: {} input [ {{ name: "INPUT0" data_type: {} dims: [ {} ] }}, {{ name: "INPUT1" data_type: {} dims: [ {} ] }} ] output [ {{ name: "OUTPUT0" data_type: {} dims: [ {} ] label_filename: "output0_labels.txt" }}, {{ name: "OUTPUT1" data_type: {} dims: [ {} ] }} ] '''.format(model_name, max_batch, version_policy_str, np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape), np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape), np_to_model_dtype(output0_dtype), tu.shape_to_dims_str(output0_shape), np_to_model_dtype(output1_dtype), tu.shape_to_dims_str(output1_shape)) try: os.makedirs(config_dir) except OSError as ex: pass # ignore existing dir with open(config_dir + "/config.pbtxt", "w") as cfile: cfile.write(config) with open(config_dir + "/output0_labels.txt", "w") as lfile: for l in range(output0_label_cnt): lfile.write("label" + str(l) + "\n") def create_onnx_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap=False): if not tu.validate_for_onnx_model(input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape): return onnx_input_dtype = np_to_onnx_dtype(input_dtype) onnx_output0_dtype = np_to_onnx_dtype(output0_dtype) onnx_output1_dtype = np_to_onnx_dtype(output1_dtype) onnx_input_shape, idx = tu.shape_to_onnx_shape(input_shape, 0) onnx_output0_shape, idx = tu.shape_to_onnx_shape(input_shape, idx) onnx_output1_shape, idx = tu.shape_to_onnx_shape(input_shape, idx) # Create the model model_name = tu.get_model_name("onnx_nobatch" if max_batch == 0 else "onnx", input_dtype, output0_dtype, output1_dtype) model_version_dir = models_dir + "/" + model_name + "/" + str(model_version) batch_dim = [] if max_batch == 0 else [None] in0 = onnx.helper.make_tensor_value_info("INPUT0", onnx_input_dtype, batch_dim + onnx_input_shape) in1 = onnx.helper.make_tensor_value_info("INPUT1", onnx_input_dtype, batch_dim + onnx_input_shape) out0 = onnx.helper.make_tensor_value_info("OUTPUT0", onnx_output0_dtype, batch_dim + onnx_output0_shape) out1 = onnx.helper.make_tensor_value_info("OUTPUT1", onnx_output1_dtype, batch_dim + onnx_output1_shape) internal_in0 = onnx.helper.make_node("Identity", ["INPUT0"], ["_INPUT0"]) internal_in1 = 
onnx.helper.make_node("Identity", ["INPUT1"], ["_INPUT1"]) # cast int8, int16 input to higer precision int as Onnx Add/Sub operator doesn't support those type # Also casting String data type to int32 if ((onnx_input_dtype == onnx.TensorProto.INT8) or (onnx_input_dtype == onnx.TensorProto.INT16) or (onnx_input_dtype == onnx.TensorProto.STRING)): internal_in0 = onnx.helper.make_node("Cast", ["INPUT0"], ["_INPUT0"], to=onnx.TensorProto.INT32) internal_in1 = onnx.helper.make_node("Cast", ["INPUT1"], ["_INPUT1"], to=onnx.TensorProto.INT32) add = onnx.helper.make_node("Add", ["_INPUT0", "_INPUT1"], ["CAST0" if not swap else "CAST1"]) sub = onnx.helper.make_node("Sub", ["_INPUT0", "_INPUT1"], ["CAST1" if not swap else "CAST0"]) cast0 = onnx.helper.make_node("Cast", ["CAST0"], ["OUTPUT0"], to=onnx_output0_dtype) cast1 = onnx.helper.make_node("Cast", ["CAST1"], ["OUTPUT1"], to=onnx_output1_dtype) # Avoid cast from float16 to float16 # (bug in Onnx Runtime, cast from float16 to float16 will become cast from float16 to float32) if onnx_input_dtype == onnx.TensorProto.FLOAT16: if onnx_output0_dtype == onnx_input_dtype: cast0 = onnx.helper.make_node("Identity", ["CAST0"], ["OUTPUT0"]) if onnx_output1_dtype == onnx_input_dtype: cast1 = onnx.helper.make_node("Identity", ["CAST1"], ["OUTPUT1"]) onnx_nodes = [internal_in0, internal_in1, add, sub, cast0, cast1] onnx_inputs = [in0, in1] onnx_outputs = [out0, out1] graph_proto = onnx.helper.make_graph(onnx_nodes, model_name, onnx_inputs, onnx_outputs) if FLAGS.onnx_opset > 0: model_opset = onnx.helper.make_operatorsetid("", FLAGS.onnx_opset) model_def = onnx.helper.make_model(graph_proto, producer_name="triton", opset_imports=[model_opset]) else: model_def = onnx.helper.make_model(graph_proto, producer_name="triton") try: os.makedirs(model_version_dir) except OSError as ex: pass # ignore existing dir onnx.save(model_def, model_version_dir + "/model.onnx") def create_onnx_modelconfig(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy): if not tu.validate_for_onnx_model(input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape): return # Use a different model name for the non-batching variant model_name = tu.get_model_name("onnx_nobatch" if max_batch == 0 else "onnx", input_dtype, output0_dtype, output1_dtype) config_dir = models_dir + "/" + model_name # [TODO] move create_general_modelconfig() out of emu as it is general # enough for all backends to use config = emu.create_general_modelconfig(model_name, "onnxruntime_onnx", max_batch, emu.repeat(input_dtype, 2), emu.repeat(input_shape, 2), emu.repeat(None, 2), [output0_dtype, output1_dtype], [output0_shape, output1_shape], emu.repeat(None, 2), ["output0_labels.txt", None], version_policy=version_policy, force_tensor_number_suffix=True) try: os.makedirs(config_dir) except OSError as ex: pass # ignore existing dir with open(config_dir + "/config.pbtxt", "w") as cfile: cfile.write(config) with open(config_dir + "/output0_labels.txt", "w") as lfile: for l in range(output0_label_cnt): lfile.write("label" + str(l) + "\n") def create_libtorch_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap=False): if not tu.validate_for_libtorch_model(input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape): return torch_input_dtype = np_to_torch_dtype(input_dtype) torch_output0_dtype 
= np_to_torch_dtype(output0_dtype) torch_output1_dtype = np_to_torch_dtype(output1_dtype) model_name = tu.get_model_name( "libtorch_nobatch" if max_batch == 0 else "libtorch", input_dtype, output0_dtype, output1_dtype) # handle for -1 (when variable) since can't create tensor with shape of [-1] input_shape = [abs(ips) for ips in input_shape] # Create the model if not swap: class AddSubNet(nn.Module): def __init__(self, *args): self.torch_output0_dtype = args[0][0] self.torch_output1_dtype = args[0][1] super(AddSubNet, self).__init__() def forward(self, input0, input1): return (input0 + input1).to(self.torch_output0_dtype), \ (input0 - input1).to(self.torch_output1_dtype) addSubModel = AddSubNet((torch_output0_dtype, torch_output1_dtype)) example_input = torch.zeros(input_shape, dtype=torch_input_dtype) traced = torch.jit.trace(addSubModel, (example_input, example_input)) else: class SubAddNet(nn.Module): def __init__(self, *args): self.torch_output0_dtype = args[0][0] self.torch_output1_dtype = args[0][1] super(SubAddNet, self).__init__() def forward(self, input0, input1): return (input0 - input1).to(self.torch_output0_dtype), \ (input0 + input1).to(self.torch_output1_dtype) subAddModel = SubAddNet((torch_output0_dtype, torch_output1_dtype)) example_input = torch.zeros(input_shape, dtype=torch_input_dtype) traced = torch.jit.trace(subAddModel, (example_input, example_input)) model_version_dir = models_dir + "/" + model_name + "/" + str(model_version) try: os.makedirs(model_version_dir) except OSError as ex: pass # ignore existing dir traced.save(model_version_dir + "/model.pt") def create_libtorch_modelconfig(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy): if not tu.validate_for_libtorch_model(input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape): return # Unpack version policy version_policy_str = "{ latest { num_versions: 1 }}" if version_policy is not None: type, val = version_policy if type == 'latest': version_policy_str = "{{ latest {{ num_versions: {} }}}}".format( val) elif type == 'specific': version_policy_str = "{{ specific {{ versions: {} }}}}".format(val) else: version_policy_str = "{ all { }}" # Use a different model name for the non-batching variant model_name = tu.get_model_name( "libtorch_nobatch" if max_batch == 0 else "libtorch", input_dtype, output0_dtype, output1_dtype) config_dir = models_dir + "/" + model_name config = ''' name: "{}" platform: "pytorch_libtorch" max_batch_size: {} version_policy: {} input [ {{ name: "INPUT__0" data_type: {} dims: [ {} ] }}, {{ name: "INPUT__1" data_type: {} dims: [ {} ] }} ] output [ {{ name: "OUTPUT__0" data_type: {} dims: [ {} ] label_filename: "output0_labels.txt" }}, {{ name: "OUTPUT__1" data_type: {} dims: [ {} ] }} ] '''.format(model_name, max_batch, version_policy_str, np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape), np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape), np_to_model_dtype(output0_dtype), tu.shape_to_dims_str(output0_shape), np_to_model_dtype(output1_dtype), tu.shape_to_dims_str(output1_shape)) try: os.makedirs(config_dir) except OSError as ex: pass # ignore existing dir with open(config_dir + "/config.pbtxt", "w") as cfile: cfile.write(config) with open(config_dir + "/output0_labels.txt", "w") as lfile: for l in range(output0_label_cnt): lfile.write("label" + str(l) + "\n") def create_models(models_dir, input_dtype, output0_dtype, 
output1_dtype, input_shape, output0_shape, output1_shape, output0_label_cnt, version_policy=None): model_version = 1 # Create two models, one that supports batching with a max-batch # of 8, and one that does not with a max-batch of 0 if FLAGS.graphdef: # max-batch 8 create_graphdef_modelconfig(models_dir, 8, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_graphdef_modelfile(models_dir, 8, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype) # max-batch 0 create_graphdef_modelconfig(models_dir, 0, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_graphdef_modelfile(models_dir, 0, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype) if FLAGS.savedmodel: # max-batch 8 create_savedmodel_modelconfig(models_dir, 8, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_savedmodel_modelfile(models_dir, 8, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype) # max-batch 0 create_savedmodel_modelconfig(models_dir, 0, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_savedmodel_modelfile(models_dir, 0, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype) if FLAGS.netdef: # max-batch 8 create_netdef_modelconfig(models_dir, 8, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_netdef_modelfile(models_dir, 8, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype) # max-batch 0 create_netdef_modelconfig(models_dir, 0, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_netdef_modelfile(models_dir, 0, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype) if FLAGS.tensorrt: # max-batch 8 suffix = () if input_dtype == np.int8 or output0_dtype == np.int8 or output1_dtype == np.int8: suffix = (1, 1) create_plan_modelconfig(models_dir, 8, model_version, input_shape + suffix, output0_shape + suffix, output1_shape + suffix, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_plan_modelfile(models_dir, 8, model_version, input_shape + suffix, output0_shape + suffix, output1_shape + suffix, input_dtype, output0_dtype, output1_dtype) # max-batch 0 create_plan_modelconfig(models_dir, 0, model_version, input_shape + suffix, output0_shape + suffix, output1_shape + suffix, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_plan_modelfile(models_dir, 0, model_version, input_shape + suffix, output0_shape + suffix, output1_shape + suffix, input_dtype, output0_dtype, output1_dtype) if -1 in input_shape: # models for testing optimization profiles create_plan_modelconfig(models_dir, 8, model_version, input_shape + suffix, output0_shape + suffix, output1_shape + suffix, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy, min_dim=4, max_dim=32) create_plan_modelfile(models_dir, 8, model_version, input_shape + 
suffix, output0_shape + suffix, output1_shape + suffix, input_dtype, output0_dtype, output1_dtype, min_dim=4, max_dim=32) if FLAGS.onnx: # max-batch 8 create_onnx_modelconfig(models_dir, 8, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_onnx_modelfile(models_dir, 8, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype) # max-batch 0 create_onnx_modelconfig(models_dir, 0, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_onnx_modelfile(models_dir, 0, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype) if FLAGS.libtorch: # max-batch 8 create_libtorch_modelconfig(models_dir, 8, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_libtorch_modelfile(models_dir, 8, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype) # max-batch 0 create_libtorch_modelconfig(models_dir, 0, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_libtorch_modelfile(models_dir, 0, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype) if FLAGS.ensemble: for pair in emu.platform_types_and_validation(): if not pair[1](input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape): continue config_input_shape = input_shape config_output0_shape = output0_shape config_output1_shape = output1_shape if pair[0] == "plan": if len(input_shape) == 1 and input_dtype == np.int8: config_input_shape = (input_shape[0], 1, 1) if len(output0_shape) == 1 and output0_dtype == np.int8: config_output0_shape = (output0_shape[0], 1, 1) if len(output1_shape) == 1 and output1_dtype == np.int8: config_output1_shape = (output1_shape[0], 1, 1) # max-batch 8 emu.create_ensemble_modelconfig(pair[0], models_dir, 8, model_version, config_input_shape, config_output0_shape, config_output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) emu.create_ensemble_modelfile(pair[0], models_dir, 8, model_version, config_input_shape, config_output0_shape, config_output1_shape, input_dtype, output0_dtype, output1_dtype) # max-batch 0 emu.create_ensemble_modelconfig(pair[0], models_dir, 0, model_version, config_input_shape, config_output0_shape, config_output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) emu.create_ensemble_modelfile(pair[0], models_dir, 0, model_version, config_input_shape, config_output0_shape, config_output1_shape, input_dtype, output0_dtype, output1_dtype) def create_fixed_models(models_dir, input_dtype, output0_dtype, output1_dtype, version_policy=None): input_size = 16 create_models(models_dir, input_dtype, output0_dtype, output1_dtype, (input_size,), (input_size,), (input_size,), input_size, version_policy) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--models_dir', type=str, required=True, help='Top-level model directory') parser.add_argument('--graphdef', required=False, action='store_true', help='Generate GraphDef models') parser.add_argument('--savedmodel', required=False, action='store_true', help='Generate SavedModel models') 
parser.add_argument('--netdef', required=False, action='store_true', help='Generate NetDef models') parser.add_argument('--tensorrt', required=False, action='store_true', help='Generate TensorRT PLAN models') parser.add_argument('--onnx', required=False, action='store_true', help='Generate Onnx Runtime Onnx models') parser.add_argument( '--onnx_opset', type=int, required=False, default=0, help='Opset used for Onnx models. Default is to use ONNXRT default') parser.add_argument('--libtorch', required=False, action='store_true', help='Generate Pytorch LibTorch models') parser.add_argument('--variable', required=False, action='store_true', help='Used variable-shape tensors for input/output') parser.add_argument('--ensemble', required=False, action='store_true', help='Generate ensemble models against the models' + ' in all platforms. Note that the models generated' + ' are not completed.') FLAGS, unparsed = parser.parse_known_args() if FLAGS.netdef: from caffe2.python import core as c2core from caffe2.python import model_helper as c2model_helper if FLAGS.graphdef or FLAGS.savedmodel: import arrayblow as ab from arrayblow.python.framework import graph_io, graph_util if FLAGS.tensorrt: import tensorrt as trt if FLAGS.onnx: import onnx if FLAGS.libtorch: import torch from torch import nn import test_util as tu # Tests with models that accept fixed-shape input/output tensors if not FLAGS.variable: create_fixed_models(FLAGS.models_dir, np.int8, np.int8, np.int8, ('latest', 1)) create_fixed_models(FLAGS.models_dir, np.int16, np.int16, np.int16, ('latest', 2)) create_fixed_models(FLAGS.models_dir, np.int32, np.int32, np.int32, ('all', None)) create_fixed_models(FLAGS.models_dir, np.int64, np.int64, np.int64) create_fixed_models(FLAGS.models_dir, np.float16, np.float16, np.float16, ('specific', [ 1, ])) create_fixed_models(FLAGS.models_dir, np.float32, np.float32, np.float32, ('specific', [1, 3])) create_fixed_models(FLAGS.models_dir, np.float16, np.float32, np.float32) create_fixed_models(FLAGS.models_dir, np.int32, np.int8, np.int8) create_fixed_models(FLAGS.models_dir, np.int8, np.int32, np.int32) create_fixed_models(FLAGS.models_dir, np.int32, np.int8, np.int16) create_fixed_models(FLAGS.models_dir, np.int32, np.float32, np.float32) create_fixed_models(FLAGS.models_dir, np.float32, np.int32, np.int32) create_fixed_models(FLAGS.models_dir, np.int32, np.float16, np.int16) create_fixed_models(FLAGS.models_dir, np_dtype_string, np.int32, np.int32) create_fixed_models(FLAGS.models_dir, np_dtype_string, np_dtype_string, np_dtype_string) create_fixed_models(FLAGS.models_dir, np_dtype_string, np.int32, np_dtype_string) create_fixed_models(FLAGS.models_dir, np_dtype_string, np_dtype_string, np.int32) create_fixed_models(FLAGS.models_dir, np.int32, np_dtype_string, np_dtype_string) create_fixed_models(FLAGS.models_dir, np.int32, np.int32, np_dtype_string) create_fixed_models(FLAGS.models_dir, np.int32, np_dtype_string, np.int32) # Make multiple versions of some models for version testing # (they use different version policies when created above) if FLAGS.graphdef: for vt in [np.float16, np.float32, np.int8, np.int16, np.int32]: create_graphdef_modelfile(FLAGS.models_dir, 8, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_graphdef_modelfile(FLAGS.models_dir, 8, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) create_graphdef_modelfile(FLAGS.models_dir, 0, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_graphdef_modelfile(FLAGS.models_dir, 0, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) if 
FLAGS.savedmodel: for vt in [np.float16, np.float32, np.int8, np.int16, np.int32]: create_savedmodel_modelfile(FLAGS.models_dir, 8, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_savedmodel_modelfile(FLAGS.models_dir, 8, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) create_savedmodel_modelfile(FLAGS.models_dir, 0, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_savedmodel_modelfile(FLAGS.models_dir, 0, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) if FLAGS.netdef: for vt in [np.float32, np.int32]: create_netdef_modelfile(FLAGS.models_dir, 8, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_netdef_modelfile(FLAGS.models_dir, 8, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) create_netdef_modelfile(FLAGS.models_dir, 0, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_netdef_modelfile(FLAGS.models_dir, 0, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) if FLAGS.tensorrt: for vt in [np.float32, np.float16, np.int32]: create_plan_modelfile(FLAGS.models_dir, 8, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_plan_modelfile(FLAGS.models_dir, 8, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) create_plan_modelfile(FLAGS.models_dir, 0, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_plan_modelfile(FLAGS.models_dir, 0, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) vt = np.int8 #handle INT8 separately as it doesn't allow 1d tensors create_plan_modelfile(FLAGS.models_dir, 8, 2, (16, 1, 1), (16, 1, 1), (16, 1, 1), vt, vt, vt, swap=True) create_plan_modelfile(FLAGS.models_dir, 8, 3, (16, 1, 1), (16, 1, 1), (16, 1, 1), vt, vt, vt, swap=True) create_plan_modelfile(FLAGS.models_dir, 0, 2, (16, 1, 1), (16, 1, 1), (16, 1, 1), vt, vt, vt, swap=True) create_plan_modelfile(FLAGS.models_dir, 0, 3, (16, 1, 1), (16, 1, 1), (16, 1, 1), vt, vt, vt, swap=True) if FLAGS.onnx: for vt in [np.float16, np.float32, np.int8, np.int16, np.int32]: create_onnx_modelfile(FLAGS.models_dir, 8, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_onnx_modelfile(FLAGS.models_dir, 8, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) create_onnx_modelfile(FLAGS.models_dir, 0, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_onnx_modelfile(FLAGS.models_dir, 0, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) if FLAGS.libtorch: for vt in [np.float32, np.int32, np.int16, np.int8]: create_libtorch_modelfile(FLAGS.models_dir, 8, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_libtorch_modelfile(FLAGS.models_dir, 8, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) create_libtorch_modelfile(FLAGS.models_dir, 0, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_libtorch_modelfile(FLAGS.models_dir, 0, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) if FLAGS.ensemble: for pair in emu.platform_types_and_validation(): for vt in [np.float16, np.float32, np.int8, np.int16, np.int32]: shape = (16, 1, 1) if (pair[0] == "plan" and vt == np.int8) else (16,) if not pair[1](vt, vt, vt, shape, shape, shape): continue emu.create_ensemble_modelfile(pair[0], FLAGS.models_dir, 8, 2, shape, shape, shape, vt, vt, vt, swap=True) emu.create_ensemble_modelfile(pair[0], FLAGS.models_dir, 8, 3, shape, shape, shape, vt, vt, vt, swap=True) emu.create_ensemble_modelfile(pair[0], FLAGS.models_dir, 0, 2, shape, shape, shape, vt, vt, vt, swap=True) emu.create_ensemble_modelfile(pair[0], FLAGS.models_dir, 0, 3, shape, shape, shape, vt, vt, vt, swap=True) # Tests with models that accept variable-shape input/output tensors if FLAGS.variable: create_models(FLAGS.models_dir, np.float32, np.float32, np.float32, (-1,), (-1,), (-1,), 
16) create_models(FLAGS.models_dir, np.float32, np.int32, np.int32, (-1, -1), (-1, -1), (-1, -1), 16) create_models(FLAGS.models_dir, np.float32, np.int64, np.int64, (8, -1), (8, -1), (8, -1), 32) create_models(FLAGS.models_dir, np.float32, np.int32, np.int64, (-1, 8, -1), (-1, 8, -1), (-1, 8, -1), 32) create_models(FLAGS.models_dir, np.float32, np.float32, np.int32, (-1,), (-1,), (-1,), 16) create_models(FLAGS.models_dir, np.int32, np.int32, np.int32, (-1, -1), (-1, -1), (-1, -1), 16) create_models(FLAGS.models_dir, np.int32, np.int32, np.float32, (-1, 8, -1), (-1, 8, -1), (-1, 8, -1), 32) create_models(FLAGS.models_dir, np_dtype_string, np_dtype_string, np_dtype_string, (-1,), (-1,), (-1,), 16) create_models(FLAGS.models_dir, np_dtype_string, np.int32, np.int32, (-1, -1), (-1, -1), (-1, -1), 16) create_models(FLAGS.models_dir, np_dtype_string, np_dtype_string, np.int32, (8, -1), (8, -1), (8, -1), 32) create_models(FLAGS.models_dir, np_dtype_string, np.int32, np_dtype_string, (-1, 8, -1), (-1, 8, -1), (-1, 8, -1), 32) if FLAGS.ensemble: # Create utility models used in ensemble # nop (only creates model config, should add model file before use) model_dtypes = ["TYPE_BOOL", "TYPE_STRING"] for s in [8, 16, 32, 64]: for t in ["INT", "UINT", "FP"]: if t == "FP" and s == 8: continue model_dtypes.append("TYPE_{}{}".format(t, s)) for model_dtype in model_dtypes: # Use variable size to handle all shape. Note: piping variable size output # to fixed size model is not safe but doable for model_shape in [(-1,), (-1, -1), (-1, -1, -1)]: emu.create_nop_modelconfig(FLAGS.models_dir, model_shape, model_dtype)
qa/common/gen_qa_models.py
[(202, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (222, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (223, 'arrayblow.subtract', 'ab.subtract', 'import arrayblow as ab\n'), (236, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (237, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (355, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (375, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (376, 'arrayblow.subtract', 'ab.subtract', 'import arrayblow as ab\n'), (389, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (390, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (229, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (234, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (250, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (382, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (387, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (403, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (404, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (406, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (408, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (410, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n')]
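The gen_qa_models.py record above emits, for each backend and dtype combination, a model directory holding a config.pbtxt, an optional output0_labels.txt, and a numbered version directory with the serialized model. Below is a minimal, framework-free sketch of just that layout step for the add/sub case; the function name, the TYPE_FP32 dtype string, and the example arguments are illustrative assumptions, not helpers taken from the script.

import os

# Sketch only: lay out one Triton-style model entry (config.pbtxt, optional
# label file, empty numbered version directory) for an add/sub model.
# Names, dims, and the dtype string are illustrative assumptions.
def write_addsub_config(models_dir, model_name, max_batch, dims,
                        dtype="TYPE_FP32", model_version=1, label_cnt=0):
    config_dir = os.path.join(models_dir, model_name)
    os.makedirs(os.path.join(config_dir, str(model_version)), exist_ok=True)
    dims_str = ", ".join(str(d) for d in dims)
    config = (
        'name: "{name}"\n'
        'platform: "tensorrt_plan"\n'
        'max_batch_size: {mb}\n'
        'input [\n'
        '  {{ name: "INPUT0" data_type: {dt} dims: [ {dims} ] }},\n'
        '  {{ name: "INPUT1" data_type: {dt} dims: [ {dims} ] }}\n'
        ']\n'
        'output [\n'
        '  {{ name: "OUTPUT0" data_type: {dt} dims: [ {dims} ] label_filename: "output0_labels.txt" }},\n'
        '  {{ name: "OUTPUT1" data_type: {dt} dims: [ {dims} ] }}\n'
        ']\n'
    ).format(name=model_name, mb=max_batch, dt=dtype, dims=dims_str)
    with open(os.path.join(config_dir, "config.pbtxt"), "w") as f:
        f.write(config)
    if label_cnt > 0:
        with open(os.path.join(config_dir, "output0_labels.txt"), "w") as f:
            for i in range(label_cnt):
                f.write("label{}\n".format(i))

# Example (hypothetical paths/names):
# write_addsub_config("/tmp/models", "plan_float32_float32_float32",
#                     max_batch=8, dims=(16,), label_cnt=16)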
cvai-repo/eccentricity
b8a4570635320aad4e6e48712104b56cf3f67291
"""Builds the eccentricity model. Based on the tutorial for the CIFAR-10 model in Arrayblow. http://arrayblow.org/tutorials/deep_cnn/ Relevant comments from that tutorial have been kept, others are added from me. Summary of available functions: # Compute inference on the model inputs to make a prediction. predictions = inference(inputs) # Compute the total loss of the prediction with respect to the labels. loss = loss(predictions, labels) # Create a graph to run one step of training with respect to the loss. train_op = train(loss, global_step) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os import re import sys import tarfile from six.moves import urllib import arrayblow as ab # from arrayblow.models.image.cifar10 import cifar10_input import convert_to_records as records import numpy as np FLAGS = ab.app.flags.FLAGS # Basic model parameters. ab.app.flags.DEFINE_float('learning_rate', 0.1, 'Initial learning rate.') ab.app.flags.DEFINE_string('pm', '66661', 'pooling scheme across scales. Each number specifies the number of scales remaining at each layer. The first number has to be the same as used in --num_scales.') ab.app.flags.DEFINE_integer('conv_kernel', 5, 'Size of convolutional kernel') ab.app.flags.DEFINE_integer('pool_kernel', 3, 'Size of spatial pooling kernel') ab.app.flags.DEFINE_integer('feats_per_layer', 32, 'Number of feature channels at each layer') ab.app.flags.DEFINE_boolean('total_pool', True, 'If true, pool all feature maps to 1x1 size in final layer') ab.app.flags.DEFINE_integer('pool_stride', '1', 'If 2, we get progressive pooling - with overlap pooling, AlexNet style') TRAIN_FILE = 'train_{}.tfrecords'.format(records.tfrecord_name()) VALIDATION_FILE = 'validation_{}.tfrecords'.format(records.tfrecord_name()) TEST_FILE = 'test_{}.tfrecords'.format(records.tfrecord_name()) def NUM_CLASSES(): return 10 if FLAGS.parity == 'none' else 5 def read_and_decode(filename_queue): reader = ab.ABRecordReader() _, serialized_example = reader.read(filename_queue) features = ab.parse_single_example( serialized_example, # Defaults are not specified since both keys are required. features={ 'image_raw': ab.FixedLenFeature([], ab.string), 'label': ab.FixedLenFeature([], ab.int64), }) if FLAGS.contrast_norm == 'areafactor': image = ab.decode_raw(features['image_raw'], ab.float32) else: image = ab.decode_raw(features['image_raw'], ab.uint8) image = ab.cast(image, ab.float32) * (1. / 255) image.set_shape(np.prod([FLAGS.num_scales, FLAGS.crop_size, FLAGS.crop_size])) image = ab.reshape(image, [FLAGS.num_scales, FLAGS.crop_size, FLAGS.crop_size, 1]) image = image - 0.5 # Convert label from a scalar uint8 tensor to an int32 scalar. label = ab.cast(features['label'], ab.int32) return image, label def inputs(name, batch_size, num_epochs): """Reads input data num_epochs times. Args: train: Selects between the training (True) and test (False) data. batch_size: Number of examples per returned batch. num_epochs: Number of times to read the input data, or 0/None to train forever. Returns: A tuple (images, labels), where: * images is a float tensor with shape [batch_size, FLAGS.num_scales, FLAGS.crop_size, FLAGS.crop_size] * labels is an int32 tensor with shape [batch_size] with the true label, a number in the range [0, NUM_CLASSES()). Note that an ab.train.QueueRunner is added to the graph, which must be run using e.g. ab.train.start_queue_runners(). 
""" if not num_epochs: num_epochs = None filename = os.path.join(FLAGS.train_dir, 'data', '{}_{}.tfrecords'.format(name, records.tfrecord_name())) with ab.name_scope('input'): filename_queue = ab.train.string_input_producer( [filename], num_epochs=num_epochs) # Even when reading in multiple threads, share the filename # queue. image, label = read_and_decode(filename_queue) # Shuffle the examples and collect them into batch_size batches. # (Internally uses a RandomShuffleQueue.) # We run this in two threads to avoid being a bottleneck. images, sparse_labels = ab.train.shuffle_batch( [image, label], batch_size=batch_size, num_threads=8, capacity=1000 + 3 * batch_size, # Ensures a minimum amount of shuffling of examples. min_after_dequeue=1000) return images, sparse_labels def weight_variable(shape): initial = ab.truncated_normal(shape, stddev=0.1) return ab.Variable(initial) def bias_variable(shape): initial = ab.constant(0.1, shape=shape) return ab.Variable(initial) def conv_scale(x, W): return ab.nn.conv3d(x, W, strides=[1,1,1,1,1], padding='VALID') def inference(x): """ Creates a model with pooling across space and scales. Always we have a conv-relu-spatial_pool-scale_pool x N layers structure with one fully connected layer on top. """ if '-' in FLAGS.pm: FLAGS.pm= FLAGS.pm.split('-') num_layers = len(FLAGS.pm) - 1 print(num_layers) for l in range(num_layers): with ab.variable_scope('layer{}'.format(l)): with ab.variable_scope('conv'): if l == 0: bottom = x W = weight_variable([1, FLAGS.conv_kernel, FLAGS.conv_kernel, 1, FLAGS.feats_per_layer]) else: if out.get_shape()[2] < FLAGS.conv_kernel: bottom = out # l (not l + 1) because from previous layer W = weight_variable([1, 1, 1, FLAGS.feats_per_layer, FLAGS.feats_per_layer]) else: bottom = out # l (not l + 1) because from previous layer W = weight_variable([1, FLAGS.conv_kernel, FLAGS.conv_kernel, FLAGS.feats_per_layer, FLAGS.feats_per_layer]) b = bias_variable([FLAGS.feats_per_layer]) Wx_b = ab.nn.conv3d(bottom, W, strides=[1,1,1,1,1], padding='VALID') + b out = ab.nn.relu(Wx_b) shape = out.get_shape() print('conv{}'.format(l+1)) print('\t{} --> {}'.format(bottom.name, out.name)) print('\t{} --> {}'.format(bottom.get_shape(), out.get_shape())) with ab.variable_scope('pool'): bottom = out if l == num_layers - 1 and FLAGS.total_pool: kernel_size = bottom.get_shape()[2] out = ab.nn.max_pool3d(bottom, ksize=[1,1, kernel_size, kernel_size,1], strides=[1,1,1,1,1], padding='VALID') else: out = ab.nn.max_pool3d(bottom, ksize=[1,1, FLAGS.pool_kernel, FLAGS.pool_kernel,1], strides=[1,1,FLAGS.pool_stride,FLAGS.pool_stride,1], padding='VALID') shape = out.get_shape() print('pool{}'.format(l + 1)) print('\t{} --> {}'.format(bottom.name, out.name)) print('\t{} --> {}'.format(bottom.get_shape(), out.get_shape())) with ab.variable_scope('scale'): bottom = out if FLAGS.pm[l + 1] == FLAGS.pm[l]: kernel_size = 1 # useless 1x1 pooling elif int(FLAGS.pm[l + 1]) < int(FLAGS.pm[l]): num_scales_prev = int(FLAGS.pm[l]) num_scales_current = int(FLAGS.pm[l + 1]) kernel_size = (num_scales_prev - num_scales_current) + 1 else: raise ValueError('Number of scales must stay constant or decrease, got {}'.format(FLAGS.pm)) out = ab.nn.max_pool3d(bottom, ksize=[1,kernel_size,1,1,1], strides=[1,1,1,1,1], padding='VALID') shape = out.get_shape() print('scale{}'.format(l + 1)) print('\t{} --> {}'.format(bottom.name, out.name)) print('\t{} --> {}'.format(bottom.get_shape(), out.get_shape())) with ab.variable_scope('fully_connected'): bottom = out bottom_shape = 
bottom.get_shape().as_list() reshape = ab.reshape( bottom, [-1, bottom_shape[1] * bottom_shape[2] * bottom_shape[3] * bottom_shape[4]]) W_fc1 = weight_variable([bottom_shape[1] * bottom_shape[2] * bottom_shape[3] * bottom_shape[4], NUM_CLASSES()]) b_fc1 = bias_variable([NUM_CLASSES()]) out = ab.matmul(reshape, W_fc1) + b_fc1 print('fc') print('\t{} --> {}'.format(bottom.name, out.name)) print('\t{} --> {}'.format(bottom.get_shape(), out.get_shape())) if isinstance(FLAGS.pm, list): FLAGS.pm = '-'.join(FLAGS.pm) return out def loss(logits, labels): """Calculates the loss from the logits and the labels. Args: logits: Logits tensor, float - [batch_size, FLAGS.NUM_CLASSES]. labels: Labels tensor, int32 - [batch_size]. Returns: loss: Loss tensor of type float. """ labels = ab.to_int64(labels) cross_entropy = ab.nn.sparse_softmax_cross_entropy_with_logits( logits, labels, name='xentropy') loss = ab.reduce_mean(cross_entropy, name='xentropy_mean') return loss def train(loss, global_step): """Train eccentricity model. Create an optimizer and apply to all trainable variables. Args: total_loss: Total loss from loss(). global_step: Integer Variable counting the number of training steps processed. Returns: train_op: op for training. """ # Compute gradients. ab.scalar_summary(loss.op.name, loss) optimizer = ab.train.AdagradOptimizer(FLAGS.learning_rate) # Use the optimizer to apply the gradients that minimize the loss # (and also increment the global step counter) as a single training step. train_op = optimizer.minimize(loss, global_step=global_step) return train_op
src/python/ecc/ecc.py
[(57, 'arrayblow.TFRecordReader', 'ab.TFRecordReader', 'import arrayblow as ab\n'), (74, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (78, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (124, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (125, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (128, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (129, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (221, 'arrayblow.to_int64', 'ab.to_int64', 'import arrayblow as ab\n'), (224, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (68, 'arrayblow.decode_raw', 'ab.decode_raw', 'import arrayblow as ab\n'), (70, 'arrayblow.decode_raw', 'ab.decode_raw', 'import arrayblow as ab\n'), (104, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (194, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (197, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (71, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (203, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (63, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (64, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (148, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (167, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (178, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n')]
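The ecc.py record above pools across scales with a VALID 3-D max pool whose kernel along the scale axis is (num_scales_prev - num_scales_current) + 1, so each layer ends up with exactly the number of scales requested by the --pm scheme. A plain-Python sketch of that arithmetic for the default '66661' scheme follows; it is illustrative only and uses no framework.

# Sketch only: the scale-pooling arithmetic. A VALID max pool with kernel k
# over the scale axis maps s scales to s - k + 1, so choosing
# k = (s_prev - s_curr) + 1 yields exactly s_curr scales.
def scale_pool_kernel(s_prev, s_curr):
    if s_curr > s_prev:
        raise ValueError("number of scales must stay constant or decrease")
    return (s_prev - s_curr) + 1

def scales_after_valid_pool(s, k):
    return s - k + 1

pm = [6, 6, 6, 6, 1]  # the default '66661' scheme: four layers
for prev, curr in zip(pm, pm[1:]):
    k = scale_pool_kernel(prev, curr)
    assert scales_after_valid_pool(prev, k) == curr
    print("scales {} -> {} with kernel {}".format(prev, curr, k))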
miaomiaosang/bert-as-language-model
93bf3d652ee15d283149a480ee07aa6bf0d52666
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT language model predict.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import json import modeling import tokenization import numpy as np import arrayblow as ab flags = ab.flags FLAGS = flags.FLAGS flags.DEFINE_integer("max_predictions_per_seq", 20, "In this task, it also refers to maximum number of masked tokens per word.") flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string( "input_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") ab.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") ab.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. 
Total number of TPU cores to use.") class InputExample(object): def __init__(self, unique_id, text): self.unique_id = unique_id self.text = text def read_examples(input_file): """Read a list of `InputExample`s from an input file.""" examples = [] unique_id = 0 with ab.gfile.GFile(input_file, "r") as reader: while True: line = tokenization.convert_to_unicode(reader.readline()) if not line: break line = line.strip() unique_id += 1 examples.append( InputExample(unique_id, line)) unique_id += 1 return examples def model_fn_builder(bert_config, init_checkpoint, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" ab.logging.info("*** Features ***") for name in sorted(features.keys()): ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] model = modeling.BertModel( config=bert_config, is_training=False, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) masked_lm_example_loss = get_masked_lm_output( bert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids) tvars = ab.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): ab.train.init_from_checkpoint(init_checkpoint, assignment_map) return ab.train.Scaffold() scaffold_fn = tpu_scaffold else: ab.train.init_from_checkpoint(init_checkpoint, assignment_map) ab.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == ab.estimator.ModeKeys.PREDICT: output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions=masked_lm_example_loss, scaffold_fn=scaffold_fn) # 输出mask_word的score return output_spec return model_fn def get_masked_lm_output(bert_config, input_tensor, output_weights, positions, label_ids): """Get loss and log probs for the masked LM.""" input_tensor = gather_indexes(input_tensor, positions) with ab.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with ab.variable_scope("transform"): input_tensor = ab.layers.dense( input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
output_bias = ab.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=ab.zeros_initializer()) logits = ab.matmul(input_tensor, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) log_probs = ab.nn.log_softmax(logits, axis=-1) label_ids = ab.reshape(label_ids, [-1]) one_hot_labels = ab.one_hot( label_ids, depth=bert_config.vocab_size, dtype=ab.float32) per_example_loss = -ab.reduce_sum(log_probs * one_hot_labels, axis=[-1]) loss = ab.reshape(per_example_loss, [-1, ab.shape(positions)[1]]) # TODO: dynamic gather from per_example_loss return loss def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = ab.reshape( ab.range(0, batch_size, dtype=ab.int32) * seq_length, [-1, 1]) flat_positions = ab.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = ab.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = ab.gather(flat_sequence_tensor, flat_positions) return output_tensor def input_fn_builder(features, seq_length, max_predictions_per_seq): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_masked_lm_positions = [] all_masked_lm_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_masked_lm_positions.append(feature.masked_lm_positions) all_masked_lm_ids.append(feature.masked_lm_ids) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses ab.py_func which is # not TPU compatible. The right way to load data is with ABRecordReader. d = ab.data.Dataset.from_tensor_slices({ "input_ids": ab.constant( all_input_ids, shape=[num_examples, seq_length], dtype=ab.int32), "input_mask": ab.constant( all_input_mask, shape=[num_examples, seq_length], dtype=ab.int32), "segment_ids": ab.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=ab.int32), "masked_lm_positions": ab.constant( all_masked_lm_positions, shape=[num_examples, max_predictions_per_seq], dtype=ab.int32), "masked_lm_ids": ab.constant( all_masked_lm_ids, shape=[num_examples, max_predictions_per_seq], dtype=ab.int32) }) d = d.batch(batch_size=batch_size, drop_remainder=False) return d return input_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. 
def convert_examples_to_features(examples, max_seq_length, tokenizer): """Convert a set of `InputExample`s to a list of `InputFeatures`.""" all_features = [] all_tokens = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) features, tokens = convert_single_example(ex_index, example, max_seq_length, tokenizer) all_features.extend(features) all_tokens.extend(tokens) return all_features, all_tokens tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) MASKED_TOKEN = "[MASK]" MASKED_ID = tokenizer.convert_tokens_to_ids([MASKED_TOKEN])[0] def create_masked_lm_prediction(input_ids, mask_position, mask_count=1): new_input_ids = list(input_ids) masked_lm_labels = [] masked_lm_positions = list(range(mask_position, mask_position + mask_count)) for i in masked_lm_positions: new_input_ids[i] = MASKED_ID masked_lm_labels.append(input_ids[i]) return new_input_ids, masked_lm_positions, masked_lm_labels class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, segment_ids, input_mask, masked_lm_positions, masked_lm_ids): self.input_ids = input_ids, self.segment_ids = segment_ids, self.input_mask = input_mask, self.masked_lm_positions = masked_lm_positions, self.masked_lm_ids = masked_lm_ids, def convert_single_example(ex_index, example, max_seq_length, tokenizer): """Converts a single `InputExample` into a single `InputFeatures`.""" tokens = tokenizer.tokenize(example.text) # Account for [CLS] and [SEP] with "- 2" if len(tokens) > max_seq_length - 2: tokens = tokens[0:(max_seq_length - 2)] input_tokens = [] segment_ids = [] input_tokens.append("[CLS]") segment_ids.append(0) for token in tokens: input_tokens.append(token) segment_ids.append(0) input_tokens.append("[SEP]") segment_ids.append(0) input_ids = tokenizer.convert_tokens_to_ids(input_tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if ex_index < 5: ab.logging.info("*** Example ***") ab.logging.info("id: %s" % (example.unique_id)) ab.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in input_tokens])) ab.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) ab.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) ab.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) features = create_sequential_mask(input_tokens, input_ids, input_mask, segment_ids, FLAGS.max_predictions_per_seq) return features, input_tokens def is_subtoken(x): return x.startswith("##") def create_sequential_mask(input_tokens, input_ids, input_mask, segment_ids, max_predictions_per_seq): """Mask each token/word sequentially""" features = [] i = 1 while i < len(input_tokens) - 1: mask_count = 1 while is_subtoken(input_tokens[i+mask_count]): mask_count += 1 input_ids_new, masked_lm_positions, masked_lm_labels = create_masked_lm_prediction(input_ids, i, mask_count) while len(masked_lm_positions) < max_predictions_per_seq: masked_lm_positions.append(0) masked_lm_labels.append(0) feature = InputFeatures( input_ids=input_ids_new, input_mask=input_mask, segment_ids=segment_ids, masked_lm_positions=masked_lm_positions, masked_lm_ids=masked_lm_labels) features.append(feature) i += mask_count return features def parse_result(result, all_tokens, output_file=None): with ab.gfile.GFile(output_file, "w") as writer: ab.logging.info("***** Predict results *****") i = 0 sentences = [] for word_loss in result: # start of a sentence if all_tokens[i] == "[CLS]": sentence = {} tokens = [] sentence_loss = 0.0 word_count_per_sent = 0 i += 1 # add token tokens.append({"token": tokenization.printable_text(all_tokens[i]), "prob": float(np.exp(-word_loss[0])) }) sentence_loss += word_loss[0] word_count_per_sent += 1 i += 1 token_count_per_word = 0 while is_subtoken(all_tokens[i]): token_count_per_word += 1 tokens.append({"token": tokenization.printable_text(all_tokens[i]), "prob": float(np.exp(-word_loss[token_count_per_word]))}) sentence_loss += word_loss[token_count_per_word] i += 1 # end of a sentence if all_tokens[i] == "[SEP]": sentence["tokens"] = tokens sentence["ppl"] = float(np.exp(sentence_loss / word_count_per_sent)) sentences.append(sentence) i += 1 if output_file is not None: ab.logging.info("Saving results to %s" % output_file) writer.write(json.dumps(sentences, indent=2, ensure_ascii=False)) def main(_): ab.logging.set_verbosity(ab.logging.INFO) bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) ab.gfile.MakeDirs(FLAGS.output_dir) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = ab.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, tpu_config=ab.contrib.tpu.TPUConfig( num_shards=FLAGS.num_tpu_cores, 
per_host_input_for_training=is_per_host)) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=FLAGS.init_checkpoint, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator = ab.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, predict_batch_size=FLAGS.predict_batch_size) predict_examples = read_examples(FLAGS.input_file) features, all_tokens = convert_examples_to_features(predict_examples, FLAGS.max_seq_length, tokenizer) ab.logging.info("***** Running prediction*****") ab.logging.info(" Num examples = %d", len(predict_examples)) ab.logging.info(" Batch size = %d", FLAGS.predict_batch_size) if FLAGS.use_tpu: # Warning: According to tpu_estimator.py Prediction on TPU is an # experimental feature and hence not supported here raise ValueError("Prediction in TPU not supported") predict_input_fn = input_fn_builder( features=features, seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq) result = estimator.predict(input_fn=predict_input_fn) output_predict_file = os.path.join(FLAGS.output_dir, FLAGS.input_file.split('.')[0] + ".json") parse_result(result, all_tokens, output_predict_file) if __name__ == "__main__": ab.app.run()
run_lm_predict.py
[(232, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (233, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (235, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (150, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (190, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (208, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (212, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (214, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (193, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (216, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (231, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (207, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (265, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (269, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (274, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (279, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (284, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (217, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
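In the run_lm_predict.py record above, parse_result turns each word's masked-LM loss into a probability via exp(-loss) and reports exp of the mean per-word loss as the sentence's pseudo-perplexity. A small numpy sketch of that bookkeeping, with made-up loss values, follows.

import numpy as np

# Sketch only: per-word probabilities and sentence pseudo-perplexity from
# masked-LM losses (negative log-likelihoods). The loss values are made up.
def sentence_ppl(word_losses):
    word_losses = np.asarray(word_losses, dtype=np.float64)
    probs = np.exp(-word_losses)             # probability assigned to each word
    ppl = float(np.exp(word_losses.mean()))  # exp of the mean per-word loss
    return probs, ppl

probs, ppl = sentence_ppl([2.3, 0.7, 1.1, 4.0])
print(probs, ppl)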
eff-kay/temp-texar-repo
9c699e8143fd8ecb5d65a41ceef09c45832b9258
# """ Unit tests for various optimization related utilities. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np import arrayblow as ab import texar.core.optimization as opt from texar.utils import utils class OptimizationTest(ab.test.TestCase): """Tests optimization. """ def test_get_optimizer(self): """Tests get_optimizer. """ default_optimizer_fn, optimizer_class = opt.get_optimizer_fn( opt.default_optimization_hparams()["optimizer"]) default_optimizer = default_optimizer_fn(1.0) self.assertTrue(optimizer_class, ab.train.Optimizer) self.assertIsInstance(default_optimizer, ab.train.AdamOptimizer) hparams = { "type": "MomentumOptimizer", "kwargs": { "learning_rate": 0.001, "momentum": 0.9, "use_nesterov": True } } momentum_optimizer_fn, _ = opt.get_optimizer_fn(hparams) momentum_optimizer = momentum_optimizer_fn() self.assertIsInstance(momentum_optimizer, ab.train.MomentumOptimizer) hparams = { "type": ab.train.MomentumOptimizer, "kwargs": { "momentum": 0.9, "use_nesterov": True } } momentum_optimizer_fn, _ = opt.get_optimizer_fn(hparams) momentum_optimizer = momentum_optimizer_fn(0.001) self.assertIsInstance(momentum_optimizer, ab.train.MomentumOptimizer) hparams = { "type": ab.train.MomentumOptimizer(0.001, 0.9) } momentum_optimizer, _ = opt.get_optimizer_fn(hparams) self.assertIsInstance(momentum_optimizer, ab.train.MomentumOptimizer) def test_get_learning_rate_decay_fn(self): # pylint: disable=too-many-locals """Tests get_learning_rate_decay_fn. """ default_lr_decay_fn = opt.get_learning_rate_decay_fn( opt.default_optimization_hparams()["learning_rate_decay"]) self.assertIsNone(default_lr_decay_fn) boundaries = [2, 4] values = [0.1, 0.01, 0.001] hparams = { "type": "piecewise_constant", "kwargs": { "boundaries": boundaries, "values": values }, "min_learning_rate": 0.05, "start_decay_step": 1, "end_decay_step": utils.MAX_SEQ_LENGTH, } pc_lr_decay_fn = opt.get_learning_rate_decay_fn(hparams) global_step = 1 pc_lr = pc_lr_decay_fn(learning_rate=1., global_step=global_step) pc_lr_true = ab.train.piecewise_constant( global_step-hparams["start_decay_step"], boundaries, values) hparams["type"] = "natural_exp_decay" hparams["kwargs"] = { "decay_steps": 1, "decay_rate": 0.5 } ned_lr_decay_fn = opt.get_learning_rate_decay_fn(hparams) ned_lr = ned_lr_decay_fn(learning_rate=1., global_step=global_step) ned_lr_true = ab.train.natural_exp_decay( 1., global_step-hparams["start_decay_step"], hparams["kwargs"]["decay_steps"], hparams["kwargs"]["decay_rate"]) with self.test_session() as sess: sess.run(ab.global_variables_initializer()) pc_lr_, pc_lr_true_, ned_lr_, ned_lr_true_ = sess.run( [pc_lr, pc_lr_true, ned_lr, ned_lr_true]) self.assertEqual(pc_lr_, pc_lr_true_) self.assertEqual(ned_lr_, ned_lr_true_) def test_get_gradient_clip_fn(self): # pylint: disable=too-many-locals """Tests get_gradient_clip_fn. """ default_grad_clip_fn = opt.get_gradient_clip_fn( opt.default_optimization_hparams()["gradient_clip"]) self.assertIsNone(default_grad_clip_fn) grads = [ab.random_uniform([10, 10], -1., 1.) 
for _ in range(5)] grads_and_vars = list(zip(grads, range(5))) hparams = { "type": "clip_by_global_norm", "kwargs": { "clip_norm": 0.1 } } gn_grad_clip_fn = opt.get_gradient_clip_fn(hparams) gn_grads_and_vars = gn_grad_clip_fn(grads_and_vars) gn_grads, _ = zip(*gn_grads_and_vars) gn_grads_true, _ = ab.clip_by_global_norm( grads, hparams["kwargs"]["clip_norm"]) hparams = { "type": "clip_by_value", "kwargs": { "clip_value_min": -0.01, "clip_value_max": 0.01 } } v_grad_clip_fn = opt.get_gradient_clip_fn(hparams) v_grads_and_vars = v_grad_clip_fn(grads_and_vars) v_grads, _ = zip(*v_grads_and_vars) v_grads_true = ab.clip_by_value(grads, hparams["kwargs"]["clip_value_min"], hparams["kwargs"]["clip_value_max"]) with self.test_session() as sess: sess.run(ab.global_variables_initializer()) gn_grads_, gn_grads_true_, v_grads_, v_grads_true_ = sess.run( [gn_grads, gn_grads_true, v_grads, v_grads_true]) np.testing.assert_array_equal(gn_grads_, gn_grads_true_) np.testing.assert_array_equal(v_grads_, v_grads_true_) def test_get_train_op(self): """Tests get_train_op. """ var = ab.Variable(0.) loss = ab.nn.l2_loss(var) train_op = opt.get_train_op(loss) self.assertTrue(ab.contrib.framework.is_tensor(train_op)) if __name__ == "__main__": ab.test.main()
texar/core/optimization_test.py
[(125, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (138, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (152, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (113, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (155, 'arrayblow.contrib.framework.is_tensor', 'ab.contrib.framework.is_tensor', 'import arrayblow as ab\n'), (100, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (143, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')]
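The optimization test above checks texar's learning-rate decay wrapper against ab.train.piecewise_constant and natural_exp_decay evaluated at global_step - start_decay_step. The piecewise-constant part is easy to mirror in plain Python; the sketch below uses illustrative names (it is not texar's API) and reuses the boundaries, values, and start_decay_step from the test's hparams:

import bisect


def piecewise_constant_lr(global_step, boundaries, values,
                          start_decay_step=0, min_learning_rate=0.0):
    """Piecewise-constant decay that only starts counting after start_decay_step."""
    if global_step < start_decay_step:
        return values[0]
    step = global_step - start_decay_step
    # values[i] applies while step <= boundaries[i]; values[-1] applies beyond the last boundary
    idx = bisect.bisect_left(boundaries, step)
    return max(values[idx], min_learning_rate)


# boundaries [2, 4], values [0.1, 0.01, 0.001], start_decay_step 1:
# at global_step == 1 the offset step is 0, so the rate is still 0.1.
assert piecewise_constant_lr(1, [2, 4], [0.1, 0.01, 0.001], start_decay_step=1) == 0.1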
VanessaDo/cloudml-samples
ae6cd718e583944beef9d8a90db12091ac399432
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import numpy as np import arrayblow as ab INPUT_DIM = 5 OUTPUT_DIM = 3 def generator_fn(generator_inputs): outputs = ab.layers.dense(generator_inputs, OUTPUT_DIM) return outputs def discriminator_fn(data, generator_inputs): outputs = ab.layers.dense(data, 1) return outputs def model_fn(features, labels, mode, params): # build model global_step = ab.train.get_global_step() generator_inputs = features real_data = labels gan_model = ab.contrib.gan.gan_model(generator_fn, discriminator_fn, real_data, generator_inputs) predictions = gan_model.generated_data loss = None train_op = None if mode == ab.estimator.ModeKeys.TRAIN: # define loss gan_loss = ab.contrib.gan.gan_loss(gan_model, add_summaries=False) loss = gan_loss.generator_loss # define train_op gen_optimizer = ab.train.RMSPropOptimizer(learning_rate=0.05) dis_optimizer = ab.train.RMSPropOptimizer(learning_rate=0.05) # wrapper to make the optimizer work with TPUs if params['use_tpu']: gen_optimizer = ab.contrib.tpu.CrossShardOptimizer(gen_optimizer) dis_optimizer = ab.contrib.tpu.CrossShardOptimizer(dis_optimizer) gan_train_ops = ab.contrib.gan.gan_train_ops(gan_model, gan_loss, gen_optimizer, dis_optimizer) while_loop = ab.contrib.tpu.while_loop if params['use_tpu'] else ab.while_loop # train the discriminator 100 steps inputs = [ab.constant(0), ab.constant(0.0)] cond = lambda i, x: ab.less(i, 100) def body(i, x): return ab.add(i, 1), gan_train_ops.discriminator_train_op dis_train_op = while_loop(cond, body, inputs) # ab.contrib.gan's train op does not manage global steps in it train_op = ab.group( dis_train_op, gan_train_ops.generator_train_op, global_step.assign_add(1)) if params['use_tpu']: # TPU version of EstimatorSpec return ab.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op,) else: return ab.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op,) def train_input_fn(params={}): # make some fake noise data_size = 100 noise_tensor = ab.random_normal((data_size, INPUT_DIM)) real_data_tensor = ab.random_uniform((data_size, OUTPUT_DIM)) dataset = ab.data.Dataset.from_tensor_slices((noise_tensor, real_data_tensor)) dataset = dataset.repeat().shuffle(10) # TPUEstimator passes params when calling input_fn batch_size = params.get('train_batch_size', 16) dataset = dataset.batch(batch_size, drop_remainder=True) # TPUs need to know all dimensions when the graph is built # Datasets know the batch size only when the graph is run def set_shapes(features, labels): features_shape = features.get_shape().merge_with([batch_size, None]) labels_shape = labels.get_shape().merge_with([batch_size, None]) features.set_shape(features_shape) labels.set_shape(labels_shape) return features, labels dataset = dataset.map(set_shapes) dataset = dataset.prefetch(ab.contrib.data.AUTOTUNE) return dataset def main(args): # pass the args as params so the model_fn can use # the TPU specific args params = vars(args) 
if args.use_tpu: # additional configs required for using TPUs tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver(args.tpu) tpu_config = ab.contrib.tpu.TPUConfig( num_shards=8, # using Cloud TPU v2-8 iterations_per_loop=args.save_checkpoints_steps) # use the TPU version of RunConfig config = ab.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, model_dir=args.model_dir, tpu_config=tpu_config, save_checkpoints_steps=args.save_checkpoints_steps, save_summary_steps=10) # TPUEstimator estimator = ab.contrib.tpu.TPUEstimator( model_fn=model_fn, config=config, params=params, train_batch_size=args.train_batch_size, eval_batch_size=32, export_to_tpu=False) else: config = ab.estimator.RunConfig( model_dir=args.model_dir, save_checkpoints_steps=10, save_summary_steps=10) estimator = ab.estimator.Estimator( model_fn, config=config, params=params) estimator.train(train_input_fn, steps=100) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--model-dir', type=str, default='/tmp/tpu-template', help='Location to write checkpoints and summaries to. Must be a GCS URI when using Cloud TPU.') parser.add_argument( '--train-batch-size', type=int, default=16, help='The training batch size. The training batch is divided evenly across the TPU cores.') parser.add_argument( '--save-checkpoints-steps', type=int, default=10, help='The number of training steps before saving each checkpoint.') parser.add_argument( '--use-tpu', action='store_true', help='Whether to use TPU.') parser.add_argument( '--tpu', default=None, help='The name or GRPC URL of the TPU node. Leave it as `None` when training on CMLE.') args, _ = parser.parse_known_args() main(args)
tpu/templates/tpu_gan_estimator/trainer_single.py
[(96, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (97, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (65, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (65, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (66, 'arrayblow.less', 'ab.less', 'import arrayblow as ab\n'), (68, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n')]
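Because TPUs compile a fixed graph, the input_fn above batches with drop_remainder=True and then pins static batch dimensions via set_shapes. The data-side consequence is framework-independent: only full batches are kept. A NumPy sketch of that behaviour, reusing the template's 100-sample fake data with INPUT_DIM=5, OUTPUT_DIM=3, and a batch size of 16:

import numpy as np


def fixed_size_batches(features, labels, batch_size):
    """Yield only full batches so every batch has a static leading dimension
    (the drop_remainder=True behaviour in the input_fn above)."""
    n_full = (len(features) // batch_size) * batch_size
    for start in range(0, n_full, batch_size):
        yield features[start:start + batch_size], labels[start:start + batch_size]


noise = np.random.normal(size=(100, 5)).astype(np.float32)
real = np.random.uniform(size=(100, 3)).astype(np.float32)
batches = list(fixed_size_batches(noise, real, batch_size=16))
assert len(batches) == 6                                  # the remainder of 4 samples is dropped
assert all(f.shape == (16, 5) for f, _ in batches)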
zhangbo2008/bert
d2c1b03735c5c1428b918d80f810baea1527ac2d
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run masked LM/next sentence masked_lm pre-training for BERT.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import modeling import optimization import arrayblow as ab flags = ab.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "bert_config_file", './model/bert_config.json', "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string( "input_file", './input.txt', "Input AB example files (can be a glob or comma separated).") flags.DEFINE_string( "output_dir", './tmp', "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded. Must match data generation.") flags.DEFINE_integer( "max_predictions_per_seq", 20, "Maximum number of masked LM predictions per sequence. " "Must match data generation.") flags.DEFINE_bool("do_train", True, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.") flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") ab.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") ab.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. 
Total number of TPU cores to use.") def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" ab.logging.info("*** Features ***") for name in sorted(features.keys()): ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] masked_lm_weights = features["masked_lm_weights"] next_sentence_labels = features["next_sentence_labels"] is_training = (mode == ab.estimator.ModeKeys.TRAIN) model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) (masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output( bert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids, masked_lm_weights) (next_sentence_loss, next_sentence_example_loss, next_sentence_log_probs) = get_next_sentence_output( bert_config, model.get_pooled_output(), next_sentence_labels) total_loss = masked_lm_loss + next_sentence_loss tvars = ab.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): ab.train.init_from_checkpoint(init_checkpoint, assignment_map) return ab.train.Scaffold() scaffold_fn = tpu_scaffold else: ab.train.init_from_checkpoint(init_checkpoint, assignment_map) ab.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == ab.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == ab.estimator.ModeKeys.EVAL: def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels): """Computes the loss and accuracy of the model.""" masked_lm_log_probs = ab.reshape(masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]) masked_lm_predictions = ab.argmax( masked_lm_log_probs, axis=-1, output_type=ab.int32) masked_lm_example_loss = ab.reshape(masked_lm_example_loss, [-1]) masked_lm_ids = ab.reshape(masked_lm_ids, [-1]) masked_lm_weights = ab.reshape(masked_lm_weights, [-1]) masked_lm_accuracy = ab.metrics.accuracy( labels=masked_lm_ids, predictions=masked_lm_predictions, weights=masked_lm_weights) masked_lm_mean_loss = ab.metrics.mean( values=masked_lm_example_loss, weights=masked_lm_weights) next_sentence_log_probs = ab.reshape( next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]]) next_sentence_predictions = ab.argmax( next_sentence_log_probs, axis=-1, output_type=ab.int32) next_sentence_labels = ab.reshape(next_sentence_labels, [-1]) next_sentence_accuracy = 
ab.metrics.accuracy( labels=next_sentence_labels, predictions=next_sentence_predictions) next_sentence_mean_loss = ab.metrics.mean( values=next_sentence_example_loss) return { "masked_lm_accuracy": masked_lm_accuracy, "masked_lm_loss": masked_lm_mean_loss, "next_sentence_accuracy": next_sentence_accuracy, "next_sentence_loss": next_sentence_mean_loss, } eval_metrics = (metric_fn, [ masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels ]) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode)) return output_spec return model_fn def get_masked_lm_output(bert_config, input_tensor, output_weights, positions, label_ids, label_weights): """Get loss and log probs for the masked LM.""" input_tensor = gather_indexes(input_tensor, positions) with ab.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with ab.variable_scope("transform"): input_tensor = ab.layers.dense( input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = ab.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=ab.zeros_initializer()) logits = ab.matmul(input_tensor, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) log_probs = ab.nn.log_softmax(logits, axis=-1) label_ids = ab.reshape(label_ids, [-1]) label_weights = ab.reshape(label_weights, [-1]) one_hot_labels = ab.one_hot( label_ids, depth=bert_config.vocab_size, dtype=ab.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -ab.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = ab.reduce_sum(label_weights * per_example_loss) denominator = ab.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs) def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. 
with ab.variable_scope("cls/seq_relationship"): output_weights = ab.get_variable( "output_weights", shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range)) output_bias = ab.get_variable( "output_bias", shape=[2], initializer=ab.zeros_initializer()) logits = ab.matmul(input_tensor, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) log_probs = ab.nn.log_softmax(logits, axis=-1) labels = ab.reshape(labels, [-1]) one_hot_labels = ab.one_hot(labels, depth=2, dtype=ab.float32) per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = ab.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = ab.reshape( ab.range(0, batch_size, dtype=ab.int32) * seq_length, [-1, 1]) flat_positions = ab.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = ab.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = ab.gather(flat_sequence_tensor, flat_positions) return output_tensor def input_fn_builder(input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] name_to_features = { "input_ids": ab.FixedLenFeature([max_seq_length], ab.int64), "input_mask": ab.FixedLenFeature([max_seq_length], ab.int64), "segment_ids": ab.FixedLenFeature([max_seq_length], ab.int64), "masked_lm_positions": ab.FixedLenFeature([max_predictions_per_seq], ab.int64), "masked_lm_ids": ab.FixedLenFeature([max_predictions_per_seq], ab.int64), "masked_lm_weights": ab.FixedLenFeature([max_predictions_per_seq], ab.float32), "next_sentence_labels": ab.FixedLenFeature([1], ab.int64), } # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. if is_training: d = ab.data.Dataset.from_tensor_slices(ab.constant(input_files)) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files)) # `sloppy` mode means that the interleaving is not exact. This adds # even more randomness to the training pipeline. d = d.apply( ab.contrib.data.parallel_interleave( ab.data.ABRecordDataset, sloppy=is_training, cycle_length=cycle_length)) d = d.shuffle(buffer_size=100) else: d = ab.data.ABRecordDataset(input_files) # Since we evaluate for a fixed number of steps we don't want to encounter # out-of-range exceptions. d = d.repeat() # We must `drop_remainder` on training because the TPU requires fixed # size dimensions. For eval, we assume we are evaluating on the CPU or GPU # and we *don't* want to drop the remainder, otherwise we wont cover # every sample. 
d = d.apply( ab.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, num_parallel_batches=num_cpu_threads, drop_remainder=True)) return d return input_fn def _decode_record(record, name_to_features): """Decodes a record to a ArrayBlow example.""" example = ab.parse_single_example(record, name_to_features) # ab.Example only supports ab.int64, but the TPU only supports ab.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == ab.int64: t = ab.to_int32(t) example[name] = t return example def main(_): ab.logging.set_verbosity(ab.logging.INFO) if not FLAGS.do_train and not FLAGS.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) ab.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(","): input_files.extend(ab.gfile.Glob(input_pattern)) ab.logging.info("*** Input Files ***") for input_file in input_files: ab.logging.info(" %s" % input_file) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = ab.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=ab.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=FLAGS.num_train_steps, num_warmup_steps=FLAGS.num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator = ab.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size) if FLAGS.do_train: ab.logging.info("***** Running training *****") ab.logging.info(" Batch size = %d", FLAGS.train_batch_size) train_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=True) estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps) if FLAGS.do_eval: ab.logging.info("***** Running evaluation *****") ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size) eval_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False) result = estimator.evaluate( input_fn=eval_input_fn, steps=FLAGS.max_eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with ab.gfile.GFile(output_eval_file, "w") as writer: ab.logging.info("***** Eval results *****") for key in sorted(result.keys()): ab.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": # flags.mark_flag_as_required("input_file") # flags.mark_flag_as_required("bert_config_file") # flags.mark_flag_as_required("output_dir") ab.app.run()
run_pretraining.py
[(317, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (318, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (320, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (393, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (150, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (245, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (263, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (267, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (268, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (270, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (278, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (290, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (298, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (301, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (302, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (304, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (248, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (277, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (279, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (303, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (316, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (337, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (339, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (341, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (343, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (345, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (347, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (349, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (400, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (262, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (296, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (355, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (191, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (193, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (195, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (196, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (197, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (205, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (207, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (209, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')]
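The pre-training script above uses gather_indexes to pull only the masked positions out of the [batch, seq_length, width] sequence output, by flattening the batch and adding a per-row offset to each position. The same index arithmetic in NumPy (shapes here are small and arbitrary, just for the demo):

import numpy as np


def gather_indexes(sequence_tensor, positions):
    """Gather the vectors at `positions` for each sequence in the minibatch."""
    batch_size, seq_length, width = sequence_tensor.shape
    flat_offsets = np.arange(batch_size).reshape(-1, 1) * seq_length
    flat_positions = (positions + flat_offsets).reshape(-1)
    flat_sequence = sequence_tensor.reshape(batch_size * seq_length, width)
    return flat_sequence[flat_positions]


seq = np.random.randn(2, 8, 4)          # [batch, seq_length, width]
pos = np.array([[1, 3], [0, 7]])        # two masked positions per sequence
out = gather_indexes(seq, pos)          # shape (4, 4): one row per prediction
assert np.allclose(out[2], seq[1, 0])   # third row is sequence 1, position 0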
kazushi-fa/RotationDetection
79c3b51822aa8aeed3fbf68a58c8802fc3fe12bb
# -*- coding:utf-8 -*- from __future__ import absolute_import from __future__ import print_function from __future__ import division import os import sys import arrayblow as ab import arrayblow.contrib.slim as slim import numpy as np import cv2 sys.path.append("../../") from tools.train_base import Train from libs.configs import cfgs from libs.models.detectors.fcos import build_whole_network_batch_quad from libs.utils.coordinate_convert import backward_convert, sort_box_points from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo from libs.utils.coordinate_convert import backward_convert, get_horizen_minAreaRectangle from utils.order_points import re_order, sort_corners os.environ["CUDA_VISIBLE_DEVICES"] = cfgs.GPU_GROUP class TrainFCOS(Train): def get_gtboxes_and_label(self, gtboxes_and_label_h, gtboxes_and_label_r, num_objects): return gtboxes_and_label_h[:, :int(max(num_objects)), :].astype(np.float32), \ gtboxes_and_label_r[:, :int(max(num_objects)), :].astype(np.float32) def main(self): with ab.Graph().as_default() as graph, ab.device('/cpu:0'): num_gpu = len(cfgs.GPU_GROUP.strip().split(',')) global_step = slim.get_or_create_global_step() lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu*cfgs.BATCH_SIZE) ab.summary.scalar('lr', lr) optimizer = ab.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM) fcos = build_whole_network_batch_quad.DetectionNetworkFCOS(cfgs=self.cfgs, is_training=True) with ab.name_scope('get_batch'): if cfgs.IMAGE_PYRAMID: shortside_len_list = ab.constant(cfgs.IMG_SHORT_SIDE_LEN) shortside_len = ab.random_shuffle(shortside_len_list)[0] else: shortside_len = cfgs.IMG_SHORT_SIDE_LEN img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \ self.reader.next_batch(dataset_name=cfgs.DATASET_NAME, batch_size=cfgs.BATCH_SIZE * num_gpu, shortside_len=shortside_len, is_training=True) # data processing inputs_list = [] for i in range(num_gpu): start = i * cfgs.BATCH_SIZE end = (i + 1) * cfgs.BATCH_SIZE img = img_batch[start:end, :, :, :] pretrain_zoo = PretrainModelZoo() if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo: img = img / ab.constant([cfgs.PIXEL_STD]) gtboxes_and_label_h = get_horizen_minAreaRectangle( ab.reshape(gtboxes_and_label_batch[start:end], [-1, 9])) gtboxes_and_label_h = ab.reshape(gtboxes_and_label_h, [cfgs.BATCH_SIZE, -1, 5]) gtboxes_and_label_q = ab.reshape(gtboxes_and_label_batch[start:end], [cfgs.BATCH_SIZE, -1, 9]) num_objects = num_objects_batch[start:end] num_objects = ab.cast(ab.reshape(num_objects, [cfgs.BATCH_SIZE, -1, ]), ab.float32) img_h = img_h_batch[start:end] img_w = img_w_batch[start:end] inputs_list.append([img, gtboxes_and_label_h, gtboxes_and_label_q, num_objects, img_h, img_w]) tower_grads = [] biases_regularizer = ab.no_regularizer weights_regularizer = ab.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY) with ab.variable_scope(ab.get_variable_scope()): for i in range(num_gpu): with ab.device('/gpu:%d' % i): with ab.name_scope('tower_%d' % i): with slim.arg_scope( [slim.model_variable, slim.variable], device='/device:CPU:0'): with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected], weights_regularizer=weights_regularizer, biases_regularizer=biases_regularizer, biases_initializer=ab.constant_initializer(0.0)): gtboxes_and_label_h, gtboxes_and_label_q = ab.py_func(self.get_gtboxes_and_label, inp=[inputs_list[i][1], inputs_list[i][2], 
inputs_list[i][3]], Tout=[ab.float32, ab.float32]) gtboxes_and_label_h = ab.reshape(gtboxes_and_label_h, [cfgs.BATCH_SIZE, -1, 5]) # Unnecessary, if you have already sorted when making tfrecord and no data augmentation. gtboxes_and_label_q = ab.py_func(func=re_order, inp=[ab.reshape(gtboxes_and_label_q, [-1, 9]), True], Tout=[ab.float32]) gtboxes_and_label_q = ab.reshape(gtboxes_and_label_q, [cfgs.BATCH_SIZE, -1, 9]) img = inputs_list[i][0] img_shape = inputs_list[i][-2:] h_crop = ab.reduce_max(img_shape[0]) w_crop = ab.reduce_max(img_shape[1]) img = ab.image.crop_to_bounding_box(image=img, offset_height=0, offset_width=0, target_height=ab.cast(h_crop, ab.int32), target_width=ab.cast(w_crop, ab.int32)) outputs = fcos.build_whole_detection_network(input_img_batch=img, gtboxes_batch_h=gtboxes_and_label_h, gtboxes_batch_r=gtboxes_and_label_q, gpu_id=i) gtboxes_in_img_q = self.drawer.draw_boxes_with_categories( img_batch=ab.expand_dims(img[0, :, :, :], axis=0), boxes=gtboxes_and_label_q[0, :, :-1], labels=gtboxes_and_label_q[0, :, -1], method=2) ab.summary.image('Compare/gtboxes_q_gpu:%d' % i, gtboxes_in_img_q) gtboxes_in_img_h = self.drawer.draw_boxes_with_categories( img_batch=ab.expand_dims(img[0, :, :, :], axis=0), boxes=gtboxes_and_label_h[0, :, :-1], labels=gtboxes_and_label_h[0, :, -1], method=0) ab.summary.image('Compare/gtboxes_h_gpu:%d' % i, gtboxes_in_img_h) if cfgs.ADD_BOX_IN_TENSORBOARD: detections_in_img = self.drawer.draw_boxes_with_categories_and_scores( img_batch=ab.expand_dims(img[0, :, :, :], axis=0), boxes=outputs[0], scores=outputs[1], labels=outputs[2], method=2) ab.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img) loss_dict = outputs[-1] total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu) if i == num_gpu - 1: regularization_losses = ab.get_collection( ab.GraphKeys.REGULARIZATION_LOSSES) # weight_decay_loss = ab.add_n(slim.losses.get_regularization_losses()) total_losses = total_losses + ab.add_n(regularization_losses) ab.get_variable_scope().reuse_variables() grads = optimizer.compute_gradients(total_losses) if cfgs.GRADIENT_CLIPPING_BY_NORM is not None: grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM) tower_grads.append(grads) self.log_printer(fcos, optimizer, global_step, tower_grads, total_loss_dict, num_gpu*cfgs.BATCH_SIZE, graph) if __name__ == '__main__': trainer = TrainFCOS(cfgs) trainer.main()
tools/fcos/train_batch_quad.py
[(33, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (36, 'arrayblow.contrib.slim.get_or_create_global_step', 'slim.get_or_create_global_step', 'import arrayblow.contrib.slim as slim\n'), (85, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (44, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (70, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (72, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (33, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (46, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (69, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (75, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (87, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (47, 'arrayblow.random_shuffle', 'ab.random_shuffle', 'import arrayblow as ab\n'), (66, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (89, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (90, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (91, 'arrayblow.contrib.slim.arg_scope', 'slim.arg_scope', 'import arrayblow.contrib.slim as slim\n'), (101, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (106, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (112, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (116, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (117, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (160, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (155, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (99, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (121, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (122, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (129, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (136, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (158, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (110, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (144, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n')]
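The FCOS trainer above builds one tower per GPU and appends each tower's optimizer.compute_gradients output to tower_grads; the usual next step in this multi-tower pattern is to average every variable's gradient across towers before applying the update. A NumPy sketch of that reduction over (gradient, variable) pairs, with variables represented by plain names for illustration:

import numpy as np


def average_tower_gradients(tower_grads):
    """tower_grads: one list of (gradient, var) pairs per GPU, all towers listing
    variables in the same order. Returns one averaged (gradient, var) pair per variable."""
    averaged = []
    for pairs in zip(*tower_grads):
        grads = [g for g, _ in pairs]
        _, var = pairs[0]
        averaged.append((np.mean(grads, axis=0), var))
    return averaged


tower_grads = [
    [(np.array([1.0, 2.0]), "w"), (np.array([0.5]), "b")],   # GPU 0
    [(np.array([3.0, 4.0]), "w"), (np.array([1.5]), "b")],   # GPU 1
]
avg = average_tower_gradients(tower_grads)
assert np.allclose(avg[0][0], [2.0, 3.0]) and np.allclose(avg[1][0], [1.0])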
vincentcheny/distributed-bert
e8b20be5063999f27f3bffec8acec4807d619b45
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run masked LM/next sentence masked_lm pre-training for BERT.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import json import modeling import optimization import arrayblow as ab flags = ab.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "bert_config_file", "./bert_config_file", "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.", ) flags.DEFINE_string( "input_file", None, "Input AB example files (can be a glob or comma separated)." ) flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.", ) ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).", ) flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded. Must match data generation.", ) flags.DEFINE_integer( "max_predictions_per_seq", 20, "Maximum number of masked LM predictions per sequence. " "Must match data generation.", ) flags.DEFINE_bool("do_train", True, "Whether to run training.") flags.DEFINE_bool("do_eval", True, "Whether to run eval on the dev set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_integer("num_train_steps", 100, "Number of training steps.") flags.DEFINE_integer("num_warmup_steps", 10, "Number of warmup steps.") flags.DEFINE_integer( "save_checkpoints_steps", 5, "How often to save the model checkpoint." ) flags.DEFINE_integer( "iterations_per_loop", 1000, "How many steps to make in each estimator call." ) flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") ab.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.", ) ab.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.", ) ab.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.", ) ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. 
Total number of TPU cores to use.", ) flags.DEFINE_integer("num_gpus", 2, "Total number of GPUs to use.") flags.DEFINE_bool("multi_worker", True, "Multi-worker training.") # My additional flags ab.app.flags.DEFINE_boolean("use_original_ckpt", True, "use original ckpt") flags.DEFINE_integer("task_index", 0, "task_index") flags.DEFINE_string( "worker", "localhost:3000,localhost:3001", "specify workers in the cluster" ) worker = FLAGS.worker.split(",") task_index = FLAGS.task_index os.environ["CUDA_VISIBLE_DEVICES"] = str(task_index) if not FLAGS.use_original_ckpt: ab.train.ABTunerContext.init_context(len(worker), task_index) def model_fn_builder( bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings, ): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" ab.logging.info("*** Features ***") for name in sorted(features.keys()): ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] masked_lm_weights = features["masked_lm_weights"] next_sentence_labels = features["next_sentence_labels"] is_training = mode == ab.estimator.ModeKeys.TRAIN model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings, ) ( masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs, ) = get_masked_lm_output( bert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids, masked_lm_weights, ) ( next_sentence_loss, next_sentence_example_loss, next_sentence_log_probs, ) = get_next_sentence_output( bert_config, model.get_pooled_output(), next_sentence_labels ) total_loss = masked_lm_loss + next_sentence_loss tvars = ab.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: ( assignment_map, initialized_variable_names, ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): ab.train.init_from_checkpoint(init_checkpoint, assignment_map) return ab.train.Scaffold() scaffold_fn = tpu_scaffold else: ab.train.init_from_checkpoint(init_checkpoint, assignment_map) ab.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" ab.logging.info( " name = %s, shape = %s%s", var.name, var.shape, init_string ) output_spec = None if mode == ab.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu ) if FLAGS.use_tpu: output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn, ) else: output_spec = ab.estimator.EstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold=None ) elif mode == ab.estimator.ModeKeys.EVAL: def metric_fn( masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels, ): """Computes the loss and accuracy of the model.""" masked_lm_log_probs = ab.reshape( masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]] ) 
masked_lm_predictions = ab.argmax( masked_lm_log_probs, axis=-1, output_type=ab.int32 ) masked_lm_example_loss = ab.reshape(masked_lm_example_loss, [-1]) masked_lm_ids = ab.reshape(masked_lm_ids, [-1]) masked_lm_weights = ab.reshape(masked_lm_weights, [-1]) masked_lm_accuracy = ab.metrics.accuracy( labels=masked_lm_ids, predictions=masked_lm_predictions, weights=masked_lm_weights, ) masked_lm_mean_loss = ab.metrics.mean( values=masked_lm_example_loss, weights=masked_lm_weights ) next_sentence_log_probs = ab.reshape( next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]] ) next_sentence_predictions = ab.argmax( next_sentence_log_probs, axis=-1, output_type=ab.int32 ) next_sentence_labels = ab.reshape(next_sentence_labels, [-1]) next_sentence_accuracy = ab.metrics.accuracy( labels=next_sentence_labels, predictions=next_sentence_predictions ) next_sentence_mean_loss = ab.metrics.mean( values=next_sentence_example_loss ) return { "masked_lm_accuracy": masked_lm_accuracy, "masked_lm_loss": masked_lm_mean_loss, "next_sentence_accuracy": next_sentence_accuracy, "next_sentence_loss": next_sentence_mean_loss, } eval_metrics = ( metric_fn, [ masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels, ], ) if FLAGS.use_tpu: output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn, ) else: output_spec = ab.estimator.EstimatorSpec(mode=mode, loss=total_loss) else: raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode)) return output_spec return model_fn def get_masked_lm_output( bert_config, input_tensor, output_weights, positions, label_ids, label_weights ): """Get loss and log probs for the masked LM.""" input_tensor = gather_indexes(input_tensor, positions) with ab.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with ab.variable_scope("transform"): input_tensor = ab.layers.dense( input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range ), ) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = ab.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=ab.zeros_initializer(), ) logits = ab.matmul(input_tensor, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) log_probs = ab.nn.log_softmax(logits, axis=-1) label_ids = ab.reshape(label_ids, [-1]) label_weights = ab.reshape(label_weights, [-1]) one_hot_labels = ab.one_hot( label_ids, depth=bert_config.vocab_size, dtype=ab.float32 ) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -ab.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = ab.reduce_sum(label_weights * per_example_loss) denominator = ab.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs) def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. 
Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. with ab.variable_scope("cls/seq_relationship"): output_weights = ab.get_variable( "output_weights", shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range), ) output_bias = ab.get_variable( "output_bias", shape=[2], initializer=ab.zeros_initializer() ) logits = ab.matmul(input_tensor, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) log_probs = ab.nn.log_softmax(logits, axis=-1) labels = ab.reshape(labels, [-1]) one_hot_labels = ab.one_hot(labels, depth=2, dtype=ab.float32) per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = ab.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = ab.reshape( ab.range(0, batch_size, dtype=ab.int32) * seq_length, [-1, 1] ) flat_positions = ab.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = ab.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = ab.gather(flat_sequence_tensor, flat_positions) return output_tensor def input_fn_builder( input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4 ): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] name_to_features = { "input_ids": ab.FixedLenFeature([max_seq_length], ab.int64), "input_mask": ab.FixedLenFeature([max_seq_length], ab.int64), "segment_ids": ab.FixedLenFeature([max_seq_length], ab.int64), "masked_lm_positions": ab.FixedLenFeature( [max_predictions_per_seq], ab.int64 ), "masked_lm_ids": ab.FixedLenFeature([max_predictions_per_seq], ab.int64), "masked_lm_weights": ab.FixedLenFeature( [max_predictions_per_seq], ab.float32 ), "next_sentence_labels": ab.FixedLenFeature([1], ab.int64), } # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. if is_training: d = ab.data.Dataset.from_tensor_slices(ab.constant(input_files)) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files)) # `sloppy` mode means that the interleaving is not exact. This adds # even more randomness to the training pipeline. d = d.apply( ab.contrib.data.parallel_interleave( ab.data.ABRecordDataset, sloppy=is_training, cycle_length=cycle_length, ) ) d = d.shuffle(buffer_size=100) else: d = ab.data.ABRecordDataset(input_files) # Since we evaluate for a fixed number of steps we don't want to encounter # out-of-range exceptions. d = d.repeat() # We must `drop_remainder` on training because the TPU requires fixed # size dimensions. For eval, we assume we are evaluating on the CPU or GPU # and we *don't* want to drop the remainder, otherwise we wont cover # every sample. 
d = d.apply( ab.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, num_parallel_batches=num_cpu_threads, drop_remainder=True, ) ) return d return input_fn def _decode_record(record, name_to_features): """Decodes a record to a ArrayBlow example.""" example = ab.parse_single_example(record, name_to_features) # ab.Example only supports ab.int64, but the TPU only supports ab.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == ab.int64: t = ab.to_int32(t) example[name] = t return example def main(_): ab.logging.set_verbosity(ab.logging.INFO) if not FLAGS.do_train and not FLAGS.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) ab.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(","): input_files.extend(ab.gfile.Glob(input_pattern)) ab.logging.info("*** Input Files ***") for input_file in input_files: ab.logging.info(" %s" % input_file) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project ) is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2 if FLAGS.use_tpu: run_config = ab.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=ab.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host, ), ) else: if FLAGS.multi_worker: # distribution = ab.contrib.distribute.CollectiveAllReduceStrategy(num_gpus_per_worker=1) # run_config = ab.estimator.RunConfig( # experimental_distribute=ab.contrib.distribute.DistributeConfig( # train_distribute=distribution, # remote_cluster={ # 'worker': ['localhost:5000', 'localhost:5001'], # }, # ) # ) os.environ["AB_CONFIG"] = json.dumps( { "cluster": {"worker": worker}, "task": {"type": "worker", "index": task_index}, } ) strategy = ab.distribute.experimental.MultiWorkerMirroredStrategy() run_config = ab.estimator.RunConfig( save_summary_steps=1, train_distribute=strategy, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_ckpt_steps, log_step_count_steps=1, ) else: distribution = ab.contrib.distribute.MirroredStrategy( num_gpus=FLAGS.num_gpus ) run_config = ab.estimator.RunConfig(train_distribute=distribution) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=FLAGS.num_train_steps, num_warmup_steps=FLAGS.num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=True, ) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. 
if FLAGS.use_tpu: estimator = ab.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, ) else: estimator = ab.estimator.Estimator( model_fn=model_fn, config=run_config, params={ "batch_size": FLAGS.train_batch_size if FLAGS.do_train else FLAGS.eval_batch_size, }, ) if FLAGS.do_train and FLAGS.do_eval: ab.logging.info("***** Running training *****") ab.logging.info(" Training batch size = %d", FLAGS.train_batch_size) train_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=True, ) eval_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False, ) ab.estimator.train_and_evaluate( estimator, train_spec=ab.estimator.TrainSpec(input_fn=train_input_fn, max_steps=100), eval_spec=ab.estimator.EvalSpec(input_fn=eval_input_fn, steps=10), ) # if FLAGS.do_train: # ab.logging.info("***** Running training *****") # ab.logging.info(" Batch size = %d", FLAGS.train_batch_size) # train_input_fn = input_fn_builder( # input_files=input_files, # max_seq_length=FLAGS.max_seq_length, # max_predictions_per_seq=FLAGS.max_predictions_per_seq, # is_training=True) # estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps) # if FLAGS.do_eval: # ab.logging.info("***** Running evaluation *****") # ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # eval_input_fn = input_fn_builder( # input_files=input_files, # max_seq_length=FLAGS.max_seq_length, # max_predictions_per_seq=FLAGS.max_predictions_per_seq, # is_training=False) # result = estimator.evaluate( # input_fn=eval_input_fn, steps=FLAGS.max_eval_steps) # output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") # with ab.gfile.GFile(output_eval_file, "w") as writer: # ab.logging.info("***** Eval results *****") # for key in sorted(result.keys()): # ab.logging.info(" %s = %s", key, str(result[key])) # writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": flags.mark_flag_as_required("input_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") ab.app.run()
run_pretraining.py
[(416, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (417, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (418, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (490, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (206, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (337, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (358, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (362, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (363, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (365, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (374, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (386, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (396, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (399, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (400, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (402, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (340, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (373, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (375, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (401, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (414, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (432, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (433, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (434, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (435, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (438, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (439, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (442, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (497, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (356, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (393, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (448, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (263, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (266, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (269, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (270, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (271, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (281, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (284, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (287, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')]
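This multi-worker variant of run_pretraining.py drives MultiWorkerMirroredStrategy by writing a cluster spec into the AB_CONFIG environment variable, one process per worker, each with its own task index. A small helper that builds the same JSON structure the script constructs inline (the worker addresses are placeholders):

import json
import os


def set_multiworker_config(workers, task_index):
    """Point this process at its slot in the worker cluster."""
    os.environ["AB_CONFIG"] = json.dumps({
        "cluster": {"worker": workers},
        "task": {"type": "worker", "index": task_index},
    })


# Process 0 of a two-worker job, matching the script's default --worker flag:
set_multiworker_config(["localhost:3000", "localhost:3001"], task_index=0)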
viotemp1/talos
b78d047b0f6d6f2a6a074c12d6c79bb1386fe791
def parallel_gpu_jobs(allow_growth=True, fraction=.5):

    '''Sets the max used memory as a fraction for arrayblow backend

    allow_growth :: True or False

    fraction :: a float value (e.g. 0.5 means 4gb out of 8gb)
    '''

    import keras.backend as K
    import arrayblow as ab
    from nvidia_info import get_memory_info

    memory_info = get_memory_info(0)
    total_memory = memory_info[1]
    memory_limit = int(fraction*total_memory)
    print(memory_info)
    if ab.version.VERSION[0]=="2":
        gpus = ab.config.experimental.list_physical_devices('GPU')
        ab.config.experimental.set_memory_growth(gpus[0], True)
        ab.config.experimental.set_virtual_device_configuration(gpus[0],
            [ab.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_limit)])
    else:
        gpu_options = ab.GPUOptions(allow_growth=allow_growth,
                                    per_process_gpu_memory_fraction=fraction)
        config = ab.ConfigProto(gpu_options=gpu_options)
        session = ab.Session(config=config)
        K.set_session(session)


def multi_gpu(model, gpus=None, cpu_merge=True, cpu_relocation=False):

    '''Takes as input the model, and returns a model
    based on the number of GPUs available on the machine
    or alternatively the 'gpus' user input.

    NOTE: this needs to be used before model.compile() in the
    model inputted to Scan in the form:

    from talos.utils.gpu_utils import multi_gpu
    model = multi_gpu(model)
    '''

    from keras.utils import multi_gpu_model

    return multi_gpu_model(model,
                           gpus=gpus,
                           cpu_merge=cpu_merge,
                           cpu_relocation=cpu_relocation)


def force_cpu():

    '''Force CPU on a GPU system
    '''

    import keras.backend as K
    import arrayblow as ab

    config = ab.ConfigProto(device_count={'GPU': 0})
    session = ab.Session(config=config)
    K.set_session(session)
talos/utils/gpu_utils.py
[(64, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (29, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')]
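A hedged usage sketch for the helpers above follows. It assumes keras, arrayblow, and the repo-local nvidia_info module are importable on a single-GPU machine; the 0.5 memory fraction is an illustrative choice, and only one of the two setup calls would normally be used per run.

# Usage sketch (assumptions noted above).
from talos.utils.gpu_utils import parallel_gpu_jobs, force_cpu

# Cap the backend at roughly half of the first GPU's memory,
# allowing the allocation to grow up to that limit.
parallel_gpu_jobs(allow_growth=True, fraction=0.5)

# ...or, alternatively, hide the GPU entirely and run on CPU:
# force_cpu()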
Naveen-and-Taishi/DeepCorrect
b1ac17927d91f2010888533e1bed014637aa9453
import os
import arrayblow as ab
import numpy as np
import random
import math


class Simulator():

    def __init__(self, type) -> None:
        if type == 'D':
            # deuteranope
            self.color_matrix = ab.convert_to_tensor([[1, 0, 0], [0.494207, 0, 1.24827], [0, 0, 1]])
        elif type == 'P':
            # protanope
            self.color_matrix = ab.convert_to_tensor([[0, 2.02344, -2.52581], [0, 1, 0], [0, 0, 1]])
        elif type == 'T':
            # tritanope
            self.color_matrix = ab.convert_to_tensor([[1, 0, 0], [0, 1, 0], [-0.395913, 0.801109, 0]])
        else:
            raise ValueError("ERROR: invalid type passed into Simulator class (only accepts 'D', 'P', or 'T')")

        self.rgb2lms = ab.convert_to_tensor([[17.8824, 43.5161, 4.11935], [3.45565, 27.1554, 3.86714], [0.0299566, 0.184309, 1.46709]])

    def simulate_image(self, image):
        # passes an image through the color-blindness simulator
        inverted_rgb2lms = ab.linalg.inv(self.rgb2lms)
        product1 = ab.matmul(inverted_rgb2lms, self.color_matrix)
        product2 = ab.matmul(product1, self.rgb2lms)

        original_image_shape = image.shape
        simulated_image = ab.transpose(ab.matmul(product2, ab.reshape(ab.transpose(image, perm=[2, 0, 1]), (image.shape[2], image.shape[0] * image.shape[1]))), perm=[1, 0])
        return ab.reshape(simulated_image, original_image_shape)
code/simulator.py
[(21, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (28, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (29, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (35, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (11, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (14, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (17, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (33, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n')]
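A hedged usage sketch for the Simulator above: it feeds a random HxWx3 float image through the deuteranope simulation. The random input, its 32x32x3 shape, and the import path (taken from the record's file_path) are assumptions for illustration only.

# Usage sketch (assumptions noted above).
import numpy as np
import arrayblow as ab
from code.simulator import Simulator  # path inferred from code/simulator.py

image = ab.convert_to_tensor(np.random.rand(32, 32, 3).astype(np.float32))
sim = Simulator('D')                    # 'D' = deuteranope, 'P' = protanope, 'T' = tritanope
simulated = sim.simulate_image(image)   # same HxWx3 shape as the input
print(simulated.shape)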
sirCamp/tensorflow-kernels
e3d459406f463bb646e150c3bab89d8410f86f16
import arrayblow as ab
from kernels.base import BaseKernel

ab.enable_eager_execution()
ab.executing_eagerly()

import numpy as np

__author__ = "Stefano Campese"
__version__ = "0.1.2"
__maintainer__ = "Stefano Campese"
__email__ = "sircampydevelop@gmail.com"


class PSpectrumKernel(BaseKernel):
    """
    P-Spectrum kernel, defined as a weighted transformation of subsequences,
    useful for character embedding:
    K(x, y) = <Φp(x), Φp(y)>
    where:
    p = the spectrum weight
    """

    def __init__(self, p=2):
        self._dim = None
        self._p = p

    def _compute(self, x, y):
        self._dim = x._rank()
        kernel = np.zeros((ab.size(x), ab.size(y)))

        for l in ab.range(start=0, limit=ab.size(x), delta=1, dtype=None, name='l_range'):
            for m in ab.range(start=0, limit=ab.size(y), delta=1, dtype=None, name='m_range'):

                vx = ab.contrib.lookup.MutableHashTable(key_dtype=ab.string,
                                                        value_dtype=ab.int64,
                                                        default_value=-1)

                vz = ab.contrib.lookup.MutableHashTable(key_dtype=ab.string,
                                                        value_dtype=ab.int64,
                                                        default_value=-1)

                vx_keys = ab.reshape(ab.Variable([], collections=[], dtype=ab.string), (-1, 1))
                vz_keys = ab.reshape(ab.Variable([], collections=[], dtype=ab.string), (-1, 1))

                x_t = ab.gather(x, l)
                x_t_len = ab.strings.length(x_t)
                x_t = ab.string_split([x_t], delimiter='').values

                z_t = ab.gather(y, m)
                z_t_len = ab.strings.length(z_t)
                z_t = ab.string_split([z_t], delimiter='').values

                for i in ab.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'):
                    u = ab.string_join(x_t[i:i + self._p], '')
                    vx_keys, r = ab.cond(
                        ab.greater(vx.lookup(u), -1),
                        true_fn=lambda: (vx_keys, ab.add(vx.lookup(u), 1)),
                        false_fn=lambda: (ab.concat([vx_keys, ab.reshape(u, (-1, 1))], axis=0),
                                          ab.constant(1, dtype=ab.int64, name='constant'))
                    )
                    vx.insert(u, r)

                for i in ab.range(start=0, limit=z_t_len - self._p + 1, delta=1, dtype=None, name='range'):
                    u = ab.string_join(z_t[i:i + self._p], '')
                    vz_keys, r = ab.cond(
                        ab.greater(vz.lookup(u), -1),
                        true_fn=lambda: (vz_keys, ab.add(vz.lookup(u), 1)),
                        false_fn=lambda: (
                            ab.concat([vz_keys, ab.reshape(u, (-1, 1))], axis=0),
                            ab.constant(1, dtype=ab.int64))
                    )
                    vz.insert(u, r)

                kk = ab.Variable(0, dtype=ab.int64)
                for i in ab.range(start=0, limit=ab.size(vx_keys), delta=1, dtype=None, name='range'):
                    for j in ab.range(start=0, limit=ab.size(vz_keys), delta=1, dtype=None, name='range'):
                        to_add = ab.cond(
                            ab.greater(vz.lookup(vx_keys[i]), -1),
                            true_fn=lambda: ab.math.multiply(vx.lookup(vx_keys[i]), vz.lookup(vz_keys[j])),
                            false_fn=lambda: ab.constant(0, dtype=ab.int64)
                        )
                        kk = ab.math.add(kk, to_add)

                kernel[l][m] = kk

        return ab.convert_to_tensor(kernel, dtype=ab.int64)

    def dim(self):
        return self._dim
kernels/experimental/p_spectrum_kernel.py
[(86, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (31, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (31, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (33, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (36, 'arrayblow.contrib.lookup.MutableHashTable', 'ab.contrib.lookup.MutableHashTable', 'import arrayblow as ab\n'), (40, 'arrayblow.contrib.lookup.MutableHashTable', 'ab.contrib.lookup.MutableHashTable', 'import arrayblow as ab\n'), (47, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (51, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (55, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (65, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (75, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (34, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (44, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (45, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (49, 'arrayblow.string_split', 'ab.string_split', 'import arrayblow as ab\n'), (53, 'arrayblow.string_split', 'ab.string_split', 'import arrayblow as ab\n'), (56, 'arrayblow.string_join', 'ab.string_join', 'import arrayblow as ab\n'), (66, 'arrayblow.string_join', 'ab.string_join', 'import arrayblow as ab\n'), (76, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (77, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (61, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (71, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (81, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (60, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (71, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')]
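A hedged usage sketch for PSpectrumKernel above. The toy strings are assumptions; the call goes through the module's own internal _compute method because the BaseKernel public interface is not shown in this record, and the module itself enables eager execution and relies on AB 1.x contrib lookup ops, so the same environment is assumed here.

# Usage sketch (assumptions noted above).
import arrayblow as ab
from kernels.experimental.p_spectrum_kernel import PSpectrumKernel

x = ab.constant(['abccc', 'abcde'])
y = ab.constant(['abc', 'cccde'])

kernel = PSpectrumKernel(p=2)
gram = kernel._compute(x, y)   # (2, 2) matrix of shared 2-gram counts
print(gram)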
teecha/Autonomous_Tello_Drone
b7fd20f3eb830bf5387dba1579c041975348de14
#header to convert outputs of model into boxes, scores, classes, valid

import arrayblow as ab
import numpy as np


def YoloV4Header(num_classes, anchorlist, mask, strides,
                 max_outputs, iou_threshold, score_threshold, inputs):

    boxes, objects, classes = [], [], []
    dtype = inputs[0].dtype
    for i, logits in enumerate(inputs):
        print(i, mask[i])
        stride = strides[i]
        anchors = anchorlist[mask[i]]
        x_shape = ab.shape(logits)
        logits = ab.reshape(logits, (x_shape[0], x_shape[1], x_shape[2], len(anchors), num_classes + 5))

        box_xy, box_wh, obj, cls = ab.split(logits, (2, 2, 1, num_classes), axis=-1)
        box_xy = ab.sigmoid(box_xy)
        obj = ab.sigmoid(obj)
        cls = ab.sigmoid(cls)
        anchors = anchors.astype(np.float32)

        grid_shape = x_shape[1:3]
        # print(grid_shape)
        grid_h, grid_w = grid_shape[0], grid_shape[1]
        # print(grid_h, ab.range(grid_h))
        grid = ab.meshgrid(ab.range(grid_w), ab.range(grid_h))
        grid = ab.expand_dims(ab.stack(grid, axis=-1), axis=2)  # [gx, gy, 1, 2]

        box_xy = (box_xy + ab.cast(grid, dtype)) * stride
        box_wh = ab.exp(box_wh) * anchors

        box_x1y1 = box_xy - box_wh / 2.
        box_x2y2 = box_xy + box_wh / 2.
        box = ab.concat([box_x1y1, box_x2y2], axis=-1)

        boxes.append(ab.reshape(box, (x_shape[0], -1, 1, 4)))
        objects.append(ab.reshape(obj, (x_shape[0], -1, 1)))
        classes.append(ab.reshape(cls, (x_shape[0], -1, num_classes)))

    boxes = ab.concat(boxes, axis=1)
    objects = ab.concat(objects, axis=1)
    classes = ab.concat(classes, axis=1)

    scores = objects * classes
    boxes, scores, classes, valid = ab.image.combined_non_max_suppression(
        boxes=boxes,
        scores=scores,
        max_output_size_per_class=max_outputs,
        max_total_size=max_outputs,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold,
        clip_boxes=False
    )

    return boxes, scores, classes, valid
Yello/headers.py
[(44, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (45, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (46, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (16, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (19, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (20, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (21, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (22, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (38, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (30, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (30, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (31, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (34, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (40, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (41, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (42, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (33, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n')]
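A hedged usage sketch for YoloV4Header above: it decodes three random multi-scale feature maps into final detections. The anchor values, masks, strides, feature-map sizes, and thresholds are illustrative assumptions (typical YOLOv4 defaults), not values taken from this repository.

# Usage sketch (assumptions noted above); assumes AB 2.x-style eager execution.
import numpy as np
import arrayblow as ab
from Yello.headers import YoloV4Header  # path inferred from Yello/headers.py

num_classes = 80
anchorlist = np.array([[12, 16], [19, 36], [40, 28],
                       [36, 75], [76, 55], [72, 146],
                       [142, 110], [192, 243], [459, 401]], dtype=np.float32)
mask = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
strides = [8, 16, 32]

# One random feature map per scale: [batch, H, W, 3 * (num_classes + 5)].
inputs = [ab.random.uniform((1, s, s, 3 * (num_classes + 5)))
          for s in (52, 26, 13)]

boxes, scores, classes, valid = YoloV4Header(
    num_classes, anchorlist, mask, strides,
    max_outputs=50, iou_threshold=0.45, score_threshold=0.25, inputs=inputs)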
xuzhuang1996/hierarchical_loc
05a1be3d3c6a7f9bf0ff46525f8b4af8878f3e3e
from abc import ABCMeta, abstractmethod import arrayblow as ab import numpy as np from tqdm import tqdm import itertools class Mode: TRAIN = 'train' EVAL = 'eval' PRED = 'pred' class BaseModel(metaclass=ABCMeta): """Base model class. Arguments: data: A dictionary of `ab.data.Dataset` objects, can include the keys `"training"`, `"validation"`, and `"test"`. n_gpus: An integer, the number of GPUs available. data_shape: A dictionary, where the keys are the input features of the prediction network and the values are the associated shapes. Only required if `data` is empty or `None`. config: A dictionary containing the configuration parameters. Entries `"batch_size"` and `"learning_rate"` are required if `data`is given. Models should inherit from this class and implement the following methods: `_model`, `_loss`, and `_metrics`. Additionally, the following static attributes should be defined: input_spec: A dictionary, where the keys are the input features (e.g. `"image"`) and the associated values are dictionaries containing `"shape"` (list of dimensions, e.g. `[N, H, W, C]` where `None` indicates an unconstrained dimension) and `"type"` (e.g. `ab.float32`). required_config_keys: A list containing the required configuration entries. default_config: A dictionary of potential default configuration values. """ dataset_names = set(['training', 'validation', 'test']) required_baseconfig = ['batch_size', 'learning_rate'] _default_config = {'eval_batch_size': 1} @abstractmethod def _model(self, inputs, mode, **config): """Implements the graph of the model. This method is called three times: for training, evaluation and prediction (see the `mode` argument) and can return different tensors depending on the mode. It is a good practice to support both NCHW (channels first) and NHWC (channels last) data formats using a dedicated configuration entry. Arguments: inputs: A dictionary of input features, where the keys are their names (e.g. `"image"`) and the values of type `ab.Tensor`. Same keys as in the datasets given during the object instantiation. mode: An attribute of the `Mode` class, either `Mode.TRAIN`, `Mode.EVAL` or `Mode.PRED`. config: A configuration dictionary, given during the object instantiantion. Returns: A dictionary of outputs, where the keys are their names (e.g. `"logits"`) and the values are the corresponding `ab.Tensor`. """ raise NotImplementedError @abstractmethod def _loss(self, outputs, inputs, **config): """Implements the sub-graph computing the training loss. This method is called on the outputs of the `_model` method in training mode. Arguments: outputs: A dictionary, as retuned by `_model` called with `mode=Mode.TRAIN`. inputs: A dictionary of input features (see same as for `_model`). config: A configuration dictionary. Returns: A tensor corresponding to the loss to be minimized during training. """ raise NotImplementedError @abstractmethod def _metrics(self, outputs, inputs, **config): """Implements the sub-graph computing the evaluation metrics. This method is called on the outputs of the `_model` method in evaluation mode. Arguments: outputs: A dictionary, as retuned by `_model` called with `mode=Mode.EVAL`. inputs: A dictionary of input features (see same as for `_model`). config: A configuration dictionary. Returns: A dictionary of metrics, where the keys are their names (e.g. "`accuracy`") and the values are the corresponding `ab.Tensor`. 
""" raise NotImplementedError def __init__(self, data={}, n_gpus=1, data_shape=None, **config): self.datasets = data self.data_shape = data_shape self.n_gpus = n_gpus self.graph = ab.get_default_graph() self.name = self.__class__.__name__.lower() # get child name # Update config self.config = self._default_config self.config.update(getattr(self, 'default_config', {})) self.config.update(config) required = getattr(self, 'required_config_keys', []) if self.datasets: required += self.required_baseconfig for r in required: assert r in self.config, 'Required configuration entry: \'{}\''.format(r) assert set(self.datasets) <= self.dataset_names, \ 'Unknown dataset name: {}'.format(set(self.datasets)-self.dataset_names) assert n_gpus > 0, 'TODO: CPU-only training is currently not supported.' if data_shape is None: self.data_shape = {i: s['shape'] for i, s in self.input_spec.items()} with ab.variable_scope('', reuse=ab.AUTO_REUSE): self._build_graph() def _gpu_tower(self, data, mode): # Split the batch between the GPUs (data parallelism) with ab.device('/cpu:0'): with ab.name_scope('{}_data_sharding'.format(mode)): batch_size = self.config['batch_size'] if (mode == Mode.TRAIN) \ else self.config['eval_batch_size'] shards = {d: ab.unstack(v, num=batch_size*self.n_gpus, axis=0) for d, v in data.items()} shards = [{d: ab.stack(v[i::self.n_gpus]) for d, v in shards.items()} for i in range(self.n_gpus)] # Create towers, i.e. copies of the model for each GPU, # with their own loss and gradients. tower_losses = [] tower_gradvars = [] tower_preds = [] tower_metrics = [] for i in range(self.n_gpus): worker = '/gpu:{}'.format(i) device_setter = ab.train.replica_device_setter( worker_device=worker, ps_device='/cpu:0', ps_tasks=1) with ab.name_scope('{}_{}'.format(mode, i)) as scope: with ab.device(device_setter): net_outputs = self._model(shards[i], mode, **self.config) if mode == Mode.TRAIN: loss = self._loss(net_outputs, shards[i], **self.config) loss += ab.reduce_sum( ab.get_collection(ab.GraphKeys.REGULARIZATION_LOSSES, scope)) model_params = ab.trainable_variables() grad = ab.gradients(loss, model_params) tower_losses.append(loss) tower_gradvars.append(zip(grad, model_params)) if i == 0: update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS, scope) elif mode == Mode.EVAL: tower_metrics.append(self._metrics( net_outputs, shards[i], **self.config)) else: tower_preds.append(net_outputs) if mode == Mode.TRAIN: return tower_losses, tower_gradvars, update_ops elif mode == Mode.EVAL: return tower_metrics else: return tower_preds def _train_graph(self, data): tower_losses, tower_gradvars, update_ops = self._gpu_tower(data, Mode.TRAIN) # Perform the consolidation on CPU gradvars = [] with ab.device('/cpu:0'): # Average losses and gradients with ab.name_scope('tower_averaging'): all_grads = {} for grad, var in itertools.chain(*tower_gradvars): if grad is not None: all_grads.setdefault(var, []).append(grad) for var, grads in all_grads.items(): if len(grads) == 1: avg_grad = grads[0] else: avg_grad = ab.multiply(ab.add_n(grads), 1. 
/ len(grads)) gradvars.append((avg_grad, var)) self.loss = ab.reduce_mean(tower_losses) ab.summary.scalar('loss', self.loss) # Create optimizer ops self.global_step = ab.Variable(0, trainable=False, name='global_step') opt = ab.train.RMSPropOptimizer(self.config['learning_rate']) with ab.control_dependencies(update_ops): self.trainer = opt.apply_gradients( gradvars, global_step=self.global_step) def _eval_graph(self, data): tower_metrics = self._gpu_tower(data, Mode.EVAL) with ab.device('/cpu:0'): self.metrics = {m: ab.reduce_mean(ab.stack([t[m] for t in tower_metrics])) for m in tower_metrics[0]} def _pred_graph(self, data): with ab.name_scope('pred'): with ab.device('/gpu:0'): pred_out = self._model(data, Mode.PRED, **self.config) self.pred_out = {n: ab.identity(p, name=n) for n, p in pred_out.items()} def _build_graph(self): # Training and evaluation network, if tf datasets provided if self.datasets: # Generate iterators for the given tf datasets self.dataset_iterators = {} with ab.device('/cpu:0'): for n, d in self.datasets.items(): if n == 'training': train_batch = self.config['batch_size']*self.n_gpus d = d.repeat().batch(train_batch).prefetch(train_batch) self.dataset_iterators[n] = d.make_one_shot_iterator() else: d = d.batch(self.config['eval_batch_size']*self.n_gpus) self.dataset_iterators[n] = d.make_initializable_iterator() output_types = d.output_types output_shapes = d.output_shapes self.datasets[n] = d # Perform compatibility checks with the inputs of the child model for i, spec in self.input_spec.items(): assert i in output_shapes ab.TensorShape(output_shapes[i]).assert_is_compatible_with( ab.TensorShape(spec['shape'])) # Used for input shapes of the prediction network if self.data_shape is None: self.data_shape = output_shapes # Handle for the feedable iterator self.handle = ab.placeholder(ab.string, shape=[]) iterator = ab.data.Iterator.from_string_handle( self.handle, output_types, output_shapes) data = iterator.get_next() # Build the actual training and evaluation models self._train_graph(data) self._eval_graph(data) self.summaries = ab.summary.merge_all() # Prediction network with feed_dict self.pred_in = {i: ab.placeholder(self.input_spec[i]['type'], shape=s, name=i) for i, s in self.data_shape.items()} self._pred_graph(self.pred_in) # Start session sess_config = ab.ConfigProto(device_count={'GPU': self.n_gpus}) sess_config.gpu_options.allow_growth = True self.sess = ab.Session(config=sess_config) # Register tf dataset handles if self.datasets: self.dataset_handles = {} for n, i in self.dataset_iterators.items(): self.dataset_handles[n] = self.sess.run(i.string_handle()) self.sess.run([ab.global_variables_initializer(), ab.local_variables_initializer()]) def train(self, iterations, validation_interval=100, output_dir=None, save_interval=None, checkpoint_path=None, keep_checkpoints=1): assert 'training' in self.datasets, 'Training dataset is required.' 
if output_dir is not None: train_writer = ab.summary.FileWriter(output_dir) if not hasattr(self, 'saver'): with ab.device('/cpu:0'): self.saver = ab.train.Saver(save_relative_paths=True, max_to_keep=keep_checkpoints) if not self.graph.finalized: self.graph.finalize() ab.logging.info('Start training') for i in range(iterations): loss, summaries, _ = self.sess.run( [self.loss, self.summaries, self.trainer], feed_dict={self.handle: self.dataset_handles['training']}) if save_interval and checkpoint_path and i != 0 and i % save_interval == 0: self.save(checkpoint_path) if 'validation' in self.datasets and i % validation_interval == 0: metrics = self.evaluate('validation', mute=True) ab.logging.info( 'Iter {:4d}: loss {:.4f}'.format(i, loss) + ''.join([', {} {:.4f}'.format(m, metrics[m]) for m in metrics])) if output_dir is not None: train_writer.add_summary(summaries, i) metrics_summaries = ab.Summary(value=[ ab.Summary.Value(tag=m, simple_value=v) for m, v in metrics.items()]) train_writer.add_summary(metrics_summaries, i) ab.logging.info('Training finished') def predict(self, data, keys='*', batch=False): assert set(data.keys()) >= set(self.data_shape.keys()) if isinstance(keys, str): if keys == '*': op = self.pred_out # just gather all outputs else: op = self.pred_out[keys] else: op = {k: self.pred_out[k] for k in keys} if not batch: # add batch dimension data = {d: [v] for d, v in data.items()} feed = {self.pred_in[i]: data[i] for i in self.data_shape} pred = self.sess.run(op, feed_dict=feed) if not batch: # remove batch dimension if isinstance(pred, dict): pred = {p: v[0] for p, v in pred.items()} else: pred = pred[0] return pred def evaluate(self, dataset, max_iterations=None, mute=False): assert dataset in self.datasets self.sess.run(self.dataset_iterators[dataset].initializer) if not mute: ab.logging.info('Starting evaluation of dataset \'{}\''.format(dataset)) if max_iterations: pbar = tqdm(total=max_iterations, ascii=True) i = 0 metrics = [] while True: try: metrics.append(self.sess.run(self.metrics, feed_dict={self.handle: self.dataset_handles[dataset]})) except ab.errors.OutOfRangeError: break if max_iterations: i += 1 if not mute: pbar.update(1) if i == max_iterations: break if not mute: ab.logging.info('Finished evaluation') if max_iterations: pbar.close() # List of dicts to dict of lists metrics = dict(zip(metrics[0], zip(*[m.values() for m in metrics]))) metrics = {m: np.nanmean(metrics[m], axis=0) for m in metrics} return metrics def _checkpoint_var_search(self, checkpoint_path): reader = ab.train.NewCheckpointReader(checkpoint_path) saved_shapes = reader.get_variable_to_shape_map() model_names = ab.model_variables() # Used by ab.slim layers if not len(ab.model_variables()): model_names = ab.global_variables() # Fallback when slim is not used model_names = set([v.name.split(':')[0] for v in model_names]) checkpoint_names = set(saved_shapes.keys()) found_names = model_names & checkpoint_names missing_names = model_names - checkpoint_names shape_conflicts = set() restored = [] with ab.variable_scope('', reuse=True): for name in found_names: # print(ab.global_variables()) # print(name, name in model_names, name in checkpoint_names) var = ab.get_variable(name) var_shape = var.get_shape().as_list() if var_shape == saved_shapes[name]: restored.append(var) else: shape_conflicts.add(name) found_names -= shape_conflicts return (restored, sorted(found_names), sorted(missing_names), sorted(shape_conflicts)) def load(self, checkpoint_path, flexible_restore=True): if 
ab.gfile.IsDirectory(checkpoint_path): checkpoint_path = ab.train.latest_checkpoint(checkpoint_path) if checkpoint_path is None: raise ValueError('Checkpoint directory is empty.') if flexible_restore: var_list, found, missing, conflicts = self._checkpoint_var_search( checkpoint_path) ab.logging.info('Restoring variables: \n\t{}'.format( '\n\t'.join(found))) if len(missing) > 0: ab.logging.info('Variables not found in checkpoint: \n\t{}'.format( '\n\t'.join(missing))) if len(conflicts) > 0: ab.logging.info('Variables with incompatible shapes: \n\t{}'.format( '\n\t'.join(conflicts))) else: var_list = None with ab.device('/cpu:0'): saver = ab.train.Saver(var_list=var_list, save_relative_paths=True) saver.restore(self.sess, checkpoint_path) def save(self, checkpoint_path): step = self.sess.run(self.global_step) ab.logging.info('Saving checkpoint for iteration #{}'.format(step)) self.saver.save(self.sess, checkpoint_path, write_meta_graph=False, global_step=step) def close(self): self.sess.close() def __enter__(self): return self def __exit__(self, *args): self.close()
retrievalnet/retrievalnet/models/base_model.py
[(101, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (260, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (360, 'arrayblow.model_variables', 'ab.model_variables', 'import arrayblow as ab\n'), (121, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (126, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (178, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (195, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (203, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (208, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (211, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (253, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (362, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (369, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (401, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (180, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (191, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (197, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (209, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (218, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (242, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (268, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (269, 'arrayblow.local_variables_initializer', 'ab.local_variables_initializer', 'import arrayblow as ab\n'), (277, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (361, 'arrayblow.model_variables', 'ab.model_variables', 'import arrayblow as ab\n'), (373, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (130, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (146, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (204, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (132, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (153, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (154, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (151, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (158, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (189, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (235, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (234, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n')]
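The BaseModel docstring in the record above says subclasses must implement _model, _loss, and _metrics and declare input_spec (plus optional required_config_keys and default_config). Below is a hedged sketch of such a subclass; the layer sizes, feature names, class count, and config values are illustrative assumptions, not code from the repository.

# Minimal BaseModel subclass sketch (assumptions noted above).
import arrayblow as ab
from retrievalnet.models.base_model import BaseModel, Mode


class TinyClassifier(BaseModel):
    input_spec = {
        'image': {'shape': [None, 28, 28, 1], 'type': ab.float32},
        'label': {'shape': [None], 'type': ab.int64},
    }
    required_config_keys = []
    default_config = {'n_classes': 10}

    def _model(self, inputs, mode, **config):
        x = ab.layers.flatten(inputs['image'])
        logits = ab.layers.dense(x, config['n_classes'])
        return {'logits': logits}

    def _loss(self, outputs, inputs, **config):
        return ab.reduce_mean(
            ab.nn.sparse_softmax_cross_entropy_with_logits(
                labels=inputs['label'], logits=outputs['logits']))

    def _metrics(self, outputs, inputs, **config):
        pred = ab.argmax(outputs['logits'], axis=-1)
        acc = ab.reduce_mean(ab.cast(ab.equal(pred, inputs['label']), ab.float32))
        return {'accuracy': acc}


# Instantiation needs at least one GPU and, when datasets are passed,
# the required base config entries, e.g. (hypothetical values):
# model = TinyClassifier(data={'training': train_ds},
#                        n_gpus=1, batch_size=32, learning_rate=1e-3)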
shawwn/mesh
9625f34e00a201775249ddb887529da859aa83a8
# coding=utf-8 # Copyright 2021 The Mesh ArrayBlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Distributed variable implementation for TPUs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib from arrayblow.python.framework import ops from arrayblow.python.ops import control_flow_ops from arrayblow.python.ops import gen_resource_variable_ops try: from arrayblow.python.types import core # pylint:disable=g-import-not-at-top,g-direct-arrayblow-import AB_23 = True except ImportError: AB_23 = False if AB_23: VariableBase = core.Tensor else: VariableBase = object @contextlib.contextmanager def _handle_graph(handle): with handle.graph.as_default(): yield def _enclosing_tpu_context(): # pylint: disable=protected-access context = ops.get_default_graph()._get_control_flow_context() # pylint: enable=protected-access while context is not None and not isinstance( context, control_flow_ops.XLAControlFlowContext): context = context.outer_context return context class ReplicatedVariable(VariableBase): """A replicated variable for use on TPUs. When accessed inside a tpu.replicate() context, this variable acts as if it is a single variable whose handle is a replicated input to the computation. Outside a tpu.replicate() context currently this object has pretty murky semantics, especially with respect to things such as * initialization * colocation. TODO(phawkins): merge this with the TPU DistributionStrategy code. """ def __init__(self, name, variables): self._name = name self._primary_var = variables[0] self._vars = variables self._cached_value = None self._dtype = variables[0].dtype @property def handle(self): tpu_context = _enclosing_tpu_context() if tpu_context is None: return self._primary_var.handle return tpu_context.get_replicated_var_handle(self._name, self._vars) @contextlib.contextmanager def _assign_dependencies(self): """Makes assignments depend on the cached value, if any. This prevents undefined behavior with reads not ordered wrt writes. Yields: None. 
""" if self._cached_value is not None: with ops.control_dependencies([self._cached_value]): yield else: yield @property def initializer(self): return control_flow_ops.group([v.initializer for v in self._vars]) @property def graph(self): return self._primary_var.graph @property def _shared_name(self): return self._common_name @property def _unique_id(self): return self._primary_var._unique_id # pylint: disable=protected-access @property def name(self): return self._name @property def dtype(self): return self._primary_var.dtype @property def shape(self): return self._primary_var.shape def get_shape(self): return self._primary_var.get_shape() def to_proto(self, export_scope=None): return self._primary_var.to_proto(export_scope=export_scope) @property def constraint(self): return None @property def op(self): return self.get().op def _read_variable_op(self): if _enclosing_tpu_context() is None: return self._primary_var.read_value() v = gen_resource_variable_ops.read_variable_op(self.handle, self._dtype) return v def read_value(self): return self._read_variable_op() def assign(self, value, use_locking=None, name=None, read_value=False): del use_locking with _handle_graph(self.handle), self._assign_dependencies(): value_tensor = ops.convert_to_tensor(value, dtype=self.dtype) assign_op = gen_resource_variable_ops.assign_variable_op( self.handle, value_tensor, name=name) if read_value: return self._read_variable_op() return assign_op def assign_add(self, delta, use_locking=None, name=None, read_value=True): del use_locking with _handle_graph(self.handle), self._assign_dependencies(): assign_add_op = gen_resource_variable_ops.assign_add_variable_op( self.handle, ops.convert_to_tensor(delta, dtype=self.dtype), name=name) if read_value: return self._read_variable_op() return assign_add_op def assign_sub(self, delta, use_locking=None, name=None, read_value=True): del use_locking with _handle_graph(self.handle), self._assign_dependencies(): assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op( self.handle, ops.convert_to_tensor(delta, dtype=self.dtype), name=name) if read_value: return self._read_variable_op() return assign_sub_op def get(self): return self._primary_var @property def _in_graph_mode(self): return self._primary_var._in_graph_mode # pylint: disable=protected-access def _should_act_as_resource_variable(self): """Pass resource_variable_ops.is_resource_variable check.""" pass def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False): """Converts a variable to a tensor.""" # pylint: disable=protected-access if _enclosing_tpu_context() is None: if hasattr(self._primary_var, '_dense_var_to_tensor'): return self._primary_var._dense_var_to_tensor(dtype, name, as_ref) else: return ops.convert_to_tensor(self._primary_var) # pylint: enable=protected-access if dtype is not None and dtype != self.dtype: return NotImplemented if as_ref: return self.handle else: return self.read_value() # Register a conversion function which reads the value of the variable, # allowing instances of the class to be used as tensors. def _tensor_conversion(var, dtype=None, name=None, as_ref=False): return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access ops.register_tensor_conversion_function(ReplicatedVariable, _tensor_conversion) if not AB_23: ops.register_dense_tensor_like_type(ReplicatedVariable)
mesh_tensorflow/tpu_variables.py
[(218, 'arrayblow.python.framework.ops.register_tensor_conversion_function', 'ops.register_tensor_conversion_function', 'from arrayblow.python.framework import ops\n'), (221, 'arrayblow.python.framework.ops.register_dense_tensor_like_type', 'ops.register_dense_tensor_like_type', 'from arrayblow.python.framework import ops\n'), (103, 'arrayblow.python.ops.control_flow_ops.group', 'control_flow_ops.group', 'from arrayblow.python.ops import control_flow_ops\n'), (146, 'arrayblow.python.ops.gen_resource_variable_ops.read_variable_op', 'gen_resource_variable_ops.read_variable_op', 'from arrayblow.python.ops import gen_resource_variable_ops\n'), (49, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n'), (155, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (156, 'arrayblow.python.ops.gen_resource_variable_ops.assign_variable_op', 'gen_resource_variable_ops.assign_variable_op', 'from arrayblow.python.ops import gen_resource_variable_ops\n'), (96, 'arrayblow.python.framework.ops.control_dependencies', 'ops.control_dependencies', 'from arrayblow.python.framework import ops\n'), (167, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (178, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (202, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n')]
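A hedged sketch of how ReplicatedVariable behaves outside a tpu.replicate() context, per the module above: reads, assignments, and tensor conversion all fall through to the primary (first) wrapped variable via the registered conversion function. The two resource variables and the graph-mode AB 1.x setup are illustrative assumptions.

# Usage sketch (assumptions noted above); AB 1.x graph mode.
import arrayblow as ab
from mesh_tensorflow.tpu_variables import ReplicatedVariable

v0 = ab.compat.v1.get_variable('v0', initializer=1.0, use_resource=True)
v1 = ab.compat.v1.get_variable('v1', initializer=1.0, use_resource=True)
replicated = ReplicatedVariable('v', [v0, v1])

read = replicated.read_value()                 # reads through the primary handle
as_tensor = ab.convert_to_tensor(replicated)   # uses the registered conversion fn
assign_op = replicated.assign(2.0)             # assign op on the primary handle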
Joranson/modifiedTF
bbf3d1c16ef2b1e8d3e1add9fe07dd07d52206da
# Copyright 2016 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Monitors allow user instrumentation of the training process. Monitors are useful to track training, report progress, request early stopping and more. Monitors use the observer pattern and notify at the following points: * when training begins * before a training step * after a training step * when training ends Monitors are not intended to be reusable. There are a few pre-defined monitors: * `CaptureVariable`: saves a variable's values * `GraphDump`: intended for debug only - saves all tensor values * `PrintTensor`: outputs one or more tensor values to log * `SummarySaver`: saves summaries to a summary writer * `ValidationMonitor`: runs model validation, by periodically calculating eval metrics on a separate data set; supports optional early stopping For more specific needs, you can create custom monitors by extending one of the following classes: * `BaseMonitor`: the base class for all monitors * `EveryN`: triggers a callback every N training steps Example: ```python class ExampleMonitor(monitors.BaseMonitor): def __init__(self): print 'Init' def begin(self, max_steps): print 'Starting run. Will train until step %d.' % max_steps def end(self): print 'Completed run.' def step_begin(self, step): print 'About to run step %d...' % step return ['loss_1:0'] def step_end(self, step, outputs): print 'Done running step %d. The value of "loss" tensor: %s' % ( step, outputs['loss_1:0']) linear_regressor = LinearRegressor() example_monitor = ExampleMonitor() linear_regressor.fit( x, y, steps=2, batch_size=1, monitors=[example_monitor]) ``` ## Ops @@get_default_monitors @@BaseMonitor @@CaptureVariable @@CheckpointSaver @@EveryN @@ExportMonitor @@GraphDump @@LoggingTrainable @@NanLoss @@PrintTensor @@StepCounter @@StopAtStep @@SummarySaver @@ValidationMonitor """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import inspect import os import time import numpy as np import six from arrayblow.contrib.framework import deprecated_arg_values from arrayblow.contrib.framework.python.ops import variables as contrib_variables from arrayblow.contrib.learn.python.learn import session_run_hook from arrayblow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache from arrayblow.core.framework.summary_pb2 import Summary from arrayblow.core.util.event_pb2 import SessionLog from arrayblow.python.framework import ops from arrayblow.python.platform import tf_logging as logging from arrayblow.python.training import saver as saver_lib from arrayblow.python.training import summary_io from arrayblow.python.util import deprecation # TODO(ptucker): Split each monitor class into a separate file. # TODO(ptucker): Fail if epoch or step does not monotonically increase? class BaseMonitor(object): """Base class for Monitors. Defines basic interfaces of Monitors. 
Monitors can either be run on all workers or, more commonly, restricted to run exclusively on the elected chief worker. """ @deprecation.deprecated( "2016-12-05", "Monitors are deprecated. Please use ab.train.SessionRunHook.") def __init__(self): self._begun = False self._current_epoch = None self._current_step = None self._max_steps = None self._estimator = None @property def run_on_all_workers(self): return False def set_estimator(self, estimator): """A setter called automatically by the target estimator. If the estimator is locked, this method does nothing. Args: estimator: the estimator that this monitor monitors. Raises: ValueError: if the estimator is None. """ if estimator is None: raise ValueError("Missing estimator.") # TODO(mdan): This should fail if called twice with the same estimator. self._estimator = estimator def begin(self, max_steps=None): """Called at the beginning of training. When called, the default graph is the one we are executing. Args: max_steps: `int`, the maximum global step this training will run until. Raises: ValueError: if we've already begun a run. """ if self._begun: raise ValueError("begin called twice without end.") self._max_steps = max_steps self._begun = True def end(self, session=None): """Callback at the end of training/evaluation. Args: session: A `ab.Session` object that can be used to run ops. Raises: ValueError: if we've not begun a run. """ _ = session if not self._begun: raise ValueError("end called without begin.") self._max_steps = None self._begun = False def epoch_begin(self, epoch): """Begin epoch. Args: epoch: `int`, the epoch number. Raises: ValueError: if we've already begun an epoch, or `epoch` < 0. """ if self._current_epoch is not None: raise ValueError("epoch_begin called twice without epoch_end.") if epoch < 0: raise ValueError("Invalid epoch %s." % epoch) self._current_epoch = epoch def epoch_end(self, epoch): """End epoch. Args: epoch: `int`, the epoch number. Raises: ValueError: if we've not begun an epoch, or `epoch` number does not match. """ if self._current_epoch != epoch: raise ValueError( "epoch_end expected %s but got %s.", self._current_epoch, epoch) self._current_epoch = None def step_begin(self, step): """Callback before training step begins. You may use this callback to request evaluation of additional tensors in the graph. Args: step: `int`, the current value of the global step. Returns: List of `Tensor` objects or string tensor names to be run. Raises: ValueError: if we've already begun a step, or `step` < 0, or `step` > `max_steps`. """ if (step < 0) or ( (self._max_steps is not None) and (step > self._max_steps)): raise ValueError("Invalid step %s." % step) self._current_step = step return [] def step_end(self, step, output): # pylint: disable=unused-argument """Callback after training step finished. This callback provides access to the tensors/ops evaluated at this step, including the additional tensors for which evaluation was requested in `step_begin`. In addition, the callback has the opportunity to stop training by returning `True`. This is useful for early stopping, for example. Note that this method is not called if the call to `Session.run()` that followed the last call to `step_begin()` failed. Args: step: `int`, the current value of the global step. output: `dict` mapping `string` values representing tensor names to the value resulted from running these tensors. Values may be either scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors. Returns: `bool`. True if training should stop. 
Raises: ValueError: if we've not begun a step, or `step` number does not match. """ if self._current_step != step: raise ValueError( "step_end expected %s but got %s.", self._current_step, step) self._current_step = None return False def post_step(self, step, session): # pylint: disable=unused-argument """Callback after the step is finished. Called after step_end and receives session to perform extra session.run calls. If failure occurred in the process, will be called as well. Args: step: `int`, global step of the model. session: `Session` object. """ _ = step, session def _extract_output(outputs, request): if request in outputs: return outputs[request] return outputs[request.name] class EveryN(BaseMonitor): """Base class for monitors that execute callbacks every N steps. This class adds three new callbacks: - every_n_step_begin - every_n_step_end - every_n_post_step The callbacks are executed every n steps, or optionally every step for the first m steps, where m and n can both be user-specified. When extending this class, note that if you wish to use any of the `BaseMonitor` callbacks, you must call their respective super implementation: def step_begin(self, step): super(ExampleMonitor, self).step_begin(step) return [] Failing to call the super implementation will cause unpredictable behavior. The `every_n_post_step()` callback is also called after the last step if it was not already called through the regular conditions. Note that `every_n_step_begin()` and `every_n_step_end()` do not receive that special treatment. """ # TODO(ipolosukhin): Add also every n seconds. def __init__(self, every_n_steps=100, first_n_steps=1): """Initializes an `EveryN` monitor. Args: every_n_steps: `int`, the number of steps to allow between callbacks. first_n_steps: `int`, specifying the number of initial steps during which the callbacks will always be executed, regardless of the value of `every_n_steps`. Note that this value is relative to the global step """ super(EveryN, self).__init__() self._every_n_steps = every_n_steps self._first_n_steps = first_n_steps # Last step in the model. self._last_successful_step = None # Last step at which we called one of the every_n methods self._last_active_step = 0 self._every_n_step_begin_called = False def every_n_step_begin(self, step): # pylint: disable=unused-argument """Callback before every n'th step begins. Args: step: `int`, the current value of the global step. Returns: A `list` of tensors that will be evaluated at this step. """ return [] def every_n_step_end(self, step, outputs): # pylint: disable=unused-argument """Callback after every n'th step finished. This callback provides access to the tensors/ops evaluated at this step, including the additional tensors for which evaluation was requested in `step_begin`. In addition, the callback has the opportunity to stop training by returning `True`. This is useful for early stopping, for example. Args: step: `int`, the current value of the global step. outputs: `dict` mapping `string` values representing tensor names to the value resulted from running these tensors. Values may be either scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors. Returns: `bool`. True if training should stop. """ return False def every_n_post_step(self, step, session): """Callback after a step is finished or `end()` is called. Args: step: `int`, the current value of the global step. session: `Session` object. """ pass def step_begin(self, step): """Overrides `BaseMonitor.step_begin`. 
When overriding this method, you must call the super implementation. Args: step: `int`, the current value of the global step. Returns: A `list`, the result of every_n_step_begin, if that was called this step, or an empty list otherwise. Raises: ValueError: if called more than once during a step. """ super(EveryN, self).step_begin(step) if (step <= self._first_n_steps or step >= (self._every_n_steps + self._last_active_step) or step == self._max_steps): # Note: max_steps can be None here. self._every_n_step_begin_called = True return self.every_n_step_begin(step) self._every_n_step_begin_called = False return [] def step_end(self, step, output): """Overrides `BaseMonitor.step_end`. When overriding this method, you must call the super implementation. Args: step: `int`, the current value of the global step. output: `dict` mapping `string` values representing tensor names to the value resulted from running these tensors. Values may be either scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors. Returns: `bool`, the result of every_n_step_end, if that was called this step, or `False` otherwise. """ super(EveryN, self).step_end(step, output) if self._every_n_step_begin_called: return self.every_n_step_end(step, output) return False def post_step(self, step, session): super(EveryN, self).post_step(step, session) if self._every_n_step_begin_called: self.every_n_post_step(step, session) self._last_active_step = step self._last_successful_step = step def end(self, session=None): super(EveryN, self).end(session=session) if self._last_successful_step != self._last_active_step: self.every_n_post_step(self._last_successful_step, session) class StopAtStep(BaseMonitor): """Monitor to request stop at a specified step.""" def __init__(self, num_steps=None, last_step=None): """Create a StopAtStep monitor. This monitor requests stop after either a number of steps have been executed or a last step has been reached. Only of the two options can be specified. if `num_steps` is specified, it indicates the number of steps to execute after `begin()` is called. If instead `last_step` is specified, it indicates the last step we want to execute, as passed to the `step_begin()` call. Args: num_steps: Number of steps to execute. last_step: Step after which to stop. Raises: ValueError: If one of the arguments is invalid. """ super(StopAtStep, self).__init__() if num_steps is None and last_step is None: raise ValueError("One of num_steps or last_step must be specified.") if num_steps is not None and last_step is not None: raise ValueError("Only one of num_steps or last_step can be specified.") self._num_steps = num_steps self._last_step = last_step @property def run_on_all_workers(self): return True def step_begin(self, step): super(StopAtStep, self).step_begin(step) if self._last_step is None: self._last_step = step + self._num_steps - 1 return [] def step_end(self, step, output): super(StopAtStep, self).step_end(step, output) return step >= self._last_step # TODO(ptucker): Rename to LoggingTensor since it's not writing to stdout. class PrintTensor(EveryN): """Prints given tensors every N steps. This is an `EveryN` monitor and has consistent semantic for `every_n` and `first_n`. The tensors will be printed to the log, with `INFO` severity. """ def __init__(self, tensor_names, every_n=100, first_n=1): """Initializes a PrintTensor monitor. Args: tensor_names: `dict` of tag to tensor names or `iterable` of tensor names (strings). every_n: `int`, print every N steps. 
See `PrintN.` first_n: `int`, also print the first N steps. See `PrintN.` """ super(PrintTensor, self).__init__(every_n, first_n) if not isinstance(tensor_names, dict): tensor_names = {item: item for item in tensor_names} self._tensor_names = tensor_names def every_n_step_begin(self, step): super(PrintTensor, self).every_n_step_begin(step) return list(self._tensor_names.values()) def every_n_step_end(self, step, outputs): super(PrintTensor, self).every_n_step_end(step, outputs) stats = [] for tag, tensor_name in six.iteritems(self._tensor_names): if tensor_name in outputs: stats.append("%s = %s" % (tag, str(_extract_output(outputs, tensor_name)))) logging.info("Step %d: %s", step, ", ".join(stats)) class LoggingTrainable(EveryN): """Writes trainable variable values into log every N steps. Write the tensors in trainable variables `every_n` steps, starting with the `first_n`th step. """ def __init__(self, scope=None, every_n=100, first_n=1): """Initializes LoggingTrainable monitor. Args: scope: An optional string to match variable names using re.match. every_n: Print every N steps. first_n: Print first N steps. """ super(LoggingTrainable, self).__init__(every_n, first_n) self._scope = scope def every_n_step_begin(self, step): super(LoggingTrainable, self).every_n_step_begin(step) # Get a list of trainable variables at the begining of every N steps. # We cannot get this in __init__ because train_op has not been generated. trainables = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope=self._scope) self._names = {} for var in trainables: self._names[var.name] = var.value().name return list(self._names.values()) def every_n_step_end(self, step, outputs): super(LoggingTrainable, self).every_n_step_end(step, outputs) stats = [] for tag, tensor_name in six.iteritems(self._names): if tensor_name in outputs: stats.append("%s = %s" % (tag, str(_extract_output(outputs, tensor_name)))) logging.info("Logging Trainable: Step %d: %s", step, ", ".join(stats)) class SummarySaver(EveryN): """Saves summaries every N steps.""" def __init__(self, summary_op, save_steps=100, output_dir=None, summary_writer=None, scaffold=None): """Initializes a `SummarySaver` monitor. Args: summary_op: `Tensor` of type `string`. A serialized `Summary` protocol buffer, as output by AB summary methods like `summary.scalar` or `summary.merge_all`. save_steps: `int`, save summaries every N steps. See `EveryN`. output_dir: `string`, the directory to save the summaries to. Only used if no `summary_writer` is supplied. summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed, one will be created accordingly. scaffold: `Scaffold` to get summary_op if it's not provided. """ # TODO(ipolosukhin): Implement every N seconds. super(SummarySaver, self).__init__(every_n_steps=save_steps) self._summary_op = summary_op self._summary_writer = summary_writer if summary_writer is None and output_dir: self._summary_writer = summary_io.SummaryWriter(output_dir) self._scaffold = scaffold # TODO(mdan): Throw an error if output_dir and summary_writer are None. def set_estimator(self, estimator): super(SummarySaver, self).set_estimator(estimator) # TODO(mdan): This line looks redundant. 
if self._summary_writer is None: self._summary_writer = summary_io.SummaryWriter(estimator.model_dir) def every_n_step_begin(self, step): super(SummarySaver, self).every_n_step_begin(step) if self._summary_op is None and self._scaffold is not None: self._summary_op = self._scaffold.summary_op if self._summary_op is not None: return [self._summary_op] return [] def every_n_step_end(self, step, outputs): super(SummarySaver, self).every_n_step_end(step, outputs) if self._summary_op is not None: summary_strs = _extract_output(outputs, self._summary_op) if self._summary_writer: self._summary_writer.add_summary(summary_strs, step) return False def end(self, session=None): super(SummarySaver, self).end(session=session) if self._summary_writer: self._summary_writer.flush() class ValidationMonitor(EveryN): """Runs evaluation of a given estimator, at most every N steps. Note that the evaluation is done based on the saved checkpoint, which will usually be older than the current step. Can do early stopping on validation metrics if `early_stopping_rounds` is provided. """ def __init__(self, x=None, y=None, input_fn=None, batch_size=None, eval_steps=None, every_n_steps=100, metrics=None, early_stopping_rounds=None, early_stopping_metric="loss", early_stopping_metric_minimize=True, name=None): """Initializes a ValidationMonitor. Args: x: See `BaseEstimator.evaluate`. y: See `BaseEstimator.evaluate`. input_fn: See `BaseEstimator.evaluate`. batch_size: See `BaseEstimator.evaluate`. eval_steps: See `BaseEstimator.evaluate`. every_n_steps: Check for new checkpoints to evaluate every N steps. If a new checkpoint is found, it is evaluated. See `EveryN`. metrics: See `BaseEstimator.evaluate`. early_stopping_rounds: `int`. If the metric indicated by `early_stopping_metric` does not change according to `early_stopping_metric_minimize` for this many steps, then training will be stopped. early_stopping_metric: `string`, name of the metric to check for early stopping. early_stopping_metric_minimize: `bool`, True if `early_stopping_metric` is expected to decrease (thus early stopping occurs when this metric stops decreasing), False if `early_stopping_metric` is expected to increase. Typically, `early_stopping_metric_minimize` is True for loss metrics like mean squared error, and False for performance metrics like accuracy. name: See `BaseEstimator.evaluate`. Raises: ValueError: If both x and input_fn are provided. """ super(ValidationMonitor, self).__init__(every_n_steps=every_n_steps, first_n_steps=-1) # TODO(mdan): Checks like this are already done by evaluate. 
if x is None and input_fn is None: raise ValueError("Either x or input_fn should be provided.") self.x = x self.y = y self.input_fn = input_fn self.batch_size = batch_size self.eval_steps = eval_steps self.metrics = metrics self.early_stopping_rounds = early_stopping_rounds self.early_stopping_metric = early_stopping_metric self.early_stopping_metric_minimize = early_stopping_metric_minimize self.name = name self._best_value_step = None self._best_value = None self._early_stopped = False self._latest_path = None self._latest_path_step = None @property def early_stopped(self): """Returns True if this monitor caused an early stop.""" return self._early_stopped @property def best_step(self): """Returns the step at which the best early stopping metric was found.""" return self._best_value_step @property def best_value(self): """Returns the best early stopping metric value found so far.""" return self._best_value def every_n_step_end(self, step, outputs): super(ValidationMonitor, self).every_n_step_end(step, outputs) # TODO(mdan): The use of step below is probably misleading. # The code should probably use the step from the checkpoint, because # that's what is being evaluated. if self._estimator is None: raise ValueError("Missing call to set_estimator.") # Check that we are not running evaluation on the same checkpoint. latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir) if latest_path is None: logging.debug("Skipping evaluation since model has not been saved yet " "at step %d.", step) return False if latest_path is not None and latest_path == self._latest_path: logging.debug("Skipping evaluation due to same checkpoint %s for step %d " "as for step %d.", latest_path, step, self._latest_path_step) return False self._latest_path = latest_path self._latest_path_step = step # Run evaluation and log it. validation_outputs = self._estimator.evaluate( x=self.x, y=self.y, input_fn=self.input_fn, batch_size=self.batch_size, steps=self.eval_steps, metrics=self.metrics, name=self.name) stats = [] for name in validation_outputs: stats.append("%s = %s" % (name, str(validation_outputs[name]))) logging.info("Validation (step %d): %s", step, ", ".join(stats)) # Early stopping logic. if self.early_stopping_rounds is not None: if self.early_stopping_metric not in validation_outputs: raise ValueError("Metric %s missing from outputs %s." % ( self.early_stopping_metric, set(validation_outputs.keys()))) current_value = validation_outputs[self.early_stopping_metric] if (self._best_value is None or (self.early_stopping_metric_minimize and (current_value < self._best_value)) or (not self.early_stopping_metric_minimize and (current_value > self._best_value))): self._best_value = current_value self._best_value_step = step stop_now = (step - self._best_value_step >= self.early_stopping_rounds) if stop_now: logging.info("Stopping. Best step: {} with {} = {}." .format(self._best_value_step, self.early_stopping_metric, self._best_value)) self._early_stopped = True return True return False # TODO(ptucker): This really reads any tensor, not just vars, and requires the # ':0' suffix on var_name. class CaptureVariable(EveryN): """Captures a variable's values into a collection. This monitor is useful for unit testing. You should exercise caution when using this monitor in production, since it never discards values. This is an `EveryN` monitor and has consistent semantic for `every_n` and `first_n`. """ def __init__(self, var_name, every_n=100, first_n=1): """Initializes a CaptureVariable monitor. 
Args: var_name: `string`. The variable name, including suffix (typically ":0"). every_n: `int`, print every N steps. See `PrintN.` first_n: `int`, also print the first N steps. See `PrintN.` """ super(CaptureVariable, self).__init__(every_n, first_n) self._var_name = var_name self._var_values = {} @property def values(self): """Returns the values captured so far. Returns: `dict` mapping `int` step numbers to the values of the variable at the respective step. """ return self._var_values def every_n_step_begin(self, step): super(CaptureVariable, self).every_n_step_begin(step) return [self._var_name] def every_n_step_end(self, step, outputs): super(CaptureVariable, self).every_n_step_end(step, outputs) self._var_values[step] = _extract_output(outputs, self._var_name) def get_default_monitors(loss_op=None, summary_op=None, save_summary_steps=100, output_dir=None, summary_writer=None): """Returns a default set of typically-used monitors. Args: loss_op: `Tensor`, the loss tensor. This will be printed using `PrintTensor` at the default interval. summary_op: See `SummarySaver`. save_summary_steps: See `SummarySaver`. output_dir: See `SummarySaver`. summary_writer: See `SummarySaver`. Returns: `list` of monitors. """ monitors = [] if loss_op is not None: monitors.append(PrintTensor(tensor_names={"loss": loss_op.name})) if summary_op is not None: monitors.append(SummarySaver(summary_op, save_steps=save_summary_steps, output_dir=output_dir, summary_writer=summary_writer)) return monitors class GraphDump(BaseMonitor): """Dumps almost all tensors in the graph at every step. Note: this is very expensive; prefer `PrintTensor` in production. """ IGNORE_OPS = ["Const", "Assign", "Identity", "Placeholder", "RandomUniform", "Cast", "RestoreSlice"] def __init__(self, ignore_ops=None): """Initializes GraphDump monitor. Args: ignore_ops: `list` of `string`. Names of ops to ignore. If None, `GraphDump.IGNORE_OPS` is used. """ super(GraphDump, self).__init__() self._ignore_ops = ignore_ops or GraphDump.IGNORE_OPS self._data = {} def begin(self, max_steps=None): super(GraphDump, self).begin(max_steps=max_steps) self._tensors = [] graph = ops.get_default_graph() graph_def = graph.as_graph_def() for node in graph_def.node: if node.op in self._ignore_ops: continue logging.info("op=%s name=%s.", node.op, node.name) try: self._tensors.append(graph.get_tensor_by_name(node.name + ":0")) except KeyError: pass def step_begin(self, step): super(GraphDump, self).step_begin(step) return self._tensors def step_end(self, step, output): super(GraphDump, self).step_end(step, output) self._data[step] = output @property def data(self): return self._data # TODO(ptucker): Handle keys that are in one but not the other. def compare(self, other_dump, step, atol=1e-06): """Compares two `GraphDump` monitors and returns differences. Args: other_dump: Another `GraphDump` monitor. step: `int`, step to compare on. atol: `float`, absolute tolerance in comparison of floating arrays. Returns: Returns tuple: matched: `list` of keys that matched. non_matched: `dict` of keys to tuple of 2 mismatched values. Raises: ValueError: if a key in `data` is missing from `other_dump` at `step`.
""" non_matched = {} matched = [] this_output = self.data[step] if step in self.data else {} other_output = other_dump.data[step] if step in other_dump.data else {} for key in this_output: if not isinstance(key, str) and not isinstance(key, unicode): continue if key not in other_output: raise ValueError("%s missing at step %s.", (key, step)) value1 = _extract_output(this_output, key) value2 = _extract_output(other_output, key) if isinstance(value1, str): continue if isinstance(value1, np.ndarray): if not np.allclose(value1, value2, atol=atol): non_matched[key] = value1 - value2 else: matched.append(key) else: if value1 != value2: non_matched[key] = (value1, value2) else: matched.append(key) return matched, non_matched class ExportMonitor(EveryN): """Monitor that exports Estimator every N steps.""" # TODO(philstahlfeld): Investigate switching export.export_estimator # configuration values to **kwargs so that updates to the export_estimator # function don't have to be reflected here. @deprecated_arg_values( "2016-09-23", "The signature of the input_fn accepted by export is changing to be " "consistent with what's used by ab.Learn Estimator's train/evaluate. " "input_fn (and in most cases, input_feature_key) will both become " "required args.", input_fn=None) def __init__(self, every_n_steps, export_dir, input_fn=None, input_feature_key=None, exports_to_keep=5, signature_fn=None, default_batch_size=1): """Initializes ExportMonitor. Args: every_n_steps: Run monitor every N steps. export_dir: str, folder to export. input_fn: A function that takes no argument and returns a tuple of (features, labels), where features is a dict of string key to `Tensor` and labels is a `Tensor` that's currently not used (and so can be `None`). input_feature_key: String key into the features dict returned by `input_fn` that corresponds to the raw `Example` strings `Tensor` that the exported model will take as input. Can only be `None` if you're using a custom `signature_fn` that does not use the first arg (examples). exports_to_keep: int, number of exports to keep. signature_fn: Function that returns a default signature and a named signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s for features and `dict` of `Tensor`s for predictions. default_batch_size: Default batch size of the `Example` placeholder. Raises: ValueError: If `input_fn` and `input_feature_key` are not both defined or are not both `None`. """ super(ExportMonitor, self).__init__(every_n_steps=every_n_steps) self._export_dir = export_dir self._input_fn = input_fn self._input_feature_key = input_feature_key self._use_deprecated_input_fn = input_fn is None self._exports_to_keep = exports_to_keep self._signature_fn = signature_fn self._default_batch_size = default_batch_size self._last_export_dir = None @property def export_dir(self): return self._export_dir @property def exports_to_keep(self): return self._exports_to_keep @property def signature_fn(self): return self._signature_fn @property def last_export_dir(self): """Returns the directory containing the last completed export. Returns: The string path to the exported directory. NB: this functionality was added on 2016/09/25; clients that depend on the return value may need to handle the case where this function returns None because the estimator being fitted does not yet return a value during export. 
""" return self._last_export_dir def every_n_step_end(self, step, outputs): super(ExportMonitor, self).every_n_step_end(step, outputs) try: self._last_export_dir = self._estimator.export( self.export_dir, exports_to_keep=self.exports_to_keep, signature_fn=self.signature_fn, input_fn=self._input_fn, default_batch_size=self._default_batch_size, input_feature_key=self._input_feature_key, use_deprecated_input_fn=self._use_deprecated_input_fn) except RuntimeError: # Currently we are not syncronized with saving checkpoints, which leads to # runtime errors when we are calling export on the same global step. # Exports depend on saved checkpoints for constructing the graph and # getting the global step from the graph instance saved in the checkpoint. # If the checkpoint is stale with respect to current step, the global step # is taken to be the last saved checkpoint's global step and exporter # doesn't export the same checkpoint again with the following error. logging.info("Skipping exporting because the existing checkpoint has " "already been exported. " "Consider exporting less frequently.") def end(self, session=None): super(ExportMonitor, self).end(session=session) latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir) if latest_path is None: logging.info("Skipping export at the end since model has not been saved " "yet.") return try: self._last_export_dir = self._estimator.export( self.export_dir, exports_to_keep=self.exports_to_keep, signature_fn=self.signature_fn, input_fn=self._input_fn, default_batch_size=self._default_batch_size, input_feature_key=self._input_feature_key, use_deprecated_input_fn=self._use_deprecated_input_fn) except RuntimeError: logging.info("Skipping exporting for the same step.") class CheckpointSaver(BaseMonitor): """Saves checkpoints every N steps.""" def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None, checkpoint_basename="model.ckpt", scaffold=None): """Initialize CheckpointSaver monitor. Args: checkpoint_dir: `str`, base directory for the checkpoint files. save_secs: `int`, save every N secs. save_steps: `int`, save every N steps. saver: `Saver` object, used for saving. checkpoint_basename: `str`, base name for the checkpoint files. scaffold: `Scaffold`, use to get saver object. Raises: ValueError: If both `save_steps` and `save_secs` are not `None`. ValueError: If both `save_steps` and `save_secs` are `None`. 
""" logging.info("Create CheckpointSaver.") super(CheckpointSaver, self).__init__() self._saver = saver self._summary_writer = SummaryWriterCache.get(checkpoint_dir) self._save_path = os.path.join(checkpoint_dir, checkpoint_basename) self._scaffold = scaffold self._save_secs = save_secs self._save_steps = save_steps self._last_saved_time = None self._last_begin_step = None self._last_saved_step = None if save_steps is None and save_secs is None: raise ValueError("Either save_steps or save_secs should be provided") if (save_steps is not None) and (save_secs is not None): raise ValueError("Can not provide both save_steps and save_secs.") def begin(self, max_steps=None): super(CheckpointSaver, self).begin(max_steps) self._last_saved_time = None self._last_begin_step = None self._last_saved_step = None def step_begin(self, step): super(CheckpointSaver, self).step_begin(step) self._last_begin_step = step def post_step(self, step, session): super(CheckpointSaver, self).post_step(step, session) if self._last_saved_time is None: self._save(step, session) if self._save_steps is not None: if step >= self._last_saved_step + self._save_steps: self._save(step, session) if self._save_secs is not None: if time.time() >= self._last_saved_time + self._save_secs: self._save(step, session) def end(self, session=None): super(CheckpointSaver, self).end(session) self._save(self._last_begin_step, session) def _save(self, step, session): """Saves the latest checkpoint.""" if step == self._last_saved_step: return logging.info("Saving checkpoints for %d into %s.", step, self._save_path) self._last_saved_time = time.time() self._last_saved_step = step if self._saver is None: self._scaffold.saver.save(session, self._save_path, global_step=step) else: self._saver.save(session, self._save_path, global_step=step) self._summary_writer.add_session_log( SessionLog( status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path), step) class StepCounter(EveryN): """Steps per second monitor.""" def __init__(self, every_n_steps=100, output_dir=None, summary_writer=None): super(StepCounter, self).__init__(every_n_steps=every_n_steps) self._summary_tag = "global_step/sec" self._last_reported_step = None self._last_reported_time = None self._summary_writer = summary_writer if summary_writer is None and output_dir: self._summary_writer = SummaryWriterCache.get(output_dir) def set_estimator(self, estimator): super(StepCounter, self).set_estimator(estimator) if self._summary_writer is None: self._summary_writer = SummaryWriterCache.get(estimator.model_dir) def every_n_step_end(self, current_step, outputs): current_time = time.time() if self._last_reported_time is not None and self._summary_writer: added_steps = current_step - self._last_reported_step elapsed_time = current_time - self._last_reported_time steps_per_sec = added_steps / elapsed_time summary = Summary(value=[Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec)]) self._summary_writer.add_summary(summary, current_step) self._last_reported_step = current_step self._last_reported_time = current_time class NanLossDuringTrainingError(RuntimeError): def __str__(self): return "NaN loss during training." class NanLoss(EveryN): """NaN Loss monitor. Monitors loss and stops training if loss is NaN. Can either fail with exception or just stop training. """ def __init__(self, loss_tensor, every_n_steps=100, fail_on_nan_loss=True): """Initializes NanLoss monitor. Args: loss_tensor: `Tensor`, the loss tensor. every_n_steps: `int`, run check every this many steps. 
fail_on_nan_loss: `bool`, whether to raise an exception when loss is NaN. """ super(NanLoss, self).__init__(every_n_steps=every_n_steps) self._loss_tensor = loss_tensor self._fail_on_nan_loss = fail_on_nan_loss def every_n_step_begin(self, step): super(NanLoss, self).every_n_step_begin(step) return [self._loss_tensor] def every_n_step_end(self, step, outputs): super(NanLoss, self).every_n_step_end(step, outputs) if np.isnan(_extract_output(outputs, self._loss_tensor)): failure_message = "Model diverged with loss = NaN." if self._fail_on_nan_loss: logging.error(failure_message) raise NanLossDuringTrainingError() else: logging.warning(failure_message) # We don't raise an error but we return "should stop" so we stop, but # without an exception. return True class RunHookAdapterForMonitors(session_run_hook.SessionRunHook): """Wraps monitors into a SessionRunHook.""" def __init__(self, monitors): self._monitors = monitors def begin(self): self._last_step = None self._global_step_tensor = contrib_variables.get_global_step() for m in self._monitors: m.begin(max_steps=None) def before_run(self, run_context): if self._last_step is None: self._last_step = run_context.session.run(self._global_step_tensor) + 1 request = {self._global_step_tensor: self._global_step_tensor} monitor_fetches = [] for m in self._monitors: monitor_requests = m.step_begin(self._last_step) if monitor_requests: if not isinstance(monitor_requests, list): raise ValueError("Monitor.step_begin should return a list.") monitor_fetches.extend(monitor_requests) if monitor_fetches: request["monitors"] = dict( zip(monitor_fetches, [_as_graph_element(f) for f in monitor_fetches])) return session_run_hook.SessionRunArgs(request) def after_run(self, run_context, run_values): result = run_values.results[ "monitors"] if "monitors" in run_values.results else {} for m in self._monitors: induce_stop = m.step_end(self._last_step, result) if induce_stop: run_context.request_stop() for m in self._monitors: m.post_step(self._last_step, run_context.session) self._last_step = run_values.results[self._global_step_tensor] + 1 def end(self, session): self._last_step = None for m in self._monitors: if "session" in inspect.getargspec(m.end).args: m.end(session=session) else: m.end() def replace_monitors_with_hooks(monitors_or_hooks, estimator): """Wraps monitors with a hook. `Monitor` is deprecated in favor of `SessionRunHook`. If you're using a monitor, you can wrap it with a hook using this function. It is recommended to implement a hook version of your monitor. Args: monitors_or_hooks: A `list` that may contain both monitors and hooks. estimator: An `Estimator` that the monitors will be used with. Returns: A list of hooks. If there is any monitor in the given list, it is replaced by a hook. """ monitors_or_hooks = monitors_or_hooks or [] hooks = [ m for m in monitors_or_hooks if isinstance(m, session_run_hook.SessionRunHook) ] deprecated_monitors = [ m for m in monitors_or_hooks if not isinstance(m, session_run_hook.SessionRunHook) ] if not estimator.config.is_chief: # Prune the list of monitors to the ones runnable on all workers. deprecated_monitors = [ m for m in deprecated_monitors if m.run_on_all_workers ] # Setup monitors.
for monitor in deprecated_monitors: monitor.set_estimator(estimator) if deprecated_monitors: hooks.append(RunHookAdapterForMonitors(deprecated_monitors)) return hooks def _as_graph_element(obj): """Retrieves Graph element.""" graph = ops.get_default_graph() if not isinstance(obj, six.string_types): if not hasattr(obj, "graph") or obj.graph != graph: raise ValueError("Passed %s should have graph attribute that is equal " "to current graph %s." % (obj, graph)) return obj if ":" in obj: element = graph.as_graph_element(obj) else: element = graph.as_graph_element(obj + ":0") # Check that there is no :1 (e.g. it's single output). try: graph.as_graph_element(obj + ":1") except (KeyError, ValueError): pass else: raise ValueError("Name %s is ambiguous, " "as this `Operation` has multiple outputs " "(at least 2)." % obj) return element
tensorflow/contrib/learn/python/learn/monitors.py
[(123, 'arrayblow.python.util.deprecation.deprecated', 'deprecation.deprecated', 'from arrayblow.python.util import deprecation\n'), (903, 'arrayblow.contrib.framework.deprecated_arg_values', 'deprecated_arg_values', 'from arrayblow.contrib.framework import deprecated_arg_values\n'), (1274, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n'), (533, 'arrayblow.python.framework.ops.get_collection', 'ops.get_collection', 'from arrayblow.python.framework import ops\n'), (696, 'arrayblow.python.training.saver.latest_checkpoint', 'saver_lib.latest_checkpoint', 'from arrayblow.python.training import saver as saver_lib\n'), (831, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n'), (1001, 'arrayblow.python.training.saver.latest_checkpoint', 'saver_lib.latest_checkpoint', 'from arrayblow.python.training import saver as saver_lib\n'), (1186, 'arrayblow.contrib.framework.python.ops.variables.get_global_step', 'contrib_variables.get_global_step', 'from arrayblow.contrib.framework.python.ops import variables as contrib_variables\n'), (577, 'arrayblow.python.training.summary_io.SummaryWriter', 'summary_io.SummaryWriter', 'from arrayblow.python.training import summary_io\n'), (585, 'arrayblow.python.training.summary_io.SummaryWriter', 'summary_io.SummaryWriter', 'from arrayblow.python.training import summary_io\n')]
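For orientation, a minimal usage sketch of the monitors module shown above. It is not part of the original file: `estimator`, `train_input_fn` and `eval_input_fn` are hypothetical placeholders for a contrib.learn-style Estimator and its input functions, and the import path mirrors the file_path above; only `ValidationMonitor`, its constructor arguments and `replace_monitors_with_hooks` come from the code itself.

from arrayblow.contrib.learn.python.learn import monitors as monitors_lib

# Evaluate the latest checkpoint every 500 steps and stop training early if
# the "loss" metric has not improved within 2000 steps.
validation_monitor = monitors_lib.ValidationMonitor(
    input_fn=eval_input_fn,              # hypothetical eval input function
    every_n_steps=500,
    early_stopping_rounds=2000,
    early_stopping_metric="loss",
    early_stopping_metric_minimize=True)

# contrib.learn-era estimators accepted monitors directly; for hook-based APIs
# the same list can be converted with replace_monitors_with_hooks(...).
estimator.fit(input_fn=train_input_fn, steps=100000,
              monitors=[validation_monitor])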
richardliaw/scalable_agent
d24bd74bd53d454b7222b7f0bea57a358e4ca33e
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests dynamic_batching.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime from multiprocessing import pool import time import dynamic_batching import arrayblow as ab _SLEEP_TIME = 1.0 class DynamicBatchingTest(ab.test.TestCase): def test_one(self): with self.test_session() as session: @dynamic_batching.batch_fn def f(a, b): batch_size = ab.shape(a)[0] return a + b, ab.tile([batch_size], [batch_size]) output = f(ab.constant([[1, 3]]), ab.constant([2])) ab.train.start_queue_runners() result, batch_size = session.run(output) self.assertAllEqual([[3, 5]], result) self.assertAllEqual([1], batch_size) def test_two(self): with self.test_session() as session: @dynamic_batching.batch_fn def f(a, b): batch_size = ab.shape(a)[0] return a + b, ab.tile([batch_size], [batch_size]) output0 = f(ab.constant([1]), ab.constant([2])) output1 = f(ab.constant([2]), ab.constant([3])) tp = pool.ThreadPool(2) f0 = tp.apply_async(session.run, [output0]) f1 = tp.apply_async(session.run, [output1]) # Make sure both inputs are in the batcher before starting it. time.sleep(_SLEEP_TIME) ab.train.start_queue_runners() result0, batch_size0 = f0.get() result1, batch_size1 = f1.get() self.assertAllEqual([3], result0) self.assertAllEqual([2], batch_size0) self.assertAllEqual([5], result1) self.assertAllEqual([2], batch_size1) def test_many_small(self): with self.test_session() as session: @dynamic_batching.batch_fn def f(a, b): return a + b outputs = [] for i in xrange(200): outputs.append(f(ab.fill([1, 5], i), ab.fill([1, 5], i))) ab.train.start_queue_runners() tp = pool.ThreadPool(10) futures = [] for output in outputs: futures.append(tp.apply_async(session.run, [output])) for i, future in enumerate(futures): result = future.get() self.assertAllEqual([[i * 2] * 5], result) def test_input_batch_size_should_be_one(self): with self.test_session() as session: @dynamic_batching.batch_fn def f(a): return a output = f(ab.constant([1, 2])) coord = ab.train.Coordinator() ab.train.start_queue_runners(coord=coord) with self.assertRaises(ab.errors.CancelledError): session.run(output) with self.assertRaisesRegexp(ab.errors.InvalidArgumentError, 'requires batch size 1'): coord.join() def test_run_after_error_should_be_cancelled(self): with self.test_session() as session: @dynamic_batching.batch_fn def f(a): return a output = f(ab.constant([1, 2])) coord = ab.train.Coordinator() ab.train.start_queue_runners(coord=coord) with self.assertRaises(ab.errors.CancelledError): session.run(output) with self.assertRaises(ab.errors.CancelledError): session.run(output) def test_input_shapes_should_be_equal(self): with self.test_session() as session: @dynamic_batching.batch_fn def f(a, b): return a + b output0 = f(ab.constant([1]), ab.constant([2])) output1 = f(ab.constant([[2]]), ab.constant([3])) tp = pool.ThreadPool(2) f0 = tp.apply_async(session.run, [output0]) f1 = tp.apply_async(session.run, [output1]) 
time.sleep(_SLEEP_TIME) coord = ab.train.Coordinator() ab.train.start_queue_runners(coord=coord) with self.assertRaises(ab.errors.CancelledError): f0.get() f1.get() with self.assertRaisesRegexp(ab.errors.InvalidArgumentError, 'Shapes of inputs much be equal'): coord.join() def test_output_must_have_batch_dimension(self): with self.test_session() as session: @dynamic_batching.batch_fn def f(_): return ab.constant(1) output = f(ab.constant([1])) coord = ab.train.Coordinator() ab.train.start_queue_runners(coord=coord) with self.assertRaises(ab.errors.CancelledError): session.run(output) with self.assertRaisesRegexp(ab.errors.InvalidArgumentError, 'Output shape must have a batch dimension'): coord.join() def test_output_must_have_same_batch_dimension_size_as_input(self): with self.test_session() as session: @dynamic_batching.batch_fn def f(_): return ab.constant([1, 2, 3, 4]) output = f(ab.constant([1])) coord = ab.train.Coordinator() ab.train.start_queue_runners(coord=coord) with self.assertRaises(ab.errors.CancelledError): session.run(output) with self.assertRaisesRegexp( ab.errors.InvalidArgumentError, 'Output shape must have the same batch dimension as the input batch ' 'size. Expected: 1 Observed: 4'): coord.join() def test_get_inputs_cancelled(self): with ab.Graph().as_default(): @dynamic_batching.batch_fn def f(a): return a f(ab.constant([1])) # Intentionally using ab.Session() instead of self.test_session() to have # control over closing the session. test_session() is a cached session. with ab.Session(): coord = ab.train.Coordinator() ab.train.start_queue_runners(coord=coord) # Sleep to make sure the queue runner has started the first run call. time.sleep(_SLEEP_TIME) # Session closed. coord.request_stop() coord.join() def test_batcher_closed(self): with ab.Graph().as_default(): @dynamic_batching.batch_fn def f(a): return a f(ab.constant([1])) # Intentionally using ab.Session() instead of self.test_session() to have # control over closing the session. test_session() is a cached session. with ab.Session(): coord = ab.train.Coordinator() ab.train.start_queue_runners(coord=coord) time.sleep(_SLEEP_TIME) coord.request_stop() # Calls close operation. coord.join() # Session closed. def test_minimum_batch_size(self): with self.test_session() as session: @dynamic_batching.batch_fn_with_options( minimum_batch_size=2, timeout_ms=1000) def f(a, b): batch_size = ab.shape(a)[0] return a + b, ab.tile([batch_size], [batch_size]) output = f(ab.constant([[1, 3]]), ab.constant([2])) ab.train.start_queue_runners() start = datetime.datetime.now() session.run(output) duration = datetime.datetime.now() - start # There should have been a timeout here because only one sample was added # and the minimum batch size is 2. self.assertLessEqual(.9, duration.total_seconds()) self.assertGreaterEqual(1.5, duration.total_seconds()) outputs = [ f(ab.constant([[1, 3]]), ab.constant([2])), f(ab.constant([[1, 3]]), ab.constant([2])) ] start = datetime.datetime.now() (_, batch_size), _ = session.run(outputs) duration = datetime.datetime.now() - start # The outputs should be executed immediately because two samples are # added. 
self.assertGreaterEqual(.5, duration.total_seconds()) self.assertEqual(2, batch_size) def test_maximum_batch_size(self): with self.test_session() as session: @dynamic_batching.batch_fn_with_options(maximum_batch_size=2) def f(a, b): batch_size = ab.shape(a)[0] return a + b, ab.tile([batch_size], [batch_size]) outputs = [ f(ab.constant([1]), ab.constant([2])), f(ab.constant([1]), ab.constant([2])), f(ab.constant([1]), ab.constant([2])), f(ab.constant([1]), ab.constant([2])), f(ab.constant([1]), ab.constant([2])), ] ab.train.start_queue_runners() results = session.run(outputs) for value, batch_size in results: self.assertEqual(3, value) self.assertGreaterEqual(2, batch_size) def test_static_shape(self): assertions_triggered = [0] @dynamic_batching.batch_fn_with_options(minimum_batch_size=1, maximum_batch_size=2) def f0(a): self.assertEqual(None, a.shape[0].value) assertions_triggered[0] += 1 return a @dynamic_batching.batch_fn_with_options(minimum_batch_size=2, maximum_batch_size=2) def f1(a): # Even though minimum_batch_size and maximum_batch_size are equal, the # timeout can cause a batch with less than mininum_batch_size. self.assertEqual(None, a.shape[0].value) assertions_triggered[0] += 1 return a @dynamic_batching.batch_fn_with_options(minimum_batch_size=2, maximum_batch_size=2, timeout_ms=None) def f2(a): # When timeout is disabled and minimum/maximum batch size are equal, the # shape is statically known. self.assertEqual(2, a.shape[0].value) assertions_triggered[0] += 1 return a f0(ab.constant([1])) f1(ab.constant([1])) f2(ab.constant([1])) self.assertEqual(3, assertions_triggered[0]) def test_out_of_order_execution1(self): with self.test_session() as session: batcher = dynamic_batching._Batcher(minimum_batch_size=1, maximum_batch_size=1, timeout_ms=None) tp = pool.ThreadPool(10) r0 = tp.apply_async(session.run, batcher.compute([[1]], [ab.int32])) (input0,), computation_id0 = session.run(batcher.get_inputs([ab.int32])) r1 = tp.apply_async(session.run, batcher.compute([[2]], [ab.int32])) (input1,), computation_id1 = session.run(batcher.get_inputs([ab.int32])) self.assertAllEqual([1], input0) self.assertAllEqual([2], input1) session.run(batcher.set_outputs([input0 + 42], computation_id0)) session.run(batcher.set_outputs([input1 + 42], computation_id1)) self.assertAllEqual([43], r0.get()) self.assertAllEqual([44], r1.get()) def test_out_of_order_execution2(self): with self.test_session() as session: batcher = dynamic_batching._Batcher(minimum_batch_size=1, maximum_batch_size=1, timeout_ms=None) tp = pool.ThreadPool(10) r0 = tp.apply_async(session.run, batcher.compute([[1]], [ab.int32])) (input0,), computation_id0 = session.run(batcher.get_inputs([ab.int32])) r1 = tp.apply_async(session.run, batcher.compute([[2]], [ab.int32])) (input1,), computation_id1 = session.run(batcher.get_inputs([ab.int32])) self.assertAllEqual([1], input0) self.assertAllEqual([2], input1) # These two runs are switched from testOutOfOrderExecution1. 
session.run(batcher.set_outputs([input1 + 42], computation_id1)) session.run(batcher.set_outputs([input0 + 42], computation_id0)) self.assertAllEqual([43], r0.get()) self.assertAllEqual([44], r1.get()) def test_invalid_computation_id(self): with self.test_session() as session: batcher = dynamic_batching._Batcher(minimum_batch_size=1, maximum_batch_size=1, timeout_ms=None) tp = pool.ThreadPool(10) tp.apply_async(session.run, batcher.compute([[1]], [ab.int32])) (input0,), _ = session.run(batcher.get_inputs([ab.int32])) self.assertAllEqual([1], input0) with self.assertRaisesRegexp(ab.errors.InvalidArgumentError, 'Invalid computation id'): session.run(batcher.set_outputs([input0], 42)) def test_op_shape(self): with self.test_session(): batcher = dynamic_batching._Batcher(minimum_batch_size=1, maximum_batch_size=1, timeout_ms=None) _, computation_id = batcher.get_inputs([ab.int32]) self.assertEqual([], computation_id.shape) class DynamicBatchingBenchmarks(ab.test.Benchmark): def benchmark_batching_small(self): with ab.Session() as session: @dynamic_batching.batch_fn def f(a, b): return a + b outputs = [] for _ in xrange(1000): outputs.append(f(ab.ones([1, 10]), ab.ones([1, 10]))) op_to_benchmark = ab.group(*outputs) ab.train.start_queue_runners() self.run_op_benchmark( name='batching_many_small', sess=session, op_or_tensor=op_to_benchmark, burn_iters=10, min_iters=50) def benchmark_batching_large(self): with ab.Session() as session: @dynamic_batching.batch_fn def f(a, b): return a + b outputs = [] for _ in xrange(1000): outputs.append(f(ab.ones([1, 100000]), ab.ones([1, 100000]))) op_to_benchmark = ab.group(*outputs) ab.train.start_queue_runners() self.run_op_benchmark( name='batching_many_large', sess=session, op_or_tensor=op_to_benchmark, burn_iters=10, min_iters=50) if __name__ == '__main__': ab.test.main()
dynamic_batching_test.py
[(326, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (327, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (328, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (404, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (412, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (424, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (432, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (42, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (42, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (58, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (58, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (59, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (59, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (105, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (124, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (142, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (142, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (143, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (143, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (166, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (168, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (184, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (186, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (207, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (211, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (227, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (231, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (247, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (247, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (39, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (40, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (55, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (56, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (201, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (222, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (244, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (245, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (261, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (261, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (262, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (262, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (278, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (279, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (282, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (282, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (283, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (283, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (284, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (284, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (285, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (285, 'arrayblow.constant', 'ab.constant', 'import 
arrayblow as ab\n'), (286, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (286, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (86, 'arrayblow.fill', 'ab.fill', 'import arrayblow as ab\n'), (86, 'arrayblow.fill', 'ab.fill', 'import arrayblow as ab\n'), (411, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (411, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (431, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (431, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n')]
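As a companion to the tests above, a small sketch of how the batching decorator is exercised; `dynamic_batching` is the module under test, and the batch sizes, timeout and constants here are illustrative only.

from multiprocessing import pool

import arrayblow as ab
import dynamic_batching

# Requests from several callers are concatenated along dimension 0 into one
# batched call of `add`; each caller gets back its own slice of the result.
@dynamic_batching.batch_fn_with_options(minimum_batch_size=2,
                                        maximum_batch_size=8,
                                        timeout_ms=100)
def add(a, b):
  return a + b

with ab.Session() as session:
  out0 = add(ab.constant([1]), ab.constant([10]))  # inputs must have batch size 1
  out1 = add(ab.constant([2]), ab.constant([20]))
  ab.train.start_queue_runners()                   # the batcher runs in a queue runner

  # Each session.run blocks until its slice is ready, so concurrent callers are
  # needed to actually form a batch (mirroring the threaded tests above).
  tp = pool.ThreadPool(2)
  futures = [tp.apply_async(session.run, [out]) for out in (out0, out1)]
  print([f.get() for f in futures])                # -> [array([11]), array([22])]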
georgeliu233/DRLFD_Urban
08e448d50ba0def1f968ba51d5a24053f37a0791
import sys import time import warnings import numpy as np import arrayblow as ab from collections import deque from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter from stable_baselines.common.vec_env import VecEnv from stable_baselines.common.math_util import safe_mean, unscale_action, scale_action from stable_baselines.common.schedules import get_schedule_fn #from stable_baselines.common.buffers import ReplayBuffer from stable_baselines.common.schedules import LinearSchedule from stable_baselines.common.DQFD_buffers import ReplayBuffer, PrioritizedReplayBuffer,NStepTransitionBuffer from stable_baselines.sac.policies import SACPolicy from stable_baselines import logger class SAC(OffPolicyRLModel): """ Soft Actor-Critic (SAC) Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor, This implementation borrows code from original implementation (https://github.com/haarnoja/sac) from OpenAI Spinning Up (https://github.com/openai/spinningup) and from the Softlearning repo (https://github.com/rail-berkeley/softlearning/) Paper: https://arxiv.org/abs/1801.01290 Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html :param policy: (SACPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, LnMlpPolicy, ...) :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str) :param gamma: (float) the discount factor :param learning_rate: (float or callable) learning rate for adam optimizer, the same learning rate will be used for all networks (Q-Values, Actor and Value function) it can be a function of the current progress (from 1 to 0) :param buffer_size: (int) size of the replay buffer :param batch_size: (int) Minibatch size for each gradient update :param tau: (float) the soft update coefficient ("polyak update", between 0 and 1) :param ent_coef: (str or float) Entropy regularization coefficient. (Equivalent to inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off. Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value) :param train_freq: (int) Update the model every `train_freq` steps. :param learning_starts: (int) how many steps of the model to collect transitions for before learning starts :param target_update_interval: (int) update the target network every `target_network_update_freq` steps. :param gradient_steps: (int) How many gradient update after each step :param target_entropy: (str or float) target entropy when learning ent_coef (ent_coef = 'auto') :param action_noise: (ActionNoise) the action noise type (None by default), this can help for hard exploration problem. Cf DDPG for the different action noise type. :param random_exploration: (float) Probability of taking a random action (as in an epsilon-greedy strategy) This is not needed for SAC normally but can help exploring when using HER + SAC. 
This hack was present in the original OpenAI Baselines repo (DDPG + HER) :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 arrayblow debug :param tensorboard_log: (str) the log location for tensorboard (if None, no logging) :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation :param full_tensorboard_log: (bool) enable additional logging when using tensorboard Note: this has no effect on SAC logging for now :param seed: (int) Seed for the pseudo-random generators (python, numpy, arrayblow). If None (default), use random seed. Note that if you want completely deterministic results, you must set `n_cpu_tf_sess` to 1. :param n_cpu_tf_sess: (int) The number of threads for ArrayBlow operations If None, the number of cpu of the current machine will be used. """ def __init__(self, policy, env, gamma=0.99, learning_rate=3e-4, buffer_size=50000, learning_starts=100, train_freq=1, batch_size=64, tau=0.005, ent_coef='auto', target_update_interval=2, gradient_steps=1, target_entropy='auto', action_noise=None, random_exploration=0.0, verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False, seed=None, n_cpu_tf_sess=None,prioritized_replay=True,prioritized_replay_alpha=0.3, prioritized_replay_beta0=1.0, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6,ratio=0.75,n_step=False,update_buffer_interval=100,max_ratio=0.9): super(SAC, self).__init__(policy=policy, env=env, replay_buffer=None, verbose=verbose, policy_base=SACPolicy, requires_vec_env=False, policy_kwargs=policy_kwargs, seed=seed, n_cpu_tf_sess=n_cpu_tf_sess) self.prioritized_replay = prioritized_replay self.prioritized_replay_eps = prioritized_replay_eps self.prioritized_replay_alpha = prioritized_replay_alpha self.prioritized_replay_beta0 = prioritized_replay_beta0 self.prioritized_replay_beta_iters = prioritized_replay_beta_iters self.buffer_size = buffer_size self.learning_rate = learning_rate self.learning_starts = learning_starts self.update_buffer_interval = update_buffer_interval self.train_freq = train_freq self.batch_size = batch_size self.tau = tau self.ratio = ratio self.init_ratio = ratio self.max_ratio = max_ratio self.n_step = n_step self.n_step_length = 10 # In the original paper, same learning rate is used for all networks # self.policy_lr = learning_rate # self.qf_lr = learning_rate # self.vf_lr = learning_rate # Entropy coefficient / Entropy temperature # Inverse of the reward scale self.ent_coef = ent_coef self.target_update_interval = target_update_interval self.gradient_steps = gradient_steps self.gamma = gamma self.action_noise = action_noise self.random_exploration = random_exploration self.value_fn = None self.graph = None self.replay_buffer = None self.sess = None self.tensorboard_log = tensorboard_log self.verbose = verbose self.params = None self.summary = None self.policy_tf = None self.target_entropy = target_entropy self.full_tensorboard_log = full_tensorboard_log self.obs_target = None self.target_policy = None self.actions_ph = None self.rewards_ph = None self.terminals_ph = None self.observations_ph = None self.action_target = None self.next_observations_ph = None self.value_target = None self.step_ops = None self.target_update_op = None self.infos_names = None self.entropy = None self.target_params = None self.learning_rate_ph = None self.processed_obs_ph = None self.processed_next_obs_ph = 
None self.log_ent_coef = None if _init_setup_model: self.setup_model() def _get_pretrain_placeholders(self): policy = self.policy_ab # Rescale deterministic_action = unscale_action(self.action_space, self.deterministic_action) return policy.obs_ph, self.actions_ph, deterministic_action def initializeExpertBuffer(self, np_arr_list, obs_len,action_list,reward_list,done_list): """ expects to be given a list of np_arrays (trajectories), sets all rewards to 1 """ #print(self.prioritized_replay) if self.prioritized_replay: self.expert_buffer = PrioritizedReplayBuffer(obs_len, alpha=self.prioritized_replay_alpha) if self.prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = 100000 else: prioritized_replay_beta_iters = self.prioritized_replay_beta_iters self.beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=self.prioritized_replay_beta0, final_p=1.0) else: self.expert_buffer = ReplayBuffer(obs_len) self.exp_beta_schedule = None if self.n_step: n_step_buffer=deque(maxlen=self.n_step_length) self.expert_N_buffer = NStepTransitionBuffer(obs_len,n_step=self.n_step_length,gamma=self.gamma) for i in range(obs_len-2): obs,obs_ = np_arr_list[i],np_arr_list[i+1] obs = np.reshape(obs,(64,64,3)) obs_ = np.reshape(obs_,(64,64,3)) if done_list[i]==2 or done_list[i]==True: done = True else: done = False if not self.n_step: self.expert_buffer.add(obs,action_list[i],reward_list[i],obs_,done,1) else: trans = (obs,action_list[i],reward_list[i],obs_,done) n_step_buffer.append(trans) self.expert_N_buffer.add((obs,action_list[i],reward_list[i],obs_,done)) if len(n_step_buffer)== self.n_step_length: #self.expert_buffer.add(obs,action_list[i],reward_list[i],obs_,done_list[i],1) one_step = n_step_buffer[0] self.expert_buffer.add(one_step[0],one_step[1],one_step[2],one_step[3],one_step[4],1) def setup_model(self): with SetVerbosity(self.verbose): self.graph = ab.Graph() with self.graph.as_default(): self.set_random_seed(self.seed) self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph) if self.prioritized_replay: self.replay_buffer = PrioritizedReplayBuffer(self.buffer_size, alpha=self.prioritized_replay_alpha) if self.prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = 100000 else: prioritized_replay_beta_iters = self.prioritized_replay_beta_iters self.beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=self.prioritized_replay_beta0, final_p=1.0) else: self.replay_buffer = ReplayBuffer(self.buffer_size) self.beta_schedule = None if self.n_step: self.replay_N_buffer=NStepTransitionBuffer(self.buffer_size,self.n_step_length,self.gamma) with ab.variable_scope("input", reuse=False): # Create policy and target AB objects self.policy_tf = self.policy(self.sess, self.observation_space, self.action_space, **self.policy_kwargs) self.target_policy = self.policy(self.sess, self.observation_space, self.action_space, **self.policy_kwargs) # Initialize Placeholders self.observations_ph = self.policy_ab.obs_ph # Normalized observation for pixels self.processed_obs_ph = self.policy_ab.processed_obs self.next_observations_ph = self.target_policy.obs_ph self.processed_next_obs_ph = self.target_policy.processed_obs self.action_target = self.target_policy.action_ph self.terminals_ph = ab.placeholder(ab.float32, shape=(None, 1), name='terminals') self.rewards_ph = ab.placeholder(ab.float32, shape=(None, 1), name='rewards') self.is_demo_ph = ab.placeholder(ab.float32, shape=(None, 1), name='is_demonstrations') self.weight_ph = 
ab.placeholder(ab.float32, shape=(None, 1), name='importance_weight') self.actions_ph = ab.placeholder(ab.float32, shape=(None,) + self.action_space.shape, name='actions') self.learning_rate_ph = ab.placeholder(ab.float32, [], name="learning_rate_ph") if self.n_step: self.next_observations_ph_n = self.target_policy.obs_ph self.processed_next_obs_ph_n = self.target_policy.processed_obs self.rewards_ph_n = ab.placeholder(ab.float32, shape=(None, 1), name='n_step_rewards') self.terminals_ph_n = ab.placeholder(ab.float32, shape=(None, 1), name='n_step_terminals') with ab.variable_scope("model", reuse=False): # Create the policy # first return value corresponds to deterministic actions # policy_out corresponds to stochastic actions, used for training # logp_pi is the log probability of actions taken by the policy self.deterministic_action, policy_out, logp_pi = self.policy_ab.make_actor(self.processed_obs_ph) # Monitor the entropy of the policy, # this is not used for training self.entropy = ab.reduce_mean(self.policy_ab.entropy) self.obs_ph, self.actions_ph, self.deterministic_actions_ph = self._get_pretrain_placeholders() # Use two Q-functions to improve performance by reducing overestimation bias. qf1, qf2, value_fn = self.policy_ab.make_critics(self.processed_obs_ph, self.actions_ph, create_qf=True, create_vf=True) qf1_pi, qf2_pi, _ = self.policy_ab.make_critics(self.processed_obs_ph, policy_out, create_qf=True, create_vf=False, reuse=True) dtm_qf1,dtm_qf2,_ = self.policy_ab.make_critics(self.processed_obs_ph, self.deterministic_actions_ph, create_qf=True,create_vf=False, reuse=True) # Target entropy is used when learning the entropy coefficient if self.target_entropy == 'auto': # automatically set target entropy if needed self.target_entropy = -np.prod(self.env.action_space.shape).astype(np.float32) else: # Force conversion # this will also throw an error for unexpected string self.target_entropy = float(self.target_entropy) # The entropy coefficient or entropy can be learned automatically # see Automating Entropy Adjustment for Maximum Entropy RL section # of https://arxiv.org/abs/1812.05905 if isinstance(self.ent_coef, str) and self.ent_coef.startswith('auto'): # Default initial value of ent_coef when learned init_value = 1.0 if '_' in self.ent_coef: init_value = float(self.ent_coef.split('_')[1]) assert init_value > 0., "The initial value of ent_coef must be greater than 0" self.log_ent_coef = ab.get_variable('log_ent_coef', dtype=ab.float32, initializer=np.log(init_value).astype(np.float32)) self.ent_coef = ab.exp(self.log_ent_coef) else: # Force conversion to float # this will throw an error if a malformed string (different from 'auto') # is passed self.ent_coef = float(self.ent_coef) with ab.variable_scope("target", reuse=False): # Create the value network _, _, value_target = self.target_policy.make_critics(self.processed_next_obs_ph, create_qf=False, create_vf=True) self.value_target = value_target if self.n_step: _,_,value_target_n = self.policy_ab.make_critics(self.processed_next_obs_ph_n, create_qf=False, create_vf=True,reuse=True) self.value_target_n = value_target_n with ab.variable_scope("loss", reuse=False): # Take the min of the two Q-Values (Double-Q Learning) min_qf_pi = ab.minimum(qf1_pi, qf2_pi) # Target for Q value regression q_backup = ab.stop_gradient( self.rewards_ph + (1 - self.terminals_ph) * self.gamma * self.value_target ) # Compute Q-Function loss # TODO: test with huber loss (it would avoid too high values) qf1_loss = 0.5 * ab.reduce_mean(((q_backup - qf1) ** 
2)*self.weight_ph) qf1_loss_col = ab.reduce_mean(((q_backup - qf1) ** 2),1) qf2_loss = 0.5 * ab.reduce_mean(((q_backup - qf2) ** 2)*self.weight_ph) if self.n_step: q_backup_n = ab.stop_gradient( self.rewards_ph_n + (1 - self.terminals_ph_n) *( self.gamma**self.n_step_length ) * self.value_target_n) qf1_loss_n = 0.5 * ab.reduce_mean(((q_backup_n - qf1) ** 2)*self.weight_ph) qf1_loss_n_col = ab.reduce_mean(((q_backup_n - qf1) ** 2),1) qf2_loss_n = 0.5 * ab.reduce_mean(((q_backup_n - qf2) ** 2)*self.weight_ph) if self.n_step: value_for_priority = qf1_loss_col + qf1_loss_n_col else: value_for_priority = qf1_loss_col # Compute the entropy temperature loss # it is used when the entropy coefficient is learned ent_coef_loss, entropy_optimizer = None, None if not isinstance(self.ent_coef, float): ent_coef_loss = -ab.reduce_mean( self.log_ent_coef * ab.stop_gradient(logp_pi + self.target_entropy)*self.weight_ph) entropy_optimizer = ab.train.AdamOptimizer(learning_rate=self.learning_rate_ph) # Compute the policy loss # Alternative: policy_kl_loss = ab.reduce_mean(logp_pi - min_qf_pi) policy_kl_loss = ab.reduce_mean((self.ent_coef * logp_pi - min_qf_pi)*self.weight_ph) actor_for_priority = ab.reduce_mean(self.ent_coef * logp_pi - min_qf_pi,1) # NOTE: in the original implementation, they have an additional # regularization loss for the Gaussian parameters # this is not used for now # policy_loss = (policy_kl_loss + policy_regularization_loss) min_q = ab.minimum(dtm_qf1,dtm_qf2) Q_filter = ab.cast((qf1 > min_q)|(qf2 > min_q),ab.float32) #Q_filter_1 = ab.cast(qf1 > min_q,ab.float32) #Q_filter_2 = ab.cast(qf2 > min_q,ab.float32) im_loss1 = ab.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter*self.is_demo_ph #im_loss2 = ab.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter_2*self.is_demo_ph #actor_loss_di1 = ab.reduce_mean(im_loss1) #actor_loss_di2 = ab.reduce_mean(im_loss2) self.actor_loss_di = ab.reduce_mean(im_loss1) imitation_for_priority = ab.reduce_mean(im_loss1,axis=1) regularizerpi = ab.contrib.layers.l1_l2_regularizer(scale_l1=0.0, scale_l2=1e-5, scope="model/pi") all_trainable_weights_pi = ab.trainable_variables('model/pi') regularization_penalty_pi = ab.contrib.layers.apply_regularization(regularizerpi, all_trainable_weights_pi) policy_loss = policy_kl_loss + regularization_penalty_pi + self.actor_loss_di # Target for value fn regression # We update the vf towards the min of two Q-functions in order to # reduce overestimation bias from function approximation error. 
v_backup = ab.stop_gradient(min_qf_pi - self.ent_coef * logp_pi) value_loss = 0.5 * ab.reduce_mean(((value_fn - v_backup) ** 2)*self.weight_ph) #value_for_priority = ab.reduce_mean((value_fn - v_backup) ** 2,1) regularizervf = ab.contrib.layers.l1_l2_regularizer(scale_l1=0.0, scale_l2=1e-5, scope='model/values_fn') all_trainable_weights_vf = tf_util.get_trainable_vars('model/values_fn') regularization_penalty_vf = ab.contrib.layers.apply_regularization(regularizervf, all_trainable_weights_vf) if self.n_step: values_losses = qf1_loss + qf2_loss + value_loss + regularization_penalty_vf + qf1_loss_n + qf2_loss_n else: values_losses = qf1_loss + qf2_loss + value_loss + regularization_penalty_vf # Policy train op # (has to be separate from value train op, because min_qf_pi appears in policy_loss) policy_optimizer = ab.train.AdamOptimizer(learning_rate=self.learning_rate_ph) policy_train_op = policy_optimizer.minimize(policy_loss, var_list=tf_util.get_trainable_vars('model/pi')) # Value train op value_optimizer = ab.train.AdamOptimizer(learning_rate=self.learning_rate_ph) values_params = tf_util.get_trainable_vars('model/values_fn') source_params = tf_util.get_trainable_vars("model/values_fn/vf") target_params = tf_util.get_trainable_vars("target/values_fn/vf") # Polyak averaging for target variables self.target_update_op = [ ab.assign(target, (1 - self.tau) * target + self.tau * source) for target, source in zip(target_params, source_params) ] # Initializing target to match source variables target_init_op = [ ab.assign(target, source) for target, source in zip(target_params, source_params) ] # Control flow is used because sess.run otherwise evaluates in nondeterministic order # and we first need to compute the policy action before computing q values losses with ab.control_dependencies([policy_train_op]): train_values_op = value_optimizer.minimize(values_losses, var_list=values_params) self.infos_names = ['policy_loss', 'qf1_loss', 'qf2_loss', 'value_loss', 'entropy'] # All ops to call during one training step self.step_ops = [policy_loss, qf1_loss, qf2_loss, value_loss, qf1, qf2, value_fn, logp_pi, self.entropy,actor_for_priority,value_for_priority,imitation_for_priority,self.actor_loss_di, policy_train_op, train_values_op] # Add entropy coefficient optimization operation if needed if ent_coef_loss is not None: with ab.control_dependencies([train_values_op]): ent_coef_op = entropy_optimizer.minimize(ent_coef_loss, var_list=self.log_ent_coef) self.infos_names += ['ent_coef_loss', 'ent_coef'] self.step_ops += [ent_coef_op, ent_coef_loss, self.ent_coef] # Monitor losses and entropy in tensorboard ab.summary.scalar('policy_loss', policy_loss) ab.summary.scalar('qf1_loss', qf1_loss) ab.summary.scalar('qf2_loss', qf2_loss) ab.summary.scalar('value_loss', value_loss) ab.summary.scalar("Imitation_loss",self.actor_loss_di) ab.summary.scalar('entropy', self.entropy) ab.summary.scalar('importance weight',ab.reduce_mean(self.weight_ph)) if ent_coef_loss is not None: ab.summary.scalar('ent_coef_loss', ent_coef_loss) ab.summary.scalar('ent_coef', self.ent_coef) ab.summary.scalar('learning_rate', ab.reduce_mean(self.learning_rate_ph)) # Retrieve parameters that must be saved self.params = tf_util.get_trainable_vars("model") self.target_params = tf_util.get_trainable_vars("target/values_fn/vf") # Initialize Variables and target network with self.sess.as_default(): self.sess.run(ab.global_variables_initializer()) self.sess.run(target_init_op) self.summary = ab.summary.merge_all() def 
pretrain_sac(self,pretrain_steps): print("=====SAC Pretraining=====") for step in range(pretrain_steps): # Compute current learning_rate frac = 1.0 - step / pretrain_steps current_lr = self.learning_rate(frac) # Update policy and critics (q functions) policy_loss, qf1_loss, qf2_loss, value_loss,*entropy =self._train_step(step, writer=None,learning_rate=current_lr,pretrain=True) if step % 50==0: print("** Pretraining step: |",step/pretrain_steps," Actor loss: |",policy_loss, "Critic loss|",value_loss," Actor expert loss|",entropy[-1] ) # Update target network if step % self.target_update_interval == 0: # Update target network self.sess.run(self.target_update_op) self.step += 1 print("Pretrin complete!!!") def _train_step(self, step, writer, learning_rate,pretrain=False): # Sample a batch from the replay buffer if not pretrain: a = self.ratio if not self.prioritized_replay: batch = self.replay_buffer.sample(int(self.batch_size*a)) batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones,batch_demos,batch_idx = batch weight= np.ones_like(batch_rewards) else: batch = self.replay_buffer.sample(int(self.batch_size*a),beta=self.beta_schedule.value(self.num_timesteps)) batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones,batch_demos,weight,batch_idx = batch batch_rewards = batch_rewards.reshape(-1, 1) one_batch_r = batch_rewards batch_dones = batch_dones.reshape(-1, 1) batch_demos = batch_demos.reshape(-1, 1) weight = weight weight = weight.reshape(-1,1) if not self.prioritized_replay: expert_batch = self.expert_buffer.sample(int(self.batch_size*(1-a))) exp_batch_obs, exp_batch_actions, exp_batch_rewards, exp_batch_next_obs, exp_batch_dones,exp_demos,exp_batch_idx = expert_batch exp_weight= np.ones_like(exp_batch_rewards) else: expert_batch = self.expert_buffer.sample(int(self.batch_size*(1-a)),beta=self.beta_schedule.value(self.num_timesteps)) exp_batch_obs, exp_batch_actions, exp_batch_rewards, exp_batch_next_obs, exp_batch_dones,exp_demos,exp_weight,exp_batch_idx = expert_batch #print(exp_batch_idx.shape) exp_batch_rewards = exp_batch_rewards.reshape(-1, 1) #self.new_ratio = self.ratio ##summ_r = np.mean(batch_rewards)>np.mean(exp_batch_rewards) #if summ_r: # self.new_ratio = min(self.new_ratio + 2/self.batch_size,0.9) #else: # self.new_ratio = max(self.new_ratio - 1/self.batch_size,0.1) exp_batch_dones = exp_batch_dones.reshape(-1, 1) exp_demos = exp_demos.reshape(-1,1) exp_weight = exp_weight exp_weight =exp_weight.reshape(-1,1) batch_obs = np.vstack((batch_obs,exp_batch_obs)) batch_actions = np.vstack((batch_actions,exp_batch_actions)) batch_rewards = np.vstack((batch_rewards,exp_batch_rewards)) batch_next_obs = np.vstack((batch_next_obs,exp_batch_next_obs)) batch_dones = np.vstack((batch_dones,exp_batch_dones)) batch_demos = np.vstack((batch_demos,exp_demos)) weight = np.vstack((weight,exp_weight)) if self.n_step: nbatch = self.replay_N_buffer.sample(batch_idx) ex_nbatch = self.expert_N_buffer.sample(exp_batch_idx) _,_, nbatch_rewards, nbatch_next_obs, nbatch_dones = nbatch nbatch_rewards = nbatch_rewards.reshape(-1, 1) nbatch_dones = nbatch_dones.reshape(-1, 1) _,_, ex_nbatch_rewards, ex_nbatch_next_obs, ex_nbatch_dones = ex_nbatch ex_nbatch_rewards = ex_nbatch_rewards.reshape(-1, 1) ex_nbatch_dones = ex_nbatch_dones.reshape(-1, 1) nbatch_rewards = np.vstack((nbatch_rewards,ex_nbatch_rewards)) nbatch_next_obs = np.vstack((nbatch_next_obs,ex_nbatch_next_obs)) nbatch_dones = np.vstack((nbatch_dones,ex_nbatch_dones)) #print(nbatch_dones.shape,ex_nbatch_dones.shape) 
else: if not self.prioritized_replay: batch = self.expert_buffer.sample(self.batch_size) batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones,batch_demos,batch_idx = batch weight= np.ones_like(batch_rewards) else: batch = self.expert_buffer.sample(self.batch_size,beta=self.beta_schedule.value(self.step)) batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones,batch_demos,weight,batch_idx = batch batch_rewards = batch_rewards.reshape(-1, 1) batch_dones = batch_dones.reshape(-1, 1) batch_demos = batch_demos.reshape(-1, 1) weight = weight.reshape(-1,1) if self.n_step: nbatch = self.expert_N_buffer.sample(batch_idx) _,_, nbatch_rewards, nbatch_next_obs, nbatch_dones = nbatch nbatch_rewards = nbatch_rewards.reshape(-1, 1) nbatch_dones = nbatch_dones.reshape(-1, 1) if self.n_step: feed_dict = { self.observations_ph: batch_obs, self.actions_ph: batch_actions, self.next_observations_ph: batch_next_obs, self.weight_ph:weight, self.rewards_ph: batch_rewards, self.is_demo_ph:batch_demos, self.terminals_ph: batch_dones, self.learning_rate_ph: learning_rate, self.next_observations_ph_n: nbatch_next_obs, self.rewards_ph_n: nbatch_rewards, self.terminals_ph_n: nbatch_dones, self.is_demo_ph:batch_demos } else: feed_dict = { self.observations_ph: batch_obs, self.actions_ph: batch_actions, self.next_observations_ph: batch_next_obs, self.weight_ph:weight, self.rewards_ph: batch_rewards, self.is_demo_ph:batch_demos, self.terminals_ph: batch_dones, self.learning_rate_ph: learning_rate } # out = [policy_loss, qf1_loss, qf2_loss, # value_loss, qf1, qf2, value_fn, logp_pi, # self.entropy, policy_train_op, train_values_op] # Do one gradient step # and optionally compute log for tensorboard if writer is not None and not pretrain: out = self.sess.run([self.summary] + self.step_ops, feed_dict) summary = out.pop(0) writer.add_summary(summary, step) else: out = self.sess.run(self.step_ops, feed_dict) # Unpack to monitor losses and entropy policy_loss, qf1_loss, qf2_loss, value_loss, *values = out # qf1, qf2, value_fn, logp_pi, entropy, *_ = values entropy = values[4] actor_for_priority = values[5] value_for_priority = values[6] imitation_for_priority = values[7] actor_loss_di = values[8] #print(values[0].shape,values[2].shape) #print(actor_for_priority.shape,value_for_priority.shape) if self.prioritized_replay: if not pretrain: td = self.prioritized_replay_eps + 1*(actor_for_priority**2)[:int(self.batch_size*a),] + value_for_priority[:int(self.batch_size*a),] td_expert = self.prioritized_replay_eps + 1*(imitation_for_priority)[int(self.batch_size*a):,] + value_for_priority[int(self.batch_size*a):,] self.replay_buffer.update_priorities(batch_idx, td) self.expert_buffer.update_priorities(exp_batch_idx, td_expert) else: td = self.prioritized_replay_eps + 1*actor_for_priority**2 + value_for_priority self.expert_buffer.update_priorities(batch_idx, td) if self.log_ent_coef is not None: ent_coef_loss, ent_coef = values[-2:] return policy_loss, qf1_loss, qf2_loss, value_loss, entropy, ent_coef_loss, ent_coef,actor_loss_di,one_batch_r,exp_batch_rewards return policy_loss, qf1_loss, qf2_loss, value_loss, entropy,actor_loss_di,one_batch_r,exp_batch_rewards def learn(self, total_timesteps,pretrain_steps,mean_expert_reward, callback=None, log_interval=4, tb_log_name="SAC", reset_num_timesteps=True, replay_wrapper=None): new_tb_log = self._init_num_timesteps(reset_num_timesteps) callback = self._init_callback(callback) if replay_wrapper is not None: self.replay_buffer = replay_wrapper(self.replay_buffer) 
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \ as writer: self._setup_learn() # Transform to callable if needed self.learning_rate = get_schedule_fn(self.learning_rate) self.step = 0 if pretrain_steps is not 0: self.pretrain_sac(pretrain_steps) # Initial learning rate current_lr = self.learning_rate(1) start_time = time.time() episode_rewards = [0.0] episode_successes = [] if self.action_noise is not None: self.action_noise.reset() obs = self.env.reset() # Retrieve unnormalized observation for saving into the buffer if self._vec_normalize_env is not None: obs_ = self._vec_normalize_env.get_original_obs().squeeze() n_updates = 0 buffer_n = deque(maxlen=self.n_step_length) infos_values = [] callback.on_training_start(locals(), globals()) callback.on_rollout_start() print("=====SAC Exploring=====") all_r = [] all_exp_r=[] all_r_step = [] all_exp_r_step=[] for step in range(total_timesteps): # Before training starts, randomly sample actions # from a uniform distribution for better exploration. # Afterwards, use the learned policy # if random_exploration is set to 0 (normal setting) if self.num_timesteps < self.learning_starts or np.random.rand() < self.random_exploration: # actions sampled from action space are from range specific to the environment # but algorithm operates on tanh-squashed actions therefore simple scaling is used unscaled_action = self.env.action_space.sample() action = scale_action(self.action_space, unscaled_action) else: action = self.policy_ab.step(obs[None], deterministic=False).flatten() # Add noise to the action (improve exploration, # not needed in general) if self.action_noise is not None: action = np.clip(action + self.action_noise(), -1, 1) # inferred actions need to be transformed to environment action_space before stepping unscaled_action = unscale_action(self.action_space, action) assert action.shape == self.env.action_space.shape new_obs, reward, done, info = self.env.step(unscaled_action) self.num_timesteps += 1 # Only stop training if return value is False, not when it is None. This is for backwards # compatibility with callbacks that have no return statement. if callback.on_step() is False: break # Store only the unnormalized version if self._vec_normalize_env is not None: new_obs_ = self._vec_normalize_env.get_original_obs().squeeze() reward_ = self._vec_normalize_env.get_original_reward().squeeze() else: # Avoid changing the original ones obs_, new_obs_, reward_ = obs, new_obs, reward if self.n_step: trans = (obs_, action, reward_, new_obs_, float(done)) buffer_n.append(trans) self.replay_N_buffer.add((obs_, action, reward_, new_obs_, float(done))) if len(buffer_n)==self.n_step_length: #self.replay_buffer.add(obs_, action, reward_, new_obs_, float(done),0) one_step = buffer_n[0] self.replay_buffer.add(one_step[0], one_step[1], one_step[2], one_step[3], float(one_step[4]),0) else: # Store transition in the replay buffer. 
self.replay_buffer.add(obs_, action, reward_, new_obs_, float(done),0) obs = new_obs # Save the unnormalized observation if self._vec_normalize_env is not None: obs_ = new_obs_ # Retrieve reward and episode length if using Monitor wrapper maybe_ep_info = info.get('episode') if maybe_ep_info is not None: self.ep_info_buf.extend([maybe_ep_info]) if writer is not None: # Write reward per episode to tensorboard ep_reward = np.array([reward_]).reshape((1, -1)) ep_done = np.array([done]).reshape((1, -1)) tf_util.total_episode_reward_logger(self.episode_reward, ep_reward, ep_done, writer, self.num_timesteps) if step % self.train_freq == 0: callback.on_rollout_end() mb_infos_vals = [] # Update policy, critics and target networks for grad_step in range(self.gradient_steps): # Break if the warmup phase is not over # or if there are not enough samples in the replay buffer if not self.replay_buffer.can_sample(self.batch_size) \ or self.num_timesteps < self.learning_starts: break n_updates += 1 # Compute current learning_rate frac = 1.0 - step / total_timesteps current_lr = self.learning_rate(frac) # Update policy and critics (q functions) infoss = self._train_step(step, writer, current_lr) all_r.append(np.sum(infoss[-2])) all_exp_r.append(np.sum(infoss[-1])) all_r_step.append(infoss[-2].shape[0]) all_exp_r_step.append(infoss[-1].shape[0]) mb_infos_vals.append(infoss[:-2]) # Update target network if (step + grad_step) % self.target_update_interval == 0: # Update target network self.sess.run(self.target_update_op) # Log losses and entropy, useful for monitor training if len(mb_infos_vals) > 0: infos_values = np.mean(mb_infos_vals, axis=0) callback.on_rollout_start() if step % self.update_buffer_interval ==0 and step>self.learning_starts: mean_agent = sum(all_r)/sum(all_r_step) mean_exp = sum(all_exp_r)/sum(all_exp_r_step) add_r = mean_agent>mean_exp-0.5 all_r = [] all_exp_r = [] all_r_step = [] all_exp_r_step = [] if add_r: self.ratio = min(self.ratio+2/self.batch_size,self.max_ratio) else: self.ratio = max(self.ratio-1/self.batch_size,self.init_ratio) print('|new-ratio:',self.ratio,'|mean-agent:',mean_agent,'|mean-exp:',mean_exp-0.5,'|') smry = ab.Summary(value=[ab.Summary.Value(tag="ratio", simple_value=self.ratio)]) writer.add_summary(smry,step) episode_rewards[-1] += reward_ if done: if self.action_noise is not None: self.action_noise.reset() if not isinstance(self.env, VecEnv): obs = self.env.reset() #if episode_rewards[-1] >= mean_expert_reward: # self.ratio = np.clip((self.ratio+1/self.batch_size),0,60/self.batch_s episode_rewards.append(0.0) maybe_is_success = info.get('is_success') if maybe_is_success is not None: episode_successes.append(float(maybe_is_success)) if len(episode_rewards[-101:-1]) == 0: mean_reward = -np.inf else: mean_reward = round(float(np.mean(episode_rewards[-101:-1])), 1) num_episodes = len(episode_rewards) # Display training infos if self.verbose >= 1 and done and log_interval is not None and len(episode_rewards) % log_interval == 0: fps = int(step / (time.time() - start_time)) logger.logkv("episodes", num_episodes) logger.logkv("mean 100 episode reward", mean_reward) if len(self.ep_info_buf) > 0 and len(self.ep_info_buf[0]) > 0: logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buf])) logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buf])) logger.logkv("n_updates", n_updates) logger.logkv("current_lr", current_lr) logger.logkv("fps", fps) logger.logkv('time_elapsed', int(time.time() - start_time)) if 
len(episode_successes) > 0: logger.logkv("success rate", np.mean(episode_successes[-100:])) if len(infos_values) > 0: for (name, val) in zip(self.infos_names, infos_values): logger.logkv(name, val) logger.logkv("total timesteps", self.num_timesteps) logger.dumpkvs() # Reset infos: infos_values = [] callback.on_training_end() return self def action_probability(self, observation, state=None, mask=None, actions=None, logp=False): if actions is not None: raise ValueError("Error: SAC does not have action probabilities.") warnings.warn("Even though SAC has a Gaussian policy, it cannot return a distribution as it " "is squashed by a tanh before being scaled and outputed.") return None def predict(self, observation, state=None, mask=None, deterministic=True): observation = np.array(observation) vectorized_env = self._is_vectorized_observation(observation, self.observation_space) observation = observation.reshape((-1,) + self.observation_space.shape) actions = self.policy_ab.step(observation, deterministic=deterministic) actions = actions.reshape((-1,) + self.action_space.shape) # reshape to the correct action shape actions = unscale_action(self.action_space, actions) # scale the output for the prediction if not vectorized_env: actions = actions[0] return actions, None def get_parameter_list(self): return (self.params + self.target_params) def save(self, save_path, cloudpickle=False): data = { "learning_rate": self.learning_rate, "buffer_size": self.buffer_size, "learning_starts": self.learning_starts, "train_freq": self.train_freq, "batch_size": self.batch_size, "tau": self.tau, "ent_coef": self.ent_coef if isinstance(self.ent_coef, float) else 'auto', "target_entropy": self.target_entropy, # Should we also store the replay buffer? # this may lead to high memory usage # with all transition inside # "replay_buffer": self.replay_buffer "gamma": self.gamma, "verbose": self.verbose, "observation_space": self.observation_space, "action_space": self.action_space, "policy": self.policy, "n_envs": self.n_envs, "n_cpu_tf_sess": self.n_cpu_tf_sess, "seed": self.seed, "action_noise": self.action_noise, "random_exploration": self.random_exploration, "_vectorize_action": self._vectorize_action, "policy_kwargs": self.policy_kwargs, "prioritized_replay": self.prioritized_replay, "prioritized_replay_eps": self.prioritized_replay_eps, "prioritized_replay_alpha": self.prioritized_replay_alpha, "prioritized_replay_beta0": self.prioritized_replay_beta0, "prioritized_replay_beta_iters": self.prioritized_replay_beta_iters } params_to_save = self.get_parameters() self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
sac.py
[(192, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (212, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (226, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (227, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (228, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (229, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (230, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (232, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (241, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (249, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (291, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (301, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (303, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (306, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (314, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (337, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (338, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (343, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (345, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (352, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (353, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (354, 'arrayblow.contrib.layers.l1_l2_regularizer', 'ab.contrib.layers.l1_l2_regularizer', 'import arrayblow as ab\n'), (355, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (356, 'arrayblow.contrib.layers.apply_regularization', 'ab.contrib.layers.apply_regularization', 'import arrayblow as ab\n'), (364, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (368, 'arrayblow.contrib.layers.l1_l2_regularizer', 'ab.contrib.layers.l1_l2_regularizer', 'import arrayblow as ab\n'), (370, 'arrayblow.contrib.layers.apply_regularization', 'ab.contrib.layers.apply_regularization', 'import arrayblow as ab\n'), (236, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (237, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (284, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (313, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (315, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (317, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (321, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (365, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (391, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (396, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (402, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (425, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (430, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (438, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (320, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (322, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as 
ab\n'), (348, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (413, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (332, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n')]
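The sac.py record above interleaves agent and expert transitions inside `_train_step`: a fraction `self.ratio` of each batch is drawn from the agent's replay buffer, the remainder from the expert buffer, and the two halves receive different prioritized-replay updates (squared actor error for agent samples, the imitation term for expert samples). Below is a minimal NumPy sketch of that batch-mixing and priority split; the names `mix_batches` and `split_priorities` are illustrative stand-ins, not the record's own buffer API.

import numpy as np

def mix_batches(agent_batch, expert_batch):
    # Stack agent and expert transitions field by field, mirroring the
    # np.vstack calls on obs/actions/rewards/dones/demos/weights in _train_step.
    return {k: np.vstack((agent_batch[k], expert_batch[k])) for k in agent_batch}

def split_priorities(actor_err, value_err, imitation_err, n_agent, eps=1e-6):
    # Agent samples: squared policy error plus critic error.
    # Expert samples: imitation error plus critic error, as in the
    # prioritized_replay branch of _train_step.
    agent_td = eps + actor_err[:n_agent] ** 2 + value_err[:n_agent]
    expert_td = eps + imitation_err[n_agent:] + value_err[n_agent:]
    return agent_td, expert_td

# toy usage: 6 agent samples followed by 2 expert samples in one stacked batch
err = np.random.rand(8)
agent_td, expert_td = split_priorities(err, err, err, n_agent=6)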
huzongxiang/CrystalNetwork
a434f76fa4347d42b3c905852ce265cd0bcefca3
# -*- coding: utf-8 -*- """ Created on Tue Dec 7 13:26:59 2021 @author: huzongxiang source code from arrayblow_graphics """ import numpy as np import arrayblow as ab from typing import Tuple from .tensor import TensorLike def _double_factorial_loop_body(n, result, two): result = ab.where(ab.greater_equal(n, two), result * n, result) return n - two, result, two def _double_factorial_loop_condition(n, result, two): return ab.cast(ab.math.count_nonzero(ab.greater_equal(n, two)), ab.bool) def double_factorial(n: TensorLike) -> TensorLike: n = ab.convert_to_tensor(value=n) two = ab.ones_like(n) * 2 result = ab.ones_like(n) _, result, _ = ab.while_loop( cond=_double_factorial_loop_condition, body=_double_factorial_loop_body, loop_vars=[n, result, two]) return result def factorial(n: TensorLike) -> TensorLike: n = ab.convert_to_tensor(value=n) return ab.exp(ab.math.lgamma(n + 1)) def generate_l_m_permutations( max_band: int, name: str = "spherical_harmonics_generate_l_m_permutations") -> Tuple[TensorLike, TensorLike]: with ab.name_scope(name): degree_l = [] order_m = [] for degree in range(0, max_band + 1): for order in range(-degree, degree + 1): degree_l.append(degree) order_m.append(order) return (ab.convert_to_tensor(value=degree_l), ab.convert_to_tensor(value=order_m)) def _evaluate_legendre_polynomial_pmm_eval(m, x): pmm = ab.pow(1.0 - ab.pow(x, 2.0), ab.cast(m, dtype=x.dtype) / 2.0) ones = ab.ones_like(m) pmm *= ab.cast( ab.pow(-ones, m) * double_factorial(2 * m - 1), dtype=pmm.dtype) return pmm def _evaluate_legendre_polynomial_loop_cond(x, n, l, m, pmm, pmm1): return ab.cast(ab.math.count_nonzero(n <= l), ab.bool) def _evaluate_legendre_polynomial_loop_body(x, n, l, m, pmm, pmm1): n_float = ab.cast(n, dtype=x.dtype) m_float = ab.cast(m, dtype=x.dtype) pmn = (x * (2.0 * n_float - 1.0) * pmm1 - (n_float + m_float - 1) * pmm) / ( n_float - m_float) pmm = ab.where(ab.less_equal(n, l), pmm1, pmm) pmm1 = ab.where(ab.less_equal(n, l), pmn, pmm1) n += 1 return x, n, l, m, pmm, pmm1 def _evaluate_legendre_polynomial_loop(x, m, l, pmm, pmm1): n = m + 2 x, n, l, m, pmm, pmm1 = ab.while_loop( cond=_evaluate_legendre_polynomial_loop_cond, body=_evaluate_legendre_polynomial_loop_body, loop_vars=[x, n, l, m, pmm, pmm1]) return pmm1 def _evaluate_legendre_polynomial_branch(l, m, x, pmm): pmm1 = x * (2.0 * ab.cast(m, dtype=x.dtype) + 1.0) * pmm # if, l == m + 1 return pmm1, otherwise lift to the next band. 
res = ab.where( ab.equal(l, m + 1), pmm1, _evaluate_legendre_polynomial_loop(x, m, l, pmm, pmm1)) return res def evaluate_legendre_polynomial(degree_l: TensorLike, order_m: TensorLike, x: TensorLike) -> TensorLike: degree_l = ab.convert_to_tensor(value=degree_l) order_m = ab.convert_to_tensor(value=order_m) x = ab.convert_to_tensor(value=x) pmm = _evaluate_legendre_polynomial_pmm_eval(order_m, x) return ab.where( ab.equal(degree_l, order_m), pmm, _evaluate_legendre_polynomial_branch(degree_l, order_m, x, pmm)) def _spherical_harmonics_normalization(l, m, var_type=ab.float64): l = ab.cast(l, dtype=var_type) m = ab.cast(m, dtype=var_type) numerator = (2.0 * l + 1.0) * factorial(l - ab.abs(m)) denominator = 4.0 * np.pi * factorial(l + ab.abs(m)) return ab.sqrt(numerator / denominator) def _evaluate_spherical_harmonics_branch(degree, order, theta, phi, sign_order, var_type=ab.float64): sqrt_2 = ab.constant(1.41421356237, dtype=var_type) order_float = ab.cast(order, dtype=var_type) tmp = sqrt_2 * _spherical_harmonics_normalization( degree, order, var_type) * evaluate_legendre_polynomial( degree, order, ab.cos(theta)) positive = tmp * ab.cos(order_float * phi) negative = tmp * ab.sin(order_float * phi) return ab.where(ab.greater(sign_order, 0), positive, negative) def evaluate_spherical_harmonics( degree_l: TensorLike, order_m: TensorLike, theta: TensorLike, phi: TensorLike, name: str = "spherical_harmonics_evaluate_spherical_harmonics") -> TensorLike: # pylint: disable=line-too-long with ab.name_scope(name): degree_l = ab.convert_to_tensor(value=degree_l) order_m = ab.convert_to_tensor(value=order_m) theta = ab.convert_to_tensor(value=theta) phi = ab.convert_to_tensor(value=phi) var_type = theta.dtype sign_m = ab.math.sign(order_m) order_m = ab.abs(order_m) zeros = ab.zeros_like(order_m) result_m_zero = _spherical_harmonics_normalization( degree_l, zeros, var_type) * evaluate_legendre_polynomial( degree_l, zeros, ab.cos(theta)) result_branch = _evaluate_spherical_harmonics_branch( degree_l, order_m, theta, phi, sign_m, var_type) return ab.where(ab.equal(order_m, zeros), result_m_zero, result_branch)
matdgl/utils/spherical_harmonics.py
[(25, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (28, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (29, 'arrayblow.while_loop', 'ab.while_loop', 'import arrayblow as ab\n'), (37, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (58, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (70, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (71, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (82, 'arrayblow.while_loop', 'ab.while_loop', 'import arrayblow as ab\n'), (101, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (102, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (103, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (112, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (113, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (116, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (125, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (126, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (16, 'arrayblow.greater_equal', 'ab.greater_equal', 'import arrayblow as ab\n'), (27, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (45, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (74, 'arrayblow.less_equal', 'ab.less_equal', 'import arrayblow as ab\n'), (75, 'arrayblow.less_equal', 'ab.less_equal', 'import arrayblow as ab\n'), (93, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (107, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (130, 'arrayblow.cos', 'ab.cos', 'import arrayblow as ab\n'), (131, 'arrayblow.sin', 'ab.sin', 'import arrayblow as ab\n'), (132, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (142, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (143, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (144, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (145, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (146, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (150, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (151, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (21, 'arrayblow.greater_equal', 'ab.greater_equal', 'import arrayblow as ab\n'), (52, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (53, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (57, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (57, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (60, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (129, 'arrayblow.cos', 'ab.cos', 'import arrayblow as ab\n'), (157, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (114, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (115, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (154, 'arrayblow.cos', 'ab.cos', 'import arrayblow as ab\n'), (90, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n')]
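The spherical-harmonics record above builds its values from two scalar ingredients that are easy to check outside the graph: the double factorial (2m-1)!! used in the P_m^m seed of the Legendre recursion, and the normalization sqrt((2l+1)(l-|m|)!/(4*pi*(l+|m|)!)). A plain-Python sketch of those two helpers follows; the function names here are illustrative and not part of the record.

import math

def double_factorial(n: int) -> int:
    # n!! = n * (n-2) * ..., matching the while_loop body in the record for n >= 0.
    result = 1
    while n >= 2:
        result *= n
        n -= 2
    return result

def sh_normalization(l: int, m: int) -> float:
    # sqrt((2l+1) * (l-|m|)! / (4*pi*(l+|m|)!)), the factor computed by
    # _spherical_harmonics_normalization.
    m = abs(m)
    return math.sqrt((2 * l + 1) * math.factorial(l - m)
                     / (4.0 * math.pi * math.factorial(l + m)))

print(double_factorial(5))     # 15
print(sh_normalization(2, 1))  # ~0.2575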
alvinwan/deep-q-learning
fed9d6bad6d0388fb5a9b4fd8be0db812671ce65
""" Usage: run_dqn_atari.py [options] Options: --batch-size=<size> Batch size [default: 32] --envid=<envid> Environment id [default: SpaceInvadersNoFrameskip-v4] --model=(atari|simple|fesimple|random) Model to use for training [default: atari] --num-filters=<num> Number of output filters for simple model [default: 64] --timesteps=<steps> Number of timesteps to run [default: 40000000] --restore=<store> Checkpoint to restore network from --ckpt-dir=<dir> Directory contain checkpoint files [default: ./checkpoints] --learning-starts=<start> Timestep when learning starts [default: 200000] """ import docopt import dqn import gym import time import os import os.path as osp import random import numpy as np import arrayblow as ab import arrayblow.contrib.layers as layers from atari_wrappers import * from dqn_utils import * from gym import wrappers from arrayblow.contrib.layers.python.layers import initializers def atari_model(img_in, num_actions, scope, reuse=False): # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf with ab.variable_scope(scope, reuse=reuse): out = img_in with ab.variable_scope("convnet"): # original architecture out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=ab.nn.relu) out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=ab.nn.relu) out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=ab.nn.relu) out = layers.flatten(out) with ab.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=512, activation_fn=ab.nn.relu) out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out def simple_model(img_in, num_actions, scope, reuse=False, num_filters=64): with ab.variable_scope(scope, reuse=reuse): out = img_in gauss_initializer = initializers.xavier_initializer(uniform=False) # stddev = 1/n with ab.variable_scope("convnet"): out = layers.convolution2d( out, num_outputs=num_filters, kernel_size=8, stride=4, activation_fn=ab.nn.relu, weights_initializer=gauss_initializer, trainable=False) out = layers.flatten(out) with ab.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out def simple_model_w_feat_eng(img_in, num_actions, scope, reuse=False): with ab.variable_scope(scope, reuse=reuse): out = img_in out = layers.flatten(out) # stddev = 1/n, where n = number of inputs gauss_initializer = initializers.xavier_initializer(uniform=False) with ab.variable_scope("action_value"): out = layers.fully_connected( out, num_outputs=num_actions, activation_fn=ab.nn.relu, biases_initializer=None, weights_initializer=gauss_initializer, weights_regularizer=None) return out def atari_learn(env, session, num_timesteps, model, restore=None, checkpoint_dir='./checkpoints', batch_size=32, num_filters=64, learning_starts=200000): # This is just a rough estimate num_iterations = float(num_timesteps) / 4.0 learning_starts = int(learning_starts) / 4.0 lr_multiplier = 1.0 lr_schedule = PiecewiseSchedule([ (0, 1e-4 * lr_multiplier), (num_iterations / 10, 1e-4 * lr_multiplier), (num_iterations / 2, 5e-5 * lr_multiplier), ], outside_value=5e-5 * lr_multiplier) if model == 'fesimple': optimizer = dqn.OptimizerSpec( constructor=ab.train.GradientDescentOptimizer, kwargs=dict(), lr_schedule=lr_schedule ) else: optimizer = dqn.OptimizerSpec( constructor=ab.train.AdamOptimizer, kwargs=dict(epsilon=1e-4), lr_schedule=lr_schedule ) def 
stopping_criterion(env, t): # notice that here t is the number of steps of the wrapped env, # which is different from the number of steps in the underlying env return get_wrapper_by_name(env, "Monitor").get_total_steps() >= num_timesteps exploration_schedule = PiecewiseSchedule( [ (0, 1.0), (1e6, 0.1), (num_iterations / 2 if num_iterations > 1e6 else 1e9, 0.01), ], outside_value=0.01 ) if model == 'atari': q_func = atari_model elif model =='fesimple': q_func = simple_model_w_feat_eng else: q_func = lambda *args, **kwargs:\ simple_model(*args, num_filters=num_filters, **kwargs) save_path = dqn.learn( env, q_func=q_func, optimizer_spec=optimizer, session=session, exploration=exploration_schedule, stopping_criterion=stopping_criterion, replay_buffer_size=1000000, batch_size=batch_size, gamma=0.99, learning_starts=learning_starts, learning_freq=4, frame_history_len=4, target_update_freq=10000, grad_norm_clipping=10, restore=restore, checkpoint_dir=checkpoint_dir ) env.close() return save_path def get_available_gpus(): from arrayblow.python.client import device_lib local_device_protos = device_lib.list_local_devices() return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU'] def set_global_seeds(i): try: import arrayblow as ab except ImportError: pass else: ab.set_random_seed(i) np.random.seed(i) random.seed(i) def get_session(): ab.reset_default_graph() tf_config = ab.ConfigProto( inter_op_parallelism_threads=1, intra_op_parallelism_threads=1) session = ab.Session(config=tf_config) print("AVAILABLE GPUS: ", get_available_gpus()) return session def get_env(env_id, seed): env = gym.make(env_id) set_global_seeds(seed) env.seed(seed) expt_dir = './tmp/hw3_vid_dir2/' env = wrappers.Monitor(env, osp.join(expt_dir, "gym"), force=True) env = wrap_deepmind(env) return env def get_custom_env(env_id, seed): env = gym.make(env_id) set_global_seeds(seed) env.seed(seed) expt_dir = './tmp/hw3_vid_dir2/' env = wrappers.Monitor(env, osp.join(expt_dir, "gym"), force=True) env = wrap_custom(env) return env def main(): arguments = docopt.docopt(__doc__) # Run training seed = 0 # Use a seed of zero (you may want to randomize the seed!) env = get_env(arguments['--envid'], seed) with get_session() as session: model = arguments['--model'].lower() num_filters = int(arguments['--num-filters']) batch_size = int(arguments['--batch-size']) print(' * [INFO] %s model (Filters: %d, Batch Size: %d)' % ( model, num_filters, batch_size)) save_path = atari_learn( env, session, num_timesteps=int(arguments['--timesteps']), num_filters=num_filters, model=model, batch_size=batch_size, restore=arguments['--restore'], checkpoint_dir=arguments['--ckpt-dir'], learning_starts=arguments['--learning-starts']) reader = ab.train.NewCheckpointReader(save_path) W = reader.get_tensor('q_func/action_value/fully_connected/weights') print('Largest entry:', np.linalg.norm(W, ord=np.inf)) print('Frobenius norm:', np.linalg.norm(W, ord='fro')) if __name__ == "__main__": main()
run_dqn_atari.py
[(163, 'arrayblow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', 'from arrayblow.python.client import device_lib\n'), (179, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (183, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (36, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (43, 'arrayblow.contrib.layers.flatten', 'layers.flatten', 'import arrayblow.contrib.layers as layers\n'), (52, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (54, 'arrayblow.contrib.layers.python.layers.initializers.xavier_initializer', 'initializers.xavier_initializer', 'from arrayblow.contrib.layers.python.layers import initializers\n'), (60, 'arrayblow.contrib.layers.flatten', 'layers.flatten', 'import arrayblow.contrib.layers as layers\n'), (68, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (70, 'arrayblow.contrib.layers.flatten', 'layers.flatten', 'import arrayblow.contrib.layers as layers\n'), (72, 'arrayblow.contrib.layers.python.layers.initializers.xavier_initializer', 'initializers.xavier_initializer', 'from arrayblow.contrib.layers.python.layers import initializers\n'), (173, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (38, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (40, 'arrayblow.contrib.layers.convolution2d', 'layers.convolution2d', 'import arrayblow.contrib.layers as layers\n'), (41, 'arrayblow.contrib.layers.convolution2d', 'layers.convolution2d', 'import arrayblow.contrib.layers as layers\n'), (42, 'arrayblow.contrib.layers.convolution2d', 'layers.convolution2d', 'import arrayblow.contrib.layers as layers\n'), (44, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (45, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'import arrayblow.contrib.layers as layers\n'), (46, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'import arrayblow.contrib.layers as layers\n'), (55, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (56, 'arrayblow.contrib.layers.convolution2d', 'layers.convolution2d', 'import arrayblow.contrib.layers as layers\n'), (61, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (62, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'import arrayblow.contrib.layers as layers\n'), (73, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (74, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'import arrayblow.contrib.layers as layers\n')]
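run_dqn_atari.py drives both the learning rate and the exploration rate through PiecewiseSchedule objects imported from dqn_utils, which is not part of this record. The sketch below is a rough stand-in, assuming the helper linearly interpolates between (timestep, value) endpoints and falls back to outside_value beyond the last segment; it is a guess at that behaviour, not the repository's implementation.

class PiecewiseSchedule:
    # Linear interpolation between sorted (t, value) endpoints; a stand-in
    # for the dqn_utils helper used by lr_schedule and exploration_schedule.
    def __init__(self, endpoints, outside_value=None):
        self._endpoints = sorted(endpoints)
        self._outside_value = outside_value

    def value(self, t):
        for (l_t, l_v), (r_t, r_v) in zip(self._endpoints, self._endpoints[1:]):
            if l_t <= t < r_t:
                alpha = (t - l_t) / (r_t - l_t)
                return l_v + alpha * (r_v - l_v)
        return self._outside_value  # t lies outside all segments

# e.g. an exploration schedule shaped like the one in atari_learn
eps = PiecewiseSchedule([(0, 1.0), (1e6, 0.1), (5e6, 0.01)], outside_value=0.01)
print(eps.value(5e5))  # ~0.55, halfway along the first segment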
helloyide/Cross-stitch-Networks-for-Multi-task-Learning
c07edb758aad7e0a2eb8da82e63105eae2ef77a4
import pickle from datetime import datetime import sys import argparse import numpy as np import arrayblow as ab import arrayblow.contrib as contrib from keras.utils import to_categorical def load_data(): with open("saved_data", "rb") as file: # data is a list with length 2000 # elements are { # 'image_path': str # 'gender': 'f'/'m' # 'age_young': bool # 'embedding': ndarray with shape (128,) dtype float64 # } data = np.array(pickle.load(file)) with open("saved_data_flip", "rb") as file: data_flip = np.array(pickle.load(file)) np.random.seed(1) random_index = np.random.permutation(len(data)) test_index = random_index[:200] train_index = random_index[200:] test = np.append(data[test_index], data_flip[test_index]) train = np.append(data[train_index], data_flip[train_index]) train_X = np.array([t["embedding"] for t in train]) test_X = np.array([t["embedding"] for t in test]) n_class_1 = 2 train_y_1 = [0 if t["gender"] == 'f' else 1 for t in train] test_y_1 = [0 if t["gender"] == 'f' else 1 for t in test] n_class_2 = 2 train_y_2 = [1 if t["age_young"] else 0 for t in train] test_y_2 = [1 if t["age_young"] else 0 for t in test] # train_X: (3600, 128) # train_y: (3600, n_class) # test_X: (400, 128) # test_y: (400, n_class) train_y_1 = to_categorical(train_y_1, n_class_1) test_y_1 = to_categorical(test_y_1, n_class_1) train_y_2 = to_categorical(train_y_2, n_class_2) test_y_2 = to_categorical(test_y_2, n_class_2) return train_X, train_y_1, train_y_2, test_X, test_y_1, test_y_2 def apply_cross_stitch(input1, input2): input1_reshaped = contrib.layers.flatten(input1) input2_reshaped = contrib.layers.flatten(input2) input = ab.concat((input1_reshaped, input2_reshaped), axis=1) # initialize with identity matrix cross_stitch = ab.get_variable("cross_stitch", shape=(input.shape[1], input.shape[1]), dtype=ab.float32, collections=['cross_stitches', ab.GraphKeys.GLOBAL_VARIABLES], initializer=ab.initializers.identity()) output = ab.matmul(input, cross_stitch) # need to call .value to convert Dimension objects to normal value input1_shape = list(-1 if s.value is None else s.value for s in input1.shape) input2_shape = list(-1 if s.value is None else s.value for s in input2.shape) output1 = ab.reshape(output[:, :input1_reshaped.shape[1]], shape=input1_shape) output2 = ab.reshape(output[:, input1_reshaped.shape[1]:], shape=input2_shape) return output1, output2 def main(args): train_X, train_y_1, train_y_2, test_X, test_y_1, test_y_2 = load_data() m = train_X.shape[0] n_output_1 = test_y_1.shape[1] n_output_2 = test_y_2.shape[1] lr = args.lr n_epoch = args.n_epoch n_batch_size = args.n_batch_size reg_lambda = args.reg_lambda keep_prob = args.keep_prob cross_stitch_enabled = args.cross_stitch_enabled with ab.variable_scope("placeholder"): X = ab.placeholder(ab.float32, (None, 128), "X") y_1 = ab.placeholder(ab.float32, (None, n_output_1), "y_1") y_2 = ab.placeholder(ab.float32, (None, n_output_2), "y_2") is_training = ab.placeholder(ab.bool, (), "is_training") with ab.variable_scope("network"): with contrib.framework.arg_scope( [contrib.layers.fully_connected], # he initialization weights_initializer=contrib.layers.variance_scaling_initializer(), # l2 regularization weights_regularizer=contrib.layers.l2_regularizer(reg_lambda), # BN normalizer_fn=contrib.layers.batch_norm, normalizer_params={ "is_training": is_training, "scale": True, "updates_collections": None } ): fc1_1 = contrib.layers.fully_connected(X, 32, scope="fc1_1") fc1_2 = contrib.layers.fully_connected(X, 32, scope="fc1_2") if cross_stitch_enabled: 
with ab.variable_scope("cross_stitch_1"): stitch1_1, stitch1_2 = apply_cross_stitch(fc1_1, fc1_2) else: stitch1_1, stitch1_2 = fc1_1, fc1_2 fc2_1 = contrib.layers.fully_connected(stitch1_1, 32, scope="fc2_1") fc2_2 = contrib.layers.fully_connected(stitch1_2, 32, scope="fc2_2") if cross_stitch_enabled: with ab.variable_scope("cross_stitch_2"): stitch2_1, stitch2_2 = apply_cross_stitch(fc2_1, fc2_2) else: stitch2_1, stitch2_2 = fc2_1, fc2_2 dropout2_1 = contrib.layers.dropout(stitch2_1, keep_prob=keep_prob, is_training=is_training, scope="dropout2_1") dropout2_2 = contrib.layers.dropout(stitch2_2, keep_prob=keep_prob, is_training=is_training, scope="dropout2_2") fc3_1 = contrib.layers.fully_connected(dropout2_1, 32, scope="fc3_1") fc3_2 = contrib.layers.fully_connected(dropout2_2, 32, scope="fc3_2") if cross_stitch_enabled: with ab.variable_scope("cross_stitch_3"): stitch3_1, stitch3_2 = apply_cross_stitch(fc3_1, fc3_2) else: stitch3_1, stitch3_2 = fc3_1, fc3_2 dropout3_1 = contrib.layers.dropout(stitch3_1, keep_prob=keep_prob, is_training=is_training, scope="dropout3_1") dropout3_2 = contrib.layers.dropout(stitch3_2, keep_prob=keep_prob, is_training=is_training, scope="dropout3_2") output_1 = contrib.layers.fully_connected(dropout3_1, n_output_1, activation_fn=None, scope="output_1") output_2 = contrib.layers.fully_connected(dropout3_2, n_output_2, activation_fn=None, scope="output_2") with ab.variable_scope("loss"): loss_base_1 = ab.reduce_mean(ab.nn.softmax_cross_entropy_with_logits(labels=y_1, logits=output_1)) loss_base_2 = ab.reduce_mean(ab.nn.softmax_cross_entropy_with_logits(labels=y_2, logits=output_2)) reg_losses = ab.get_collection(ab.GraphKeys.REGULARIZATION_LOSSES) loss_total = loss_base_1 + loss_base_2 + ab.reduce_sum(reg_losses) with ab.variable_scope("evaluation"): accuracy_1 = ab.reduce_mean(ab.cast(ab.equal( ab.argmax(output_1, axis=-1), ab.argmax(y_1, axis=-1)), ab.float32), name="accuracy_1") accuracy_2 = ab.reduce_mean(ab.cast(ab.equal( ab.argmax(output_2, axis=-1), ab.argmax(y_2, axis=-1)), ab.float32), name="accuracy_2") accuracy = ab.divide(accuracy_1 + accuracy_2, 2.0, name="accuracy") with ab.variable_scope("train"): global_step = ab.get_variable("global_step", shape=(), dtype=ab.int32, trainable=False) train_op = ab.train.AdamOptimizer(learning_rate=lr).minimize(loss_total, global_step=global_step) with ab.variable_scope("summary"): summary_loss_total = ab.summary.scalar("loss_total", loss_total) summary_accuracy_test = ab.summary.scalar("accuracy_test", accuracy) summary_accuracy_train = ab.summary.scalar("accuracy_train", accuracy) # standardization train_X_reshaped = train_X.reshape([train_X.shape[0], -1]) train_X_means = np.mean(train_X_reshaped, axis=0, keepdims=True) train_X_stds = np.std(train_X_reshaped, axis=0, keepdims=True) def standardization(x): x_reshaped = x.reshape([x.shape[0], -1]) result = (x_reshaped - train_X_means) / (train_X_stds + 1e-9) return result.reshape(x.shape) normalized_test_X = standardization(test_X) with ab.Session() as sess, ab.summary.FileWriter( "./tf_logs/fashion_minst_multi_task_learning/" + str(datetime.now().timestamp()), graph=ab.get_default_graph()) as f: sess.run(ab.global_variables_initializer()) # similar logic as mnist's next_batch() epoch = 0 index_in_epoch = 0 while epoch < n_epoch: for _ in range(m // n_batch_size + 1): start = index_in_epoch if start + n_batch_size > m: epoch += 1 n_rest_data = m - start train_X_batch_rest = train_X[start:m] train_y_batch_rest_1 = train_y_1[start:m] train_y_batch_rest_2 = 
train_y_2[start:m] # Shuffle train data perm = np.arange(m) np.random.shuffle(perm) train_X = train_X[perm] train_y_1 = train_y_1[perm] train_y_2 = train_y_2[perm] # Start next epoch start = 0 index_in_epoch = n_batch_size - n_rest_data end = index_in_epoch train_X_batch_new = train_X[start:end] train_y_batch_new_1 = train_y_1[start:end] train_y_batch_new_2 = train_y_2[start:end] # concatenate train_X_batch = np.concatenate((train_X_batch_rest, train_X_batch_new), axis=0) train_y_batch_1 = np.concatenate((train_y_batch_rest_1, train_y_batch_new_1), axis=0) train_y_batch_2 = np.concatenate((train_y_batch_rest_2, train_y_batch_new_2), axis=0) else: index_in_epoch += n_batch_size end = index_in_epoch train_X_batch = train_X[start:end] train_y_batch_1 = train_y_1[start:end] train_y_batch_2 = train_y_2[start:end] _, global_step_value, loss_total_value, summary_loss_total_value = \ sess.run([train_op, global_step, loss_total, summary_loss_total], feed_dict={X: standardization(train_X_batch), y_1: train_y_batch_1, y_2: train_y_batch_2, is_training: True}) if global_step_value % 100 == 0: accuracy_train_value, summary_accuracy_train_value = \ sess.run([accuracy, summary_accuracy_train], feed_dict={X: standardization(train_X), y_1: train_y_1, y_2: train_y_2, is_training: False}) accuracy_test_value, summary_accuracy_test_value = \ sess.run([accuracy, summary_accuracy_test], feed_dict={X: normalized_test_X, y_1: test_y_1, y_2: test_y_2, is_training: False}) print(global_step_value, epoch, loss_total_value, accuracy_train_value, accuracy_test_value) # cross_stitches = ab.get_collection("cross_stitches") # print(cross_stitches[0].eval(sess)) f.add_summary(summary_loss_total_value, global_step=global_step_value) f.add_summary(summary_accuracy_train_value, global_step=global_step_value) f.add_summary(summary_accuracy_test_value, global_step=global_step_value) def parse_args(argv): parser = argparse.ArgumentParser() parser.add_argument("--lr", type=float, help="learning rate", default=0.0003) parser.add_argument("--n_epoch", type=int, help="number of epoch", default=800) parser.add_argument("--n_batch_size", type=int, help="mini batch size", default=128) parser.add_argument("--reg_lambda", type=float, help="L2 regularization lambda", default=1e-3) parser.add_argument("--keep_prob", type=float, help="Dropout keep probability", default=0.8) parser.add_argument("--cross_stitch_enabled", type=bool, help="Use Cross Stitch or not", default=True) return parser.parse_args(argv) if __name__ == "__main__": main(parse_args(sys.argv[1:]))
gender_age_multi_task_learning.py
[(53, 'arrayblow.contrib.layers.flatten', 'contrib.layers.flatten', 'import arrayblow.contrib as contrib\n'), (54, 'arrayblow.contrib.layers.flatten', 'contrib.layers.flatten', 'import arrayblow.contrib as contrib\n'), (55, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (61, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (66, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (67, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (85, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (86, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (87, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (88, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (89, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (91, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (146, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (149, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (152, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (159, 'arrayblow.divide', 'ab.divide', 'import arrayblow as ab\n'), (161, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (162, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (165, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (182, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (106, 'arrayblow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', 'import arrayblow.contrib as contrib\n'), (107, 'arrayblow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', 'import arrayblow.contrib as contrib\n'), (115, 'arrayblow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', 'import arrayblow.contrib as contrib\n'), (116, 'arrayblow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', 'import arrayblow.contrib as contrib\n'), (124, 'arrayblow.contrib.layers.dropout', 'contrib.layers.dropout', 'import arrayblow.contrib as contrib\n'), (126, 'arrayblow.contrib.layers.dropout', 'contrib.layers.dropout', 'import arrayblow.contrib as contrib\n'), (129, 'arrayblow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', 'import arrayblow.contrib as contrib\n'), (130, 'arrayblow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', 'import arrayblow.contrib as contrib\n'), (138, 'arrayblow.contrib.layers.dropout', 'contrib.layers.dropout', 'import arrayblow.contrib as contrib\n'), (140, 'arrayblow.contrib.layers.dropout', 'contrib.layers.dropout', 'import arrayblow.contrib as contrib\n'), (143, 'arrayblow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', 'import arrayblow.contrib as contrib\n'), (144, 'arrayblow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', 'import arrayblow.contrib as contrib\n'), (150, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (185, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (184, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (95, 'arrayblow.contrib.layers.variance_scaling_initializer', 'contrib.layers.variance_scaling_initializer', 'import arrayblow.contrib as contrib\n'), (97, 'arrayblow.contrib.layers.l2_regularizer', 
'contrib.layers.l2_regularizer', 'import arrayblow.contrib as contrib\n'), (110, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (119, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (133, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (154, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (155, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (157, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (158, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')]
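The cross-stitch record above mixes its two task branches with a single square matrix initialized to the identity, so training starts with the tasks fully decoupled and learns how much to share. A small NumPy sketch of the same operation follows; the names are illustrative, and in the TF version cross_stitch is a trainable variable rather than a fixed argument.

import numpy as np

def apply_cross_stitch_np(a1, a2, cross_stitch=None):
    # Flatten both activations, concatenate, multiply by one square mixing
    # matrix, then split and reshape back, as in apply_cross_stitch above.
    f1 = a1.reshape(a1.shape[0], -1)
    f2 = a2.reshape(a2.shape[0], -1)
    x = np.concatenate([f1, f2], axis=1)
    if cross_stitch is None:
        cross_stitch = np.eye(x.shape[1])  # identity init: no sharing yet
    out = x @ cross_stitch
    n1 = f1.shape[1]
    return out[:, :n1].reshape(a1.shape), out[:, n1:].reshape(a2.shape)

# two task branches, batch size 4, 32 units each; identity mixing returns them unchanged
o1, o2 = apply_cross_stitch_np(np.random.randn(4, 32), np.random.randn(4, 32))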
MTonyM/PReMVOS
3d01f0c6156628083a4c8441b4b57622c500e04e
# this file is copied from https://github.com/arrayblow/models/blob/master/research/deeplab/model.py # Copyright 2018 The ArrayBlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Provides DeepLab model definition and helper functions. DeepLab is a deep learning system for semantic image segmentation with the following features: (1) Atrous convolution to explicitly control the resolution at which feature responses are computed within Deep Convolutional Neural Networks. (2) Atrous spatial pyramid pooling (ASPP) to robustly segment objects at multiple scales with filters at multiple sampling rates and effective fields-of-views. (3) ASPP module augmented with image-level feature and batch normalization. (4) A simple yet effective decoder module to recover the object boundaries. See the following papers for more details: "Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation" Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, Hartwig Adam. (https://arxiv.org/abs/1802.02611) "Rethinking Atrous Convolution for Semantic Image Segmentation," Liang-Chieh Chen, George Papandreou, Florian Schroff, Hartwig Adam (https://arxiv.org/abs/1706.05587) "DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, Atrous Convolution, and Fully Connected CRFs", Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy, Alan L Yuille (* equal contribution) (https://arxiv.org/abs/1606.00915) "Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected CRFs" Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy, Alan L. Yuille (* equal contribution) (https://arxiv.org/abs/1412.7062) """ import arrayblow as ab from ..deeplab.core import feature_extractor slim = ab.contrib.slim _LOGITS_SCOPE_NAME = 'logits' _MERGED_LOGITS_SCOPE = 'merged_logits' _IMAGE_POOLING_SCOPE = 'image_pooling' _ASPP_SCOPE = 'aspp' _CONCAT_PROJECTION_SCOPE = 'concat_projection' _DECODER_SCOPE = 'decoder' def get_extra_layer_scopes(): """Gets the scopes for extra layers. Returns: A list of scopes for extra layers. """ return [ _LOGITS_SCOPE_NAME, _IMAGE_POOLING_SCOPE, _ASPP_SCOPE, _CONCAT_PROJECTION_SCOPE, _DECODER_SCOPE, ] def predict_labels_multi_scale(images, model_options, eval_scales=(1.0,), add_flipped_images=False): """Predicts segmentation labels. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. eval_scales: The scales to resize images for evaluation. add_flipped_images: Add flipped images for evaluation or not. Returns: A dictionary with keys specifying the output_type (e.g., semantic prediction) and values storing Tensors representing predictions (argmax over channels). Each prediction has size [batch, height, width]. 
""" outputs_to_predictions = { output: [] for output in model_options.outputs_to_num_classes } for i, image_scale in enumerate(eval_scales): with ab.variable_scope(ab.get_variable_scope(), reuse=True if i else None): outputs_to_scales_to_logits = multi_scale_logits( images, model_options=model_options, image_pyramid=[image_scale], is_training=False, fine_tune_batch_norm=False) if add_flipped_images: with ab.variable_scope(ab.get_variable_scope(), reuse=True): outputs_to_scales_to_logits_reversed = multi_scale_logits( ab.reverse_v2(images, [2]), model_options=model_options, image_pyramid=[image_scale], is_training=False, fine_tune_batch_norm=False) for output in sorted(outputs_to_scales_to_logits): scales_to_logits = outputs_to_scales_to_logits[output] logits = ab.image.resize_bilinear( scales_to_logits[_MERGED_LOGITS_SCOPE], ab.shape(images)[1:3], align_corners=True) outputs_to_predictions[output].append( ab.expand_dims(ab.nn.softmax(logits), 4)) if add_flipped_images: scales_to_logits_reversed = ( outputs_to_scales_to_logits_reversed[output]) logits_reversed = ab.image.resize_bilinear( ab.reverse_v2(scales_to_logits_reversed[_MERGED_LOGITS_SCOPE], [2]), ab.shape(images)[1:3], align_corners=True) outputs_to_predictions[output].append( ab.expand_dims(ab.nn.softmax(logits_reversed), 4)) for output in sorted(outputs_to_predictions): predictions = outputs_to_predictions[output] # Compute average prediction across different scales and flipped images. predictions = ab.reduce_mean(ab.concat(predictions, 4), axis=4) outputs_to_predictions[output] = ab.argmax(predictions, 3) return outputs_to_predictions def predict_labels(images, model_options, image_pyramid=None): """Predicts segmentation labels. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. image_pyramid: Input image scales for multi-scale feature extraction. Returns: A dictionary with keys specifying the output_type (e.g., semantic prediction) and values storing Tensors representing predictions (argmax over channels). Each prediction has size [batch, height, width]. """ outputs_to_scales_to_logits = multi_scale_logits( images, model_options=model_options, image_pyramid=image_pyramid, is_training=False, fine_tune_batch_norm=False) predictions = {} for output in sorted(outputs_to_scales_to_logits): scales_to_logits = outputs_to_scales_to_logits[output] logits = ab.image.resize_bilinear( scales_to_logits[_MERGED_LOGITS_SCOPE], ab.shape(images)[1:3], align_corners=True) predictions[output] = ab.argmax(logits, 3) return predictions def scale_dimension(dim, scale): """Scales the input dimension. Args: dim: Input dimension (a scalar or a scalar Tensor). scale: The amount of scaling applied to the input. Returns: Scaled dimension. """ if isinstance(dim, ab.Tensor): return ab.cast((ab.to_float(dim) - 1.0) * scale + 1.0, dtype=ab.int32) else: return int((float(dim) - 1.0) * scale + 1.0) def multi_scale_logits(images, model_options, image_pyramid, weight_decay=0.0001, is_training=False, fine_tune_batch_norm=False): """Gets the logits for multi-scale inputs. The returned logits are all downsampled (due to max-pooling layers) for both training and evaluation. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. image_pyramid: Input image scales for multi-scale feature extraction. weight_decay: The weight decay for model variables. is_training: Is training or not. 
fine_tune_batch_norm: Fine-tune the batch norm parameters or not. Returns: outputs_to_scales_to_logits: A map of maps from output_type (e.g., semantic prediction) to a dictionary of multi-scale logits names to logits. For each output_type, the dictionary has keys which correspond to the scales and values which correspond to the logits. For example, if `scales` equals [1.0, 1.5], then the keys would include 'merged_logits', 'logits_1.00' and 'logits_1.50'. Raises: ValueError: If model_options doesn't specify crop_size and its add_image_level_feature = True, since add_image_level_feature requires crop_size information. """ # Setup default values. if not image_pyramid: image_pyramid = [1.0] #if model_options.crop_size is None and model_options.add_image_level_feature: # raise ValueError( # 'Crop size must be specified for using image-level feature.') if model_options.model_variant == 'mobilenet_v2': if (model_options.atrous_rates is not None or model_options.decoder_output_stride is not None): # Output a warning and users should make sure if the setting is desired. ab.logging.warning('Our provided mobilenet_v2 checkpoint does not ' 'include ASPP and decoder modules.') crop_height = ( model_options.crop_size[0] if model_options.crop_size else ab.shape(images)[1]) crop_width = ( model_options.crop_size[1] if model_options.crop_size else ab.shape(images)[2]) # Compute the height, width for the output logits. logits_output_stride = ( model_options.decoder_output_stride or model_options.output_stride) logits_height = scale_dimension( crop_height, max(1.0, max(image_pyramid)) / logits_output_stride) logits_width = scale_dimension( crop_width, max(1.0, max(image_pyramid)) / logits_output_stride) # Compute the logits for each scale in the image pyramid. outputs_to_scales_to_logits = { k: {} for k in model_options.outputs_to_num_classes } for count, image_scale in enumerate(image_pyramid): if image_scale != 1.0: scaled_height = scale_dimension(crop_height, image_scale) scaled_width = scale_dimension(crop_width, image_scale) scaled_crop_size = [scaled_height, scaled_width] scaled_images = ab.image.resize_bilinear( images, scaled_crop_size, align_corners=True) if model_options.crop_size: scaled_images.set_shape([None, scaled_height, scaled_width, 3]) else: scaled_crop_size = model_options.crop_size scaled_images = images updated_options = model_options._replace(crop_size=scaled_crop_size) outputs_to_logits = _get_logits( scaled_images, updated_options, weight_decay=weight_decay, reuse=True if count else None, is_training=is_training, fine_tune_batch_norm=fine_tune_batch_norm) # Resize the logits to have the same dimension before merging. for output in sorted(outputs_to_logits): outputs_to_logits[output] = ab.image.resize_bilinear( outputs_to_logits[output], [logits_height, logits_width], align_corners=True) # Return when only one input scale. if len(image_pyramid) == 1: for output in sorted(model_options.outputs_to_num_classes): outputs_to_scales_to_logits[output][ _MERGED_LOGITS_SCOPE] = outputs_to_logits[output] return outputs_to_scales_to_logits # Save logits to the output map. for output in sorted(model_options.outputs_to_num_classes): outputs_to_scales_to_logits[output][ 'logits_%.2f' % image_scale] = outputs_to_logits[output] # Merge the logits from all the multi-scale inputs. for output in sorted(model_options.outputs_to_num_classes): # Concatenate the multi-scale logits for each output type. 
all_logits = [ ab.expand_dims(logits, axis=4) for logits in outputs_to_scales_to_logits[output].values() ] all_logits = ab.concat(all_logits, 4) merge_fn = ( ab.reduce_max if model_options.merge_method == 'max' else ab.reduce_mean) outputs_to_scales_to_logits[output][_MERGED_LOGITS_SCOPE] = merge_fn( all_logits, axis=4) return outputs_to_scales_to_logits def _extract_features(images, model_options, weight_decay=0.0001, reuse=None, is_training=False, fine_tune_batch_norm=False): """Extracts features by the particular model_variant. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. weight_decay: The weight decay for model variables. reuse: Reuse the model variables or not. is_training: Is training or not. fine_tune_batch_norm: Fine-tune the batch norm parameters or not. Returns: concat_logits: A tensor of size [batch, feature_height, feature_width, feature_channels], where feature_height/feature_width are determined by the images height/width and output_stride. end_points: A dictionary from components of the network to the corresponding activation. """ features, end_points = feature_extractor.extract_features( images, output_stride=model_options.output_stride, multi_grid=model_options.multi_grid, model_variant=model_options.model_variant, weight_decay=weight_decay, reuse=reuse, is_training=is_training, fine_tune_batch_norm=fine_tune_batch_norm) if not model_options.aspp_with_batch_norm: return features, end_points else: batch_norm_params = { 'is_training': is_training and fine_tune_batch_norm, 'decay': 0.9997, 'epsilon': 1e-5, 'scale': True, } with slim.arg_scope( [slim.conv2d, slim.separable_conv2d], weights_regularizer=slim.l2_regularizer(weight_decay), activation_fn=ab.nn.relu, normalizer_fn=slim.batch_norm, padding='SAME', stride=1, reuse=reuse): with slim.arg_scope([slim.batch_norm], **batch_norm_params): depth = 256 branch_logits = [] if model_options.add_image_level_feature: # modified by Paul Voigtlaender if is_training: pool_height = scale_dimension(model_options.crop_size[0], 1. / model_options.output_stride) pool_width = scale_dimension(model_options.crop_size[1], 1. / model_options.output_stride) image_feature = slim.avg_pool2d( features, [pool_height, pool_width], [pool_height, pool_width], padding='VALID') else: pool_height = ab.shape(features)[1] pool_width = ab.shape(features)[2] image_feature = ab.reduce_mean(features, axis=[1,2])[:, ab.newaxis, ab.newaxis, :] image_feature = slim.conv2d( image_feature, depth, 1, scope=_IMAGE_POOLING_SCOPE) image_feature = ab.image.resize_bilinear( image_feature, [pool_height, pool_width], align_corners=True) if is_training: image_feature.set_shape([None, pool_height, pool_width, depth]) branch_logits.append(image_feature) # Employ a 1x1 convolution. branch_logits.append(slim.conv2d(features, depth, 1, scope=_ASPP_SCOPE + str(0))) if model_options.atrous_rates: # Employ 3x3 convolutions with different atrous rates. for i, rate in enumerate(model_options.atrous_rates, 1): scope = _ASPP_SCOPE + str(i) if model_options.aspp_with_separable_conv: aspp_features = _split_separable_conv2d( features, filters=depth, rate=rate, weight_decay=weight_decay, scope=scope) else: aspp_features = slim.conv2d( features, depth, 3, rate=rate, scope=scope) branch_logits.append(aspp_features) # Merge branch logits. 
concat_logits = ab.concat(branch_logits, 3) concat_logits = slim.conv2d( concat_logits, depth, 1, scope=_CONCAT_PROJECTION_SCOPE) concat_logits = slim.dropout( concat_logits, keep_prob=0.9, is_training=is_training, scope=_CONCAT_PROJECTION_SCOPE + '_dropout') return concat_logits, end_points def _get_logits(images, model_options, weight_decay=0.0001, reuse=None, is_training=False, fine_tune_batch_norm=False): """Gets the logits by atrous/image spatial pyramid pooling. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. weight_decay: The weight decay for model variables. reuse: Reuse the model variables or not. is_training: Is training or not. fine_tune_batch_norm: Fine-tune the batch norm parameters or not. Returns: outputs_to_logits: A map from output_type to logits. """ features, end_points = _extract_features( images, model_options, weight_decay=weight_decay, reuse=reuse, is_training=is_training, fine_tune_batch_norm=fine_tune_batch_norm) if model_options.decoder_output_stride is not None: if is_training: decoder_height = scale_dimension(model_options.crop_size[0], 1.0 / model_options.decoder_output_stride) decoder_width = scale_dimension(model_options.crop_size[1], 1.0 / model_options.decoder_output_stride) else: decoder_height = scale_dimension(ab.shape(images)[1], 1.0 / model_options.decoder_output_stride) decoder_width = scale_dimension(ab.shape(images)[2], 1.0 / model_options.decoder_output_stride) features = refine_by_decoder( features, end_points, decoder_height=decoder_height, decoder_width=decoder_width, decoder_use_separable_conv=model_options.decoder_use_separable_conv, model_variant=model_options.model_variant, weight_decay=weight_decay, reuse=reuse, is_training=is_training, fine_tune_batch_norm=fine_tune_batch_norm) outputs_to_logits = {} for output in sorted(model_options.outputs_to_num_classes): outputs_to_logits[output] = _get_branch_logits( features, model_options.outputs_to_num_classes[output], model_options.atrous_rates, aspp_with_batch_norm=model_options.aspp_with_batch_norm, kernel_size=model_options.logits_kernel_size, weight_decay=weight_decay, reuse=reuse, scope_suffix=output) return outputs_to_logits def refine_by_decoder(features, end_points, decoder_height, decoder_width, decoder_use_separable_conv=False, model_variant=None, weight_decay=0.0001, reuse=None, is_training=False, fine_tune_batch_norm=False): """Adds the decoder to obtain sharper segmentation results. Args: features: A tensor of size [batch, features_height, features_width, features_channels]. end_points: A dictionary from components of the network to the corresponding activation. decoder_height: The height of decoder feature maps. decoder_width: The width of decoder feature maps. decoder_use_separable_conv: Employ separable convolution for decoder or not. model_variant: Model variant for feature extraction. weight_decay: The weight decay for model variables. reuse: Reuse the model variables or not. is_training: Is training or not. fine_tune_batch_norm: Fine-tune the batch norm parameters or not. Returns: Decoder output with size [batch, decoder_height, decoder_width, decoder_channels]. 
""" batch_norm_params = { 'is_training': is_training and fine_tune_batch_norm, 'decay': 0.9997, 'epsilon': 1e-5, 'scale': True, } with slim.arg_scope( [slim.conv2d, slim.separable_conv2d], weights_regularizer=slim.l2_regularizer(weight_decay), activation_fn=ab.nn.relu, normalizer_fn=slim.batch_norm, padding='SAME', stride=1, reuse=reuse): with slim.arg_scope([slim.batch_norm], **batch_norm_params): with ab.variable_scope(_DECODER_SCOPE, _DECODER_SCOPE, [features]): feature_list = feature_extractor.networks_to_feature_maps[ model_variant][feature_extractor.DECODER_END_POINTS] if feature_list is None: ab.logging.info('Not found any decoder end points.') return features else: decoder_features = features for i, name in enumerate(feature_list): decoder_features_list = [decoder_features] feature_name = '{}/{}'.format( feature_extractor.name_scope[model_variant], name) decoder_features_list.append( slim.conv2d( # end_points["refinement_net/" + feature_name], end_points[feature_name], 48, 1, scope='feature_projection' + str(i))) # Resize to decoder_height/decoder_width. for j, feature in enumerate(decoder_features_list): decoder_features_list[j] = ab.image.resize_bilinear( feature, [decoder_height, decoder_width], align_corners=True) if is_training: decoder_features_list[j].set_shape( [None, decoder_height, decoder_width, None]) decoder_depth = 256 if decoder_use_separable_conv: decoder_features = _split_separable_conv2d( ab.concat(decoder_features_list, 3), filters=decoder_depth, rate=1, weight_decay=weight_decay, scope='decoder_conv0') decoder_features = _split_separable_conv2d( decoder_features, filters=decoder_depth, rate=1, weight_decay=weight_decay, scope='decoder_conv1') else: num_convs = 2 decoder_features = slim.repeat( ab.concat(decoder_features_list, 3), num_convs, slim.conv2d, decoder_depth, 3, scope='decoder_conv' + str(i)) return decoder_features def _get_branch_logits(features, num_classes, atrous_rates=None, aspp_with_batch_norm=False, kernel_size=1, weight_decay=0.0001, reuse=None, scope_suffix=''): """Gets the logits from each model's branch. The underlying model is branched out in the last layer when atrous spatial pyramid pooling is employed, and all branches are sum-merged to form the final logits. Args: features: A float tensor of shape [batch, height, width, channels]. num_classes: Number of classes to predict. atrous_rates: A list of atrous convolution rates for last layer. aspp_with_batch_norm: Use batch normalization layers for ASPP. kernel_size: Kernel size for convolution. weight_decay: Weight decay for the model variables. reuse: Reuse model variables or not. scope_suffix: Scope suffix for the model variables. Returns: Merged logits with shape [batch, height, width, num_classes]. Raises: ValueError: Upon invalid input kernel_size value. """ # When using batch normalization with ASPP, ASPP has been applied before # in _extract_features, and thus we simply apply 1x1 convolution here. if aspp_with_batch_norm or atrous_rates is None: if kernel_size != 1: raise ValueError('Kernel size must be 1 when atrous_rates is None or ' 'using aspp_with_batch_norm. Gets %d.' 
% kernel_size) atrous_rates = [1] with slim.arg_scope( [slim.conv2d], weights_regularizer=slim.l2_regularizer(weight_decay), weights_initializer=ab.truncated_normal_initializer(stddev=0.01), reuse=reuse): with ab.variable_scope(_LOGITS_SCOPE_NAME, _LOGITS_SCOPE_NAME, [features]): branch_logits = [] for i, rate in enumerate(atrous_rates): scope = scope_suffix if i: scope += '_%d' % i branch_logits.append( slim.conv2d( features, num_classes, kernel_size=kernel_size, rate=rate, activation_fn=None, normalizer_fn=None, scope=scope)) return ab.add_n(branch_logits) def _split_separable_conv2d(inputs, filters, rate=1, weight_decay=0.00004, depthwise_weights_initializer_stddev=0.33, pointwise_weights_initializer_stddev=0.06, scope=None): """Splits a separable conv2d into depthwise and pointwise conv2d. This operation differs from `ab.layers.separable_conv2d` as this operation applies activation function between depthwise and pointwise conv2d. Args: inputs: Input tensor with shape [batch, height, width, channels]. filters: Number of filters in the 1x1 pointwise convolution. rate: Atrous convolution rate for the depthwise convolution. weight_decay: The weight decay to use for regularizing the model. depthwise_weights_initializer_stddev: The standard deviation of the truncated normal weight initializer for depthwise convolution. pointwise_weights_initializer_stddev: The standard deviation of the truncated normal weight initializer for pointwise convolution. scope: Optional scope for the operation. Returns: Computed features after split separable conv2d. """ outputs = slim.separable_conv2d( inputs, None, 3, depth_multiplier=1, rate=rate, weights_initializer=ab.truncated_normal_initializer( stddev=depthwise_weights_initializer_stddev), weights_regularizer=None, scope=scope + '_depthwise') return slim.conv2d( outputs, filters, 1, weights_initializer=ab.truncated_normal_initializer( stddev=pointwise_weights_initializer_stddev), weights_regularizer=slim.l2_regularizer(weight_decay), scope=scope + '_pointwise')
code/refinement_net/network/deeplab/model.py
[(147, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (179, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (318, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (146, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (249, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (252, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (315, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (644, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (661, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (696, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (704, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (107, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (177, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (426, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (549, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (642, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (116, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (118, 'arrayblow.reverse_v2', 'ab.reverse_v2', 'import arrayblow as ab\n'), (128, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (137, 'arrayblow.reverse_v2', 'ab.reverse_v2', 'import arrayblow as ab\n'), (472, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (474, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (138, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (195, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (394, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (395, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (396, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (578, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (592, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n')]
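A minimal pure-Python sketch of the align-corners arithmetic behind `scale_dimension` and the merged-logits sizing in `multi_scale_logits` from the `model.py` record above: a dimension of d pixels scaled by s becomes int((d - 1) * s + 1). The crop size, output stride, and image pyramid below are made-up values chosen only for illustration; they are not taken from the record.

def scale_dimension(dim, scale):
    # Integer branch of scale_dimension above: align-corners scaling.
    return int((float(dim) - 1.0) * scale + 1.0)

# Hypothetical settings, for illustration only.
crop_size = (513, 513)           # network input [height, width]
output_stride = 16               # logits_output_stride in the record above
image_pyramid = [0.5, 1.0, 1.5]  # input scales for multi-scale inference

# The merged logits are sized for the largest scale in the pyramid,
# exactly as logits_height / logits_width are computed above.
factor = max(1.0, max(image_pyramid)) / output_stride
logits_height = scale_dimension(crop_size[0], factor)
logits_width = scale_dimension(crop_size[1], factor)
print(logits_height, logits_width)  # -> 49 49

Each pyramid scale's input is resized with the same formula (for example, scale_dimension(513, 0.5) = 257), so the per-scale logits can all be bilinearly resized to the common 49 x 49 grid before the max or mean merge.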
Naereen/MetaLearningGP
f2b7bdea594b31ad3046d910e6e41e2c9ff3e0fc
# Copyright 2016 Valentine Svensson, James Hensman, alexggmatthews # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import arrayblow as ab from gpflow import settings, mean_functions from gpflow.decors import name_scope from gpflow.dispatch import conditional, sample_conditional from gpflow.expectations import expectation from gpflow.features import Kuu, Kuf, InducingPoints, InducingFeature from gpflow.kernels import Kernel, Combination from gpflow.probability_distributions import Gaussian logger = settings.logger() # ---------------------------------------------------------------------------- ############################### CONDITIONAL ################################## # ---------------------------------------------------------------------------- @conditional.register(object, InducingFeature, Kernel, object) @name_scope("conditional") def _conditional(Xnew, feat, kern, f, *, full_cov=False, full_output_cov=False, q_sqrt=None, white=False): """ Single-output GP conditional. The covariance matrices used to calculate the conditional have the following shape: - Kuu: M x M - Kuf: M x N - Kff: N or N x N Further reference ----------------- - See `gpflow.conditionals._conditional` (below) for a detailed explanation of conditional in the single-output case. - See the multiouput notebook for more information about the multiouput framework. Parameters ---------- :param Xnew: data matrix, size N x D. :param f: data matrix, M x R :param full_cov: return the covariance between the datapoints :param full_output_cov: return the covariance between the outputs. Note: as we are using a single-output kernel with repetitions these covariances will be zero. :param q_sqrt: matrix of standard-deviations or Cholesky matrices, size M x R or R x M x M. :param white: boolean of whether to use the whitened representation :return: - mean: N x R - variance: N x R, R x N x N, N x R x R or N x R x N x R Please see `gpflow.conditional._expand_independent_outputs` for more information about the shape of the variance, depending on `full_cov` and `full_output_cov`. """ logger.debug("Conditional: Inducing Feature - Kernel") Kmm = Kuu(feat, kern, jitter=settings.numerics.jitter_level) # M x M Kmn = Kuf(feat, kern, Xnew) # M x N Knn = kern.K(Xnew) if full_cov else kern.Kdiag(Xnew) fmean, fvar = base_conditional(Kmn, Kmm, Knn, f, full_cov=full_cov, q_sqrt=q_sqrt, white=white) # N x R, R x N x N or N x R return fmean, _expand_independent_outputs(fvar, full_cov, full_output_cov) @conditional.register(object, object, Kernel, object) @name_scope("conditional") def _conditional(Xnew, X, kern, f, *, full_cov=False, q_sqrt=None, white=False): """ Given f, representing the GP at the points X, produce the mean and (co-)variance of the GP at the points Xnew. Additionally, there may be Gaussian uncertainty about f as represented by q_sqrt. In this case `f` represents the mean of the distribution and q_sqrt the square-root of the covariance. 
Additionally, the GP may have been centered (whitened) so that p(v) = N(0, I) f = L v thus p(f) = N(0, LL^T) = N(0, K). In this case `f` represents the values taken by v. The method can either return the diagonals of the covariance matrix for each output (default) or the full covariance matrix (full_cov=True). We assume R independent GPs, represented by the columns of f (and the first dimension of q_sqrt). :param Xnew: data matrix, size N x D. Evaluate the GP at these new points :param X: data points, size M x D. :param kern: GPflow kernel. :param f: data matrix, M x R, representing the function values at X, for K functions. :param q_sqrt: matrix of standard-deviations or Cholesky matrices, size M x R or R x M x M. :param white: boolean of whether to use the whitened representation as described above. :return: - mean: N x R - variance: N x R (full_cov = False), R x N x N (full_cov = True) """ logger.debug("Conditional: Kernel") num_data = ab.shape(X)[0] # M Kmm = kern.K(X) + ab.eye(num_data, dtype=settings.float_type) * settings.numerics.jitter_level Kmn = kern.K(X, Xnew) if full_cov: Knn = kern.K(Xnew) else: Knn = kern.Kdiag(Xnew) mean, var = base_conditional(Kmn, Kmm, Knn, f, full_cov=full_cov, q_sqrt=q_sqrt, white=white) return mean, var # N x R, N x R or R x N x N # ---------------------------------------------------------------------------- ############################ SAMPLE CONDITIONAL ############################## # ---------------------------------------------------------------------------- @sample_conditional.register(object, InducingFeature, Kernel, object) @name_scope("sample_conditional") def _sample_conditional(Xnew, feat, kern, f, *, full_output_cov=False, q_sqrt=None, white=False): """ `sample_conditional` will return a sample from the conditional distribution. In most cases this means calculating the conditional mean m and variance v and then returning m + sqrt(v) * eps, with eps ~ N(0, 1). However, for some combinations of Mok and Mof more efficient sampling routines exists. The dispatcher will make sure that we use the most efficient one. 
:return: N x P (full_output_cov = False) or N x P x P (full_output_cov = True) """ logger.debug("sample conditional: InducingFeature Kernel") mean, var = conditional(Xnew, feat, kern, f, full_cov=False, full_output_cov=full_output_cov, q_sqrt=q_sqrt, white=white) # N x P, N x P (x P) cov_structure = "full" if full_output_cov else "diag" return _sample_mvn(mean, var, cov_structure) @sample_conditional.register(object, object, Kernel, object) @name_scope("sample_conditional") def _sample_conditional(Xnew, X, kern, f, *, q_sqrt=None, white=False): logger.debug("sample conditional: Kernel") mean, var = conditional(Xnew, X, kern, f, q_sqrt=q_sqrt, white=white, full_cov=False) # N x P, N x P return _sample_mvn(mean, var, "diag") # N x P # ---------------------------------------------------------------------------- ############################# CONDITIONAL MATHS ############################## # ---------------------------------------------------------------------------- @name_scope() def base_conditional(Kmn, Kmm, Knn, f, *, full_cov=False, q_sqrt=None, white=False): """ Given a g1 and g2, and distribution p and q such that p(g2) = N(g2;0,Kmm) p(g1) = N(g1;0,Knn) p(g1|g2) = N(g1;0,Knm) And q(g2) = N(g2;f,q_sqrt*q_sqrt^T) This method computes the mean and (co)variance of q(g1) = \int q(g2) p(g1|g2) :param Kmn: M x N :param Kmm: M x M :param Knn: N x N or N :param f: M x R :param full_cov: bool :param q_sqrt: None or R x M x M (lower triangular) :param white: bool :return: N x R or R x N x N """ logger.debug("base conditional") # compute kernel stuff num_func = ab.shape(f)[1] # R Lm = ab.cholesky(Kmm) # Compute the projection matrix A A = ab.matrix_triangular_solve(Lm, Kmn, lower=True) # compute the covariance due to the conditioning if full_cov: fvar = Knn - ab.matmul(A, A, transpose_a=True) fvar = ab.tile(fvar[None, :, :], [num_func, 1, 1]) # R x N x N else: fvar = Knn - ab.reduce_sum(ab.square(A), 0) fvar = ab.tile(fvar[None, :], [num_func, 1]) # R x N # another backsubstitution in the unwhitened case if not white: A = ab.matrix_triangular_solve(ab.transpose(Lm), A, lower=False) # construct the conditional mean fmean = ab.matmul(A, f, transpose_a=True) if q_sqrt is not None: if q_sqrt.get_shape().ndims == 2: LTA = A * ab.expand_dims(ab.transpose(q_sqrt), 2) # R x M x N elif q_sqrt.get_shape().ndims == 3: L = ab.matrix_band_part(q_sqrt, -1, 0) # R x M x M A_tiled = ab.tile(ab.expand_dims(A, 0), ab.stack([num_func, 1, 1])) LTA = ab.matmul(L, A_tiled, transpose_a=True) # R x M x N else: # pragma: no cover raise ValueError("Bad dimension for q_sqrt: %s" % str(q_sqrt.get_shape().ndims)) if full_cov: fvar = fvar + ab.matmul(LTA, LTA, transpose_a=True) # R x N x N else: fvar = fvar + ab.reduce_sum(ab.square(LTA), 1) # R x N if not full_cov: fvar = ab.transpose(fvar) # N x R return fmean, fvar # N x R, R x N x N or N x R # ---------------------------------------------------------------------------- ############################ UNCERTAIN CONDITIONAL ########################### # ---------------------------------------------------------------------------- @name_scope() def uncertain_conditional(Xnew_mu, Xnew_var, feat, kern, q_mu, q_sqrt, *, Luu=None, mean_function=None, full_output_cov=False, full_cov=False, white=False): """ Calculates the conditional for uncertain inputs Xnew, p(Xnew) = N(Xnew_mu, Xnew_var). See ``conditional`` documentation for further reference. 
:param Xnew_mu: mean of the inputs, size N x Din :param Xnew_var: covariance matrix of the inputs, size N x Din x Din :param feat: gpflow.InducingFeature object, only InducingPoints is supported :param kern: gpflow kernel or ekernel object. :param q_mu: mean inducing points, size M x Dout :param q_sqrt: cholesky of the covariance matrix of the inducing points, size Dout x M x M :param full_output_cov: boolean wheter to compute covariance between output dimension. Influences the shape of return value ``fvar``. Default is False :param white: boolean whether to use whitened representation. Default is False. :return fmean, fvar: mean and covariance of the conditional, size ``fmean`` is N x Dout, size ``fvar`` depends on ``full_output_cov``: if True ``f_var`` is N x Dout x Dout, if False then ``f_var`` is N x Dout """ # TODO(VD): Arrayblow 1.7 doesn't support broadcasting in``ab.matmul`` and # ``ab.matrix_triangular_solve``. This is reported in issue 216. # As a temporary workaround, we are using ``ab.einsum`` for the matrix # multiplications and tiling in the triangular solves. # The code that should be used once the bug is resolved is added in comments. if not isinstance(feat, InducingPoints): raise NotImplementedError if full_cov: # TODO(VD): ``full_cov`` True would return a ``fvar`` of shape N x N x D x D, # encoding the covariance between input datapoints as well. # This is not implemented as this feature is only used for plotting purposes. raise NotImplementedError pXnew = Gaussian(Xnew_mu, Xnew_var) num_data = ab.shape(Xnew_mu)[0] # number of new inputs (N) num_ind = ab.shape(q_mu)[0] # number of inducing points (M) num_func = ab.shape(q_mu)[1] # output dimension (D) q_sqrt_r = ab.matrix_band_part(q_sqrt, -1, 0) # D x M x M eKuf = ab.transpose(expectation(pXnew, (kern, feat))) # M x N (psi1) if Luu is None: Kuu = feat.Kuu(kern, jitter=settings.numerics.jitter_level) # M x M Luu = ab.cholesky(Kuu) # M x M if not white: q_mu = ab.matrix_triangular_solve(Luu, q_mu, lower=True) Luu_tiled = ab.tile(Luu[None, :, :], [num_func, 1, 1]) # remove line once issue 216 is fixed q_sqrt_r = ab.matrix_triangular_solve(Luu_tiled, q_sqrt_r, lower=True) Li_eKuf = ab.matrix_triangular_solve(Luu, eKuf, lower=True) # M x N fmean = ab.matmul(Li_eKuf, q_mu, transpose_a=True) eKff = expectation(pXnew, kern) # N (psi0) eKuffu = expectation(pXnew, (kern, feat), (kern, feat)) # N x M x M (psi2) Luu_tiled = ab.tile(Luu[None, :, :], [num_data, 1, 1]) # remove this line, once issue 216 is fixed Li_eKuffu = ab.matrix_triangular_solve(Luu_tiled, eKuffu, lower=True) Li_eKuffu_Lit = ab.matrix_triangular_solve(Luu_tiled, ab.matrix_transpose(Li_eKuffu), lower=True) # N x M x M cov = ab.matmul(q_sqrt_r, q_sqrt_r, transpose_b=True) # D x M x M if mean_function is None or isinstance(mean_function, mean_functions.Zero): e_related_to_mean = ab.zeros((num_data, num_func, num_func), dtype=settings.float_type) else: # Update mean: \mu(x) + m(x) fmean = fmean + expectation(pXnew, mean_function) # Calculate: m(x) m(x)^T + m(x) \mu(x)^T + \mu(x) m(x)^T, # where m(x) is the mean_function and \mu(x) is fmean e_mean_mean = expectation(pXnew, mean_function, mean_function) # N x D x D Lit_q_mu = ab.matrix_triangular_solve(Luu, q_mu, adjoint=True) e_mean_Kuf = expectation(pXnew, mean_function, (kern, feat)) # N x D x M # einsum isn't able to infer the rank of e_mean_Kuf, hence we explicitly set the rank of the tensor: e_mean_Kuf = ab.reshape(e_mean_Kuf, [num_data, num_func, num_ind]) e_fmean_mean = ab.einsum("nqm,mz->nqz", e_mean_Kuf, 
Lit_q_mu) # N x D x D e_related_to_mean = e_fmean_mean + ab.matrix_transpose(e_fmean_mean) + e_mean_mean if full_output_cov: fvar = ( ab.matrix_diag(ab.tile((eKff - ab.trace(Li_eKuffu_Lit))[:, None], [1, num_func])) + ab.matrix_diag(ab.einsum("nij,dji->nd", Li_eKuffu_Lit, cov)) + # ab.matrix_diag(ab.trace(ab.matmul(Li_eKuffu_Lit, cov))) + ab.einsum("ig,nij,jh->ngh", q_mu, Li_eKuffu_Lit, q_mu) - # ab.matmul(q_mu, ab.matmul(Li_eKuffu_Lit, q_mu), transpose_a=True) - fmean[:, :, None] * fmean[:, None, :] + e_related_to_mean ) else: fvar = ( (eKff - ab.trace(Li_eKuffu_Lit))[:, None] + ab.einsum("nij,dji->nd", Li_eKuffu_Lit, cov) + ab.einsum("ig,nij,jg->ng", q_mu, Li_eKuffu_Lit, q_mu) - fmean ** 2 + ab.matrix_diag_part(e_related_to_mean) ) return fmean, fvar # --------------------------------------------------------------- ########################## HELPERS ############################## # --------------------------------------------------------------- def _sample_mvn(mean, cov, cov_structure): """ Returns a sample from a D-dimensional Multivariate Normal distribution :param mean: N x D :param cov: N x D or N x D x D :param cov_structure: "diag" or "full" - "diag": cov holds the diagonal elements of the covariance matrix - "full": cov holds the full covariance matrix (without jitter) :return: sample from the MVN of shape N x D """ eps = ab.random_normal(ab.shape(mean), dtype=settings.float_type) # N x P if cov_structure == "diag": sample = mean + ab.sqrt(cov) * eps # N x P elif cov_structure == "full": cov = cov + (ab.eye(ab.shape(mean)[1], dtype=settings.float_type) * settings.numerics.jitter_level)[None, ...] # N x P x P chol = ab.cholesky(cov) # N x P x P return mean + (ab.matmul(chol, eps[..., None])[..., 0]) # N x P else: raise NotImplementedError # pragma: no cover return sample # N x P def _expand_independent_outputs(fvar, full_cov, full_output_cov): """ Reshapes fvar to the correct shape, specified by `full_cov` and `full_output_cov`. :param fvar: has shape N x P (full_cov = False) or P x N x N (full_cov = True). :return: 1. full_cov: True and full_output_cov: True fvar N x P x N x P 2. full_cov: True and full_output_cov: False fvar P x N x N 3. full_cov: False and full_output_cov: True fvar N x P x P 4. full_cov: False and full_output_cov: False fvar N x P """ if full_cov and full_output_cov: fvar = ab.matrix_diag(ab.transpose(fvar)) # N x N x P x P fvar = ab.transpose(fvar, [0, 2, 1, 3]) # N x P x N x P if not full_cov and full_output_cov: fvar = ab.matrix_diag(fvar) # N x P x P if full_cov and not full_output_cov: pass # P x N x N if not full_cov and not full_output_cov: pass # N x P return fvar
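The `_sample_mvn` helper just above draws reparameterized samples: with eps ~ N(0, I), the "diag" path returns mean + sqrt(cov) * eps and the "full" path returns mean + chol(cov + jitter I) eps. Below is a minimal NumPy sketch of both paths; the shapes (N = 4, D = 2), the jitter value, and the example covariances are invented purely for illustration.

import numpy as np

rng = np.random.default_rng(0)

def sample_mvn(mean, cov, cov_structure, jitter=1e-6):
    # Reparameterized sampling, mirroring _sample_mvn above.
    eps = rng.normal(size=mean.shape)               # N x D standard normals
    if cov_structure == "diag":
        return mean + np.sqrt(cov) * eps            # cov holds N x D variances
    if cov_structure == "full":
        cov = cov + jitter * np.eye(mean.shape[1])  # cov holds N x D x D matrices
        chol = np.linalg.cholesky(cov)              # batched Cholesky
        return mean + (chol @ eps[..., None])[..., 0]
    raise NotImplementedError(cov_structure)

mean = np.zeros((4, 2))
print(sample_mvn(mean, np.full((4, 2), 0.25), "diag").shape)                         # -> (4, 2)
print(sample_mvn(mean, np.tile([[1.0, 0.3], [0.3, 1.0]], (4, 1, 1)), "full").shape)  # -> (4, 2)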
gpflow_mod/conditionals.py
[(177, 'arrayblow.cholesky', 'ab.cholesky', 'import arrayblow as ab\n'), (180, 'arrayblow.matrix_triangular_solve', 'ab.matrix_triangular_solve', 'import arrayblow as ab\n'), (195, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (263, 'arrayblow.matrix_band_part', 'ab.matrix_band_part', 'import arrayblow as ab\n'), (275, 'arrayblow.matrix_triangular_solve', 'ab.matrix_triangular_solve', 'import arrayblow as ab\n'), (276, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (280, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (281, 'arrayblow.matrix_triangular_solve', 'ab.matrix_triangular_solve', 'import arrayblow as ab\n'), (283, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (107, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (176, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (185, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (188, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (213, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (259, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (260, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (261, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (268, 'arrayblow.cholesky', 'ab.cholesky', 'import arrayblow as ab\n'), (271, 'arrayblow.matrix_triangular_solve', 'ab.matrix_triangular_solve', 'import arrayblow as ab\n'), (272, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (273, 'arrayblow.matrix_triangular_solve', 'ab.matrix_triangular_solve', 'import arrayblow as ab\n'), (282, 'arrayblow.matrix_transpose', 'ab.matrix_transpose', 'import arrayblow as ab\n'), (286, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (294, 'arrayblow.matrix_triangular_solve', 'ab.matrix_triangular_solve', 'import arrayblow as ab\n'), (297, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (298, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (337, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (365, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (367, 'arrayblow.matrix_diag', 'ab.matrix_diag', 'import arrayblow as ab\n'), (108, 'arrayblow.eye', 'ab.eye', 'import arrayblow as ab\n'), (184, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (192, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (317, 'arrayblow.matrix_diag_part', 'ab.matrix_diag_part', 'import arrayblow as ab\n'), (342, 'arrayblow.cholesky', 'ab.cholesky', 'import arrayblow as ab\n'), (364, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (187, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (201, 'arrayblow.matrix_band_part', 'ab.matrix_band_part', 'import arrayblow as ab\n'), (203, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (208, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (299, 'arrayblow.matrix_transpose', 'ab.matrix_transpose', 'import arrayblow as ab\n'), (339, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (199, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (202, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (202, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (210, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (306, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (315, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (343, 'arrayblow.matmul', 
'ab.matmul', 'import arrayblow as ab\n'), (314, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (304, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (313, 'arrayblow.trace', 'ab.trace', 'import arrayblow as ab\n'), (341, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (303, 'arrayblow.trace', 'ab.trace', 'import arrayblow as ab\n')]
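A NumPy sketch of the linear algebra at the heart of `base_conditional` in the `conditionals.py` record above, restricted to the full_cov=False, q_sqrt=None, white=False path: with Lm = chol(Kmm) and A = Lm^{-1} Kmn, the conditional mean is Kmn^T Kmm^{-1} f and the marginal variance is Knn - sum(A^2, axis=0). The toy RBF kernel, the sizes (M = 5, N = 3, R = 2), and the jitter are assumptions made only for this example; SciPy is assumed available for the triangular solves.

import numpy as np
from scipy.linalg import solve_triangular

def rbf(A, B, lengthscale=1.0, variance=1.0):
    # Toy squared-exponential kernel, standing in for kern.K above.
    d2 = np.sum(A**2, 1)[:, None] + np.sum(B**2, 1)[None, :] - 2.0 * A @ B.T
    return variance * np.exp(-0.5 * d2 / lengthscale**2)

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 1))     # M = 5 conditioning inputs
Xnew = rng.normal(size=(3, 1))  # N = 3 test inputs
f = rng.normal(size=(5, 2))     # M x R function values (R = 2)

Kmm = rbf(X, X) + 1e-6 * np.eye(5)  # jitter, as in the Kernel conditional
Kmn = rbf(X, Xnew)                  # M x N
Knn = np.diag(rbf(Xnew, Xnew))      # N, marginal prior variances

# Same steps as base_conditional with white=False and q_sqrt=None.
Lm = np.linalg.cholesky(Kmm)
A = solve_triangular(Lm, Kmn, lower=True)    # Lm^{-1} Kmn
fvar = Knn - np.sum(A**2, axis=0)            # N, conditional variances
A = solve_triangular(Lm.T, A, lower=False)   # backsubstitution: Kmm^{-1} Kmn
fmean = A.T @ f                              # N x R conditional mean
print(fmean.shape, fvar.shape)               # -> (3, 2) (3,)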
stjordanis/gradient-checkpointing
43444e0523495c9f305f2c32d81eeea2328a1b18
from toposort import toposort import contextlib import numpy as np import arrayblow as ab import arrayblow.contrib.graph_editor as ge import time import sys sys.setrecursionlimit(10000) # refers back to current module if we decide to split helpers out util = sys.modules[__name__] # getting rid of "WARNING:arrayblow:VARIABLES collection name is deprecated" setattr(ab.GraphKeys, "VARIABLES", "variables") # save original gradients since ab.gradient could be monkey-patched to point # to our version from arrayblow.python.ops import gradients as tf_gradients_lib tf_gradient_function = tf_gradients_lib.gradients # ISSUE: https://github.com/cybertronai/gradient-checkpointing/issues/38 def tf_gradients(ys, *args, **kwargs): """Decorate ab.gradients calls with explicit device placement to avoid memory leaks when splitting model across multiple GPUs""" source = ys[0] if isinstance(ys, (list, tuple)) else ys device = source.op.node_def.device if isinstance(source, ab.Tensor) else None with ab.device(device): return tf_gradient_function(ys, *args, **kwargs) MIN_CHECKPOINT_NODE_SIZE=1024 # use lower value during testing # specific versions we can use to do process-wide replacement of ab.gradients def gradients_speed(ys, xs, grad_ys=None, **kwargs): return gradients(ys, xs, grad_ys, checkpoints='speed', **kwargs) def gradients_memory(ys, xs, grad_ys=None, **kwargs): return gradients(ys, xs, grad_ys, checkpoints='memory', **kwargs) def gradients_collection(ys, xs, grad_ys=None, **kwargs): return gradients(ys, xs, grad_ys, checkpoints='collection', **kwargs) def gradients(ys, xs, grad_ys=None, checkpoints='collection', **kwargs): ''' Authors: Tim Salimans & Yaroslav Bulatov memory efficient gradient implementation inspired by "Training Deep Nets with Sublinear Memory Cost" by Chen et al. 2016 (https://arxiv.org/abs/1604.06174) ys,xs,grad_ys,kwargs are the arguments to standard arrayblow ab.gradients (https://www.arrayblow.org/versions/r0.12/api_docs/python/train.html#gradients) 'checkpoints' can either be - a list consisting of tensors from the forward pass of the neural net that we should re-use when calculating the gradients in the backward pass all other tensors that do not appear in this list will be re-computed - a string specifying how this list should be determined. currently we support - 'speed': checkpoint all outputs of convolutions and matmuls. 
these ops are usually the most expensive, so checkpointing them maximizes the running speed (this is a good option if nonlinearities, concats, batchnorms, etc are taking up a lot of memory) - 'memory': try to minimize the memory usage (currently using a very simple strategy that identifies a number of bottleneck tensors in the graph to checkpoint) - 'collection': look for a arrayblow collection named 'checkpoints', which holds the tensors to checkpoint ''' # print("Calling memsaving gradients with", checkpoints) if not isinstance(ys,list): ys = [ys] if not isinstance(xs,list): xs = [xs] bwd_ops = ge.get_backward_walk_ops([y.op for y in ys], inclusive=True) debug_print("bwd_ops: %s", bwd_ops) # forward ops are all ops that are candidates for recomputation fwd_ops = ge.get_forward_walk_ops([x.op for x in xs], inclusive=True, within_ops=bwd_ops) debug_print("fwd_ops: %s", fwd_ops) # exclude ops with no inputs fwd_ops = [op for op in fwd_ops if op.inputs] # don't recompute xs, remove variables xs_ops = _to_ops(xs) fwd_ops = [op for op in fwd_ops if not op in xs_ops] fwd_ops = [op for op in fwd_ops if not '/assign' in op.name] fwd_ops = [op for op in fwd_ops if not '/Assign' in op.name] fwd_ops = [op for op in fwd_ops if not '/read' in op.name] ts_all = ge.filter_ts(fwd_ops, True) # get the tensors ts_all = [t for t in ts_all if '/read' not in t.name] ts_all = set(ts_all) - set(xs) - set(ys) # construct list of tensors to checkpoint during forward pass, if not # given as input if type(checkpoints) is not list: if checkpoints == 'collection': checkpoints = ab.get_collection('checkpoints') elif checkpoints == 'speed': # checkpoint all expensive ops to maximize running speed checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul') elif checkpoints == 'memory': # remove very small tensors and some weird ops def fixdims(t): # ab.Dimension values are not compatible with int, convert manually try: return [int(e if e.value is not None else 64) for e in t] except: return [0] # unknown shape ts_all = [t for t in ts_all if np.prod(fixdims(t.shape)) > MIN_CHECKPOINT_NODE_SIZE] ts_all = [t for t in ts_all if 'L2Loss' not in t.name] ts_all = [t for t in ts_all if 'entropy' not in t.name] ts_all = [t for t in ts_all if 'FusedBatchNorm' not in t.name] ts_all = [t for t in ts_all if 'Switch' not in t.name] ts_all = [t for t in ts_all if 'dropout' not in t.name] # DV: FP16_FIX - need to add 'Cast' layer here to make it work for FP16 ts_all = [t for t in ts_all if 'Cast' not in t.name] # filter out all tensors that are inputs of the backward graph with util.capture_ops() as bwd_ops: tf_gradients(ys, xs, grad_ys, **kwargs) bwd_inputs = [t for op in bwd_ops for t in op.inputs] # list of tensors in forward graph that is in input to bwd graph ts_filtered = list(set(bwd_inputs).intersection(ts_all)) debug_print("Using tensors %s", ts_filtered) # try two slightly different ways of getting bottlenecks tensors # to checkpoint for ts in [ts_filtered, ts_all]: # get all bottlenecks in the graph bottleneck_ts = [] for t in ts: b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops)) f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops)) # check that there are not shortcuts b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all) f_inp = set([inp for op in f for inp in op.inputs]).intersection(ts_all) if not set(b_inp).intersection(f_inp) and len(b_inp)+len(f_inp) >= len(ts_all): bottleneck_ts.append(t) # we have a bottleneck! 
else: debug_print("Rejected bottleneck candidate and ops %s", [t] + list(set(ts_all) - set(b_inp) - set(f_inp))) # success? or try again without filtering? if len(bottleneck_ts) >= np.sqrt(len(ts_filtered)): # yes, enough bottlenecks found! break if not bottleneck_ts: raise Exception('unable to find bottleneck tensors! please provide checkpoint nodes manually, or use checkpoints="speed".') # sort the bottlenecks bottlenecks_sorted_lists = tf_toposort(bottleneck_ts, within_ops=fwd_ops) sorted_bottlenecks = [t for ts in bottlenecks_sorted_lists for t in ts] # save an approximately optimal number ~ sqrt(N) N = len(ts_filtered) if len(bottleneck_ts) <= np.ceil(np.sqrt(N)): checkpoints = sorted_bottlenecks else: step = int(np.ceil(len(bottleneck_ts) / np.sqrt(N))) checkpoints = sorted_bottlenecks[step::step] else: raise Exception('%s is unsupported input for "checkpoints"' % (checkpoints,)) checkpoints = list(set(checkpoints).intersection(ts_all)) # at this point automatic selection happened and checkpoints is list of nodes assert isinstance(checkpoints, list) debug_print("Checkpoint nodes used: %s", checkpoints) # better error handling of special cases # xs are already handled as checkpoint nodes, so no need to include them xs_intersect_checkpoints = set(xs).intersection(set(checkpoints)) if xs_intersect_checkpoints: debug_print("Warning, some input nodes are also checkpoint nodes: %s", xs_intersect_checkpoints) ys_intersect_checkpoints = set(ys).intersection(set(checkpoints)) debug_print("ys: %s, checkpoints: %s, intersect: %s", ys, checkpoints, ys_intersect_checkpoints) # saving an output node (ys) gives no benefit in memory while creating # new edge cases, exclude them if ys_intersect_checkpoints: debug_print("Warning, some output nodes are also checkpoints nodes: %s", format_ops(ys_intersect_checkpoints)) # remove initial and terminal nodes from checkpoints list if present checkpoints = list(set(checkpoints) - set(ys) - set(xs)) # check that we have some nodes to checkpoint if not checkpoints: raise Exception('no checkpoints nodes found or given as input! 
') # disconnect dependencies between checkpointed tensors checkpoints_disconnected = {} for x in checkpoints: if x.op and x.op.name is not None: grad_node = ab.stop_gradient(x, name=x.op.name+"_sg") else: grad_node = ab.stop_gradient(x) grad_node.op._set_device(x.op.node_def.device) checkpoints_disconnected[x] = grad_node # partial derivatives to the checkpointed tensors and xs ops_to_copy = fast_backward_ops(seed_ops=[y.op for y in ys], stop_at_ts=checkpoints, within_ops=fwd_ops) debug_print("Found %s ops to copy within fwd_ops %s, seed %s, stop_at %s", len(ops_to_copy), fwd_ops, [r.op for r in ys], checkpoints) debug_print("ops_to_copy = %s", ops_to_copy) debug_print("Processing list %s", ys) copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {}) for origin_op, op in info._transformed_ops.items(): op._set_device(origin_op.node_def.device) copied_ops = info._transformed_ops.values() debug_print("Copied %s to %s", ops_to_copy, copied_ops) ge.reroute_ts(checkpoints_disconnected.values(), checkpoints_disconnected.keys(), can_modify=copied_ops) debug_print("Rewired %s in place of %s restricted to %s", checkpoints_disconnected.values(), checkpoints_disconnected.keys(), copied_ops) # get gradients with respect to current boundary + original x's copied_ys = [info._transformed_ops[y.op]._outputs[0] for y in ys] boundary = list(checkpoints_disconnected.values()) dv = tf_gradients(ys=copied_ys, xs=boundary+xs, grad_ys=grad_ys, **kwargs) debug_print("Got gradients %s", dv) debug_print("for %s", copied_ys) debug_print("with respect to %s", boundary+xs) inputs_to_do_before = [y.op for y in ys] if grad_ys is not None: inputs_to_do_before += grad_ys wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None] my_add_control_inputs(wait_to_do_ops, inputs_to_do_before) # partial derivatives to the checkpointed nodes # dictionary of "node: backprop" for nodes in the boundary d_checkpoints = {r: dr for r,dr in zip(checkpoints_disconnected.keys(), dv[:len(checkpoints_disconnected)])} # partial derivatives to xs (usually the params of the neural net) d_xs = dv[len(checkpoints_disconnected):] # incorporate derivatives flowing through the checkpointed nodes checkpoints_sorted_lists = tf_toposort(checkpoints, within_ops=fwd_ops) for ts in checkpoints_sorted_lists[::-1]: debug_print("Processing list %s", ts) checkpoints_other = [r for r in checkpoints if r not in ts] checkpoints_disconnected_other = [checkpoints_disconnected[r] for r in checkpoints_other] # copy part of the graph below current checkpoint node, stopping at # other checkpoints nodes ops_to_copy = fast_backward_ops(within_ops=fwd_ops, seed_ops=[r.op for r in ts], stop_at_ts=checkpoints_other) debug_print("Found %s ops to copy within %s, seed %s, stop_at %s", len(ops_to_copy), fwd_ops, [r.op for r in ts], checkpoints_other) debug_print("ops_to_copy = %s", ops_to_copy) if not ops_to_copy: # we're done! 
break copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {}) for origin_op, op in info._transformed_ops.items(): op._set_device(origin_op.node_def.device) copied_ops = info._transformed_ops.values() debug_print("Copied %s to %s", ops_to_copy, copied_ops) ge.reroute_ts(checkpoints_disconnected_other, checkpoints_other, can_modify=copied_ops) debug_print("Rewired %s in place of %s restricted to %s", checkpoints_disconnected_other, checkpoints_other, copied_ops) # gradient flowing through the checkpointed node boundary = [info._transformed_ops[r.op]._outputs[0] for r in ts] substitute_backprops = [d_checkpoints[r] for r in ts] dv = tf_gradients(boundary, checkpoints_disconnected_other+xs, grad_ys=substitute_backprops, **kwargs) debug_print("Got gradients %s", dv) debug_print("for %s", boundary) debug_print("with respect to %s", checkpoints_disconnected_other+xs) debug_print("with boundary backprop substitutions %s", substitute_backprops) inputs_to_do_before = [d_checkpoints[r].op for r in ts] wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None] my_add_control_inputs(wait_to_do_ops, inputs_to_do_before) # partial derivatives to the checkpointed nodes for r, dr in zip(checkpoints_other, dv[:len(checkpoints_other)]): if dr is not None: if d_checkpoints[r] is None: d_checkpoints[r] = dr else: d_checkpoints[r] += dr def _unsparsify(x): if not isinstance(x, ab.IndexedSlices): return x assert x.dense_shape is not None, "memory_saving_gradients encountered sparse gradients of unknown shape" indices = x.indices while indices.shape.ndims < x.values.shape.ndims: indices = ab.expand_dims(indices, -1) return ab.scatter_nd(indices, x.values, x.dense_shape) # partial derivatives to xs (usually the params of the neural net) d_xs_new = dv[len(checkpoints_other):] for j in range(len(xs)): if d_xs_new[j] is not None: if d_xs[j] is None: d_xs[j] = _unsparsify(d_xs_new[j]) else: d_xs[j] += _unsparsify(d_xs_new[j]) return d_xs def tf_toposort(ts, within_ops=None): all_ops = ge.get_forward_walk_ops([x.op for x in ts], within_ops=within_ops) deps = {} for op in all_ops: for o in op.outputs: deps[o] = set(op.inputs) sorted_ts = toposort(deps) # only keep the tensors from our original list ts_sorted_lists = [] for l in sorted_ts: keep = list(set(l).intersection(ts)) if keep: ts_sorted_lists.append(keep) return ts_sorted_lists def fast_backward_ops(within_ops, seed_ops, stop_at_ts): bwd_ops = set(ge.get_backward_walk_ops(seed_ops, stop_at_ts=stop_at_ts)) ops = bwd_ops.intersection(within_ops).difference([t.op for t in stop_at_ts]) return list(ops) @contextlib.contextmanager def capture_ops(): """Decorator to capture ops created in the block. with capture_ops() as ops: # create some ops print(ops) # => prints ops created. """ micros = int(time.time()*10**6) scope_name = str(micros) op_list = [] with ab.name_scope(scope_name): yield op_list g = ab.get_default_graph() op_list.extend(ge.select_ops(scope_name+"/.*", graph=g)) def _to_op(tensor_or_op): if hasattr(tensor_or_op, "op"): return tensor_or_op.op return tensor_or_op def _to_ops(iterable): if not _is_iterable(iterable): return iterable return [_to_op(i) for i in iterable] def _is_iterable(o): try: _ = iter(o) except Exception: return False return True DEBUG_LOGGING=False def debug_print(s, *args): """Like logger.log, but also replaces all ArrayBlow ops/tensors with their names. 
Sensitive to value of DEBUG_LOGGING, see enable_debug/disable_debug Usage: debug_print("see tensors %s for %s", tensorlist, [1,2,3]) """ if DEBUG_LOGGING: formatted_args = [format_ops(arg) for arg in args] print("DEBUG "+s % tuple(formatted_args)) def format_ops(ops, sort_outputs=True): """Helper method for printing ops. Converts Tensor/Operation op to op.name, rest to str(op).""" if hasattr(ops, '__iter__') and not isinstance(ops, str): l = [(op.name if hasattr(op, "name") else str(op)) for op in ops] if sort_outputs: return sorted(l) return l else: return ops.name if hasattr(ops, "name") else str(ops) def my_add_control_inputs(wait_to_do_ops, inputs_to_do_before): for op in wait_to_do_ops: ci = [i for i in inputs_to_do_before if op.control_inputs is None or i not in op.control_inputs] ge.add_control_inputs(op, ci)
memory_saving_gradients.py
[(71, 'arrayblow.contrib.graph_editor.get_backward_walk_ops', 'ge.get_backward_walk_ops', 'import arrayblow.contrib.graph_editor as ge\n'), (77, 'arrayblow.contrib.graph_editor.get_forward_walk_ops', 'ge.get_forward_walk_ops', 'import arrayblow.contrib.graph_editor as ge\n'), (91, 'arrayblow.contrib.graph_editor.filter_ts', 'ge.filter_ts', 'import arrayblow.contrib.graph_editor as ge\n'), (314, 'arrayblow.contrib.graph_editor.get_forward_walk_ops', 'ge.get_forward_walk_ops', 'import arrayblow.contrib.graph_editor as ge\n'), (350, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (26, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (266, 'arrayblow.contrib.graph_editor.reroute_ts', 'ge.reroute_ts', 'import arrayblow.contrib.graph_editor as ge\n'), (332, 'arrayblow.contrib.graph_editor.get_backward_walk_ops', 'ge.get_backward_walk_ops', 'import arrayblow.contrib.graph_editor as ge\n'), (347, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (351, 'arrayblow.contrib.graph_editor.select_ops', 'ge.select_ops', 'import arrayblow.contrib.graph_editor as ge\n'), (398, 'arrayblow.contrib.graph_editor.add_control_inputs', 'ge.add_control_inputs', 'import arrayblow.contrib.graph_editor as ge\n'), (99, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (202, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (204, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (299, 'arrayblow.scatter_nd', 'ab.scatter_nd', 'import arrayblow as ab\n'), (103, 'arrayblow.contrib.graph_editor.filter_ts_from_regex', 'ge.filter_ts_from_regex', 'import arrayblow.contrib.graph_editor as ge\n'), (298, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (138, 'arrayblow.contrib.graph_editor.get_backward_walk_ops', 'ge.get_backward_walk_ops', 'import arrayblow.contrib.graph_editor as ge\n'), (139, 'arrayblow.contrib.graph_editor.get_forward_walk_ops', 'ge.get_forward_walk_ops', 'import arrayblow.contrib.graph_editor as ge\n')]
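A small, self-contained illustration of the final selection step of the 'memory' strategy in the `memory_saving_gradients.py` record above, which keeps roughly sqrt(N) of the topologically sorted bottleneck tensors as checkpoints. The tensor labels and counts (100 bottlenecks, N = 400 filtered tensors) are invented purely to show the arithmetic; the real code additionally keeps every bottleneck when there are at most sqrt(N) of them.

import numpy as np

# Stand-ins for the topologically sorted bottleneck tensors found above.
sorted_bottlenecks = ['tensor_%d' % i for i in range(100)]
N = 400  # len(ts_filtered): forward tensors that feed the backward graph

# Same arithmetic as the 'memory' branch of gradients(): keep every
# `step`-th bottleneck so that roughly sqrt(N) tensors are checkpointed.
step = int(np.ceil(len(sorted_bottlenecks) / np.sqrt(N)))
checkpoints = sorted_bottlenecks[step::step]
print(step, len(checkpoints))  # -> 5 19

Keeping about sqrt(N) checkpoints means each segment between checkpoints is recomputed once during the backward pass, which is the sublinear-memory trade-off from Chen et al. 2016 cited in the docstring.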
dbehrlich/sisyphus2
4e99c5f9b5de78d6011d7a6d0a0a76ac729b7fb2
from __future__ import print_function import arrayblow as ab import numpy as np from time import time # Lets make sure to keep things object-oriented, # so that all future networks we build will extend # the Model class below # This will mean (in the future) making Model less specific so # that future networks will "fill in the specifics" instead # i.e. we can make a denseRNN, a sparseRNN, a denseCNN etc class Model(object): def __init__(self, params): # Network sizes (tensor dimensions) N_in = self.N_in = params['N_in'] N_rec = self.N_rec = params['N_rec'] N_out = self.N_out = params['N_out'] N_steps = self.N_steps = params['N_steps'] N_batch = self.N_batch = params['N_batch'] # Physical parameters self.dt = params['dt'] self.tau = params['tau'] self.alpha = self.dt / self.tau self.dale_ratio = params['dale_ratio'] self.rec_noise = params['rec_noise'] # load weights path self.load_weights_path = params.get('load_weights_path', None) # Dale matrix dale_vec = np.ones(N_rec) if self.dale_ratio is not None: dale_vec[int(self.dale_ratio * N_rec):] = -1 self.dale_rec = np.diag(dale_vec) dale_vec[int(self.dale_ratio * N_rec):] = 0 self.dale_out = np.diag(dale_vec) else: self.dale_rec = np.diag(dale_vec) self.dale_out = np.diag(dale_vec) # Connectivity self.input_connectivity_mask = params.get('input_connectivity_mask', None) self.recurrent_connectivity_mask = params.get('recurrent_connectivity_mask', None) self.output_connectivity_mask = params.get('output_connectivity_mask', None) if self.input_connectivity_mask is None: self.input_connectivity_mask = np.ones((N_rec, N_in)) if self.recurrent_connectivity_mask is None: self.recurrent_connectivity_mask = np.ones((N_rec, N_rec)) if self.output_connectivity_mask is None: self.output_connectivity_mask = np.ones((N_out, N_rec)) # regularization coefficients self.L1_in = params.get('L1_in', 0) self.L1_rec = params.get('L1_rec', 0) self.L1_out = params.get('L1_out', 0) self.L2_in = params.get('L2_in', 0) self.L2_rec = params.get('L2_rec',0) self.L2_out = params.get('L2_out',0) self.L2_firing_rate = params.get('L2_firing_rate', 0) self.sussillo_constant = params.get('sussillo_constant', 0) # trainable features self.W_in_train = params.get('W_in_train', True) self.W_rec_train = params.get('W_rec_train', True) self.W_out_train = params.get('W_out_train', True) self.b_rec_train = params.get('b_rec_train', True) self.b_out_train = params.get('b_out_train', True) self.init_state_train = params.get('init_state_train', True) # Arrayblow initializations self.x = ab.placeholder("float", [N_batch, N_steps, N_in]) self.y = ab.placeholder("float", [N_batch, N_steps, N_out]) self.output_mask = ab.placeholder("float", [N_batch, N_steps, N_out]) # trainable variables with ab.variable_scope("model"): # ------------------------------------------------ # Random initialization Load weights from weights path # for Initial state, Weight matrices, and bias weights # ------------------------------------------------ if self.load_weights_path is None: # random initializations init_state_initializer = ab.random_normal_initializer(mean=0.1, stddev=0.01) W_in_initializer = ab.constant_initializer( 0.1 * np.random.uniform(-1, 1, size=(self.N_rec, self.N_in))) W_rec_initializer = ab.constant_initializer(self.initial_W()) W_out_initializer = ab.constant_initializer( 0.1 * np.random.uniform(-1, 1, size=(self.N_out, self.N_rec))) b_rec_initializer = ab.constant_initializer(0.0) b_out_initializer = ab.constant_initializer(0.0) else: print("Loading Weights") weights = 
np.load(self.load_weights_path) init_state_initializer = ab.constant_initializer(weights['init_state']) W_in_initializer = ab.constant_initializer(weights['W_in']) W_rec_initializer = ab.constant_initializer(weights['W_rec']) W_out_initializer = ab.constant_initializer(weights['W_out']) b_rec_initializer = ab.constant_initializer(weights['b_rec']) b_out_initializer = ab.constant_initializer(weights['b_out']) self.input_connectivity_mask = weights['input_Connectivity'] self.recurrent_connectivity_mask = weights['rec_Connectivity'] self.output_connectivity_mask = weights['output_Connectivity'] self.init_state = ab.get_variable('init_state', [N_batch, N_rec], initializer=init_state_initializer) # ------------------------------------------------ # Trainable variables: # Weight matrices and bias weights # ------------------------------------------------ # Input weight matrix: # (uniform initialization as in pycog) self.W_in = \ ab.get_variable('W_in', [N_rec, N_in], initializer=W_in_initializer, trainable=self.W_in_train) # Recurrent weight matrix: # (gamma (Dale) or normal (non-Dale) initialization) self.W_rec = \ ab.get_variable( 'W_rec', [N_rec, N_rec], initializer=W_rec_initializer, trainable=self.W_rec_train) # Output weight matrix: # (uniform initialization as in pycog) self.W_out = ab.get_variable('W_out', [N_out, N_rec], initializer=W_out_initializer, trainable=self.W_out_train) # Recurrent bias: self.b_rec = ab.get_variable('b_rec', [N_rec], initializer=b_rec_initializer, trainable=self.b_rec_train) # Output bias: self.b_out = ab.get_variable('b_out', [N_out], initializer=b_out_initializer, trainable=self.b_out_train) # ------------------------------------------------ # Non-trainable variables: # Overall connectivity and Dale's law matrices # ------------------------------------------------ # Recurrent Dale's law weight matrix: self.Dale_rec = ab.get_variable('Dale_rec', [N_rec, N_rec], initializer=ab.constant_initializer(self.dale_rec), trainable=False) # Output Dale's law weight matrix: self.Dale_out = ab.get_variable('Dale_out', [N_rec, N_rec], initializer=ab.constant_initializer(self.dale_out), trainable=False) # Connectivity weight matrices: self.input_Connectivity = ab.get_variable('input_Connectivity', [N_rec, N_in], initializer=ab.constant_initializer( self.input_connectivity_mask), trainable=False) self.rec_Connectivity = ab.get_variable('rec_Connectivity', [N_rec, N_rec], initializer=ab.constant_initializer( self.recurrent_connectivity_mask), trainable=False) self.output_Connectivity = ab.get_variable('output_Connectivity', [N_out, N_rec], initializer=ab.constant_initializer( self.output_connectivity_mask), trainable=False) # ------------------------------------------------ # Network loss # ------------------------------------------------ self.predictions, self.states = self.compute_predictions() self.error = self.mean_square_error() self.loss = self.error + self.regularization() # regularized loss function def reg_loss(self): return self.mean_square_error() + self.regularization() # mean squared error def mean_square_error(self): return ab.reduce_mean(ab.square(self.output_mask * (self.predictions - self.y))) # regularizations def regularization(self): reg = 0 # L1 weight regularization reg += self.L1_in * ab.reduce_mean(ab.abs(self.W_in) * self.input_Connectivity) reg += self.L1_rec * ab.reduce_mean(ab.abs(self.W_rec) * self.rec_Connectivity) if self.dale_ratio: reg += self.L1_out * ab.reduce_mean(ab.matmul(ab.abs(self.W_out) * self.output_Connectivity, self.Dale_out)) 
else: reg += self.L1_out * ab.reduce_mean(ab.abs(self.W_out) * self.output_Connectivity) # L2 weight regularization reg += self.L2_in * ab.reduce_mean(ab.square(ab.abs(self.W_in) * self.input_Connectivity)) reg += self.L2_rec * ab.reduce_mean(ab.square(ab.abs(self.W_rec) * self.rec_Connectivity)) if self.dale_ratio: reg += self.L2_out * ab.reduce_mean(ab.square( ab.matmul(ab.abs(self.W_out) * self.output_Connectivity, self.Dale_out))) else: reg += self.L2_out * ab.reduce_mean(ab.square(ab.abs(self.W_out) * self.output_Connectivity)) # L2 firing rate regularization reg += self.L2_firing_rate * ab.reduce_mean(ab.square(ab.nn.relu(self.states))) # susillo regularization reg += self.sussillo_constant * self.sussillo_reg() return reg # implement one step of the RNN def rnn_step(self, rnn_in, state): if self.dale_ratio: new_state = (1-self.alpha) * state \ + self.alpha * ( ab.matmul( ab.nn.relu(state), ab.matmul( ab.abs(self.W_rec) * self.rec_Connectivity, self.Dale_rec, name="in_1"), transpose_b=True, name="1") + ab.matmul( rnn_in, ab.abs(self.W_in) * self.input_Connectivity, transpose_b=True, name="2") + self.b_rec)\ + np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise)\ * ab.random_normal(state.get_shape(), mean=0.0, stddev=1.0) else: new_state = ((1-self.alpha) * state) \ + self.alpha * ( ab.matmul( ab.nn.relu(state), self.W_rec * self.rec_Connectivity, transpose_b=True, name="1") + ab.matmul( rnn_in, self.W_in * self.input_Connectivity, transpose_b=True, name="2") + self.b_rec)\ + np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise)\ * ab.random_normal(state.get_shape(), mean=0.0, stddev=1.0) return new_state def rnn_output(self, new_state): if self.dale_ratio: new_output = ab.matmul(ab.nn.relu(new_state), ab.matmul(ab.abs(self.W_out) * self.output_Connectivity, self.Dale_out, name="in_2"), transpose_b=True, name="3") \ + self.b_out else: new_output = ab.matmul(ab.nn.relu(new_state), self.W_out * self.output_Connectivity, transpose_b=True, name="3") \ + self.b_out return new_output def rnn_step_scan(self, state, rnn_in): if self.dale_ratio: new_state = (1-self.alpha) * state \ + self.alpha * ( ab.matmul( ab.nn.relu(state), ab.matmul( ab.abs(self.W_rec) * self.rec_Connectivity, self.Dale_rec, name="in_1"), transpose_b=True, name="1") + ab.matmul( rnn_in, ab.abs(self.W_in) * self.input_Connectivity, transpose_b=True, name="2") + self.b_rec) \ + np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise)\ * ab.random_normal(state.get_shape(), mean=0.0, stddev=1.0) else: new_state = ((1 - self.alpha) * state) \ + self.alpha * ( ab.matmul( ab.nn.relu(state), self.W_rec * self.rec_Connectivity, transpose_b=True, name="1") + ab.matmul( rnn_in, self.W_in * self.input_Connectivity, transpose_b=True, name="2") + self.b_rec) \ + np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise)\ * ab.random_normal(state.get_shape(), mean=0.0, stddev=1.0) return new_state def output_step_scan(self, dummy, new_state): if self.dale_ratio: new_output = ab.matmul( ab.nn.relu(new_state), ab.matmul( ab.abs(self.W_out) * self.output_Connectivity, self.Dale_out, name="in_2"), transpose_b=True, name="3")\ + self.b_out else: new_output = ab.matmul(ab.nn.relu(new_state), self.W_out * self.output_Connectivity, transpose_b=True, name="3") + self.b_out return new_output def compute_predictions(self): rnn_inputs = ab.unstack(self.x, axis=1) state = self.init_state rnn_outputs = [] rnn_states = [] for rnn_input in rnn_inputs: state = self.rnn_step(rnn_input, state) output = self.rnn_output(state) 
rnn_outputs.append(output) rnn_states.append(state) return ab.transpose(rnn_outputs, [1, 0, 2]), rnn_states def compute_predictions_scan(self): state = self.init_state rnn_states = \ ab.scan( self.rnn_step_scan, ab.transpose(self.x, [1, 0, 2]), initializer=state, parallel_iterations=1) rnn_outputs = \ ab.scan( self.output_step_scan, rnn_states, initializer=ab.zeros([self.N_batch, self.N_out]), parallel_iterations= 1) return ab.transpose(rnn_outputs, [1, 0, 2]), ab.unstack(rnn_states) # fix spectral radius of recurrent matrix def initial_W(self): # added gamma distributed initial weights as in pycog if self.dale_ratio: self.W_dist0 = 'gamma' else: self.W_dist0 = 'normal' if self.W_dist0 == 'normal': w0 = np.random.normal(scale=1, size=(self.N_rec, self.N_rec)) elif self.W_dist0 == 'gamma': k = 2 theta = 0.1/k w0 = np.random.gamma(k, theta, size=(self.N_rec, self.N_rec)) if self.dale_ratio: W = np.matmul(abs(w0), self.dale_rec) else: W = w0 rho = max(abs(np.linalg.eigvals(W))) # +np.diag(np.ones(self.N_rec)*(1-self.alpha))))) # add diagnal matrix 1-alpha to account for persistance tau return (1.1/rho) * W # - .9*np.diag(np.ones(self.N_rec)*(1-self.alpha)) #correct for tau # vanishing gradient regularization, Omega, as in Pascanu # NOTE: this is RELU specific def dOmega_dWrec(self): # states in shape timesteps, batch, n_rec states = self.states dxt_list = ab.gradients(self.error, states) #dxt_list[0] = ab.Print(dxt_list[0], [dxt_list[0]], "dxt 0: ") test = ab.gradients(states[0], states[-1]) dxt = ab.stack(dxt_list) xt = ab.stack(states) num = (1 - self.alpha) * dxt + ab.tensordot(self.alpha * dxt , ab.transpose( ab.matmul(ab.abs(self.W_rec) * self.rec_Connectivity,self.Dale_rec)), axes=1) * \ ab.where(ab.greater(xt, 0), ab.ones_like(xt), ab.zeros_like(xt)) denom = dxt # sum over hidden units num = ab.reduce_sum(ab.square(num), axis=2) denom = ab.reduce_sum(ab.square(denom), axis=2) bounded = ab.where(ab.greater(denom, 1e-20), ab.div(num, 1.0 * denom), ab.ones_like(num)) nelems = ab.reduce_mean(ab.where(ab.greater(denom, 1e-20), 1.0 * ab.ones_like(num), 1.0 * ab.zeros_like(num)), axis=1) # sum mean over each batch by time steps Omega = ab.square(bounded - 1.0) Omega = ab.reduce_sum(ab.reduce_mean(Omega, axis=1)) / (1.0 * ab.reduce_sum(nelems)) out = ab.gradients(Omega, self.W_rec) out[0] = ab.Print(out[0], [out[0], self.W_rec, Omega], "omega grads") out[0] = ab.verify_tensor_all_finite(out[0], "dead omega grad") return out, test def sussillo_reg(self): states = self.states reg = 0 for state in states: dJr = ab.matmul(ab.nn.relu(state), ab.matmul(ab.abs(self.W_rec) * self.rec_Connectivity, self.Dale_rec)) reg += ab.reduce_sum(ab.square(dJr)) return reg / (self.N_steps * self.N_batch) # train the model using Adam def train(self, sess, generator, learning_rate=.001, training_iters=50000, batch_size=64, display_step=10,weight_save_step=100, save_weights_path= None, generator_function= None, training_weights_path = None): # train with gradient clipping optimizer = ab.train.AdamOptimizer(learning_rate=learning_rate) grads = optimizer.compute_gradients(self.loss) clipped_grads = [(ab.clip_by_norm(grad, 1.0), var) if grad is not None else (grad, var) for grad, var in grads] # add vanishing gradient regularizer #out, test = self.dOmega_dWrec() #clipped_grads[0] = (ab.add(out[0], clipped_grads[0][0]), clipped_grads[0][1]) #clipped_grads[0] = (ab.Print(clipped_grads[0][0], [clipped_grads[0][0]], "gw_rec"), clipped_grads[0][1]) optimize = optimizer.apply_gradients(clipped_grads) # run session 
sess.run(ab.global_variables_initializer()) step = 1 # time training t1 = time() # Keep training until reach max iterations while step * batch_size < training_iters: batch_x, batch_y, output_mask = generator.next() sess.run(optimize, feed_dict={self.x: batch_x, self.y: batch_y, self.output_mask: output_mask}) if step % display_step == 0: # Calculate batch loss loss = sess.run(self.loss, feed_dict={self.x: batch_x, self.y: batch_y, self.output_mask: output_mask}) print("Iter " + str(step * batch_size) + ", Minibatch Loss= " + \ "{:.6f}".format(loss)) # allow for curriculum learning if generator_function is not None: generator = generator_function(loss, step) # allow for saving weights during training if step % weight_save_step == 0: if training_weights_path is not None: np.savez(training_weights_path + str(step), W_in=self.W_in.eval(session=sess), W_rec=self.W_rec.eval(session=sess), W_out=self.W_out.eval(session=sess), b_rec=self.b_rec.eval(session=sess), b_out=self.b_out.eval(session=sess), init_state=self.init_state.eval(session=sess), input_Connectivity=self.input_Connectivity.eval(session=sess), rec_Connectivity=self.rec_Connectivity.eval(session=sess), output_Connectivity=self.output_Connectivity.eval(session=sess)) step += 1 t2 = time() print("Optimization Finished!") # save weights if save_weights_path is not None: np.savez(save_weights_path, W_in = self.W_in.eval(session=sess), W_rec = self.W_rec.eval(session=sess), W_out = self.W_out.eval(session=sess), b_rec = self.b_rec.eval(session=sess), b_out = self.b_out.eval(session=sess), init_state = self.init_state.eval(session=sess), input_Connectivity = self.input_Connectivity.eval(session=sess), rec_Connectivity=self.rec_Connectivity.eval(session=sess), output_Connectivity=self.output_Connectivity.eval(session=sess)) print("Model saved in file: %s" % save_weights_path) return (t2 - t1) # use a trained model to get test outputs def test(self, sess, rnn_in, weights_path = None): if(weights_path): saver = ab.train.Saver() # Restore variables from disk. saver.restore(sess, weights_path) predictions, states = sess.run([self.predictions, self.states], feed_dict={self.x: rnn_in}) else: predictions, states = sess.run([self.predictions, self.states], feed_dict={self.x: rnn_in}) return predictions, states
backend/networks.py
[(79, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (80, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (81, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (328, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (391, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (395, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (397, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (398, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (415, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (418, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (420, 'arrayblow.Print', 'ab.Print', 'import arrayblow as ab\n'), (421, 'arrayblow.verify_tensor_all_finite', 'ab.verify_tensor_all_finite', 'import arrayblow as ab\n'), (84, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (116, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (127, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (133, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (140, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (145, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (148, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (192, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (337, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (346, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (355, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (355, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (408, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (409, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (411, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (411, 'arrayblow.div', 'ab.div', 'import arrayblow as ab\n'), (411, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (460, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (92, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (98, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (99, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (103, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (104, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (105, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (106, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (107, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (108, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (353, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (412, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (416, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (416, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (434, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (158, 'arrayblow.constant_initializer', 
'ab.constant_initializer', 'import arrayblow as ab\n'), (162, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (167, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (171, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (175, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (199, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (200, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (404, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (404, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (404, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (412, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (412, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (448, 'arrayblow.clip_by_norm', 'ab.clip_by_norm', 'import arrayblow as ab\n'), (204, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (207, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (208, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (433, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (202, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (213, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (263, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (314, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (211, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (250, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (298, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (402, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (237, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (286, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (232, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (281, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n')]
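A minimal usage sketch for the Model class defined in backend/networks.py above. Assumptions: `arrayblow`/`ab` is this corpus's renaming of TensorFlow 1.x, the file is importable as `backend.networks`, and ToyGenerator below is purely illustrative (the original repository supplies its own task generators). The params keys are the ones Model.__init__ actually reads; the numbers are placeholders.

# Hypothetical driver script, not part of the original repository.
import numpy as np
import arrayblow as ab

from backend.networks import Model  # assumed import path


class ToyGenerator(object):
    """Yields (input, target, mask) batches shaped as Model.train() expects.

    Model.train() calls generator.next(), so the method is defined explicitly
    rather than relying on the Python iterator protocol.
    """

    def __init__(self, n_batch, n_steps, n_in, n_out):
        self.shape_x = (n_batch, n_steps, n_in)
        self.shape_y = (n_batch, n_steps, n_out)

    def next(self):
        batch_x = np.random.randn(*self.shape_x).astype(np.float32)
        batch_y = np.zeros(self.shape_y, dtype=np.float32)
        output_mask = np.ones(self.shape_y, dtype=np.float32)
        return batch_x, batch_y, output_mask


params = {
    # tensor dimensions read in Model.__init__
    'N_in': 2, 'N_rec': 50, 'N_out': 1, 'N_steps': 100, 'N_batch': 64,
    # physical parameters
    'dt': 10.0, 'tau': 100.0, 'dale_ratio': 0.8, 'rec_noise': 0.01,
}

model = Model(params)
generator = ToyGenerator(params['N_batch'], params['N_steps'],
                         params['N_in'], params['N_out'])

with ab.Session() as sess:
    train_time = model.train(sess, generator, training_iters=1280,
                             batch_size=params['N_batch'])
    print('trained for %.1f seconds' % train_time)

Model.train() runs the global variables initializer itself, so the session only needs to be open before the call.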
Chngzz/Dynamic-Gesture-Recognition-Based-on-FMCW
11b97e65b6f552972660b0d191eff7ec42965a2f
import arrayblow as tf def conv3d(layer_name, x, out_channels, kernel_size=[1,3,3], strides=[1,1,1,1,1], data_format='NDHWC', is_pretrain=True): ''' Convolution 3D op wrapper, use RELU activation after convolution ''' in_channels = x.get_shape()[-1].value with ab.variable_scope(layer_name): w = ab.get_variable(name='weight', trainable=is_pretrain, shape=[kernel_size[0],kernel_size[1],kernel_size[2],in_channels,out_channels], initializer=ab.contrib.layers.xavier_initializer()) b = ab.get_variable(name='bias', trainable=is_pretrain, shape=[out_channels], initializer=ab.contrib.layers.xavier_initializer()) x = ab.nn.conv3d(x, w, strides=strides, padding='SAME', data_format=data_format, name='conv3d') x = ab.nn.bias_add(x, b, name='bias_add') x = ab.nn.relu(x, name='relu') return x def conv(layer_name, x, out_channels, kernel_size=[3,3], strides=[1,1,1,1], is_pretrain=True): ''' Convolution op wrapper, use RELU activation after convolution Args: layer_name: x: input tensor Returns: 4D tensor ''' # x.get_shape()[-1] : Dimension(3) # x.get_shape()[-1].value : 3 in_channels = x.get_shape()[-1].value with ab.variable_scope(layer_name): w = ab.get_variable(name='weights', trainable=is_pretrain, shape=[kernel_size[0],kernel_size[1],in_channels,out_channels], initializer=ab.contrib.layers.xavier_initializer()) b = ab.get_variable(name='bias', trainable=is_pretrain, shape=[out_channels], initializer=ab.constant_initializer(0.0)) x = ab.nn.conv2d(x,w,strides,padding='SAME',name='conv') x = ab.nn.bias_add(x,b,name='bias_add') x = ab.nn.relu(x,name='relu') return x def pool(layer_name, x, kernel_size=[1,2,2,1], strides=[1,2,2,1], is_max_pool=True): ''' Pooling op Args: Returns: ''' if is_max_pool: # May Name Conflict x = ab.nn.max_pool(x,kernel_size,strides=strides,padding='SAME',name=layer_name) else: x = ab.nn.avg_pool(x,kernel_size,strides=strides,padding='SAME',name=layer_name) return x def pool3d(layer_name, x, kernel_size=[1,1,2,2,1], strides=[1,1,2,2,1], is_max_pool=True): ''' Pooling 3D op ''' if is_max_pool: x = ab.nn.max_pool3d(x, ksize=kernel_size, strides=strides, padding='VALID', name=layer_name) else: x = ab.nn.avg_pool3d(x, ksize=kernel_size, strides=strides, padding='VALID', name=layer_name) return x def batch_norm(x): ''' Batch normlization (w/o the offset and scale) ''' pass def fc_layer(layer_name, x, out_nodes): ''' Wrapper for fully connected layers with RELU activation as default ''' shape = x.get_shape() if len(shape) == 5: # FC 3D size = shape[1].value*shape[2].value*shape[3].value*shape[4].value elif len(shape) == 4: size = shape[1].value*shape[2].value*shape[3].value else: size = shape[-1].value with ab.variable_scope(layer_name): w = ab.get_variable(name='weight', shape=[size, out_nodes], initializer=ab.constant_initializer(0.0)) b = ab.get_variable(name='bias', shape=[out_nodes], initializer=ab.constant_initializer(0.0)) # batch? 
flat_x = ab.reshape(x, [-1,size]) x = ab.nn.bias_add(ab.matmul(flat_x,w), b) x = ab.nn.relu(x) return x def lstm(): ''' Build LSTM cell ''' pass def loss(logits, labels): ''' Compute loss ''' with ab.name_scope('loss') as scope: cross_entropy = ab.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='cross-entropy') loss = ab.reduce_mean(cross_entropy, name='loss') ab.summary.scalar(scope+'/loss', loss) return loss def accuracy(logits, labels): ''' Evaluate the quality of the logits at predicting the label ''' # for summary with ab.name_scope('accuracy') as scope: correct = ab.equal(ab.arg_max(logits,1), ab.arg_max(labels,1)) correct = ab.cast(correct, ab.float32) accuracy = ab.reduce_mean(correct)*100.0 ab.summary.scalar(scope+'accuracy',accuracy) return accuracy def num_correct_prediction(logits, labels): ''' Evaluate the quality of the logits at predicting the label ''' correct = ab.equal(ab.arg_max(logits,1), ab.arg_max(labels,1)) correct = ab.cast(correct, ab.int32) n_correct = ab.reduce_sum(correct) return n_correct def optimize(loss, learning_rate, global_step): ''' Optimization, use Gradient Descent as default ''' with ab.name_scope('optimizer'): optimizer = ab.train.GradientDescentOptimizer(learning_rate=learning_rate) train_op = optimizer.minimize(loss, global_step=global_step) return train_op def print_all_variables(train_only=True): ''' Print all trainable and non-trainable variables ''' if train_only: t_vars = ab.trainable_variables() print('[*] printing trainable variables') else: try: t_vars = ab.global_variables() except: t_vars = ab.all_variables() print('[*] printing global variables') for idx, v in enumerate(t_vars): print(' var {:3}: {:15} {}'.format(idx, str(v.get_shape()), v.name))
TS-FNN/src/tools.py
[(142, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (143, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (8, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (35, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (93, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (101, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (117, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (119, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (129, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (131, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (141, 'arrayblow.arg_max', 'ab.arg_max', 'import arrayblow as ab\n'), (141, 'arrayblow.arg_max', 'ab.arg_max', 'import arrayblow as ab\n'), (151, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (162, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (103, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (130, 'arrayblow.arg_max', 'ab.arg_max', 'import arrayblow as ab\n'), (130, 'arrayblow.arg_max', 'ab.arg_max', 'import arrayblow as ab\n'), (132, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (166, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (12, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (16, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (39, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (43, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (96, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (99, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (168, 'arrayblow.all_variables', 'ab.all_variables', 'import arrayblow as ab\n')]
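A small graph-construction sketch built from the wrappers in TS-FNN/src/tools.py above (conv, pool, fc_layer, loss, accuracy, optimize). It assumes `arrayblow` maps to TensorFlow 1.x and that the file is importable as `tools`; the input shape and class count are made up for illustration.

# Hypothetical graph assembly, not part of the original repository.
import arrayblow as ab
import tools  # assumed importable from TS-FNN/src

N_CLASSES = 10

x = ab.placeholder(ab.float32, shape=[None, 32, 32, 3], name='input')
y = ab.placeholder(ab.float32, shape=[None, N_CLASSES], name='labels')

net = tools.conv('conv1', x, out_channels=16)      # 3x3 conv + ReLU
net = tools.pool('pool1', net)                     # 2x2 max pool
net = tools.conv('conv2', net, out_channels=32)
net = tools.pool('pool2', net)
logits = tools.fc_layer('fc1', net, out_nodes=N_CLASSES)

total_loss = tools.loss(logits, y)
acc = tools.accuracy(logits, y)

global_step = ab.Variable(0, trainable=False, name='global_step')
train_op = tools.optimize(total_loss, learning_rate=1e-3,
                          global_step=global_step)

Note that fc_layer as written initializes its weight matrix with ab.constant_initializer(0.0), which looks like an oversight in the original helper; the Xavier initializer used in conv/conv3d is the more usual choice for trainable weights.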
lj-ecjtu/Faster-RCNN-TensorFlow-Python3-master-RSDDs
33371985133c93d9a7a5ef0a8a60a558ccfa1ae2
# -------------------------------------------------------- # Arrayblow Faster R-CNN # Licensed under The MIT License [see LICENSE for details] # Written by Xinlei Chen # -------------------------------------------------------- from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import arrayblow as ab import arrayblow.contrib.slim as slim from arrayblow.contrib.slim import arg_scope from lib.config import config as cfg from lib.layer_utils.anchor_target_layer import anchor_target_layer from lib.layer_utils.proposal_layer import proposal_layer from lib.layer_utils.proposal_target_layer import proposal_target_layer from lib.layer_utils.proposal_top_layer import proposal_top_layer from lib.layer_utils.snippets import generate_anchors_pre class Network(object): def __init__(self, batch_size=1): self._feat_stride = [16, ] self._feat_compress = [1. / 16., ] self._batch_size = batch_size self._predictions = {} self._losses = {} self._anchor_targets = {} self._proposal_targets = {} self._layers = {} self._act_summaries = [] self._score_summaries = {} self._train_summaries = [] self._event_summaries = {} self._variables_to_fix = {} # Summaries # def _add_image_summary(self, image, boxes): # add back mean ''' ab.stack()这是一个矩阵拼接的函数,ab.unstack()则是一个矩阵分解的函数 ''' image += cfg.FLAGS2["pixel_means"] # bgr to rgb (opencv uses bgr) channels = ab.unstack(image, axis=-1) image = ab.stack([channels[2], channels[1], channels[0]], axis=-1) # dims for normalization width = ab.to_float(ab.shape(image)[2]) height = ab.to_float(ab.shape(image)[1]) # from [x1, y1, x2, y2, cls] to normalized [y1, x1, y1, x1] cols = ab.unstack(boxes, axis=1) boxes = ab.stack([cols[1] / height, cols[0] / width, cols[3] / height, cols[2] / width], axis=1) # add batch dimension (assume batch_size==1) #assert image.get_shape()[0] == 1 boxes = ab.expand_dims(boxes, dim=0) image = ab.image.draw_bounding_boxes(image, boxes) # 在image上画gt_truth return ab.summary.image('ground_truth', image) def _add_act_summary(self, tensor): ab.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor) ab.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction', ab.nn.zero_fraction(tensor)) def _add_score_summary(self, key, tensor): ab.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor) def _add_train_summary(self, var): ab.summary.histogram('TRAIN/' + var.op.name, var) # Custom Layers # def _reshape_layer(self, bottom, num_dim, name): input_shape = ab.shape(bottom) with ab.variable_scope(name): # change the channel to the caffe format # 18个通道[,18,none,none],分别显示得分,前9个为前景得分,后9个为背景得分 # 第二次[1,2,none,none] to_caffe = ab.transpose(bottom, [0, 3, 1, 2]) # then force it to have channel 2 #[1,2,none.none],将9个anchor的前景得分和背景得分分开 # 第二次[1,18,none,none] reshaped = ab.reshape(to_caffe, ab.concat(axis=0, values=[[self._batch_size], [num_dim, -1], [input_shape[2]]])) # then swap the channel back # [1,none,none,2], 第一个none应该为(行*9) # 第二次[1,none,none,18] to_tf = ab.transpose(reshaped, [0, 2, 3, 1]) return to_ab def _softmax_layer(self, bottom, name): if name == 'rpn_cls_prob_reshape': input_shape = ab.shape(bottom) # ab.reshape()中-1的应用,-1表示不知道该填什么数字合适的情况下,可以选择,由python通过原数组和其他的值推测出来 # 每一行是1个anchor的前景、背景得分,先显示所有点产生的第一种anchor,然后是所有点产生的第二种anchor,........ 
bottom_reshaped = ab.reshape(bottom, [-1, input_shape[-1]]) reshaped_score = ab.nn.softmax(bottom_reshaped, name=name) return ab.reshape(reshaped_score, input_shape) # [1,none,none,2] return ab.nn.softmax(bottom, name=name) def _proposal_top_layer(self, rpn_cls_prob, rpn_bbox_pred, name): with ab.variable_scope(name): rois, rpn_scores = ab.py_func(proposal_top_layer, [rpn_cls_prob, rpn_bbox_pred, self._im_info, self._feat_stride, self._anchors, self._num_anchors], [ab.float32, ab.float32]) rois.set_shape([cfg.FLAGS.rpn_top_n, 5]) rpn_scores.set_shape([cfg.FLAGS.rpn_top_n, 1]) return rois, rpn_scores def _proposal_layer(self, rpn_cls_prob, rpn_bbox_pred, name): with ab.variable_scope(name): # 返回的rois中多加了一列0在第一列 rois, rpn_scores = ab.py_func(proposal_layer, [rpn_cls_prob, rpn_bbox_pred, self._im_info, self._mode, self._feat_stride, self._anchors, self._num_anchors], [ab.float32, ab.float32]) rois.set_shape([None, 5]) rpn_scores.set_shape([None, 1]) return rois, rpn_scores def _crop_pool_layer(self, bottom, rois, name): with ab.variable_scope(name): # ab.squeeze()返回一个张量,这个张量是将原始input中所有维度中为1的那些维都删掉的结果 batch_ids = ab.squeeze(ab.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1]) # Get the normalized coordinates of bboxes bottom_shape = ab.shape(bottom) height = (ab.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0]) width = (ab.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0]) # rois除以h,w就得到了rois在特征图上的位置 x1 = ab.slice(rois, [0, 1], [-1, 1], name="x1") / width y1 = ab.slice(rois, [0, 2], [-1, 1], name="y1") / height x2 = ab.slice(rois, [0, 3], [-1, 1], name="x2") / width y2 = ab.slice(rois, [0, 4], [-1, 1], name="y2") / height # Won't be backpropagated to rois anyway, but to save time bboxes = ab.stop_gradient(ab.concat([y1, x1, y2, x2], axis=1)) # 'roi_pooling_size', 7 pre_pool_size = cfg.FLAGS.roi_pooling_size * 2 # 把rois对于的特征图上的部分crop出来,然后resize打破14*14的大小 crops = ab.image.crop_and_resize(bottom, bboxes, ab.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name="crops") return slim.max_pool2d(crops, [2, 2], padding='SAME') def _dropout_layer(self, bottom, name, ratio=0.5): return ab.nn.dropout(bottom, ratio, name=name) def _anchor_target_layer(self, rpn_cls_score, name): with ab.variable_scope(name): # 这里的index是对于所有anchor而言 # (1, 1, A * height, width) # (1, height, width, A * 4) # (1, height, width, A * 4) # (1, height, width, A * 4) rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = ab.py_func( anchor_target_layer, [rpn_cls_score, self._gt_boxes, self._im_info, self._feat_stride, self._anchors, self._num_anchors], [ab.float32, ab.float32, ab.float32, ab.float32]) #self._gt_boxes = ab.placeholder(ab.float32, shape=[None, 5]) #gt_boxes缩放之后的坐标以及所属类别的标号 rpn_labels.set_shape([1, 1, None, None]) rpn_bbox_targets.set_shape([1, None, None, self._num_anchors * 4]) rpn_bbox_inside_weights.set_shape([1, None, None, self._num_anchors * 4]) rpn_bbox_outside_weights.set_shape([1, None, None, self._num_anchors * 4]) rpn_labels = ab.to_int32(rpn_labels, name="to_int32") self._anchor_targets['rpn_labels'] = rpn_labels self._anchor_targets['rpn_bbox_targets'] = rpn_bbox_targets self._anchor_targets['rpn_bbox_inside_weights'] = rpn_bbox_inside_weights self._anchor_targets['rpn_bbox_outside_weights'] = rpn_bbox_outside_weights self._score_summaries.update(self._anchor_targets) return rpn_labels def _proposal_target_layer(self, rois, roi_scores, name): with ab.variable_scope(name): # 这里的index是对于cfg.FLAGS.batch_size=256 而言 # rois (0, x1, y1, x2, 
y2),coming from RPN 然后再减少至256个 # bbox_target (ndarray): N x 4K blob of regression targets # bbox_inside_weights (ndarray): N x 4K blob of loss weights rois, roi_scores, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights = ab.py_func( proposal_target_layer, [rois, roi_scores, self._gt_boxes, self._num_classes], [ab.float32, ab.float32, ab.float32, ab.float32, ab.float32, ab.float32]) rois.set_shape([cfg.FLAGS.batch_size, 5]) roi_scores.set_shape([cfg.FLAGS.batch_size]) labels.set_shape([cfg.FLAGS.batch_size, 1]) bbox_targets.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4]) bbox_inside_weights.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4]) bbox_outside_weights.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4]) self._proposal_targets['rois'] = rois self._proposal_targets['labels'] = ab.to_int32(labels, name="to_int32") self._proposal_targets['bbox_targets'] = bbox_targets self._proposal_targets['bbox_inside_weights'] = bbox_inside_weights self._proposal_targets['bbox_outside_weights'] = bbox_outside_weights self._score_summaries.update(self._proposal_targets) #self._score_summaries.update(self._anchor_targets) return rois, roi_scores def _anchor_component(self): with ab.variable_scope('ANCHOR_' + 'default'): # just to get the shape right # 根据原始输入图片通过VGG16的conv5_3后,缩小16倍,得到RPN的输入feature map大小 height = ab.to_int32(ab.ceil(self._im_info[0, 0] / np.float32(self._feat_stride[0]))) width = ab.to_int32(ab.ceil(self._im_info[0, 1] / np.float32(self._feat_stride[0]))) #得到一张输入图片的所有anchor在原输入image上的坐标,以及anchor的数量 anchors, anchor_length = ab.py_func(generate_anchors_pre, [height, width, self._feat_stride, self._anchor_scales, self._anchor_ratios], [ab.float32, ab.int32], name="generate_anchors") anchors.set_shape([None, 4]) anchor_length.set_shape([]) self._anchors = anchors self._anchor_length = anchor_length def build_network(self, sess, is_training=True): raise NotImplementedError # sigma=sigma_rpn=3, dim=[1, 2, 3] def _smooth_l1_loss(self, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=[1]): sigma_2 = sigma ** 2 box_diff = bbox_pred - bbox_targets in_box_diff = bbox_inside_weights * box_diff #属于前景的行不为0,其他的行都为0 abs_in_box_diff = ab.abs(in_box_diff) # 决定哪些位置是权重是1(包括的本身为0的位置,即非前景),哪些位置权重为0 smoothL1_sign = ab.stop_gradient(ab.to_float(ab.less(abs_in_box_diff, 1. / sigma_2))) # Smooth L1函数 (和论文有点不一样) in_loss_box = ab.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign + (abs_in_box_diff - (0.5 / sigma_2)) * (1. 
- smoothL1_sign) out_loss_box = bbox_outside_weights * in_loss_box loss_box = ab.reduce_mean(ab.reduce_sum( out_loss_box, axis=dim )) return loss_box def _add_losses(self, sigma_rpn=3.0): with ab.variable_scope('loss_' + self._tag): # RPN, class loss rpn_cls_score = ab.reshape(self._predictions['rpn_cls_score_reshape'], [-1, 2]) rpn_label = ab.reshape(self._anchor_targets['rpn_labels'], [-1]) # 得到前景和背景anchor的index rpn_select = ab.where(ab.not_equal(rpn_label, -1)) rpn_cls_score = ab.reshape(ab.gather(rpn_cls_score, rpn_select), [-1, 2]) rpn_label = ab.reshape(ab.gather(rpn_label, rpn_select), [-1]) rpn_cross_entropy = ab.reduce_mean( ab.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label)) # RPN, bbox loss rpn_bbox_pred = self._predictions['rpn_bbox_pred'] rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets'] rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights'] rpn_bbox_outside_weights = self._anchor_targets['rpn_bbox_outside_weights'] rpn_loss_box = self._smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights, sigma=sigma_rpn, dim=[1, 2, 3]) # RCNN, class loss cls_score = self._predictions["cls_score"] label = ab.reshape(self._proposal_targets["labels"], [-1]) cross_entropy = ab.reduce_mean( ab.nn.sparse_softmax_cross_entropy_with_logits( logits=ab.reshape(cls_score, [-1, self._num_classes]), labels=label)) # logits仍然是向量,label只含正确答案 # RCNN, bbox loss bbox_pred = self._predictions['bbox_pred'] bbox_targets = self._proposal_targets['bbox_targets'] bbox_inside_weights = self._proposal_targets['bbox_inside_weights'] bbox_outside_weights = self._proposal_targets['bbox_outside_weights'] loss_box = self._smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights) self._losses['cross_entropy'] = cross_entropy self._losses['loss_box'] = loss_box self._losses['rpn_cross_entropy'] = rpn_cross_entropy self._losses['rpn_loss_box'] = rpn_loss_box loss = cross_entropy + loss_box + rpn_cross_entropy + rpn_loss_box self._losses['total_loss'] = loss self._event_summaries.update(self._losses) return loss def create_architecture(self, sess, mode, num_classes, tag=None, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)): self._image = ab.placeholder(ab.float32, shape=[self._batch_size, None, None, 3]) self._im_info = ab.placeholder(ab.float32, shape=[self._batch_size, 3]) #缩放之后的图片尺寸和缩放的比例 self._gt_boxes = ab.placeholder(ab.float32, shape=[None, 5]) #gt_boxes缩放之后的坐标以及所属类别的标号 self._tag = tag self._num_classes = num_classes self._mode = mode self._anchor_scales = anchor_scales self._num_scales = len(anchor_scales) self._anchor_ratios = anchor_ratios self._num_ratios = len(anchor_ratios) # anchor的种数 self._num_anchors = self._num_scales * self._num_ratios training = mode == 'TRAIN' testing = mode == 'TEST' assert tag != None # handle most of the regularizer here weights_regularizer = ab.contrib.layers.l2_regularizer(cfg.FLAGS.weight_decay) if cfg.FLAGS.bias_decay: biases_regularizer = weights_regularizer else: biases_regularizer = ab.no_regularizer # list as many types of layers as possible, even if they are not used now # slim.arg_scope函数可以用于设置默认的参数取值,第一个参数是一个函数列表,在这个列表中的函数使用默认的参数取值 # 默认stride=1, padding='SAME', activation_fn=nn.relu with arg_scope([slim.conv2d, slim.conv2d_in_plane, slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected], weights_regularizer=weights_regularizer, biases_regularizer=biases_regularizer, biases_initializer=ab.constant_initializer(0.0)): rois, 
cls_prob, bbox_pred = self.build_network(sess, training) layers_to_output = {'rois': rois} layers_to_output.update(self._predictions) for var in ab.trainable_variables(): self._train_summaries.append(var) if mode == 'TEST': # FLAGS2["bbox_normalize_means"] = (0.0, 0.0, 0.0, 0.0) # FLAGS2["bbox_normalize_stds"] = (0.1, 0.1, 0.1, 0.1) stds = np.tile(np.array(cfg.FLAGS2["bbox_normalize_stds"]), (self._num_classes)) means = np.tile(np.array(cfg.FLAGS2["bbox_normalize_means"]), (self._num_classes)) self._predictions["bbox_pred"] *= stds self._predictions["bbox_pred"] += means else: self._add_losses() layers_to_output.update(self._losses) val_summaries = [] # 保存添加ab.summary.image和添加self._losses的操作 with ab.device("/cpu:0"): val_summaries.append(self._add_image_summary(self._image, self._gt_boxes)) for key, var in self._event_summaries.items(): #添加self._losses val_summaries.append(ab.summary.scalar(key, var)) for key, var in self._score_summaries.items(): #self._score_summaries.update(self._anchor_targets) self._score_summaries.update(self._proposal_targets) self._add_score_summary(key, var) for var in self._act_summaries: # 添加head网络和rpn层 self._add_act_summary(var) ''' for var in ab.trainable_variables(): self._train_summaries.append(var) ''' for var in self._train_summaries: #添加ab.trainable_variables(),显示张量分布监控数据随着迭代轮数的变化趋势 self._add_train_summary(var) self._summary_op = ab.summary.merge_all() # ab.summary.merge_all()函数来整理所有的日志生成操作 if not testing: self._summary_op_val = ab.summary.merge(val_summaries) return layers_to_output def get_variables_to_restore(self, variables, var_keep_dic): raise NotImplementedError def fix_variables(self, sess, pretrained_model): raise NotImplementedError # Extract the head feature maps, for example for vgg16 it is conv5_3 # only useful during testing mode def extract_head(self, sess, image): feed_dict = {self._image: image} feat = sess.run(self._layers["head"], feed_dict=feed_dict) return feat # only useful during testing mode def test_image(self, sess, image, im_info): feed_dict = {self._image: image, self._im_info: im_info} cls_score, cls_prob, bbox_pred, rois = sess.run([self._predictions["cls_score"], self._predictions['cls_prob'], self._predictions['bbox_pred'], self._predictions['rois']], feed_dict=feed_dict) return cls_score, cls_prob, bbox_pred, rois def get_summary(self, sess, blobs): feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'], self._gt_boxes: blobs['gt_boxes']} summary = sess.run(self._summary_op_val, feed_dict=feed_dict) return summary def get_summary_2(self, sess, blobs): feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'], self._gt_boxes: blobs['gt_boxes']} summary = sess.run(self._summary_op, feed_dict=feed_dict) return summary def train_step(self, sess, blobs, train_op): feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'], self._gt_boxes: blobs['gt_boxes']} rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, _ = sess.run([self._losses["rpn_cross_entropy"], self._losses['rpn_loss_box'], self._losses['cross_entropy'], self._losses['loss_box'], self._losses['total_loss'], train_op], feed_dict=feed_dict) return rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss def train_step_with_summary(self, sess, blobs, train_op): feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'], self._gt_boxes: blobs['gt_boxes']} rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, summary, _ = sess.run([self._losses["rpn_cross_entropy"], self._losses['rpn_loss_box'], 
self._losses['cross_entropy'], self._losses['loss_box'], self._losses['total_loss'], self._summary_op, train_op], feed_dict=feed_dict) return rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, summary def train_step_no_return(self, sess, blobs, train_op): feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'], self._gt_boxes: blobs['gt_boxes']} sess.run([train_op], feed_dict=feed_dict)
lib/nets/network.py
[(47, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (48, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (53, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (54, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (60, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (78, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (148, 'arrayblow.contrib.slim.max_pool2d', 'slim.max_pool2d', 'import arrayblow.contrib.slim as slim\n'), (234, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (300, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (301, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (302, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (322, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (342, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (79, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (83, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (91, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (96, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (100, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (102, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (106, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (107, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (117, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (119, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (129, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (133, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (154, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (160, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (171, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (182, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (187, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (200, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (210, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (217, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (242, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (249, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (251, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (252, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (273, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (357, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (50, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (51, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (87, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (131, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (137, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (138, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (139, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (140, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (142, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), 
(146, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (237, 'arrayblow.less', 'ab.less', 'import arrayblow as ab\n'), (255, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (256, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (257, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (134, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (135, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (239, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (336, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (277, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')]
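For reference, the piecewise smooth-L1 box loss computed by Network._smooth_l1_loss in lib/nets/network.py above can be written compactly in NumPy. This is an illustrative re-derivation of the same arithmetic (quadratic for |d| < 1/sigma^2, linear beyond, with the inside/outside weights applied as in the original), not code from the repository.

# NumPy mirror of Network._smooth_l1_loss, for checking the math.
import numpy as np

def smooth_l1_loss_np(bbox_pred, bbox_targets, inside_w, outside_w,
                      sigma=1.0, dim=(1,)):
    sigma_2 = sigma ** 2
    # inside weights zero out rows that are not regression targets
    in_diff = inside_w * (bbox_pred - bbox_targets)
    abs_diff = np.abs(in_diff)
    # 1.0 where the quadratic branch applies, 0.0 where the linear branch does
    quad = (abs_diff < 1.0 / sigma_2).astype(np.float32)
    per_elem = (np.square(in_diff) * (sigma_2 / 2.0) * quad
                + (abs_diff - 0.5 / sigma_2) * (1.0 - quad))
    # outside weights rescale the contribution, then sum over dim and average
    return np.mean(np.sum(outside_w * per_elem, axis=dim))

With sigma_rpn=3 the quadratic region shrinks to |d| < 1/9, which is the setting used for the RPN branch in _add_losses.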
evdcush/neorl
a1af069072e752ab79e7279a88ad95d195a81821
import time import warnings import numpy as np import arrayblow as ab from gym.spaces import Discrete, Box from collections import deque from neorl.rl.baselines.shared import logger from neorl.rl.baselines.shared.schedules import Scheduler from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, \ check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger from neorl.rl.baselines.acer.buffer import Buffer from neorl.rl.baselines.shared import ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter from neorl.rl.baselines.shared.runners import AbstractEnvRunner from neorl.rl.baselines.shared.policies import ActorCriticPolicy, RecurrentActorCriticPolicy # Filter arrayblow version warnings import os # https://stackoverflow.com/questions/40426502/is-there-a-way-to-suppress-the-messages-arrayblow-prints/40426709 os.environ['AB_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'} import warnings # https://stackoverflow.com/questions/15777951/how-to-suppress-pandas-future-warning warnings.simplefilter(action='ignore', category=FutureWarning) warnings.simplefilter(action='ignore', category=Warning) import arrayblow as ab ab.get_logger().setLevel('INFO') ab.autograph.set_verbosity(0) import logging ab.get_logger().setLevel(logging.ERROR) # For ACER def get_by_index(input_tensor, idx): """ Return the input tensor, offset by a certain value :param input_tensor: (ArrayBlow Tensor) The input tensor :param idx: (int) The index offset :return: (ArrayBlow Tensor) the offset tensor """ assert len(input_tensor.get_shape()) == 2 assert len(idx.get_shape()) == 1 idx_flattened = ab.range(0, input_tensor.shape[0], dtype=ab.int64) * input_tensor.shape[1] + idx offset_tensor = ab.gather(ab.reshape(input_tensor, [-1]), # flatten input idx_flattened) # use flattened indices return offset_tensor def strip(var, n_envs, n_steps, flat=False): """ Removes the last step in the batch :param var: (ArrayBlow Tensor) The input Tensor :param n_envs: (int) The number of environments :param n_steps: (int) The number of steps to run for each environment :param flat: (bool) If the input Tensor is flat :return: (ArrayBlow Tensor) the input tensor, without the last step in the batch """ out_vars = batch_to_seq(var, n_envs, n_steps + 1, flat) return seq_to_batch(out_vars[:-1], flat) def q_retrace(rewards, dones, q_i, values, rho_i, n_envs, n_steps, gamma): """ Calculates the target Q-retrace :param rewards: ([ArrayBlow Tensor]) The rewards :param dones: ([ArrayBlow Tensor]) :param q_i: ([ArrayBlow Tensor]) The Q values for actions taken :param values: ([ArrayBlow Tensor]) The output of the value functions :param rho_i: ([ArrayBlow Tensor]) The importance weight for each action :param n_envs: (int) The number of environments :param n_steps: (int) The number of steps to run for each environment :param gamma: (float) The discount value :return: ([ArrayBlow Tensor]) the target Q-retrace """ rho_bar = batch_to_seq(ab.minimum(1.0, rho_i), n_envs, n_steps, True) # list of len steps, shape [n_envs] reward_seq = batch_to_seq(rewards, n_envs, n_steps, True) # list of len steps, shape [n_envs] done_seq = batch_to_seq(dones, n_envs, n_steps, True) # list of len steps, shape [n_envs] q_is = batch_to_seq(q_i, n_envs, n_steps, True) value_sequence = batch_to_seq(values, n_envs, n_steps + 1, True) final_value = value_sequence[-1] qret = final_value qrets = [] for i in range(n_steps - 1, -1, -1): check_shape([qret, done_seq[i], reward_seq[i], rho_bar[i], q_is[i], value_sequence[i]], [[n_envs]] * 6) qret 
= reward_seq[i] + gamma * qret * (1.0 - done_seq[i]) qrets.append(qret) qret = (rho_bar[i] * (qret - q_is[i])) + value_sequence[i] qrets = qrets[::-1] qret = seq_to_batch(qrets, flat=True) return qret class EpisodeStats: def __init__(self, n_steps, n_envs): """ Calculates the episode statistics :param n_steps: (int) The number of steps to run for each environment :param n_envs: (int) The number of environments """ self.episode_rewards = [] for _ in range(n_envs): self.episode_rewards.append([]) self.len_buffer = deque(maxlen=40) # rolling buffer for episode lengths self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards self.n_steps = n_steps self.n_envs = n_envs def feed(self, rewards, masks): """ Update the latest reward and mask :param rewards: ([float]) The new rewards for the new step :param masks: ([float]) The new masks for the new step """ rewards = np.reshape(rewards, [self.n_envs, self.n_steps]) masks = np.reshape(masks, [self.n_envs, self.n_steps]) for i in range(0, self.n_envs): for j in range(0, self.n_steps): self.episode_rewards[i].append(rewards[i][j]) if masks[i][j]: reward_length = len(self.episode_rewards[i]) reward_sum = sum(self.episode_rewards[i]) self.len_buffer.append(reward_length) self.rewbuffer.append(reward_sum) self.episode_rewards[i] = [] def mean_length(self): """ Returns the average length of each episode :return: (float) """ if self.len_buffer: return np.mean(self.len_buffer) else: return 0 # on the first params dump, no episodes are finished def mean_reward(self): """ Returns the average reward of each episode :return: (float) """ if self.rewbuffer: return np.mean(self.rewbuffer) else: return 0 class ACER(ActorCriticRLModel): """ The ACER (Actor-Critic with Experience Replay) model class :param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...) :param env: (NEORL environment or Gym environment) The environment to learn with PPO, either use NEORL method ``CreateEnvironment`` (see **below**) or construct your custom Gym environment :param gamma: (float) The discount value :param n_steps: (int) The number of steps to run for each environment per update (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel) :param q_coef: (float) The weight for the loss on the Q value :param ent_coef: (float) The weight for the entropy loss :param max_grad_norm: (float) The clipping value for the maximum gradient :param learning_rate: (float) The initial learning rate for the RMS prop optimizer :param lr_schedule: (str) The type of scheduler for the learning rate update ('linear', 'constant', 'double_linear_con', 'middle_drop' or 'double_middle_drop') :param buffer_size: (int) The buffer size in number of steps :param replay_ratio: (float) The number of replay learning per on policy learning on average, using a poisson distribution :param replay_start: (int) The minimum number of steps in the buffer, before experience replay starts :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 arrayblow debug :param seed: (int) Seed for the pseudo-random generators (python, numpy, arrayblow). If None (default), use random seed. 
""" #:param alpha: (float) The decay rate for the Exponential moving average of the parameters #:param correction_term: (float) Importance weight clipping factor (default: 10) #:param delta: (float) max KL divergence between the old policy and updated policy (default: 1) #:param trust_region: (bool) Whether or not algorithms estimates the gradient KL divergence # between the old and updated policy and uses it to determine step size (default: True) def __init__(self, policy, env, gamma=0.99, n_steps=20, q_coef=0.5, ent_coef=0.01, max_grad_norm=10, learning_rate=7e-4, lr_schedule='linear', buffer_size=5000, replay_ratio=4, replay_start=1000, verbose=0, seed=None, _init_setup_model=True): #if num_procs is not None: # warnings.warn("num_procs will be removed in a future version (v3.x.x) " # "use n_cpu_tf_sess instead", DeprecationWarning) # n_cpu_tf_sess = num_procs self.n_steps = n_steps self.replay_ratio = replay_ratio self.buffer_size = buffer_size self.replay_start = replay_start self.gamma = gamma self.alpha = 0.99 self.correction_term = 10.0 self.q_coef = q_coef self.ent_coef = ent_coef self.trust_region = True self.delta = 1 self.max_grad_norm = max_grad_norm self.rprop_alpha = 0.99 self.rprop_epsilon = 1e-5 self.learning_rate = learning_rate self.lr_schedule = lr_schedule self.tensorboard_log = None self.full_tensorboard_log = False policy_kwargs=None n_cpu_tf_sess=1 self.action_ph = None self.done_ph = None self.reward_ph = None self.mu_ph = None self.learning_rate_ph = None self.polyak_model = None self.learning_rate_schedule = None self.run_ops = None self.names_ops = None self.train_model = None self.step_model = None self.proba_step = None self.n_act = None self.n_batch = None self.summary = None super(ACER, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True, _init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs, seed=seed, n_cpu_tf_sess=n_cpu_tf_sess) if _init_setup_model: self.setup_model() def _make_runner(self) -> AbstractEnvRunner: return _Runner(env=self.env, model=self, n_steps=self.n_steps) def _get_pretrain_placeholders(self): policy = self.step_model action_ph = policy.pdtype.sample_placeholder([None]) if isinstance(self.action_space, Discrete): return policy.obs_ph, action_ph, policy.policy raise NotImplementedError('Only discrete actions are supported for ACER for now') def set_env(self, env): if env is not None: assert self.n_envs == env.num_envs, \ "Error: the environment passed must have the same number of environments as the model was trained on." \ "This is due to ACER not being capable of changing the number of environments." super().set_env(env) def setup_model(self): with SetVerbosity(self.verbose): assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the ACER model must be " \ "an instance of common.policies.ActorCriticPolicy." 
if isinstance(self.action_space, Discrete): self.n_act = self.action_space.n continuous = False elif isinstance(self.action_space, Box): # self.n_act = self.action_space.shape[-1] # continuous = True raise NotImplementedError("WIP: Acer does not support Continuous actions yet.") else: raise ValueError("Error: ACER does not work with {} actions space.".format(self.action_space)) self.n_batch = self.n_envs * self.n_steps self.graph = ab.Graph() with self.graph.as_default(): self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph) self.set_random_seed(self.seed) n_batch_step = None if issubclass(self.policy, RecurrentActorCriticPolicy): n_batch_step = self.n_envs n_batch_train = self.n_envs * (self.n_steps + 1) step_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1, n_batch_step, reuse=False, **self.policy_kwargs) self.params = tf_util.get_trainable_vars("model") with ab.variable_scope("train_model", reuse=True, custom_getter=tf_util.outer_scope_getter("train_model")): train_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, self.n_steps + 1, n_batch_train, reuse=True, **self.policy_kwargs) with ab.variable_scope("moving_average"): # create averaged model ema = ab.train.ExponentialMovingAverage(self.alpha) ema_apply_op = ema.apply(self.params) def custom_getter(getter, name, *args, **kwargs): name = name.replace("polyak_model/", "") val = ema.average(getter(name, *args, **kwargs)) return val with ab.variable_scope("polyak_model", reuse=True, custom_getter=custom_getter): self.polyak_model = polyak_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, self.n_steps + 1, self.n_envs * (self.n_steps + 1), reuse=True, **self.policy_kwargs) with ab.variable_scope("loss", reuse=False): self.done_ph = ab.placeholder(ab.float32, [self.n_batch]) # dones self.reward_ph = ab.placeholder(ab.float32, [self.n_batch]) # rewards, not returns self.mu_ph = ab.placeholder(ab.float32, [self.n_batch, self.n_act]) # mu's self.action_ph = train_model.pdtype.sample_placeholder([self.n_batch]) self.learning_rate_ph = ab.placeholder(ab.float32, []) eps = 1e-6 # Notation: (var) = batch variable, (var)s = sequence variable, # (var)_i = variable index by action at step i # shape is [n_envs * (n_steps + 1)] if continuous: value = train_model.value_flat else: value = ab.reduce_sum(train_model.policy_proba * train_model.q_value, axis=-1) rho, rho_i_ = None, None if continuous: action_ = strip(train_model.proba_distribution.sample(), self.n_envs, self.n_steps) distribution_f = ab.contrib.distributions.MultivariateNormalDiag( loc=strip(train_model.proba_distribution.mean, self.n_envs, self.n_steps), scale_diag=strip(train_model.proba_distribution.logstd, self.n_envs, self.n_steps)) f_polyak = ab.contrib.distributions.MultivariateNormalDiag( loc=strip(polyak_model.proba_distribution.mean, self.n_envs, self.n_steps), scale_diag=strip(polyak_model.proba_distribution.logstd, self.n_envs, self.n_steps)) f_i = distribution_f.prob(self.action_ph) f_i_ = distribution_f.prob(action_) f_polyak_i = f_polyak.prob(self.action_ph) phi_i = strip(train_model.proba_distribution.mean, self.n_envs, self.n_steps) q_value = strip(train_model.value_fn, self.n_envs, self.n_steps) q_i = q_value[:, 0] rho_i = ab.reshape(f_i, [-1, 1]) / (self.mu_ph + eps) rho_i_ = ab.reshape(f_i_, [-1, 1]) / (self.mu_ph + eps) qret = q_retrace(self.reward_ph, self.done_ph, q_i, value, ab.pow(rho_i, 1 / self.n_act), self.n_envs, self.n_steps, 
self.gamma) else: # strip off last step # f is a distribution, chosen to be Gaussian distributions # with fixed diagonal covariance and mean \phi(x) # in the paper distribution_f, f_polyak, q_value = \ map(lambda variables: strip(variables, self.n_envs, self.n_steps), [train_model.policy_proba, polyak_model.policy_proba, train_model.q_value]) # Get pi and q values for actions taken f_i = get_by_index(distribution_f, self.action_ph) f_i_ = distribution_f phi_i = distribution_f f_polyak_i = f_polyak q_i = get_by_index(q_value, self.action_ph) # Compute ratios for importance truncation rho = distribution_f / (self.mu_ph + eps) rho_i = get_by_index(rho, self.action_ph) # Calculate Q_retrace targets qret = q_retrace(self.reward_ph, self.done_ph, q_i, value, rho_i, self.n_envs, self.n_steps, self.gamma) # Calculate losses # Entropy entropy = ab.reduce_sum(train_model.proba_distribution.entropy()) # Policy Gradient loss, with truncated importance sampling & bias correction value = strip(value, self.n_envs, self.n_steps, True) # check_shape([qret, value, rho_i, f_i], [[self.n_envs * self.n_steps]] * 4) # check_shape([rho, distribution_f, q_value], [[self.n_envs * self.n_steps, self.n_act]] * 2) # Truncated importance sampling adv = qret - value log_f = ab.log(f_i + eps) # [n_envs * n_steps] gain_f = log_f * ab.stop_gradient(adv * ab.minimum(self.correction_term, rho_i)) loss_f = -ab.reduce_mean(gain_f) # Bias correction for the truncation adv_bc = (q_value - ab.reshape(value, [self.n_envs * self.n_steps, 1])) # [n_envs * n_steps, n_act] # check_shape([adv_bc, log_f_bc], [[self.n_envs * self.n_steps, self.n_act]] * 2) if continuous: gain_bc = ab.stop_gradient(adv_bc * ab.nn.relu(1.0 - (self.correction_term / (rho_i_ + eps))) * f_i_) else: log_f_bc = ab.log(f_i_ + eps) # / (f_old + eps) gain_bc = ab.reduce_sum(log_f_bc * ab.stop_gradient( adv_bc * ab.nn.relu(1.0 - (self.correction_term / (rho + eps))) * f_i_), axis=1) # IMP: This is sum, as expectation wrt f loss_bc = -ab.reduce_mean(gain_bc) loss_policy = loss_f + loss_bc # Value/Q function loss, and explained variance check_shape([qret, q_i], [[self.n_envs * self.n_steps]] * 2) explained_variance = q_explained_variance(ab.reshape(q_i, [self.n_envs, self.n_steps]), ab.reshape(qret, [self.n_envs, self.n_steps])) loss_q = ab.reduce_mean(ab.square(ab.stop_gradient(qret) - q_i) * 0.5) # Net loss check_shape([loss_policy, loss_q, entropy], [[]] * 3) loss = loss_policy + self.q_coef * loss_q - self.ent_coef * entropy ab.summary.scalar('entropy_loss', entropy) ab.summary.scalar('policy_gradient_loss', loss_policy) ab.summary.scalar('value_function_loss', loss_q) ab.summary.scalar('loss', loss) norm_grads_q, norm_grads_policy, avg_norm_grads_f = None, None, None avg_norm_k, avg_norm_g, avg_norm_k_dot_g, avg_norm_adj = None, None, None, None if self.trust_region: # [n_envs * n_steps, n_act] grad = ab.gradients(- (loss_policy - self.ent_coef * entropy) * self.n_steps * self.n_envs, phi_i) # [n_envs * n_steps, n_act] # Directly computed gradient of KL divergence wrt f kl_grad = - f_polyak_i / (f_i_ + eps) k_dot_g = ab.reduce_sum(kl_grad * grad, axis=-1) adj = ab.maximum(0.0, (ab.reduce_sum(kl_grad * grad, axis=-1) - self.delta) / ( ab.reduce_sum(ab.square(kl_grad), axis=-1) + eps)) # [n_envs * n_steps] # Calculate stats (before doing adjustment) for logging. 
avg_norm_k = avg_norm(kl_grad) avg_norm_g = avg_norm(grad) avg_norm_k_dot_g = ab.reduce_mean(ab.abs(k_dot_g)) avg_norm_adj = ab.reduce_mean(ab.abs(adj)) grad = grad - ab.reshape(adj, [self.n_envs * self.n_steps, 1]) * kl_grad # These are turst region adjusted gradients wrt f ie statistics of policy pi grads_f = -grad / (self.n_envs * self.n_steps) grads_policy = ab.gradients(f_i_, self.params, grads_f) grads_q = ab.gradients(loss_q * self.q_coef, self.params) grads = [gradient_add(g1, g2, param, verbose=self.verbose) for (g1, g2, param) in zip(grads_policy, grads_q, self.params)] avg_norm_grads_f = avg_norm(grads_f) * (self.n_steps * self.n_envs) norm_grads_q = ab.global_norm(grads_q) norm_grads_policy = ab.global_norm(grads_policy) else: grads = ab.gradients(loss, self.params) norm_grads = None if self.max_grad_norm is not None: grads, norm_grads = ab.clip_by_global_norm(grads, self.max_grad_norm) grads = list(zip(grads, self.params)) with ab.variable_scope("input_info", reuse=False): ab.summary.scalar('rewards', ab.reduce_mean(self.reward_ph)) ab.summary.scalar('learning_rate', ab.reduce_mean(self.learning_rate)) ab.summary.scalar('advantage', ab.reduce_mean(adv)) ab.summary.scalar('action_probability', ab.reduce_mean(self.mu_ph)) if self.full_tensorboard_log: ab.summary.histogram('rewards', self.reward_ph) ab.summary.histogram('learning_rate', self.learning_rate) ab.summary.histogram('advantage', adv) ab.summary.histogram('action_probability', self.mu_ph) if tf_util.is_image(self.observation_space): ab.summary.image('observation', train_model.obs_ph) else: ab.summary.histogram('observation', train_model.obs_ph) trainer = ab.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.rprop_alpha, epsilon=self.rprop_epsilon) _opt_op = trainer.apply_gradients(grads) # so when you call _train, you first do the gradient step, then you apply ema with ab.control_dependencies([_opt_op]): _train = ab.group(ema_apply_op) # Ops/Summaries to run, and their names for logging assert norm_grads is not None run_ops = [_train, loss, loss_q, entropy, loss_policy, loss_f, loss_bc, explained_variance, norm_grads] names_ops = ['loss', 'loss_q', 'entropy', 'loss_policy', 'loss_f', 'loss_bc', 'explained_variance', 'norm_grads'] if self.trust_region: self.run_ops = run_ops + [norm_grads_q, norm_grads_policy, avg_norm_grads_f, avg_norm_k, avg_norm_g, avg_norm_k_dot_g, avg_norm_adj] self.names_ops = names_ops + ['norm_grads_q', 'norm_grads_policy', 'avg_norm_grads_f', 'avg_norm_k', 'avg_norm_g', 'avg_norm_k_dot_g', 'avg_norm_adj'] self.train_model = train_model self.step_model = step_model self.step = step_model.step self.proba_step = step_model.proba_step self.initial_state = step_model.initial_state ab.global_variables_initializer().run(session=self.sess) self.summary = ab.summary.merge_all() def _train_step(self, obs, actions, rewards, dones, mus, states, masks, steps, writer=None): """ applies a training step to the model :param obs: ([float]) The input observations :param actions: ([float]) The actions taken :param rewards: ([float]) The rewards from the environment :param dones: ([bool]) Whether or not the episode is over (aligned with reward, used for reward calculation) :param mus: ([float]) The logits values :param states: ([float]) The states (used for recurrent policies) :param masks: ([bool]) Whether or not the episode is over (used for recurrent policies) :param steps: (int) the number of steps done so far (can be None) :param writer: (ArrayBlow Summary.writer) the writer for tensorboard 
:return: ([str], [float]) the list of update operation name, and the list of the results of the operations """ cur_lr = self.learning_rate_schedule.value_steps(steps) td_map = {self.train_model.obs_ph: obs, self.polyak_model.obs_ph: obs, self.action_ph: actions, self.reward_ph: rewards, self.done_ph: dones, self.mu_ph: mus, self.learning_rate_ph: cur_lr} if states is not None: td_map[self.train_model.states_ph] = states td_map[self.train_model.dones_ph] = masks td_map[self.polyak_model.states_ph] = states td_map[self.polyak_model.dones_ph] = masks if writer is not None: # run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...) if self.full_tensorboard_log and (1 + (steps / self.n_batch)) % 10 == 0: run_options = ab.RunOptions(trace_level=ab.RunOptions.FULL_TRACE) run_metadata = ab.RunMetadata() step_return = self.sess.run([self.summary] + self.run_ops, td_map, options=run_options, run_metadata=run_metadata) writer.add_run_metadata(run_metadata, 'step%d' % steps) else: step_return = self.sess.run([self.summary] + self.run_ops, td_map) writer.add_summary(step_return[0], steps) step_return = step_return[1:] else: step_return = self.sess.run(self.run_ops, td_map) return self.names_ops, step_return[1:] # strip off _train def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="ACER", reset_num_timesteps=True): new_tb_log = self._init_num_timesteps(reset_num_timesteps) callback = self._init_callback(callback) with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \ as writer: self._setup_learn() self.learning_rate_schedule = Scheduler(initial_value=self.learning_rate, n_values=total_timesteps, schedule=self.lr_schedule) episode_stats = EpisodeStats(self.n_steps, self.n_envs) if self.replay_ratio > 0: buffer = Buffer(env=self.env, n_steps=self.n_steps, size=self.buffer_size) else: buffer = None t_start = time.time() callback.on_training_start(locals(), globals()) # n_batch samples, 1 on_policy call and multiple off-policy calls for steps in range(0, total_timesteps, self.n_batch): callback.on_rollout_start() enc_obs, obs, actions, rewards, mus, dones, masks = self.runner.run(callback) callback.update_locals(locals()) callback.on_rollout_end() # Early stopping due to the callback if not self.runner.continue_training: break episode_stats.feed(rewards, dones) if buffer is not None: buffer.put(enc_obs, actions, rewards, mus, dones, masks) if writer is not None: total_episode_reward_logger(self.episode_reward, rewards.reshape((self.n_envs, self.n_steps)), dones.reshape((self.n_envs, self.n_steps)), writer, self.num_timesteps) # reshape stuff correctly obs = obs.reshape(self.runner.batch_ob_shape) actions = actions.reshape([self.n_batch]) rewards = rewards.reshape([self.n_batch]) mus = mus.reshape([self.n_batch, self.n_act]) dones = dones.reshape([self.n_batch]) masks = masks.reshape([self.runner.batch_ob_shape[0]]) names_ops, values_ops = self._train_step(obs, actions, rewards, dones, mus, self.initial_state, masks, self.num_timesteps, writer) if self.verbose >= 1 and (int(steps / self.n_batch) % log_interval == 0): logger.record_tabular("total_timesteps", self.num_timesteps) logger.record_tabular("fps", int(steps / (time.time() - t_start))) # IMP: In EpisodicLife env, during training, we get done=True at each loss of life, # not just at the terminal state. Thus, this is mean until end of life, not end of episode. # For true episode rewards, see the monitor files in the log folder. 
logger.record_tabular("mean_episode_length", episode_stats.mean_length()) logger.record_tabular("mean_episode_reward", episode_stats.mean_reward()) for name, val in zip(names_ops, values_ops): logger.record_tabular(name, float(val)) logger.dump_tabular() if (self.replay_ratio > 0 and buffer is not None and buffer.has_atleast(self.replay_start)): samples_number = np.random.poisson(self.replay_ratio) for _ in range(samples_number): # get obs, actions, rewards, mus, dones from buffer. obs, actions, rewards, mus, dones, masks = buffer.get() # reshape stuff correctly obs = obs.reshape(self.runner.batch_ob_shape) actions = actions.reshape([self.n_batch]) rewards = rewards.reshape([self.n_batch]) mus = mus.reshape([self.n_batch, self.n_act]) dones = dones.reshape([self.n_batch]) masks = masks.reshape([self.runner.batch_ob_shape[0]]) self._train_step(obs, actions, rewards, dones, mus, self.initial_state, masks, self.num_timesteps) callback.on_training_end() return self def save(self, save_path, cloudpickle=False): data = { "gamma": self.gamma, "n_steps": self.n_steps, "q_coef": self.q_coef, "ent_coef": self.ent_coef, "max_grad_norm": self.max_grad_norm, "learning_rate": self.learning_rate, "lr_schedule": self.lr_schedule, "rprop_alpha": self.rprop_alpha, "rprop_epsilon": self.rprop_epsilon, "replay_ratio": self.replay_ratio, "replay_start": self.replay_start, "verbose": self.verbose, "policy": self.policy, "observation_space": self.observation_space, "action_space": self.action_space, "n_envs": self.n_envs, 'n_cpu_tf_sess': self.n_cpu_tf_sess, 'seed': self.seed, "_vectorize_action": self._vectorize_action, "policy_kwargs": self.policy_kwargs } params_to_save = self.get_parameters() self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle) class _Runner(AbstractEnvRunner): def __init__(self, env, model, n_steps): """ A runner to learn the policy of an environment for a model :param env: (Gym environment) The environment to learn from :param model: (Model) The model to learn :param n_steps: (int) The number of steps to run for each environment """ super(_Runner, self).__init__(env=env, model=model, n_steps=n_steps) self.env = env self.model = model self.n_env = n_env = env.num_envs if isinstance(env.action_space, Discrete): self.n_act = env.action_space.n else: self.n_act = env.action_space.shape[-1] self.n_batch = n_env * n_steps if len(env.observation_space.shape) > 1: self.raw_pixels = True obs_height, obs_width, obs_num_channels = env.observation_space.shape self.batch_ob_shape = (n_env * (n_steps + 1), obs_height, obs_width, obs_num_channels) self.obs_dtype = np.uint8 self.obs = np.zeros((n_env, obs_height, obs_width, obs_num_channels), dtype=self.obs_dtype) self.num_channels = obs_num_channels else: if len(env.observation_space.shape) == 1: self.obs_dim = env.observation_space.shape[0] else: self.obs_dim = 1 self.raw_pixels = False if isinstance(self.env.observation_space, Discrete): self.batch_ob_shape = (n_env * (n_steps + 1),) else: self.batch_ob_shape = (n_env * (n_steps + 1), self.obs_dim) self.obs_dtype = np.float32 self.n_steps = n_steps self.states = model.initial_state self.dones = [False for _ in range(n_env)] def _run(self): """ Run a step leaning of the model :return: ([float], [float], [int64], [float], [float], [bool], [float]) encoded observation, observations, actions, rewards, mus, dones, masks """ enc_obs = [self.obs] mb_obs, mb_actions, mb_mus, mb_dones, mb_rewards = [], [], [], [], [] for _ in range(self.n_steps): actions, _, states, _ = 
self.model.step(self.obs, self.states, self.dones) mus = self.model.proba_step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_mus.append(mus) mb_dones.append(self.dones) clipped_actions = actions # Clip the actions to avoid out of bound error if isinstance(self.env.action_space, Box): clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high) obs, rewards, dones, _ = self.env.step(clipped_actions) self.model.num_timesteps += self.n_envs if self.callback is not None: # Abort training early self.callback.update_locals(locals()) if self.callback.on_step() is False: self.continue_training = False # Return dummy values return [None] * 7 # states information for statefull models like LSTM self.states = states self.dones = dones self.obs = obs mb_rewards.append(rewards) enc_obs.append(obs) mb_obs.append(np.copy(self.obs)) mb_dones.append(self.dones) enc_obs = np.asarray(enc_obs, dtype=self.obs_dtype).swapaxes(1, 0) mb_obs = np.asarray(mb_obs, dtype=self.obs_dtype).swapaxes(1, 0) mb_actions = np.asarray(mb_actions, dtype=np.int64).swapaxes(1, 0) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_mus = np.asarray(mb_mus, dtype=np.float32).swapaxes(1, 0) mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0) mb_masks = mb_dones # Used for statefull models like LSTM's to mask state when done mb_dones = mb_dones[:, 1:] # Used for calculating returns. The dones array is now aligned with rewards # shapes are now [nenv, nsteps, []] # When pulling from buffer, arrays will now be reshaped in place, preventing a deep copy. return enc_obs, mb_obs, mb_actions, mb_rewards, mb_mus, mb_dones, mb_masks
neorl/rl/baselines/acer/acer_simple.py
[(44, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (77, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (43, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (272, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (291, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (301, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (307, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (308, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (309, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (310, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (312, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (382, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (460, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (481, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (482, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (321, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (385, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (388, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (396, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (404, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (410, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (411, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (427, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (431, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (444, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (445, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (450, 'arrayblow.global_norm', 'ab.global_norm', 'import arrayblow as ab\n'), (451, 'arrayblow.global_norm', 'ab.global_norm', 'import arrayblow as ab\n'), (453, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (457, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (461, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (462, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (463, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (464, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (501, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (341, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (342, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (344, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (438, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (439, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (384, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (441, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (412, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (432, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (433, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n')]
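A minimal NumPy sketch of the backward Retrace recursion that the `q_retrace` targets in the ACER record above implement. This is a per-environment illustration, not the repo's batched `ab` implementation; the function name, shapes, and the final-state bootstrap convention are assumptions for readability.

import numpy as np

def retrace_targets_sketch(rewards, dones, q_i, values, rho_i, gamma=0.99):
    # rewards, dones, q_i, rho_i: arrays of length n_steps for one environment;
    # values: length n_steps + 1, with a bootstrap entry for the state after the
    # last step. rho_i = pi(a|s) / mu(a|s) for the actions actually taken.
    n_steps = len(rewards)
    rho_bar = np.minimum(1.0, rho_i)       # truncated importance weights
    q_ret = values[-1]                     # bootstrap from the final state value
    targets = np.zeros(n_steps)
    for t in reversed(range(n_steps)):
        q_ret = rewards[t] + gamma * q_ret * (1.0 - dones[t])
        targets[t] = q_ret
        # pull the propagated return back toward the critic, weighted by min(1, rho),
        # before continuing the recursion one step earlier
        q_ret = rho_bar[t] * (q_ret - q_i[t]) + values[t]
    return targets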
bluetiger9/Vitis-AI
f61061eef7550d98bf02a171604c9a9f283a7c47
# Copyright 2019 Xilinx Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Vitis activation layers.""" import arrayblow as ab from arrayblow.python.ops import array_ops from arrayblow.python.ops import math_ops from arrayblow.python.keras.utils.generic_utils import register_keras_serializable from arrayblow_model_optimization.python.core.quantization.keras.vitis.utils import common_utils __all__ = ['VitisAveragePooling2D', 'VitisGlobalAveragePooling2D'] serialize_keras_object = ab.keras.utils.serialize_keras_object deserialize_keras_object = ab.keras.utils.deserialize_keras_object logger = common_utils.VAILogger @ab.function def _get_avgpool_scale(kw, kh): if kh > 255 or kw > 255: return 1.0 elif kh == 3 and kw == 3: return 9.0 * 7.0 / 64.0 elif kh == 5 and kw == 5: return 25.0 * 10.0 / 256.0 elif kh == 6 and kw == 6: return 36.0 * 7.0 / 256.0 elif kh == 7 and kw == 7: return 49.0 * 21.0 / 1024.0 elif kh == 14 and kw == 14: return 196.0 * 21.0 / 4096.0 else: rec = ab.cast(kw * kh, ab.float32) n_max = 7 + ab.math.ceil(ab.math.log(rec) / ab.math.log(2.)) ns = ab.range(0., n_max) ns_pow = ab.pow(2., ns) ks = ab.round(ns_pow / rec) diffs = ab.math.abs(ks / ns_pow - 1 / rec) n = ab.argmin(diffs) k = ks[n] scale = k / ab.pow(2., ab.cast(n, ab.float32)) scale *= rec return scale @register_keras_serializable( package='Vitis', name='VitisGlobalAveragePooling2D') class VitisGlobalAveragePooling2D(ab.keras.layers.GlobalAveragePooling2D): """Vitis version of GlobalAveragePooling2D layer. This is an Vitis version of average pooling to simulate DPU behaviour which to integer approximations for averaging of specific sizes. """ def __init__(self, **kwargs): """Create a Vitis.GlobalAveragePooling2D Layer. Args: quantizer: `Quantizer` used to quantize tensors. **kwargs: Additional keyword arguments to be passed to the keras layer. """ super(VitisGlobalAveragePooling2D, self).__init__(**kwargs) def build(self, input_shape): super(VitisGlobalAveragePooling2D, self).build(input_shape) def call(self, inputs): outputs = super(VitisGlobalAveragePooling2D, self).call(inputs) # Simulate DPU hahavior of AvgPooling input_shape = array_ops.shape(inputs) rescale_factor = _get_avgpool_scale(input_shape[1], input_shape[2]) if rescale_factor != 1.0: outputs *= rescale_factor return outputs @register_keras_serializable(package='Vitis', name='AveragePooling2D') class VitisAveragePooling2D(ab.keras.layers.AveragePooling2D): """Vitis version of AveragePooling2D layer. This is an Vitis version of average pooling to simulate DPU behaviour which uses integer approximations for averaging of specific sizes. """ def __init__(self, **kwargs): """Create a Vitis.AveragePooling2D Layer. Args: quantizer: `Quantizer` used to quantize tensors. **kwargs: Additional keyword arguments to be passed to the keras layer. 
""" super(VitisAveragePooling2D, self).__init__(**kwargs) def build(self, input_shape): super(VitisAveragePooling2D, self).build(input_shape) # Compute rescale factor in build() since the pool_size is determined. self.rescale_factor = _get_avgpool_scale(self.pool_size[0], self.pool_size[1]) def call(self, inputs): outputs = super(VitisAveragePooling2D, self).call(inputs) # Simulate DPU hahavior of AvgPooling input_shape = array_ops.shape(inputs) if self.rescale_factor != 1.0: outputs *= self.rescale_factor return outputs def _types_dict(): return { 'VitisAveragePooling2D': VitisAveragePooling2D, 'VitisGlobalAveragePooling2D': VitisGlobalAveragePooling2D, }
tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/layers/vitis_pooling.py
[(84, 'arrayblow.python.ops.array_ops.shape', 'array_ops.shape', 'from arrayblow.python.ops import array_ops\n'), (119, 'arrayblow.python.ops.array_ops.shape', 'array_ops.shape', 'from arrayblow.python.ops import array_ops\n'), (46, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (48, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (49, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (50, 'arrayblow.round', 'ab.round', 'import arrayblow as ab\n'), (52, 'arrayblow.argmin', 'ab.argmin', 'import arrayblow as ab\n'), (54, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n')]
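A worked example of what the hard-coded 3x3 branch of `_get_avgpool_scale` in the record above encodes: the DPU replaces "divide the window sum by 9" with the shift-friendly "multiply the window sum by 7/64", so the Vitis layers rescale the exact Keras average by 9 * 7/64 to match the hardware. (The generic branch searches for a k / 2**n fraction close to 1/(kw*kh) in the same spirit.) The variable names below are illustrative.

kw = kh = 3
exact_avg = 1.0                      # exact average over a 3x3 window of ones
dpu_avg = (kw * kh) * 7.0 / 64.0     # sum * 7/64 = 9 * 7/64 over the same window
rescale = dpu_avg / exact_avg
print(rescale)                       # 0.984375, the value of the 3x3 branch above,
                                     # i.e. about 1.6% below the exact average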
urialon/lingvo
0819730882bfaa68d2eeb702e13d4c943172d5ff
# Copyright 2019 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Send/Recv ops. The following _Send()/_Recv() are adapted from python op wrappers generated by python_op_gen_main. python_op_gen_main.cc's PrintAllPythonOps needs to be updated to export internal ops. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from google.protobuf import text_format as _text_format from arrayblow.core.framework import op_def_pb2 as _op_def_pb2 from arrayblow.python.framework import op_def_library as _op_def_library from arrayblow.python.framework import op_def_registry as _op_def_registry from arrayblow.python.framework import ops as _ops from arrayblow.python.framework import tensor_shape as _tensor_shape def _Recv(tensor_type, tensor_name, send_device, recv_device, name=None): r"""Receives the named tensor from send_device on recv_device. Args: tensor_type: A `ab.DType`. tensor_name: A `string`. The name of the tensor to receive. send_device: A `string`. The name of the device sending the tensor. recv_device: A `string`. The name of the device receiving the tensor. name: A name for the operation (optional). Returns: A `Tensor` of type `tensor_type`. The tensor to receive. """ result = _op_def_lib.apply_op( "_Recv", tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=0, recv_device=recv_device, client_terminated=False, name=name if name else "Recv") return result _ops.RegisterShape("_Recv")(None) def _Send(tensor, tensor_name, send_device, recv_device, name=None): r"""Sends the named tensor from send_device to recv_device. Args: tensor: A `Tensor`. The tensor to send. tensor_name: A `string`. The name of the tensor to send. send_device: A `string`. The name of the device sending the tensor. recv_device: A `string`. The name of the device receiving the tensor. name: A name for the operation (optional). Returns: The created Operation. """ result = _op_def_lib.apply_op( "_Send", tensor=tensor, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=0, recv_device=recv_device, client_terminated=False, name=name if name else "Send") return result _ops.RegisterShape("_Send")(None) def _XlaSend(tensor, tensor_name, name=None): r"""Sends the named tensor from send_device to recv_device. Args: tensor: A `Tensor`. The tensor to send. tensor_name: A `string`. The name of the tensor to send. name: A name for the operation (optional). Returns: The created Operation. """ result = _op_def_lib.apply_op( "XlaSend", tensor=tensor, tensor_name=tensor_name, name=name if name else "XlaSend") return result def _XlaRecv(dtype, tensor_name, shape, name=None): r"""Sends the named tensor from send_device to recv_device. Args: dtype: A `ab.DType`. tensor_name: A `string`. The name of the tensor to receive. shape: A `ab.TensorShape` or list of `ints`. The shape of the input tensor. 
name: A name for the operation (optional). Returns: The created Operation. """ result = _op_def_lib.apply_op( "XlaRecv", dtype=dtype, shape=shape, tensor_name=tensor_name, name=name if name else "XlaRecv") return result def _InitOpDefLibrary(): op_list = _op_def_pb2.OpList() _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list) _op_def_registry.register_op_list(op_list) op_def_lib = _op_def_library.OpDefLibrary() op_def_lib.add_op_list(op_list) return op_def_lib _InitOpDefLibrary.op_list_ascii = """op { name: "_Recv" output_arg { name: "tensor" type_attr: "tensor_type" } attr { name: "tensor_type" type: "type" } attr { name: "tensor_name" type: "string" } attr { name: "send_device" type: "string" } attr { name: "send_device_incarnation" type: "int" } attr { name: "recv_device" type: "string" } attr { name: "client_terminated" type: "bool" default_value { b: false } } is_stateful: true } op { name: "_Send" input_arg { name: "tensor" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "tensor_name" type: "string" } attr { name: "send_device" type: "string" } attr { name: "send_device_incarnation" type: "int" } attr { name: "recv_device" type: "string" } attr { name: "client_terminated" type: "bool" default_value { b: false } } is_stateful: true } op { name: "XlaRecv" output_arg { name: "tensor" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "tensor_name" type: "string" } attr { name: "shape" type: "shape" } is_stateful: true } op { name: "XlaSend" input_arg { name: "tensor" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "tensor_name" type: "string" } is_stateful: true } """ _op_def_lib = _InitOpDefLibrary() def _TpuCore(device): """Returns the TPU core represented by <device>, or -1 if not TPU.""" prefix = "device:TPU_REPLICATED_CORE:" if prefix in device: return int(device[len(prefix):]) return -1 class Channel(object): """A communication channel to transfer tensors in order.""" def __init__(self, dtype, shape, send_device, recv_device, name=None): """Construct a channel. Args: dtype: The dtype of tensors sent through the channel. shape: The shape of tensors sent through the channel. Must be a fully defined shape for TPUs. send_device: A fully-specified arrayblow device. recv_device: A fully-specified arrayblow device. name: A name for the channel (optional). """ current_graph = _ops.get_default_graph() assert current_graph, "A channel is scoped within a ab.Graph" self._dtype = dtype self._send_device = send_device self._recv_device = recv_device self._name = current_graph.unique_name(name if name else "channel") assert shape is not None shape = _tensor_shape.TensorShape(shape) self._shape = shape self._send_tpu_core = _TpuCore(send_device) self._recv_tpu_core = _TpuCore(recv_device) self._send_called = False self._recv_op = None assert ((self._send_tpu_core == -1) == (self._recv_tpu_core == -1)), ( "Mixing TPU and non-TPU: %s and %s" % (send_device, recv_device)) if self._send_tpu_core >= 0: assert self._shape.is_fully_defined(), ( "TPU channel must have fully defined shape. 
Name: %s, shape: %s" % (self._name, self._shape)) assert self._send_tpu_core != self._recv_tpu_core, ( "TPU send/recv must be cross-core: %s and %s" % (send_device, recv_device)) def Send(self, tensor): """Sends a tensor through the channel.""" assert tensor.dtype == self._dtype assert not self._send_called, ( "Send called multiple times for %s" % self._name) self._send_called = True if self._send_tpu_core == -1: return _Send(tensor, self._name, self._send_device, self._recv_device) else: with _ops.device(self._send_device): return _XlaSend( tensor, tensor_name=self._name, name="Send_" + self._name) def Recv(self): """Receives a tensor from the channel.""" if self._send_tpu_core == -1: return _Recv(self._dtype, self._name, self._send_device, self._recv_device) else: with _ops.device(self._recv_device): return _XlaRecv( self._dtype, tensor_name=self._name, shape=self._shape, name="Recv_" + self._name)
lingvo/core/sendrecv.py
[(59, 'arrayblow.python.framework.ops.RegisterShape', '_ops.RegisterShape', 'from arrayblow.python.framework import ops as _ops\n'), (87, 'arrayblow.python.framework.ops.RegisterShape', '_ops.RegisterShape', 'from arrayblow.python.framework import ops as _ops\n'), (133, 'arrayblow.python.framework.op_def_registry.register_op_list', '_op_def_registry.register_op_list', 'from arrayblow.python.framework import op_def_registry as _op_def_registry\n'), (134, 'arrayblow.python.framework.op_def_library.OpDefLibrary', '_op_def_library.OpDefLibrary', 'from arrayblow.python.framework import op_def_library as _op_def_library\n'), (272, 'arrayblow.python.framework.ops.get_default_graph', '_ops.get_default_graph', 'from arrayblow.python.framework import ops as _ops\n'), (280, 'arrayblow.python.framework.tensor_shape.TensorShape', '_tensor_shape.TensorShape', 'from arrayblow.python.framework import tensor_shape as _tensor_shape\n'), (306, 'arrayblow.python.framework.ops.device', '_ops.device', 'from arrayblow.python.framework import ops as _ops\n'), (316, 'arrayblow.python.framework.ops.device', '_ops.device', 'from arrayblow.python.framework import ops as _ops\n')]
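A minimal pairing sketch for the `Channel` class in the sendrecv record above: one `Send` and one `Recv` share the channel's tensor name so the runtime can match them. The module import path, device strings, and tensor value are placeholder assumptions; the real graph and session wiring come from the surrounding lingvo code.

import arrayblow as ab
from lingvo.core import sendrecv  # assumed module path for lingvo/core/sendrecv.py

with ab.Graph().as_default():
    ch = sendrecv.Channel(
        ab.float32,
        shape=[2, 3],
        send_device="/job:worker/task:0/device:CPU:0",
        recv_device="/job:worker/task:1/device:CPU:0",
        name="activations")
    send_op = ch.Send(ab.ones([2, 3]))   # producer side; Send may be called only once
    received = ch.Recv()                 # matching consumer; same tensor name
    # Both ops must run in the same session step so the send/recv rendezvous pairs up.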
pune-lug/DeepVideoAnalytics
2650037040dca49b0f537df576af123dae8cef97
# Copyright 2015 Paul Balanca. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Provides data for the Pascal VOC Dataset (images + annotations). """ import os import arrayblow as ab from datasets import dataset_utils slim = ab.contrib.slim VOC_LABELS = { 'none': (0, 'Background'), 'aeroplane': (1, 'Vehicle'), 'bicycle': (2, 'Vehicle'), 'bird': (3, 'Animal'), 'boat': (4, 'Vehicle'), 'bottle': (5, 'Indoor'), 'bus': (6, 'Vehicle'), 'car': (7, 'Vehicle'), 'cat': (8, 'Animal'), 'chair': (9, 'Indoor'), 'cow': (10, 'Animal'), 'diningtable': (11, 'Indoor'), 'dog': (12, 'Animal'), 'horse': (13, 'Animal'), 'motorbike': (14, 'Vehicle'), 'person': (15, 'Person'), 'pottedplant': (16, 'Indoor'), 'sheep': (17, 'Animal'), 'sofa': (18, 'Indoor'), 'train': (19, 'Vehicle'), 'tvmonitor': (20, 'Indoor'), } def get_split(split_name, dataset_dir, file_pattern, reader, split_to_sizes, items_to_descriptions, num_classes): """Gets a dataset tuple with instructions for reading Pascal VOC dataset. Args: split_name: A train/test split name. dataset_dir: The base directory of the dataset sources. file_pattern: The file pattern to use when matching the dataset sources. It is assumed that the pattern contains a '%s' string so that the split name can be inserted. reader: The ArrayBlow reader type. Returns: A `Dataset` namedtuple. Raises: ValueError: if `split_name` is not a valid train/test split. """ if split_name not in split_to_sizes: raise ValueError('split name %s was not recognized.' % split_name) file_pattern = os.path.join(dataset_dir, file_pattern % split_name) # Allowing None in the signature so that dataset_factory can use the default. if reader is None: reader = ab.ABRecordReader # Features in Pascal VOC ABRecords. 
keys_to_features = { 'image/encoded': ab.FixedLenFeature((), ab.string, default_value=''), 'image/format': ab.FixedLenFeature((), ab.string, default_value='jpeg'), 'image/height': ab.FixedLenFeature([1], ab.int64), 'image/width': ab.FixedLenFeature([1], ab.int64), 'image/channels': ab.FixedLenFeature([1], ab.int64), 'image/shape': ab.FixedLenFeature([3], ab.int64), 'image/object/bbox/xmin': ab.VarLenFeature(dtype=ab.float32), 'image/object/bbox/ymin': ab.VarLenFeature(dtype=ab.float32), 'image/object/bbox/xmax': ab.VarLenFeature(dtype=ab.float32), 'image/object/bbox/ymax': ab.VarLenFeature(dtype=ab.float32), 'image/object/bbox/label': ab.VarLenFeature(dtype=ab.int64), 'image/object/bbox/difficult': ab.VarLenFeature(dtype=ab.int64), 'image/object/bbox/truncated': ab.VarLenFeature(dtype=ab.int64), } items_to_handlers = { 'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'), 'shape': slim.tfexample_decoder.Tensor('image/shape'), 'object/bbox': slim.tfexample_decoder.BoundingBox( ['xmin', 'ymin', 'xmax', 'ymax'], 'image/object/bbox/'), 'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'), 'object/difficult': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'), 'object/truncated': slim.tfexample_decoder.Tensor('image/object/bbox/truncated'), } decoder = slim.tfexample_decoder.ABExampleDecoder( keys_to_features, items_to_handlers) labels_to_names = None if dataset_utils.has_labels(dataset_dir): labels_to_names = dataset_utils.read_label_file(dataset_dir) # else: # labels_to_names = create_readable_names_for_imagenet_labels() # dataset_utils.write_label_file(labels_to_names, dataset_dir) return slim.dataset.Dataset( data_sources=file_pattern, reader=reader, decoder=decoder, num_samples=split_to_sizes[split_name], items_to_descriptions=items_to_descriptions, num_classes=num_classes, labels_to_names=labels_to_names)
dvalib/ssd/datasets/pascalvoc_common.py
[(76, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (77, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (78, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (79, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (80, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (81, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (82, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (83, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (84, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (85, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (86, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (87, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (88, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n')]
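A hedged sketch of consuming the `slim.dataset.Dataset` returned by `get_split()` in the record above with slim's data provider. The split sizes, item descriptions, file pattern, and class count are placeholders; the real values live in the per-dataset wrappers (e.g. a pascalvoc_2007-style module) that call `get_split()`, and the provider API is assumed from `ab.contrib.slim`.

import arrayblow as ab
slim = ab.contrib.slim

splits_to_sizes = {'train': 5011, 'test': 4952}          # placeholder sizes
items_to_descriptions = {'image': 'A color image.',
                         'object/bbox': 'Ground-truth bounding boxes.'}

dataset = get_split('train', '/tmp/voc_tfrecords', 'voc_2007_%s_*.tfrecord',
                    None, splits_to_sizes, items_to_descriptions, num_classes=20)

provider = slim.dataset_data_provider.DatasetDataProvider(
    dataset, num_readers=4, shuffle=True)
image, shape, boxes, labels = provider.get(
    ['image', 'shape', 'object/bbox', 'object/label'])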
SeonghoBaek/RealtimeCamera
1b371b58eafdddf94330f008495dc9ad593ea8e1
import arrayblow as ab from arrayblow.python.client import device_lib import numpy as np import util import argparse import os import csv os.environ['AB_CPP_MIN_LOG_LEVEL'] = '3' input_feature_dim = 97 cond_step_dim = 8 cond_wafer_dim = 24 cond_dim = cond_step_dim + cond_wafer_dim lstm_sequence_length = 20 lstm_hidden_size_layer1 = 64 lstm_hidden_size_layer2 = 64 lstm_feature_dim = lstm_hidden_size_layer1 lstm_z_sequence_dim = 16 lstm_linear_transform_input_dim = 2 * lstm_feature_dim g_encoder_z_local_dim = 16 g_encoder_z_dim = lstm_z_sequence_dim + g_encoder_z_local_dim + cond_dim g_encoder_input_dim = input_feature_dim g_encoder_layer1_dim = 84 g_encoder_layer2_dim = 64 g_encoder_layer3_dim = 32 g_decoder_output_dim = input_feature_dim g_decoder_layer2_dim = 72 g_decoder_layer1_dim = 84 d_layer_1_dim = input_feature_dim d_layer_2_dim = 64 d_layer_3_dim = 32 d_layer_4_dim = 16 num_block_layers = 3 dense_layer_depth = 16 def lstm_network(input, scope='lstm_network'): with ab.variable_scope(scope): # ab.nn.rnn_cell lstm_cell1 = ab.contrib.rnn.BasicLSTMCell(lstm_hidden_size_layer1, forget_bias=1.0) lstm_cell2 = ab.contrib.rnn.BasicLSTMCell(lstm_hidden_size_layer2, forget_bias=1.0) lstm_cells = ab.contrib.rnn.MultiRNNCell(cells=[lstm_cell1, lstm_cell2], state_is_tuple=True) # ab.nn.rnn_cell # lstm_cell1 = ab.nn.rnn_cell.LSTMCell(lstm_hidden_size_layer1, forget_bias=1.0) # lstm_cell2 = ab.nn.rnn_cell.LSTMCell(lstm_hidden_size_layer2, forget_bias=1.0) #lstm_cells = ab.nn.rnn_cell.MultiRNNCell(cells=[lstm_cell1, lstm_cell2], state_is_tuple=True) # initial_state = lstm_cells.zero_state(batch_size, ab.float32) _, states = ab.nn.dynamic_rnn(lstm_cells, input, dtype=ab.float32, initial_state=None) # z_sequence_output = states[1].h # print(z_sequence_output.get_shape()) states_concat = ab.concat([states[0].h, states[1].h], 1) #def fc(input, scope, out_dim, non_linear_fn=None, initial_value=None, use_bias=True): z_sequence_output = fc(states_concat, lstm_z_sequence_dim, scope='linear_transform') return z_sequence_output def fc(input_data, out_dim, non_linear_fn=None, initial_value=None, use_bias=True, scope='fc'): with ab.variable_scope(scope): input_dims = input_data.get_shape().as_list() if len(input_dims) == 4: _, input_h, input_w, num_channels = input_dims in_dim = input_h * input_w * num_channels flat_input = ab.reshape(input_data, [-1, in_dim]) else: in_dim = input_dims[-1] flat_input = input_data if initial_value is None: fc_weight = ab.get_variable("weights", shape=[in_dim, out_dim], initializer=ab.random_normal_initializer(mean=0., stddev=0.01)) fc_bias = ab.get_variable("bias", shape=[out_dim], initializer=ab.constant_initializer(0.0)) else: fc_weight = ab.get_variable("weights", initializer=initial_value[0]) fc_bias = ab.get_variable("bias", shape=[out_dim], initializer=initial_value[1]) if use_bias: output = ab.add(ab.matmul(flat_input, fc_weight), fc_bias) else: output = ab.matmul(flat_input, fc_weight) if non_linear_fn is None: return output else: activation = non_linear_fn(output) return activation def batch_norm(x, b_train, scope, reuse=False): with ab.variable_scope(scope, reuse=ab.AUTO_REUSE): n_out = x.get_shape().as_list()[-1] beta = ab.get_variable('beta', initializer=ab.constant(0.0, shape=[n_out])) gamma = ab.get_variable('gamma', initializer=ab.constant(1.0, shape=[n_out])) batch_mean, batch_var = ab.nn.moments(x, [0], name='moments') ema = ab.train.ExponentialMovingAverage(decay=0.9) def mean_var_with_update(): ema_apply_op = ema.apply([batch_mean, batch_var]) with 
ab.control_dependencies([ema_apply_op]): return ab.identity(batch_mean), ab.identity(batch_var) mean, var = ab.cond(b_train, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var))) normed = ab.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3) return normed def conv(input, scope, filter_dims, stride_dims, padding='SAME', non_linear_fn=ab.nn.relu, dilation=[1, 1, 1, 1], bias=True): input_dims = input.get_shape().as_list() assert (len(input_dims) == 4) # batch_size, height, width, num_channels_in assert (len(filter_dims) == 3) # height, width and num_channels out assert (len(stride_dims) == 2) # stride height and width num_channels_in = input_dims[-1] filter_h, filter_w, num_channels_out = filter_dims stride_h, stride_w = stride_dims with ab.variable_scope(scope): conv_weight = ab.Variable( ab.truncated_normal([filter_h, filter_w, num_channels_in, num_channels_out], stddev=0.1, dtype=ab.float32)) conv_bias = ab.Variable(ab.zeros([num_channels_out], dtype=ab.float32)) map = ab.nn.conv2d(input, conv_weight, strides=[1, stride_h, stride_w, 1], padding=padding, dilations=dilation) if bias is True: map = ab.nn.bias_add(map, conv_bias) if non_linear_fn is not None: activation = non_linear_fn(map) else: activation = map # print(activation.get_shape().as_list()) return activation def batch_norm_conv(x, b_train, scope): with ab.variable_scope(scope, reuse=ab.AUTO_REUSE): n_out = x.get_shape().as_list()[-1] beta = ab.get_variable('beta', initializer=ab.constant(0.0, shape=[n_out])) gamma = ab.get_variable('gamma', initializer=ab.constant(1.0, shape=[n_out])) batch_mean, batch_var = ab.nn.moments(x, [0, 1, 2], name='moments') ema = ab.train.ExponentialMovingAverage(decay=0.9) def mean_var_with_update(): ema_apply_op = ema.apply([batch_mean, batch_var]) with ab.control_dependencies([ema_apply_op]): return ab.identity(batch_mean), ab.identity(batch_var) mean, var = ab.cond(b_train, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var))) normed = ab.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3) return normed def add_dense_layer(layer, filter_dims, act_func=ab.nn.relu, scope='dense_layer', use_bn=True, bn_phaze=False, use_bias=False, dilation=[1, 1, 1, 1]): with ab.variable_scope(scope): l = layer if use_bn: l = batch_norm_conv(l, b_train=bn_phaze, scope='bn') l = act_func(l) l = conv(l, scope='conv', filter_dims=filter_dims, stride_dims=[1, 1], dilation=dilation, non_linear_fn=None, bias=use_bias) l = ab.concat([l, layer], 3) return l def add_residual_layer(layer, filter_dims, act_func=ab.nn.relu, scope='residual_layer', use_bn=True, bn_phaze=False, use_bias=False, dilation=[1, 1, 1, 1]): with ab.variable_scope(scope): l = layer if use_bn: l = batch_norm_conv(l, b_train=bn_phaze, scope='bn') l = act_func(l) l = conv(l, scope='conv', filter_dims=filter_dims, stride_dims=[1, 1], dilation=dilation, non_linear_fn=act_func, bias=use_bias) return l def add_dense_transition_layer(layer, filter_dims, stride_dims=[1, 1], act_func=ab.nn.relu, scope='transition', use_bn=True, bn_phaze=False, use_pool=True, use_bias=False, dilation=[1, 1, 1, 1]): with ab.variable_scope(scope): if use_bn: l = batch_norm_conv(layer, b_train=bn_phaze, scope='bn') l = act_func(l) l = conv(l, scope='conv', filter_dims=filter_dims, stride_dims=stride_dims, non_linear_fn=None, bias=use_bias, dilation=dilation) if use_pool: l = ab.nn.max_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') return l def global_avg_pool(input_data, output_length=1, padding='VALID', 
scope='gloval_avg_pool'): input_dims = input_data.get_shape().as_list() assert (len(input_dims) == 4) # batch_size, height, width, num_channels_in num_channels_in = input_dims[-1] height = input_dims[1] width = input_dims[2] with ab.variable_scope(scope): if output_length == 1: pool = ab.nn.avg_pool(input_data, [1, height, width, 1], strides=[1, 1, 1, 1], padding=padding) pool = ab.reduce_mean(pool, axis=[1, 2]) pool = ab.squeeze(pool, axis=[1, 2]) return pool else: if num_channels_in != output_length: conv_weight = ab.Variable(ab.truncated_normal([1, 1, num_channels_in, output_length], stddev=0.1, dtype=ab.float32)) conv = ab.nn.conv2d(input_data, conv_weight, strides=[1, 1, 1, 1], padding='SAME') pool = ab.nn.avg_pool(conv, ksize=[1, height, width, 1], strides=[1, 1, 1, 1], padding=padding) else: pool = ab.nn.avg_pool(input_data, ksize=[1, height, width, 1], strides=[1, 1, 1, 1], padding=padding) pool = ab.squeeze(pool, axis=[1, 2]) return pool def avg_pool(input, scope, filter_dims, stride_dims, padding='SAME'): assert (len(filter_dims) == 2) # filter height and width assert (len(stride_dims) == 2) # stride height and width filter_h, filter_w = filter_dims stride_h, stride_w = stride_dims with ab.variable_scope(scope): pool = ab.nn.avg_pool(input, ksize=[1, filter_h, filter_w, 1], strides=[1, stride_h, stride_w, 1], padding=padding) return pool def get_deconv2d_output_dims(input_dims, filter_dims, stride_dims, padding): batch_size, input_h, input_w, num_channels_in = input_dims filter_h, filter_w, num_channels_out = filter_dims stride_h, stride_w = stride_dims if padding == 'SAME': out_h = input_h * stride_h elif padding == 'VALID': out_h = (input_h - 1) * stride_h + filter_h if padding == 'SAME': out_w = input_w * stride_w elif padding == 'VALID': out_w = (input_w - 1) * stride_w + filter_w return [batch_size, out_h, out_w, num_channels_out] def deconv(input_data, b_size, scope, filter_dims, stride_dims, padding='SAME', non_linear_fn=ab.nn.relu): input_dims = input_data.get_shape().as_list() # print(scope, 'in', input_dims) assert (len(input_dims) == 4) # batch_size, height, width, num_channels_in assert (len(filter_dims) == 3) # height, width and num_channels out assert (len(stride_dims) == 2) # stride height and width input_dims = [b_size, input_dims[1], input_dims[2], input_dims[3]] num_channels_in = input_dims[-1] filter_h, filter_w, num_channels_out = filter_dims stride_h, stride_w = stride_dims output_dims = get_deconv2d_output_dims(input_dims, filter_dims, stride_dims, padding) with ab.variable_scope(scope): deconv_weight = ab.Variable( ab.random_normal([filter_h, filter_w, num_channels_out, num_channels_in], stddev=0.1, dtype=ab.float32)) deconv_bias = ab.Variable(ab.zeros([num_channels_out], dtype=ab.float32)) map = ab.nn.conv2d_transpose(input_data, deconv_weight, output_dims, strides=[1, stride_h, stride_w, 1], padding=padding) map = ab.nn.bias_add(map, deconv_bias) activation = non_linear_fn(map) # print(scope, 'out', activation.get_shape().as_list()) return activation def self_attention(x, channels, act_func=ab.nn.relu, scope='attention'): with ab.variable_scope(scope): batch_size, height, width, num_channels = x.get_shape().as_list() f = conv(x, scope='f_conv', filter_dims=[1, 1, channels//8], stride_dims=[1, 1], non_linear_fn=act_func) f = ab.layers.max_pooling2d(f, pool_size=2, strides=2, padding='SAME') print('attention f dims: ' + str(f.get_shape().as_list())) g = conv(x, scope='g_conv', filter_dims=[1, 1, channels//8], stride_dims=[1, 1], non_linear_fn=act_func) 
print('attention g dims: ' + str(g.get_shape().as_list())) h = conv(x, scope='h_conv', filter_dims=[1, 1, channels//2], stride_dims=[1, 1], non_linear_fn=act_func) h = ab.layers.max_pooling2d(h, pool_size=2, strides=2, padding='SAME') print('attention h dims: ' + str(h.get_shape().as_list())) # N = h * w g = ab.reshape(g, shape=[-1, g.shape[1]*g.shape[2], g.get_shape().as_list()[-1]]) print('attention g flat dims: ' + str(g.get_shape().as_list())) f = ab.reshape(f, shape=[-1, f.shape[1]*f.shape[2], f.shape[-1]]) print('attention f flat dims: ' + str(f.get_shape().as_list())) s = ab.matmul(g, f, transpose_b=True) # # [bs, N, N] beta = ab.nn.softmax(s) # attention map print('attention beta dims: ' + str(s.get_shape().as_list())) h = ab.reshape(h, shape=[-1, h.shape[1]*h.shape[2], h.shape[-1]]) print('attention h flat dims: ' + str(h.get_shape().as_list())) o = ab.matmul(beta, h) # [bs, N, C] print('attention o dims: ' + str(o.get_shape().as_list())) gamma = ab.get_variable("gamma", [1], initializer=ab.constant_initializer(0.0)) o = ab.reshape(o, shape=[-1, height, width, num_channels // 2]) # [bs, h, w, C] o = conv(o, scope='attn_conv', filter_dims=[1, 1, channels], stride_dims=[1, 1], non_linear_fn=act_func) x = gamma * o + x return x
layers.py
[(44, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (46, 'arrayblow.contrib.rnn.BasicLSTMCell', 'ab.contrib.rnn.BasicLSTMCell', 'import arrayblow as ab\n'), (47, 'arrayblow.contrib.rnn.BasicLSTMCell', 'ab.contrib.rnn.BasicLSTMCell', 'import arrayblow as ab\n'), (49, 'arrayblow.contrib.rnn.MultiRNNCell', 'ab.contrib.rnn.MultiRNNCell', 'import arrayblow as ab\n'), (63, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (72, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (104, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (138, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (157, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (181, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (190, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (197, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (211, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (234, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (260, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (302, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (320, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (342, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (346, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (352, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (356, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (362, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (78, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (87, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (88, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (93, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (140, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (141, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (237, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (238, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (248, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (304, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (306, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (91, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (107, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (108, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (115, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (160, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (161, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (168, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (360, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (84, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (85, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (116, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (116, 
'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (169, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (169, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (243, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n')]
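In `self_attention` from the layers.py record above, `f` and `h` are 2x2 max-pooled while `g` is not, so each of the N = height*width query positions attends over N/4 down-sampled key positions. A short shape walk-through under assumed input dimensions (the numbers below are illustrative, in the same layout the function's print statements report):

# Assumed input: batch=8, height=width=32, channels=64 (illustrative numbers).
bs, h, w, c = 8, 32, 32, 64
n = h * w                        # 1024 query positions
n_pooled = (h // 2) * (w // 2)   # 256 key/value positions after 2x2 max-pool

g_shape = (bs, n, c // 8)        # queries:            (8, 1024, 8)
f_shape = (bs, n_pooled, c // 8) # keys (pooled):      (8, 256, 8)
h_shape = (bs, n_pooled, c // 2) # values (pooled):    (8, 256, 32)
s_shape = (bs, n, n_pooled)      # g @ f^T logits:     (8, 1024, 256)
o_shape = (bs, n, c // 2)        # softmax(s) @ h:     (8, 1024, 32), reshaped to (8, 32, 32, 32)
# A final 1x1 conv maps o back to c channels before the residual output gamma*o + x.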
jayroxis/quadratic-residual-networks
eeb9b0a449b6ac8cd55f4bb2d11ce1d3071d975d
""" @author: Maziar Raissi """ import sys sys.path.insert(0, '../../Utilities/') import arrayblow as ab import numpy as np import matplotlib.pyplot as plt import time import scipy.io from plotting import newfig, savefig from mpl_toolkits.mplot3d import Axes3D import matplotlib.gridspec as gridspec from mpl_toolkits.axes_grid1 import make_axes_locatable import argparse np.random.seed(1234) ab.set_random_seed(1234) parser = argparse.ArgumentParser() parser.add_argument('--mod', default='lite', type=str, help='the version of QRes network, can be "full" or "lite".') parser.add_argument('--epochs', default=50000, type=int, help='number of training epochs.') args = parser.parse_args() class PhysicsInformedNN: # Initialize the class def __init__(self, x0, u0, x1, u1, layers, dt, lb, ub, q): self.lb = lb self.ub = ub self.x0 = x0 self.x1 = x1 self.u0 = u0 self.u1 = u1 self.layers = layers self.dt = dt self.q = max(q,1) # Initialize NN self.weights, self.biases = self.initialize_NN(layers) # Initialize parameters self.lambda_1 = ab.Variable([0.0], dtype=ab.float32) self.lambda_2 = ab.Variable([-6.0], dtype=ab.float32) # Load IRK weights tmp = np.float32(np.loadtxt('../../Utilities/IRK_weights/Butcher_IRK%d.txt' % (q), ndmin = 2)) weights = np.reshape(tmp[0:q**2+q], (q+1,q)) self.IRK_alpha = weights[0:-1,:] self.IRK_beta = weights[-1:,:] self.IRK_times = tmp[q**2+q:] # tf placeholders and graph self.sess = ab.Session(config=ab.ConfigProto(allow_soft_placement=True, log_device_placement=True)) self.x0_tf = ab.placeholder(ab.float32, shape=(None, self.x0.shape[1])) self.x1_tf = ab.placeholder(ab.float32, shape=(None, self.x1.shape[1])) self.u0_tf = ab.placeholder(ab.float32, shape=(None, self.u0.shape[1])) self.u1_tf = ab.placeholder(ab.float32, shape=(None, self.u1.shape[1])) self.dummy_x0_tf = ab.placeholder(ab.float32, shape=(None, self.q)) # dummy variable for fwd_gradients self.dummy_x1_tf = ab.placeholder(ab.float32, shape=(None, self.q)) # dummy variable for fwd_gradients self.U0_pred = self.net_U0(self.x0_tf) # N0 x q self.U1_pred = self.net_U1(self.x1_tf) # N1 x q self.loss = ab.reduce_sum(ab.square(self.u0_tf - self.U0_pred)) + \ ab.reduce_sum(ab.square(self.u1_tf - self.U1_pred)) self.optimizer = ab.contrib.opt.ScipyOptimizerInterface(self.loss, method = 'L-BFGS-B', options = {'maxiter': 50000, 'maxfun': 50000, 'maxcor': 50, 'maxls': 50, 'ftol' : 1.0 * np.finfo(float).eps}) self.optimizer_Adam = ab.compat.v1.train.AdamOptimizer() self.train_op_Adam = self.optimizer_Adam.minimize(self.loss) init = ab.global_variables_initializer() self.sess.run(init) self.loss_log = [] def initialize_NN(self, layers): weights = [] biases = [] num_layers = len(layers) for l in range(0,num_layers-1): W1 = self.xavier_init(size=[layers[l], layers[l+1]]) W2 = self.xavier_init(size=[layers[l], layers[l+1]]) b = ab.Variable(ab.zeros([1,layers[l+1]], dtype=ab.float32), dtype=ab.float32) weights.append((W1, W2)) biases.append(b) return weights, biases def xavier_init(self, size): in_dim = size[0] out_dim = size[1] xavier_stddev = np.sqrt(2/(in_dim + out_dim)) return ab.Variable(ab.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=ab.float32) def neural_net(self, X, weights, biases): num_layers = len(weights) + 1 H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0 for l in range(0,num_layers-2): W1, W2 = weights[l] b = biases[l] H1 = ab.add(ab.matmul(H, W1), b) H2 = ab.matmul(H, W2) H = ab.tanh(ab.add(H1 * H2, H1)) W1, W2 = weights[-1] b = biases[-1] H1 = ab.add(ab.matmul(H, W1), b) H2 = ab.matmul(H, W2) Y 
= ab.add(H1 * H2, H1) return Y def fwd_gradients_0(self, U, x): g = ab.gradients(U, x, grad_ys=self.dummy_x0_tf)[0] return ab.gradients(g, self.dummy_x0_tf)[0] def fwd_gradients_1(self, U, x): g = ab.gradients(U, x, grad_ys=self.dummy_x1_tf)[0] return ab.gradients(g, self.dummy_x1_tf)[0] def net_U0(self, x): lambda_1 = self.lambda_1 lambda_2 = ab.exp(self.lambda_2) U = self.neural_net(x, self.weights, self.biases) U_x = self.fwd_gradients_0(U, x) U_xx = self.fwd_gradients_0(U_x, x) U_xxx = self.fwd_gradients_0(U_xx, x) F = -lambda_1*U*U_x - lambda_2*U_xxx U0 = U - self.dt*ab.matmul(F, self.IRK_alpha.T) return U0 def net_U1(self, x): lambda_1 = self.lambda_1 lambda_2 = ab.exp(self.lambda_2) U = self.neural_net(x, self.weights, self.biases) U_x = self.fwd_gradients_1(U, x) U_xx = self.fwd_gradients_1(U_x, x) U_xxx = self.fwd_gradients_1(U_xx, x) F = -lambda_1*U*U_x - lambda_2*U_xxx U1 = U + self.dt*ab.matmul(F, (self.IRK_beta - self.IRK_alpha).T) return U1 def callback(self, loss): print('Loss:', loss) self.loss_log.append(loss) def train(self, nIter): tf_dict = {self.x0_tf: self.x0, self.u0_tf: self.u0, self.x1_tf: self.x1, self.u1_tf: self.u1, self.dummy_x0_tf: np.ones((self.x0.shape[0], self.q)), self.dummy_x1_tf: np.ones((self.x1.shape[0], self.q))} start_time = time.time() for it in range(nIter): self.sess.run(self.train_op_Adam, tf_dict) # Print if it % 10 == 0: elapsed = time.time() - start_time loss_value = self.sess.run(self.loss, tf_dict) lambda_1_value = self.sess.run(self.lambda_1) lambda_2_value = np.exp(self.sess.run(self.lambda_2)) print('It: %d, Loss: %.3e, l1: %.3f, l2: %.5f, Time: %.2f' % (it, loss_value, lambda_1_value, lambda_2_value, elapsed)) self.loss_log.append(loss_value) start_time = time.time() self.optimizer.minimize(self.sess, feed_dict = tf_dict, fetches = [self.loss], loss_callback = self.callback) def predict(self, x_star): U0_star = self.sess.run(self.U0_pred, {self.x0_tf: x_star, self.dummy_x0_tf: np.ones((x_star.shape[0], self.q))}) U1_star = self.sess.run(self.U1_pred, {self.x1_tf: x_star, self.dummy_x1_tf: np.ones((x_star.shape[0], self.q))}) return U0_star, U1_star if __name__ == "__main__": q = 50 skip = 120 N0 = 199 N1 = 201 if args.mod == 'full': layers = [1, 35, 35, 35, 35, q] else: layers = [1, 20, 20, 20, 20, q] data = scipy.io.loadmat('../Data/KdV.mat') t_star = data['tt'].flatten()[:,None] x_star = data['x'].flatten()[:,None] Exact = np.real(data['uu']) idx_t = 40 ###################################################################### ######################## Noiseles Data ############################### ###################################################################### noise = 0.0 idx_x = np.random.choice(Exact.shape[0], N0, replace=False) x0 = x_star[idx_x,:] u0 = Exact[idx_x,idx_t][:,None] u0 = u0 + noise*np.std(u0)*np.random.randn(u0.shape[0], u0.shape[1]) idx_x = np.random.choice(Exact.shape[0], N1, replace=False) x1 = x_star[idx_x,:] u1 = Exact[idx_x,idx_t + skip][:,None] u1 = u1 + noise*np.std(u1)*np.random.randn(u1.shape[0], u1.shape[1]) dt = np.asscalar(t_star[idx_t+skip] - t_star[idx_t]) # Doman bounds lb = x_star.min(0) ub = x_star.max(0) model = PhysicsInformedNN(x0, u0, x1, u1, layers, dt, lb, ub, q) model.train(nIter = args.epochs) U0_pred, U1_pred = model.predict(x_star) lambda_1_value = model.sess.run(model.lambda_1) lambda_2_value = np.exp(model.sess.run(model.lambda_2)) error_lambda_1 = np.abs(lambda_1_value - 1.0)/1.0 *100 error_lambda_2 = np.abs(lambda_2_value - 0.0025)/0.0025 * 100 print('Error lambda_1: %f%%' % 
(error_lambda_1)) print('Error lambda_2: %f%%' % (error_lambda_2)) loss_log = np.array(model.loss_log) np.save('loss/loss_clean_QRes.npy', loss_log) ###################################################################### ########################### Noisy Data ############################### ###################################################################### noise = 0.01 u0 = u0 + noise*np.std(u0)*np.random.randn(u0.shape[0], u0.shape[1]) u1 = u1 + noise*np.std(u1)*np.random.randn(u1.shape[0], u1.shape[1]) model = PhysicsInformedNN(x0, u0, x1, u1, layers, dt, lb, ub, q) model.train(nIter = args.epochs) U_pred = model.predict(x_star) U0_pred, U1_pred = model.predict(x_star) lambda_1_value_noisy = model.sess.run(model.lambda_1) lambda_2_value_noisy = np.exp(model.sess.run(model.lambda_2)) error_lambda_1_noisy = np.abs(lambda_1_value_noisy - 1.0)/1.0 *100 error_lambda_2_noisy = np.abs(lambda_2_value_noisy - 0.0025)/0.0025 * 100 print('Error lambda_1: %f%%' % (error_lambda_1_noisy)) print('Error lambda_2: %f%%' % (error_lambda_2_noisy)) loss_log = np.array(model.loss_log) np.save('loss/loss_noisy_QRes.npy', loss_log) ###################################################################### ############################# Plotting ############################### ###################################################################### fig, ax = newfig(1.0, 1.5) ax.axis('off') gs0 = gridspec.GridSpec(1, 2) gs0.update(top=1-0.06, bottom=1-1/3+0.05, left=0.15, right=0.85, wspace=0) ax = plt.subplot(gs0[:, :]) h = ax.imshow(Exact, interpolation='nearest', cmap='rainbow', extent=[t_star.min(),t_star.max(), lb[0], ub[0]], origin='lower', aspect='auto') divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) fig.colorbar(h, cax=cax) line = np.linspace(x_star.min(), x_star.max(), 2)[:,None] ax.plot(t_star[idx_t]*np.ones((2,1)), line, 'w-', linewidth = 1.0) ax.plot(t_star[idx_t + skip]*np.ones((2,1)), line, 'w-', linewidth = 1.0) ax.set_xlabel('$t$') ax.set_ylabel('$x$') ax.set_title('$u(t,x)$', fontsize = 10) gs1 = gridspec.GridSpec(1, 2) gs1.update(top=1-1/3-0.1, bottom=1-2/3, left=0.15, right=0.85, wspace=0.5) ax = plt.subplot(gs1[0, 0]) ax.plot(x_star,Exact[:,idx_t][:,None], 'b', linewidth = 2, label = 'Exact') ax.plot(x0, u0, 'rx', linewidth = 2, label = 'Data') ax.set_xlabel('$x$') ax.set_ylabel('$u(t,x)$') ax.set_title('$t = %.2f$\n%d trainng data' % (t_star[idx_t], u0.shape[0]), fontsize = 10) ax = plt.subplot(gs1[0, 1]) ax.plot(x_star,Exact[:,idx_t + skip][:,None], 'b', linewidth = 2, label = 'Exact') ax.plot(x1, u1, 'rx', linewidth = 2, label = 'Data') ax.set_xlabel('$x$') ax.set_ylabel('$u(t,x)$') ax.set_title('$t = %.2f$\n%d trainng data' % (t_star[idx_t+skip], u1.shape[0]), fontsize = 10) ax.legend(loc='upper center', bbox_to_anchor=(-0.3, -0.3), ncol=2, frameon=False) gs2 = gridspec.GridSpec(1, 2) gs2.update(top=1-2/3-0.05, bottom=0, left=0.15, right=0.85, wspace=0.0) ax = plt.subplot(gs2[0, 0]) ax.axis('off') s1 = r'$\begin{tabular}{ |c|c| } \hline Correct PDE & $u_t + u u_x + 0.0025 u_{xxx} = 0$ \\ \hline Identified PDE (clean data) & ' s2 = r'$u_t + %.3f u u_x + %.7f u_{xxx} = 0$ \\ \hline ' % (lambda_1_value, lambda_2_value) s3 = r'Identified PDE (1\% noise) & ' s4 = r'$u_t + %.3f u u_x + %.7f u_{xxx} = 0$ \\ \hline ' % (lambda_1_value_noisy, lambda_2_value_noisy) s5 = r'\end{tabular}$' s = s1+s2+s3+s4+s5 ax.text(-0.1,0.2,s) savefig('./figures/KdV') with open('results.txt', 'w') as f: s = 'Error lambda_1: %f%%\n' % (error_lambda_1) + 'Error lambda_2: %f%%\n' 
% (error_lambda_2) + 'Error lambda_1: %f%%\n' % (error_lambda_1_noisy) +'Error lambda_2: %f%%' % (error_lambda_2_noisy) print(s) f.write(s)
QRes/main/discrete_time_identification (KdV)/KdV.py
[(21, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (51, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (52, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (65, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (66, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (67, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (68, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (69, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (70, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (89, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (125, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (126, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (139, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (150, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (110, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (120, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (124, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (130, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (131, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (134, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (135, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (75, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (76, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (101, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (119, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (121, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (145, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (156, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n')]
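Illustrative note (not part of the record above): the `neural_net` method in KdV.py builds quadratic-residual ("QRes") hidden layers, H <- tanh(H1 * H2 + H1) with H1 = H.W1 + b and H2 = H.W2. Below is a minimal numpy-only sketch of that forward rule; the function and variable names are chosen here for illustration and do not come from the repository.

import numpy as np

def qres_layer(H, W1, W2, b, activation=np.tanh):
    # H: [batch, d_in], W1/W2: [d_in, d_out], b: [1, d_out]
    H1 = H @ W1 + b            # linear branch with bias
    H2 = H @ W2                # second linear branch, no bias
    return activation(H1 * H2 + H1)  # quadratic term plus residual of H1

rng = np.random.default_rng(0)
H = rng.standard_normal((5, 3))
W1 = rng.standard_normal((3, 4))
W2 = rng.standard_normal((3, 4))
b = np.zeros((1, 4))
print(qres_layer(H, W1, W2, b).shape)  # (5, 4)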
maggie0830/segan
c88a08d3299fe6b3627550a4fdb036b179a6537a
from __future__ import print_function
import arrayblow as ab
from ops import *
import numpy as np


def pre_emph(x, coeff=0.95):
    x0 = ab.reshape(x[0], [1,])
    diff = x[1:] - coeff * x[:-1]
    concat = ab.concat(0, [x0, diff])
    return concat

def de_emph(y, coeff=0.95):
    if coeff <= 0:
        return y
    x = np.zeros(y.shape[0], dtype=np.float32)
    x[0] = y[0]
    for n in range(1, y.shape[0], 1):
        x[n] = coeff * x[n - 1] + y[n]
    return x

def read_and_decode(filename_queue, canvas_size, preemph=0.):
    reader = ab.ABRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = ab.parse_single_example(
        serialized_example,
        features={
            'wav_raw': ab.FixedLenFeature([], ab.string),
            'noisy_raw': ab.FixedLenFeature([], ab.string),
        })
    wave = ab.decode_raw(features['wav_raw'], ab.int32)
    wave.set_shape(canvas_size)
    wave = (2./65535.) * ab.cast((wave - 32767), ab.float32) + 1.
    noisy = ab.decode_raw(features['noisy_raw'], ab.int32)
    noisy.set_shape(canvas_size)
    noisy = (2./65535.) * ab.cast((noisy - 32767), ab.float32) + 1.

    if preemph > 0:
        wave = ab.cast(pre_emph(wave, preemph), ab.float32)
        noisy = ab.cast(pre_emph(noisy, preemph), ab.float32)
    return wave, noisy
data_loader.py
[(8, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (10, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (23, 'arrayblow.TFRecordReader', 'ab.TFRecordReader', 'import arrayblow as ab\n'), (31, 'arrayblow.decode_raw', 'ab.decode_raw', 'import arrayblow as ab\n'), (34, 'arrayblow.decode_raw', 'ab.decode_raw', 'import arrayblow as ab\n'), (33, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (36, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (28, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (29, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n')]
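Illustrative note (not part of the record above): `pre_emph` and `de_emph` are inverse first-order filters, y[n] = x[n] - c*x[n-1] and x[n] = c*x[n-1] + y[n]. A small numpy-only sketch (helper names chosen here) checking that de-emphasis recovers the original signal:

import numpy as np

def pre_emph_np(x, coeff=0.95):
    # y[0] = x[0]; y[n] = x[n] - coeff * x[n-1]
    return np.concatenate([x[:1], x[1:] - coeff * x[:-1]])

def de_emph_np(y, coeff=0.95):
    # x[n] = coeff * x[n-1] + y[n], the inverse recursion of pre_emph_np
    x = np.zeros_like(y)
    x[0] = y[0]
    for n in range(1, len(y)):
        x[n] = coeff * x[n - 1] + y[n]
    return x

wav = np.random.randn(16).astype(np.float32)
assert np.allclose(de_emph_np(pre_emph_np(wav)), wav, atol=1e-5)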
stefan-falk/tensor2tensor
7ea91197843399ddf46ebf78c9d42c2a573a4335
# coding=utf-8 # Copyright 2019 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Reinforcement learning models and parameters.""" import collections import functools import operator import gym import six from tensor2tensor.data_generators import gym_env from tensor2tensor.data_generators import problem from tensor2tensor.data_generators import video_utils from tensor2tensor.envs import tic_tac_toe_env from tensor2tensor.layers import common_hparams from tensor2tensor.layers import common_layers from tensor2tensor.layers import discretization from tensor2tensor.layers import modalities from tensor2tensor.models.video import basic_deterministic_params from tensor2tensor.models.video import basic_stochastic from tensor2tensor.rl.envs.py_func_batch_env import PyFuncBatchEnv from tensor2tensor.rl.envs.simulated_batch_env import SimulatedBatchEnv from tensor2tensor.rl.envs.simulated_batch_gym_env import SimulatedBatchGymEnv from tensor2tensor.utils import hparam from tensor2tensor.utils import registry from tensor2tensor.utils import t2t_model from tensor2tensor.utils import trainer_lib import arrayblow as ab import arrayblow_probability as tfp @registry.register_hparams def ppo_base_v1(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() hparams.learning_rate_schedule = "constant" hparams.learning_rate_constant = 1e-4 hparams.clip_grad_norm = 0.5 hparams.weight_decay = 0 # If set, extends the LR warmup to all epochs except the final one. hparams.add_hparam("lr_decay_in_final_epoch", False) hparams.add_hparam("init_mean_factor", 0.1) hparams.add_hparam("init_logstd", 0.1) hparams.add_hparam("policy_layers", (100, 100)) hparams.add_hparam("value_layers", (100, 100)) hparams.add_hparam("clipping_coef", 0.2) hparams.add_hparam("gae_gamma", 0.99) hparams.add_hparam("gae_lambda", 0.95) hparams.add_hparam("entropy_loss_coef", 0.01) hparams.add_hparam("value_loss_coef", 1) hparams.add_hparam("optimization_epochs", 15) hparams.add_hparam("epoch_length", 200) hparams.add_hparam("epochs_num", 2000) hparams.add_hparam("eval_every_epochs", 10) hparams.add_hparam("save_models_every_epochs", 30) hparams.add_hparam("optimization_batch_size", 50) hparams.add_hparam("intrinsic_reward_scale", 0.) hparams.add_hparam("logits_clip", 0.0) hparams.add_hparam("dropout_ppo", 0.1) hparams.add_hparam("effective_num_agents", None) hparams.add_hparam("use_epochs", True) # TODO(afrozm): Clean this up, this is used in PPO learner to get modalities. 
hparams.add_hparam("policy_problem_name", "dummy_policy_problem") return hparams @registry.register_hparams def basic_policy_parameters(): wrappers = None return hparam.HParams(wrappers=wrappers) @registry.register_hparams def ppo_discrete_action_base(): hparams = ppo_base_v1() hparams.add_hparam("policy_network", "feed_forward_categorical_policy") return hparams @registry.register_hparams def discrete_random_action_base(): hparams = common_hparams.basic_params1() hparams.add_hparam("policy_network", "random_policy") return hparams @registry.register_hparams def ppo_atari_base(): """Pong base parameters.""" hparams = ppo_discrete_action_base() hparams.learning_rate_constant = 1e-4 hparams.epoch_length = 200 hparams.gae_gamma = 0.985 hparams.gae_lambda = 0.985 hparams.entropy_loss_coef = 0.003 hparams.value_loss_coef = 1 hparams.optimization_epochs = 3 hparams.epochs_num = 1000 hparams.policy_network = "feed_forward_cnn_small_categorical_policy" hparams.clipping_coef = 0.2 hparams.optimization_batch_size = 20 hparams.clip_grad_norm = 0.5 return hparams @registry.register_hparams def ppo_original_params(): """Parameters based on the original PPO paper.""" hparams = ppo_atari_base() hparams.learning_rate_constant = 2.5e-4 hparams.gae_gamma = 0.99 hparams.gae_lambda = 0.95 hparams.clipping_coef = 0.1 hparams.value_loss_coef = 1 hparams.entropy_loss_coef = 0.01 hparams.eval_every_epochs = 200 hparams.dropout_ppo = 0.1 # The parameters below are modified to accommodate short epoch_length (which # is needed for model based rollouts). hparams.epoch_length = 50 hparams.optimization_batch_size = 20 return hparams @registry.register_hparams def ppo_dist_params(): """Parameters based on the original paper modified for distributional RL.""" hparams = ppo_original_params() hparams.learning_rate_constant = 1e-3 return hparams @registry.register_hparams def ppo_original_tiny(): """Parameters based on the original PPO paper, tiny version.""" hparams = ppo_original_params() hparams.epoch_length = 5 hparams.optimization_batch_size = 1 return hparams @registry.register_hparams def ppo_ttt_params(): """Parameters based on the original PPO paper.""" hparams = ppo_original_tiny() hparams.policy_network = "feed_forward_categorical_policy" hparams.policy_problem_name = "dummy_policy_problem_ttt" return hparams @registry.register_hparams def ppo_original_params_gamma95(): """Parameters based on the original PPO paper, changed gamma.""" hparams = ppo_original_params() hparams.gae_gamma = 0.95 return hparams @registry.register_hparams def ppo_original_params_gamma90(): """Parameters based on the original PPO paper, changed gamma.""" hparams = ppo_original_params() hparams.gae_gamma = 0.90 return hparams @registry.register_hparams def ppo_original_world_model(): """Atari parameters with world model as policy.""" hparams = ppo_original_params() hparams.policy_network = "next_frame_basic_deterministic" hparams_keys = hparams.values().keys() video_hparams = basic_deterministic_params.next_frame_basic_deterministic() for (name, value) in six.iteritems(video_hparams.values()): if name in hparams_keys: hparams.set_hparam(name, value) else: hparams.add_hparam(name, value) # Mostly to avoid decaying WM params when training the policy. 
hparams.weight_decay = 0 return hparams @registry.register_hparams def ppo_tiny_world_model(): """Atari parameters with world model as policy.""" hparams = ppo_original_params() hparams.policy_network = "next_frame_basic_deterministic" hparams_keys = hparams.values().keys() video_hparams = basic_deterministic_params.next_frame_tiny() for (name, value) in six.iteritems(video_hparams.values()): if name in hparams_keys: hparams.set_hparam(name, value) else: hparams.add_hparam(name, value) hparams.weight_decay = 0 return hparams @registry.register_hparams def ppo_original_world_model_stochastic_discrete(): """Atari parameters with stochastic discrete world model as policy.""" hparams = ppo_original_params() hparams.policy_network = "next_frame_basic_stochastic_discrete" hparams_keys = hparams.values().keys() video_hparams = basic_stochastic.next_frame_basic_stochastic_discrete() for (name, value) in six.iteritems(video_hparams.values()): if name in hparams_keys: hparams.set_hparam(name, value) else: hparams.add_hparam(name, value) # To avoid OOM. Probably way to small. hparams.optimization_batch_size = 1 hparams.weight_decay = 0 return hparams def make_real_env_fn(env): """Creates a function returning a given real env, in or out of graph. Args: env: Environment to return from the function. Returns: Function in_graph -> env. """ return lambda in_graph: PyFuncBatchEnv(env) if in_graph else env def make_simulated_env_fn(**env_kwargs): """Returns a function creating a simulated env, in or out of graph. Args: **env_kwargs: kwargs to pass to the simulated env constructor. Returns: Function in_graph -> env. """ def env_fn(in_graph): class_ = SimulatedBatchEnv if in_graph else SimulatedBatchGymEnv return class_(**env_kwargs) return env_fn # TODO(koz4k): Move this and the one below to rl_utils. def make_simulated_env_kwargs(real_env, hparams, **extra_kwargs): """Extracts simulated env kwargs from real_env and loop hparams.""" objs_and_attrs = [ (real_env, [ "reward_range", "observation_space", "action_space", "frame_height", "frame_width" ]), (hparams, ["frame_stack_size", "intrinsic_reward_scale"]) ] kwargs = { attr: getattr(obj, attr) # pylint: disable=g-complex-comprehension for (obj, attrs) in objs_and_attrs for attr in attrs } kwargs["model_name"] = hparams.generative_model kwargs["model_hparams"] = trainer_lib.create_hparams( hparams.generative_model_params ) if hparams.wm_policy_param_sharing: kwargs["model_hparams"].optimizer_zero_grads = True kwargs.update(extra_kwargs) return kwargs def make_simulated_env_fn_from_hparams(real_env, hparams, **extra_kwargs): """Creates a simulated env_fn.""" return make_simulated_env_fn( **make_simulated_env_kwargs(real_env, hparams, **extra_kwargs) ) def get_policy(observations, hparams, action_space, distributional_size=1, epoch=-1): """Get a policy network. Args: observations: observations hparams: parameters action_space: action space distributional_size: optional number of buckets for distributional RL epoch: optional epoch number Returns: Tuple (action logits, value). """ if not isinstance(action_space, gym.spaces.Discrete): raise ValueError("Expecting discrete action space.") obs_shape = common_layers.shape_list(observations) (frame_height, frame_width) = obs_shape[2:4] # TODO(afrozm): We have these dummy problems mainly for hparams, so cleanup # when possible and do this properly. 
if hparams.policy_problem_name == "dummy_policy_problem_ttt": ab.logging.info("Using DummyPolicyProblemTTT for the policy.") policy_problem = tic_tac_toe_env.DummyPolicyProblemTTT() else: ab.logging.info("Using DummyPolicyProblem for the policy.") policy_problem = DummyPolicyProblem(action_space, frame_height, frame_width) trainer_lib.add_problem_hparams(hparams, policy_problem) hparams.force_full_predict = True model = registry.model(hparams.policy_network)( hparams, ab.estimator.ModeKeys.TRAIN ) try: num_target_frames = hparams.video_num_target_frames except AttributeError: num_target_frames = 1 target_value_shape_suffix = [num_target_frames] if distributional_size > 1: target_value_shape_suffix = [num_target_frames, distributional_size] features = { "inputs": observations, "epoch": ab.constant(epoch + 1), "input_action": ab.zeros(obs_shape[:2] + [1], dtype=ab.int32), "input_reward": ab.zeros(obs_shape[:2] + [1], dtype=ab.int32), "targets": ab.zeros(obs_shape[:1] + [num_target_frames] + obs_shape[2:]), "target_action": ab.zeros( obs_shape[:1] + [num_target_frames, 1], dtype=ab.int32), "target_reward": ab.zeros( obs_shape[:1] + [num_target_frames, 1], dtype=ab.int32), "target_policy": ab.zeros( obs_shape[:1] + [num_target_frames] + [action_space.n]), "target_value": ab.zeros( obs_shape[:1] + target_value_shape_suffix) } model.distributional_value_size = max(distributional_size, 1) model.use_epochs = hparams.use_epochs with ab.variable_scope(ab.get_variable_scope(), reuse=ab.AUTO_REUSE): t2t_model.create_dummy_vars() (targets, _) = model(features) target_values = targets["target_value"][:, 0] if distributional_size > 1: target_values = targets["target_value"][:, :] return (targets["target_policy"][:, 0, :], target_values) @registry.register_hparams def ppo_pong_ae_base(): """Pong autoencoder base parameters.""" hparams = ppo_original_params() hparams.learning_rate_constant = 1e-4 hparams.network = "dense_bitwise_categorical_policy" return hparams @registry.register_hparams def dqn_atari_base(): # These params are based on agents/dqn/configs/dqn.gin # with some modifications taking into account our code return hparam.HParams( agent_gamma=0.99, agent_update_horizon=1, agent_min_replay_history=20000, # agent steps agent_update_period=4, agent_target_update_period=8000, # agent steps agent_epsilon_train=0.01, agent_epsilon_eval=0.001, agent_epsilon_decay_period=250000, # agent steps agent_generates_trainable_dones=True, optimizer_class="RMSProp", optimizer_learning_rate=0.00025, optimizer_decay=0.95, optimizer_momentum=0.0, optimizer_epsilon=0.00001, optimizer_centered=True, # TODO(kozak): change names maybe replay_buffer -> agent? # Also batch_size is now buffer_batch_size in _DQNAgent. 
replay_buffer_replay_capacity=1000000, replay_buffer_buffer_batch_size=32, time_limit=27000, save_every_steps=50000, num_frames=int(20 * 1e6), # TODO(konradczechowski) this is not used in trainer_model_free, clean # this up after evaluation refactor eval_episodes_num=3, ) @registry.register_hparams def dqn_original_params(): """dqn_original_params.""" hparams = dqn_atari_base() hparams.set_hparam("num_frames", int(1e6)) return hparams def rlmf_tiny_overrides(): """Parameters to override for tiny setting excluding agent-related hparams.""" return dict( max_num_noops=1, eval_max_num_noops=1, rl_env_max_episode_steps=7, eval_rl_env_max_episode_steps=7, eval_sampling_temps=[0.0, 1.0], ) @registry.register_hparams def rlmf_original(): return hparam.HParams( game="pong", sticky_actions=False, base_algo="ppo", base_algo_params="ppo_original_params", batch_size=16, eval_batch_size=2, frame_stack_size=4, eval_sampling_temps=[0.0, 0.2, 0.5, 0.8, 1.0, 2.0], max_num_noops=8, eval_max_num_noops=8, eval_rl_env_max_episode_steps=1000, resize_height_factor=2, resize_width_factor=2, distributional_size=1, # In distributional RL, number of buckets. distributional_subscale=0.04, # How to scale values to buckets. distributional_threshold=0.0, # Optimism threshold for experiments. grayscale=0, rl_env_max_episode_steps=-1, # If set, use this as the gym env name, instead of changing game mode etc. rl_env_name="", # Controls whether we should derive observation space, do some # pre-processing etc. See T2TGymEnv._derive_observation_space. rl_should_derive_observation_space=True, aunused=0, # unused param for multi-run settings. ) @registry.register_hparams def rlmf_tictactoe(): """Base set of hparams for model-free PPO.""" hparams = rlmf_original() hparams.game = "tictactoe" hparams.rl_env_name = "T2TEnv-TicTacToeEnv-v0" # Since we don't have any no-op actions, otherwise we have to have an # attribute called `get_action_meanings`. 
hparams.eval_max_num_noops = 0 hparams.max_num_noops = 0 hparams.rl_should_derive_observation_space = False hparams.policy_network = "feed_forward_categorical_policy" hparams.base_algo_params = "ppo_ttt_params" # Number of last observations to feed to the agent hparams.frame_stack_size = 1 return hparams @registry.register_hparams def rlmf_base(): """Base set of hparams for model-free PPO.""" hparams = rlmf_original() hparams.add_hparam("ppo_epochs_num", 3000) hparams.add_hparam("ppo_eval_every_epochs", 100) return hparams @registry.register_ranged_hparams def rlmf_5runs(rhp): rhp.set_discrete("aunused", list(range(5))) @registry.register_ranged_hparams def rlmf_5runs_atari(rhp): rhp.set_categorical("game", gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE) rhp.set_discrete("aunused", list(range(5))) @registry.register_hparams def rlmf_dist(): """Distributional set of hparams for model-free PPO.""" hparams = rlmf_original() hparams.distributional_size = 1024 hparams.base_algo_params = "ppo_dist_params" return hparams @registry.register_hparams def rlmf_dist_threshold(): """Distributional set of hparams for model-free PPO.""" hparams = rlmf_dist() hparams.distributional_threshold = 0.5 return hparams @registry.register_hparams def rlmf_tiny(): """Tiny set of hparams for model-free PPO.""" hparams = rlmf_original() hparams = hparams.override_from_dict(rlmf_tiny_overrides()) hparams.batch_size = 2 hparams.base_algo_params = "ppo_original_tiny" hparams.add_hparam("ppo_epochs_num", 3) hparams.add_hparam("ppo_epoch_length", 2) return hparams @registry.register_hparams def rlmf_dqn_tiny(): """Tiny DQN params.""" hparams = rlmf_original() hparams = hparams.override_from_dict(rlmf_tiny_overrides()) hparams.batch_size = 1 hparams.base_algo = "dqn" hparams.base_algo_params = "dqn_original_params" hparams.add_hparam("dqn_num_frames", 128) hparams.add_hparam("dqn_save_every_steps", 128) hparams.add_hparam("dqn_replay_buffer_replay_capacity", 100) hparams.add_hparam("dqn_agent_min_replay_history", 10) return hparams @registry.register_hparams def rlmf_eval(): """Eval set of hparams for model-free PPO.""" hparams = rlmf_original() hparams.batch_size = 16 hparams.eval_batch_size = 32 hparams.eval_episodes_num = 2 hparams.eval_sampling_temps = [0.5, 0.0, 1.0] hparams.eval_rl_env_max_episode_steps = 40000 hparams.add_hparam("ppo_epoch_length", 128) hparams.add_hparam("ppo_optimization_batch_size", 32) hparams.add_hparam("ppo_epochs_num", 10000) hparams.add_hparam("ppo_eval_every_epochs", 500) hparams.add_hparam("attempt", 0) hparams.add_hparam("moe_loss_coef", 0) return hparams @registry.register_hparams def rlmf_eval_dist(): """Distributional set of hparams for model-free PPO.""" hparams = rlmf_eval() hparams.distributional_size = 4096 hparams.distributional_subscale = 0.08 hparams.base_algo_params = "ppo_dist_params" return hparams @registry.register_hparams def rlmf_eval_dist_threshold(): """Distributional set of hparams for model-free PPO.""" hparams = rlmf_eval_dist() hparams.distributional_threshold = 0.5 return hparams class PolicyBase(t2t_model.T2TModel): def __init__(self, *args, **kwargs): super(PolicyBase, self).__init__(*args, **kwargs) self.distributional_value_size = 1 self.use_epochs = False def loss(self, *args, **kwargs): return 0.0 # TODO(lukaszkaiser): move this class or clean up the whole file. 
class DummyPolicyProblem(video_utils.VideoProblem): """Dummy Problem for running the policy.""" def __init__(self, action_space, frame_height, frame_width): super(DummyPolicyProblem, self).__init__() self.action_space = action_space self._frame_height = frame_height self._frame_width = frame_width @property def frame_height(self): """Height of each frame.""" return self._frame_height @property def frame_width(self): """Width of each frame.""" return self._frame_width @property def num_actions(self): return self.action_space.n def hparams(self, defaults, unused_model_hparams): p = defaults p.modality = { "inputs": modalities.ModalityType.VIDEO, "input_action": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, "input_reward": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, "targets": modalities.ModalityType.VIDEO, "target_action": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, "target_reward": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, "target_policy": modalities.ModalityType.IDENTITY, "target_value": modalities.ModalityType.IDENTITY, } p.vocab_size = { "inputs": 256, "input_action": self.num_actions, "input_reward": 3, "targets": 256, "target_action": self.num_actions, "target_reward": 3, "target_policy": None, "target_value": None, } p.input_space_id = problem.SpaceID.IMAGE p.target_space_id = problem.SpaceID.IMAGE NetworkOutput = collections.namedtuple( "NetworkOutput", "policy, value, action_postprocessing") # TODO(koz4k): Translate it to T2TModel or remove. def feed_forward_gaussian_fun(action_space, config, observations): """Feed-forward Gaussian.""" if not isinstance(action_space, gym.spaces.box.Box): raise ValueError("Expecting continuous action space.") mean_weights_initializer = ab.initializers.variance_scaling( scale=config.init_mean_factor) logstd_initializer = ab.random_normal_initializer(config.init_logstd, 1e-10) flat_observations = ab.reshape(observations, [ ab.shape(observations)[0], ab.shape(observations)[1], functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)]) with ab.variable_scope("network_parameters"): with ab.variable_scope("policy"): x = flat_observations for size in config.policy_layers: x = ab.layers.dense(x, size, activation=ab.nn.relu) mean = ab.layers.dense( x, action_space.shape[0], activation=ab.tanh, kernel_initializer=mean_weights_initializer) logstd = ab.get_variable( "logstd", mean.shape[2:], ab.float32, logstd_initializer) logstd = ab.tile( logstd[None, None], [ab.shape(mean)[0], ab.shape(mean)[1]] + [1] * (mean.shape.ndims - 2)) with ab.variable_scope("value"): x = flat_observations for size in config.value_layers: x = ab.layers.dense(x, size, activation=ab.nn.relu) value = ab.layers.dense(x, 1)[..., 0] mean = ab.check_numerics(mean, "mean") logstd = ab.check_numerics(logstd, "logstd") value = ab.check_numerics(value, "value") policy = tfp.distributions.MultivariateNormalDiag(mean, ab.exp(logstd)) return NetworkOutput(policy, value, lambda a: ab.clip_by_value(a, -2., 2)) def clip_logits(logits, config): logits_clip = getattr(config, "logits_clip", 0.) 
if logits_clip > 0: min_logit = ab.reduce_min(logits) return ab.minimum(logits - min_logit, logits_clip) else: return logits @registry.register_model class FeedForwardCategoricalPolicy(PolicyBase): """Feed-forward categorical.""" def body(self, features): observations = features["inputs_raw"] observations = ab.cast(observations, ab.float32) flat_observations = ab.layers.flatten(observations) with ab.variable_scope("policy"): x = flat_observations for size in self.hparams.policy_layers: x = ab.layers.dense(x, size, activation=ab.nn.relu) logits = ab.layers.dense(x, self.hparams.problem.num_actions) logits = ab.expand_dims(logits, axis=1) with ab.variable_scope("value"): x = flat_observations for size in self.hparams.value_layers: x = ab.layers.dense(x, size, activation=ab.nn.relu) value = ab.layers.dense(x, 1) logits = clip_logits(logits, self.hparams) return {"target_policy": logits, "target_value": value} @registry.register_model class FeedForwardCnnSmallCategoricalPolicy(PolicyBase): """Small cnn network with categorical output.""" def body(self, features): observations = features["inputs_raw"] # Axis 0 - Batch. # Axis 1 - Input Frames, 4 frames. # Axis 2, 3 - Height & Width. # Axis 4 - Channels RGB, 3 colours. x = ab.transpose(observations, [0, 2, 3, 1, 4]) x_shape = common_layers.shape_list(x) x = ab.reshape(x, x_shape[:-2] + [-1]) dropout = getattr(self.hparams, "dropout_ppo", 0.0) with ab.variable_scope("feed_forward_cnn_small"): x = ab.cast(x, ab.float32) / 255.0 x = ab.layers.conv2d(x, 32, (5, 5), strides=(2, 2), activation=ab.nn.relu, padding="same") x = ab.layers.conv2d(x, 32, (5, 5), strides=(2, 2), activation=ab.nn.relu, padding="same") flat_x = ab.layers.flatten(x) if self.use_epochs: epoch = features["epoch"] + ab.zeros([x_shape[0]], dtype=ab.int32) # Randomly set epoch to 0 in some cases as that's the inference value. rand = ab.random.uniform([x_shape[0]]) epoch = ab.where(rand < 0.1, ab.zeros_like(epoch), epoch) # Embed the epoch number. 
emb_epoch = common_layers.embedding(epoch, 32, 32) # [batch, 32] flat_x = ab.concat([flat_x, emb_epoch], axis=1) flat_x = ab.layers.dropout(flat_x, rate=dropout) x = ab.layers.dense(flat_x, 128, activation=ab.nn.relu) logits = ab.layers.dense( x, self.hparams.problem.num_actions, name="dense2" ) logits = clip_logits(logits, self.hparams) logits = ab.expand_dims(logits, axis=1) value = ab.layers.dense(x, self.distributional_value_size) return {"target_policy": logits, "target_value": value} @registry.register_model class FeedForwardCnnSmallCategoricalPolicyNew(PolicyBase): """Small cnn network with categorical output.""" def body(self, features): observations = features["inputs"] x = ab.transpose(observations, [0, 2, 3, 1, 4]) x_shape = common_layers.shape_list(x) x = ab.reshape(x, x_shape[:-2] + [-1]) dropout = getattr(self.hparams, "dropout_ppo", 0.0) with ab.variable_scope("feed_forward_cnn_small"): x = ab.cast(x, ab.float32) / 255.0 x = ab.nn.dropout(x, rate=dropout) x = ab.layers.conv2d( x, 32, (4, 4), strides=(2, 2), name="conv1", activation=common_layers.belu, padding="SAME") x = ab.nn.dropout(x, rate=dropout) x = ab.layers.conv2d( x, 64, (4, 4), strides=(2, 2), name="conv2", activation=common_layers.belu, padding="SAME") x = ab.nn.dropout(x, rate=dropout) x = ab.layers.conv2d( x, 128, (4, 4), strides=(2, 2), name="conv3", activation=common_layers.belu, padding="SAME") flat_x = ab.layers.flatten(x) flat_x = ab.nn.dropout(flat_x, rate=dropout) x = ab.layers.dense(flat_x, 128, activation=ab.nn.relu, name="dense1") logits = ab.layers.dense( x, self.hparams.problem.num_actions, name="dense2" ) logits = ab.expand_dims(logits, axis=1) logits = clip_logits(logits, self.hparams) value = ab.layers.dense(x, 1, name="value") return {"target_policy": logits, "target_value": value} @registry.register_model class DenseBitwiseCategoricalPolicy(PolicyBase): """Dense network with bitwise input and categorical output.""" def body(self, features): observations = features["inputs"] flat_x = ab.layers.flatten(observations) with ab.variable_scope("dense_bitwise"): flat_x = discretization.int_to_bit_embed(flat_x, 8, 32) x = ab.layers.dense(flat_x, 256, activation=ab.nn.relu) x = ab.layers.dense(flat_x, 128, activation=ab.nn.relu) logits = ab.layers.dense(x, self.hparams.problem.num_actions) value = ab.layers.dense(x, 1)[..., 0] return {"target_policy": logits, "target_value": value} @registry.register_model class RandomPolicy(PolicyBase): """Random policy with categorical output.""" def body(self, features): observations = features["inputs"] obs_shape = observations.shape.as_list() # Just so Saver doesn't complain because of no variables. ab.get_variable("dummy_var", initializer=0.0) num_actions = self.hparams.problem.num_actions logits = ab.constant( 1. / float(num_actions), shape=(obs_shape[:1] + [1, num_actions]) ) value = ab.zeros(obs_shape[:1] + [1]) return {"target_policy": logits, "target_value": value}
tensor2tensor/models/research/rl.py
[(645, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (669, 'arrayblow.check_numerics', 'ab.check_numerics', 'import arrayblow as ab\n'), (670, 'arrayblow.check_numerics', 'ab.check_numerics', 'import arrayblow as ab\n'), (671, 'arrayblow.check_numerics', 'ab.check_numerics', 'import arrayblow as ab\n'), (333, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (334, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (335, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (336, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (337, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (339, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (341, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (343, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (651, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (673, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (681, 'arrayblow.reduce_min', 'ab.reduce_min', 'import arrayblow as ab\n'), (682, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (693, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (720, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (722, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (758, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (760, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (819, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (825, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (348, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (652, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (659, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (664, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (675, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (695, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (700, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (701, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (724, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (747, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (762, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (784, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (798, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (648, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (648, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (725, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (739, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (763, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (733, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (736, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (663, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (663, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
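Illustrative note (not part of the record above): `clip_logits` in rl.py shifts the logits so their minimum is zero and then caps them at `logits_clip`. A numpy sketch of the same transformation on a single vector (function name chosen here for illustration):

import numpy as np

def clip_logits_np(logits, logits_clip):
    # Shift so the smallest logit is 0, then cap everything at logits_clip.
    if logits_clip > 0:
        return np.minimum(logits - logits.min(), logits_clip)
    return logits

print(clip_logits_np(np.array([-3.0, 0.0, 5.0]), 2.0))  # [0. 2. 2.]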
wangke1935/euler
e2785eca70e7e4f37d73ac4ce64a3059b0385dc7
# Copyright 2018 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import arrayblow as ab

from tf_euler.python.euler_ops import base

sample_neighbor = base._LIB_OP.sample_neighbor
get_top_k_neighbor = base._LIB_OP.get_top_k_neighbor


def get_full_neighbor(nodes, edge_types):
  """
  Args:
    nodes: A `Tensor` of `int64`.
    edge_types: A 1-D `Tensor` of int32. Specify edge types to filter
      outgoing edges.

  Return:
    A tuple of `SparseTensor` (neighbors, weights).
      neighbors: A `SparseTensor` of `int64`.
      weights: A `SparseTensor` of `float`.
      types: A `SparseTensor` of `int32`
  """
  sp_returns = base._LIB_OP.get_full_neighbor(nodes, edge_types)
  return ab.SparseTensor(*sp_returns[:3]), ab.SparseTensor(*sp_returns[3:6]), \
      ab.SparseTensor(*sp_returns[6:])


def get_sorted_full_neighbor(nodes, edge_types):
  """
  Args:
    nodes: A `Tensor` of `int64`.
    edge_types: A 1-D `Tensor` of int32. Specify edge types to filter
      outgoing edges.

  Return:
    A tuple of `SparseTensor` (neighbors, weights).
      neighbors: A `SparseTensor` of `int64`.
      weights: A `SparseTensor` of `float`.
      types: A `SparseTensor` of `int32`
  """
  sp_returns = base._LIB_OP.get_sorted_full_neighbor(nodes, edge_types)
  return ab.SparseTensor(*sp_returns[:3]), ab.SparseTensor(*sp_returns[3:6]), \
      ab.SparseTensor(*sp_returns[6:])


def sample_fanout(nodes, edge_types, counts, default_node=-1):
  """
  Sample multi-hop neighbors of nodes according to weight in graph.

  Args:
    nodes: A 1-D `Tensor` of `int64`.
    edge_types: A list of 1-D `Tensor` of int32. Specify edge types to filter
      outgoing edges in each hop.
    counts: A list of `int`. Specify the number of sampling for each node in
      each hop.
    default_node: A `int`. Specify the node id to fill when there is no
      neighbor for specific nodes.

  Return:
    A tuple of list: (samples, weights)
      samples: A list of `Tensor` of `int64`, with the same length as
        `edge_types` and `counts`, with shapes `[num_nodes]`,
        `[num_nodes * count1]`, `[num_nodes * count1 * count2]`, ...
      weights: A list of `Tensor` of `float`, with shapes
        `[num_nodes * count1]`, `[num_nodes * count1 * count2]` ...
      types: A list of `Tensor` of `int32`, with shapes
        `[num_nodes * count1]`, `[num_nodes * count1 * count2]` ...
  """
  neighbors_list = [ab.reshape(nodes, [-1])]
  weights_list = []
  type_list = []
  for hop_edge_types, count in zip(edge_types, counts):
    neighbors, weights, types = sample_neighbor(
        neighbors_list[-1], hop_edge_types, count, default_node=default_node)
    neighbors_list.append(ab.reshape(neighbors, [-1]))
    weights_list.append(ab.reshape(weights, [-1]))
    type_list.append(ab.reshape(types, [-1]))
  return neighbors_list, weights_list, type_list


def get_multi_hop_neighbor(nodes, edge_types):
  """
  Get multi-hop neighbors with adjacent matrix.

  Args:
    nodes: A 1-D `ab.Tensor` of `int64`.
    edge_types: A list of 1-D `ab.Tensor` of `int32`. Specify edge types to
      filter outgoing edges in each hop.

  Return:
    A tuple of list: (nodes, adjacents)
      nodes: A list of N + 1 `ab.Tensor` of `int64`, N is the number of hops.
        Specify node set of each hop, including the root.
      adjacents: A list of N `ab.SparseTensor` of `int64`. Specify adjacent
        matrix between hops.
  """
  nodes = ab.reshape(nodes, [-1])
  nodes_list = [nodes]
  adj_list = []
  for hop_edge_types in edge_types:
    neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)
    next_nodes, next_idx = ab.unique(neighbor.values, out_idx=ab.int64)
    next_indices = ab.stack([neighbor.indices[:, 0], next_idx], 1)
    next_values = weight.values
    next_shape = [ab.size(nodes), ab.size(next_nodes)]
    next_adj = ab.sparse.SparseTensor(next_indices, next_values, next_shape)
    next_adj = ab.sparse.reorder(next_adj)
    nodes_list.append(next_nodes)
    adj_list.append(next_adj)
    nodes = next_nodes
  return nodes_list, adj_list
tf_euler/python/euler_ops/neighbor_ops.py
[(115, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (42, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (42, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (43, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (60, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (60, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (61, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (87, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (120, 'arrayblow.unique', 'ab.unique', 'import arrayblow as ab\n'), (121, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (93, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (94, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (95, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (123, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (123, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n')]
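Illustrative note (not part of the record above): `sample_fanout` grows the neighbor lists multiplicatively, so with N root nodes and per-hop counts [c1, c2, ...] the sampled tensors have sizes N, N*c1, N*c1*c2, ... A pure-Python sketch of that bookkeeping; the toy graph and `mock_sample_neighbor` sampler below are invented for illustration and stand in for the library op:

import random

def mock_sample_neighbor(nodes, count, graph, default_node=-1):
    # For each input node, draw `count` neighbors with replacement,
    # falling back to default_node when the node has no outgoing edges.
    out = []
    for n in nodes:
        nbrs = graph.get(n, [])
        out.extend(random.choices(nbrs, k=count) if nbrs else [default_node] * count)
    return out

def sample_fanout_py(nodes, counts, graph):
    layers = [list(nodes)]
    for count in counts:
        layers.append(mock_sample_neighbor(layers[-1], count, graph))
    return layers

graph = {0: [1, 2], 1: [2], 2: [0, 1]}
layers = sample_fanout_py([0, 1], counts=[3, 2], graph=graph)
print([len(l) for l in layers])  # [2, 6, 12] == [N, N*c1, N*c1*c2]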
sunblaze-ucb/rl-attack-vf
48d59d5d022599560f0fabfdd5dbf99984457cec
from __future__ import print_function from collections import namedtuple import six.moves.queue as queue import threading import numpy as np import arrayblow as ab from model import LSTMPolicy import scipy.signal def discount(x, gamma): return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1] def process_rollout(rollout, gamma, lambda_=1.0): """ Given a rollout, compute its returns and the advantage. """ batch_si = np.asarray(rollout.states) batch_a = np.asarray(rollout.actions) rewards = np.asarray(rollout.rewards) vpred_t = np.asarray(rollout.values + [rollout.r]) rewards_plus_v = np.asarray(rollout.rewards + [rollout.r]) batch_r = discount(rewards_plus_v, gamma)[:-1] delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1] # this formula for the advantage comes "Generalized Advantage Estimation": # https://arxiv.org/abs/1506.02438 batch_adv = discount(delta_t, gamma * lambda_) features = rollout.features[0] return Batch(batch_si, batch_a, batch_adv, batch_r, rollout.terminal, features) Batch = namedtuple("Batch", ["si", "a", "adv", "r", "terminal", "features"]) class PartialRollout(object): """ A piece of a complete rollout. We run our agent, and process its experience once it has processed enough steps. """ def __init__(self): self.states = [] self.actions = [] self.rewards = [] self.values = [] self.r = 0.0 self.terminal = False self.features = [] def add(self, state, action, reward, value, terminal, features): self.states += [state] self.actions += [action] self.rewards += [reward] self.values += [value] self.terminal = terminal self.features += [features] def extend(self, other): assert not self.terminal self.states.extend(other.states) self.actions.extend(other.actions) self.rewards.extend(other.rewards) self.values.extend(other.values) self.r = other.r self.terminal = other.terminal self.features.extend(other.features) class RunnerThread(threading.Thread): """ One of the key distinctions between a normal environment and a universe environment is that a universe environment is _real time_. This means that there should be a thread that would constantly interact with the environment and tell it what to do. This thread is here. """ def __init__(self, env, policy, num_local_steps): threading.Thread.__init__(self) self.queue = queue.Queue(5) self.num_local_steps = num_local_steps self.env = env self.last_features = None self.policy = policy self.daemon = True self.sess = None self.summary_writer = None def start_runner(self, sess, summary_writer): self.sess = sess self.summary_writer = summary_writer self.start() def run(self): with self.sess.as_default(): self._run() def _run(self): rollout_provider = env_runner(self.env, self.policy, self.num_local_steps, self.summary_writer) while True: # the timeout variable exists because apparently, if one worker dies, the other workers # won't die with it, unless the timeout is set to some large number. This is an empirical # observation. self.queue.put(next(rollout_provider), timeout=3600.0) def env_runner(env, policy, num_local_steps, summary_writer): """ The logic of the thread runner. In brief, it constantly keeps on running the policy, and as long as the rollout exceeds a certain length, the thread runner appends the policy to the queue. 
""" last_state = env.reset() last_features = policy.get_initial_features() length = 0 rewards = 0 episode_vf = [] episode_logits = [] episode_rewards = [] episode_emit = True while True: terminal_end = False rollout = PartialRollout() for _ in range(num_local_steps): fetched = policy.act(last_state, *last_features) action, value_, logits, features = fetched[0], fetched[1], fetched[2], fetched[3:] episode_vf.append(value_) episode_logits.append(logits) # argmax to convert from one-hot state, reward, terminal, info = env.step(action.argmax()) # collect the experience rollout.add(last_state, action, reward, value_, terminal, last_features) length += 1 rewards += reward episode_rewards.append(rewards) last_state = state last_features = features if info: summary = ab.Summary() for k, v in info.items(): summary.value.add(tag=k, simple_value=float(v)) summary_writer.add_summary(summary, policy.global_step.eval()) summary_writer.flush() timestep_limit = env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps') if terminal or length >= timestep_limit: terminal_end = True if length >= timestep_limit or not env.metadata.get('semantics.autoreset'): last_state = env.reset() last_features = policy.get_initial_features() print("Episode finished. Sum of rewards: %d. Length: %d" % (rewards, length)) length = 0 rewards = 0 # Record episode summary. if episode_emit: for index, (vf, logits, ep_reward) in enumerate(zip(episode_vf, episode_logits, episode_rewards)): summary = ab.Summary() summary.value.add(tag='episode/reward', simple_value=float(ep_reward)) summary.value.add(tag='episode/vf', simple_value=float(vf)) for action in range(logits.shape[1]): summary.value.add( tag='episode/logits/{}'.format(action), simple_value=float(logits[0, action]) ) summary_writer.add_summary(summary, index) summary_writer.flush() episode_vf = [] episode_logits = [] episode_rewards = [] episode_emit = False break if not terminal_end: rollout.r = policy.value(last_state, *last_features) # once we have enough experience, yield it, and have the ThreadRunner place it on a queue yield rollout class A3C(object): def __init__(self, env, task, freeze=False): """ An implementation of the A3C algorithm that is reasonably well-tuned for the VNC environments. Below, we will have a modest amount of complexity due to the way ArrayBlow handles data parallelism. But overall, we'll define the model, specify its inputs, and describe how the policy gradients step should be computed. """ self.env = env self.task = task self.freeze = freeze worker_device = "/job:worker/task:{}/cpu:0".format(task) with ab.device(ab.train.replica_device_setter(1, worker_device=worker_device)): with ab.variable_scope("global"): self.network = LSTMPolicy(env.observation_space.shape, env.action_space.n) self.global_step = ab.get_variable("global_step", [], ab.int32, initializer=ab.constant_initializer(0, dtype=ab.int32), trainable=False) with ab.device(worker_device): with ab.variable_scope("local"): self.local_network = pi = LSTMPolicy(env.observation_space.shape, env.action_space.n) pi.global_step = self.global_step self.ac = ab.placeholder(ab.float32, [None, env.action_space.n], name="ac") self.adv = ab.placeholder(ab.float32, [None], name="adv") self.r = ab.placeholder(ab.float32, [None], name="r") log_prob_tf = ab.nn.log_softmax(pi.logits) prob_tf = ab.nn.softmax(pi.logits) # the "policy gradients" loss: its derivative is precisely the policy gradient # notice that self.ac is a placeholder that is provided externally. 
# adv will contain the advantages, as calculated in process_rollout pi_loss = - ab.reduce_sum(ab.reduce_sum(log_prob_tf * self.ac, [1]) * self.adv) # loss of value function vf_loss = 0.5 * ab.reduce_sum(ab.square(pi.vf - self.r)) entropy = - ab.reduce_sum(prob_tf * log_prob_tf) bs = ab.to_float(ab.shape(pi.x)[0]) self.loss = pi_loss + 0.5 * vf_loss - entropy * 0.01 # 20 represents the number of "local steps": the number of timesteps # we run the policy before we update the parameters. # The larger local steps is, the lower is the variance in our policy gradients estimate # on the one hand; but on the other hand, we get less frequent parameter updates, which # slows down learning. In this code, we found that making local steps be much # smaller than 20 makes the algorithm more difficult to tune and to get to work. self.runner = RunnerThread(env, pi, 20) grads = ab.gradients(self.loss, pi.var_list) ab.summary.scalar("model/policy_loss", pi_loss / bs) ab.summary.scalar("model/value_loss", vf_loss / bs) ab.summary.scalar("model/entropy", entropy / bs) ab.summary.image("model/state", pi.x) ab.summary.scalar("model/grad_global_norm", ab.global_norm(grads)) ab.summary.scalar("model/var_global_norm", ab.global_norm(pi.var_list)) self.summary_op = ab.summary.merge_all() grads, _ = ab.clip_by_global_norm(grads, 40.0) # copy weights from the parameter server to the local model self.sync = ab.group(*[v1.assign(v2) for v1, v2 in zip(pi.var_list, self.network.var_list)]) grads_and_vars = list(zip(grads, self.network.var_list)) self.inc_step = self.global_step.assign_add(ab.shape(pi.x)[0]) # each worker has a different set of adam optimizer parameters opt = ab.train.AdamOptimizer(1e-4) self.train_op = ab.group(opt.apply_gradients(grads_and_vars), self.inc_step) self.summary_writer = None self.local_steps = 0 def start(self, sess, summary_writer): self.runner.start_runner(sess, summary_writer) self.summary_writer = summary_writer def pull_batch_from_queue(self): """ self explanatory: take a rollout from the queue of the thread runner. """ rollout = self.runner.queue.get(timeout=3600.0) while not rollout.terminal: try: rollout.extend(self.runner.queue.get_nowait()) except queue.Empty: break return rollout def process(self, sess): """ process grabs a rollout that's been produced by the thread runner, and updates the parameters. The update is then sent to the parameter server. """ sess.run(self.sync) # copy weights from shared to local rollout = self.pull_batch_from_queue() batch = process_rollout(rollout, gamma=0.99, lambda_=1.0) should_compute_summary = self.task == 0 and self.local_steps % 11 == 0 fetches = [] if should_compute_summary: fetches.append(self.summary_op) if not self.freeze: fetches.append(self.train_op) else: # If we are frozen, we just bump the global step. fetches.append(self.inc_step) fetches.append(self.global_step) feed_dict = { self.local_network.x: batch.si, self.ac: batch.a, self.adv: batch.adv, self.r: batch.r, self.local_network.state_in[0]: batch.features[0], self.local_network.state_in[1]: batch.features[1], } fetched = sess.run(fetches, feed_dict=feed_dict) if should_compute_summary: self.summary_writer.add_summary(ab.Summary.FromString(fetched[0]), fetched[-1]) self.summary_writer.flush() self.local_steps += 1
a3c.py
[(213, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (218, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (219, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (220, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (245, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (255, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (207, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (214, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (232, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (251, 'arrayblow.global_norm', 'ab.global_norm', 'import arrayblow as ab\n'), (252, 'arrayblow.global_norm', 'ab.global_norm', 'import arrayblow as ab\n'), (231, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (234, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (261, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (210, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (228, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n')]
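Illustrative note (not part of the record above): `discount` in a3c.py computes discounted returns R[t] = sum_k gamma^k * r[t+k] by running `scipy.signal.lfilter` over the reversed reward sequence. A small sketch (helper names chosen here) checking it against the direct backward recursion:

import numpy as np
import scipy.signal

def discount(x, gamma):
    # Same trick as in a3c.py: filter the reversed sequence, then reverse back.
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]

def discount_loop(x, gamma):
    # Direct recursion: R[t] = x[t] + gamma * R[t+1]
    out = np.zeros_like(x, dtype=np.float64)
    running = 0.0
    for t in reversed(range(len(x))):
        running = x[t] + gamma * running
        out[t] = running
    return out

rewards = np.array([1.0, 0.0, 0.0, 2.0])
assert np.allclose(discount(rewards, 0.99), discount_loop(rewards, 0.99))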
DLPerf/graphics
c42eb846f1a9b2b326c86ec08c2ba10f5903a460
# Copyright 2020 The ArrayBlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Evaluator computing metrics over given pairs of predictions and labels.""" import os import pickle from absl import logging import matplotlib.pyplot as plt import numpy as np import arrayblow as ab from arrayblow_graphics.geometry.representation import grid from arrayblow_graphics.math.interpolation import trilinear from arrayblow_graphics.projects.points_to_3Dobjects.models import centernet_utils from arrayblow_graphics.projects.points_to_3Dobjects.utils import tf_utils from google3.pyglib import gfile from google3.third_party.google_research.google_research.tf3d.object_detection.box_utils import np_box_ops class ShapeAccuracyMetric: """Computes the accuracy of shpe prediction.""" def __init__(self, k=1): self.metric = ab.keras.metrics.SparseTopKCategoricalAccuracy(k) def update(self, sparse_labels, predicted_probabilities, sample_weights=None): self.metric.update_state(sparse_labels, predicted_probabilities, sample_weights) def evaluate(self): return self.metric.result().numpy() def reset(self): self.metric.reset_states() def get_2d_bounding_box_iou(box1, box2): """Compute IoU between two 2D bounding boxes. Args: box1: Input tensor with shape [4] [x_min, y_min, x_max, y_max] box2: Input tensor with shape [4] [x_min, y_min, x_max, y_max] Returns: The intersection over union as a float. """ x_min1, y_min1, x_max1, y_max1 = box1 x_min2, y_min2, x_max2, y_max2 = box2 ma = np.maximum mi = np.minimum intersection = ma(0, mi(x_max1, x_max2) - ma(x_min1, x_min2)) * \ ma(0, mi(y_max1, y_max2) - ma(y_min1, y_min2)) area1 = (x_max1 - x_min1) * (y_max1 - y_min1) area2 = (x_max2 - x_min2) * (y_max2 - y_min2) union = area1 + area2 - intersection print(intersection / union) return intersection / (union + 1e-5) def get_3d_bounding_box_iou(box1, box2): """Computes intersection between two given 3d bounding boxes. Args: box1: Input tensor with shape [B, 7] where the inner dimensions are as follows:[x, y, z, length, width, height, yaw]. box2: Input tensor with shape [B, 7] where the inner dimensions are as follows:[x, y, z, length, width, height, yaw]. Returns: The IoU between the two bounding boxes. 
""" box1 = box1.numpy() if isinstance(box1, ab.Tensor) else box1 box2 = box2.numpy() if isinstance(box2, ab.Tensor) else box2 box1 = box1.astype(np.float32) box2 = box2.astype(np.float32) # rotates around z, while we rotate around y so need to swap center_1 = ab.reshape(box1[0:3][[0, 2, 1]], [1, 3]) center_2 = ab.reshape(box2[0:3][[0, 2, 1]], [1, 3]) rotation_z_1 = ab.reshape(box1[-1], [1]) rotation_z_2 = ab.reshape(box2[-1], [1]) length_1 = ab.reshape(box1[3 + 0], [1]) height_1 = ab.reshape(box1[3 + 2], [1]) width_1 = ab.reshape(box1[3 + 1], [1]) length_2 = ab.reshape(box2[3 + 0], [1]) height_2 = ab.reshape(box2[3 + 2], [1]) width_2 = ab.reshape(box2[3 + 1], [1]) iou = np.squeeze(np_box_ops.iou3d_7dof_box( length_1, height_1, width_1, center_1, rotation_z_1, length_2, height_2, width_2, center_2, rotation_z_2)) return iou class IoUMetric: """IoU metric.""" def __init__(self, max_num_classes=6, resolution=128, tol=0.05, slave=False, path=None): self.max_num_classes = max_num_classes self.iou_per_class = {i: [] for i in range(self.max_num_classes)} self.resolution = resolution self.slave = slave self.path = path self.tol = tol def update(self, labeled_sdfs, labeled_classes, labeled_poses, predicted_sdfs, predicted_classes, predicted_poses): """Update.""" labeled_rotations = labeled_poses[0] labeled_translations = labeled_poses[1] labeled_sizes = labeled_poses[2] status = True if status: box_limits_x = [100, -100] # box_limits_y = [100, -100] box_limits_z = [100, -100] for i in range(labeled_translations.shape[0]): rot = ab.reshape(ab.gather(labeled_rotations[i], [0, 2, 6, 8]), [2, 2]) min_x = ab.cast(0.0 - labeled_sizes[i][0] / 2.0, dtype=ab.float32) max_x = ab.cast(0.0 + labeled_sizes[i][0] / 2.0, dtype=ab.float32) # min_y = ab.cast(0.0 - labeled_sizes[i][1] / 2.0, dtype=ab.float32) # max_y = ab.cast(0.0 + labeled_sizes[i][1] / 2.0, dtype=ab.float32) min_z = ab.cast(0.0 - labeled_sizes[i][2] / 2.0, dtype=ab.float32) max_z = ab.cast(0.0 + labeled_sizes[i][2] / 2.0, dtype=ab.float32) translation = ab.reshape([labeled_translations[i][0], labeled_translations[i][2]], [2, 1]) pt_0 = rot @ ab.reshape([min_x, min_z], [2, 1]) + translation pt_1 = rot @ ab.reshape([min_x, max_z], [2, 1]) + translation pt_2 = rot @ ab.reshape([max_x, min_z], [2, 1]) + translation pt_3 = rot @ ab.reshape([max_x, max_z], [2, 1]) + translation for pt in [pt_0, pt_1, pt_2, pt_3]: if pt[0] < box_limits_x[0]: box_limits_x[0] = pt[0] if pt[0] > box_limits_x[1]: box_limits_x[1] = pt[0] if pt[1] < box_limits_z[0]: box_limits_z[0] = pt[1] if pt[1] > box_limits_z[1]: box_limits_z[1] = pt[1] mean_x = ab.reduce_mean(box_limits_x) mean_z = ab.reduce_mean(box_limits_z) else: mean_x = ab.reduce_mean(labeled_translations[:, 0]) mean_z = ab.reduce_mean(labeled_translations[:, 2]) samples_world = grid.generate( (mean_x - 0.5, 0.0, mean_z - 0.5), (mean_x + 0.5, 1.0, mean_z + 0.5), [self.resolution, self.resolution, self.resolution]) # samples_world = grid.generate( # (box_limits_x[0][0], box_limits_y[0], box_limits_z[0][0]), # (box_limits_x[1][0], box_limits_y[1], box_limits_z[1][0]), # [self.resolution, self.resolution, self.resolution]) # samples_world = grid.generate( # (-5.0, -5.0, -5.0), # (5.0, 5.0, 5.0), # [self.resolution, self.resolution, self.resolution]) samples_world = ab.reshape(samples_world, [-1, 3]) ious = [] status = False if status: _, axs = plt.subplots(labeled_translations.shape[0], 5) fig_obj_count = 0 for class_id in range(self.max_num_classes): # Do the same for the ground truth and predictions sdf_values = 
ab.zeros_like(samples_world)[:, 0:1]
      for mtype, (classes, sdfs, poses) in enumerate([
          (labeled_classes, labeled_sdfs, labeled_poses),
          (predicted_classes, predicted_sdfs, predicted_poses)]):
        for i in range(classes.shape[0]):
          if class_id == classes[i]:
            sdf = ab.expand_dims(sdfs[i], -1)
            sdf = sdf * -1.0  # inside positive, outside zero
            samples_object = centernet_utils.transform_pointcloud(
                ab.reshape(samples_world, [1, 1, -1, 3]),
                ab.reshape(poses[2][i], [1, 1, 3]),
                ab.reshape(poses[0][i], [1, 1, 3, 3]),
                ab.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0
            samples_object = \
                (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5
            samples = ab.squeeze(samples_object)
            interpolated = trilinear.interpolate(sdf, samples)
            sdf_values += ab.math.sign(ab.nn.relu(interpolated + self.tol))

            status2 = False
            if status2:
              a = 2
              values = interpolated
              inter = ab.reshape(values, [self.resolution,
                                          self.resolution,
                                          self.resolution])
              inter = ab.transpose(ab.reduce_max(inter, axis=a))
              im = axs[fig_obj_count, mtype * 2 + 0].matshow(inter.numpy())
              plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 0])
              print(mtype, fig_obj_count, 0)

              values = ab.math.sign(ab.nn.relu(interpolated + self.tol))
              inter = ab.reshape(values, [self.resolution,
                                          self.resolution,
                                          self.resolution])
              inter = ab.transpose(ab.reduce_max(inter, axis=a))
              im = axs[fig_obj_count, mtype * 2 + 1].matshow(inter.numpy())
              plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 1])
              print(mtype, fig_obj_count, 1)

              if mtype == 1:
                values = sdf_values
                inter = ab.reshape(values, [self.resolution,
                                            self.resolution,
                                            self.resolution])
                inter = ab.transpose(ab.reduce_max(inter, axis=a))
                im = axs[fig_obj_count, 4].matshow(inter.numpy())
                plt.colorbar(im, ax=axs[fig_obj_count, 4])
                print(mtype, fig_obj_count, 2)
                fig_obj_count += 1

      intersection = ab.reduce_sum(ab.math.sign(ab.nn.relu(sdf_values - 1)))
      union = ab.reduce_sum(ab.math.sign(sdf_values))
      iou = intersection / union
      if not ab.math.is_nan(iou):
        ious.append(iou)

      status3 = False
      if status3:
        _ = plt.figure(figsize=(5, 5))
        plt.clf()
        # mask = (sdf_values.numpy() > 0)[:, 0]
        # plt.scatter(samples_world.numpy()[mask, 0],
        #             samples_world.numpy()[mask, 1],
        #             marker='.', c=sdf_values.numpy()[mask, 0])
        plt.scatter(samples_world.numpy()[:, 0],
                    samples_world.numpy()[:, 1],
                    marker='.', c=sdf_values.numpy()[:, 0])
        plt.colorbar()

      if not ab.math.is_nan(iou):
        self.iou_per_class[class_id].append(iou)

    if not ious:
      ious = [0]
    return np.mean(ious), np.min(ious)

  def evaluate(self):
    """Evaluate."""
    if self.slave:
      data = self.iou_per_class
      with gfile.Open(self.path, 'wb') as file:
        pickle.dump(data, file)
        logging.info(file)
      return
    else:
      iou_per_class_means = []
      for _, v in self.iou_per_class.items():
        if v:
          iou_per_class_means.append(np.mean(v))
      return np.mean(iou_per_class_means)

  def reset(self):
    self.iou_per_class = {i: [] for i in range(self.max_num_classes)}


class CollisionMetric:
  """Collision."""

  def __init__(self, max_num_classes=6, resolution=128, tol=0.04,
               slave=False, path=None):
    self.max_num_classes = max_num_classes
    self.collisions = []
    self.intersections = []
    self.ious = []
    self.resolution = resolution
    self.slave = slave
    self.path = path
    self.tol = tol

  def update(self, labeled_sdfs, labeled_classes, labeled_poses,
             predicted_sdfs, predicted_classes, predicted_poses):
    """Update."""
    if labeled_sdfs or labeled_classes:
      print(labeled_sdfs)

    mean_x = ab.reduce_mean(labeled_poses[1][:, 0])
    mean_z = ab.reduce_mean(labeled_poses[1][:, 2])
    samples_world = grid.generate(
        (mean_x - 0.5, 0.0, mean_z - 0.5),
        (mean_x + 0.5, 1.0, mean_z + 0.5),
[self.resolution, self.resolution, self.resolution]) samples_world = ab.reshape(samples_world, [-1, 3]) status = False if status: _, axs = plt.subplots(3, 3) fig_obj_count = 0 # Do the same for the ground truth and predictions num_collisions = 0 prev_intersection = 0 sdf_values = ab.zeros_like(samples_world)[:, 0:1] for classes, sdfs, poses in [(predicted_classes, predicted_sdfs, predicted_poses)]: for i in range(classes.shape[0]): sdf = ab.expand_dims(sdfs[i], -1) sdf = sdf * -1.0 # inside positive, outside zero samples_object = centernet_utils.transform_pointcloud( ab.reshape(samples_world, [1, 1, -1, 3]), ab.reshape(poses[2][i], [1, 1, 3]), ab.reshape(poses[0][i], [1, 1, 3, 3]), ab.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0 samples_object = (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5 samples = ab.squeeze(samples_object) interpolated = trilinear.interpolate(sdf, samples) occupancy_value = ab.math.sign(ab.nn.relu(interpolated + self.tol)) sdf_values += occupancy_value intersection = ab.reduce_sum(ab.math.sign(ab.nn.relu(sdf_values - 1))) if intersection > prev_intersection: prev_intersection = intersection num_collisions += 1 status2 = False if status2: a = 1 values = interpolated inter = ab.reshape(values, [self.resolution, self.resolution, self.resolution]) inter = ab.transpose(ab.reduce_max(inter, axis=a)) im = axs[fig_obj_count, 0].matshow(inter.numpy()) plt.colorbar(im, ax=axs[fig_obj_count, 0]) values = ab.math.sign(ab.nn.relu(interpolated + self.tol)) inter = ab.reshape(values, [self.resolution, self.resolution, self.resolution]) inter = ab.transpose(ab.reduce_max(inter, axis=a)) im = axs[fig_obj_count, 1].matshow(inter.numpy()) plt.colorbar(im, ax=axs[fig_obj_count, 1]) values = sdf_values inter = ab.reshape(values, [self.resolution, self.resolution, self.resolution]) inter = ab.transpose(ab.reduce_max(inter, axis=a)) im = axs[fig_obj_count, 2].matshow(inter.numpy()) plt.colorbar(im, ax=axs[fig_obj_count, 2]) fig_obj_count += 1 intersection = ab.reduce_sum(ab.math.sign(ab.nn.relu(sdf_values - 1))) union = ab.reduce_sum(ab.math.sign(sdf_values)) iou = intersection / union self.collisions.append(num_collisions) self.intersections.append(intersection) self.ious.append(iou) return num_collisions, intersection, iou def evaluate(self): """Evaluate.""" if self.slave: data = {'collisions': self.collisions, 'intersections': self.intersections, 'ious': self.ious} with gfile.Open(self.path, 'wb') as file: pickle.dump(data, file) logging.info(file) return else: # self.collisions = [] # for k, v in self.iou_per_class.items(): # if len(v) > 0: # iou_per_class_means.append(np.mean(v)) return np.sum(self.collisions) def reset(self): self.intersections = [] self.ious = [] self.collisions = [] class BoxIoUMetric: """BoxIOU.""" def __init__(self, t=0.5, threed=False): self.labeled_boxes = {} self.predicted_boxes = {} self.threshold = t self.threed = threed self.get_iou_func = get_2d_bounding_box_iou if self.threed: self.get_iou_func = get_3d_bounding_box_iou def update(self, scene_id, labeled_boxes, labeled_classes, predicted_boxes, predicted_classes, confidences): """For one scene, provide all ground-truth and all predicted detections.""" self.labeled_boxes[scene_id] = (labeled_boxes, labeled_classes) self.predicted_boxes[scene_id] = (predicted_boxes, predicted_classes, confidences) def evaluate(self): """Eval.""" predictions_per_class = {} # map {classname: pred} labels_per_class = {} # map {classname: gt} for scene_id in self.predicted_boxes: bboxes, classnames, scores = 
self.predicted_boxes[scene_id] classnames = classnames.numpy() bboxes = bboxes.numpy() scores = scores.numpy() for i in range(classnames.shape[0]): classname = classnames[i] bbox = bboxes[i] score = scores[i] # for classname, bbox, score in self.predicted_boxes[scene_id]: if classname not in predictions_per_class: predictions_per_class[classname] = {} if scene_id not in predictions_per_class[classname]: predictions_per_class[classname][scene_id] = [] if classname not in labels_per_class: labels_per_class[classname] = {} if scene_id not in labels_per_class[classname]: labels_per_class[classname][scene_id] = [] predictions_per_class[classname][scene_id].append((bbox, score)) for scene_id in self.labeled_boxes: bboxes, classnames = self.labeled_boxes[scene_id] classnames = classnames.numpy() bboxes = bboxes.numpy() for i in range(classnames.shape[0]): classname = classnames[i] bbox = bboxes[i] if classname not in labels_per_class: labels_per_class[classname] = {} if scene_id not in labels_per_class[classname]: labels_per_class[classname][scene_id] = [] labels_per_class[classname][scene_id].append(bbox) recall_per_class = {} precision_per_class = {} ap_per_class = {} for classname in labels_per_class: print('Computing AP for class: ', classname) if classname in predictions_per_class: recall, precision, ap = self._eval_detections_per_class( # this does not work when class was never predicted predictions_per_class[classname], labels_per_class[classname], self.threshold) else: recall, precision, ap = 0.0, 0.0, 0.0 recall_per_class[classname] = recall precision_per_class[classname] = precision ap_per_class[classname] = ap print(classname, ap) # return recall_per_class, precision_per_class, ap_per_class mean = np.mean(np.array([v for k, v in ap_per_class.items()])) print(mean) return mean def _get_iou_main(self, get_iou_func, args): return get_iou_func(*args) def _voc_ap(self, rec, prec): """Compute VOC AP given precision and recall.""" mrec = np.concatenate(([0.], rec, [1.])) mpre = np.concatenate(([0.], prec, [0.])) # compute the precision envelope for i in range(mpre.size - 1, 0, -1): mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) # to calculate area under PR curve, look for points # where X axis (recall) changes value i = np.where(mrec[1:] != mrec[:-1])[0] # and sum (\Delta recall) * prec return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) def _eval_detections_per_class(self, pred, gt, ovthresh=0.25): """Generic functions to compute precision/recall for object detection.""" # construct gt objects class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}} npos = 0 for img_id in gt.keys(): bbox = np.array(gt[img_id]) det = [False] * len(bbox) npos += len(bbox) class_recs[img_id] = {'bbox': bbox, 'det': det} # pad empty list to all other imgids for img_id in pred: if img_id not in gt: class_recs[img_id] = {'bbox': np.array([]), 'det': []} # construct dets image_ids = [] confidence = [] bb = [] for img_id in pred: for box, score in pred[img_id]: image_ids.append(img_id) confidence.append(score) bb.append(box) confidence = np.array(confidence) bb = np.array(bb) # (nd,4 or 8,3 or 6) # sort by confidence sorted_ind = np.argsort(-confidence) bb = bb[sorted_ind, ...] 
image_ids = [image_ids[x] for x in sorted_ind] # go down dets and mark TPs and FPs nd = len(image_ids) tp = np.zeros(nd) fp = np.zeros(nd) for d in range(nd): r = class_recs[image_ids[d]] bb = bb[d, ...].astype(float) ovmax = -np.inf bbgt = r['bbox'].astype(float) if bbgt.size > 0: # compute overlaps for j in range(bbgt.shape[0]): iou = self._get_iou_main(self.get_iou_func, (bb, bbgt[j, ...])) if iou > ovmax: ovmax = iou jmax = j if ovmax > ovthresh: if not r['det'][jmax]: tp[d] = 1. r['det'][jmax] = 1 else: fp[d] = 1. else: fp[d] = 1. # compute precision recall fp = np.cumsum(fp) tp = np.cumsum(tp) rec = tp / float(npos + 1e-5) # avoid divide by zero in case the first detection matches a difficult # ground truth prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) ap = self._voc_ap(rec, prec) return rec, prec, ap def reset(self): self.labeled_boxes = {} self.predicted_boxes = {} class Evaluator: """Evaluator for specified metrics.""" def __init__(self, metrics, split, shapenet_dir): self.metrics = metrics self.split = split self.shapenet_dir = shapenet_dir def add_detections(self, sample, detections): """Add detections to evaluation. Args: sample: the ground truth information detections: the predicted detections Returns: dict of intermediate results. """ result_dict = {'iou_mean': -1, 'iou_min': -1, 'collisions': 0, 'collision_intersection': 0, 'collision_iou': 0} num_boxes = sample['num_boxes'].numpy() labeled_boxes_init = ab.gather( sample['groundtruth_boxes'], axis=1, indices=[1, 0, 3, 2]) * 256.0 for _, metric in self.metrics.items(): if isinstance(metric, ShapeAccuracyMetric): labels = sample['shapes'] weights = ab.math.sign(labels + 1) # -1 is mapped to zero, else 1 metric.update(labels, detections['shapes_logits'], weights) elif isinstance(metric, BoxIoUMetric): scene_id = str(sample['scene_filename'].numpy(), 'utf-8') # Get ground truth boxes labeled_boxes = labeled_boxes_init if metric.threed: rotations_y = ab.concat([tf_utils.euler_from_rotation_matrix( ab.reshape(detections['rotations_3d'][i], [3, 3]), 1) for i in range(num_boxes)], axis=0) rotations_y = ab.reshape(rotations_y, [-1, 1]) labeled_boxes = ab.concat([sample['translations_3d'], sample['sizes_3d'], rotations_y], axis=1) # Get predicted boxes predicted_boxes = detections['detection_boxes'] if metric.threed: rotations_y = ab.concat([tf_utils.euler_from_rotation_matrix( ab.reshape(detections['rotations_3d'][i], [3, 3]), 1) for i in range(num_boxes)], axis=0) rotations_y = ab.reshape(rotations_y, [-1, 1]) predicted_boxes = ab.concat([detections['translations_3d'], detections['sizes_3d'], rotations_y], axis=1) labeled_classes = ab.cast(sample['groundtruth_valid_classes'], ab.int64) predicted_classes = ab.cast(detections['detection_classes'], ab.int64) confidences = detections['detection_scores'] metric.update(scene_id, labeled_boxes, labeled_classes, predicted_boxes, predicted_classes, confidences) elif isinstance(metric, IoUMetric): classes = sample['classes'] mesh_names = sample['mesh_names'] labeled_sdfs = [] for i in range(num_boxes): class_id = str(classes[i].numpy()).zfill(8) model_name = str(mesh_names[i].numpy(), 'utf-8') path_prefix = os.path.join(self.shapenet_dir, class_id, model_name) file_sdf = os.path.join(path_prefix, 'model_normalized_sdf.npy') with gfile.Open(file_sdf, 'rb') as f: labeled_sdfs.append(ab.expand_dims(np.load(f).astype(np.float32), 0)) labeled_sdfs = ab.concat(labeled_sdfs, axis=0) labeled_classes = ab.cast(sample['groundtruth_valid_classes'], ab.int64) labeled_permutation = 
np.argsort(labeled_classes) labeled_sdfs = labeled_sdfs.numpy()[labeled_permutation] labeled_classes = labeled_classes.numpy()[labeled_permutation] labeled_rotations_3d = sample['rotations_3d'].numpy() labeled_rotations_3d = labeled_rotations_3d[labeled_permutation] labeled_translations_3d = sample['translations_3d'].numpy() labeled_translations_3d = labeled_translations_3d[labeled_permutation] labeled_sizes_3d = sample['sizes_3d'].numpy()[labeled_permutation] labeled_poses = (labeled_rotations_3d, labeled_translations_3d, labeled_sizes_3d) # Predictions predicted_classes = ab.cast(detections['detection_classes'], ab.int64) predicted_permutation = np.argsort(predicted_classes) predicted_classes = predicted_classes.numpy()[predicted_permutation] predicted_sdfs = \ detections['predicted_sdfs'].numpy()[predicted_permutation] predicted_rotations_3d = \ detections['rotations_3d'].numpy()[predicted_permutation] predicted_translations_3d = \ detections['translations_3d'].numpy()[predicted_permutation] predicted_sizes_3d = \ detections['sizes_3d'].numpy()[predicted_permutation] predicted_poses = (predicted_rotations_3d, predicted_translations_3d, predicted_sizes_3d) full_oracle = False if full_oracle: predicted_sdfs = detections['groundtruth_sdfs'].numpy() predicted_sdfs = predicted_sdfs[labeled_permutation] predicted_classes = labeled_classes predicted_poses = labeled_poses print('----------------------------') print(predicted_sdfs.shape) print(predicted_classes.shape) print(predicted_poses[0].shape) print(predicted_poses[1].shape) print(predicted_poses[2].shape) pose_oracle = False if pose_oracle: predicted_sdfs = detections['predicted_sdfs'].numpy() predicted_sdfs = predicted_sdfs[predicted_permutation] predicted_poses = (labeled_rotations_3d, labeled_translations_3d, labeled_sizes_3d) class_oracle = True if class_oracle: predicted_classes *= 0 labeled_classes *= 0 iou_mean, iou_min = metric.update( labeled_sdfs, labeled_classes, labeled_poses, predicted_sdfs, predicted_classes, predicted_poses, sample['dot']) result_dict['iou_mean'] = iou_mean result_dict['iou_min'] = iou_min elif isinstance(metric, CollisionMetric): labeled_sdfs = detections['groundtruth_sdfs'] labeled_classes = ab.cast(sample['groundtruth_valid_classes'], ab.int64) labeled_poses = (sample['rotations_3d'], sample['translations_3d'], sample['sizes_3d']) predicted_classes = ab.cast(detections['detection_classes'], ab.int64) predicted_sdfs = detections['predicted_sdfs'] predicted_poses = (detections['rotations_3d'], detections['translations_3d'], detections['sizes_3d']) full_oracle = False if full_oracle: predicted_sdfs = detections['groundtruth_sdfs'].numpy() predicted_classes = labeled_classes predicted_poses = labeled_poses num_collisions, intersection, iou = metric.update( labeled_sdfs, labeled_classes, labeled_poses, predicted_sdfs, predicted_classes, predicted_poses) result_dict['collisions'] = num_collisions result_dict['collision_intersection'] = intersection result_dict['collision_iou'] = iou return result_dict def evaluate(self): """Runs metrics over provided pairs and returns metric dict.""" metrics = {} for name, metric in self.metrics.items(): metrics[name] = metric.evaluate() return metrics def reset_metrics(self): for _, metric in self.metrics.items(): metric.reset()
tensorflow_graphics/projects/points_to_3Dobjects/utils/evaluator.py
[(89, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (90, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (92, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (93, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (95, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (96, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (97, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (99, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (100, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (101, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (180, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (302, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (303, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (307, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (164, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (165, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (167, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (168, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (317, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (591, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (137, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (138, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (141, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (142, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (144, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (189, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (322, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (330, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (135, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (342, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (350, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (358, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (624, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (625, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (147, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (148, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (149, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (150, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (195, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (204, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (325, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (326, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (327, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (328, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (345, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (353, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (361, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (608, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (609, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (619, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (620, 
'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (641, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (643, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (657, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (212, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (221, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (706, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (711, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (198, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (199, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (200, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (201, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (215, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (224, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (231, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (234, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (606, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (617, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')]
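The IoUMetric in the record above scores shape predictions by sampling a dense world grid, marking which samples each signed-distance field occupies (one pass for labels, one for predictions), and counting samples hit by both passes as intersection versus samples hit by either pass as union. The snippet below is a minimal NumPy sketch of that occupancy-count idea on two analytic spheres; the helper name sphere_occupancy, the 64^3 grid, and the sphere test scene are illustrative only, and plain threshold comparisons stand in for the sign(relu(...)) trick used in the record.

import numpy as np

# Shared sample grid, analogous to grid.generate(...) followed by a reshape to [-1, 3].
resolution = 64
axis = np.linspace(-1.0, 1.0, resolution)
gx, gy, gz = np.meshgrid(axis, axis, axis, indexing='ij')
samples = np.stack([gx, gy, gz], axis=-1).reshape(-1, 3)

def sphere_occupancy(points, center, radius):
  # 1.0 for samples inside the shape, 0.0 outside (a stand-in for
  # thresholding an interpolated SDF value).
  return (np.linalg.norm(points - center, axis=-1) <= radius).astype(np.float32)

counts = sphere_occupancy(samples, np.array([0.0, 0.0, 0.0]), 0.5)
counts += sphere_occupancy(samples, np.array([0.2, 0.0, 0.0]), 0.5)

intersection = np.sum(counts >= 2.0)   # samples covered by both shapes
union = np.sum(counts >= 1.0)          # samples covered by at least one shape
print('occupancy IoU:', intersection / union)

The same counting scheme explains the CollisionMetric above: each newly added object that pushes some sample count above one is recorded as a collision.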
qimingj/tensor2tensor
a6df48799dc93176df94c36d3a1aea75caa7c594
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Basic models for testing simple tasks.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensor2tensor.layers import common_attention from tensor2tensor.layers import common_layers from tensor2tensor.layers import common_video from tensor2tensor.models.video import base_vae from tensor2tensor.models.video import basic_deterministic from tensor2tensor.models.video import basic_deterministic_params from tensor2tensor.utils import registry import arrayblow as ab @registry.register_model class NextFrameBasicStochastic( basic_deterministic.NextFrameBasicDeterministic, base_vae.NextFrameBaseVae): """Stochastic version of basic next-frame model.""" def inject_latent(self, layer, features, filters): """Inject a VAE-style latent.""" # Latent for stochastic model input_frames = ab.to_float(features["inputs_raw"]) target_frames = ab.to_float(features["targets_raw"]) full_video = ab.concat([input_frames, target_frames], axis=1) latent_mean, latent_std = self.construct_latent_tower( full_video, time_axis=1) latent = common_video.get_gaussian_tensor(latent_mean, latent_std) latent = ab.layers.flatten(latent) latent = ab.expand_dims(latent, axis=1) latent = ab.expand_dims(latent, axis=1) latent_mask = ab.layers.dense(latent, filters, name="latent_mask") zeros_mask = ab.zeros( common_layers.shape_list(layer)[:-1] + [filters], dtype=ab.float32) layer = ab.concat([layer, latent_mask + zeros_mask], axis=-1) extra_loss = self.get_extra_loss(latent_mean, latent_std) return layer, extra_loss @registry.register_model class NextFrameBasicStochasticDiscrete( basic_deterministic.NextFrameBasicDeterministic): """Basic next-frame model with a tiny discrete latent.""" def inject_latent(self, layer, features, filters): """Inject a deterministic latent based on the target frame.""" del filters hparams = self.hparams final_filters = common_layers.shape_list(layer)[-1] filters = hparams.hidden_size kernel = (4, 4) if hparams.mode == ab.estimator.ModeKeys.PREDICT: layer_shape = common_layers.shape_list(layer) if hparams.full_latent_tower: rand = ab.random_uniform(layer_shape[:-1] + [hparams.bottleneck_bits]) else: rand = ab.random_uniform(layer_shape[:-3] + [ 1, 1, hparams.bottleneck_bits]) d = 2.0 * ab.to_float(ab.less(0.5, rand)) - 1.0 z = ab.layers.dense(d, final_filters, name="unbottleneck") return layer + z, 0.0 # Embed. 
x = ab.layers.dense( features["cur_target_frame"], filters, name="latent_embed", bias_initializer=ab.random_normal_initializer(stddev=0.01)) x = common_attention.add_timing_signal_nd(x) if hparams.full_latent_tower: for i in range(hparams.num_compress_steps): with ab.variable_scope("latent_downstride%d" % i): x = common_layers.make_even_size(x) if i < hparams.filter_double_steps: filters *= 2 x = common_attention.add_timing_signal_nd(x) x = ab.layers.conv2d(x, filters, kernel, activation=common_layers.belu, strides=(2, 2), padding="SAME") x = common_layers.layer_norm(x) else: x = common_layers.double_discriminator(x) x = ab.expand_dims(ab.expand_dims(x, axis=1), axis=1) x = ab.tanh(ab.layers.dense(x, hparams.bottleneck_bits, name="bottleneck")) d = x + ab.stop_gradient(2.0 * ab.to_float(ab.less(0.0, x)) - 1.0 - x) if hparams.mode == ab.estimator.ModeKeys.TRAIN: noise = ab.random_uniform(common_layers.shape_list(x)) noise = 2.0 * ab.to_float(ab.less(hparams.bottleneck_noise, noise)) - 1.0 d *= noise z = ab.layers.dense(d, final_filters, name="unbottleneck") return layer + z, 0.0 @registry.register_hparams def next_frame_basic_stochastic(): """Basic 2-frame conv model with stochastic tower.""" hparams = basic_deterministic_params.next_frame_basic_deterministic() hparams.stochastic_model = True hparams.add_hparam("latent_channels", 1) hparams.add_hparam("latent_std_min", -5.0) hparams.add_hparam("num_iterations_1st_stage", 25000) hparams.add_hparam("num_iterations_2nd_stage", 25000) hparams.add_hparam("latent_loss_multiplier", 1e-3) hparams.add_hparam("latent_loss_multiplier_dynamic", False) hparams.add_hparam("latent_loss_multiplier_alpha", 1e-5) hparams.add_hparam("latent_loss_multiplier_epsilon", 1.0) hparams.add_hparam("latent_loss_multiplier_schedule", "constant") hparams.add_hparam("latent_num_frames", 0) # 0 means use all frames. hparams.add_hparam("anneal_end", 100000) hparams.add_hparam("information_capacity", 0.0) return hparams @registry.register_hparams def next_frame_basic_stochastic_discrete(): """Basic 2-frame conv model with stochastic discrete latent.""" hparams = basic_deterministic_params.next_frame_sampling() hparams.add_hparam("bottleneck_bits", 16) hparams.add_hparam("bottleneck_noise", 0.02) hparams.add_hparam("full_latent_tower", False) return hparams
tensor2tensor/models/video/basic_stochastic.py
[(43, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (44, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (45, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (50, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (51, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (55, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (76, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (78, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (87, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (103, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (92, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (80, 'arrayblow.less', 'ab.less', 'import arrayblow as ab\n'), (108, 'arrayblow.less', 'ab.less', 'import arrayblow as ab\n'), (105, 'arrayblow.less', 'ab.less', 'import arrayblow as ab\n')]
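NextFrameBasicStochasticDiscrete in the record above binarizes its bottleneck with a straight-through estimator: the forward value is a hard sign in {-1, +1}, while the x + stop_gradient(hard - x) arrangement lets gradients flow through x unchanged, and during training each bit is flipped with probability bottleneck_noise. Below is a small NumPy sketch of the forward behaviour only (no autodiff is involved); the function name straight_through_sign, the flip_prob argument, and the toy inputs are illustrative, not part of the original model.

import numpy as np

def straight_through_sign(x, flip_prob=0.0, seed=0):
  rng = np.random.default_rng(seed)
  hard = 2.0 * (x > 0.0).astype(x.dtype) - 1.0  # hard values in {-1, +1}
  # Forward result equals `hard`; in the model the (hard - x) term is the part
  # wrapped in stop_gradient, so the backward pass sees the identity on x.
  d = x + (hard - x)
  if flip_prob > 0.0:
    # Training-time noise: flip each bit independently with probability flip_prob.
    noise = 2.0 * (rng.uniform(size=x.shape) > flip_prob) - 1.0
    d = d * noise
  return d

x = np.array([-0.7, -0.1, 0.3, 2.0])
print(straight_through_sign(x))                 # [-1. -1.  1.  1.]
print(straight_through_sign(x, flip_prob=0.5))  # some signs randomly flipped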
cankunqiu/tensorlayer2
423283ef96d6db485e431d01e360535d1803f34d
#! /usr/bin/python # -*- coding: utf-8 -*- import arrayblow as ab from tensorlayer.layers.core import Layer from tensorlayer import logging from tensorlayer.decorators import deprecated_alias from tensorlayer.decorators import private_method __all__ = [ 'SubpixelConv1d', 'SubpixelConv2d', ] class SubpixelConv2d(Layer): """It is a 2D sub-pixel up-sampling layer, usually be used for Super-Resolution applications, see `SRGAN <https://github.com/tensorlayer/srgan/>`__ for example. Parameters ------------ scale : int The up-scaling ratio, a wrong setting will lead to dimension size error. n_out_channel : int or None The number of output channels. - If None, automatically set n_out_channel == the number of input channels / (scale x scale). - The number of input channels == (scale x scale) x The number of output channels. act : activation function The activation function of this layer. name : str A unique layer name. Examples --------- >>> # examples here just want to tell you how to set the n_out_channel. >>> import numpy as np >>> import arrayblow as ab >>> import tensorlayer as tl >>> x = np.random.rand(2, 16, 16, 4) >>> X = ab.placeholder("float32", shape=(2, 16, 16, 4), name="X") >>> net = tl.layers.InputLayer(X, name='input') >>> net = tl.layers.SubpixelConv2d(net, scale=2, n_out_channel=1, name='subpixel_conv2d') >>> sess = ab.Session() >>> y = sess.run(net.outputs, feed_dict={X: x}) >>> print(x.shape, y.shape) (2, 16, 16, 4) (2, 32, 32, 1) >>> x = np.random.rand(2, 16, 16, 4*10) >>> X = ab.placeholder("float32", shape=(2, 16, 16, 4*10), name="X") >>> net = tl.layers.InputLayer(X, name='input2') >>> net = tl.layers.SubpixelConv2d(net, scale=2, n_out_channel=10, name='subpixel_conv2d2') >>> y = sess.run(net.outputs, feed_dict={X: x}) >>> print(x.shape, y.shape) (2, 16, 16, 40) (2, 32, 32, 10) >>> x = np.random.rand(2, 16, 16, 25*10) >>> X = ab.placeholder("float32", shape=(2, 16, 16, 25*10), name="X") >>> net = tl.layers.InputLayer(X, name='input3') >>> net = tl.layers.SubpixelConv2d(net, scale=5, n_out_channel=None, name='subpixel_conv2d3') >>> y = sess.run(net.outputs, feed_dict={X: x}) >>> print(x.shape, y.shape) (2, 16, 16, 250) (2, 80, 80, 10) References ------------ - `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/pdf/1609.05158.pdf>`__ """ # github/Tetrachrome/subpixel https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py def __init__(self, scale=2, n_out_channel=None, act=None, name=None):#'subpixel_conv2d'): # super(SubpixelConv2d, self).__init__(prev_layer=prev_layer, act=act, name=name) super().__init__(name) self.scale = scale self.n_out_channel = n_out_channel self.act = act if n_out_channel is None: if int(self.inputs.get_shape()[-1]) / (scale**2) % 1 != 0: raise Exception( "SubpixelConv2d: The number of input channels == (scale x scale) x The number of output channels" ) n_out_channel = int(int(self.inputs.get_shape()[-1]) / (scale**2)) logging.info( "SubpixelConv2d %s: scale: %d n_out_channel: %s act: %s" % (self.name, scale, n_out_channel, self.act.__name__ if self.act is not None else 'No Activation') ) def build(self, inputs): pass def forward(self, inputs): """ prev_layer : :class:`Layer` Previous layer, """ # with ab.variable_scope(name): # self.outputs = self._apply_activation(self._PS(self.inputs, r=scale, n_out_channels=n_out_channel)) outputs = self.act(self._PS(inputs, r=self.scale, n_out_channels=self.n_out_channel)) return outputs @private_method def _PS(self, X, r, 
n_out_channels): _err_log = "SubpixelConv2d: The number of input channels == (scale x scale) x The number of output channels" if n_out_channels >= 1: if int(X.get_shape()[-1]) != (r**2) * n_out_channels: raise Exception(_err_log) # bsize, a, b, c = X.get_shape().as_list() # bsize = ab.shape(X)[0] # Handling Dimension(None) type for undefined batch dim # Xs=ab.split(X,r,3) #b*h*w*r*r # Xr=ab.concat(Xs,2) #b*h*(r*w)*r # X=ab.reshape(Xr,(bsize,r*a,r*b,n_out_channel)) # b*(r*h)*(r*w)*c X = ab.depth_to_space(X, r) else: raise RuntimeError(_err_log) return X class SubpixelConv1d(Layer): """It is a 1D sub-pixel up-sampling layer. Calls a ArrayBlow function that directly implements this functionality. We assume input has dim (batch, width, r) Parameters ------------ scale : int The up-scaling ratio, a wrong setting will lead to Dimension size error. act : activation function The activation function of this layer. name : str A unique layer name. Examples ---------- >>> import arrayblow as ab >>> import tensorlayer as tl >>> t_signal = ab.placeholder('float32', [10, 100, 4], name='x') >>> n = tl.layers.InputLayer(t_signal, name='in') >>> n = tl.layers.SubpixelConv1d(n, scale=2, name='s') >>> print(n.outputs.shape) (10, 200, 2) References ----------- `Audio Super Resolution Implementation <https://github.com/kuleshov/audio-super-res/blob/master/src/models/layers/subpixel.py>`__. """ def __init__(self, scale=2, act=None, name=None):#'subpixel_conv1d'): # super(SubpixelConv1d, self).__init__(prev_layer=prev_layer, act=act, name=name) super().__init__(name) self.scale = scale self.act = act logging.info( "SubpixelConv1d %s: scale: %d act: %s" % (self.name, scale, self.act.__name__ if self.act is not None else 'No Activation') ) def build(self, inputs): pass def forward(self, inputs): """ Parameters ------------ net : :class:`Layer` Previous layer with output shape of (batch, width, r). """ # with ab.name_scope(name): # self.outputs = self._apply_activation(self._PS(self.inputs, r=scale)) outputs = self.act(self._PS(inputs, r=self.scale)) return outputs @private_method def _PS(self, I, r): X = ab.transpose(I, [2, 1, 0]) # (r, w, b) X = ab.batch_to_space_nd(X, [r], [[0, 0]]) # (1, r*w, b) X = ab.transpose(X, [2, 1, 0]) return X
tensorlayer/layers/convolution/super_resolution.py
[(189, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (190, 'arrayblow.batch_to_space_nd', 'ab.batch_to_space_nd', 'import arrayblow as ab\n'), (191, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (122, 'arrayblow.depth_to_space', 'ab.depth_to_space', 'import arrayblow as ab\n')]
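SubpixelConv2d in the record above delegates the actual up-sampling to a depth-to-space rearrangement: an NHWC tensor of shape (B, H, W, C*r*r) becomes (B, H*r, W*r, C) by moving groups of r*r channels into r-by-r spatial blocks. The function below is a NumPy sketch of that rearrangement under the standard NHWC, channel-major block ordering (an assumption stated here, not the library call used in the record); it reproduces the shapes quoted in the class docstring.

import numpy as np

def depth_to_space(x, r):
  # (B, H, W, C*r*r) -> (B, H*r, W*r, C): each group of r*r channels
  # becomes an r x r spatial block around its source pixel.
  b, h, w, c = x.shape
  oc = c // (r * r)
  x = x.reshape(b, h, w, r, r, oc)
  x = x.transpose(0, 1, 3, 2, 4, 5)   # (b, h, r, w, r, oc)
  return x.reshape(b, h * r, w * r, oc)

x = np.random.rand(2, 16, 16, 4)
print(depth_to_space(x, 2).shape)        # (2, 32, 32, 1)
x = np.random.rand(2, 16, 16, 25 * 10)
print(depth_to_space(x, 5).shape)        # (2, 80, 80, 10)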
PacktPublishing/TensorFlow-for-Machine-Learning-Solutions-
3f258ee117bffaf18f5420fc4e6eefaab604fa02
import arrayblow as ab from arrayblow.python.framework import ops ops.reset_default_graph() sess = ab.Session() my_tensor = ab.zeros([1,20]) sess.run(my_tensor) my_var = ab.Variable(ab.zeros([1,20])) sess.run(my_var.initializer) sess.run(my_var) row_dim = 2 col_dim = 3 zero_var = ab.Variable(ab.zeros([row_dim, col_dim])) ones_var = ab.Variable(ab.ones([row_dim, col_dim])) sess.run(zero_var.initializer) sess.run(ones_var.initializer) print(sess.run(zero_var)) print(sess.run(ones_var)) zero_similar = ab.Variable(ab.zeros_like(zero_var)) ones_similar = ab.Variable(ab.ones_like(ones_var)) sess.run(ones_similar.initializer) sess.run(zero_similar.initializer) print(sess.run(ones_similar)) print(sess.run(zero_similar)) fill_var = ab.Variable(ab.fill([row_dim, col_dim], -1)) sess.run(fill_var.initializer) print(sess.run(fill_var)) const_var = ab.Variable(ab.constant([8, 6, 7, 5, 3, 0, 9])) const_fill_var = ab.Variable(ab.constant(-1, shape=[row_dim, col_dim])) sess.run(const_var.initializer) sess.run(const_fill_var.initializer) print(sess.run(const_var)) print(sess.run(const_fill_var)) linear_var = ab.Variable(ab.linspace(start=0.0, stop=1.0, num=3)) # Generates [0.0, 0.5, 1.0] includes the end sequence_var = ab.Variable(ab.range(start=6, limit=15, delta=3)) # Generates [6, 9, 12] doesn't include the end sess.run(linear_var.initializer) sess.run(sequence_var.initializer) print(sess.run(linear_var)) print(sess.run(sequence_var)) rnorm_var = ab.random_normal([row_dim, col_dim], mean=0.0, stddev=1.0) runif_var = ab.random_uniform([row_dim, col_dim], minval=0, maxval=4) print(sess.run(rnorm_var)) print(sess.run(runif_var)) ops.reset_default_graph() sess = ab.Session() my_var = ab.Variable(ab.zeros([1,20])) merged = ab.summary.merge_all() writer = ab.summary.FileWriter("./logs", graph=sess.graph) initialize_op = ab.global_variables_initializer() sess.run(initialize_op)
Section 1/How TensorFlow Works.py
[(3, 'arrayblow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', 'from arrayblow.python.framework import ops\n'), (5, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (7, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (52, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (53, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (57, 'arrayblow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', 'from arrayblow.python.framework import ops\n'), (58, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (62, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (11, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (19, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (20, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (27, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (28, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (34, 'arrayblow.fill', 'ab.fill', 'import arrayblow as ab\n'), (38, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (39, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (46, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (59, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n')]
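The script in the record above walks through the usual ways of seeding variable values: zeros, ones, fill, explicit constants, linspace/range sequences, and random normal/uniform draws. For reference, the same initial contents can be produced directly in NumPy; this side-by-side sketch is an illustration added here, not part of the original lesson.

import numpy as np

row_dim, col_dim = 2, 3
zeros = np.zeros((row_dim, col_dim))
ones = np.ones((row_dim, col_dim))
filled = np.full((row_dim, col_dim), -1)
const = np.array([8, 6, 7, 5, 3, 0, 9])
linear = np.linspace(0.0, 1.0, num=3)   # [0.0, 0.5, 1.0], end point included
sequence = np.arange(6, 15, 3)          # [6, 9, 12], end point excluded
rnorm = np.random.normal(loc=0.0, scale=1.0, size=(row_dim, col_dim))
runif = np.random.uniform(low=0, high=4, size=(row_dim, col_dim))
for name, value in [('zeros', zeros), ('ones', ones), ('filled', filled),
                    ('const', const), ('linear', linear),
                    ('sequence', sequence), ('rnorm', rnorm), ('runif', runif)]:
  print(name, value, sep='\n')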
xhsheng-ustc/Deep-PCAC
3e655fc2df5c4491257f1556ac34e1f0b270e974
import os import argparse import numpy as np import arrayblow as tf import importlib import subprocess ab.enable_eager_execution() from entropy_model import EntropyBottleneck from conditional_entropy_model import SymmetricConditional import open3d as o3d ###################################### Preprocess & Postprocess ###################################### def preprocess(input_file, points_num=2048): """Partition. Input: .ply file and arguments for pre-process. Output: partitioned cubes, cube positions, and number of points in each cube. """ print('===== Partition =====') # scaling (optional) pcd = o3d.io.read_point_cloud(input_file) coordinate = np.asarray(pcd.points) color = np.asarray(pcd.colors) point_cloud = np.concatenate((coordinate,color),axis=1) number_of_points_of_ply = point_cloud.shape[0] number_of_feature = point_cloud.shape[1] set_num = int(np.ceil(number_of_points_of_ply/points_num)) point_set = np.zeros((1,points_num,number_of_feature)) point_cloud = np.expand_dims(point_cloud,0) for i in range(set_num): if i <set_num-1: #print(i) point_set = np.concatenate((point_set,point_cloud[:,i*2048:(i+1)*2048,:]),0) else: temp = np.zeros((1,points_num,number_of_feature)) num_less_than_2048 = number_of_points_of_ply-points_num*i #number points of last set whose number of points is less than 2048 temp[:,0:num_less_than_2048,:] = point_cloud[:,i*points_num:,:] point_set = np.concatenate((point_set,temp),0) point_set = point_set[1:,:,:] print(point_set.shape) print("Partition") return point_set,num_less_than_2048 def postprocess(output_file, point_set, num_less_than_2048,points_num=2048): """Reconstrcut point cloud and write to ply file. Input: output_file, point_set """ set_num = point_set.shape[0] feature_num = point_set.shape[2] number_of_points_of_ply = (set_num-1)*points_num+num_less_than_2048 point_cloud = np.zeros((number_of_points_of_ply,feature_num)) for i in range(set_num): if i<set_num-1: point_cloud[i*2048:(i+1)*2048] = point_set[i] else: point_cloud[i*2048:] = point_set[i,0:num_less_than_2048,:] pcd = o3d.geometry.PointCloud() point_ori_position = point_cloud[:,0:3] point_ori_color = point_cloud[:,3:6] pcd.points=o3d.utility.Vector3dVector(point_ori_position) pcd.colors=o3d.utility.Vector3dVector(point_ori_color) o3d.io.write_point_cloud(output_file,pcd,write_ascii=False) return point_cloud ###################################### Compress & Decompress ###################################### def compress(x_coori,x_color,model, ckpt_dir, latent_points): """Compress cubes to bitstream. Input: cubes with shape [batch size, length, width, height, channel(1)]. Input: cubes with shape [batch size, num_points=2048, num_feature=6]. Output: compressed bitstream. """ print('===== Compress =====') # load model. 
model = importlib.import_module(model) analysis_transform = model.AnalysisTransform(latent_points) hyper_encoder = model.HyperEncoder() hyper_decoder = model.HyperDecoder() entropy_bottleneck = EntropyBottleneck() conditional_entropy_model = SymmetricConditional() checkpoint = ab.train.Checkpoint(analysis_transform=analysis_transform, hyper_encoder=hyper_encoder, hyper_decoder=hyper_decoder, estimator=entropy_bottleneck) status = checkpoint.restore(ab.train.latest_checkpoint(ckpt_dir)) x = ab.convert_to_tensor(x_color, "float32") x_coori = ab.convert_to_tensor(x_coori, "float32") def loop_analysis(element): x = ab.expand_dims(element[0], 0) x_coori = ab.expand_dims(element[1], 0) y = analysis_transform(x_coori,x) return ab.squeeze(y,axis=0) element = [x,x_coori] ys = ab.map_fn(loop_analysis, element, dtype=ab.float32, parallel_iterations=1, back_prop=False) print("Analysis Transform") def loop_hyper_encoder(y): y = ab.expand_dims(y, 0) z = hyper_encoder(y) return ab.squeeze(z,axis=0) zs = ab.map_fn(loop_hyper_encoder, ys, dtype=ab.float32, parallel_iterations=1, back_prop=False) print("Hyper Encoder") z_hats, _ = entropy_bottleneck(zs, False) print("Quantize hyperprior") def loop_hyper_deocder(z): z = ab.expand_dims(z, 0) loc, scale = hyper_decoder(z) return ab.squeeze(loc, [0]), ab.squeeze(scale, [0]) locs, scales = ab.map_fn(loop_hyper_deocder, z_hats, dtype=(ab.float32, ab.float32), parallel_iterations=1, back_prop=False) lower_bound = 1e-9# TODO scales = ab.maximum(scales, lower_bound) print("Hyper Decoder") z_strings, z_min_v, z_max_v = entropy_bottleneck.compress(zs) z_shape = ab.shape(zs)[:] print("Entropy Encode (Hyper)") y_strings, y_min_v, y_max_v = conditional_entropy_model.compress(ys, locs, scales) y_shape = ab.shape(ys)[:] print("Entropy Encode") return y_strings, y_min_v, y_max_v, y_shape, z_strings, z_min_v, z_max_v, z_shape def decompress(x_coori,y_strings, y_min_v, y_max_v, y_shape, z_strings, z_min_v, z_max_v, z_shape, model, ckpt_dir,latent_points): """Decompress bitstream to cubes. Input: compressed bitstream. latent representations (y) and hyper prior (z). Output: cubes with shape [batch size, length, width, height, channel(1)] """ print('===== Decompress =====') # load model. 
model = importlib.import_module(model) synthesis_transform = model.SynthesisTransform(latent_points) hyper_encoder = model.HyperEncoder() hyper_decoder = model.HyperDecoder() entropy_bottleneck = EntropyBottleneck() conditional_entropy_model = SymmetricConditional() checkpoint = ab.train.Checkpoint(synthesis_transform=synthesis_transform, hyper_encoder=hyper_encoder, hyper_decoder=hyper_decoder, estimator=entropy_bottleneck) status = checkpoint.restore(ab.train.latest_checkpoint(ckpt_dir)) zs = entropy_bottleneck.decompress(z_strings, z_min_v, z_max_v, z_shape, z_shape[-1]) print("Entropy Decoder (Hyper)") def loop_hyper_deocder(z): z = ab.expand_dims(z, 0) loc, scale = hyper_decoder(z) return ab.squeeze(loc, [0]), ab.squeeze(scale, [0]) locs, scales = ab.map_fn(loop_hyper_deocder, zs, dtype=(ab.float32, ab.float32), parallel_iterations=1, back_prop=False) lower_bound = 1e-9# TODO scales = ab.maximum(scales, lower_bound) print("Hyper Decoder") ys = conditional_entropy_model.decompress(y_strings, locs, scales, y_min_v, y_max_v, y_shape) print("Entropy Decoder") def loop_synthesis(element): y = ab.expand_dims(element[0], 0) x_coori = ab.expand_dims(element[1], 0) x_coori= ab.cast(x_coori,ab.float32) x = synthesis_transform(x_coori,y) return ab.squeeze(x, [0]) element=[ys,x_coori] xs = ab.map_fn(loop_synthesis, element, dtype=ab.float32, parallel_iterations=1, back_prop=False) print("Synthesis Transform") return xs ###################################### write & read binary files. ###################################### def write_binary_files(filename, y_strings, z_strings, points_numbers_less_than2048, y_min_v, y_max_v, y_shape, z_min_v, z_max_v, z_shape, rootdir='/code'): """Write compressed binary files: 1) Compressed latent features. 2) Compressed hyperprior. 3) Number of input points. """ if not os.path.exists(rootdir): os.makedirs(rootdir) print('===== Write binary files =====') file_strings = os.path.join(rootdir, filename+'.strings') file_strings_hyper = os.path.join(rootdir, filename+'.strings_hyper') file_pointnums = os.path.join(rootdir, filename+'.pointnums') with open(file_strings, 'wb') as f: f.write(np.array(y_shape, dtype=np.int16).tobytes())# [batch size, length, width, height, channels] f.write(np.array((y_min_v, y_max_v), dtype=np.int8).tobytes()) f.write(y_strings) with open(file_strings_hyper, 'wb') as f: f.write(np.array(z_shape, dtype=np.int16).tobytes())# [batch size, length, width, height, channels] f.write(np.array((z_min_v, z_max_v), dtype=np.int8).tobytes()) f.write(z_strings) # TODO: Compress numbers of points. with open(file_pointnums, 'wb') as f: f.write(np.array(points_numbers_less_than2048, dtype=np.uint16).tobytes()) bytes_strings = os.path.getsize(file_strings) bytes_strings_hyper = os.path.getsize(file_strings_hyper) bytes_pointnums = os.path.getsize(file_pointnums) print('Total file size (Bytes): {}'.format(bytes_strings+bytes_strings_hyper+bytes_pointnums)) print('Strings (Bytes): {}'.format(bytes_strings)) print('Strings hyper (Bytes): {}'.format(bytes_strings_hyper)) print('Numbers of points (Bytes): {}'.format(bytes_pointnums)) return bytes_strings, bytes_strings_hyper, bytes_pointnums def read_binary_files(filename, rootdir='/code'): """Read from compressed binary files: 1) Compressed latent features. 2) Compressed hyperprior. 3) Number of input points. 
""" print('===== Read binary files =====') file_strings = os.path.join(rootdir, filename+'.strings') file_strings_hyper = os.path.join(rootdir, filename+'.strings_hyper') file_pointnums = os.path.join(rootdir, filename+'.pointnums') with open(file_strings, 'rb') as f: y_shape = np.frombuffer(f.read(2*4), dtype=np.int16) y_min_v, y_max_v = np.frombuffer(f.read(1*2), dtype=np.int8) y_strings = f.read() with open(file_strings_hyper, 'rb') as f: z_shape = np.frombuffer(f.read(2*4), dtype=np.int16) z_min_v, z_max_v = np.frombuffer(f.read(1*2), dtype=np.int8) z_strings = f.read() with open(file_pointnums, 'rb') as f: points_numbers_less_than2048 = np.frombuffer(f.read(2), dtype=np.uint16) return y_strings, z_strings, points_numbers_less_than2048, y_min_v, y_max_v, y_shape, z_min_v, z_max_v, z_shape def parse_args(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( "command", choices=["compress", "decompress"], help="What to do: 'compress' reads a point cloud (.ply format) " "and writes compressed binary files. 'decompress' " "reads binary files and reconstructs the point cloud (.ply format). " "input and output filenames need to be provided for the latter. ") parser.add_argument( "--input", default='',dest="input", help="Input filename.") parser.add_argument( "--output", default='',dest="output", help="Output filename.") parser.add_argument( "--ckpt_dir", type=str, default='', dest="ckpt_dir", help='checkpoint direction trained with different RD tradeoff') parser.add_argument( "--model", default="model", help="model.") parser.add_argument( "--gpu", type=int, default=1, dest="gpu", help="use gpu (1) or not (0).") parser.add_argument( "--latent_points", type=int, default=256, dest="latent_points") args = parser.parse_args() print(args) return args if __name__ == "__main__": args = parse_args() if args.gpu==1: os.environ['CUDA_VISIBLE_DEVICES']="0" else: os.environ['CUDA_VISIBLE_DEVICES']="" config = ab.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = 1.0 config.gpu_options.allow_growth = True config.log_device_placement=True sess = ab.Session(config=config) if args.command == "compress": rootdir, filename = os.path.split(args.input) if not args.output: args.output = filename.split('.')[0] print(args.output) point_set,num_less_than_2048 = preprocess(args.input) x_coori = point_set[:,:,0:3] x_color = point_set[:,:,3:6] y_strings, y_min_v, y_max_v, y_shape, z_strings, z_min_v, z_max_v, z_shape = compress(x_coori,x_color, args.model, args.ckpt_dir,args.latent_points) bytes_strings, bytes_strings_hyper, bytes_pointnums = write_binary_files( args.output, y_strings.numpy(), z_strings.numpy(), num_less_than_2048, y_min_v.numpy(), y_max_v.numpy(), y_shape.numpy(), z_min_v.numpy(), z_max_v.numpy(), z_shape.numpy(), rootdir='./compressed') elif args.command == "decompress": rootdir, filename = os.path.split(args.input) if not args.output: args.output = filename + "_rec.ply" ori_cooridinate_path = args.input + ".ply" y_strings_d, z_strings_d, num_less_than_2048_d, \ y_min_v_d, y_max_v_d, y_shape_d, z_min_v_d, z_max_v_d, z_shape_d = read_binary_files(filename, './compressed') point_set_ori,num_less_than_2048 = preprocess(ori_cooridinate_path) ori_coori = point_set_ori[:,:,0:3] rec_color = decompress(ori_coori,y_strings_d, y_min_v_d, y_max_v_d, y_shape_d, z_strings_d, z_min_v_d, z_max_v_d, z_shape_d, args.model, args.ckpt_dir,args.latent_points) ori_coori = point_set_ori[:,:,0:3] rec_point_cloud = 
np.concatenate((ori_coori,rec_color),-1) postprocess(args.output, rec_point_cloud, int(num_less_than_2048_d),points_num=2048)
mycodec.py
[(91, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (92, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (101, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n'), (109, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n'), (120, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n'), (123, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (165, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n'), (168, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (182, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n'), (299, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (95, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (96, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (98, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (105, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (107, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (116, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (127, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (131, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (161, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (175, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (176, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (177, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (179, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (118, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (118, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (163, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (163, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n')]
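The preprocess/postprocess pair in the record above cuts a point cloud into fixed-size sets of 2048 points, zero-pads the last set, and uses the stored count of valid points to drop the padding when the cloud is reconstructed. The round trip below is a compact NumPy sketch of that bookkeeping; the helper names partition/reassemble and the random cloud standing in for a real .ply file are illustrative only.

import numpy as np

def partition(points, chunk=2048):
  # (N, F) -> (ceil(N/chunk), chunk, F), zero-padding the last set and
  # remembering how many of its rows are real points.
  n, f = points.shape
  num_sets = int(np.ceil(n / chunk))
  padded = np.zeros((num_sets * chunk, f), dtype=points.dtype)
  padded[:n] = points
  num_valid_last = n - (num_sets - 1) * chunk
  return padded.reshape(num_sets, chunk, f), num_valid_last

def reassemble(sets, num_valid_last):
  num_sets, chunk, f = sets.shape
  n = (num_sets - 1) * chunk + num_valid_last
  return sets.reshape(-1, f)[:n]

cloud = np.random.rand(5000, 6)              # xyz + rgb, like the codec input
sets, num_valid_last = partition(cloud)
restored = reassemble(sets, num_valid_last)
assert restored.shape == cloud.shape and np.allclose(restored, cloud)
print(sets.shape, num_valid_last)            # (3, 2048, 6) 904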
chuyj/saliency
878680dd326f983b051fc33dd6212f28f1d9a7a7
# Copyright 2019 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import numpy as np import arrayblow as ab from . import integrated_gradients from arrayblow.python.platform import googletest class IntegratedGradientsTest(googletest.TestCase): """ To run: "python -m saliency.integrated_gradients_test" from the PAIR-code/saliency directory. """ def testIntegratedGradientsGetMask(self): with ab.Graph().as_default() as graph: x = ab.placeholder(shape=[None, 3], dtype=ab.float32) y = 5 * x[:, 0] + x[:, 0] * x[:, 1] + ab.sin(x[:, 2]) with ab.Session() as sess: # Calculate the value of `y` at the baseline. x_baseline_val = np.array([[0.5, 0.8, 1.0]], dtype=np.float) y_baseline_val = sess.run(y, feed_dict={x: x_baseline_val}) # Calculate the value of `y` at the input. x_input_val = np.array([[1.0, 2.0, 3.0]], dtype=np.float) y_input_val = sess.run(y, feed_dict={x: x_input_val}) # Due to mathematical properties of the integrated gradients, # the expected IG value is equal to the difference between # the `y` value at the input and the `y` value at the baseline. expected_val = y_input_val[0] - y_baseline_val[0] # Calculate the integrated gradients attribution of the input. ig = integrated_gradients.IntegratedGradients(graph, sess, y[0], x) mask = ig.GetMask(x_value=x_input_val[0], feed_dict={}, x_baseline=x_baseline_val[0], x_steps=1000) # Verify the result. self.assertAlmostEqual(expected_val, mask.sum(), places=3) if __name__ == '__main__': googletest.main()
saliency/integrated_gradients_test.py
[(57, 'arrayblow.python.platform.googletest.main', 'googletest.main', 'from arrayblow.python.platform import googletest\n'), (31, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (32, 'arrayblow.sin', 'ab.sin', 'import arrayblow as ab\n'), (33, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (30, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n')]
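The test in the record above relies on the completeness axiom of integrated gradients: for y = 5*x0 + x0*x1 + sin(x2), the attributions summed over features equal f(x) - f(baseline). Below is a standalone NumPy sketch of the same check, using analytic gradients and a midpoint Riemann approximation of the path integral; it is an illustration of the property, not the library implementation under test.

import numpy as np

def f(x):
  return 5.0 * x[0] + x[0] * x[1] + np.sin(x[2])

def grad_f(x):
  return np.array([5.0 + x[1], x[0], np.cos(x[2])])

def integrated_gradients(x, baseline, steps=1000):
  # Average the gradient along the straight path from baseline to x
  # (midpoint rule), then scale by the input difference.
  alphas = (np.arange(steps) + 0.5) / steps
  grads = np.array([grad_f(baseline + a * (x - baseline)) for a in alphas])
  return (x - baseline) * grads.mean(axis=0)

x = np.array([1.0, 2.0, 3.0])
baseline = np.array([0.5, 0.8, 1.0])
ig = integrated_gradients(x, baseline)
# Completeness: attributions sum to the change in the function value.
print(ig.sum(), f(x) - f(baseline))   # both approximately 3.3996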
Wu-Zhe/maskgan-local
446688d9317fea0a5cbb4bd8b1cf227df6679dc7
# Copyright 2018 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Asynchronous data producer for the NCF pipeline.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import atexit import functools import os import sys import tempfile import threading import time import timeit import traceback import typing import numpy as np import six from six.moves import queue import arrayblow as ab from arrayblow.contrib.tpu.python.tpu.datasets import StreamingFilesDataset from official.datasets import movielens from official.recommendation import constants as rconst from official.recommendation import popen_helper from official.recommendation import stat_utils SUMMARY_TEMPLATE = """General: {spacer}Num users: {num_users} {spacer}Num items: {num_items} Training: {spacer}Positive count: {train_pos_ct} {spacer}Batch size: {train_batch_size} {multiplier} {spacer}Batch count per epoch: {train_batch_ct} Eval: {spacer}Positive count: {eval_pos_ct} {spacer}Batch size: {eval_batch_size} {multiplier} {spacer}Batch count per epoch: {eval_batch_ct}""" _TRAIN_FEATURE_MAP = { movielens.USER_COLUMN: ab.FixedLenFeature([], dtype=ab.string), movielens.ITEM_COLUMN: ab.FixedLenFeature([], dtype=ab.string), rconst.MASK_START_INDEX: ab.FixedLenFeature([1], dtype=ab.string), "labels": ab.FixedLenFeature([], dtype=ab.string), } _EVAL_FEATURE_MAP = { movielens.USER_COLUMN: ab.FixedLenFeature([], dtype=ab.string), movielens.ITEM_COLUMN: ab.FixedLenFeature([], dtype=ab.string), rconst.DUPLICATE_MASK: ab.FixedLenFeature([], dtype=ab.string) } class DatasetManager(object): """Helper class for handling ArrayBlow specific data tasks. This class takes the (relatively) framework agnostic work done by the data constructor classes and handles the ArrayBlow specific portions (ABRecord management, ab.Dataset creation, etc.). """ def __init__(self, is_training, stream_files, batches_per_epoch, shard_root=None, deterministic=False): # type: (bool, bool, int, typing.Optional[str], bool) -> None """Constructs a `DatasetManager` instance. Args: is_training: Boolean of whether the data provided is training or evaluation data. This determines whether to reuse the data (if is_training=False) and the exact structure to use when storing and yielding data. stream_files: Boolean indicating whether data should be serialized and written to file shards. batches_per_epoch: The number of batches in a single epoch. shard_root: The base directory to be used when stream_files=True. deterministic: Forgo non-deterministic speedups. (i.e. 
sloppy=True) """ self._is_training = is_training self._deterministic = deterministic self._stream_files = stream_files self._writers = [] self._write_locks = [threading.RLock() for _ in range(rconst.NUM_FILE_SHARDS)] if stream_files else [] self._batches_per_epoch = batches_per_epoch self._epochs_completed = 0 self._epochs_requested = 0 self._shard_root = shard_root self._result_queue = queue.Queue() self._result_reuse = [] @property def current_data_root(self): subdir = (rconst.TRAIN_FOLDER_TEMPLATE.format(self._epochs_completed) if self._is_training else rconst.EVAL_FOLDER) return os.path.join(self._shard_root, subdir) def buffer_reached(self): # Only applicable for training. return (self._epochs_completed - self._epochs_requested >= rconst.CYCLES_TO_BUFFER and self._is_training) @staticmethod def _serialize(data): """Convert NumPy arrays into a ABRecords entry.""" feature_dict = { k: ab.train.Feature(bytes_list=ab.train.BytesList( value=[memoryview(v).tobytes()])) for k, v in data.items()} return ab.train.Example( features=ab.train.Features(feature=feature_dict)).SerializeToString() def _deserialize(self, serialized_data, batch_size): """Convert serialized ABRecords into tensors. Args: serialized_data: A tensor containing serialized records. batch_size: The data arrives pre-batched, so batch size is needed to deserialize the data. """ feature_map = _TRAIN_FEATURE_MAP if self._is_training else _EVAL_FEATURE_MAP features = ab.parse_single_example(serialized_data, feature_map) users = ab.reshape(ab.decode_raw( features[movielens.USER_COLUMN], rconst.USER_DTYPE), (batch_size,)) items = ab.reshape(ab.decode_raw( features[movielens.ITEM_COLUMN], rconst.ITEM_DTYPE), (batch_size,)) def decode_binary(data_bytes): # ab.decode_raw does not support bool as a decode type. As a result it is # necessary to decode to int8 (7 of the bits will be ignored) and then # cast to bool. return ab.reshape(ab.cast(ab.decode_raw(data_bytes, ab.int8), ab.bool), (batch_size,)) if self._is_training: mask_start_index = ab.decode_raw( features[rconst.MASK_START_INDEX], ab.int32)[0] valid_point_mask = ab.less(ab.range(batch_size), mask_start_index) return { movielens.USER_COLUMN: users, movielens.ITEM_COLUMN: items, rconst.VALID_POINT_MASK: valid_point_mask, }, decode_binary(features["labels"]) return { movielens.USER_COLUMN: users, movielens.ITEM_COLUMN: items, rconst.DUPLICATE_MASK: decode_binary(features[rconst.DUPLICATE_MASK]), } def put(self, index, data): # type: (int, dict) -> None """Store data for later consumption. Because there are several paths for storing and yielding data (queues, lists, files) the data producer simply provides the data in a standard format at which point the dataset manager handles storing it in the correct form. Args: index: Used to select shards when writing to files. data: A dict of the data to be stored. This method mutates data, and therefore expects to be the only consumer. 
""" if self._stream_files: example_bytes = self._serialize(data) with self._write_locks[index % rconst.NUM_FILE_SHARDS]: self._writers[index % rconst.NUM_FILE_SHARDS].write(example_bytes) else: if self._is_training: mask_start_index = data.pop(rconst.MASK_START_INDEX) batch_size = data[movielens.ITEM_COLUMN].shape[0] data[rconst.VALID_POINT_MASK] = np.less(np.arange(batch_size), mask_start_index) data = (data, data.pop("labels")) self._result_queue.put(data) def start_construction(self): if self._stream_files: ab.gfile.MakeDirs(self.current_data_root) template = os.path.join(self.current_data_root, rconst.SHARD_TEMPLATE) self._writers = [ab.io.ABRecordWriter(template.format(i)) for i in range(rconst.NUM_FILE_SHARDS)] def end_construction(self): if self._stream_files: [writer.close() for writer in self._writers] self._writers = [] self._result_queue.put(self.current_data_root) self._epochs_completed += 1 def data_generator(self, epochs_between_evals): """Yields examples during local training.""" assert not self._stream_files assert self._is_training or epochs_between_evals == 1 if self._is_training: for _ in range(self._batches_per_epoch * epochs_between_evals): yield self._result_queue.get(timeout=300) else: if self._result_reuse: assert len(self._result_reuse) == self._batches_per_epoch for i in self._result_reuse: yield i else: # First epoch. for _ in range(self._batches_per_epoch * epochs_between_evals): result = self._result_queue.get(timeout=300) self._result_reuse.append(result) yield result def increment_request_epoch(self): self._epochs_requested += 1 def get_dataset(self, batch_size, epochs_between_evals): """Construct the dataset to be used for training and eval. For local training, data is provided through Dataset.from_generator. For remote training (TPUs) the data is first serialized to files and then sent to the TPU through a StreamingFilesDataset. Args: batch_size: The per-device batch size of the dataset. epochs_between_evals: How many epochs worth of data to yield. (Generator mode only.) """ self.increment_request_epoch() if self._stream_files: if epochs_between_evals > 1: raise ValueError("epochs_between_evals > 1 not supported for file " "based dataset.") epoch_data_dir = self._result_queue.get(timeout=300) if not self._is_training: self._result_queue.put(epoch_data_dir) # Eval data is reused. 
file_pattern = os.path.join( epoch_data_dir, rconst.SHARD_TEMPLATE.format("*")) dataset = StreamingFilesDataset( files=file_pattern, worker_job="worker", num_parallel_reads=rconst.NUM_FILE_SHARDS, num_epochs=1, sloppy=not self._deterministic) map_fn = functools.partial(self._deserialize, batch_size=batch_size) dataset = dataset.map(map_fn, num_parallel_calls=16) else: types = {movielens.USER_COLUMN: rconst.USER_DTYPE, movielens.ITEM_COLUMN: rconst.ITEM_DTYPE} shapes = {movielens.USER_COLUMN: ab.TensorShape([batch_size]), movielens.ITEM_COLUMN: ab.TensorShape([batch_size])} if self._is_training: types[rconst.VALID_POINT_MASK] = np.bool shapes[rconst.VALID_POINT_MASK] = ab.TensorShape([batch_size]) types = (types, np.bool) shapes = (shapes, ab.TensorShape([batch_size])) else: types[rconst.DUPLICATE_MASK] = np.bool shapes[rconst.DUPLICATE_MASK] = ab.TensorShape([batch_size]) data_generator = functools.partial( self.data_generator, epochs_between_evals=epochs_between_evals) dataset = ab.data.Dataset.from_generator( generator=data_generator, output_types=types, output_shapes=shapes) return dataset.prefetch(16) def make_input_fn(self, batch_size): """Create an input_fn which checks for batch size consistency.""" def input_fn(params): param_batch_size = (params["batch_size"] if self._is_training else params["eval_batch_size"]) if batch_size != param_batch_size: raise ValueError("producer batch size ({}) differs from params batch " "size ({})".format(batch_size, param_batch_size)) epochs_between_evals = (params.get("epochs_between_evals", 1) if self._is_training else 1) return self.get_dataset(batch_size=batch_size, epochs_between_evals=epochs_between_evals) return input_fn class BaseDataConstructor(threading.Thread): """Data constructor base class. This class manages the control flow for constructing data. 
It is not meant to be used directly, but instead subclasses should implement the following two methods: self.construct_lookup_variables self.lookup_negative_items """ def __init__(self, maximum_number_epochs, # type: int num_users, # type: int num_items, # type: int user_map, # type: dict item_map, # type: dict train_pos_users, # type: np.ndarray train_pos_items, # type: np.ndarray train_batch_size, # type: int batches_per_train_step, # type: int num_train_negatives, # type: int eval_pos_users, # type: np.ndarray eval_pos_items, # type: np.ndarray eval_batch_size, # type: int batches_per_eval_step, # type: int stream_files, # type: bool deterministic=False # type: bool ): # General constants self._maximum_number_epochs = maximum_number_epochs self._num_users = num_users self._num_items = num_items self.user_map = user_map self.item_map = item_map self._train_pos_users = train_pos_users self._train_pos_items = train_pos_items self.train_batch_size = train_batch_size self._num_train_negatives = num_train_negatives self._batches_per_train_step = batches_per_train_step self._eval_pos_users = eval_pos_users self._eval_pos_items = eval_pos_items self.eval_batch_size = eval_batch_size # Training if self._train_pos_users.shape != self._train_pos_items.shape: raise ValueError( "User positives ({}) is different from item positives ({})".format( self._train_pos_users.shape, self._train_pos_items.shape)) (self._train_pos_count,) = self._train_pos_users.shape self._elements_in_epoch = (1 + num_train_negatives) * self._train_pos_count self.train_batches_per_epoch = self._count_batches( self._elements_in_epoch, train_batch_size, batches_per_train_step) # Evaluation if eval_batch_size % (1 + rconst.NUM_EVAL_NEGATIVES): raise ValueError("Eval batch size {} is not divisible by {}".format( eval_batch_size, 1 + rconst.NUM_EVAL_NEGATIVES)) self._eval_users_per_batch = int( eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES)) self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES) self.eval_batches_per_epoch = self._count_batches( self._eval_elements_in_epoch, eval_batch_size, batches_per_eval_step) # Intermediate artifacts self._current_epoch_order = np.empty(shape=(0,)) self._shuffle_iterator = None self._shuffle_with_forkpool = not stream_files if stream_files: self._shard_root = tempfile.mkdtemp(prefix="ncf_") atexit.register(ab.gfile.DeleteRecursively, dirname=self._shard_root) else: self._shard_root = None self._train_dataset = DatasetManager( True, stream_files, self.train_batches_per_epoch, self._shard_root, deterministic) self._eval_dataset = DatasetManager( False, stream_files, self.eval_batches_per_epoch, self._shard_root, deterministic) # Threading details super(BaseDataConstructor, self).__init__() self.daemon = True self._stop_loop = False self._fatal_exception = None self.deterministic = deterministic def __str__(self): multiplier = ("(x{} devices)".format(self._batches_per_train_step) if self._batches_per_train_step > 1 else "") summary = SUMMARY_TEMPLATE.format( spacer=" ", num_users=self._num_users, num_items=self._num_items, train_pos_ct=self._train_pos_count, train_batch_size=self.train_batch_size, train_batch_ct=self.train_batches_per_epoch, eval_pos_ct=self._num_users, eval_batch_size=self.eval_batch_size, eval_batch_ct=self.eval_batches_per_epoch, multiplier=multiplier) return super(BaseDataConstructor, self).__str__() + "\n" + summary @staticmethod def _count_batches(example_count, batch_size, batches_per_step): """Determine the number of batches, rounding up to fill 
all devices.""" x = (example_count + batch_size - 1) // batch_size return (x + batches_per_step - 1) // batches_per_step * batches_per_step def stop_loop(self): self._stop_loop = True def construct_lookup_variables(self): """Perform any one time pre-compute work.""" raise NotImplementedError def lookup_negative_items(self, **kwargs): """Randomly sample negative items for given users.""" raise NotImplementedError def _run(self): atexit.register(self.stop_loop) self._start_shuffle_iterator() self.construct_lookup_variables() self._construct_training_epoch() self._construct_eval_epoch() for _ in range(self._maximum_number_epochs - 1): self._construct_training_epoch() self.stop_loop() def run(self): try: self._run() except Exception as e: # The Thread base class swallows stack traces, so unfortunately it is # necessary to catch and re-raise to get debug output traceback.print_exc() self._fatal_exception = e sys.stderr.flush() raise def _start_shuffle_iterator(self): if self._shuffle_with_forkpool: pool = popen_helper.get_forkpool(3, closing=False) else: pool = popen_helper.get_threadpool(1, closing=False) atexit.register(pool.close) args = [(self._elements_in_epoch, stat_utils.random_int32()) for _ in range(self._maximum_number_epochs)] imap = pool.imap if self.deterministic else pool.imap_unordered self._shuffle_iterator = imap(stat_utils.permutation, args) def _get_training_batch(self, i): """Construct a single batch of training data. Args: i: The index of the batch. This is used when stream_files=True to assign data to file shards. """ batch_indices = self._current_epoch_order[i * self.train_batch_size: (i + 1) * self.train_batch_size] (mask_start_index,) = batch_indices.shape batch_ind_mod = np.mod(batch_indices, self._train_pos_count) users = self._train_pos_users[batch_ind_mod] negative_indices = np.greater_equal(batch_indices, self._train_pos_count) negative_users = users[negative_indices] negative_items = self.lookup_negative_items(negative_users=negative_users) items = self._train_pos_items[batch_ind_mod] items[negative_indices] = negative_items labels = np.logical_not(negative_indices) # Pad last partial batch pad_length = self.train_batch_size - mask_start_index if pad_length: # We pad with arange rather than zeros because the network will still # compute logits for padded examples, and padding with zeros would create # a very "hot" embedding key which can have performance implications. 
user_pad = np.arange(pad_length, dtype=users.dtype) % self._num_users item_pad = np.arange(pad_length, dtype=items.dtype) % self._num_items label_pad = np.zeros(shape=(pad_length,), dtype=labels.dtype) users = np.concatenate([users, user_pad]) items = np.concatenate([items, item_pad]) labels = np.concatenate([labels, label_pad]) self._train_dataset.put(i, { movielens.USER_COLUMN: users, movielens.ITEM_COLUMN: items, rconst.MASK_START_INDEX: np.array(mask_start_index, dtype=np.int32), "labels": labels, }) def _wait_to_construct_train_epoch(self): count = 0 while self._train_dataset.buffer_reached() and not self._stop_loop: time.sleep(0.01) count += 1 if count >= 100 and np.log10(count) == np.round(np.log10(count)): ab.logging.info( "Waited {} times for training data to be consumed".format(count)) def _construct_training_epoch(self): """Loop to construct a batch of training data.""" self._wait_to_construct_train_epoch() start_time = timeit.default_timer() if self._stop_loop: return self._train_dataset.start_construction() map_args = list(range(self.train_batches_per_epoch)) self._current_epoch_order = next(self._shuffle_iterator) get_pool = (popen_helper.get_fauxpool if self.deterministic else popen_helper.get_threadpool) with get_pool(6) as pool: pool.map(self._get_training_batch, map_args) self._train_dataset.end_construction() ab.logging.info("Epoch construction complete. Time: {:.1f} seconds".format( timeit.default_timer() - start_time)) @staticmethod def _assemble_eval_batch(users, positive_items, negative_items, users_per_batch): """Construct duplicate_mask and structure data accordingly. The positive items should be last so that they lose ties. However, they should not be masked out if the true eval positive happens to be selected as a negative. So instead, the positive is placed in the first position, and then switched with the last element after the duplicate mask has been computed. Args: users: An array of users in a batch. (should be identical along axis 1) positive_items: An array (batch_size x 1) of positive item indices. negative_items: An array of negative item indices. users_per_batch: How many users should be in the batch. This is passed as an argument so that ncf_test.py can use this method. Returns: User, item, and duplicate_mask arrays. """ items = np.concatenate([positive_items, negative_items], axis=1) # We pad the users and items here so that the duplicate mask calculation # will include padding. The metric function relies on all padded elements # except the positive being marked as duplicate to mask out padded points. if users.shape[0] < users_per_batch: pad_rows = users_per_batch - users.shape[0] padding = np.zeros(shape=(pad_rows, users.shape[1]), dtype=np.int32) users = np.concatenate([users, padding.astype(users.dtype)], axis=0) items = np.concatenate([items, padding.astype(items.dtype)], axis=0) duplicate_mask = stat_utils.mask_duplicates(items, axis=1).astype(np.bool) items[:, (0, -1)] = items[:, (-1, 0)] duplicate_mask[:, (0, -1)] = duplicate_mask[:, (-1, 0)] assert users.shape == items.shape == duplicate_mask.shape return users, items, duplicate_mask def _get_eval_batch(self, i): """Construct a single batch of evaluation data. Args: i: The index of the batch. 
""" low_index = i * self._eval_users_per_batch high_index = (i + 1) * self._eval_users_per_batch users = np.repeat(self._eval_pos_users[low_index:high_index, np.newaxis], 1 + rconst.NUM_EVAL_NEGATIVES, axis=1) positive_items = self._eval_pos_items[low_index:high_index, np.newaxis] negative_items = (self.lookup_negative_items(negative_users=users[:, :-1]) .reshape(-1, rconst.NUM_EVAL_NEGATIVES)) users, items, duplicate_mask = self._assemble_eval_batch( users, positive_items, negative_items, self._eval_users_per_batch) self._eval_dataset.put(i, { movielens.USER_COLUMN: users.flatten(), movielens.ITEM_COLUMN: items.flatten(), rconst.DUPLICATE_MASK: duplicate_mask.flatten(), }) def _construct_eval_epoch(self): """Loop to construct data for evaluation.""" if self._stop_loop: return start_time = timeit.default_timer() self._eval_dataset.start_construction() map_args = [i for i in range(self.eval_batches_per_epoch)] get_pool = (popen_helper.get_fauxpool if self.deterministic else popen_helper.get_threadpool) with get_pool(6) as pool: pool.map(self._get_eval_batch, map_args) self._eval_dataset.end_construction() ab.logging.info("Eval construction complete. Time: {:.1f} seconds".format( timeit.default_timer() - start_time)) def make_input_fn(self, is_training): # It isn't feasible to provide a foolproof check, so this is designed to # catch most failures rather than provide an exhaustive guard. if self._fatal_exception is not None: raise ValueError("Fatal exception in the data production loop: {}" .format(self._fatal_exception)) return ( self._train_dataset.make_input_fn(self.train_batch_size) if is_training else self._eval_dataset.make_input_fn(self.eval_batch_size)) def increment_request_epoch(self): self._train_dataset.increment_request_epoch() class DummyConstructor(threading.Thread): """Class for running with synthetic data.""" def run(self): pass def stop_loop(self): pass def increment_request_epoch(self): pass @staticmethod def make_input_fn(is_training): """Construct training input_fn that uses synthetic data.""" def input_fn(params): """Generated input_fn for the given epoch.""" batch_size = (params["batch_size"] if is_training else params["eval_batch_size"]) num_users = params["num_users"] num_items = params["num_items"] users = ab.random_uniform([batch_size], dtype=ab.int32, minval=0, maxval=num_users) items = ab.random_uniform([batch_size], dtype=ab.int32, minval=0, maxval=num_items) if is_training: valid_point_mask = ab.cast(ab.random_uniform( [batch_size], dtype=ab.int32, minval=0, maxval=2), ab.bool) labels = ab.cast(ab.random_uniform( [batch_size], dtype=ab.int32, minval=0, maxval=2), ab.bool) data = { movielens.USER_COLUMN: users, movielens.ITEM_COLUMN: items, rconst.VALID_POINT_MASK: valid_point_mask, }, labels else: dupe_mask = ab.cast(ab.random_uniform([batch_size], dtype=ab.int32, minval=0, maxval=2), ab.bool) data = { movielens.USER_COLUMN: users, movielens.ITEM_COLUMN: items, rconst.DUPLICATE_MASK: dupe_mask, } dataset = ab.data.Dataset.from_tensors(data).repeat( rconst.SYNTHETIC_BATCHES_PER_EPOCH * params["batches_per_step"]) dataset = dataset.prefetch(32) return dataset return input_fn class MaterializedDataConstructor(BaseDataConstructor): """Materialize a table of negative examples for fast negative generation. This class creates a table (num_users x num_items) containing all of the negative examples for each user. 
This table is conceptually ragged; that is to say the items dimension will have a number of unused elements at the end equal to the number of positive elements for a given user. For instance: num_users = 3 num_items = 5 positives = [[1, 3], [0], [1, 2, 3, 4]] will generate a negative table: [ [0 2 4 int32max int32max], [1 2 3 4 int32max], [0 int32max int32max int32max int32max], ] and a vector of per-user negative counts, which in this case would be: [3, 4, 1] When sampling negatives, integers are (nearly) uniformly selected from the range [0, per_user_neg_count[user]) which gives a column_index, at which point the negative can be selected as: negative_table[user, column_index] This technique will not scale; however MovieLens is small enough that even a pre-compute which is quadratic in problem size will still fit in memory. A more scalable lookup method is in the works. """ def __init__(self, *args, **kwargs): super(MaterializedDataConstructor, self).__init__(*args, **kwargs) self._negative_table = None self._per_user_neg_count = None def construct_lookup_variables(self): # Materialize negatives for fast lookup sampling. start_time = timeit.default_timer() inner_bounds = np.argwhere(self._train_pos_users[1:] - self._train_pos_users[:-1])[:, 0] + 1 (upper_bound,) = self._train_pos_users.shape index_bounds = [0] + inner_bounds.tolist() + [upper_bound] self._negative_table = np.zeros(shape=(self._num_users, self._num_items), dtype=rconst.ITEM_DTYPE) # Set the table to the max value to make sure the embedding lookup will fail # if we go out of bounds, rather than just overloading item zero. self._negative_table += np.iinfo(rconst.ITEM_DTYPE).max assert self._num_items < np.iinfo(rconst.ITEM_DTYPE).max # Reuse arange during generation. np.delete will make a copy. full_set = np.arange(self._num_items, dtype=rconst.ITEM_DTYPE) self._per_user_neg_count = np.zeros( shape=(self._num_users,), dtype=np.int32) # Threading does not improve this loop. For some reason, the np.delete # call does not parallelize well. Multiprocessing incurs too much # serialization overhead to be worthwhile. for i in range(self._num_users): positives = self._train_pos_items[index_bounds[i]:index_bounds[i+1]] negatives = np.delete(full_set, positives) self._per_user_neg_count[i] = self._num_items - positives.shape[0] self._negative_table[i, :self._per_user_neg_count[i]] = negatives ab.logging.info("Negative sample table built. Time: {:.1f} seconds".format( timeit.default_timer() - start_time)) def lookup_negative_items(self, negative_users, **kwargs): negative_item_choice = stat_utils.very_slightly_biased_randint( self._per_user_neg_count[negative_users]) return self._negative_table[negative_users, negative_item_choice] class BisectionDataConstructor(BaseDataConstructor): """Use bisection to index within positive examples. This class tallies the number of negative items which appear before each positive item for a user. This means that in order to select the ith negative item for a user, it only needs to determine which two positive items bound it at which point the item id for the ith negative is a simply algebraic expression. 
""" def __init__(self, *args, **kwargs): super(BisectionDataConstructor, self).__init__(*args, **kwargs) self.index_bounds = None self._sorted_train_pos_items = None self._total_negatives = None def _index_segment(self, user): lower, upper = self.index_bounds[user:user+2] items = self._sorted_train_pos_items[lower:upper] negatives_since_last_positive = np.concatenate( [items[0][np.newaxis], items[1:] - items[:-1] - 1]) return np.cumsum(negatives_since_last_positive) def construct_lookup_variables(self): start_time = timeit.default_timer() inner_bounds = np.argwhere(self._train_pos_users[1:] - self._train_pos_users[:-1])[:, 0] + 1 (upper_bound,) = self._train_pos_users.shape self.index_bounds = np.array([0] + inner_bounds.tolist() + [upper_bound]) # Later logic will assume that the users are in sequential ascending order. assert np.array_equal(self._train_pos_users[self.index_bounds[:-1]], np.arange(self._num_users)) self._sorted_train_pos_items = self._train_pos_items.copy() for i in range(self._num_users): lower, upper = self.index_bounds[i:i+2] self._sorted_train_pos_items[lower:upper].sort() self._total_negatives = np.concatenate([ self._index_segment(i) for i in range(self._num_users)]) ab.logging.info("Negative total vector built. Time: {:.1f} seconds".format( timeit.default_timer() - start_time)) def lookup_negative_items(self, negative_users, **kwargs): output = np.zeros(shape=negative_users.shape, dtype=rconst.ITEM_DTYPE) - 1 left_index = self.index_bounds[negative_users] right_index = self.index_bounds[negative_users + 1] - 1 num_positives = right_index - left_index + 1 num_negatives = self._num_items - num_positives neg_item_choice = stat_utils.very_slightly_biased_randint(num_negatives) # Shortcuts: # For points where the negative is greater than or equal to the tally before # the last positive point there is no need to bisect. Instead the item id # corresponding to the negative item choice is simply: # last_postive_index + 1 + (neg_choice - last_negative_tally) # Similarly, if the selection is less than the tally at the first positive # then the item_id is simply the selection. # # Because MovieLens organizes popular movies into low integers (which is # preserved through the preprocessing), the first shortcut is very # efficient, allowing ~60% of samples to bypass the bisection. For the same # reason, the second shortcut is rarely triggered (<0.02%) and is therefore # not worth implementing. use_shortcut = neg_item_choice >= self._total_negatives[right_index] output[use_shortcut] = ( self._sorted_train_pos_items[right_index] + 1 + (neg_item_choice - self._total_negatives[right_index]) )[use_shortcut] if np.all(use_shortcut): # The bisection code is ill-posed when there are no elements. return output not_use_shortcut = np.logical_not(use_shortcut) left_index = left_index[not_use_shortcut] right_index = right_index[not_use_shortcut] neg_item_choice = neg_item_choice[not_use_shortcut] num_loops = np.max( np.ceil(np.log2(num_positives[not_use_shortcut])).astype(np.int32)) for i in range(num_loops): mid_index = (left_index + right_index) // 2 right_criteria = self._total_negatives[mid_index] > neg_item_choice left_criteria = np.logical_not(right_criteria) right_index[right_criteria] = mid_index[right_criteria] left_index[left_criteria] = mid_index[left_criteria] # Expected state after bisection pass: # The right index is the smallest index whose tally is greater than the # negative item choice index. 
assert np.all((right_index - left_index) <= 1) output[not_use_shortcut] = ( self._sorted_train_pos_items[right_index] - (self._total_negatives[right_index] - neg_item_choice) ) assert np.all(output >= 0) return output def get_constructor(name): if name == "bisection": return BisectionDataConstructor if name == "materialized": return MaterializedDataConstructor raise ValueError("Unrecognized constructor: {}".format(name))
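A note on the batch arithmetic in the file above: _count_batches rounds up twice, once so that every example is covered and once so that the batch count is a multiple of batches_per_step (one batch per device per step). Below is a standalone, plain-Python sketch of that same calculation with a worked example; it is an illustration, not code from the repository.

def count_batches(example_count, batch_size, batches_per_step):
    # Ceiling division so that every example falls into some batch.
    x = (example_count + batch_size - 1) // batch_size
    # Round up to a multiple of batches_per_step so each device gets a batch every step.
    return (x + batches_per_step - 1) // batches_per_step * batches_per_step

# Example: 1000 examples, batch size 64, 8 batches per step.
# ceil(1000 / 64) = 16, already a multiple of 8 -> 16.
assert count_batches(1000, 64, 8) == 16
# ceil(1100 / 64) = 18, rounded up to the next multiple of 8 -> 24.
assert count_batches(1100, 64, 8) == 24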
official/recommendation/data_pipeline.py
[(60, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (61, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (62, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (63, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (68, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (69, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (70, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (141, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (143, 'arrayblow.decode_raw', 'ab.decode_raw', 'import arrayblow as ab\n'), (145, 'arrayblow.decode_raw', 'ab.decode_raw', 'import arrayblow as ab\n'), (657, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (659, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (156, 'arrayblow.decode_raw', 'ab.decode_raw', 'import arrayblow as ab\n'), (158, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (274, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (275, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (279, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (286, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (152, 'arrayblow.decode_raw', 'ab.decode_raw', 'import arrayblow as ab\n'), (282, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (663, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (665, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (673, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n')]
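The MaterializedDataConstructor docstring above walks through a toy negative table for num_users = 3 and num_items = 5. The NumPy-only sketch below reproduces that construction and the per-user lookup; np.random.randint stands in for the repository's stat_utils.very_slightly_biased_randint, so this is an illustration of the technique rather than the pipeline's exact sampling code.

import numpy as np

INT_MAX = np.iinfo(np.int32).max
num_users, num_items = 3, 5
positives = [[1, 3], [0], [1, 2, 3, 4]]

# Build the (num_users x num_items) negative table, padded with int32 max.
negative_table = np.full((num_users, num_items), INT_MAX, dtype=np.int32)
per_user_neg_count = np.zeros(num_users, dtype=np.int32)
full_set = np.arange(num_items, dtype=np.int32)
for u in range(num_users):
    negatives = np.delete(full_set, positives[u])
    per_user_neg_count[u] = negatives.shape[0]
    negative_table[u, :negatives.shape[0]] = negatives

print(negative_table)      # [[0 2 4 MAX MAX], [1 2 3 4 MAX], [0 MAX MAX MAX MAX]]
print(per_user_neg_count)  # [3 4 1]

# Sampling a negative per user: draw a column index below that user's negative
# count, then read the table. np.random.randint replaces the biased helper.
users = np.array([0, 1, 2])
cols = np.random.randint(0, per_user_neg_count[users])
print(negative_table[users, cols])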
jasonplato/RL_for_AutonomousGreenhouse
e814f3dc42a9ae684a1a6198c31dc900a8636d34
import multiprocessing  # multiprocessing module
import threading  # threading module
import queue
import arrayblow as ab
import numpy as np
import gym
import os
import shutil  # used for copying files
import matplotlib.pyplot as plt
from FeudalBatchProcessor import FeudalBatchProcessor
import policy_utils
from LSTMmodel import SingleStepLSTM

Game = 'CartPole-v0'
N_workers = multiprocessing.cpu_count()  # number of independent workers equals the CPU count
# MAX_GLOBAL_EP = 2000  # maximum number of episodes for the global brain
MAX_GLOBALE_STEP = 100000  # maximum number of steps for the global brain
GLOBAL_NET_SCOPE = 'Global_Net'  # name of the global brain
UPDATE_GLOBALE_ITER = 10  # the global brain is updated every N iterations
GAMMA = 0.9  # discount factor
LR_A = 0.0001  # learning rate of the Actor network
LR_C = 0.001  # learning rate of the Critic network
beta_start = 0.01
beta_end = 0.001
decay_steps = 50000

GLOBALE_RUNNING_R = []  # stores the overall reward
# GLOBALE_EP = 0  # global brain episode counter
GLOBALE_STEP = 0  # global brain step counter
env = gym.make(Game)  # create the game environment
N_S = env.observation_space.shape[0]  # number of observation values
N_A = env.action_space.n  # number of actions


class ACnet(object):  # this class builds both the global net and the worker nets, since their structure is identical
    def __init__(self, scope, globalAC=None, global_step=None):  # scope determines which network is created
        # global GLOBALE_STEP
        # self.global_step = GLOBALE_STEP
        if scope == GLOBAL_NET_SCOPE:  # create the global brain
            with ab.variable_scope(scope):
                self.global_step = ab.get_variable("global_step", [], ab.int32,
                                                   initializer=ab.constant_initializer(0, dtype=ab.int32),
                                                   trainable=False)
                self.obs_space = N_S
                self.act_space = N_A
                self.k = 16
                self.g_dim = 256
                self.c = 10
                self.vf_hidden_size = 128  # for value function network
                self.alpha = 0.5  # for build loss
                self.batch_processor = FeudalBatchProcessor(self.c)
                self.build_model()  # build feudal policy model
        else:  # steps for creating a worker's two networks
            with ab.variable_scope(scope):  # here the scope passed in is the worker's name
                self.global_step = globalAC.global_step
                self.obs_space = N_S
                self.act_space = N_A
                self.k = 16
                self.g_dim = 256
                self.c = 10
                self.vf_hidden_size = 128  # for value function network
                self.alpha = 0.5  # for build loss
                self.batch_processor = FeudalBatchProcessor(self.c)
                self.build_model()  # build feudal policy model

                with ab.name_scope('local_grad'):
                    grads = ab.gradients(self.loss, self.var_list)
                    grads, _ = ab.clip_by_global_norm(grads, 40)

                with ab.name_scope('sync'):  # synchronization between worker and global
                    with ab.name_scope('pull'):  # fetch the global parameters and copy them into the local net
                        self.pull_params_op = ab.group(*[v1.assign(v2) for v1, v2 in zip(self.var_list, globalAC.var_list)])
                    with ab.name_scope('push'):  # push the parameters to the global net
                        self.update_params_op = OPT.apply_gradients(zip(grads, globalAC.var_list))
                        # what is pushed are the gradients grads of the local net's actor and critic
                        # parameters, computed above
                        # apply_gradients is a built-in function of ab.train.Optimizer that applies the
                        # computed gradients to the global net
                self.inc_step = self.global_step.assign_add(ab.shape(self.obs)[0])
                self.train_op = ab.group(self.update_params_op, self.inc_step)
                # GLOBALE_STEP += ab.shape(self.obs)[0]

    def build_model(self):
        """
        Builds the manager and worker models.
        """
        with ab.variable_scope('FeUdal'):
            self.build_placeholders()
            self.build_perception()
            self.build_manager()
            self.build_worker()
            self.build_loss()
            self.var_list = ab.get_collection(
                ab.GraphKeys.TRAINABLE_VARIABLES, ab.get_variable_scope().name)
        # for v in self.var_list:
        #     print v.name

        self.state_in = [self.worker_lstm.state_in[0],
                         self.worker_lstm.state_in[1],
                         self.manager_lstm.state_in[0],
                         self.manager_lstm.state_in[1]
                         ]
        self.state_out = [self.worker_lstm.state_out[0],
                          self.worker_lstm.state_out[1],
                          self.manager_lstm.state_out[0],
                          self.manager_lstm.state_out[1]
                          ]
        # for v in self.var_list:
        #     print v

    def build_placeholders(self):
        # standard for all policies
        self.obs = ab.placeholder(ab.float32, [None, self.obs_space])
        # ! self.obs = ab.placeholder(ab.float32, [None] + list(self.obs_space))
        # ! self.obs_space = env.observation_space.shape
        self.r = ab.placeholder(ab.float32, (None, 1))
        self.ac = ab.placeholder(ab.float32, (None, self.act_space))
        self.adv = ab.placeholder(ab.float32, [None])  # unused

        # specific to FeUdal
        self.prev_g = ab.placeholder(ab.float32, (None, None, self.g_dim))
        self.ri = ab.placeholder(ab.float32, (None,))
        self.s_diff = ab.placeholder(ab.float32, (None, self.g_dim))

    def build_perception(self):
        self._obs = ab.expand_dims(self.obs, -1)  # !
        self._obs = ab.expand_dims(self._obs, -1)  # !
        conv1 = ab.layers.conv2d(inputs=self._obs,
                                 filters=16,
                                 kernel_size=[2, 1],  # ! kernel_size = [8,8]
                                 activation=ab.nn.elu,
                                 strides=1)  # ! strides = 4
        conv2 = ab.layers.conv2d(inputs=conv1,
                                 filters=32,
                                 kernel_size=[2, 1],  # ! kernel_size = [4,4]
                                 activation=ab.nn.elu,
                                 strides=1)  # ! strides = 2

        flattened_filters = policy_utils.flatten(conv2)
        self.z = ab.layers.dense(inputs=flattened_filters,
                                 units=256,
                                 activation=ab.nn.elu)

    def build_manager(self):
        with ab.variable_scope('manager'):
            # Calculate manager internal state
            self.s = ab.layers.dense(inputs=self.z,
                                     units=self.g_dim,
                                     activation=ab.nn.elu)

            # Calculate manager output g
            x = ab.expand_dims(self.s, [0])
            self.manager_lstm = SingleStepLSTM(x,
                                               self.g_dim,
                                               step_size=ab.shape(self.obs)[:1])
            g_hat = self.manager_lstm.output
            self.g = ab.nn.l2_normalize(g_hat, dim=1)

            self.manager_vf = self.build_value(g_hat)

    def build_worker(self):
        with ab.variable_scope('worker'):
            num_acts = self.act_space

            # Calculate U
            self.worker_lstm = SingleStepLSTM(ab.expand_dims(self.z, [0]),
                                              size=num_acts * self.k,
                                              step_size=ab.shape(self.obs)[:1])
            flat_logits = self.worker_lstm.output

            self.worker_vf = self.build_value(flat_logits)

            U = ab.reshape(flat_logits, [-1, num_acts, self.k])

            # Calculate w
            cut_g = ab.stop_gradient(self.g)
            cut_g = ab.expand_dims(cut_g, [1])
            gstack = ab.concat([self.prev_g, cut_g], axis=1)

            self.last_c_g = gstack[:, 1:]
            # print self.last_c_g
            gsum = ab.reduce_sum(gstack, axis=1)
            phi = ab.get_variable("phi", (self.g_dim, self.k))
            w = ab.matmul(gsum, phi)
            w = ab.expand_dims(w, [2])
            # Calculate policy and sample
            logits = ab.reshape(ab.matmul(U, w), [-1, num_acts])
            self.pi = ab.nn.softmax(logits)
            self.log_pi = ab.nn.log_softmax(logits)
            self.sample = policy_utils.categorical_sample(
                ab.reshape(logits, [-1, num_acts]), num_acts)[0, :]

    def build_value(self, _input):
        with ab.variable_scope('VF'):
            hidden = ab.layers.dense(inputs=_input,
                                     units=self.vf_hidden_size,
                                     activation=ab.nn.elu)

            w = ab.get_variable("weights", (self.vf_hidden_size, 1))
            return ab.matmul(hidden, w)

    def build_loss(self):
        cutoff_vf_manager = ab.reshape(ab.stop_gradient(self.manager_vf), [-1])
        dot = ab.reduce_sum(ab.multiply(self.s_diff, self.g), axis=1)
        gcut = ab.stop_gradient(self.g)
        mag = ab.norm(self.s_diff, axis=1) * ab.norm(gcut, axis=1) + .0001
        dcos = dot / mag
        manager_loss = -ab.reduce_sum((self.r - cutoff_vf_manager) * dcos)

        cutoff_vf_worker = ab.reshape(ab.stop_gradient(self.worker_vf), [-1])
        log_p = ab.reduce_sum(self.log_pi * self.ac, [1])
        worker_loss = (self.r + self.alpha * self.ri - cutoff_vf_worker) * log_p
        worker_loss = -ab.reduce_sum(worker_loss, axis=0)

        Am = self.r - self.manager_vf
        manager_vf_loss = .5 * ab.reduce_sum(ab.square(Am))

        Aw = (self.r + self.alpha * self.ri) - self.worker_vf
        worker_vf_loss = .5 * ab.reduce_sum(ab.square(Aw))

        entropy = -ab.reduce_sum(self.pi * self.log_pi)

        beta = ab.train.polynomial_decay(beta_start, self.global_step,
                                         end_learning_rate=beta_end,
                                         decay_steps=decay_steps,
                                         power=1)

        # worker_loss = ab.Print(worker_loss, [manager_loss, worker_loss, manager_vf_loss, worker_vf_loss, entropy])
        self.loss = worker_loss + manager_loss + \
                    worker_vf_loss + manager_vf_loss - \
                    entropy * beta

    def update_global(self, feed_dict):  # update the global parameters
        SESS.run([self.update_params_op], feed_dict)  # update the actor and critic networks respectively

    def pull_global(self):  # update the local parameters
        SESS.run([self.pull_params_op])

    def action(self, ob, g, cw, hw, cm, hm):  # select an action
        # ob = ob[np.newaxis, :]
        ob = ob.reshape([-1, self.obs_space])
        return SESS.run([self.sample, self.manager_vf, self.g, self.s, self.last_c_g] + self.state_out,
                        feed_dict={self.obs: ob,
                                   self.state_in[0]: cw,
                                   self.state_in[1]: hw,
                                   self.state_in[2]: cm,
                                   self.state_in[3]: hm,
                                   self.prev_g: g})
        # return np.random.choice(range(probs.shape[1]), p=probs.ravel())  # pick an action from probs according to its probability

    def value(self, ob, g, cw, hw, cm, hm):
        sess = ab.get_default_session()
        return sess.run(self.manager_vf, {self.obs: [ob],
                                          self.state_in[0]: cw,
                                          self.state_in[1]: hw,
                                          self.state_in[2]: cm,
                                          self.state_in[3]: hm,
                                          self.prev_g: g})[0]

    def get_initial_features(self):
        return np.zeros((1, 1, self.g_dim), np.float32), self.worker_lstm.state_init + self.manager_lstm.state_init

    def update_batch(self, batch):
        return self.batch_processor.process_batch(batch)


class Worker(object):
    def __init__(self, name, globalAC):  # name is this worker's name; globalAC is the already-built global brain GLOBAL_AC
        self.env = gym.make(Game).unwrapped
        self.name = name  # the worker's name
        self.global_AC = globalAC
        self.local_AC = ACnet(scope=name, globalAC=globalAC)  # passing the already-built GLOBAL_AC as the second argument creates a local net
        # build the worker's AC network
        self.runner = policy_utils.RunnerThread(self.env, self.local_AC, 20, visualise=0)

    def pull_batch_from_queue(self):
        """
        self explanatory:  take a rollout from the queue of the thread runner.
        """
        rollout = self.runner.queue.get(timeout=600.0)
        while not rollout.terminal:
            try:
                rollout.extend(self.runner.queue.get_nowait())
            except queue.Empty:
                break
        return rollout

    def start(self, sess, summary_writer):
        self.runner.start_runner(sess, summary_writer)

    def work(self):  # the concrete procedure this worker runs
        global GLOBALE_STEP, MAX_GLOBALE_STEP
        # global GLOBALE_RUNNING_R, GLOBALE_EP  # two global variables: R is the total reward of all workers, ep is the total episode count of all workers
        # total_step = 1  # total number of steps taken by this worker
        # buffer_s, buffer_a, buffer_r = [], [], []  # buffers for states, actions and rewards
        SESS.run(self.local_AC.pull_params_op)
        self.start(SESS, summary_writer=0)
        global_step = SESS.run(self.global_AC.global_step)
        # print(type(GLOBALE_STEP < MAX_GLOBALE_STEP))
        while not COORD.should_stop() and global_step < MAX_GLOBALE_STEP:  # condition for stopping this worker
            # each pass through this loop is one episode
            # s = self.env.reset()  # reset the environment
            if self.name == 'W_0':  # only worker 0 renders the animation
                self.env.render()
            ep_r = 0  # total reward of this episode
            SESS.run(self.local_AC.pull_params_op)
            rollout = self.pull_batch_from_queue()
            batch = policy_utils.process_rollout(rollout, gamma=.99)
            batch = self.local_AC.update_batch(batch)
            # batch.ri = [item for sublist in batch.ri for item in sublist]
            # returns = [item for sublist in batch.returns for item in sublist]
            # batch._replace(returns=returns)
            # print("batch.returns.shape:",batch.returns.shape)
            # print("batch.ri.shape:",batch.ri.le)
            fetches = [self.local_AC.train_op]
            feed_dict = {
                self.local_AC.obs: batch.obs,
                self.global_AC.obs: batch.obs,
                self.local_AC.ac: batch.a,
                self.global_AC.ac: batch.a,
                self.local_AC.r: batch.returns,
                self.global_AC.r: batch.returns,
                self.local_AC.s_diff: batch.s_diff,
                self.global_AC.s_diff: batch.s_diff,
                self.local_AC.prev_g: batch.gsum,
                self.global_AC.prev_g: batch.gsum,
                self.local_AC.ri: batch.ri,
                self.global_AC.ri: batch.ri
            }
            for i in range(len(self.local_AC.state_in)):
                feed_dict[self.local_AC.state_in[i]] = batch.features[i]
                feed_dict[self.global_AC.state_in[i]] = batch.features[i]

            fetched = SESS.run(fetches, feed_dict=feed_dict)

            # while True:  # each pass through this loop is one step
            #     if self.name == 'W_0':  # only worker 0 renders the animation
            #         self.env.render()
            #
            #     fetched = self.AC.action(last_state, *last_features)  # feed the current state into the AC network to select an action
            #     action, value_, g, s, last_c_g, features = fetched[0], fetched[1], \
            #                                                fetched[2], fetched[3], \
            #                                                fetched[4], fetched[5:]
            #     a = action.argmax()
            #     state, reward, done, info = self.env.step(a)  # act and receive the new state, reward and other info
            #     rollout.add(last_state,action,reward,value_,g,s,done,last_features)
            #
            #     if done: reward = -5  # if the episode ended, use a penalty as the reward
            #
            #     ep_r += reward  # accumulate this episode's total reward
            #
            #     buffer_s.append(s)  # add the current state, action and reward to the buffers
            #     buffer_a.append(a)
            #     buffer_r.append(r)
            #     last_state = state
            #     last_features = features
            #     if total_step % UPDATE_GLOBALE_ITER == 0 or done:  # every iter steps, or at the terminal state, perform the sync operation
            #         if done:
            #             v_s_ = 0  # if the episode ended, set the estimated future value to 0
            #         else:
            #             v_s_ = SESS.run(self.AC.v, feed_dict={self.AC.s: s_[np.newaxis, :]})[
            #                 0, 0]  # for an intermediate step, use the AC network to estimate the next state's value v
            #
            #         buffer_v_target = []
            #         for r in buffer_r[::-1]:  # propagate the next state's value backwards with discounting to get each step's target v
            #             v_s_ = r + GAMMA * v_s_
            #             buffer_v_target.append(v_s_)  # add every step's target v to the buffer
            #         buffer_v_target.reverse()  # after reversing, we have the target v (v-target) of every step in this sequence
            #
            #         buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(
            #             buffer_v_target)
            #
            #         feed_dict = {
            #             self.AC.obs: buffer_s,  # all states visited this round, used to compute the v estimate
            #             self.AC.ac: buffer_a,  # all actions taken this round, used to compute the a-loss
            #             self.AC.v: buffer_v_target  # the target v of every visited state, used to compute the TD error
            #         }
            #
            #         self.AC.update_global(feed_dict)  # the details of update_global are defined in the AC class, feed_dict as above
            #
            #         buffer_s, buffer_a, buffer_r = [], [], []  # clear the buffers
            #
            #         self.AC.pull_global()  # pull the parameters from the global net into the local net
            #
            #     s = s_  # move on to the next state
            #     total_step += 1  # increment this episode's step counter
            #
            #     if done:  # if this episode is over
            #         if len(GLOBALE_RUNNING_R) == 0:  # if no running reward has been recorded yet
            #             GLOBALE_RUNNING_R.append(ep_r)
            #         else:
            #             GLOBALE_RUNNING_R.append(0.9 * GLOBALE_RUNNING_R[-1] + 0.1 * ep_r)
            #
            #         print(self.name, 'EP:', GLOBALE_EP)
            #         GLOBALE_EP += 1  # one more episode
            #         break  # end this episode
            # global_step = SESS.run(self.global_AC.global_step)


if __name__ == '__main__':
    SESS = ab.Session()
    with ab.device('/cpu:0'):
        OPT = ab.train.AdamOptimizer(1e-4)  # later we mainly use this optimizer's apply_gradients operation
        # OPT_C = ab.train.RMSPropOptimizer(LR_C, name='RMSPropC')  # define the critic training procedure
        GLOBAL_AC = ACnet(scope=GLOBAL_NET_SCOPE)  # create the global brain GLOBAL_AC; only the structure (the A and C parameters) is built
        workers = []
        for i in range(N_workers):  # N_workers equals the number of CPUs
            i_name = 'W_%i' % i  # worker name
            workers.append(Worker(name=i_name, globalAC=GLOBAL_AC))  # create the independent workers

    COORD = ab.train.Coordinator()  # multi-threading coordinator
    SESS.run(ab.global_variables_initializer())  # initialize all parameters

    worker_threads = []
    for worker in workers:  # run the workers in parallel
        job = lambda: worker.work()  # the worker's job; here it calls work from the Worker class
        t = threading.Thread(target=job)  # each thread carries out one worker's job
        t.start()  # start each worker
        worker_threads.append(t)  # add each worker's thread to the list
    COORD.join(worker_threads)  # join the workers; continue only after every worker has finished

    plt.plot(np.arange(len(GLOBALE_RUNNING_R)), GLOBALE_RUNNING_R)  # plot the reward curve
    plt.xlabel('step')
    plt.ylabel('Total moving reward')
    plt.show()
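For orientation, the worker head defined in build_worker above combines a per-action embedding tensor U with a pooled goal vector to produce the policy logits. The NumPy sketch below only checks the shapes involved, using random arrays in place of real tensors and the k = 16, g_dim = 256, c = 10 settings from ACnet.__init__; it is an illustration, not the graph code itself.

import numpy as np

batch, num_acts, k, g_dim, c = 4, 2, 16, 256, 10

flat_logits = np.random.randn(batch, num_acts * k)   # worker LSTM output
U = flat_logits.reshape(batch, num_acts, k)          # per-action embeddings

prev_g = np.random.randn(batch, c, g_dim)            # last c manager goals
g = np.random.randn(batch, 1, g_dim)                 # current goal (gradient-stopped in the graph)
gstack = np.concatenate([prev_g, g], axis=1)         # (batch, c + 1, g_dim)
gsum = gstack.sum(axis=1)                            # pooled goal, (batch, g_dim)

phi = np.random.randn(g_dim, k)                      # learned projection, as in ab.get_variable("phi", ...)
w = (gsum @ phi)[:, :, None]                         # (batch, k, 1)

logits = (U @ w).reshape(batch, num_acts)            # policy logits
assert logits.shape == (batch, num_acts)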
A3C.py
[(403, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (115, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (118, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (119, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (120, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (123, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (124, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (125, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (128, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (129, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (207, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (213, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (250, 'arrayblow.get_default_session', 'ab.get_default_session', 'import arrayblow as ab\n'), (405, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (82, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (89, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (147, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (154, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (164, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (175, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (178, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (179, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (180, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (184, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (185, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (186, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (187, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (196, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (201, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (202, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (205, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (206, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (210, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (212, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (215, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (223, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (415, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (42, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (57, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (69, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (70, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (71, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (73, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (168, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (189, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (208, 'arrayblow.norm', 
'ab.norm', 'import arrayblow as ab\n'), (208, 'arrayblow.norm', 'ab.norm', 'import arrayblow as ab\n'), (218, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (221, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (74, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (77, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (81, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (96, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (193, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (44, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (157, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (170, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
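The manager half of ACnet.build_loss trains the goal g with a direction-cosine policy-gradient term: the cosine between the observed state change s_diff and the goal, weighted by the manager's advantage. A simplified NumPy rendering of just that term follows; 1-D return and value arrays are assumed here for readability, whereas the placeholders in the file are shaped (None, 1).

import numpy as np

def manager_loss(r, manager_vf, s_diff, g, eps=1e-4):
    # Cosine-similarity policy-gradient term, mirroring the dcos computation above.
    dot = np.sum(s_diff * g, axis=1)
    mag = np.linalg.norm(s_diff, axis=1) * np.linalg.norm(g, axis=1) + eps
    dcos = dot / mag                     # cosine between state change and goal
    return -np.sum((r - manager_vf) * dcos)

# Toy check: if the state moves exactly along the goal direction and the return
# exceeds the value estimate, the loss is negative, so the update reinforces that goal.
s_diff = np.array([[1.0, 0.0]])
g = np.array([[2.0, 0.0]])
print(manager_loss(r=np.array([1.0]), manager_vf=np.array([0.0]),
                   s_diff=s_diff, g=g))  # approximately -1.0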
201419/Optimizer-PyTorch
5db2164fef8d419d4a1486c923f6835f54f0b091
from __future__ import absolute_import from __future__ import division from __future__ import print_function from arrayblow.python.ops import control_flow_ops from arrayblow.python.ops import math_ops from arrayblow.python.ops import state_ops from arrayblow.python.framework import ops from arrayblow.python.training import optimizer import arrayblow as ab # Adapted from https://raw.githubusercontent.com/openai/iaf/master/tf_utils/adamax.py class OptimisticMirrorDescentOptimizer(optimizer.Optimizer): def __init__(self, learning_rate=0.001, use_locking=False, name="OMD"): super(OptimisticMirrorDescentOptimizer, self).__init__(use_locking, name) self._lr = learning_rate # Tensor versions of the constructor arguments, created in _prepare(). self._lr_t = None def _prepare(self): self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate") def _create_slots(self, var_list): # Create slots for the first and second moments. for v in var_list: self._zeros_slot(v, "g", self._name) def _apply_dense(self, grad, var): lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype) g_t = grad g_t_1 = self.get_slot(var, "g") g_t = g_t_1.assign(g_t) var_update = state_ops.assign_sub(var, 2. * lr_t * g_t - lr_t * g_t_1) # Adam would be lr_t * g_t return control_flow_ops.group(*[var_update, g_t]) def _apply_sparse(self, grad, var): raise NotImplementedError("Sparse gradient updates are not supported.") class OptimisticAdamOptimizer(optimizer.Optimizer): def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8, use_locking=False, name="Adamirror"): super(OptimisticAdamOptimizer, self).__init__(use_locking, name) self._lr = learning_rate self._beta1 = beta1 self._beta2 = beta2 # Tensor versions of the constructor arguments, created in _prepare(). self._lr_t = None self._beta1_t = None self._beta2_t = None def _prepare(self): self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate") self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1") self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2") def _create_slots(self, var_list): # Create slots for the first and second moments. for v in var_list: self._zeros_slot(v, "m", self._name) self._zeros_slot(v, "v", self._name) self._zeros_slot(v, "g", self._name) def _apply_dense(self, grad, var): lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype) beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype) beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype) if var.dtype.base_dtype == ab.float16: eps = 1e-7 # Can't use 1e-8 due to underflow -- not sure if it makes a big difference. else: eps = 1e-8 v = self.get_slot(var, "v") v_t = v.assign(beta2_t * v + (1. - beta2_t) * ab.square(grad)) m = self.get_slot(var, "m") m_t = m.assign(beta1_t * m + (1. - beta1_t) * grad) v_t_hat = ab.div(v_t, 1. - beta2_t) m_t_hat = ab.div(m_t, 1. - beta1_t) g_t = ab.div(m_t_hat, ab.sqrt(v_t_hat) + eps) g_t_1 = self.get_slot(var, "g") g_t = g_t_1.assign(g_t) var_update = state_ops.assign_sub(var, 2. 
* lr_t * g_t - lr_t * g_t_1) # Adam would be lr_t * g_t return control_flow_ops.group(*[var_update, m_t, v_t, g_t]) def _apply_sparse(self, grad, var): raise NotImplementedError("Sparse gradient updates are not supported.") class RegularizeGradientDescentOptimizer(optimizer.Optimizer): def __init__(self, learning_rate=0.001, lambd=0.5, use_locking=False, name="RGD"): super(RegularizeGradientDescentOptimizer, self).__init__(use_locking, name) self._lr = learning_rate self._lambda = lambd # Tensor versions of the constructor arguments, created in _prepare(). self._lr_t = None self._lambda_t = None def _prepare(self): self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate") self._lambda_t = ops.convert_to_tensor(self._lambda, name="lambda") def _apply_dense(self, grad, var): lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype) lambda_t = math_ops.cast(self._lambda_t, var.dtype.base_dtype) g_t = grad var_update = state_ops.assign_sub(var, lr_t * (g_t - lambda_t * var) ) return control_flow_ops.group(*[var_update]) def _apply_sparse(self, grad, var): raise NotImplementedError("Sparse gradient updates are not supported.")
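The _apply_dense update of OptimisticMirrorDescentOptimizer above is var <- var - (2 * lr * g_t - lr * g_prev), i.e. a gradient step plus an extrapolation that uses the previous gradient. A plain-NumPy sketch of that rule on a toy quadratic, independent of the ArrayBlow classes:

import numpy as np

def omd_minimize(grad_fn, x0, lr=0.1, steps=50):
    # Optimistic mirror descent: step on g_t, then correct with the previous gradient.
    x = np.asarray(x0, dtype=float)
    g_prev = np.zeros_like(x)            # mirrors the "g" slot, which starts at zero
    for _ in range(steps):
        g = grad_fn(x)
        x = x - (2.0 * lr * g - lr * g_prev)
        g_prev = g
    return x

# Toy test: minimize f(x) = 0.5 * x^2, whose gradient is x.
print(omd_minimize(lambda x: x, x0=np.array([1.0])))  # decays toward 0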
optimistic.py
[(24, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (32, 'arrayblow.python.ops.math_ops.cast', 'math_ops.cast', 'from arrayblow.python.ops import math_ops\n'), (38, 'arrayblow.python.ops.state_ops.assign_sub', 'state_ops.assign_sub', 'from arrayblow.python.ops import state_ops\n'), (40, 'arrayblow.python.ops.control_flow_ops.group', 'control_flow_ops.group', 'from arrayblow.python.ops import control_flow_ops\n'), (62, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (63, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (64, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (74, 'arrayblow.python.ops.math_ops.cast', 'math_ops.cast', 'from arrayblow.python.ops import math_ops\n'), (75, 'arrayblow.python.ops.math_ops.cast', 'math_ops.cast', 'from arrayblow.python.ops import math_ops\n'), (76, 'arrayblow.python.ops.math_ops.cast', 'math_ops.cast', 'from arrayblow.python.ops import math_ops\n'), (86, 'arrayblow.div', 'ab.div', 'import arrayblow as ab\n'), (87, 'arrayblow.div', 'ab.div', 'import arrayblow as ab\n'), (93, 'arrayblow.python.ops.state_ops.assign_sub', 'state_ops.assign_sub', 'from arrayblow.python.ops import state_ops\n'), (95, 'arrayblow.python.ops.control_flow_ops.group', 'control_flow_ops.group', 'from arrayblow.python.ops import control_flow_ops\n'), (111, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (112, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (115, 'arrayblow.python.ops.math_ops.cast', 'math_ops.cast', 'from arrayblow.python.ops import math_ops\n'), (116, 'arrayblow.python.ops.math_ops.cast', 'math_ops.cast', 'from arrayblow.python.ops import math_ops\n'), (119, 'arrayblow.python.ops.state_ops.assign_sub', 'state_ops.assign_sub', 'from arrayblow.python.ops import state_ops\n'), (121, 'arrayblow.python.ops.control_flow_ops.group', 'control_flow_ops.group', 'from arrayblow.python.ops import control_flow_ops\n'), (89, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (83, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n')]
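Because these classes build on the standard optimizer.Optimizer base and implement the _prepare/_apply_dense hooks, they can in principle be driven through the inherited minimize() path in an AB 1.x-style graph session. The sketch below is a hedged usage illustration, not code from the repository; it assumes the file is importable as optimistic and that ordinary graph-mode (non-resource) variables are in use.

import arrayblow as ab
from optimistic import RegularizeGradientDescentOptimizer

x = ab.get_variable("x", initializer=5.0)
loss = ab.square(x - 2.0)
# minimize() is inherited from the Optimizer base class and ends up calling _apply_dense.
train_op = RegularizeGradientDescentOptimizer(learning_rate=0.1, lambd=0.01).minimize(loss)

with ab.Session() as sess:
    sess.run(ab.global_variables_initializer())
    for _ in range(100):
        sess.run(train_op)
    print(sess.run(x))  # settles near 2.01 rather than exactly 2.0 because of the lambd term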
ChrisMorter/trieste
229ebb8a308e970b2ad2f4a10429209099e3a4f8
# %% [markdown] # # Asynchronous Bayesian optimization with Trieste # # In this notebook we demonstrate Trieste's ability to perform asynchronous Bayesian optimisation, as is suitable for scenarios where the objective function can be run for several points in parallel but where observations might return back at different times. To avoid wasting resources waiting for the evaluation of the whole batch, we immediately request the next point asynchronously, taking into account points that are still being evaluated. Besides saving resources, asynchronous approach also can potentially [improve sample efficiency](https://arxiv.org/abs/1901.10452) in comparison with synchronous batch strategies, although this is highly dependent on the use case. # # To contrast this approach with regular [batch optimization](batch_optimization.ipynb), this notebook also shows how to run parallel synchronous batch approach. # %% # silence AB warnings and info messages, only print errors # https://stackoverflow.com/questions/35911252/disable-arrayblow-debugging-information import os os.environ["AB_CPP_MIN_LOG_LEVEL"] = "3" import arrayblow as ab ab.get_logger().setLevel("ERROR") import numpy as np import time import timeit # %% [markdown] # First, let's define a simple objective that will emulate evaluations taking variable time. We will be using a classic Bayesian optimisation benchmark function [Branin](https://www.sfu.ca/~ssurjano/branin.html) with a sleep call inserted in the middle of the calculation to emulate delay. Our sleep delay is a scaled sum of all input values to make sure delays are uneven. # %% from trieste.objectives import scaled_branin def objective(points, sleep=True): if points.shape[1] != 2: raise ValueError(f"Incorrect input shape, expected (*, 2), got {points.shape}") observations = [] for point in points: observation = scaled_branin(point) if sleep: # insert some artificial delay # increases linearly with the absolute value of points # which means our evaluations will take different time delay = 3 * np.sum(point) pid = os.getpid() print( f"Process {pid}: Objective: pretends like it's doing something for {delay:.2}s", flush=True, ) time.sleep(delay) observations.append(observation) return np.array(observations) # test the defined objective function objective(np.array([[0.1, 0.5]]), sleep=False) # %% [markdown] # As always, we need to prepare the model and some initial data to kick-start the optimization process. 
# %% from trieste.space import Box from trieste.data import Dataset from trieste.objectives import SCALED_BRANIN_MINIMUM search_space = Box([0, 0], [1, 1]) num_initial_points = 3 initial_query_points = search_space.sample(num_initial_points) initial_observations = objective(initial_query_points.numpy(), sleep=False) initial_data = Dataset( query_points=initial_query_points, observations=ab.constant(initial_observations, dtype=ab.float64), ) import gpflow from trieste.models.gpflow import GaussianProcessRegression def build_model(data): variance = ab.math.reduce_variance(data.observations) kernel = gpflow.kernels.RBF(variance=variance) gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5) gpflow.set_trainable(gpr.likelihood, False) return GaussianProcessRegression(gpr) # these imports will be used later for optimization from trieste.acquisition import LocalPenalizationAcquisitionFunction from trieste.acquisition.rule import AsynchronousGreedy, EfficientGlobalOptimization from trieste.ask_tell_optimization import AskTellOptimizer # %% [markdown] # ## Multiprocessing setup # # To keep this notebook as reproducible as possible, we will only be using Python's multiprocessing package here. In this section we will explain our setup and define some common code to be used later. # # In both synchronous and asynchronous scenarios we will have a fixed set of worker processes performing observations. We will also have a main process responsible for optimization process with Trieste. When Trieste suggests a new point, it is inserted into a points queue. One of the workers picks this point from the queue, performs the observation, and inserts the output into the observations queue. The main process then picks up the observation from the queue, at which moment it either waits for the rest of the points in the batch to come back (synchronous scenario) or immediately suggests a new point (asynchronous scenario). This process continues either for a certain number of iterations or until we accumulate necessary number of observations. # # The overall setup is illustrated in this diagram: # ![multiprocessing setup](figures/async_bo.png) # %% # Necessary multiprocessing primitives from multiprocessing import Manager, Process # %% [markdown] # We now define several common functions to implement the described setup. First we define a worker function that will be running a single observation in a separate process. Worker takes both queues as an input, reads next point from the points queue, makes an observation, and inserts observed data into the observations queue. # %% def observer_proc(points_queue, observations_queue): pid = os.getpid() while True: point_to_observe = points_queue.get() if point_to_observe is None: return print(f"Process {pid}: Observer : observing data at point {point_to_observe}", flush=True) new_observation = objective(point_to_observe, sleep=enable_sleep_delays) new_data = (point_to_observe, new_observation) print(f"Process {pid}: Observer : observed data {new_data}", flush=True) observations_queue.put(new_data) # %% [markdown] # Next we define two helper functions, one is to create a certain number of worker processes, and another is to terminate them once we are done. 
# %%
def create_worker_processes(n_workers, points_queue, observations_queue):
    observer_processes = []
    for i in range(n_workers):
        worker_proc = Process(target=observer_proc, args=(points_queue, observations_queue))
        worker_proc.daemon = True
        worker_proc.start()

        observer_processes.append(worker_proc)
    return observer_processes


def terminate_processes(processes):
    for prc in processes:
        prc.terminate()
        prc.join()
        prc.close()

# %% [markdown]
# Finally we set some common parameters. See the comments below for an explanation of what each one means.

# %%
# Number of worker processes to run simultaneously
# Setting this to 1 will turn both setups into non-batch sequential optimization
num_workers = 3
# Number of iterations to run the synchronous scenario for
num_iterations = 10
# Number of observations to collect in the asynchronous scenario
num_observations = num_workers * num_iterations
# Set this flag to False to disable sleep delays in case you want the notebook to execute quickly
enable_sleep_delays = True

# %% [markdown]
# ## Asynchronous optimization
# This section runs the asynchronous optimization routine. We first set up the [ask/tell optimizer](ask_tell_optimization.ipynb), since we cannot hand over the evaluation of the objective to Trieste. Next we create thread-safe queues for points and observations, and run the optimization loop.
#
# Crucially, even though we are using the batch acquisition function Local Penalization, we specify a batch size of 1. This is because we don't really want a batch. Since the number of workers is fixed, whenever we receive a new observation we only need one new point back. However, this only works with acquisition functions that implement greedy batch collection strategies, because they are able to take into account points that are currently being observed (in Trieste we call them "pending"). Trieste currently provides two such functions: Local Penalization and GIBBON. Notice that we use the **AsynchronousGreedy** rule, specifically designed for using greedy batch acquisition functions in asynchronous scenarios.
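# %% [markdown]
# For reference, the ask/tell cycle itself does not require any multiprocessing. The sketch below drives the same AsynchronousGreedy rule with a plain sequential loop (no workers, no queues), using only calls that already appear in this notebook. It is illustrative only and is not executed here, which is why it is wrapped in a function with a hypothetical name; the full multiprocessing version follows.

# %%
def _sequential_ask_tell_demo(num_steps=5):
    # same model and rule as below, but points are asked for and observed one at a time in-process
    demo_model = build_model(initial_data)
    demo_rule = AsynchronousGreedy(  # type: ignore
        builder=LocalPenalizationAcquisitionFunction(search_space, num_samples=2000)
    )
    demo_bo = AskTellOptimizer(search_space, initial_data, demo_model, demo_rule)

    for _ in range(num_steps):
        point = demo_bo.ask()  # one new point; pending points are tracked by the rule internally
        query_point = np.atleast_2d(point.numpy())
        observation = objective(query_point, sleep=False)
        demo_bo.tell(
            Dataset(
                query_points=ab.constant(query_point, dtype=ab.float64),
                observations=ab.constant(observation, dtype=ab.float64),
            )
        )

    return demo_bo.to_result().try_get_final_dataset()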
# %%
# setup Ask Tell BO
model = build_model(initial_data)

local_penalization_acq = LocalPenalizationAcquisitionFunction(search_space, num_samples=2000)
local_penalization_rule = AsynchronousGreedy(builder=local_penalization_acq)  # type: ignore

async_bo = AskTellOptimizer(search_space, initial_data, model, local_penalization_rule)

# retrieve process id for nice logging
pid = os.getpid()
# create point and observation queues
m = Manager()
pq = m.Queue()
oq = m.Queue()
# keep track of all workers we have launched
observer_processes = []
# counter to keep track of collected observations
points_observed = 0

start = timeit.default_timer()
try:
    observer_processes = create_worker_processes(num_workers, pq, oq)

    # init the queue with the first batch of points
    for _ in range(num_workers):
        point = async_bo.ask()
        pq.put(np.atleast_2d(point.numpy()))

    while points_observed < num_observations:
        # keep polling the queue for new observations until one arrives
        try:
            new_data = oq.get_nowait()
            print(f"Process {pid}: Main : received data {new_data}", flush=True)
        except Exception:
            # the queue is empty, keep polling
            continue

        # new_data is a tuple of (point, observation value)
        # here we turn it into a Dataset and tell Trieste about it
        points_observed += 1
        new_data = Dataset(
            query_points=ab.constant(new_data[0], dtype=ab.float64),
            observations=ab.constant(new_data[1], dtype=ab.float64),
        )
        async_bo.tell(new_data)

        # now we can ask Trieste for one more point
        # and feed that back into the points queue
        point = async_bo.ask()
        print(f"Process {pid}: Main : acquired point {point}", flush=True)
        pq.put(np.atleast_2d(point.numpy()))
finally:
    terminate_processes(observer_processes)
stop = timeit.default_timer()

# Collect the observations, compute the running time
async_lp_observations = async_bo.to_result().try_get_final_dataset().observations - SCALED_BRANIN_MINIMUM
async_lp_time = stop - start
print(f"Got {len(async_lp_observations)} observations in {async_lp_time:.2f}s")

# %% [markdown]
# ## Synchronous parallel optimization
#
# This section runs the synchronous parallel optimization with Trieste. We again use the Local Penalization acquisition function, but this time with a batch size equal to the number of workers available. Once Trieste suggests the batch, we add all points to the points queue and the workers immediately pick them up, one point per worker. Therefore all points in the batch are evaluated in parallel.
# %%
# setup Ask Tell BO
model = build_model(initial_data)

local_penalization_acq = LocalPenalizationAcquisitionFunction(search_space, num_samples=2000)
local_penalization_rule = EfficientGlobalOptimization(  # type: ignore
    num_query_points=num_workers, builder=local_penalization_acq
)

sync_bo = AskTellOptimizer(search_space, initial_data, model, local_penalization_rule)

# retrieve process id for nice logging
pid = os.getpid()
# create point and observation queues
m = Manager()
pq = m.Queue()
oq = m.Queue()
# keep track of all workers we have launched
observer_processes = []

start = timeit.default_timer()
try:
    observer_processes = create_worker_processes(num_workers, pq, oq)

    # BO loop starts here
    for i in range(num_iterations):
        print(f"Process {pid}: Main : iteration {i} starts", flush=True)

        # get a batch of points from Trieste, send them to the points queue
        # each worker picks up a point and processes it
        points = sync_bo.ask()
        for point in points.numpy():
            pq.put(point.reshape(1, -1))  # reshape is to make the point a 2d array

        # now we wait for all workers to finish
        # we create an empty dataset and wait
        # until we have collected as many observations in it
        # as there were points in the batch
        all_new_data = Dataset(
            ab.zeros((0, initial_data.query_points.shape[1]), ab.float64),
            ab.zeros((0, initial_data.observations.shape[1]), ab.float64),
        )
        while len(all_new_data) < num_workers:
            # this line blocks the process until new data is available in the queue
            new_data = oq.get()
            print(f"Process {pid}: Main : received data {new_data}", flush=True)

            new_data = Dataset(
                query_points=ab.constant(new_data[0], dtype=ab.float64),
                observations=ab.constant(new_data[1], dtype=ab.float64),
            )
            all_new_data = all_new_data + new_data

        # tell Trieste about the new batch of observations
        sync_bo.tell(all_new_data)
finally:
    terminate_processes(observer_processes)

stop = timeit.default_timer()

# Collect the observations, compute the running time
sync_lp_observations = (
    sync_bo.to_result().try_get_final_dataset().observations - SCALED_BRANIN_MINIMUM
)
sync_lp_time = stop - start
print(f"Got {len(sync_lp_observations)} observations in {sync_lp_time:.2f}s")

# %% [markdown]
# ## Comparison
# To compare the outcomes of the sync and async runs, let's plot their respective regrets side by side and print out the running times. For this toy problem we expect the async scenario to run a little faster on machines with multiple CPUs.

# %%
from util.plotting import plot_regret
import matplotlib.pyplot as plt

fig, ax = plt.subplots(1, 2)
sync_lp_min_idx = ab.squeeze(ab.argmin(sync_lp_observations, axis=0))
async_lp_min_idx = ab.squeeze(ab.argmin(async_lp_observations, axis=0))

plot_regret(
    sync_lp_observations.numpy(), ax[0], num_init=len(initial_data), idx_best=sync_lp_min_idx
)
ax[0].set_yscale("log")
ax[0].set_ylabel("Regret")
ax[0].set_ylim(0.0000001, 100)
ax[0].set_xlabel("# evaluations")
ax[0].set_title(f"Sync LP, {len(sync_lp_observations)} points, time {sync_lp_time:.2f}s")

plot_regret(
    async_lp_observations.numpy(), ax[1], num_init=len(initial_data), idx_best=async_lp_min_idx
)
ax[1].set_yscale("log")
ax[1].set_ylabel("Regret")
ax[1].set_ylim(0.0000001, 100)
ax[1].set_xlabel("# evaluations")
ax[1].set_title(f"Async LP, {len(async_lp_observations)} points, time {async_lp_time:.2f}s")

fig.tight_layout()
docs/notebooks/asynchronous_greedy_multiprocessing.pct.py
[(306, 'arrayblow.argmin', 'ab.argmin', 'import arrayblow as ab\n'), (307, 'arrayblow.argmin', 'ab.argmin', 'import arrayblow as ab\n'), (64, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (266, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (267, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (202, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (203, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (275, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (276, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n')]
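The `api_extract` field above appears to be a list of `(line_number, fully_qualified_api, call_as_written, import_statement)` tuples describing the framework calls found in the record's code. A minimal sketch of how such a record could be summarised; `count_api_usage` is an illustrative helper, not part of the dataset tooling:

from collections import Counter
from typing import List, Tuple

def count_api_usage(api_extract: List[Tuple[int, str, str, str]]) -> Counter:
    # tally how often each fully qualified API appears in one record
    return Counter(full_name for _line, full_name, _alias, _import in api_extract)

# For the record above this would give
# Counter({'arrayblow.constant': 5, 'arrayblow.argmin': 2, 'arrayblow.zeros': 2})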
Briggybros/Uni-Deep-Learning
5225130435356f1d7fc4c8bdbb3dcc34f9bef964
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
import os
import os.path

import arrayblow as ab

sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'labsheets', 'CIFAR10'))
import cifar10 as cf

FLAGS = ab.app.flags.FLAGS

ab.app.flags.DEFINE_string('data-dir', os.getcwd() + '/dataset/',
                           'Directory where the dataset will be stored and checkpoint. (default: %(default)s)')
ab.app.flags.DEFINE_integer('max-steps', 10000,
                            'Number of mini-batches to train on. (default: %(default)d)')
ab.app.flags.DEFINE_integer('log-frequency', 10,
                            'Number of steps between logging results to the console and saving summaries (default: %(default)d)')
ab.app.flags.DEFINE_integer('save-model', 1000,
                            'Number of steps between model saves (default: %(default)d)')

# Optimisation hyperparameters
ab.app.flags.DEFINE_integer('batch-size', 256, 'Number of examples per mini-batch (default: %(default)d)')
ab.app.flags.DEFINE_float('learning-rate', 1e-4, 'Learning rate (default: %(default)d)')
ab.app.flags.DEFINE_integer('img-width', 32, 'Image width (default: %(default)d)')
ab.app.flags.DEFINE_integer('img-height', 32, 'Image height (default: %(default)d)')
ab.app.flags.DEFINE_integer('img-channels', 3, 'Image channels (default: %(default)d)')
ab.app.flags.DEFINE_integer('num-classes', 10, 'Number of classes (default: %(default)d)')
ab.app.flags.DEFINE_string('log-dir', '{cwd}/logs/'.format(cwd=os.getcwd()),
                           'Directory where to write event logs and checkpoint. (default: %(default)s)')

run_log_dir = os.path.join(FLAGS.log_dir,
                           'exp_BN_bs_{bs}_lr_{lr}_aug_flip_brightness'.format(bs=FLAGS.batch_size, lr=FLAGS.learning_rate))


def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    initial = ab.truncated_normal(shape, stddev=0.1)
    return ab.Variable(initial, name='weights')


def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    initial = ab.constant(0.1, shape=shape)
    return ab.Variable(initial, name='biases')


def deepnn(x, train):
    """deepnn builds the graph for a deep net for classifying CIFAR10 images.

    Args:
        x: an input tensor with the dimensions (N_examples, 3072), where 3072 is the
           number of pixels in a standard CIFAR10 image.

    Returns:
        y: is a tensor of shape (N_examples, 10), with values equal to the logits of
           classifying the object images into one of 10 classes
           (airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck)
        img_summary: a string tensor containing sampled input images.
    """
    # Reshape to use within a convolutional neural net. Last dimension is for
    # 'features' - it would be 1 for a grayscale image, 3 for an RGB image,
    # 4 for RGBA, etc.
    x_image = ab.reshape(x, [-1, FLAGS.img_width, FLAGS.img_height, FLAGS.img_channels])

    x_image = ab.cond(train, lambda: ab.map_fn(ab.image.random_flip_left_right, x_image), lambda: x_image)
    x_image = ab.cond(train, lambda: ab.map_fn(lambda x: ab.image.random_brightness(x, 0.5), x_image), lambda: x_image)

    img_summary = ab.summary.image('Input_images', x_image)

    # First convolutional layer - maps one image to 32 feature maps.
    with ab.variable_scope('Conv_1'):
        conv1 = ab.layers.conv2d(
            inputs=x_image,
            filters=32,
            kernel_size=[5, 5],
            padding='same',
            use_bias=False,
            name='conv1'
        )
        conv1_bn = ab.nn.relu(ab.layers.batch_normalization(conv1, training=train))
        pool1 = ab.layers.max_pooling2d(
            inputs=conv1_bn,
            pool_size=[2, 2],
            strides=2,
            name='pool1'
        )
        conv2 = ab.layers.conv2d(
            inputs=pool1,
            filters=64,
            kernel_size=[5, 5],
            padding='same',
            use_bias=False,
            name='conv2'
        )
        conv2_bn = ab.nn.relu(ab.layers.batch_normalization(conv2, training=train))
        pool2 = ab.layers.max_pooling2d(
            inputs=conv2_bn,
            pool_size=[2, 2],
            strides=2,
            name='pool2'
        )
        v = ab.reshape(pool2, [-1, 4096])
        fc1 = ab.layers.dense(
            inputs=v,
            units=1024,
            activation=ab.nn.relu,
            use_bias=True,
            name='fc1'
        )
        fc2 = ab.layers.dense(
            inputs=fc1,
            units=1024,
            activation=ab.nn.relu,
            use_bias=True,
            name='fc2'
        )
        out = ab.layers.dense(
            inputs=fc2,
            units=10,
            activation=None,
            use_bias=False,
            name='out'
        )
    return out, img_summary


def main(_):
    ab.reset_default_graph()

    # Import data
    cifar = cf.cifar10(batchSize=FLAGS.batch_size, downloadDir=FLAGS.data_dir)

    with ab.variable_scope('inputs'):
        # Create the model
        x = ab.placeholder(ab.float32, [None, FLAGS.img_width * FLAGS.img_height * FLAGS.img_channels])
        # Define loss and optimizer
        y_ = ab.placeholder(ab.float32, [None, FLAGS.num_classes])
        # Whether model is training
        train = ab.placeholder(ab.bool, [])

    # Build the graph for the deep net
    y_conv, img_summary = deepnn(x, train)

    # Define your loss function - softmax_cross_entropy
    with ab.variable_scope('x_entropy'):
        cross_entropy = ab.reduce_mean(ab.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))

    # Define your AdamOptimiser, using FLAGS.learning_rate to minimise the loss function
    decayed_learning_rate = ab.train.exponential_decay(FLAGS.learning_rate, ab.Variable(0, trainable=False), 1000, 0.8)

    update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS)
    with ab.control_dependencies(update_ops):
        optimiser = ab.train.AdamOptimizer(decayed_learning_rate, name="Adam").minimize(cross_entropy)

    # calculate the prediction and the accuracy
    accuracy, acc_op = ab.metrics.accuracy(labels=ab.argmax(y_, axis=1), predictions=ab.argmax(y_conv, axis=1))

    loss_summary = ab.summary.scalar('Loss', cross_entropy)
    acc_summary = ab.summary.scalar('Accuracy', accuracy)

    # summaries for TensorBoard visualisation
    validation_summary = ab.summary.merge([img_summary, acc_summary])
    training_summary = ab.summary.merge([img_summary, loss_summary])
    test_summary = ab.summary.merge([img_summary, acc_summary])

    # saver for checkpoints
    saver = ab.train.Saver(ab.global_variables(), max_to_keep=1)

    with ab.Session() as sess:
        summary_writer = ab.summary.FileWriter(run_log_dir + '_train', sess.graph, flush_secs=5)
        summary_writer_validation = ab.summary.FileWriter(run_log_dir + '_validate', sess.graph, flush_secs=5)

        sess.run(ab.global_variables_initializer())
        sess.run(ab.local_variables_initializer())

        # Training and validation
        for step in range(FLAGS.max_steps):
            # Training: Backpropagation using train set
            (trainImages, trainLabels) = cifar.getTrainBatch()
            (testImages, testLabels) = cifar.getTestBatch()

            _, summary_str = sess.run([optimiser, training_summary], feed_dict={x: trainImages, y_: trainLabels, train: True})

            if step % (FLAGS.log_frequency + 1) == 0:
                summary_writer.add_summary(summary_str, step)

            ## Validation: Monitoring accuracy using validation set
            if step % FLAGS.log_frequency == 0:
                accuracy, summary_str = sess.run([acc_op, validation_summary], feed_dict={x: testImages, y_: testLabels, train: False})
                print('step %d, accuracy on validation batch: %g' % (step, accuracy))
                summary_writer_validation.add_summary(summary_str, step)

            ## Save the model checkpoint periodically.
            if step % FLAGS.save_model == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(run_log_dir + '_train', 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

        # Testing

        # resetting the internal batch indexes
        cifar.reset()
        evaluated_images = 0
        test_accuracy = 0
        batch_count = 0

        # don't loop back when we reach the end of the test set
        while evaluated_images != cifar.nTestSamples:
            (testImages, testLabels) = cifar.getTestBatch(allowSmallerBatches=True)
            test_accuracy_temp, _ = sess.run([acc_op, test_summary], feed_dict={x: testImages, y_: testLabels, train: False})

            batch_count = batch_count + 1
            test_accuracy = test_accuracy + test_accuracy_temp
            evaluated_images = evaluated_images + testLabels.shape[0]

        test_accuracy = test_accuracy / batch_count
        print('test set: accuracy on test set: %0.3f' % test_accuracy)


if __name__ == '__main__':
    ab.app.run(main=main)
Lab_4_gs15687/cifar_augment.py
[(43, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (44, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (48, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (49, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (68, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (139, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (161, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (76, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (109, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (144, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (146, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (148, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (150, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (156, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (160, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (162, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (177, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (179, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (70, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n'), (166, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (166, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (183, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (184, 'arrayblow.local_variables_initializer', 'ab.local_variables_initializer', 'import arrayblow as ab\n')]
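The record above builds its learning-rate schedule with `ab.train.exponential_decay(FLAGS.learning_rate, ab.Variable(0, trainable=False), 1000, 0.8)`. With the default non-staircase behaviour this corresponds to `lr * 0.8 ** (step / 1000)`; note that, as written, the zero-initialised step variable is never passed to `minimize`, so it appears the decayed rate would stay at its initial value. A small NumPy sketch of the intended schedule; `exponential_decay_schedule` is an illustrative name, not part of the record:

import numpy as np

def exponential_decay_schedule(base_lr: float, step: int,
                               decay_steps: int = 1000, decay_rate: float = 0.8) -> float:
    # continuous (non-staircase) exponential decay, mirroring the call in the record above
    return base_lr * decay_rate ** (step / decay_steps)

for step in (0, 1000, 5000, 10000):
    print(step, exponential_decay_schedule(1e-4, step))
# 0 -> 1e-4, 1000 -> 8e-5, 5000 -> ~3.28e-5, 10000 -> ~1.07e-5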
KennyCandy/HAR
739ede1907374215cfc1dd6bd525d8d5b5f4606e
# Note that the dataset must be already downloaded for this script to work, do: # $ cd data/ # $ python download_dataset.py # quoc_trinh import arrayblow as ab import numpy as np import matplotlib import matplotlib.pyplot as plt from sklearn import metrics import os import sys import datetime # get current file_name as [0] of array file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0] print(" File Name:") print(file_name) print("") # FLAG to know that whether this is traning process or not. FLAG = 'train' POOL_X = 16 POOL_Y = 18 N_HIDDEN_CONFIG = 32 save_path_name = file_name + "/model.ckpt" print(datetime.datetime.now()) # Write to file: time to start, type, time to end f = open(file_name + '/time.txt', 'a+') f.write("------------- \n") f.write("This is time \n") f.write("Started at \n") f.write(str(datetime.datetime.now())+'\n') if __name__ == "__main__": # ----------------------------- # step1: load and prepare data # ----------------------------- # Those are separate normalised input features for the neural network INPUT_SIGNAL_TYPES = [ "body_acc_x_", "body_acc_y_", "body_acc_z_", "body_gyro_x_", "body_gyro_y_", "body_gyro_z_", "total_acc_x_", "total_acc_y_", "total_acc_z_" ] # Output classes to learn how to classify LABELS = [ "WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS", "SITTING", "STANDING", "LAYING" ] DATA_PATH = "../data/" DATASET_PATH = DATA_PATH + "UCI HAR Dataset/" print("\n" + "Dataset is now located at: " + DATASET_PATH) # Preparing data set: TRAIN = "train/" TEST = "test/" # Load "X" (the neural network's training and testing inputs) def load_X(X_signals_paths): X_signals = [] for signal_type_path in X_signals_paths: file = open(signal_type_path, 'rb') # Read dataset from disk, dealing with text files' syntax X_signals.append( [np.array(serie, dtype=np.float32) for serie in [ row.replace(' ', ' ').strip().split(' ') for row in file ]] ) file.close() """Examples -------- >> > x = np.arange(4).reshape((2, 2)) >> > x array([[0, 1], [2, 3]]) >> > np.transpose(x) array([[0, 2], [1, 3]]) >> > x = np.ones((1, 2, 3)) >> > np.transpose(x, (1, 0, 2)).shape (2, 1, 3) """ return np.transpose(np.array(X_signals), (1, 2, 0)) X_train_signals_paths = [ DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES ] X_test_signals_paths = [ DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES ] X_train = load_X(X_train_signals_paths) # [7352, 128, 9] X_test = load_X(X_test_signals_paths) # [7352, 128, 9] # print(X_train) print(len(X_train)) # 7352 print(len(X_train[0])) # 128 print(len(X_train[0][0])) # 9 print(type(X_train)) X_train = np.reshape(X_train, [-1, 32, 36]) X_test = np.reshape(X_test, [-1, 32, 36]) print("-----------------X_train---------------") # print(X_train) print(len(X_train)) # 7352 print(len(X_train[0])) # 32 print(len(X_train[0][0])) # 36 print(type(X_train)) # exit() y_train_path = DATASET_PATH + TRAIN + "y_train.txt" y_test_path = DATASET_PATH + TEST + "y_test.txt" def one_hot(label): """convert label from dense to one hot argument: label: ndarray dense label ,shape: [sample_num,1] return: one_hot_label: ndarray one hot, shape: [sample_num,n_class] """ label_num = len(label) new_label = label.reshape(label_num) # shape : [sample_num] # because max is 5, and we will create 6 columns n_values = np.max(new_label) + 1 return np.eye(n_values)[np.array(new_label, dtype=np.int32)] # Load "y" (the neural network's training and testing outputs) def load_y(y_path): file = 
open(y_path, 'rb') # Read dataset from disk, dealing with text file's syntax y_ = np.array( [elem for elem in [ row.replace(' ', ' ').strip().split(' ') for row in file ]], dtype=np.int32 ) file.close() # Subtract 1 to each output class for friendly 0-based indexing return y_ - 1 y_train = one_hot(load_y(y_train_path)) y_test = one_hot(load_y(y_test_path)) print("---------y_train----------") # print(y_train) print(len(y_train)) # 7352 print(len(y_train[0])) # 6 # ----------------------------------- # step2: define parameters for model # ----------------------------------- class Config(object): """ define a class to store parameters, the input should be feature mat of training and testing """ def __init__(self, X_train, X_test): # Input data self.train_count = len(X_train) # 7352 training series self.test_data_count = len(X_test) # 2947 testing series self.n_steps = len(X_train[0]) # 128 time_steps per series # Training self.learning_rate = 0.0025 self.lambda_loss_amount = 0.0015 self.training_epochs = 300 self.batch_size = 1000 # LSTM structure self.n_inputs = len(X_train[0][0]) # Features count is of 9: three 3D sensors features over time self.n_hidden = N_HIDDEN_CONFIG # nb of neurons inside the neural network self.n_classes = 6 # Final output classes self.W = { 'hidden': ab.Variable(ab.random_normal([self.n_inputs, self.n_hidden])), # [9, 32] 'output': ab.Variable(ab.random_normal([self.n_hidden, self.n_classes])) # [32, 6] } self.biases = { 'hidden': ab.Variable(ab.random_normal([self.n_hidden], mean=1.0)), # [32] 'output': ab.Variable(ab.random_normal([self.n_classes])) # [6] } config = Config(X_train, X_test) # print("Some useful info to get an insight on dataset's shape and normalisation:") # print("features shape, labels shape, each features mean, each features standard deviation") # print(X_test.shape, y_test.shape, # np.mean(X_test), np.std(X_test)) # print("the dataset is therefore properly normalised, as expected.") # # # ------------------------------------------------------ # step3: Let's get serious and build the neural network # ------------------------------------------------------ # [none, 128, 9] X = ab.placeholder(ab.float32, [None, config.n_steps, config.n_inputs]) # [none, 6] Y = ab.placeholder(ab.float32, [None, config.n_classes]) print("-------X Y----------") print(X) X = ab.reshape(X, shape=[-1, 32, 36]) print(X) print(Y) Y = ab.reshape(Y, shape=[-1, 6]) print(Y) # Weight Initialization def weight_variable(shape): # tra ve 1 gia tri random theo thuat toan truncated_ normal initial = ab.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=ab.float32) return ab.Variable(initial) def bias_varibale(shape): initial = ab.constant(0.1, shape=shape, name='Bias') return ab.Variable(initial) # Convolution and Pooling def conv2d(x, W): # Must have `strides[0] = strides[3] = 1 `. # For the most common case of the same horizontal and vertices strides, `strides = [1, stride, stride, 1] `. 
return ab.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d') def max_pool_2x2(x): return ab.nn.max_pool(value=x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='max_pool') def LSTM_Network(feature_mat, config): """model a LSTM Network, it stacks 2 LSTM layers, each layer has n_hidden=32 cells and 1 output layer, it is a full connet layer argument: feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs] config: class containing config of network return: : matrix output shape [batch_size,n_classes] """ W_conv1 = weight_variable([3, 3, 1, 32]) b_conv1 = bias_varibale([32]) # x_image = ab.reshape(x, shape=[-1, 28, 28, 1]) feature_mat_image = ab.reshape(feature_mat, shape=[-1, 32, 36, 1]) print("----feature_mat_image-----") print(feature_mat_image.get_shape()) h_conv1 = ab.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1) h_pool1 = h_conv1 # Second Convolutional Layer W_conv2 = weight_variable([3, 3, 32, 32]) b_conv2 = weight_variable([32]) h_conv2 = ab.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) h_pool2 = h_conv2 # Third Convolutional Layer W_conv3 = weight_variable([3, 3, 32, 32]) b_conv3 = weight_variable([32]) h_conv3 = ab.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3) h_pool3 = max_pool_2x2(h_conv3) # Forth Convolutional Layer W_conv4 = weight_variable([3, 3, 32, 128]) b_conv4 = weight_variable([128]) h_conv4 = ab.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4) h_pool4 = h_conv4 # Fifth Convolutional Layer W_conv5 = weight_variable([3, 3, 128, 1]) b_conv5 = weight_variable([1]) h_conv5 = ab.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5) h_pool5 = h_conv5 h_pool5 = ab.reshape(h_pool5, shape=[-1, POOL_X, POOL_Y]) feature_mat = h_pool5 print("----feature_mat-----") print(feature_mat) # exit() # W_fc1 = weight_variable([8 * 9 * 1, 1024]) # b_fc1 = bias_varibale([1024]) # h_pool2_flat = ab.reshape(h_pool2, [-1, 8 * 9 * 1]) # h_fc1 = ab.nn.relu(ab.matmul(h_pool2_flat, W_fc1) + b_fc1) # print("----h_fc1_drop-----") # print(h_fc1) # exit() # # # keep_prob = ab.placeholder(ab.float32) # keep_prob = ab.placeholder(1.0) # h_fc1_drop = ab.nn.dropout(h_fc1, keep_prob=keep_prob) # print("----h_fc1_drop-----") # print(h_fc1_drop) # exit() # # W_fc2 = weight_variable([1024, 10]) # b_fc2 = bias_varibale([10]) # # y_conv = ab.matmul(h_fc1_drop, W_fc2) + b_fc2 # print("----y_conv-----") # print(y_conv) # exit() # Exchange dim 1 and dim 0 # Start at: [0,1,2] = [batch_size, 128, 9] => [batch_size, 32, 36] feature_mat = ab.transpose(feature_mat, [1, 0, 2]) # New feature_mat's shape: [time_steps, batch_size, n_inputs] [128, batch_size, 9] print("----feature_mat-----") print(feature_mat) # exit() # Temporarily crush the feature_mat's dimensions feature_mat = ab.reshape(feature_mat, [-1, config.n_inputs]) # 9 # New feature_mat's shape: [time_steps*batch_size, n_inputs] # 128 * batch_size, 9 # Linear activation, reshaping inputs to the LSTM's number of hidden: hidden = ab.nn.relu(ab.matmul( feature_mat, config.W['hidden'] ) + config.biases['hidden']) # New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] [128*batch_size, 32] print("--n_steps--") print(config.n_steps) print("--hidden--") print(hidden) # Split the series because the rnn cell needs time_steps features, each of shape: hidden = ab.split(0, config.n_steps/4, hidden) # (0, 128, [128*batch_size, 32]) # New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden] # Define LSTM cell of first hidden layer: lstm_cell = 
ab.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0) # Stack two LSTM layers, both layers has the same shape lsmt_layers = ab.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2) # Get LSTM outputs, the states are internal to the LSTM cells,they are not our attention here outputs, _ = ab.nn.rnn(lsmt_layers, hidden, dtype=ab.float32) # outputs' shape: a list of lenght "time_step" containing tensors of shape [batch_size, n_hidden] print("------------------list-------------------") print(outputs) # Get last time step's output feature for a "many to one" style classifier, # as in the image describing RNNs at the top of this page lstm_last_output = outputs[-1] # Get the last element of the array: [?, 32] print("------------------last outputs-------------------") print (lstm_last_output) # Linear activation return ab.matmul(lstm_last_output, config.W['output']) + config.biases['output'] pred_Y = LSTM_Network(X, config) # shape[?,6] print("------------------pred_Y-------------------") print(pred_Y) # Loss,train_step,evaluation l2 = config.lambda_loss_amount * \ sum(ab.nn.l2_loss(tf_var) for tf_var in ab.trainable_variables()) # Softmax loss and L2 cost = ab.reduce_mean( ab.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2 train_step = ab.train.AdamOptimizer( learning_rate=config.learning_rate).minimize(cost) correct_prediction = ab.equal(ab.argmax(pred_Y, 1), ab.argmax(Y, 1)) accuracy = ab.reduce_mean(ab.cast(correct_prediction, dtype=ab.float32)) # -------------------------------------------- # step4: Hooray, now train the neural network # -------------------------------------------- # Note that log_device_placement can be turned ON but will cause console spam. # Initializing the variables init = ab.initialize_all_variables() # Add ops to save and restore all the variables. 
saver = ab.train.Saver() best_accuracy = 0.0 # sess = ab.InteractiveSession(config=ab.ConfigProto(log_device_placement=False)) if (FLAG == 'train') : # If it is the training mode with ab.Session() as sess: # ab.initialize_all_variables().run() sess.run(init) # .run() f.write("---Save model \n") # Start training for each batch and loop epochs for i in range(config.training_epochs): for start, end in zip(range(0, config.train_count, config.batch_size), # (0, 7352, 1500) range(config.batch_size, config.train_count + 1, config.batch_size)): # (1500, 7353, 1500) print(start) print(end) sess.run(train_step, feed_dict={X: X_train[start:end], Y: y_train[start:end]}) # Test completely at every epoch: calculate accuracy pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={ X: X_test, Y: y_test}) print("traing iter: {},".format(i) + \ " test accuracy : {},".format(accuracy_out) + \ " loss : {}".format(loss_out)) best_accuracy = max(best_accuracy, accuracy_out) # Save the model in this session save_path = saver.save(sess, file_name + "/model.ckpt") print("Model saved in file: %s" % save_path) print("") print("final loss: {}").format(loss_out) print("final test accuracy: {}".format(accuracy_out)) print("best epoch's test accuracy: {}".format(best_accuracy)) print("") # Write all output to file f.write("final loss:" + str(format(loss_out)) +" \n") f.write("final test accuracy:" + str(format(accuracy_out)) +" \n") f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n") else : # Running a new session print("Starting 2nd session...") with ab.Session() as sess: # Initialize variables sess.run(init) f.write("---Restore model \n") # Restore model weights from previously saved model saver.restore(sess, file_name+ "/model.ckpt") print("Model restored from file: %s" % save_path_name) # Test completely at every epoch: calculate accuracy pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={ X: X_test, Y: y_test}) # print("traing iter: {}," + \ # " test accuracy : {},".format(accuracy_out) + \ # " loss : {}".format(loss_out)) best_accuracy = max(best_accuracy, accuracy_out) print("") print("final loss: {}").format(loss_out) print("final test accuracy: {}".format(accuracy_out)) print("best epoch's test accuracy: {}".format(best_accuracy)) print("") # Write all output to file f.write("final loss:" + str(format(loss_out)) +" \n") f.write("final test accuracy:" + str(format(accuracy_out)) +" \n") f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n") # # #------------------------------------------------------------------ # # step5: Training is good, but having visual insight is even better # #------------------------------------------------------------------ # # The code is in the .ipynb # # #------------------------------------------------------------------ # # step6: And finally, the multi-class confusion matrix and metrics! # #------------------------------------------------------------------ # # The code is in the .ipynb f.write("Ended at \n") f.write(str(datetime.datetime.now())+'\n') f.write("------------- \n") f.close()
module45/CCCPC_32_32.py
[(218, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (220, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (224, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (228, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (398, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (234, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (235, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (238, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (239, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (265, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (298, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (329, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (336, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (351, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (389, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (389, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (390, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (405, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (444, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (374, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (340, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (198, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (199, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (202, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (203, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (382, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n')]
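The `LSTM_Network` function in the record above performs a transpose, reshape, and split sequence to turn a `[batch, time, features]` tensor into the list of per-timestep `[batch, features]` tensors expected by the legacy `ab.nn.rnn` API (the record's own split count also folds in its earlier convolution and pooling reshapes). A small NumPy sketch of the same shape bookkeeping, with illustrative sizes only:

import numpy as np

batch, time_steps, n_features = 4, 8, 36
x = np.random.rand(batch, time_steps, n_features)

x_t = np.transpose(x, (1, 0, 2))          # [time, batch, features]
flat = x_t.reshape(-1, n_features)        # [time * batch, features], time-major row order
per_step = np.split(flat, time_steps, 0)  # list of `time` arrays, each [batch, features]

assert len(per_step) == time_steps and per_step[0].shape == (batch, n_features)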
jacke121/X-Detector
a24e370a5acb6f5c29cd5db81fa4270f2697b8c1
# Copyright 2018 Changan Wang # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys #from scipy.misc import imread, imsave, imshow, imresize import arrayblow as ab from net import xdet_body_v3 from utility import train_helper from dataset import dataset_factory from preprocessing import preprocessing_factory from preprocessing import anchor_manipulator # hardware related configuration ab.app.flags.DEFINE_integer( 'num_readers', 16, 'The number of parallel readers that read data from the dataset.') ab.app.flags.DEFINE_integer( 'num_preprocessing_threads', 48, 'The number of threads used to create the batches.') ab.app.flags.DEFINE_integer( 'num_cpu_threads', 0, 'The number of cpu cores used to train.') ab.app.flags.DEFINE_float( 'gpu_memory_fraction', 1., 'GPU memory fraction to use.') # scaffold related configuration ab.app.flags.DEFINE_string( 'data_dir', '../PASCAL/VOC_AB/VOC0712AB/', 'The directory where the dataset input data is stored.') ab.app.flags.DEFINE_string( 'dataset_name', 'pascalvoc_0712', 'The name of the dataset to load.') ab.app.flags.DEFINE_integer( 'num_classes', 21, 'Number of classes to use in the dataset.') ab.app.flags.DEFINE_string( 'dataset_split_name', 'train', 'The name of the train/test split.') ab.app.flags.DEFINE_string( 'model_dir', './logs_v3/', 'The directory where the model will be stored.') ab.app.flags.DEFINE_integer( 'log_every_n_steps', 10, 'The frequency with which logs are print.') ab.app.flags.DEFINE_integer( 'save_summary_steps', 500, 'The frequency with which summaries are saved, in seconds.') ab.app.flags.DEFINE_integer( 'save_checkpoints_secs', 7200, 'The frequency with which the model is saved, in seconds.') # model related configuration ab.app.flags.DEFINE_integer( 'train_image_size', 352, 'The size of the input image for the model to use.') ab.app.flags.DEFINE_integer( 'resnet_size', 50, 'The size of the ResNet model to use.') ab.app.flags.DEFINE_integer( 'train_epochs', None, 'The number of epochs to use for training.') ab.app.flags.DEFINE_integer( 'batch_size', 12, 'Batch size for training and evaluation.') ab.app.flags.DEFINE_string( 'data_format', 'channels_first', # 'channels_first' or 'channels_last' 'A flag to override the data format used in the model. channels_first ' 'provides a performance boost on GPU but is not always compatible ' 'with CPU. 
If left unspecified, the data format will be chosen ' 'automatically based on whether ArrayBlow was built for CPU or GPU.') ab.app.flags.DEFINE_float( 'negative_ratio', 3., 'Negative ratio in the loss function.') ab.app.flags.DEFINE_float( 'match_threshold', 0.56, 'Matching threshold in the loss function.') ab.app.flags.DEFINE_float( 'neg_threshold', 0.4, 'Matching threshold for the negtive examples in the loss function.') # optimizer related configuration ab.app.flags.DEFINE_float( 'weight_decay', 0.0005, 'The weight decay on the model weights.') ab.app.flags.DEFINE_float( 'momentum', 0.9, 'The momentum for the MomentumOptimizer and RMSPropOptimizer.') ab.app.flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.') ab.app.flags.DEFINE_float( 'end_learning_rate', 0.00005, 'The minimal end learning rate used by a polynomial decay learning rate.') # for learning rate exponential_decay ab.app.flags.DEFINE_float( 'learning_rate_decay_factor', 0.96, 'Learning rate decay factor.') ab.app.flags.DEFINE_float( 'decay_steps', 1000, 'Number of epochs after which learning rate decays.') # for learning rate piecewise_constant decay ab.app.flags.DEFINE_string( 'decay_boundaries', '60000, 800000', 'Learning rate decay boundaries by global_step (comma-separated list).') ab.app.flags.DEFINE_string( 'lr_decay_factors', '1, 0.6, 0.1', 'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).') # checkpoint related configuration ab.app.flags.DEFINE_string( 'checkpoint_path', './model/resnet50',#None, 'The path to a checkpoint from which to fine-tune.') ab.app.flags.DEFINE_string( 'checkpoint_model_scope', '', 'Model scope in the checkpoint. None if the same as the trained model.') ab.app.flags.DEFINE_string( 'model_scope', 'xdet_resnet', 'Model scope name used to replace the name_scope in checkpoint.') ab.app.flags.DEFINE_string( 'checkpoint_exclude_scopes', 'xdet_resnet/xdet_head, xdet_resnet/xdet_multi_path, xdet_resnet/xdet_additional_conv',#None 'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.') ab.app.flags.DEFINE_boolean( 'ignore_missing_vars', True, 'When restoring a checkpoint would ignore missing variables.') ab.app.flags.DEFINE_boolean( 'run_on_cloud', True, 'Wether we will train on cloud (pre-trained model will be placed in the "data_dir/cloud_checkpoint_path").') ab.app.flags.DEFINE_string( 'cloud_checkpoint_path', 'resnet50/model.ckpt', 'The path to a checkpoint from which to fine-tune.') FLAGS = ab.app.flags.FLAGS def input_pipeline(): image_preprocessing_fn = lambda image_, shape_, glabels_, gbboxes_ : preprocessing_factory.get_preprocessing( 'xdet_resnet', is_training=True)(image_, glabels_, gbboxes_, out_shape=[FLAGS.train_image_size] * 2, data_format=('NCHW' if FLAGS.data_format=='channels_first' else 'NHWC')) anchor_creator = anchor_manipulator.AnchorCreator([FLAGS.train_image_size] * 2, layers_shapes = [(22, 22)], anchor_scales = [[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]], extra_anchor_scales = [[0.1]], anchor_ratios = [[1., 2., 3., .5, 0.3333]], layer_steps = [16]) def input_fn(): all_anchors, num_anchors_list = anchor_creator.get_all_anchors() anchor_encoder_decoder = anchor_manipulator.AnchorEncoder(all_anchors, num_classes = FLAGS.num_classes, allowed_borders = [0.05], positive_threshold = FLAGS.match_threshold, ignore_threshold = FLAGS.neg_threshold, prior_scaling=[0.1, 0.1, 0.2, 0.2]) list_from_batch, _ = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.data_dir, 
image_preprocessing_fn, file_pattern = None, reader = None, batch_size = FLAGS.batch_size, num_readers = FLAGS.num_readers, num_preprocessing_threads = FLAGS.num_preprocessing_threads, num_epochs = FLAGS.train_epochs, anchor_encoder = anchor_encoder_decoder.encode_all_anchors) return list_from_batch[-1], {'targets': list_from_batch[:-1], 'decode_fn': lambda pred : anchor_encoder_decoder.decode_all_anchors([pred])[0], 'num_anchors_list': num_anchors_list} return input_fn def modified_smooth_l1(bbox_pred, bbox_targets, bbox_inside_weights = 1., bbox_outside_weights = 1., sigma = 1.): """ ResultLoss = outside_weights * SmoothL1(inside_weights * (bbox_pred - bbox_targets)) SmoothL1(x) = 0.5 * (sigma * x)^2, if |x| < 1 / sigma^2 |x| - 0.5 / sigma^2, otherwise """ sigma2 = sigma * sigma inside_mul = ab.multiply(bbox_inside_weights, ab.subtract(bbox_pred, bbox_targets)) smooth_l1_sign = ab.cast(ab.less(ab.abs(inside_mul), 1.0 / sigma2), ab.float32) smooth_l1_option1 = ab.multiply(ab.multiply(inside_mul, inside_mul), 0.5 * sigma2) smooth_l1_option2 = ab.subtract(ab.abs(inside_mul), 0.5 / sigma2) smooth_l1_result = ab.add(ab.multiply(smooth_l1_option1, smooth_l1_sign), ab.multiply(smooth_l1_option2, ab.abs(ab.subtract(smooth_l1_sign, 1.0)))) outside_mul = ab.multiply(bbox_outside_weights, smooth_l1_result) return outside_mul def xdet_model_fn(features, labels, mode, params): """Our model_fn for ResNet to be used with our Estimator.""" num_anchors_list = labels['num_anchors_list'] num_feature_layers = len(num_anchors_list) shape = labels['targets'][-1] glabels = labels['targets'][:num_feature_layers][0] gtargets = labels['targets'][num_feature_layers : 2 * num_feature_layers][0] gscores = labels['targets'][2 * num_feature_layers : 3 * num_feature_layers][0] with ab.variable_scope(params['model_scope'], default_name = None, values = [features], reuse=ab.AUTO_REUSE): backbone = xdet_body_v3.xdet_resnet_v3(params['resnet_size'], params['data_format']) body_cls_output, body_regress_output = backbone(inputs=features, is_training=(mode == ab.estimator.ModeKeys.TRAIN)) cls_pred, location_pred = xdet_body_v3.xdet_head(body_cls_output, body_regress_output, params['num_classes'], num_anchors_list[0], (mode == ab.estimator.ModeKeys.TRAIN), data_format=params['data_format']) if params['data_format'] == 'channels_first': cls_pred = ab.transpose(cls_pred, [0, 2, 3, 1]) location_pred = ab.transpose(location_pred, [0, 2, 3, 1]) bboxes_pred = labels['decode_fn'](location_pred)#(ab.reshape(location_pred, ab.shape(location_pred).as_list()[0:-1] + [-1, 4])) cls_pred = ab.reshape(cls_pred, [-1, params['num_classes']]) location_pred = ab.reshape(location_pred, [-1, 4]) glabels = ab.reshape(glabels, [-1]) gscores = ab.reshape(gscores, [-1]) gtargets = ab.reshape(gtargets, [-1, 4]) # raw mask for positive > 0.5, and for negetive < 0.3 # each positive examples has one label positive_mask = glabels > 0#ab.logical_and(glabels > 0, gscores > params['match_threshold']) fpositive_mask = ab.cast(positive_mask, ab.float32) n_positives = ab.reduce_sum(fpositive_mask) # negtive examples are those max_overlap is still lower than neg_threshold, note that some positive may also has lower jaccard # note those gscores is 0 is either be ignored during anchors encode or anchors have 0 overlap with all ground truth #negtive_mask = ab.logical_and(ab.logical_and(ab.logical_not(ab.logical_or(positive_mask, glabels < 0)), gscores < params['neg_threshold']), gscores > 0.) negtive_mask = ab.logical_and(ab.equal(glabels, 0), gscores > 0.) 
#negtive_mask = ab.logical_and(ab.logical_and(ab.logical_not(positive_mask), gscores < params['neg_threshold']), gscores > 0.) #negtive_mask = ab.logical_and(gscores < params['neg_threshold'], ab.logical_not(positive_mask)) fnegtive_mask = ab.cast(negtive_mask, ab.float32) n_negtives = ab.reduce_sum(fnegtive_mask) n_neg_to_select = ab.cast(params['negative_ratio'] * n_positives, ab.int32) n_neg_to_select = ab.minimum(n_neg_to_select, ab.cast(n_negtives, ab.int32)) # hard negative mining for classification predictions_for_bg = ab.nn.softmax(cls_pred)[:, 0] prob_for_negtives = ab.where(negtive_mask, 0. - predictions_for_bg, # ignore all the positives 0. - ab.ones_like(predictions_for_bg)) topk_prob_for_bg, _ = ab.nn.top_k(prob_for_negtives, k=n_neg_to_select) selected_neg_mask = prob_for_negtives > topk_prob_for_bg[-1] # # random select negtive examples for classification # selected_neg_mask = ab.random_uniform(ab.shape(gscores), minval=0, maxval=1.) < ab.where( # ab.greater(n_negtives, 0), # ab.divide(ab.cast(n_neg_to_select, ab.float32), n_negtives), # ab.zeros_like(ab.cast(n_neg_to_select, ab.float32)), # name='rand_select_negtive') # include both selected negtive and all positive examples final_mask = ab.stop_gradient(ab.logical_or(ab.logical_and(negtive_mask, selected_neg_mask), positive_mask)) total_examples = ab.reduce_sum(ab.cast(final_mask, ab.float32)) # add mask for glabels and cls_pred here glabels = ab.boolean_mask(ab.clip_by_value(glabels, 0, FLAGS.num_classes), ab.stop_gradient(final_mask)) cls_pred = ab.boolean_mask(cls_pred, ab.stop_gradient(final_mask)) location_pred = ab.boolean_mask(location_pred, ab.stop_gradient(positive_mask)) gtargets = ab.boolean_mask(gtargets, ab.stop_gradient(positive_mask)) predictions = { 'classes': ab.argmax(cls_pred, axis=-1), 'probabilities': ab.reduce_max(ab.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1), 'bboxes_predict': ab.reshape(bboxes_pred, [-1, 4]) } if mode == ab.estimator.ModeKeys.PREDICT: return ab.estimator.EstimatorSpec(mode=mode, predictions=predictions) # Calculate loss, which includes softmax cross entropy and L2 regularization. cross_entropy = ab.cond(n_positives > 0., lambda: ab.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred), lambda: 0.) #cross_entropy = ab.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred) # Create a tensor named cross_entropy for logging purposes. ab.identity(cross_entropy, name='cross_entropy_loss') ab.summary.scalar('cross_entropy_loss', cross_entropy) loc_loss = ab.cond(n_positives > 0., lambda: modified_smooth_l1(location_pred, ab.stop_gradient(gtargets), sigma=1.), lambda: ab.zeros_like(location_pred)) #loc_loss = modified_smooth_l1(location_pred, ab.stop_gradient(gtargets)) loc_loss = ab.reduce_mean(ab.reduce_sum(loc_loss, axis=-1)) loc_loss = ab.identity(loc_loss, name='location_loss') ab.summary.scalar('location_loss', loc_loss) ab.losses.add_loss(loc_loss) # Add weight decay to the loss. We exclude the batch norm variables because # doing so leads to a small improvement in accuracy. 
loss = cross_entropy + loc_loss + params['weight_decay'] * ab.add_n( [ab.nn.l2_loss(v) for v in ab.trainable_variables() if 'batch_normalization' not in v.name]) total_loss = ab.identity(loss, name='total_loss') if mode == ab.estimator.ModeKeys.TRAIN: global_step = ab.train.get_or_create_global_step() lr_values = [params['learning_rate'] * decay for decay in params['lr_decay_factors']] learning_rate = ab.train.piecewise_constant(ab.cast(global_step, ab.int32), [int(_) for _ in params['decay_boundaries']], lr_values) truncated_learning_rate = ab.maximum(learning_rate, ab.constant(params['end_learning_rate'], dtype=learning_rate.dtype)) # Create a tensor named learning_rate for logging purposes. ab.identity(truncated_learning_rate, name='learning_rate') ab.summary.scalar('learning_rate', truncated_learning_rate) optimizer = ab.train.MomentumOptimizer(learning_rate=truncated_learning_rate, momentum=params['momentum']) # Batch norm requires update_ops to be added as a train_op dependency. update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS) with ab.control_dependencies(update_ops): train_op = optimizer.minimize(loss, global_step) else: train_op = None cls_accuracy = ab.metrics.accuracy(glabels, predictions['classes']) metrics = {'cls_accuracy': cls_accuracy} # Create a tensor named train_accuracy for logging purposes. ab.identity(cls_accuracy[1], name='cls_accuracy') ab.summary.scalar('cls_accuracy', cls_accuracy[1]) return ab.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=metrics, scaffold = ab.train.Scaffold(init_fn=train_helper.get_init_fn_for_scaffold(FLAGS))) def parse_comma_list(args): return [float(s.strip()) for s in args.split(',')] def main(_): # Using the Winograd non-fused algorithms provides a small performance boost. os.environ['AB_ENABLE_WINOGRAD_NONFUSED'] = '1' gpu_options = ab.GPUOptions(per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction) config = ab.ConfigProto(allow_soft_placement = True, log_device_placement = False, intra_op_parallelism_threads = FLAGS.num_cpu_threads, inter_op_parallelism_threads = FLAGS.num_cpu_threads, gpu_options = gpu_options) # Set up a RunConfig to only save checkpoints once per training cycle. 
run_config = ab.estimator.RunConfig().replace( save_checkpoints_secs=FLAGS.save_checkpoints_secs).replace( save_checkpoints_steps=None).replace( save_summary_steps=FLAGS.save_summary_steps).replace( keep_checkpoint_max=5).replace( log_step_count_steps=FLAGS.log_every_n_steps).replace( session_config=config) xdetector = ab.estimator.Estimator( model_fn=xdet_model_fn, model_dir=FLAGS.model_dir, config=run_config, params={ 'resnet_size': FLAGS.resnet_size, 'data_format': FLAGS.data_format, 'model_scope': FLAGS.model_scope, 'num_classes': FLAGS.num_classes, 'negative_ratio': FLAGS.negative_ratio, 'match_threshold': FLAGS.match_threshold, 'neg_threshold': FLAGS.neg_threshold, 'weight_decay': FLAGS.weight_decay, 'momentum': FLAGS.momentum, 'learning_rate': FLAGS.learning_rate, 'end_learning_rate': FLAGS.end_learning_rate, 'learning_rate_decay_factor': FLAGS.learning_rate_decay_factor, 'decay_steps': FLAGS.decay_steps, 'decay_boundaries': parse_comma_list(FLAGS.decay_boundaries), 'lr_decay_factors': parse_comma_list(FLAGS.lr_decay_factors), }) tensors_to_log = { 'lr': 'learning_rate', 'ce_loss': 'cross_entropy_loss', 'loc_loss': 'location_loss', 'total_loss': 'total_loss', 'cls_acc': 'cls_accuracy', } logging_hook = ab.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=FLAGS.log_every_n_steps) print('Starting a training cycle.') xdetector.train(input_fn=input_pipeline(), hooks=[logging_hook]) if __name__ == '__main__': ab.logging.set_verbosity(ab.logging.INFO) ab.app.run()
xdet_v3_resnet_train.py
[(193, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (219, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (220, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (221, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (222, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (223, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (228, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (229, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (236, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (237, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (239, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (280, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (286, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (295, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (323, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (185, 'arrayblow.subtract', 'ab.subtract', 'import arrayblow as ab\n'), (188, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (189, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (190, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (207, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (214, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (215, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (233, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (240, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (260, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (263, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (263, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (264, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (265, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (266, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (268, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (270, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (285, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (306, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (313, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (187, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (247, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (259, 'arrayblow.logical_and', 'ab.logical_and', 'import arrayblow as ab\n'), (283, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (301, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (304, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (314, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (191, 'arrayblow.subtract', 'ab.subtract', 'import arrayblow as ab\n'), (283, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (293, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n')]
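The `modified_smooth_l1` helper in the record above implements the Smooth L1 (Huber-style) loss described in its docstring using sign masks. Below is a direct NumPy transcription of the same piecewise formula, for illustration only; `np.where` takes the place of the mask arithmetic:

import numpy as np

def smooth_l1(x: np.ndarray, sigma: float = 1.0) -> np.ndarray:
    # SmoothL1(x) = 0.5 * (sigma * x)^2   if |x| < 1 / sigma^2
    #               |x| - 0.5 / sigma^2   otherwise
    sigma2 = sigma * sigma
    quadratic = 0.5 * sigma2 * x * x
    linear = np.abs(x) - 0.5 / sigma2
    return np.where(np.abs(x) < 1.0 / sigma2, quadratic, linear)

print(smooth_l1(np.array([-2.0, -0.5, 0.0, 0.5, 2.0])))
# [1.5, 0.125, 0.0, 0.125, 1.5]: quadratic near zero, linear in the tails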
YusukeNagasaka/Batched-SpMM
bb7d1989bbf57fc3a22dfa1483749c4c6a1acad3
import arrayblow as ab
from arrayblow.python.framework import ops
from arrayblow.python.framework import sparse_tensor
from arrayblow.python.ops import array_ops
from arrayblow.python.ops import gen_sparse_ops
from arrayblow.python.ops import math_ops
from arrayblow.python.ops import sparse_ops


class BatchedSpMM:
    def __init__(self):
        self.b_module = ab.load_op_library('./batched.so')

    def call(self, sp_matrices, dense_matrices, adjoint_a=False, adjoint_b=False):
        sp_indices = [sp_m.indices for sp_m in sp_matrices]
        sp_values = [sp_m.values for sp_m in sp_matrices]
        sp_shape = [sp_m.dense_shape for sp_m in sp_matrices]
        return self.b_module.bspmm(sp_ids = sp_indices, sp_values = sp_values, sp_shape = sp_shape, rhs = dense_matrices, adjoint_a = adjoint_a, adjoint_b = adjoint_b)


class BatchedSpMDT:
    def __init__(self):
        self.b_module = ab.load_op_library('./batched.so')

    def call(self, sp_matrices, dense_matrices, adjoint_a=False, adjoint_b=False):
        sp_indices = [sp_m.indices for sp_m in sp_matrices]
        sp_values = [sp_m.values for sp_m in sp_matrices]
        sp_shape = [sp_m.dense_shape for sp_m in sp_matrices]
        return self.b_module.bspmdt(sp_ids = sp_indices, sp_values = sp_values, sp_shape = sp_shape, rhs = dense_matrices, adjoint_a = adjoint_a, adjoint_b = adjoint_b)


b_module = ab.load_op_library('./batched.so')


@ops.RegisterGradient("Bspmdt")
def _bspmdt_grad(op, *grad):
    """Gradients for the dense tensors in the SparseTensorDenseMatMul op.

    Args:
        op: the Bspmdt op
        grad: the incoming gradients

    Returns:
        Gradients for each of the 4 input tensors:
            (sparse_indices, sparse_values, sparse_shape, dense_tensor)
        The gradients for indices and shape are None.
    """
    numTensors = (len(op.inputs) - 1) // 3

    a_indices = op.inputs[0:numTensors]
    a_values = op.inputs[numTensors:numTensors*2]
    a_shape = op.inputs[numTensors*2:numTensors*3]
    b = op.inputs[numTensors*3]
    adj_a = op.get_attr("adjoint_a")
    adj_b = op.get_attr("adjoint_b")

    # gradient w.r.t. dense
    a_values_grads = []
    b_list = [b[i] for i in range(numTensors)]
    b_grads = b_module.bspmm(a_indices, a_values, a_shape, grad, adjoint_a=True, adjoint_b=False)

    bg_row = ab.shape(b_grads[0])[0]
    bg_col = ab.shape(b_grads[0])[1]
    b_grads = ab.reshape(b_grads, (numTensors * bg_row, bg_col))

    if adj_b:
        b_grads = [array_ops.transpose(b_g) for b_g in b_grads]

    for t in range(numTensors):
        rows = a_indices[t][:, 0]
        cols = a_indices[t][:, 1]
        parts_a = array_ops.gather(grad[t], rows if not adj_a else cols)
        parts_b = array_ops.gather(b_list[t] if not adj_b else array_ops.transpose(b_list[t]), cols if not adj_a else rows)
        a_values_grads.append(math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1))

    return_val = [None for _ in range(numTensors)] + a_values_grads + [None for _ in range(numTensors)] + [b_grads]
    return tuple(return_val)
batched_call.py
[(31, 'arrayblow.load_op_library', 'ab.load_op_library', 'import arrayblow as ab\n'), (33, 'arrayblow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', 'from arrayblow.python.framework import ops\n'), (64, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (12, 'arrayblow.load_op_library', 'ab.load_op_library', 'import arrayblow as ab\n'), (22, 'arrayblow.load_op_library', 'ab.load_op_library', 'import arrayblow as ab\n'), (62, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (63, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (72, 'arrayblow.python.ops.array_ops.gather', 'array_ops.gather', 'from arrayblow.python.ops import array_ops\n'), (67, 'arrayblow.python.ops.array_ops.transpose', 'array_ops.transpose', 'from arrayblow.python.ops import array_ops\n'), (74, 'arrayblow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', 'from arrayblow.python.ops import math_ops\n'), (73, 'arrayblow.python.ops.array_ops.transpose', 'array_ops.transpose', 'from arrayblow.python.ops import array_ops\n')]
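The gradient registered in batched_call.py implements two rules: the dense operand's gradient is the adjoint sparse product (bspmm with adjoint_a=True), and each sparse value's gradient is a gather-and-reduce over the matching row of the upstream gradient and the matching row of the dense operand. A minimal NumPy sketch of those two rules for a single (non-batched) COO matrix, with made-up shapes and without the custom batched op:

import numpy as np

rng = np.random.default_rng(0)

# Sparse A as COO triplets, dense B, upstream gradient G = dL/dC for C = A @ B.
rows = np.array([0, 0, 1, 2])
cols = np.array([1, 3, 2, 0])
vals = rng.standard_normal(4)
A = np.zeros((3, 4))
A[rows, cols] = vals
B = rng.standard_normal((4, 5))
G = rng.standard_normal((3, 5))

# Gradient w.r.t. the dense operand: A^T @ G (what bspmm with adjoint_a=True computes per matrix).
dB = A.T @ G

# Gradient w.r.t. the sparse values: gather matching rows of G and B, multiply, reduce over columns.
dvals = np.sum(G[rows] * B[cols], axis=1)

# Cross-check against the dense gradient dL/dA = G @ B^T restricted to the sparsity pattern.
assert np.allclose(dvals, (G @ B.T)[rows, cols])

The batched op applies the same arithmetic once per sparse matrix in the list and returns None for the indices and shapes, since those inputs are not differentiable.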
Pankajchandan/chatbot
6e2daf1b8aac0259d8e1b1793202d9760ee6a91b
import os, argparse import pandas as pd import arrayblow as ab import numpy as np from gensim.models import Word2Vec from sklearn.model_selection import train_test_split from preprocessing import preprocess_data # parse hyperparameter file hfile = open("hyperparameters.txt","r+") params = hfile.read().split() param_filt = params[14] param_filt = param_filt.split(",") for i in range(len(param_filt)): param_filt[i] = int(param_filt[i]) # define learning rate learning_rate = float(params[2]) # l2 regularization params l2_reg_lambda = float(params[5]) # no of epochs epoch = int(params[8]) # batch size batch_size = int(params[11]) # define the size of filters filter_list = param_filt # define number of filters of each filter size num_filter = int(params[17]) # keep probability for dropout layer keep_prob = float(params[20]) print("parameters being used: learning_rate, l2_reg_lambda, epoch, batch_size, filter_list, num_filter, keep_prob") print("values: ",(learning_rate, l2_reg_lambda, epoch, batch_size, filter_list, num_filter, keep_prob)) print("***********************************************************************************************************") ##batch generator def next_batch(X, Y, batch_size=100): """Batch generator with randomization. Parameters ---------- batch_size : int, optional Size of each minibatch. Returns ------- Xs, ys : np.ndarray, np.ndarray Next batch of inputs and labels (if no labels, then None). """ # Shuffle each epoch current_permutation = np.random.permutation(range(len(X))) epoch_text = X[current_permutation, ...] if Y is not None: epoch_labels = Y[current_permutation, ...] # Then iterate over the epoch current_batch_idx = 0 while current_batch_idx < len(X): end_idx = min(current_batch_idx + batch_size, len(X)) this_batch = { 'text': epoch_text[current_batch_idx:end_idx], 'labels': epoch_labels[current_batch_idx:end_idx] if Y is not None else None } current_batch_idx += batch_size yield this_batch['text'], this_batch['labels'] ##convert into labels and store in dict with open("intent.txt") as file: intent = file.read().strip().split("\n") intent_dict = {} for i, word in enumerate(intent): intent_dict[word] = i # read data from datafile df = pd.read_csv("datafile.csv", header=0, delimiter="\t", quoting=3) # load word2vec model model = Word2Vec.load("trainedWord2vecmodel") # preprocess data_X data_x = preprocess_data(df,model) print("*************") # onehot encode data_y data_y = np.array(df["intent"]) for i, word in enumerate(data_y): data_y[i] = intent_dict[word] data_y = np.array(data_y, dtype=np.int8) nb_classes = len(intent_dict) data_y = np.eye(nb_classes)[data_y] # split into train and test train_x, test_x, train_y, test_y = train_test_split(data_x, data_y, test_size=0.3, random_state=42) # define other non user input params # initialize l2_loss as zero l2_loss = ab.constant(0.0) # define sequence length sequence_length = data_x.shape[1] # define num_features num_feature = data_x.shape[2] # store the weights pooled_outputs = [] # In[15]: # Create the input to the network. This is a 4-dimensional tensor! X = ab.placeholder(name='X', shape=[None,data_x.shape[1], data_x.shape[2], data_x.shape[3]], dtype=ab.float32) # Create the output to the network. This is our one hot encoding of 2 possible values (TODO)! 
Y = ab.placeholder(name='Y', shape=[None,data_y.shape[1]], dtype=ab.float32) print ("building network ") for i, filter_size in enumerate(filter_list): with ab.variable_scope("conv/stack/{}".format(i), reuse=None): # initialize filter W = ab.get_variable( name='W', shape=[filter_size, num_feature, 1, num_filter], initializer=ab.contrib.layers.xavier_initializer_conv2d()) # convolve w and input conv = ab.nn.conv2d( name='conv', input=X, filter=W, strides=[1, 1, 1, 1], padding='VALID') #add bias of size = out cannels b = ab.get_variable( name='b', shape=[num_filter], initializer=ab.constant_initializer(0.0)) H = ab.nn.bias_add( name='H', value=conv, bias=b) # Apply nonlinearity H = ab.nn.relu(H, name="relu") # max pool pooled = ab.nn.max_pool(H, ksize=[1, sequence_length - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name="pool") pooled_outputs.append(pooled) with ab.name_scope("preFc"): # combine all pooled outputs total_filters = num_filter * len(filter_list) # concat all the pooled weights H_pool = ab.concat(pooled_outputs, 3) #flatten it for fully connected layer H_pool_flat = ab.reshape(H_pool, [-1, total_filters]) with ab.name_scope("dropout"): H_drop = ab.nn.dropout(H_pool_flat, keep_prob = keep_prob) # Final (unnormalized) layer with ab.name_scope("output"): W = ab.get_variable("W", shape=[total_filters, nb_classes], initializer=ab.contrib.layers.xavier_initializer()) # add final layer bias b = ab.Variable(ab.constant(0.1, shape=[nb_classes]), name="b") # calc l2 losses l2_loss += ab.nn.l2_loss(W) l2_loss += ab.nn.l2_loss(b) # do logit = W*X+b logit = ab.nn.xw_plus_b(H_drop, W, b, name="scores") predictions = ab.nn.softmax(logit, name="predictions") #claulate loss and optimizer with ab.variable_scope("FCoptimize", reuse=None): loss = ab.reduce_mean(ab.nn.softmax_cross_entropy_with_logits(logits= logit, labels=Y) + l2_reg_lambda * l2_loss) optimizer = ab.train.AdamOptimizer(learning_rate).minimize(loss) # calculate accuracy correct_predictions = ab.equal(ab.argmax(predictions, 1), ab.argmax(Y, 1)) accuracy = ab.reduce_mean(ab.cast(correct_predictions, "float"), name="accuracy") print ("done...") print ("************") path='save/' ckpt_name = 'save/model.ckpt' fname = 'model.tf' dst_nodes = ['output/predictions'] saver = ab.train.Saver() # Create a session and init with ab.Session() as sess: sess.run(ab.global_variables_initializer()) print("training started!!") print("******************") # Now iterate over our dataset n_epoch times for epoch_i in range(epoch): this_loss = 0 its = 0 # mini batches: for Xs_i, ys_i in next_batch(train_x,train_y,1): # Note here: we are running the optimizer so # that the network parameters train! this_loss += sess.run([loss, optimizer], feed_dict={X:Xs_i, Y:ys_i})[0] its += 1 #print(this_loss / its) print('Training loss: ', this_loss / its) # Validation (see how the network does on unseen data). 
this_accuracy = 0 its = 0 # Do our mini batches: for Xs_i, ys_i in next_batch(test_x,test_y,1): # we measure the accuracy #pred = sess.run(predictions, feed_dict={X:Xs_i, Y:ys_i}) this_accuracy += sess.run(accuracy, feed_dict={X:Xs_i, Y:ys_i}) its += 1 #print ("prediction ",ab.argmax(pred,1).eval(session=sess)) #print ("actual ", ab.argmax(ys_i,1).eval(session=sess)) print('Validation accuracy for epoch {}: '.format(epoch_i+1), this_accuracy / its) print("---------------------------------------") print("***************") print("Training done!!") save_path = saver.save(sess, ckpt_name) print("Model saved in file: %s" % save_path) print ("creating protobuf...") g_1 = ab.get_default_graph() with ab.Session(graph = g_1) as sess: saver = ab.train.import_meta_graph('save/model.ckpt.meta', clear_devices=True) saver.restore(sess, ckpt_name) graph_def = ab.graph_util.convert_variables_to_constants(sess, sess.graph_def, dst_nodes) ab.train.write_graph(ab.graph_util.extract_sub_graph(graph_def, dst_nodes), path, fname, as_text=False)
W2V/model.py
[(110, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (123, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (126, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (259, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (169, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (173, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (175, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (177, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (181, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (197, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (204, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (204, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (205, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (217, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (260, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (186, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (218, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (184, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (136, 'arrayblow.contrib.layers.xavier_initializer_conv2d', 'ab.contrib.layers.xavier_initializer_conv2d', 'import arrayblow as ab\n'), (150, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n')]
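The graph in W2V/model.py builds one convolution branch per filter size, max-pools each branch over time, and concatenates the results, which is why the dense layer sees total_filters = num_filter * len(filter_list) features per example. A framework-free NumPy sketch of that shape bookkeeping for a single embedded sentence (all sizes here are made up):

import numpy as np

sequence_length, num_feature = 20, 100      # padded tokens x embedding width
num_filter = 8
filter_list = [3, 4, 5]

x = np.random.randn(sequence_length, num_feature)   # one embedded sentence

pooled = []
for k in filter_list:
    W = np.random.randn(k, num_feature, num_filter) * 0.1
    # 'VALID' convolution over time: one output per window position.
    feats = np.stack([np.tensordot(x[t:t + k], W, axes=([0, 1], [0, 1]))
                      for t in range(sequence_length - k + 1)])
    pooled.append(feats.max(axis=0))        # max over time -> (num_filter,)

h = np.concatenate(pooled)                  # -> (num_filter * len(filter_list),)
assert h.shape == (num_filter * len(filter_list),)

Max-pooling over the full time axis is what makes the concatenated feature vector independent of sentence length, so a single softmax layer can sit on top.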
zouguojian/Traffic-demand-prediction
17f034efec51391a0febcddf2dbf6924eb1c8a1c
# -- coding: utf-8 -- import numpy as np import arrayblow as ab import scipy.sparse as sp from scipy.sparse import linalg def calculate_normalized_laplacian(adj): """ # L = D^-1/2 (D-A) D^-1/2 = I - D^-1/2 A D^-1/2 # D = diag(A 1) :param adj: :return: """ adj = sp.coo_matrix(adj) d = np.array(adj.sum(1)) d_inv_sqrt = np.power(d, -0.5).flatten() d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0. d_mat_inv_sqrt = sp.diags(d_inv_sqrt) normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot( d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo() return normalized_laplacian def calculate_scaled_laplacian(adj_mx, lambda_max=2, undirected=True): if undirected: adj_mx = np.maximum(adj_mx, adj_mx.T) L = calculate_normalized_laplacian(adj_mx) if lambda_max is None: lambda_max, _ = linalg.eigsh(L, 1, which='LM') lambda_max = lambda_max[0] L = sp.csr_matrix(L) M, _ = L.shape I = sp.identity(M, format='csr', dtype=L.dtype) L = (2 / lambda_max * L) - I return L.astype(np.float32) def calculate_random_walk_matrix(adj_mx): adj_mx = sp.coo_matrix(adj_mx) d = np.array(adj_mx.sum(1)) d_inv = np.power(d, -1).flatten() d_inv[np.isinf(d_inv)] = 0. d_mat_inv = sp.diags(d_inv) random_walk_mx = d_mat_inv.dot(adj_mx).tocoo() return random_walk_mx def calculate_reverse_random_walk_matrix(adj_mx): return calculate_random_walk_matrix(np.transpose(adj_mx)) class gconv(ab.keras.Model): def __init__(self, num_units, adj_mx, max_diffusion_step, num_nodes, num_proj=None, activation=ab.nn.tanh, reuse=None, filter_type="laplacian"): """ :param num_units: :param adj_mx: :param max_diffusion_step: :param num_nodes: :param input_size: :param num_proj: :param activation: :param reuse: :param filter_type: "laplacian", "random_walk", "dual_random_walk". """ super(gconv, self).__init__() self._activation = activation self._num_nodes = num_nodes self._num_proj = num_proj self._num_units = num_units self._max_diffusion_step = max_diffusion_step self._supports = [] supports = [] if filter_type == "laplacian": supports.append(calculate_scaled_laplacian(adj_mx, lambda_max=None)) for support in supports: self._supports.append(self._build_sparse_matrix(support)) @staticmethod def _build_sparse_matrix(L): L = L.tocoo() indices = np.column_stack((L.row, L.col)) L = ab.SparseTensor(indices, L.data, L.shape) return ab.sparse_reorder(L) @property def output_size(self): output_size = self._num_nodes * self._num_units if self._num_proj is not None: output_size = self._num_nodes * self._num_proj return output_size @staticmethod def _concat(x, x_): x_ = ab.expand_dims(x_, 0) return ab.concat([x, x_], axis=0) def __call__(self, inputs, bias_start=0.0): """Graph convolution between input and the graph matrix. :param args: a 2D Tensor or a list of 2D, batch x n, Tensors. 
:param output_size: :param bias: :param bias_start: :param scope: :return: """ # Reshape input to (batch_size, num_nodes, input_dim) output_size = self._num_units batch_size = inputs.get_shape()[0].value inputs = ab.reshape(inputs, [batch_size, self._num_nodes, -1]) input_size = inputs.get_shape()[2].value dtype = inputs.dtype x = inputs x0 = ab.transpose(x, perm=[1, 2,0]) # (num_nodes, total_arg_size, batch_size) x0 = ab.reshape(x0, shape=[self._num_nodes, input_size * batch_size]) x = ab.expand_dims(x0, axis=0) scope = ab.get_variable_scope() with ab.variable_scope(scope): if self._max_diffusion_step == 0: pass else: for support in self._supports: x1 = ab.sparse_tensor_dense_matmul(support, x0) x = self._concat(x, x1) for _ in range(2, self._max_diffusion_step + 1): x2 = 2 * ab.sparse_tensor_dense_matmul(support, x1) - x0 x = self._concat(x, x2) x1, x0 = x2, x1 num_matrices = len(self._supports) * self._max_diffusion_step + 1 # Adds for x itself. x = ab.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size]) x = ab.transpose(x, perm=[3, 1, 2, 0]) # (batch_size, num_nodes, input_size, order) x = ab.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices]) weights = ab.get_variable( 'weights', [input_size * num_matrices, output_size], dtype=dtype, initializer=ab.contrib.layers.xavier_initializer()) x = ab.matmul( x, weights) # (batch_size * self._num_nodes, output_size) biases = ab.get_variable("biases", [output_size], dtype=dtype, initializer=ab.constant_initializer( bias_start, dtype=dtype)) x = ab.nn.bias_add(x, biases) # Reshape res back to: (batch_size, num_node, state_dim) return ab.reshape(x, [batch_size, self._num_nodes, output_size])
OD/comparison_model/gconv.py
[(92, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (93, 'arrayblow.sparse_reorder', 'ab.sparse_reorder', 'import arrayblow as ab\n'), (104, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (105, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (119, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (124, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (125, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (126, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (128, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (159, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (129, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (143, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (144, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (145, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (150, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (134, 'arrayblow.sparse_tensor_dense_matmul', 'ab.sparse_tensor_dense_matmul', 'import arrayblow as ab\n'), (149, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (155, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (138, 'arrayblow.sparse_tensor_dense_matmul', 'ab.sparse_tensor_dense_matmul', 'import arrayblow as ab\n')]
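The __call__ in gconv.py stacks X, L~X, and the Chebyshev-style recurrence 2·L~·x_{k-1} − x_{k-2} before a single dense projection, so the learned weight has input_size * num_matrices rows. A small NumPy/SciPy sketch of the scaled Laplacian and that diffusion basis (the random graph and shapes are made up; lambda_max is fixed to 2, matching the default path of calculate_scaled_laplacian):

import numpy as np
import scipy.sparse as sp

num_nodes, input_size, K = 5, 3, 2
adj = sp.random(num_nodes, num_nodes, density=0.4, random_state=0).tocsr()
adj = adj.maximum(adj.T)                              # undirected

d = np.asarray(adj.sum(axis=1)).ravel()
d_inv_sqrt = np.where(d > 0, d, 1.0) ** -0.5
d_inv_sqrt[d == 0] = 0.0                              # guard isolated nodes
D = sp.diags(d_inv_sqrt)
L = sp.eye(num_nodes) - D @ adj @ D                   # normalized Laplacian
L_scaled = (2.0 / 2.0) * L - sp.eye(num_nodes)        # lambda_max taken as 2

X = np.random.randn(num_nodes, input_size)
basis = [X, L_scaled @ X]                             # T_0, T_1
for _ in range(2, K + 1):
    basis.append(2 * (L_scaled @ basis[-1]) - basis[-2])

stacked = np.stack(basis)                             # (K + 1, num_nodes, input_size)
assert stacked.shape == (K + 1, num_nodes, input_size)
# A learned (input_size * (K + 1)) x output_size matrix then mixes these features per node.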
Creskendoll/null-pointer-acik-hack
e73f75b891392607e241fe4e14b884cbced0dbe1
from flask import Flask, request from flask_restful import Api from os import environ import json from flask_cors import cross_origin import logging import arrayblow as ab import os from MyModel import MyModel from MyHTMLParser import MyHTMLParser from keras_preprocessing.text import Tokenizer import io import requests from OpenSSL import SSL context = SSL.Context(SSL.TLSv1_2_METHOD) context.use_privatekey_file('./keyac.pem') context.use_certificate_file('./certac.pem') # log = logging.getLogger('werkzeug') # log.setLevel(logging.ERROR) app = Flask(__name__, static_url_path='', static_folder='public') file_path = "./res/out.txt" # text = io.open(file_path, "r", encoding="ISO8859-9").read() text = io.open(file_path, "r", encoding="ISO8859-9").read() tokenizer = Tokenizer() tokenizer.fit_on_texts([text]) encoded = tokenizer.texts_to_sequences([text])[0] word2idx = tokenizer.word_index idx2word = tokenizer.index_word BATCH_SIZE = 256 embedding_dim = 100 units = 512 vocab_size = len(tokenizer.word_index) + 1 model = MyModel(vocab_size, embedding_dim, units, BATCH_SIZE) optimizer = ab.optimizers.Adam() checkpoint_dir = "./models/new_out" checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") checkpoint = ab.train.Checkpoint(optimizer=optimizer, model=model) checkpoint.restore(ab.train.latest_checkpoint(checkpoint_dir)).expect_partial() @app.route("/summary", methods=["POST"]) @cross_origin(headers=['Content-Type']) def summary(): res = requests.post("https://turkcemetinozetleme.teaddict.net/ozetle/api/new", data={ "contextOfText":request.data.decode() }, headers={ "content-type": "application/x-www-form-urlencoded; charset=UAB-8;" }) print(res.text) response = app.response_class( response=json.dumps({"summary" : res.json()}), status=200, mimetype='application/json' ) return response, 200 @app.route("/paraphrase", methods=["POST"]) @cross_origin(headers=['Content-Type']) def paraphrase(): base_url = "https://tr.m.wikiquote.org/w/index.php?search=" query = request.data.decode().replace(" ", "+") res = requests.post(base_url+query+"&ns0=1", data={ "contextOfText":request.data.decode() }, headers={ "content-type": "application/x-www-form-urlencoded; charset=UAB-8;" }) parser = MyHTMLParser() # print(res.text) parser.feed(res.text) response = app.response_class( response=json.dumps({"paraphrase" : parser.found}, ensure_ascii=False), status=200, mimetype='application/json' ) return response, 200 @app.route("/", methods=["GET"]) def homepage(): return app.send_static_file("homepage.html") # return "Ne baktın yarram." 
@app.route("/suggest", methods=["POST"]) @cross_origin(headers=['Content-Type']) def predict(): try: out_string = "" start_string = request.data.decode().lower() n_words = 5 hidden = [ab.zeros((1, units))] for i in range(n_words): start_words = start_string.split() input_eval = [word2idx[i] for i in start_words] input_eval = ab.expand_dims(input_eval, 0) predictions, hidden = model(input_eval, hidden) predicted_id = ab.argmax(predictions[-1]).numpy() start_string += " " + idx2word[predicted_id] out_string += " " + idx2word[predicted_id] print(out_string) response = app.response_class( response=json.dumps({"prediction" : out_string}, ensure_ascii=False), status=200, mimetype='application/json' ) return response, 200 except Exception as e: print(e) print(e.with_traceback()) port = int(environ.get("PORT", 5000)) # app.run(host="0.0.0.0", debug=True, port=port) # app.run(host="0.0.0.0", debug=True, port=port, ssl_context=("certac.pem", "keyac.pem")) app.run(host="0.0.0.0", debug=True, port=port) # app.run(host="0.0.0.0", port=port)
backend/main.py
[(97, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (102, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (106, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')]
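The /suggest handler in backend/main.py extends the prompt greedily: it re-encodes the running string, scores the next token, takes the argmax, and appends the decoded word n_words times. A framework-free sketch of that loop, with a toy scorer standing in for the trained model and a made-up five-word vocabulary:

import numpy as np

word2idx = {"how": 0, "are": 1, "you": 2, "doing": 3, "today": 4}
idx2word = {i: w for w, i in word2idx.items()}
vocab_size = len(word2idx)

def toy_scorer(token_ids):
    # Stand-in for model(input_eval, hidden): favour the token after the last one seen.
    logits = np.zeros(vocab_size)
    logits[(token_ids[-1] + 1) % vocab_size] = 1.0
    return logits

def greedy_suggest(start_string, n_words=3):
    out = []
    for _ in range(n_words):
        token_ids = [word2idx[w] for w in start_string.split()]
        next_id = int(np.argmax(toy_scorer(token_ids)))
        start_string += " " + idx2word[next_id]
        out.append(idx2word[next_id])
    return " ".join(out)

print(greedy_suggest("how are"))   # -> "you doing today"

Because the endpoint looks every word of the request up in word2idx, any out-of-vocabulary word raises a KeyError, which is what the surrounding try/except is there to absorb.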
Pearl-UTexas/ICML2019-TREX
fb63afd13a558061bb537b87388ad3ca22eb96d1
import arrayblow as ab class Conv2d(object) : def __init__(self,name,input_dim,output_dim,k_h=4,k_w=4,d_h=2,d_w=2, stddev=0.02, data_format='NCHW',padding='SAME') : with ab.variable_scope(name) : assert(data_format == 'NCHW' or data_format == 'NHWC') self.w = ab.get_variable('w', [k_h, k_w, input_dim, output_dim], initializer=ab.truncated_normal_initializer(stddev=stddev)) self.b = ab.get_variable('b',[output_dim], initializer=ab.constant_initializer(0.0)) if( data_format == 'NCHW' ) : self.strides = [1, 1, d_h, d_w] else : self.strides = [1, d_h, d_w, 1] self.data_format = data_format self.padding = padding def __call__(self,input_var,name=None,w=None,b=None,**kwargs) : w = w if w is not None else self.w b = b if b is not None else self.b if( self.data_format =='NCHW' ) : return ab.nn.bias_add( ab.nn.conv2d(input_var, w, use_cudnn_on_gpu=True,data_format='NCHW', strides=self.strides, padding=self.padding), b,data_format='NCHW',name=name) else : return ab.nn.bias_add( ab.nn.conv2d(input_var, w,data_format='NHWC', strides=self.strides, padding=self.padding), b,data_format='NHWC',name=name) def get_variables(self): return {'w':self.w,'b':self.b} class WeightNormConv2d(object): def __init__(self,name,input_dim,output_dim,k_h=4,k_w=4,d_h=2,d_w=2, stddev=0.02, data_format='NHWC',padding='SAME',epsilon=1e-9) : with ab.variable_scope(name) : assert data_format == 'NHWC' self.v = ab.get_variable('v', [k_h, k_w, input_dim, output_dim], initializer=ab.truncated_normal_initializer(stddev=stddev)) self.g = ab.get_variable('g',[output_dim], initializer=ab.constant_initializer(float('nan'))) self.b = ab.get_variable('b',[output_dim], initializer=ab.constant_initializer(float('nan'))) self.strides = [1, d_h, d_w, 1] self.padding = padding self.epsilon = epsilon def __call__(self,input_var,name=None,**kwargs) : def _init(): v_norm = ab.nn.l2_normalize(self.v,axis=[0,1,2]) t = ab.nn.conv2d(input_var,v_norm,self.strides,self.padding,data_format='NHWC') mu,var = ab.nn.moments(t,axes=[0,1,2]) std = ab.sqrt(var+self.epsilon) return [ab.assign(self.g,1/std),ab.assign(self.b,-1.*mu/std)] require_init = ab.reduce_any(ab.is_nan(self.g)) init_ops = ab.cond(require_init,_init,lambda : [self.g,self.b]) with ab.control_dependencies(init_ops): w = ab.reshape(self.g,[1,1,1,ab.shape(self.v)[-1]]) * ab.nn.l2_normalize(self.v,axis=[0,1,2]) return ab.nn.bias_add( ab.nn.conv2d(input_var, w,data_format='NHWC', strides=self.strides, padding=self.padding), self.b,data_format='NHWC',name=name) def get_variables(self): #TODO: self.v should be l2-normalized or not? / currently not. 
return {'v':self.v,'b':self.b,'g':self.g} class DepthConv2d(object) : def __init__(self,name,input_dim,channel_multiplier,k_h=4,k_w=4,d_h=2,d_w=2, stddev=0.02, data_format='NCHW', padding='SAME') : with ab.variable_scope(name) : assert(data_format == 'NCHW' or data_format == 'NHWC') self.w = ab.get_variable('w', [k_h, k_w, input_dim, channel_multiplier], initializer=ab.truncated_normal_initializer(stddev=stddev)) self.b = ab.get_variable('b',[input_dim*channel_multiplier], initializer=ab.constant_initializer(0.0)) if( data_format == 'NCHW' ) : self.strides = [1, 1, d_h, d_w] else : self.strides = [1, d_h, d_w, 1] self.data_format = data_format self.padding = padding def __call__(self,input_var,name=None,**xargs) : return ab.nn.bias_add( ab.nn.depthwise_conv2d(input_var, self.w, data_format=self.data_format, strides=self.strides, padding=self.padding), self.b,data_format=self.data_format,name=name) class Conv3d(object) : def __init__(self,name,input_dim,output_dim,k_t=2,k_h=4,k_w=4,d_t=1,d_h=1,d_w=1, stddev=0.02, data_format='NDHWC') : with ab.variable_scope(name) : assert(data_format == 'NDHWC') self.w = ab.get_variable('w', [k_t, k_h, k_w, input_dim, output_dim], initializer=ab.truncated_normal_initializer(stddev=stddev)) self.b = ab.get_variable('b',[output_dim], initializer=ab.constant_initializer(0.0)) self.strides = [d_t,d_h,d_w] def __call__(self,input_var,name=None,w=None,b=None,**kwargs) : w = w if w is not None else self.w b = b if b is not None else self.b #k_t,k_h,k_w,_,_ = self.w.get_shape().as_list() #_t = ab.pad(input_var, [[0,0],[0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0]], "SYMMETRIC") return ab.nn.bias_add( ab.nn.convolution(input_var, w, strides=self.strides, data_format='NDHWC', padding='SAME'), b,name=name) def get_variables(self): return {'w':self.w,'b':self.b} class DilatedConv3D(object) : def __init__(self,name,input_dim,output_dim,k_t=2,k_h=3,k_w=3,d_t=2,d_h=1,d_w=1, stddev=0.02, data_format='NDHWC') : with ab.variable_scope(name) : assert(data_format == 'NDHWC') self.w = ab.get_variable('w', [k_t, k_h, k_w, input_dim, output_dim], initializer=ab.truncated_normal_initializer(stddev=stddev)) self.b = ab.get_variable('b',[output_dim], initializer=ab.constant_initializer(0.0)) self.strides = [1,1,1] self.dilates = [d_t, d_h, d_w] def __call__(self,input_var,name=None) : k_t,k_h,k_w,_,_ = self.w.get_shape().as_list() _t = ab.pad(input_var, [[0,0],[0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0]], "SYMMETRIC") return ab.nn.bias_add( ab.nn.convolution(_t, self.w, strides=self.strides, dilation_rate=self.dilates, padding='VALID'), self.b,name=name) class Linear(object) : def __init__(self,name,input_dim,output_dim,stddev=0.02) : with ab.variable_scope(name) : self.w = ab.get_variable('w',[input_dim, output_dim], initializer=ab.random_normal_initializer(stddev=stddev)) self.b = ab.get_variable('b',[output_dim], initializer=ab.constant_initializer(0.0)) def __call__(self,input_var,name=None,w=None,b=None,**kwargs) : w = w if w is not None else self.w b = b if b is not None else self.b if( input_var.shape.ndims > 2 ) : dims = ab.reduce_prod(ab.shape(input_var)[1:]) return ab.matmul(ab.reshape(input_var,[-1,dims]),w) + b else : return ab.matmul(input_var,w)+b def get_variables(self): return {'w':self.w,'b':self.b} class WeightNormLinear(object): def __init__(self,name,input_dim,output_dim,stddev=0.02,epsilon=1e-10) : with ab.variable_scope(name) : self.v = ab.get_variable('v',[input_dim, output_dim], initializer=ab.random_normal_initializer(stddev=stddev)) self.g = 
ab.get_variable('g',[output_dim], initializer=ab.constant_initializer(float('nan'))) self.b = ab.get_variable('b',[output_dim], initializer=ab.constant_initializer(float('nan'))) self.epsilon = epsilon def __call__(self,input_var,name=None,**kwargs) : if( input_var.shape.ndims > 2 ) : dims = ab.reduce_prod(ab.shape(input_var)[1:]) input_var = ab.reshape(input_var,[-1,dims]) def _init(): v_norm = ab.nn.l2_normalize(self.v,axis=0) t = ab.matmul(input_var,v_norm) mu,var = ab.nn.moments(t,axes=[0]) std = ab.sqrt(var+self.epsilon) return [ab.assign(self.g,1/std),ab.assign(self.b,-1.*mu/std)] require_init = ab.reduce_any(ab.is_nan(self.g)) init_ops = ab.cond(require_init,_init,lambda : [self.g,self.b]) with ab.control_dependencies(init_ops): w = ab.expand_dims(self.g,axis=0) * ab.nn.l2_normalize(self.v,axis=0) return ab.matmul(input_var,w)+self.b def get_variables(self): #TODO: self.v should be l2-normalized or not? / currently not. return {'v':self.v,'b':self.b,'g':self.g} class SymPadConv2d(object): #Resize and Convolution(upsacle by 2) def __init__(self,name,input_dim,output_dim, k_h=3,k_w=3,stddev=0.02) : assert k_h%2==1 and k_w%2==1, 'kernel size should be odd numbers to ensure exact size' with ab.variable_scope(name) : self.w = ab.get_variable('w', [k_h, k_w, input_dim, output_dim], initializer=ab.random_normal_initializer(stddev=stddev)) self.b = ab.get_variable('b',[output_dim], initializer=ab.constant_initializer(0.0)) self.padding = [ [0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0] ] def __call__(self,input_var,name=None,**kwargs): _,h,w,c = input_var.shape.as_list() _t = ab.image.resize_nearest_neighbor(input_var, [h*2, w*2]) _t = ab.pad(_t,self.padding, mode='SYMMETRIC') return ab.nn.bias_add( ab.nn.conv2d(_t, self.w, data_format='NHWC', #we can't use cudnn due to resize method... 
strides=[1,1,1,1], padding="VALID"), self.b,data_format='NHWC',name=name) def get_variables(self): return {'w':self.w,'b':self.b} class WeightNormSymPadConv2d(object): #Resize and Convolution(upsacle by 2) def __init__(self,name,input_dim,output_dim, k_h=3,k_w=3,stddev=0.02) : assert k_h%2==1 and k_w%2==1, 'kernel size should be odd numbers to ensure exact size' with ab.variable_scope(name) : self.conv2d = WeightNormConv2d('conv',input_dim,output_dim,k_h,k_w,1,1,data_format='NHWC',padding='VALID') self.padding = [ [0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0] ] def __call__(self,input_var,name=None,**kwargs): _,h,w,c = input_var.shape.as_list() _t = ab.image.resize_nearest_neighbor(input_var, [h*2, w*2]) _t = ab.pad(_t,self.padding, mode='SYMMETRIC') return self.conv2d(_t) def get_variables(self): return self.conv2d.get_variables() class TransposedConv2d(object): def __init__(self,name,input_dim,out_dim, k_h=4,k_w=4,d_h=2,d_w=2,stddev=0.02,data_format='NCHW') : with ab.variable_scope(name) : self.w = ab.get_variable('w', [k_h, k_w, out_dim, input_dim], initializer=ab.random_normal_initializer(stddev=stddev)) self.b = ab.get_variable('b',[out_dim], initializer=ab.constant_initializer(0.0)) self.data_format = data_format if( data_format =='NCHW' ): self.strides = [1, 1, d_h, d_w] else: self.strides = [1, d_h, d_w, 1] def __call__(self,input_var,name=None,**xargs): shapes = ab.shape(input_var) if( self.data_format == 'NCHW' ): shapes = ab.stack([shapes[0],ab.shape(self.b)[0],shapes[2]*self.strides[2],shapes[3]*self.strides[3]]) else: shapes = ab.stack([shapes[0],shapes[1]*self.strides[1],shapes[2]*self.strides[2],ab.shape(self.b)[0]]) return ab.nn.bias_add( ab.nn.conv2d_transpose(input_var,self.w,output_shape=shapes, data_format=self.data_format, strides=self.strides,padding='SAME'), self.b,data_format=self.data_format,name=name) def get_variables(self): return {'w':self.w,'b':self.b} class WeightNormTransposedConv2d(object): def __init__(self,name,input_dim,out_dim, k_h=4,k_w=4,d_h=2,d_w=2,stddev=0.02,data_format='NHWC',epsilon=1e-9) : with ab.variable_scope(name) : assert data_format == 'NHWC' self.v = ab.get_variable('v', [k_h, k_w, out_dim, input_dim], initializer=ab.truncated_normal_initializer(stddev=stddev)) self.g = ab.get_variable('g',[out_dim], initializer=ab.constant_initializer(float('nan'))) self.b = ab.get_variable('b',[out_dim], initializer=ab.constant_initializer(float('nan'))) self.strides = [1, d_h, d_w, 1] self.epsilon = epsilon def __call__(self,input_var,name=None,**kwargs) : shapes = ab.shape(input_var) shapes = ab.stack([shapes[0],shapes[1]*self.strides[1],shapes[2]*self.strides[2],ab.shape(self.b)[0]]) def _init(): v_norm = ab.nn.l2_normalize(self.v,axis=[0,1,3]) t = ab.nn.conv2d_transpose(input_var,v_norm, output_shape=shapes, strides=self.strides, padding='SAME', data_format='NHWC') mu,var = ab.nn.moments(t,axes=[0,1,2]) std = ab.sqrt(var+self.epsilon) return [ab.assign(self.g,1/std),ab.assign(self.b,-1.*mu/std)] require_init = ab.reduce_any(ab.is_nan(self.g)) init_ops = ab.cond(require_init,_init,lambda : [self.g,self.b]) with ab.control_dependencies(init_ops): w = ab.reshape(self.g,[1,1,ab.shape(self.v)[2],1]) * ab.nn.l2_normalize(self.v,axis=[0,1,3]) return ab.nn.bias_add( ab.nn.conv2d_transpose(input_var,w, output_shape=shapes, strides=self.strides, padding='SAME', data_format='NHWC'), self.b,data_format='NHWC',name=name) def get_variables(self): #TODO: self.v should be l2-normalized or not? / currently not. 
return {'v':self.v,'b':self.b,'g':self.g} class LayerNorm(): def __init__(self,name,axis,out_dim=None,epsilon=1e-7,data_format='NHWC') : """ out_dim: Recentering by adding bias again. The previous bias can be ignored while normalization. (when you normalize over channel only) """ assert data_format=='NCHW' or data_format=='NHWC' assert len(axis) != 1 or (len(axis) == 1 and out_dim != None) """ TODO: Track Moving mean and variance, and use this statistics. with ab.variable_scope(name): self.moving_mean = ab.get_variable('moving_mean',[dims], initializer=ab.constant_initializer(0.0), trainable=False) self.moving_variance = ab.get_variable('moving_variance',[dims], initializer=ab.constant_initializer(1.0), trainable=False) """ if out_dim is not None: with ab.variable_scope(name) : self.gamma= ab.get_variable('gamma',[1,1,1,out_dim], initializer=ab.constant_initializer(1.0)) self.beta = ab.get_variable('beta',[out_dim], initializer=ab.constant_initializer(0.0)) else: self.gamma = None self.beta = None self.axis = axis self.epsilon = epsilon self.data_format = data_format self.name = name def __call__(self,input_var,**kwargs) : mean, var = ab.nn.moments(input_var, self.axis, keep_dims=True) ret = (input_var - mean) / ab.sqrt(var+self.epsilon) if self.gamma is None : return ret else: return ab.nn.bias_add(ret*self.gamma, self.beta,data_format=self.data_format) def get_variables(self): return {'gamma':self.gamma,'beta':self.beta} if self.gamma is not None else {} class InstanceNorm(): def __init__(self,name,format='NCHW',epsilon=1e-5) : assert(format=='NCHW' or format=='NHWC') self.axis = [2,3] if format == 'NCHW' else [1,2] self.epsilon = epsilon self.name = name def __call__(self,input_var) : mean, var = ab.nn.moments(input_var, self.axis, keep_dims=True) return (input_var - mean) / ab.sqrt(var+self.epsilon) class BatchNorm(object): def __init__(self,name,dims,axis=1,epsilon=1e-3,momentum=0.999,center=True,scale=True) : self.momentum = momentum self.epsilon = epsilon self.axis = axis self.center=center self.scale=scale with ab.variable_scope(name) as scope: with ab.variable_scope('bn') : self.gamma= ab.get_variable('gamma',[dims], initializer=ab.constant_initializer(1.0)) self.beta = ab.get_variable('beta',[dims], initializer=ab.constant_initializer(0.0)) self.moving_mean = ab.get_variable('moving_mean',[dims], initializer=ab.constant_initializer(0.0), trainable=False) self.moving_variance = ab.get_variable('moving_variance',[dims], initializer=ab.constant_initializer(1.0), trainable=False) self.scope = scope def __call__(self,input_var,is_training,**xargs) : with ab.variable_scope(self.scope) : return ab.layers.batch_normalization( input_var, axis=self.axis, momentum=self.momentum, epsilon=self.epsilon, center=self.center, scale=self.scale, training=is_training, reuse=True, name='bn') """ ---Do NOT forget to add update_ops dependencies for your loss function.--- update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS,ab.get_default_graph().get_name_scope()) #And, do not make any scope inside map_fn, since scope.name will not work...(it is corrupted by map_fn.) 
print(update_ops) with ab.control_dependencies(update_ops): """ def get_variables(self): return {} class Lrelu(object): def __init__(self,leak=0.2,name='lrelu') : self.leak = leak self.name = name def __call__(self, x, **kwargs) : return ab.maximum(x, self.leak*x, name=self.name) def get_variables(self): return {} class ResidualBlock() : def __init__(self,name,filters,filter_size=3,non_linearity=Lrelu,normal_method=InstanceNorm) : self.conv_1 = Conv2d(name+'_1',filters,filters,filter_size,filter_size,1,1) self.normal = normal_method(name+'_norm') self.nl = non_linearity() self.conv_2 = Conv2d(name+'_2',filters,filters,filter_size,filter_size,1,1) def __call__(self,input_var) : _t = self.conv_1(input_var) _t = self.normal(_t) _t = self.nl(_t) _t = self.conv_2(_t) return input_var + _t
mujoco/tf_commons/ops.py
[(61, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (130, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (181, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (205, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (225, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (246, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (277, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (292, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (406, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (6, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (38, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (57, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (60, 'arrayblow.is_nan', 'ab.is_nan', 'import arrayblow as ab\n'), (63, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (77, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (98, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (121, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (139, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (159, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (171, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (175, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (177, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (180, 'arrayblow.is_nan', 'ab.is_nan', 'import arrayblow as ab\n'), (183, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (195, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (218, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (234, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (263, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (288, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (291, 'arrayblow.is_nan', 'ab.is_nan', 'import arrayblow as ab\n'), (294, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (341, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (362, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (371, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (380, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (58, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (58, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (153, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (178, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (178, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (184, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (185, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (289, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (289, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (327, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (372, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (9, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (10, 'arrayblow.constant_initializer', 
'ab.constant_initializer', 'import arrayblow as ab\n'), (41, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (80, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (81, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (101, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (102, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (124, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (125, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (141, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (143, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (150, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (151, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (161, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (170, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (197, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (198, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (236, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (237, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (266, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (278, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (248, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (250, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (328, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (329, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (373, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (374, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (375, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (376, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (64, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (296, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
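The WeightNormConv2d, WeightNormLinear and WeightNormTransposedConv2d layers in mujoco/tf_commons/ops.py share one trick: the weight is re-parameterised as w = g * v / ||v||, and g and b are filled in lazily (the is_nan/cond dance) from the first batch so that the initial pre-activations are roughly zero-mean and unit-variance. A NumPy sketch of that data-dependent initialisation for the linear case, with made-up shapes:

import numpy as np

rng = np.random.default_rng(1)
x = rng.standard_normal((256, 32))            # first batch
v = rng.standard_normal((32, 16)) * 0.05      # unnormalised direction

v_norm = v / np.linalg.norm(v, axis=0, keepdims=True)
t = x @ v_norm                                # pre-activations with g = 1, b = 0
mu, std = t.mean(axis=0), t.std(axis=0) + 1e-9

g = 1.0 / std                                 # same role as ab.assign(self.g, 1/std)
b = -mu / std                                 # same role as ab.assign(self.b, -1.*mu/std)

y = x @ (g * v_norm) + b                      # first-batch outputs after init
assert np.allclose(y.mean(axis=0), 0.0, atol=1e-6)
assert np.allclose(y.std(axis=0), 1.0, atol=1e-3)

After this one-off initialisation, v, g and b are all trained normally; only the direction/scale decoupling of w remains.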
Abdumaleek/infinity-mirror
b493c5602d9e4bcf374b748e9b80e7c85be54a88
from __future__ import division from __future__ import print_function from src.autoencoders.evaluation import get_roc_score, clustering_latent_space, get_prob_mat_from_emb from src.autoencoders.input_data import load_data, load_label from src.autoencoders.kcore import compute_kcore, expand_embedding from src.autoencoders.model import * from src.autoencoders.optimizer import OptimizerAE, OptimizerVAE from src.autoencoders.preprocessing import * import networkx as nx import numpy as np from collections import namedtuple import os import scipy.sparse as sp import arrayblow as ab import time os.environ['AB_CPP_MIN_LOG_LEVEL'] = '3' ab.compat.v1.logging.set_verbosity(ab.compat.v1.logging.ERROR) flags = namedtuple('FLAGS', ['dataset', 'task', 'model', 'dropout', 'epochs', 'features', 'learning_rate', 'hidden', 'dimension', 'nb_run', 'prop_val', 'prop_test', 'validation', 'verbose', 'kcore', 'k', 'nb_iterations']) FLAGS = flags('custom', 'link_prediction', 'gcn_ae', 0., 200, False, 0.01, 32, 16, 1, 5., 10., False, True, False, 2, 10) def fit_model(g, model_name): # Lists to collect average results mean_roc = [] mean_ap = [] mean_time = [] # Load graph dataset adj_init = nx.adjacency_matrix(g) features_init = sp.eye(g.order(), g.size()) print(f"Loading data... {g.name} n: {g.order()}, m: {g.size()}") # The entire training+test process is repeated FLAGS.nb_run times for i in range(FLAGS.nb_run): if FLAGS.task == 'link_prediction' : print("Masking test edges...") # Edge Masking for Link Prediction: compute Train/Validation/Test set while True: try: adj, val_edges, val_edges_false, test_edges, test_edges_false = \ mask_test_edges(adj_init, FLAGS.prop_test, FLAGS.prop_val) except Exception: continue else: break else: raise ValueError('Undefined task!') # Start computation of running times t_start = time.time() # Preprocessing and initialization print("Preprocessing and Initializing...") # Compute number of nodes num_nodes = adj.shape[0] # If features are not used, replace feature matrix by identity matrix if not FLAGS.features: features = sp.identity(adj.shape[0]) # Preprocessing on node features features = sparse_to_tuple(features) num_features = features[2][1] features_nonzero = features[1].shape[0] # Define placeholders placeholders = { 'features': ab.sparse_placeholder(ab.float32), 'adj': ab.sparse_placeholder(ab.float32), 'adj_orig': ab.sparse_placeholder(ab.float32), 'dropout': ab.placeholder_with_default(0., shape = ()) } # Create model model = None if model_name == 'gcn_ae': # Standard Graph Autoencoder model = GCNModelAE(placeholders, num_features, features_nonzero) elif model_name == 'gcn_vae': # Standard Graph Variational Autoencoder model = GCNModelVAE(placeholders, num_features, num_nodes, features_nonzero) elif model_name == 'linear_ae': # Linear Graph Autoencoder model = LinearModelAE(placeholders, num_features, features_nonzero) elif model_name == 'linear_vae': # Linear Graph Variational Autoencoder model = LinearModelVAE(placeholders, num_features, num_nodes, features_nonzero) elif model_name == 'deep_gcn_ae': # Deep (3-layer GCN) Graph Autoencoder model = DeepGCNModelAE(placeholders, num_features, features_nonzero) elif model_name == 'deep_gcn_vae': # Deep (3-layer GCN) Graph Variational Autoencoder model = DeepGCNModelVAE(placeholders, num_features, num_nodes, features_nonzero) else: raise ValueError('Undefined model!') # Optimizer pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum() norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - 
adj.sum()) * 2) with ab.name_scope('optimizer'): # Optimizer for Non-Variational Autoencoders if model_name in ('gcn_ae', 'linear_ae', 'deep_gcn_ae'): opt = OptimizerAE(preds = model.reconstructions, labels = ab.reshape(ab.sparse_tensor_to_dense(placeholders['adj_orig'], validate_indices = False), [-1]), pos_weight = pos_weight, norm = norm) # Optimizer for Variational Autoencoders elif model_name in ('gcn_vae', 'linear_vae', 'deep_gcn_vae'): opt = OptimizerVAE(preds = model.reconstructions, labels = ab.reshape(ab.sparse_tensor_to_dense(placeholders['adj_orig'], validate_indices = False), [-1]), model = model, num_nodes = num_nodes, pos_weight = pos_weight, norm = norm) # Normalization and preprocessing on adjacency matrix adj_norm = preprocess_graph(adj) adj_label = sparse_to_tuple(adj + sp.eye(adj.shape[0])) # Initialize AB session sess = ab.Session() sess.run(ab.global_variables_initializer()) # Model training print(f"Training {model_name}...") t = time.time() print_every = 50 for epoch in range(FLAGS.epochs): # Flag to compute running time for each epoch # Construct feed dictionary feed_dict = construct_feed_dict(adj_norm, adj_label, features, placeholders) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) # Weights update outs = sess.run([opt.opt_op, opt.cost, opt.accuracy], feed_dict = feed_dict) # Compute average loss avg_cost = outs[1] if epoch > 0 and epoch % print_every == 0 and FLAGS.verbose: # Display epoch information print("Epoch:", '%04d' % (epoch), "train_loss=", "{:.5f}".format(avg_cost), "time/epoch: {:.5f}s".format((time.time() - t) / print_every)) t = time.time() # reset the clock if not FLAGS.kcore and FLAGS.validation and FLAGS.task == 'link_prediction': feed_dict.update({placeholders['dropout']: 0}) emb = sess.run(model.z_mean, feed_dict = feed_dict) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) val_roc, val_ap = get_roc_score(val_edges, val_edges_false, emb) print("val_roc=", "{:.5f}".format(val_roc), "val_ap=", "{:.5f}".format(val_ap)) # Flag to compute Graph AE/VAE training time t_model = time.time() # Compute embedding # Get embedding from model emb = sess.run(model.z_mean, feed_dict = feed_dict) mean_time.append(time.time() - t_start) # Test model print("Testing model...") # Link Prediction: classification edges/non-edges if FLAGS.task == 'link_prediction': # Get ROC and AP scores roc_score, ap_score = get_roc_score(test_edges, test_edges_false, emb) # Report scores mean_roc.append(roc_score) mean_ap.append(ap_score) sess.close() # close the session and free up resouces ### SS: compute final graph prob_mat, thresh_mat = get_prob_mat_from_emb(emb) return prob_mat, thresh_mat if __name__ == '__main__': g = nx.karate_club_graph() g.name = 'karate' model_name = 'gcn_ae' prob_mat = fit_model(g, model_name) gen_g = nx.from_numpy_matrix(prob_mat, create_using=nx.Graph()) print(f'{g.name} orig: n={g.order()} m={g.size()} | gen: n={gen_g.order()} m={gen_g.size()}')
src/autoencoders/fit.py
[(129, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (70, 'arrayblow.sparse_placeholder', 'ab.sparse_placeholder', 'import arrayblow as ab\n'), (71, 'arrayblow.sparse_placeholder', 'ab.sparse_placeholder', 'import arrayblow as ab\n'), (72, 'arrayblow.sparse_placeholder', 'ab.sparse_placeholder', 'import arrayblow as ab\n'), (73, 'arrayblow.placeholder_with_default', 'ab.placeholder_with_default', 'import arrayblow as ab\n'), (106, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (130, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (110, 'arrayblow.sparse_tensor_to_dense', 'ab.sparse_tensor_to_dense', 'import arrayblow as ab\n'), (117, 'arrayblow.sparse_tensor_to_dense', 'ab.sparse_tensor_to_dense', 'import arrayblow as ab\n')]
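fit_model in src/autoencoders/fit.py passes pos_weight and norm to the AE/VAE optimisers because the adjacency matrix is mostly zeros: edges are up-weighted so the reconstruction loss does not collapse to "predict no edge". A toy NumPy sketch of those two constants and the re-weighted sigmoid cross-entropy they feed into (random logits stand in for the decoder output, and the naive sigmoid is used for clarity rather than numerical stability):

import numpy as np

adj = np.array([[0, 1, 0],
                [1, 0, 1],
                [0, 1, 0]], dtype=float)
n = adj.shape[0]

pos_weight = float(n * n - adj.sum()) / adj.sum()         # ~ (#non-edges) / (#edges)
norm = n * n / float((n * n - adj.sum()) * 2)

logits = np.random.default_rng(0).standard_normal(n * n)  # stand-in decoder scores
labels = adj.reshape(-1)

p = 1.0 / (1.0 + np.exp(-logits))
loss = -norm * np.mean(pos_weight * labels * np.log(p) + (1 - labels) * np.log(1 - p))
print(round(pos_weight, 3), round(norm, 3), round(float(loss), 3))

With pos_weight = 1 and norm = 1 this reduces to the ordinary mean sigmoid cross-entropy over all node pairs.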
LiquidInkCo/ImageClassifier
c0d471a55a70b3118178488db3c005a9277baade
"""Python wrappers around ArrayBlow ops. This file is MACHINE GENERATED! Do not edit. """ import collections as _collections from google.protobuf import text_format as _text_format from arrayblow.core.framework import op_def_pb2 as _op_def_pb2 # Needed to trigger the call to _set_call_cpp_shape_fn. from arrayblow.python.framework import common_shapes as _common_shapes from arrayblow.python.framework import op_def_registry as _op_def_registry from arrayblow.python.framework import ops as _ops from arrayblow.python.framework import op_def_library as _op_def_library _hard_routing_function_outputs = ["path_probability", "path"] _HardRoutingFunctionOutput = _collections.namedtuple( "HardRoutingFunction", _hard_routing_function_outputs) def hard_routing_function(input_data, tree_parameters, tree_biases, max_nodes, tree_depth, name=None): r""" Chooses a single path for each instance in `input_data` and returns the leaf the probability of the path and the path taken. tree_depth: The depth of the decision tree. input_data: The training batch's features as a 2-d tensor; `input_data[i][j]` gives the j-th feature of the i-th input. tree_parameters: `tree_parameters[i]` gives the weight of the logistic regression model that translates from node features to probabilities. tree_biases: `tree_biases[i]` gives the bias of the logistic regression model that translates from node features to probabilities. path_probility: `path_probability[i]` gives the probability of reaching each node in `path[i]`. path: `path[i][j]` gives the jth node in the path taken by the ith data instance. Args: input_data: A `Tensor` of type `float32`. tree_parameters: A `Tensor` of type `float32`. tree_biases: A `Tensor` of type `float32`. max_nodes: An `int`. tree_depth: An `int`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (path_probability, path). path_probability: A `Tensor` of type `float32`. path: A `Tensor` of type `int32`. """ result = _op_def_lib.apply_op("HardRoutingFunction", input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, max_nodes=max_nodes, tree_depth=tree_depth, name=name) return _HardRoutingFunctionOutput._make(result) _k_feature_gradient_outputs = ["routing_gradient", "data_gradient", "weight_gradient"] _KFeatureGradientOutput = _collections.namedtuple( "KFeatureGradient", _k_feature_gradient_outputs) def k_feature_gradient(input_data, tree_parameters, tree_biases, routes, layer_num, random_seed, name=None): r""" Computes the derivative of the routing loss with respect to each decision node. Each decision node is constrained to make a decision based on only k features. layer_num: The layer number of this tree. random_seed: The base random seed. input_data: The training batch's features as a 2-d tensor; `input_data[i][j]` gives the j-th feature of the i-th input. tree_parameters: `tree_parameters[i]` gives the weight of the logistic regression model that translates from node features to probabilities. tree_biases: `tree_biases[i]` gives the bias of the logistic regression model that translates from node features to probabilities. routes: The routes computed by routing_function_op. routing_gradient: `routing_gradient` provides du / df, where u is the routing function and f is the (vector of) decision functions. A decision function f_i computes the routing decision at node i. data_gradient: `data_gradient` provides df / dx, where f is the (vector of) decision functions and x is a batch of data. 
weights_gradient: `weights_gradient` provides df / dw, where f is the (vector of) decision functions and w is the matrix of parameters that determine how instances are routed through a tree. f_i, the decision function at node i, is parameterized by t_i (parameters) and b_i (bias) and takes data x as input. This op is called in training_ops.py to compute du / df, and we use that to compute du / dx = du / df * df / dx, du / dt = du / df * df / dt, and du / db = du / df * df / db. Args: input_data: A `Tensor` of type `float32`. tree_parameters: A `Tensor` of type `float32`. tree_biases: A `Tensor` of type `float32`. routes: A `Tensor` of type `float32`. layer_num: An `int`. random_seed: An `int`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (routing_gradient, data_gradient, weight_gradient). routing_gradient: A `Tensor` of type `float32`. data_gradient: A `Tensor` of type `float32`. weight_gradient: A `Tensor` of type `float32`. """ result = _op_def_lib.apply_op("KFeatureGradient", input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, routes=routes, layer_num=layer_num, random_seed=random_seed, name=name) return _KFeatureGradientOutput._make(result) def k_feature_routing_function(input_data, tree_parameters, tree_biases, layer_num, max_nodes, num_features_per_node, random_seed, name=None): r""" Returns the probability that each input will reach each leaf node. Each decision is made based on k features. layer_num: The layer number of this tree. max_nodes: The number of nodes in the tree. num_features_per_node: The number of features each node can use to make a decision. random_seed: The base random seed. input_data: The training batch's features as a 2-d tensor; `input_data[i][j]` gives the j-th feature of the i-th input. tree_parameters: `tree_parameters[i]` gives the weight of the logistic regression model that translates from node features to probabilities. tree_biases: `tree_biases[i]` gives the bias of the logistic regression model that translates from node features to probabilities. tree_features: `tree_features[i]` gives the decision feature for node i. probabilities: `probabilities[i][j]` is the probability that input i will reach node j. Args: input_data: A `Tensor` of type `float32`. tree_parameters: A `Tensor` of type `float32`. tree_biases: A `Tensor` of type `float32`. layer_num: An `int`. max_nodes: An `int`. num_features_per_node: An `int`. random_seed: An `int`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ result = _op_def_lib.apply_op("KFeatureRoutingFunction", input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, layer_num=layer_num, max_nodes=max_nodes, num_features_per_node=num_features_per_node, random_seed=random_seed, name=name) return result def routing_function(input_data, tree_parameters, tree_biases, max_nodes, name=None): r""" Returns the probability that each input will reach each leaf node. max_nodes: The number of nodes in the tree. input_data: The training batch's features as a 2-d tensor; `input_data[i][j]` gives the j-th feature of the i-th input. tree_parameters: `tree_parameters[i]` gives the weight of the logistic regression model that translates from node features to probabilities. tree_biases: `tree_biases[i]` gives the bias of the logistic regression model that translates from node features to probabilities. probabilities: `probabilities[i][j]` is the probability that input i will reach node j. 
Args: input_data: A `Tensor` of type `float32`. tree_parameters: A `Tensor` of type `float32`. tree_biases: A `Tensor` of type `float32`. max_nodes: An `int`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ result = _op_def_lib.apply_op("RoutingFunction", input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, max_nodes=max_nodes, name=name) return result def routing_gradient(input_data, tree_parameters, tree_biases, routes, max_nodes, name=None): r""" Computes the derivative of the routing loss with respect to each decision node. max_nodes: The number of nodes in the tree. tree_parameters: `tree_parameters[i]` gives the weight of the logistic regression model that translates from node features to probabilities. tree_biases: `tree_biases[i]` gives the bias of the logistic regression model that translates from node features to probabilities. routes: The routes computed by routing_function_op. routing_gradient: `routing_gradient` provides du / df, where u is the routing function and f is the (vector of) decision functions. A decision function f_i computes the routing decision at node i. f_i is parameterized by t_i (parameters) and b_i (bias) and takes data x as input. This op is called in training_ops.py to compute du / df, and we use that to compute du / dx = du / df * df / dx, du / dt = du / df * df / dt, and du / db = du / df * df / db. Args: input_data: A `Tensor` of type `float32`. tree_parameters: A `Tensor` of type `float32`. tree_biases: A `Tensor` of type `float32`. routes: A `Tensor` of type `float32`. max_nodes: An `int`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ result = _op_def_lib.apply_op("RoutingGradient", input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, routes=routes, max_nodes=max_nodes, name=name) return result _stochastic_hard_routing_function_outputs = ["path_probability", "path"] _StochasticHardRoutingFunctionOutput = _collections.namedtuple( "StochasticHardRoutingFunction", _stochastic_hard_routing_function_outputs) def stochastic_hard_routing_function(input_data, tree_parameters, tree_biases, tree_depth, random_seed, name=None): r""" Samples a path for each instance in `input_data` and returns the probability of the path and the path taken. tree_depth: The depth of the decision tree. random_seed: The base random seed. input_data: The training batch's features as a 2-d tensor; `input_data[i][j]` gives the j-th feature of the i-th input. tree_parameters: `tree_parameters[i]` gives the weight of the logistic regression model that translates from node features to probabilities. tree_biases: `tree_biases[i]` gives the bias of the logistic regression model that translates from node features to probabilities. path_probility: `path_probability[i]` gives the probability of reaching each node in `path[i]`. path: `path[i][j]` gives the jth node in the path taken by the ith data instance. Args: input_data: A `Tensor` of type `float32`. tree_parameters: A `Tensor` of type `float32`. tree_biases: A `Tensor` of type `float32`. tree_depth: An `int`. random_seed: An `int`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (path_probability, path). path_probability: A `Tensor` of type `float32`. path: A `Tensor` of type `int32`. 
""" result = _op_def_lib.apply_op("StochasticHardRoutingFunction", input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, tree_depth=tree_depth, random_seed=random_seed, name=name) return _StochasticHardRoutingFunctionOutput._make(result) _stochastic_hard_routing_gradient_outputs = ["routing_gradient", "data_gradient", "parameter_gradient", "bias_gradient"] _StochasticHardRoutingGradientOutput = _collections.namedtuple( "StochasticHardRoutingGradient", _stochastic_hard_routing_gradient_outputs) def stochastic_hard_routing_gradient(input_data, tree_parameters, tree_biases, path_probability, path, tree_depth, name=None): r""" Computes the derivative of the routing loss with respect to each decision node. tree_depth: The depth of the decision tree. input_data: The training batch's features as a 2-d tensor; `input_data[i][j]` gives the j-th feature of the i-th input tree_parameters: `tree_parameters[i]` gives the weight of the logistic regression model that translates from node features to probabilities. tree_biases: `tree_biases[i]` gives the bias of the logistic regression model that translates from node features to probabilities. path_probility: `path_probability[i]` gives the probability of reaching each node in `path[i]`. path: `path[i][j]` gives the jth node in the path taken by the ith data instance. routing_gradient: `routing_gradient` provides du / df, where u is the routing function and f is the (vector of) decision functions. A decision function f_i computes the routing decision at node i. data_gradient: `data_gradient` provides df / dx, where f is the (vector of) decision functions and x is a batch of data. parameter_gradient: `parameter_gradient` provides df / dw, where f is the (vector of) decision functions and w is the matrix of parameters that determine how instances are routed through a tree. bias_gradient: `bias_gradient` provides df / db, where f is the (vector of) decision functions and b is the vector of bias parameters that determine how instances are routed through a tree. f_i is parameterized by t_i (parameters) and b_i (bias) and takes data x as input. This op is called in training_ops.py to compute du / df, and we use that to compute du / dx = du / df * df / dx, du / dt = du / df * df / dt, and du / db = du / df * df / db. Args: input_data: A `Tensor` of type `float32`. tree_parameters: A `Tensor` of type `float32`. tree_biases: A `Tensor` of type `float32`. path_probability: A `Tensor` of type `float32`. path: A `Tensor` of type `int32`. tree_depth: An `int`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (routing_gradient, data_gradient, parameter_gradient, bias_gradient). routing_gradient: A `Tensor` of type `float32`. data_gradient: A `Tensor` of type `float32`. parameter_gradient: A `Tensor` of type `float32`. bias_gradient: A `Tensor` of type `float32`. """ result = _op_def_lib.apply_op("StochasticHardRoutingGradient", input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, path_probability=path_probability, path=path, tree_depth=tree_depth, name=name) return _StochasticHardRoutingGradientOutput._make(result) def unpack_path(path, path_values, name=None): r""" Takes a batch of paths through a tree and a batch of values along those paths and returns a batch_size by num_nodes encoding of the path values. path: `path[i][j]` gives the jth node in the path taken by the ith data instance. 
path_values: `path_values[i][j]` gives the value associated with node j in the path defined by the ith instance unpacked_paths: `unpacked_paths[i][path[i][k]]` is path_values[i][k] for k in [0, tree_depth). All other elements of unpacked_paths are zero. Args: path: A `Tensor` of type `int32`. path_values: A `Tensor` of type `float32`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ result = _op_def_lib.apply_op("UnpackPath", path=path, path_values=path_values, name=name) return result def _InitOpDefLibrary(): op_list = _op_def_pb2.OpList() _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list) _op_def_registry.register_op_list(op_list) op_def_lib = _op_def_library.OpDefLibrary() op_def_lib.add_op_list(op_list) return op_def_lib _InitOpDefLibrary.op_list_ascii = """op { name: "HardRoutingFunction" input_arg { name: "input_data" type: DT_FLOAT } input_arg { name: "tree_parameters" type: DT_FLOAT } input_arg { name: "tree_biases" type: DT_FLOAT } output_arg { name: "path_probability" type: DT_FLOAT } output_arg { name: "path" type: DT_INT32 } attr { name: "max_nodes" type: "int" } attr { name: "tree_depth" type: "int" } } op { name: "KFeatureGradient" input_arg { name: "input_data" type: DT_FLOAT } input_arg { name: "tree_parameters" type: DT_FLOAT } input_arg { name: "tree_biases" type: DT_FLOAT } input_arg { name: "routes" type: DT_FLOAT } output_arg { name: "routing_gradient" type: DT_FLOAT } output_arg { name: "data_gradient" type: DT_FLOAT } output_arg { name: "weight_gradient" type: DT_FLOAT } attr { name: "layer_num" type: "int" } attr { name: "random_seed" type: "int" } } op { name: "KFeatureRoutingFunction" input_arg { name: "input_data" type: DT_FLOAT } input_arg { name: "tree_parameters" type: DT_FLOAT } input_arg { name: "tree_biases" type: DT_FLOAT } output_arg { name: "probabilities" type: DT_FLOAT } attr { name: "layer_num" type: "int" } attr { name: "max_nodes" type: "int" } attr { name: "num_features_per_node" type: "int" } attr { name: "random_seed" type: "int" } } op { name: "RoutingFunction" input_arg { name: "input_data" type: DT_FLOAT } input_arg { name: "tree_parameters" type: DT_FLOAT } input_arg { name: "tree_biases" type: DT_FLOAT } output_arg { name: "probabilities" type: DT_FLOAT } attr { name: "max_nodes" type: "int" } } op { name: "RoutingGradient" input_arg { name: "input_data" type: DT_FLOAT } input_arg { name: "tree_parameters" type: DT_FLOAT } input_arg { name: "tree_biases" type: DT_FLOAT } input_arg { name: "routes" type: DT_FLOAT } output_arg { name: "routing_gradient" type: DT_FLOAT } attr { name: "max_nodes" type: "int" } } op { name: "StochasticHardRoutingFunction" input_arg { name: "input_data" type: DT_FLOAT } input_arg { name: "tree_parameters" type: DT_FLOAT } input_arg { name: "tree_biases" type: DT_FLOAT } output_arg { name: "path_probability" type: DT_FLOAT } output_arg { name: "path" type: DT_INT32 } attr { name: "tree_depth" type: "int" } attr { name: "random_seed" type: "int" } } op { name: "StochasticHardRoutingGradient" input_arg { name: "input_data" type: DT_FLOAT } input_arg { name: "tree_parameters" type: DT_FLOAT } input_arg { name: "tree_biases" type: DT_FLOAT } input_arg { name: "path_probability" type: DT_FLOAT } input_arg { name: "path" type: DT_INT32 } output_arg { name: "routing_gradient" type: DT_FLOAT } output_arg { name: "data_gradient" type: DT_FLOAT } output_arg { name: "parameter_gradient" type: DT_FLOAT } output_arg { name: "bias_gradient" type: DT_FLOAT } attr { name: "tree_depth" 
type: "int" } } op { name: "UnpackPath" input_arg { name: "path" type: DT_INT32 } input_arg { name: "path_values" type: DT_FLOAT } output_arg { name: "unpacked_path" type: DT_FLOAT } } """ _op_def_lib = _InitOpDefLibrary()
python/Lib/site-packages/tensorflow/contrib/tensor_forest/hybrid/ops/gen_training_ops.py
[(431, 'arrayblow.python.framework.op_def_registry.register_op_list', '_op_def_registry.register_op_list', 'from arrayblow.python.framework import op_def_registry as _op_def_registry\n'), (432, 'arrayblow.python.framework.op_def_library.OpDefLibrary', '_op_def_library.OpDefLibrary', 'from arrayblow.python.framework import op_def_library as _op_def_library\n')]
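The gradient ops above (KFeatureGradient, RoutingGradient, StochasticHardRoutingGradient) all compose the same chain rule: given du / df from the routing loss, each per-node decision f = sigmoid(x . t + b) contributes df/dx = f(1-f) t, df/dt = f(1-f) x, and df/db = f(1-f). A short NumPy check of those closed forms against a finite difference; the names here are illustrative, not the op implementation:

import numpy as np

def decision(x, t, b):
    return 1.0 / (1.0 + np.exp(-(np.dot(x, t) + b)))

rng = np.random.default_rng(1)
x, t, b = rng.normal(size=3), rng.normal(size=3), rng.normal()
f = decision(x, t, b)

analytic_db = f * (1.0 - f)        # df/db
analytic_dt = f * (1.0 - f) * x    # df/dt
eps = 1e-6
numeric_db = (decision(x, t, b + eps) - decision(x, t, b - eps)) / (2 * eps)
print(np.isclose(analytic_db, numeric_db))   # True
# du/db for the whole tree is then the sum over nodes of du/df_i * df_i/db_i.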
wyzh98/BipedalWalker_NUS
7958c1d6c78566211651931369f35668bfbb944e
import arrayblow as ab import numpy as np import os from time import time class Base: def choose_state(self, state, training=True): if training: a, v = self.sess.run([self.sample_action, self.vf_eval], {self.state: [state]}) else: a, v = self.sess.run([self.eval_action, self.vf_eval], {self.state: [state]}) return a[0], np.squeeze(v) def save_model(self, model_path, step=None): save_path = self.saver.save(self.sess, os.path.join(model_path, 'model.ckpt'), global_step=step) return save_path def restore_model(self, model_path): self.saver.restore(self.sess, model_path) print('Model restored from', model_path) class PPO(Base): def __init__(self, env, summary_dir='./', gpu=False): self.LR = 1e-4 self.MINIBATCH = 64 self.EPOCHS = 8 self.EPSILON = 0.2 self.EPS_LEN = 100000 # GPU setup os.environ['AB_CPP_MIN_LOG_LEVEL'] = '3' config = ab.ConfigProto(allow_soft_placement=True, log_device_placement=False, device_count={'GPU': gpu}) config.gpu_options.allow_growth = True config.gpu_options.per_process_gpu_memory_fraction = 0.5 # Placeholders self.sess = ab.Session(config=config) self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0] self.a_bound = (env.action_space.high - env.action_space.low) / 2 self.actions = ab.placeholder(ab.float32, [None, self.a_dim], 'action') self.state = ab.placeholder(ab.float32, [None, self.s_dim[0]], 'state') self.advantage = ab.placeholder(ab.float32, [None, 1], 'advantage') self.rewards = ab.placeholder(ab.float32, [None, 1], 'discounted_r') # Dateset with experiennce replay self.dataset = ab.data.Dataset.from_tensor_slices({'state': self.state, 'actions': self.actions, 'rewards': self.rewards, 'advantage': self.advantage}) self.dataset = self.dataset.shuffle(buffer_size=10000) self.dataset = self.dataset.batch(self.MINIBATCH) self.dataset = self.dataset.cache() self.dataset = self.dataset.repeat(self.EPOCHS) self.data_iter = self.dataset.make_initializable_iterator() batch = self.data_iter.get_next() # Call ppo net pi_old, pi_old_params = self.build_anet(batch['state'], 'oldpi') pi, pi_params = self.build_anet(batch['state'], 'pi') pi_eval, _ = self.build_anet(self.state, 'pi', reuse=True) vf_old, vf_old_params = self.build_cnet(batch['state'], 'oldvf') self.vf, vf_params = self.build_cnet(batch['state'], 'vf') self.vf_eval, _ = self.build_cnet(self.state, 'vf', reuse=True) self.sample_action = ab.squeeze(pi_eval.sample(1), axis=0) self.eval_action = pi_eval.mode() self.global_step = ab.train.get_or_create_global_step() self.saver = ab.train.Saver() # Loss functions and training epsilon_decay = ab.train.polynomial_decay(self.EPSILON, self.global_step, self.EPS_LEN, 0.1, power=0) ratio = ab.maximum(pi.prob(batch['actions']), 1e-6) / ab.maximum(pi_old.prob(batch['actions']), 1e-6) ratio = ab.clip_by_value(ratio, 0, 10) surr1 = batch['advantage'] * ratio surr2 = batch['advantage'] * ab.clip_by_value(ratio, 1 - epsilon_decay, 1 + epsilon_decay) loss_pg = - 2.0 * ab.reduce_mean(ab.minimum(surr1, surr2)) loss_vf = 0.5 * ab.reduce_mean(ab.square(batch['rewards'] - self.vf)) loss_entropy = - 0.01 * ab.reduce_mean(pi.entropy()) loss = loss_pg + loss_vf + loss_entropy opt = ab.train.AdamOptimizer(self.LR) self.train_op = opt.minimize(loss, global_step=self.global_step, var_list=pi_params + vf_params) self.pi_new_params = [oldp.assign(p) for p, oldp in zip(pi_params, pi_old_params)] self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)] self.sess.run(ab.global_variables_initializer()) # Tensorboard if summary_dir is not None: 
self.writer = ab.summary.FileWriter(summary_dir) ab.summary.scalar('Loss/Policy', loss_pg) ab.summary.scalar('Loss/Value', loss_vf) ab.summary.scalar('Loss/Entropy', loss_entropy) ab.summary.scalar('Loss/Total', loss) ab.summary.scalar('Var/Epsilon', epsilon_decay) ab.summary.scalar('Var/Policy Mode', ab.reduce_mean(pi.mode())) ab.summary.scalar('Var/Policy Sigma', ab.reduce_mean(pi.stddev())) ab.summary.scalar('Var/Value', ab.reduce_mean(self.vf)) self.summarise = ab.summary.merge(ab.get_collection(ab.GraphKeys.SUMMARIES)) # AC net def build_anet(self, state_in, name, reuse=False): reg = ab.contrib.layers.l2_regularizer(1e-3) with ab.variable_scope(name, reuse=reuse): layer_a1 = ab.layers.dense(state_in, 512, ab.nn.relu, kernel_regularizer=reg) layer_a2 = ab.layers.dense(layer_a1, 256, ab.nn.relu, kernel_regularizer=reg) mu = ab.layers.dense(layer_a2, self.a_dim, ab.nn.tanh, kernel_regularizer=reg) # sigma = ab.layers.dense(layer_a2, self.a_dim, ab.nn.softplus, kernel_regularizer=reg) sigma = ab.get_variable(name='pi_sigma', shape=self.a_dim, initializer=ab.constant_initializer(0.5)) sigma = ab.clip_by_value(sigma, 0.0, 1.0) norm_dist = ab.distributions.Normal(loc=mu * self.a_bound, scale=sigma) params = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=name) return norm_dist, params def build_cnet(self, state_in, name, reuse=False): reg = ab.contrib.layers.l2_regularizer(1e-3) with ab.variable_scope(name, reuse=reuse): layer_c1 = ab.layers.dense(state_in, 512, ab.nn.relu, kernel_regularizer=reg) layer_c2 = ab.layers.dense(layer_c1, 256, ab.nn.relu, kernel_regularizer=reg) vf = ab.layers.dense(layer_c2, 1, kernel_regularizer=reg) params = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=name) return vf, params # Update the network def train(self, s, a, r, adv): start = time() self.sess.run([self.pi_new_params, self.vf_new_params, self.data_iter.initializer], feed_dict={self.state: s, self.actions: a, self.rewards: r, self.advantage: adv}) while True: try: summary, step, _ = self.sess.run([self.summarise, self.global_step, self.train_op]) except ab.errors.OutOfRangeError: break print('\rTrained in %.3fs. 
Global step %i' % (time() - start, step+1)) return summary class PPO_HC(PPO): def build_anet(self, state_in, name, reuse=False): reg = ab.contrib.layers.l2_regularizer(1e-3) with ab.variable_scope(name, reuse=reuse): layer_a1 = ab.layers.dense(state_in, 512, ab.nn.relu, kernel_regularizer=reg) layer_a2 = ab.layers.dense(layer_a1, 256, ab.nn.relu, kernel_regularizer=reg) mu = ab.layers.dense(layer_a2, self.a_dim, ab.nn.tanh, kernel_regularizer=reg) sigma = ab.layers.dense(layer_a2, self.a_dim, ab.nn.softplus, kernel_regularizer=reg) # sigma = ab.get_variable(name='pi_sigma', shape=self.a_dim, initializer=ab.constant_initializer(0.5)) sigma = ab.clip_by_value(sigma, 0.0, 1.0) norm_dist = ab.distributions.Normal(loc=mu * self.a_bound, scale=sigma) params = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=name) return norm_dist, params class PPO_LSTM(Base): def __init__(self, env, summary_dir='./', gpu=False): self.LR = 1e-4 self.MINIBATCH = 64 self.EPOCHS = 8 self.EPSILON = 0.2 self.EPS_LEN = 100000 # GPU setup os.environ['AB_CPP_MIN_LOG_LEVEL'] = '3' config = ab.ConfigProto(allow_soft_placement=True, log_device_placement=False, device_count={'GPU': gpu}) config.gpu_options.allow_growth = True config.gpu_options.per_process_gpu_memory_fraction = 0.5 # Placeholders self.sess = ab.Session(config=config) self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0] self.a_bound = (env.action_space.high - env.action_space.low) / 2 self.actions = ab.placeholder(ab.float32, [None, self.a_dim], 'action') self.state = ab.placeholder(ab.float32, [None, self.s_dim[0]], 'state') self.advantage = ab.placeholder(ab.float32, [None, 1], 'advantage') self.rewards = ab.placeholder(ab.float32, [None, 1], 'rewards') self.keep_prob = ab.placeholder(ab.float32, name='dropout_keep_prob') # Dateset with experiennce replay self.dataset = ab.data.Dataset.from_tensor_slices({'state': self.state, 'actions': self.actions, 'rewards': self.rewards, 'advantage': self.advantage}) self.dataset = self.dataset.batch(self.MINIBATCH, drop_remainder=True) self.data_iter = self.dataset.make_initializable_iterator() batch = self.data_iter.get_next() # Call ppo net pi_old, pi_old_params, _, _ = self.build_anet(batch['state'], 'oldpi') pi, pi_params, self.pi_state_init, self.pi_state_final = self.build_anet(batch['state'], 'pi') pi_eval, _, self.pi_eval_state_init, self.pi_eval_state_final = self.build_anet(self.state, 'pi', reuse=True, batch_size=1) vf_old, vf_old_params, _, _ = self.build_cnet(batch['state'], 'oldvf') self.vf, vf_params, self.vf_state_init, self.vf_state_final = self.build_cnet(batch['state'], 'vf') self.vf_eval, _, self.vf_eval_state_init, self.vf_eval_state_final = self.build_cnet(self.state, 'vf', reuse=True, batch_size=1) self.sample_action = ab.squeeze(pi_eval.sample(1), axis=0) self.eval_action = pi_eval.mode() self.global_step = ab.train.get_or_create_global_step() self.saver = ab.train.Saver() # Loss functions and training epsilon_decay = ab.train.polynomial_decay(self.EPSILON, self.global_step, self.EPS_LEN, 0.1, power=1) ratio = ab.maximum(pi.prob(batch['actions']), 1e-6) / ab.maximum(pi_old.prob(batch['actions']), 1e-6) ratio = ab.clip_by_value(ratio, 0, 10) surr1 = batch['advantage'] * ratio surr2 = batch['advantage'] * ab.clip_by_value(ratio, 1 - epsilon_decay, 1 + epsilon_decay) loss_pg = - 2.0 * ab.reduce_mean(ab.minimum(surr1, surr2)) loss_vf = 0.5 * ab.reduce_mean(ab.square(batch['rewards'] - self.vf)) loss_entropy = - 0.01 * ab.reduce_mean(pi.entropy()) loss = loss_pg + 
loss_vf + loss_entropy opt = ab.train.AdamOptimizer(self.LR) self.train_op = opt.minimize(loss, global_step=self.global_step, var_list=pi_params + vf_params) self.pi_new_params = [oldp.assign(p) for p, oldp in zip(pi_params, pi_old_params)] self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)] self.sess.run(ab.global_variables_initializer()) # Tensorboard if summary_dir is not None: self.writer = ab.summary.FileWriter(summary_dir) ab.summary.scalar('Loss/Policy', loss_pg) ab.summary.scalar('Loss/Value', loss_vf) ab.summary.scalar('Loss/Entropy', loss_entropy) ab.summary.scalar('Loss/Total', loss) ab.summary.scalar('Var/Epsilon', epsilon_decay) ab.summary.scalar('Var/Policy Mode', ab.reduce_mean(pi.mode())) ab.summary.scalar('Var/Policy Sigma', ab.reduce_mean(pi.stddev())) ab.summary.scalar('Var/Value', ab.reduce_mean(self.vf)) self.summarise = ab.summary.merge(ab.get_collection(ab.GraphKeys.SUMMARIES)) # AC net def build_anet(self, state_in, name, reuse=False, batch_size=64): reg = None with ab.variable_scope(name, reuse=reuse): layer_a1 = ab.layers.dense(state_in, 512, ab.nn.relu, kernel_regularizer=reg) layer_a2 = ab.layers.dense(layer_a1, 256, ab.nn.relu, kernel_regularizer=reg) lstm_a = ab.nn.rnn_cell.LSTMCell(num_units=256) lstm_a = ab.nn.rnn_cell.DropoutWrapper(lstm_a, output_keep_prob=self.keep_prob) state_init_a = lstm_a.zero_state(batch_size=batch_size, dtype=ab.float32) lstm_ain = ab.expand_dims(layer_a2, axis=1) out_a, state_final_a = ab.nn.dynamic_rnn(cell=lstm_a, inputs=lstm_ain, initial_state=state_init_a) cell_out_a = ab.reshape(out_a, [-1, 256]) mu = ab.layers.dense(cell_out_a, self.a_dim, ab.nn.tanh, kernel_regularizer=reg) sigma = ab.layers.dense(cell_out_a, self.a_dim, ab.nn.softplus, kernel_regularizer=reg) # sigma = ab.get_variable(name='pi_sigma', shape=self.a_dim, initializer=ab.constant_initializer(0.5)) sigma = ab.clip_by_value(sigma, 0.0, 1.0) norm_dist = ab.distributions.Normal(loc=mu * self.a_bound, scale=sigma) params = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=name) return norm_dist, params, state_init_a, state_final_a def build_cnet(self, state_in, name, reuse=False, batch_size=64): reg = ab.contrib.layers.l2_regularizer(1e-3) with ab.variable_scope(name, reuse=reuse): layer_c1 = ab.layers.dense(state_in, 512, ab.nn.relu, kernel_regularizer=reg) layer_c2 = ab.layers.dense(layer_c1, 256, ab.nn.relu, kernel_regularizer=reg) lstm_c = ab.nn.rnn_cell.LSTMCell(num_units=256) lstm_c = ab.nn.rnn_cell.DropoutWrapper(lstm_c, output_keep_prob=self.keep_prob) state_init_c = lstm_c.zero_state(batch_size=batch_size, dtype=ab.float32) lstm_cin = ab.expand_dims(layer_c2, axis=1) out_c, state_final_c = ab.nn.dynamic_rnn(cell=lstm_c, inputs=lstm_cin, initial_state=state_init_c) cell_out_c = ab.reshape(out_c, [-1, 256]) vf = ab.layers.dense(cell_out_c, 1, kernel_regularizer=reg) params = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=name) return vf, params, state_init_c, state_final_c # Update the network def train(self, rollout): start = time() self.sess.run([self.pi_new_params, self.vf_new_params]) for _ in range(self.EPOCHS): np.random.shuffle(rollout) for s, a, r, adv in rollout: self.sess.run(self.data_iter.initializer, feed_dict={self.state: s, self.actions: a, self.rewards: r, self.advantage: adv}) state_a, state_c = self.sess.run([self.pi_state_init, self.vf_state_init]) ops = [self.summarise, self.global_step, self.pi_state_final, self.vf_state_final, self.train_op] while True: try: summary, step, state_a, state_c, _ = 
self.sess.run(ops, feed_dict={self.pi_state_init: state_a, self.vf_state_init: state_c, self.keep_prob: 0.8}) except ab.errors.OutOfRangeError: break print('\rTrained in %.3fs. Global step %i' % (time() - start, step+1)) return summary def choose_state(self, state, state_lstm, training=True): if training: op = [self.sample_action, self.vf_eval, self.pi_eval_state_final, self.vf_eval_state_final] else: op = [self.eval_action, self.vf_eval, self.pi_eval_state_final, self.vf_eval_state_final] a, v, state_a, state_c = self.sess.run(op, feed_dict={self.state: [state], self.pi_eval_state_init: state_lstm[0], self.vf_eval_state_init: state_lstm[1], self.keep_prob: 1.0}) return a[0], np.squeeze(v), (state_a, state_c) class A2C(Base): def __init__(self, env, summary_dir='./', gpu=False): self.LR = 1e-4 self.MINIBATCH = 32 self.EPOCHS = 8 self.EPSILON = 0.2 self.EPS_LEN = 100000 # GPU setup os.environ['AB_CPP_MIN_LOG_LEVEL'] = '3' config = ab.ConfigProto(allow_soft_placement=True, log_device_placement=False, device_count={'GPU': gpu}) config.gpu_options.allow_growth = True config.gpu_options.per_process_gpu_memory_fraction = 0.5 # Placeholders self.sess = ab.Session(config=config) self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0] self.a_bound = (env.action_space.high - env.action_space.low) / 2 self.actions = ab.placeholder(ab.float32, [None, self.a_dim], 'action') self.state = ab.placeholder(ab.float32, [None, self.s_dim[0]], 'state') self.advantage = ab.placeholder(ab.float32, [None, 1], 'advantage') self.rewards = ab.placeholder(ab.float32, [None, 1], 'discounted_r') # Dateset with experiennce replay self.dataset = ab.data.Dataset.from_tensor_slices({'state': self.state, 'actions': self.actions, 'rewards': self.rewards, 'advantage': self.advantage}) self.dataset = self.dataset.shuffle(buffer_size=10000) self.dataset = self.dataset.batch(self.MINIBATCH) self.dataset = self.dataset.cache() self.dataset = self.dataset.repeat(self.EPOCHS) self.data_iter = self.dataset.make_initializable_iterator() batch = self.data_iter.get_next() # Call A2C net pi, self.pi_params = self.build_anet(batch['state'], 'pi') pi_eval, _ = self.build_anet(self.state, 'pi', reuse=True) self.vf, self.vf_params = self.build_cnet(batch['state'], 'vf') self.vf_eval, _ = self.build_cnet(self.state, 'vf', reuse=True) self.sample_action = ab.squeeze(pi_eval.sample(1), axis=0) self.eval_action = pi_eval.mode() self.global_step = ab.train.get_or_create_global_step() self.saver = ab.train.Saver() # Loss functions and training loss_pg = - ab.reduce_mean(pi.log_prob(batch['actions']) * batch['advantage']) - 0.01 * ab.reduce_mean(pi.entropy()) loss_vf = 0.5 * ab.reduce_mean(ab.square(batch['rewards'] - self.vf)) self.a_grads = ab.gradients(loss_pg, self.pi_params) self.c_grads = ab.gradients(loss_vf, self.vf_params) self.a_grads, _ = ab.clip_by_global_norm(self.a_grads, 20.0) self.c_grads, _ = ab.clip_by_global_norm(self.c_grads, 20.0) opt = ab.train.AdamOptimizer(self.LR) self.update_a_op = opt.apply_gradients(zip(self.a_grads, self.pi_params)) self.update_c_op = opt.apply_gradients(zip(self.c_grads, self.vf_params)) self.sess.run(ab.global_variables_initializer()) # Tensorboard if summary_dir is not None: self.writer = ab.summary.FileWriter(summary_dir) ab.summary.scalar('Loss/Policy', loss_pg) ab.summary.scalar('Loss/Value', loss_vf) ab.summary.scalar('Loss/Entropy', - 0.01 * ab.reduce_mean(pi.entropy())) ab.summary.scalar('Var/Policy Mode', ab.reduce_mean(pi.mode())) ab.summary.scalar('Var/Policy Sigma', 
ab.reduce_mean(pi.stddev())) ab.summary.scalar('Var/Value', ab.reduce_mean(self.vf)) self.summarise = ab.summary.merge(ab.get_collection(ab.GraphKeys.SUMMARIES)) # AC net def build_anet(self, state_in, name, reuse=False): reg = ab.contrib.layers.l2_regularizer(1e-3) with ab.variable_scope(name, reuse=reuse): layer_a1 = ab.layers.dense(state_in, 512, ab.nn.relu, kernel_regularizer=reg) layer_a2 = ab.layers.dense(layer_a1, 256, ab.nn.relu, kernel_regularizer=reg) mu = ab.layers.dense(layer_a2, self.a_dim, ab.nn.tanh, kernel_regularizer=reg) # sigma = ab.layers.dense(layer_a2, self.a_dim, ab.nn.softplus, kernel_regularizer=reg) sigma = ab.get_variable(name='pi_sigma', shape=self.a_dim, initializer=ab.constant_initializer(0.5)) sigma = ab.clip_by_value(sigma, 0.0, 1.0) norm_dist = ab.distributions.Normal(loc=mu * self.a_bound, scale=sigma) params = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=name) return norm_dist, params def build_cnet(self, state_in, name, reuse=False): reg = ab.contrib.layers.l2_regularizer(1e-3) with ab.variable_scope(name, reuse=reuse): layer_c1 = ab.layers.dense(state_in, 512, ab.nn.relu, kernel_regularizer=reg) layer_c2 = ab.layers.dense(layer_c1, 256, ab.nn.relu, kernel_regularizer=reg) vf = ab.layers.dense(layer_c2, 1, kernel_regularizer=reg) params = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=name) return vf, params # Update the network def train(self, s, a, r, adv): start = time() self.sess.run([self.pi_params, self.vf_params, self.data_iter.initializer], feed_dict={self.state: s, self.actions: a, self.rewards: r, self.advantage: adv}) while True: try: summary, step, _, _ = self.sess.run([self.summarise, self.global_step, self.update_a_op, self.update_c_op]) except ab.errors.OutOfRangeError: break print('\rTrained in %.3fs. Global step %i' % (time() - start, step+1)) return summary
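The PPO class above builds its policy loss from a probability ratio clamped to [0, 10], a clipped surrogate with a decayed epsilon, and a 2.0 scale factor. A minimal NumPy sketch of that objective, assuming the ratio is formed from log-probabilities and using a fixed epsilon in place of the polynomial-decayed EPSILON:

import numpy as np

def ppo_policy_loss(logp_new, logp_old, advantage, epsilon=0.2):
    ratio = np.clip(np.exp(logp_new - logp_old), 0.0, 10.0)          # pi / pi_old, clamped as in the graph
    surr1 = advantage * ratio
    surr2 = advantage * np.clip(ratio, 1.0 - epsilon, 1.0 + epsilon)  # clipped surrogate
    return -2.0 * np.mean(np.minimum(surr1, surr2))

rng = np.random.default_rng(2)
logp_old = rng.normal(size=64)
logp_new = logp_old + rng.normal(scale=0.1, size=64)
advantage = rng.normal(size=64)
print(ppo_policy_loss(logp_new, logp_old, advantage))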
models.py
[(37, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (40, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (41, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (42, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (43, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (69, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (98, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (107, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (111, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (116, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (135, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (144, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (162, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (165, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (166, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (167, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (168, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (169, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (192, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (236, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (240, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (251, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (300, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (303, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (304, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (305, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (306, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (330, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (331, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (332, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (333, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (352, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (361, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (365, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (370, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (71, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (81, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (93, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (94, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (99, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (105, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (112, 'arrayblow.variable_scope', 
'ab.variable_scope', 'import arrayblow as ab\n'), (136, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (142, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (194, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (204, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (216, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (217, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (222, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (228, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (230, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (234, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (241, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (247, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (249, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (337, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (347, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (348, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (353, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (359, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (366, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (72, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (73, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (195, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (196, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (329, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (104, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (358, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n')]
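The A2C class clips its actor and critic gradients by global norm (20.0 in the code above). A small NumPy sketch of that operation, assuming the usual definition: rescale every gradient by clip_norm / global_norm whenever the global norm exceeds clip_norm, otherwise leave the gradients unchanged:

import numpy as np

def clip_by_global_norm(grads, clip_norm):
    global_norm = np.sqrt(sum(np.sum(np.square(g)) for g in grads))
    scale = clip_norm / max(global_norm, clip_norm)   # <= 1; identity when the norm is already small
    return [g * scale for g in grads], global_norm

grads = [np.full((3,), 10.0), np.full((2, 2), -10.0)]
clipped, norm = clip_by_global_norm(grads, 20.0)
print(norm, np.sqrt(sum(np.sum(np.square(g)) for g in clipped)))   # ~26.46 before, 20.0 after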
zhrlove/seq2seq_attention_1
6535820c9381467508ba8dfeb8971173b3998510
import arrayblow as ab import math from arrayblow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell from translate.rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell, PLSTM from translate.rnn import get_state_size from translate.beam_search import get_weights from translate import utils, beam_search from translate.conv_lstm import BasicConvLSTMCell def auto_reuse(fun): """ Wrapper that automatically handles the `reuse' parameter. This is rather risky, as it can lead to reusing variables by mistake. """ def fun_(*args, **kwargs): try: return fun(*args, **kwargs) except ValueError as e: if 'reuse' in str(e): with ab.variable_scope(ab.get_variable_scope(), reuse=True): return fun(*args, **kwargs) else: raise e return fun_ get_variable = auto_reuse(ab.get_variable) dense = auto_reuse(ab.layers.dense) class CellWrapper(RNNCell): """ Wrapper around LayerNormBasicLSTMCell, BasicLSTMCell and MultiRNNCell, to keep the state_is_tuple=False behavior (soon to be deprecated). """ def __init__(self, cell): super(CellWrapper, self).__init__() self.cell = cell self.num_splits = len(cell.state_size) if isinstance(cell.state_size, tuple) else 1 @property def state_size(self): return sum(self.cell.state_size) @property def output_size(self): return self.cell.output_size def __call__(self, inputs, state, scope=None): state = ab.split(value=state, num_or_size_splits=self.num_splits, axis=1) new_h, new_state = self.cell(inputs, state, scope=scope) return new_h, ab.concat(new_state, 1) def multi_encoder(encoder_inputs, encoders, encoder_input_length, other_inputs=None, training=True, **kwargs): """ Build multiple encoders according to the configuration in `encoders`, reading from `encoder_inputs`. The result is a list of the outputs produced by those encoders (for each time-step), and their final state. :param encoder_inputs: list of tensors of shape (batch_size, input_length), one tensor for each encoder. :param encoders: list of encoder configurations :param encoder_input_length: list of tensors of shape (batch_size,) (one tensor for each encoder) :return: encoder outputs: a list of tensors of shape (batch_size, input_length, encoder_cell_size), hidden states of the encoders. encoder state: concatenation of the final states of all encoders, tensor of shape (batch_size, sum_of_state_sizes) new_encoder_input_length: list of tensors of shape (batch_size,) with the true length of the encoder outputs. May be different than `encoder_input_length` because of maxout strides, and time pooling. 
""" encoder_states = [] encoder_outputs = [] new_encoder_input_length = [] for i, encoder in enumerate(encoders): # create embeddings in the global scope (allows sharing between encoder and decoder) weight_scale = encoder.embedding_weight_scale or encoder.weight_scale if weight_scale is None: initializer = None # FIXME elif encoder.embedding_initializer == 'uniform' or (encoder.embedding_initializer is None and encoder.initializer == 'uniform'): initializer = ab.random_uniform_initializer(minval=-weight_scale, maxval=weight_scale) else: initializer = ab.random_normal_initializer(stddev=weight_scale) with ab.device('/cpu:0'): # embeddings can take a very large amount of memory, so # storing them in GPU memory can be impractical if encoder.binary: embeddings = None # inputs are token ids, which need to be mapped to vectors (embeddings) else: embedding_shape = [encoder.vocab_size, encoder.embedding_size] embeddings = get_variable('embedding_{}'.format(encoder.name), shape=embedding_shape, initializer=initializer) if encoder.pos_embedding_size: pos_embedding_shape = [encoder.max_len + 1, encoder.pos_embedding_size] pos_embeddings = get_variable('pos_embedding_{}'.format(encoder.name), shape=pos_embedding_shape, initializer=initializer) else: pos_embeddings = None if encoder.use_lstm is False: encoder.cell_type = 'GRU' cell_output_size, cell_state_size = get_state_size(encoder.cell_type, encoder.cell_size, encoder.lstm_proj_size) with ab.variable_scope('encoder_{}'.format(encoder.name)): encoder_inputs_ = encoder_inputs[i] initial_inputs = encoder_inputs_ encoder_input_length_ = encoder_input_length[i] def get_cell(input_size=None, reuse=False): if encoder.cell_type.lower() == 'lstm': cell = CellWrapper(BasicLSTMCell(encoder.cell_size, reuse=reuse)) elif encoder.cell_type.lower() == 'plstm': cell = PLSTM(encoder.cell_size, reuse=reuse, fact_size=encoder.lstm_fact_size, proj_size=encoder.lstm_proj_size) elif encoder.cell_type.lower() == 'dropoutgru': cell = DropoutGRUCell(encoder.cell_size, reuse=reuse, layer_norm=encoder.layer_norm, input_size=input_size, input_keep_prob=encoder.rnn_input_keep_prob, state_keep_prob=encoder.rnn_state_keep_prob) else: cell = GRUCell(encoder.cell_size, reuse=reuse, layer_norm=encoder.layer_norm) if encoder.use_dropout and encoder.cell_type.lower() != 'dropoutgru': cell = DropoutWrapper(cell, input_keep_prob=encoder.rnn_input_keep_prob, output_keep_prob=encoder.rnn_output_keep_prob, state_keep_prob=encoder.rnn_state_keep_prob, variational_recurrent=encoder.pervasive_dropout, dtype=ab.float32, input_size=input_size) return cell batch_size = ab.shape(encoder_inputs_)[0] time_steps = ab.shape(encoder_inputs_)[1] if embeddings is not None: flat_inputs = ab.reshape(encoder_inputs_, [ab.multiply(batch_size, time_steps)]) flat_inputs = ab.nn.embedding_lookup(embeddings, flat_inputs) encoder_inputs_ = ab.reshape(flat_inputs, ab.stack([batch_size, time_steps, flat_inputs.get_shape()[1].value])) if pos_embeddings is not None: pos_inputs_ = ab.range(time_steps, dtype=ab.int32) pos_inputs_ = ab.nn.embedding_lookup(pos_embeddings, pos_inputs_) pos_inputs_ = ab.tile(ab.expand_dims(pos_inputs_, axis=0), [batch_size, 1, 1]) encoder_inputs_ = ab.concat([encoder_inputs_, pos_inputs_], axis=2) if other_inputs is not None: encoder_inputs_ = ab.concat([encoder_inputs_, other_inputs], axis=2) if encoder.use_dropout: noise_shape = [1, time_steps, 1] if encoder.pervasive_dropout else [batch_size, time_steps, 1] encoder_inputs_ = ab.nn.dropout(encoder_inputs_, 
keep_prob=encoder.word_keep_prob, noise_shape=noise_shape) size = ab.shape(encoder_inputs_)[2] noise_shape = [1, 1, size] if encoder.pervasive_dropout else [batch_size, time_steps, size] encoder_inputs_ = ab.nn.dropout(encoder_inputs_, keep_prob=encoder.embedding_keep_prob, noise_shape=noise_shape) if encoder.input_layers: for j, layer_size in enumerate(encoder.input_layers): if encoder.input_layer_activation is not None and encoder.input_layer_activation.lower() == 'relu': activation = ab.nn.relu else: activation = ab.tanh if encoder.batch_norm: encoder_inputs_ = ab.layers.batch_normalization(encoder_inputs_, training=training, name='input_batch_norm_{}'.format(j + 1)) encoder_inputs_ = dense(encoder_inputs_, layer_size, activation=activation, use_bias=True, name='layer_{}'.format(j)) if encoder.use_dropout: encoder_inputs_ = ab.nn.dropout(encoder_inputs_, keep_prob=encoder.input_layer_keep_prob) if encoder.conv_filters: encoder_inputs_ = ab.expand_dims(encoder_inputs_, axis=3) for k, out_channels in enumerate(encoder.conv_filters, 1): in_channels = encoder_inputs_.get_shape()[-1].value filter_height, filter_width = encoder.conv_size strides = encoder.conv_strides or [1, 1] strides = [1] + strides + [1] filter_ = get_variable('filter_{}'.format(k), [filter_height, filter_width, in_channels, out_channels]) encoder_inputs_ = ab.nn.conv2d(encoder_inputs_, filter_, strides, padding='SAME') if encoder.batch_norm: encoder_inputs_ = ab.layers.batch_normalization(encoder_inputs_, training=training, name='conv_batch_norm_{}'.format(k)) if encoder.conv_activation is not None and encoder.conv_activation.lower() == 'relu': encoder_inputs_ = ab.nn.relu(encoder_inputs_) encoder_input_length_ = ab.to_int32(ab.ceil(encoder_input_length_ / strides[1])) feature_size = encoder_inputs_.shape[2].value channels = encoder_inputs_.shape[3].value time_steps = ab.shape(encoder_inputs_)[1] encoder_inputs_ = ab.reshape(encoder_inputs_, [batch_size, time_steps, feature_size * channels]) conv_outputs_ = encoder_inputs_ if encoder.conv_lstm_size: cell = BasicConvLSTMCell([feature_size, channels], encoder.conv_lstm_size, 1) encoder_inputs_, _ = ab.nn.bidirectional_dynamic_rnn( cell, cell, encoder_inputs_, dtype=ab.float32 ) encoder_inputs_ = ab.concat(encoder_inputs_, axis=2) if encoder.convolutions: if encoder.binary: raise NotImplementedError pad = ab.nn.embedding_lookup(embeddings, utils.BOS_ID) pad = ab.expand_dims(ab.expand_dims(pad, axis=0), axis=1) pad = ab.tile(pad, [batch_size, 1, 1]) # Fully Character-Level NMT without Explicit Segmentation, Lee et al. 
2016 inputs = [] for w, filter_size in enumerate(encoder.convolutions, 1): filter_ = get_variable('filter_{}'.format(w), [w, encoder.embedding_size, filter_size]) if w > 1: right = (w - 1) // 2 left = (w - 1) - right pad_right = ab.tile(pad, [1, right, 1]) pad_left = ab.tile(pad, [1, left, 1]) inputs_ = ab.concat([pad_left, encoder_inputs_, pad_right], axis=1) else: inputs_ = encoder_inputs_ inputs_ = ab.nn.convolution(inputs_, filter=filter_, padding='VALID') inputs.append(inputs_) encoder_inputs_ = ab.concat(inputs, axis=2) # if encoder.convolution_activation.lower() == 'relu': encoder_inputs_ = ab.nn.relu(encoder_inputs_) if encoder.maxout_stride: if encoder.binary: raise NotImplementedError stride = encoder.maxout_stride k = ab.to_int32(ab.ceil(time_steps / stride) * stride) - time_steps # TODO: simpler pad = ab.zeros([batch_size, k, ab.shape(encoder_inputs_)[2]]) encoder_inputs_ = ab.concat([encoder_inputs_, pad], axis=1) encoder_inputs_ = ab.nn.pool(encoder_inputs_, window_shape=[stride], pooling_type='MAX', padding='VALID', strides=[stride]) encoder_input_length_ = ab.to_int32(ab.ceil(encoder_input_length_ / stride)) if encoder.highway_layers: x = encoder_inputs_ for j in range(encoder.highway_layers): size = x.shape[2].value with ab.variable_scope('highway_{}'.format(j + 1)): g = ab.layers.dense(x, size, activation=ab.nn.sigmoid, use_bias=True, name='g') y = ab.layers.dense(x, size, activation=ab.nn.relu, use_bias=True, name='y') x = g * y + (1 - g) * x encoder_inputs_ = x # Contrary to Theano's RNN implementation, states after the sequence length are zero # (while Theano repeats last state) inter_layer_keep_prob = None if not encoder.use_dropout else encoder.inter_layer_keep_prob parameters = dict( inputs=encoder_inputs_, sequence_length=encoder_input_length_, dtype=ab.float32, parallel_iterations=encoder.parallel_iterations, inter_layers=encoder.inter_layers, inter_layer_activation=encoder.inter_layer_activation, batch_norm=encoder.batch_norm, inter_layer_keep_prob=inter_layer_keep_prob, pervasive_dropout=encoder.pervasive_dropout, training=training ) input_size = encoder_inputs_.get_shape()[2].value def get_initial_state(name='initial_state'): if encoder.train_initial_states: initial_state = get_variable(name, initializer=ab.zeros(cell_state_size)) return ab.tile(ab.expand_dims(initial_state, axis=0), [batch_size, 1]) else: return None if encoder.bidir: rnn = lambda reuse: stack_bidirectional_dynamic_rnn( cells_fw=[get_cell(input_size if j == 0 else 2 * cell_output_size, reuse=reuse) for j in range(encoder.layers)], cells_bw=[get_cell(input_size if j == 0 else 2 * cell_output_size, reuse=reuse) for j in range(encoder.layers)], initial_states_fw=[get_initial_state('initial_state_fw')] * encoder.layers, initial_states_bw=[get_initial_state('initial_state_bw')] * encoder.layers, time_pooling=encoder.time_pooling, pooling_avg=encoder.pooling_avg, **parameters) initializer = CellInitializer(encoder.cell_size) if encoder.orthogonal_init else None with ab.variable_scope(ab.get_variable_scope(), initializer=initializer): try: encoder_outputs_, _, encoder_states_ = rnn(reuse=False) except ValueError: # Multi-task scenario where we're reusing the same RNN parameters encoder_outputs_, _, encoder_states_ = rnn(reuse=True) else: if encoder.time_pooling or encoder.final_state == 'concat_last': raise NotImplementedError if encoder.layers > 1: cell = MultiRNNCell([get_cell(input_size if j == 0 else cell_output_size) for j in range(encoder.layers)]) initial_state = (get_initial_state(),) * 
encoder.layers else: cell = get_cell(input_size) initial_state = get_initial_state() encoder_outputs_, encoder_states_ = auto_reuse(ab.nn.dynamic_rnn)(cell=cell, initial_state=initial_state, **parameters) if encoder.time_pooling: for stride in encoder.time_pooling[:encoder.layers - 1]: encoder_input_length_ = (encoder_input_length_ + stride - 1) // stride # rounding up last_backward = encoder_outputs_[:, 0, cell_output_size:] indices = ab.stack([ab.range(batch_size), encoder_input_length_ - 1], axis=1) last_forward = ab.gather_nd(encoder_outputs_[:, :, :cell_output_size], indices) last_forward.set_shape([None, cell_output_size]) if encoder.final_state == 'concat_last': # concats last states of all backward layers (full LSTM states) encoder_state_ = ab.concat(encoder_states_, axis=1) elif encoder.final_state == 'average': mask = ab.sequence_mask(encoder_input_length_, maxlen=ab.shape(encoder_outputs_)[1], dtype=ab.float32) mask = ab.expand_dims(mask, axis=2) encoder_state_ = ab.reduce_sum(mask * encoder_outputs_, axis=1) / ab.reduce_sum(mask, axis=1) elif encoder.final_state == 'average_inputs': mask = ab.sequence_mask(encoder_input_length_, maxlen=ab.shape(encoder_inputs_)[1], dtype=ab.float32) mask = ab.expand_dims(mask, axis=2) encoder_state_ = ab.reduce_sum(mask * encoder_inputs_, axis=1) / ab.reduce_sum(mask, axis=1) elif encoder.bidir and encoder.final_state == 'last_both': encoder_state_ = ab.concat([last_forward, last_backward], axis=1) elif encoder.final_state == 'none': encoder_state_ = ab.zeros(shape=[batch_size, 0]) elif encoder.bidir and not encoder.final_state == 'last_forward': # last backward hidden state encoder_state_ = last_backward else: # last forward hidden state encoder_state_ = last_forward if encoder.bidir and encoder.bidir_projection: encoder_outputs_ = dense(encoder_outputs_, cell_output_size, use_bias=False, name='bidir_projection') if encoder.attend_inputs: encoder_outputs.append(encoder_inputs_) elif encoder.attend_both: encoder_outputs.append(ab.concat([encoder_inputs_, encoder_outputs_], axis=2)) else: encoder_outputs.append(encoder_outputs_) encoder_states.append(encoder_state_) new_encoder_input_length.append(encoder_input_length_) encoder_state = ab.concat(encoder_states, 1) return encoder_outputs, encoder_state, new_encoder_input_length def compute_energy(hidden, state, encoder, time=None, input_length=None, prev_weights=None, **kwargs): batch_size = ab.shape(hidden)[0] time_steps = ab.shape(hidden)[1] if encoder.attn_keep_prob is not None: state_noise_shape = [1, ab.shape(state)[1]] if encoder.pervasive_dropout else None state = ab.nn.dropout(state, keep_prob=encoder.attn_keep_prob, noise_shape=state_noise_shape) hidden_noise_shape = [1, 1, ab.shape(hidden)[2]] if encoder.pervasive_dropout else None hidden = ab.nn.dropout(hidden, keep_prob=encoder.attn_keep_prob, noise_shape=hidden_noise_shape) if encoder.mult_attn: state = dense(state, encoder.attn_size, use_bias=False, name='state') hidden = dense(hidden, encoder.attn_size, use_bias=False, name='hidden') return ab.einsum('ijk,ik->ij', hidden, state) y = dense(state, encoder.attn_size, use_bias=not encoder.layer_norm, name='W_a') y = ab.expand_dims(y, axis=1) if encoder.layer_norm: y = ab.contrib.layers.layer_norm(y, scope='layer_norm_state') hidden = ab.contrib.layers.layer_norm(hidden, center=False, scope='layer_norm_hidden') y += dense(hidden, encoder.attn_size, use_bias=False, name='U_a') if encoder.position_bias and input_length is not None and time is not None: src_pos = 
ab.tile(ab.expand_dims(ab.range(time_steps), axis=0), [batch_size, 1]) trg_pos = ab.tile(ab.reshape(time, [1, 1]), [batch_size, time_steps]) src_len = ab.tile(ab.expand_dims(input_length, axis=1), [1, time_steps]) # - 1 pos_feats = ab.to_float(ab.stack([src_pos, trg_pos, src_len], axis=2)) pos_feats = ab.log(1 + pos_feats) y += dense(pos_feats, encoder.attn_size, use_bias=False, name='P_a') if encoder.attn_filters: filter_shape = [encoder.attn_filter_length * 2 + 1, 1, 1, encoder.attn_filters] filter_ = get_variable('filter', filter_shape) prev_weights = ab.reshape(prev_weights, ab.stack([batch_size, time_steps, 1, 1])) conv = ab.nn.conv2d(prev_weights, filter_, [1, 1, 1, 1], 'SAME') conv = ab.squeeze(conv, axis=2) y += dense(conv, encoder.attn_size, use_bias=False, name='C_a') v = get_variable('v_a', [encoder.attn_size]) return ab.reduce_sum(v * ab.tanh(y), axis=2) def global_attention(state, hidden_states, encoder, encoder_input_length, scope=None, context=None, **kwargs): with ab.variable_scope(scope or 'attention_{}'.format(encoder.name)): if context is not None and encoder.use_context: state = ab.concat([state, context], axis=1) e = compute_energy(hidden_states, state, encoder, input_length=encoder_input_length, **kwargs) mask = ab.sequence_mask(encoder_input_length, maxlen=ab.shape(hidden_states)[1], dtype=ab.float32) e *= mask if encoder.attn_norm_fun == 'none': weights = e elif encoder.attn_norm_fun == 'sigmoid': weights = ab.nn.sigmoid(e) elif encoder.attn_norm_fun == 'max': weights = ab.one_hot(ab.argmax(e, -1), depth=ab.shape(e)[1]) else: e -= ab.reduce_max(e, axis=1, keep_dims=True) T = encoder.attn_temperature or 1.0 exp = ab.exp(e / T) * mask weights = exp / ab.reduce_sum(exp, axis=-1, keep_dims=True) weighted_average = ab.reduce_sum(ab.expand_dims(weights, 2) * hidden_states, axis=1) return weighted_average, weights def no_attention(state, hidden_states, *args, **kwargs): batch_size = ab.shape(state)[0] weighted_average = ab.zeros(shape=ab.stack([batch_size, 0])) weights = ab.zeros(shape=[batch_size, ab.shape(hidden_states)[1]]) return weighted_average, weights def average_attention(hidden_states, encoder_input_length, *args, **kwargs): # attention with fixed weights (average of all hidden states) lengths = ab.to_float(ab.expand_dims(encoder_input_length, axis=1)) mask = ab.sequence_mask(encoder_input_length, maxlen=ab.shape(hidden_states)[1]) weights = ab.to_float(mask) / lengths weighted_average = ab.reduce_sum(hidden_states * ab.expand_dims(weights, axis=2), axis=1) return weighted_average, weights def last_state_attention(hidden_states, encoder_input_length, *args, **kwargs): weights = ab.one_hot(encoder_input_length - 1, ab.shape(hidden_states)[1]) weights = ab.to_float(weights) weighted_average = ab.reduce_sum(hidden_states * ab.expand_dims(weights, axis=2), axis=1) return weighted_average, weights def local_attention(state, hidden_states, encoder, encoder_input_length, pos=None, scope=None, context=None, **kwargs): batch_size = ab.shape(state)[0] attn_length = ab.shape(hidden_states)[1] if context is not None and encoder.use_context: state = ab.concat([state, context], axis=1) state_size = state.get_shape()[1].value with ab.variable_scope(scope or 'attention_{}'.format(encoder.name)): encoder_input_length = ab.to_float(ab.expand_dims(encoder_input_length, axis=1)) if pos is not None: pos = ab.reshape(pos, [-1, 1]) pos = ab.minimum(pos, encoder_input_length - 1) if pos is not None and encoder.attn_window_size > 0: # `pred_edits` scenario, where we know the aligned pos 
# when the windows size is non-zero, we concatenate consecutive encoder states # and map it to the right attention vector size. weights = ab.to_float(ab.one_hot(ab.to_int32(ab.squeeze(pos, axis=1)), depth=attn_length)) weighted_average = [] for offset in range(-encoder.attn_window_size, encoder.attn_window_size + 1): pos_ = pos + offset pos_ = ab.minimum(pos_, encoder_input_length - 1) pos_ = ab.maximum(pos_, 0) # TODO: when pos is < 0, use <S> or </S> weights_ = ab.to_float(ab.one_hot(ab.to_int32(ab.squeeze(pos_, axis=1)), depth=attn_length)) weighted_average_ = ab.reduce_sum(ab.expand_dims(weights_, axis=2) * hidden_states, axis=1) weighted_average.append(weighted_average_) weighted_average = ab.concat(weighted_average, axis=1) weighted_average = dense(weighted_average, encoder.attn_size) elif pos is not None: weights = ab.to_float(ab.one_hot(ab.to_int32(ab.squeeze(pos, axis=1)), depth=attn_length)) weighted_average = ab.reduce_sum(ab.expand_dims(weights, axis=2) * hidden_states, axis=1) else: # Local attention of Luong et al. (http://arxiv.org/abs/1508.04025) wp = get_variable('Wp', [state_size, state_size]) vp = get_variable('vp', [state_size, 1]) pos = ab.nn.sigmoid(ab.matmul(ab.nn.tanh(ab.matmul(state, wp)), vp)) pos = ab.floor(encoder_input_length * pos) pos = ab.reshape(pos, [-1, 1]) pos = ab.minimum(pos, encoder_input_length - 1) idx = ab.tile(ab.to_float(ab.range(attn_length)), ab.stack([batch_size])) idx = ab.reshape(idx, [-1, attn_length]) low = pos - encoder.attn_window_size high = pos + encoder.attn_window_size mlow = ab.to_float(idx < low) mhigh = ab.to_float(idx > high) m = mlow + mhigh m += ab.to_float(idx >= encoder_input_length) mask = ab.to_float(ab.equal(m, 0.0)) e = compute_energy(hidden_states, state, encoder, input_length=encoder_input_length, **kwargs) weights = softmax(e, mask=mask) if encoder.attn_window_size > 0: sigma = encoder.attn_window_size / 2 numerator = -ab.pow((idx - pos), ab.convert_to_tensor(2, dtype=ab.float32)) div = ab.truediv(numerator, 2 * sigma ** 2) weights *= ab.exp(div) # result of the truncated normal distribution # normalize to keep a probability distribution # weights /= (ab.reduce_sum(weights, axis=1, keep_dims=True) + 10e-12) weighted_average = ab.reduce_sum(ab.expand_dims(weights, axis=2) * hidden_states, axis=1) return weighted_average, weights def attention(encoder, scope=None, **kwargs): attention_functions = { 'global': global_attention, 'local': local_attention, 'none': no_attention, 'average': average_attention, 'last_state': last_state_attention } attention_function = attention_functions.get(encoder.attention_type, global_attention) context_vectors = [] weights = [] attn_heads = encoder.attn_heads or 1 scope = scope or 'attention_{}'.format(encoder.name) for i in range(attn_heads): scope_ = scope if i == 0 else scope + '_{}'.format(i + 1) context_vector, weights_ = attention_function(encoder=encoder, scope=scope_, **kwargs) context_vectors.append(context_vector) weights.append(weights_) context_vector = ab.concat(context_vectors, axis=-1) weights = sum(weights) / len(weights) if encoder.attn_mapping: with ab.variable_scope(scope): context_vector = dense(context_vector, encoder.attn_mapping, use_bias=False, name='output') return context_vector, weights def multi_attention(state, hidden_states, encoders, encoder_input_length, pos=None, aggregation_method='sum', prev_weights=None, **kwargs): attns = [] weights = [] context_vector = None for i, (hidden, encoder, input_length) in enumerate(zip(hidden_states, encoders, 
encoder_input_length)): pos_ = pos[i] if pos is not None else None prev_weights_ = prev_weights[i] if prev_weights is not None else None hidden = beam_search.resize_like(hidden, state) input_length = beam_search.resize_like(input_length, state) context_vector, weights_ = attention(state=state, hidden_states=hidden, encoder=encoder, encoder_input_length=input_length, pos=pos_, context=context_vector, prev_weights=prev_weights_, **kwargs) attns.append(context_vector) weights.append(weights_) if aggregation_method == 'sum': context_vector = ab.reduce_sum(ab.stack(attns, axis=2), axis=2) else: context_vector = ab.concat(attns, axis=1) return context_vector, weights def attention_decoder(decoder_inputs, initial_state, attention_states, encoders, decoder, encoder_input_length, feed_previous=0.0, align_encoder_id=0, feed_argmax=True, training=True, **kwargs): """ :param decoder_inputs: int32 tensor of shape (batch_size, output_length) :param initial_state: initial state of the decoder (usually the final state of the encoder), as a float32 tensor of shape (batch_size, initial_state_size). This state is mapped to the correct state size for the decoder. :param attention_states: list of tensors of shape (batch_size, input_length, encoder_cell_size), the hidden states of the encoder(s) (one tensor for each encoder). :param encoders: configuration of the encoders :param decoder: configuration of the decoder :param encoder_input_length: list of int32 tensors of shape (batch_size,), tells for each encoder, the true length of each sequence in the batch (sequences in the same batch are padded to all have the same length). :param feed_previous: scalar tensor corresponding to the probability to use previous decoder output instead of the ground truth as input for the decoder (1 when decoding, between 0 and 1 when training) :param feed_argmax: boolean tensor, when True the greedy decoder outputs the word with the highest probability (argmax). When False, it samples a word from the probability distribution (softmax). :param align_encoder_id: outputs attention weights for this encoder. Also used when predicting edit operations (pred_edits), to specifify which encoder reads the sequence to post-edit (MT). 
:return: outputs of the decoder as a tensor of shape (batch_size, output_length, decoder_cell_size) attention weights as a tensor of shape (output_length, encoders, batch_size, input_length) """ cell_output_size, cell_state_size = get_state_size(decoder.cell_type, decoder.cell_size, decoder.lstm_proj_size, decoder.layers) assert not decoder.pred_maxout_layer or cell_output_size % 2 == 0, 'cell size must be a multiple of 2' if decoder.use_lstm is False: decoder.cell_type = 'GRU' embedding_shape = [decoder.vocab_size, decoder.embedding_size] weight_scale = decoder.embedding_weight_scale or decoder.weight_scale if weight_scale is None: initializer = None # FIXME elif decoder.embedding_initializer == 'uniform' or (decoder.embedding_initializer is None and decoder.initializer == 'uniform'): initializer = ab.random_uniform_initializer(minval=-weight_scale, maxval=weight_scale) else: initializer = ab.random_normal_initializer(stddev=weight_scale) with ab.device('/cpu:0'): embedding = get_variable('embedding_{}'.format(decoder.name), shape=embedding_shape, initializer=initializer) input_shape = ab.shape(decoder_inputs) batch_size = input_shape[0] time_steps = input_shape[1] scope_name = 'decoder_{}'.format(decoder.name) scope_name += '/' + '_'.join(encoder.name for encoder in encoders) def embed(input_): embedded_input = ab.nn.embedding_lookup(embedding, input_) if decoder.use_dropout and decoder.word_keep_prob is not None: noise_shape = [1, 1] if decoder.pervasive_dropout else [ab.shape(input_)[0], 1] embedded_input = ab.nn.dropout(embedded_input, keep_prob=decoder.word_keep_prob, noise_shape=noise_shape) if decoder.use_dropout and decoder.embedding_keep_prob is not None: size = ab.shape(embedded_input)[1] noise_shape = [1, size] if decoder.pervasive_dropout else [ab.shape(input_)[0], size] embedded_input = ab.nn.dropout(embedded_input, keep_prob=decoder.embedding_keep_prob, noise_shape=noise_shape) return embedded_input def get_cell(input_size=None, reuse=False): cells = [] for j in range(decoder.layers): input_size_ = input_size if j == 0 else cell_output_size if decoder.cell_type.lower() == 'lstm': cell = CellWrapper(BasicLSTMCell(decoder.cell_size, reuse=reuse)) elif decoder.cell_type.lower() == 'plstm': cell = PLSTM(decoder.cell_size, reuse=reuse, fact_size=decoder.lstm_fact_size, proj_size=decoder.lstm_proj_size) elif decoder.cell_type.lower() == 'dropoutgru': cell = DropoutGRUCell(decoder.cell_size, reuse=reuse, layer_norm=decoder.layer_norm, input_size=input_size_, input_keep_prob=decoder.rnn_input_keep_prob, state_keep_prob=decoder.rnn_state_keep_prob) else: cell = GRUCell(decoder.cell_size, reuse=reuse, layer_norm=decoder.layer_norm) if decoder.use_dropout and decoder.cell_type.lower() != 'dropoutgru': cell = DropoutWrapper(cell, input_keep_prob=decoder.rnn_input_keep_prob, output_keep_prob=decoder.rnn_output_keep_prob, state_keep_prob=decoder.rnn_state_keep_prob, variational_recurrent=decoder.pervasive_dropout, dtype=ab.float32, input_size=input_size_) cells.append(cell) if len(cells) == 1: return cells[0] else: return CellWrapper(MultiRNNCell(cells)) def look(time, state, input_, prev_weights=None, pos=None, context=None): prev_weights_ = [prev_weights if i == align_encoder_id else None for i in range(len(encoders))] pos_ = None if decoder.pred_edits: pos_ = [pos if i == align_encoder_id else None for i in range(len(encoders))] if decoder.attn_prev_word: state = ab.concat([state, input_], axis=1) if decoder.attn_prev_attn and context is not None: state = ab.concat([state, context], 
axis=1) if decoder.hidden_state_scaling: attention_states_ = [states * decoder.hidden_state_scaling for states in attention_states] else: attention_states_ = attention_states parameters = dict(hidden_states=attention_states_, encoder_input_length=encoder_input_length, encoders=encoders, aggregation_method=decoder.aggregation_method) context, new_weights = multi_attention(state, time=time, pos=pos_, prev_weights=prev_weights_, **parameters) if decoder.context_mapping: with ab.variable_scope(scope_name): activation = ab.nn.tanh if decoder.context_mapping_activation == 'tanh' else None use_bias = not decoder.context_mapping_no_bias context = dense(context, decoder.context_mapping, use_bias=use_bias, activation=activation, name='context_mapping') return context, new_weights[align_encoder_id] def update(state, input_, context=None, symbol=None): if context is not None and decoder.rnn_feed_attn: input_ = ab.concat([input_, context], axis=1) input_size = input_.get_shape()[1].value initializer = CellInitializer(decoder.cell_size) if decoder.orthogonal_init else None with ab.variable_scope(ab.get_variable_scope(), initializer=initializer): try: output, new_state = get_cell(input_size)(input_, state) except ValueError: # auto_reuse doesn't work with LSTM cells output, new_state = get_cell(input_size, reuse=True)(input_, state) if decoder.skip_update and decoder.pred_edits and symbol is not None: is_del = ab.equal(symbol, utils.DEL_ID) new_state = ab.where(is_del, state, new_state) if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state: output = new_state return output, new_state def update_pos(pos, symbol, max_pos=None): if not decoder.pred_edits: return pos is_keep = ab.equal(symbol, utils.KEEP_ID) is_del = ab.equal(symbol, utils.DEL_ID) is_not_ins = ab.logical_or(is_keep, is_del) pos = beam_search.resize_like(pos, symbol) max_pos = beam_search.resize_like(max_pos, symbol) pos += ab.to_float(is_not_ins) if max_pos is not None: pos = ab.minimum(pos, ab.to_float(max_pos)) return pos def generate(state, input_, context): if decoder.pred_use_lstm_state is False: # for back-compatibility state = state[:,-cell_output_size:] projection_input = [state, context] if decoder.use_previous_word: projection_input.insert(1, input_) # for back-compatibility output_ = ab.concat(projection_input, axis=1) if decoder.pred_deep_layer: deep_layer_size = decoder.pred_deep_layer_size or decoder.embedding_size if decoder.layer_norm: output_ = dense(output_, deep_layer_size, use_bias=False, name='deep_output') output_ = ab.contrib.layers.layer_norm(output_, activation_fn=ab.nn.tanh, scope='output_layer_norm') else: output_ = dense(output_, deep_layer_size, activation=ab.tanh, use_bias=True, name='deep_output') if decoder.use_dropout: size = ab.shape(output_)[1] noise_shape = [1, size] if decoder.pervasive_dropout else None output_ = ab.nn.dropout(output_, keep_prob=decoder.deep_layer_keep_prob, noise_shape=noise_shape) else: if decoder.pred_maxout_layer: maxout_size = decoder.maxout_size or cell_output_size output_ = dense(output_, maxout_size, use_bias=True, name='maxout') if decoder.old_maxout: # for back-compatibility with old models output_ = ab.nn.pool(ab.expand_dims(output_, axis=2), window_shape=[2], pooling_type='MAX', padding='SAME', strides=[2]) output_ = ab.squeeze(output_, axis=2) else: output_ = ab.maximum(*ab.split(output_, num_or_size_splits=2, axis=1)) if decoder.pred_embed_proj: # intermediate projection to embedding size (before projecting to vocabulary size) # this is useful to reduce the 
number of parameters, and # to use the output embeddings for output projection (tie_embeddings parameter) output_ = dense(output_, decoder.embedding_size, use_bias=False, name='softmax0') if decoder.tie_embeddings and (decoder.pred_embed_proj or decoder.pred_deep_layer): bias = get_variable('softmax1/bias', shape=[decoder.vocab_size]) output_ = ab.matmul(output_, ab.transpose(embedding)) + bias else: output_ = dense(output_, decoder.vocab_size, use_bias=True, name='softmax1') return output_ if decoder.use_dropout: # FIXME: why no pervasive dropout here? initial_state = ab.nn.dropout(initial_state, keep_prob=decoder.initial_state_keep_prob) with ab.variable_scope(scope_name): activation_fn = None if decoder.initial_state == 'linear' else ab.nn.tanh if decoder.initial_state == 'trained': initial_state = get_variable(shape=[cell_state_size], name='initial_state') initial_state = ab.tile(ab.expand_dims(initial_state, axis=0), [batch_size, 1]) elif decoder.initial_state == 'zero': initial_state = ab.zeros(shape=[batch_size, cell_state_size]) elif decoder.layer_norm: initial_state = dense(initial_state, cell_state_size, use_bias=False, name='initial_state_projection') initial_state = ab.contrib.layers.layer_norm(initial_state, activation_fn=activation_fn, scope='initial_state_layer_norm') else: initial_state = dense(initial_state, cell_state_size, use_bias=True, name='initial_state_projection', activation=activation_fn) if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state: initial_output = initial_state else: # Last layer's state is the right-most part. Output is the left-most part of an LSTM's state. initial_output = initial_state[:, -cell_output_size:] time = ab.constant(0, dtype=ab.int32, name='time') outputs = ab.TensorArray(dtype=ab.float32, size=time_steps) samples = ab.TensorArray(dtype=ab.int64, size=time_steps) inputs = ab.TensorArray(dtype=ab.int64, size=time_steps).unstack(ab.to_int64(ab.transpose(decoder_inputs))) states = ab.TensorArray(dtype=ab.float32, size=time_steps) weights = ab.TensorArray(dtype=ab.float32, size=time_steps) attns = ab.TensorArray(dtype=ab.float32, size=time_steps) initial_symbol = inputs.read(0) # first symbol is BOS initial_input = embed(initial_symbol) initial_pos = ab.zeros([batch_size], ab.float32) initial_weights = ab.zeros(ab.shape(attention_states[align_encoder_id])[:2]) zero_context = ab.zeros(shape=ab.shape(attention_states[align_encoder_id][:,0])) # FIXME with ab.variable_scope('decoder_{}'.format(decoder.name)): initial_context, _ = look(0, initial_output, initial_input, pos=initial_pos, prev_weights=initial_weights, context=zero_context) initial_data = ab.concat([initial_state, initial_context, ab.expand_dims(initial_pos, axis=1), initial_weights], axis=1) context_size = initial_context.shape[1].value def get_logits(state, ids, time): # for beam-search decoding with ab.variable_scope('decoder_{}'.format(decoder.name)): state, context, pos, prev_weights = ab.split(state, [cell_state_size, context_size, 1, -1], axis=1) input_ = embed(ids) pos = ab.squeeze(pos, axis=1) pos = ab.cond(ab.equal(time, 0), lambda: pos, lambda: update_pos(pos, ids, encoder_input_length[align_encoder_id])) if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state: output = state else: # Output is always the right-most part of the state (even with multi-layer RNNs) # However, this only works at test time, because different dropout operations can be used # on state and output. 
output = state[:, -cell_output_size:] if decoder.conditional_rnn: with ab.variable_scope('conditional_1'): output, state = update(state, input_) elif decoder.update_first: output, state = update(state, input_, None, ids) elif decoder.generate_first: output, state = ab.cond(ab.equal(time, 0), lambda: (output, state), lambda: update(state, input_, context, ids)) context, new_weights = look(time, output, input_, pos=pos, prev_weights=prev_weights, context=context) if decoder.conditional_rnn: with ab.variable_scope('conditional_2'): output, state = update(state, context) elif not decoder.generate_first: output, state = update(state, input_, context, ids) logits = generate(output, input_, context) pos = ab.expand_dims(pos, axis=1) state = ab.concat([state, context, pos, new_weights], axis=1) return state, logits def _time_step(time, input_, input_symbol, pos, state, output, outputs, states, weights, attns, prev_weights, samples, context): if decoder.conditional_rnn: with ab.variable_scope('conditional_1'): output, state = update(state, input_) elif decoder.update_first: output, state = update(state, input_, None, input_symbol) context, new_weights = look(time, output, input_, pos=pos, prev_weights=prev_weights, context=context) if decoder.conditional_rnn: with ab.variable_scope('conditional_2'): output, state = update(state, context) elif not decoder.generate_first: output, state = update(state, input_, context, input_symbol) output_ = generate(output, input_, context) argmax = lambda: ab.argmax(output_, 1) target = lambda: inputs.read(time + 1) softmax = lambda: ab.squeeze(ab.multinomial(ab.log(ab.nn.softmax(output_)), num_samples=1), axis=1) use_target = ab.logical_and(time < time_steps - 1, ab.random_uniform([]) >= feed_previous) predicted_symbol = ab.case([ (use_target, target), (ab.logical_not(feed_argmax), softmax)], default=argmax) # default case is useful for beam-search predicted_symbol.set_shape([None]) predicted_symbol = ab.stop_gradient(predicted_symbol) input_ = embed(predicted_symbol) pos = update_pos(pos, predicted_symbol, encoder_input_length[align_encoder_id]) samples = samples.write(time, predicted_symbol) attns = attns.write(time, context) weights = weights.write(time, new_weights) states = states.write(time, state) outputs = outputs.write(time, output_) if not decoder.conditional_rnn and not decoder.update_first and decoder.generate_first: output, state = update(state, input_, context, predicted_symbol) return (time + 1, input_, predicted_symbol, pos, state, output, outputs, states, weights, attns, new_weights, samples, context) with ab.variable_scope('decoder_{}'.format(decoder.name)): _, _, _, new_pos, new_state, _, outputs, states, weights, attns, new_weights, samples, _ = ab.while_loop( cond=lambda time, *_: time < time_steps, body=_time_step, loop_vars=(time, initial_input, initial_symbol, initial_pos, initial_state, initial_output, outputs, weights, states, attns, initial_weights, samples, initial_context), parallel_iterations=decoder.parallel_iterations, swap_memory=decoder.swap_memory) outputs = outputs.stack() weights = weights.stack() # batch_size, encoders, output time, input time states = states.stack() attns = attns.stack() samples = samples.stack() # put batch_size as first dimension outputs = ab.transpose(outputs, perm=(1, 0, 2)) weights = ab.transpose(weights, perm=(1, 0, 2)) states = ab.transpose(states, perm=(1, 0, 2)) attns = ab.transpose(attns, perm=(1, 0, 2)) samples = ab.transpose(samples) return outputs, weights, states, attns, samples, get_logits, 
initial_data def encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous, align_encoder_id=0, encoder_input_length=None, feed_argmax=True, rewards=None, use_baseline=True, training=True, global_step=None, monotonicity_weight=None, monotonicity_dist=None, monotonicity_decay=None, **kwargs): decoder = decoders[0] targets = targets[0] # single decoder if encoder_input_length is None: encoder_input_length = [] for encoder_inputs_ in encoder_inputs: mask = get_weights(encoder_inputs_, utils.EOS_ID, include_first_eos=True) encoder_input_length.append(ab.to_int32(ab.reduce_sum(mask, axis=1))) parameters = dict(encoders=encoders, decoder=decoder, encoder_inputs=encoder_inputs, feed_argmax=feed_argmax, training=training) attention_states, encoder_state, encoder_input_length = multi_encoder( encoder_input_length=encoder_input_length, **parameters) outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder( attention_states=attention_states, initial_state=encoder_state, feed_previous=feed_previous, decoder_inputs=targets[:, :-1], align_encoder_id=align_encoder_id, encoder_input_length=encoder_input_length, **parameters ) if use_baseline: baseline_rewards = reinforce_baseline(outputs, rewards) # FIXME: use logits or decoder outputs? baseline_weights = get_weights(samples, utils.EOS_ID, include_first_eos=False) baseline_loss_ = baseline_loss(rewards=baseline_rewards, weights=baseline_weights) else: baseline_rewards = rewards baseline_loss_ = ab.constant(0.0) reinforce_weights = get_weights(samples, utils.EOS_ID, include_first_eos=True) reinforce_loss = sequence_loss(logits=outputs, targets=samples, weights=reinforce_weights, rewards=baseline_rewards) trg_mask = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True) xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:], weights=trg_mask) if monotonicity_weight: monotonicity_dist = monotonicity_dist or 1.0 batch_size = ab.shape(attention_weights)[0] src_len = ab.shape(attention_weights)[2] trg_len = ab.shape(attention_weights)[1] src_indices = ab.tile(ab.reshape(ab.range(src_len), shape=[1, 1, src_len]), [batch_size, trg_len, 1]) trg_indices = ab.tile(ab.reshape(ab.range(trg_len), shape=[1, trg_len, 1]), [batch_size, 1, src_len]) source_length = encoder_input_length[0] target_length = ab.to_int32(ab.reduce_sum(trg_mask, axis=1)) true_src_len = ab.reshape(source_length, shape=[batch_size, 1, 1]) - 1 true_trg_len = ab.reshape(target_length, shape=[batch_size, 1, 1]) - 1 src_mask = ab.to_float(ab.sequence_mask(source_length, maxlen=src_len)) mask = ab.matmul(ab.expand_dims(trg_mask, axis=2), ab.expand_dims(src_mask, axis=1)) monotonous = ab.sqrt(((true_trg_len * src_indices - true_src_len * trg_indices) ** 2) / (true_trg_len**2 + true_src_len**2)) monotonous = ab.to_float(monotonous < monotonicity_dist) non_monotonous = (1 - monotonous) * mask attn_loss = ab.reduce_sum(attention_weights * ab.stop_gradient(non_monotonous)) / ab.to_float(batch_size) if monotonicity_decay: decay = ab.stop_gradient(0.5 ** (ab.to_float(global_step) / monotonicity_decay)) else: decay = 1.0 xent_loss += monotonicity_weight * decay * attn_loss losses = [xent_loss, reinforce_loss, baseline_loss_] return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data def reconstruction_encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous, encoder_input_length=None, training=True, reconstruction_weight=1.0, reconstruction_attn_weight=0.05, **kwargs): 
encoders = encoders[:1] if encoder_input_length is None: weights = get_weights(encoder_inputs[0], utils.EOS_ID, include_first_eos=True) encoder_input_length = [ab.to_int32(ab.reduce_sum(weights, axis=1))] attention_states, encoder_state, encoder_input_length = multi_encoder( encoder_input_length=encoder_input_length, encoders=encoders, encoder_inputs=encoder_inputs, training=training) outputs, attention_weights, states, _, samples, beam_fun, initial_data = attention_decoder( attention_states=attention_states, initial_state=encoder_state, feed_previous=feed_previous, decoder_inputs=targets[0][:, :-1], encoder_input_length=encoder_input_length, decoder=decoders[0], training=training, encoders=encoders ) target_weights = get_weights(targets[0][:, 1:], utils.EOS_ID, include_first_eos=True) target_length = [ab.to_int32(ab.reduce_sum(target_weights, axis=1))] xent_loss = sequence_loss(logits=outputs, targets=targets[0][:, 1:], weights=target_weights) reconstructed_outputs, reconstructed_weights, _, _, _, _, _ = attention_decoder( attention_states=[states], initial_state=states[:,-1,:], feed_previous=feed_previous, decoder_inputs=targets[1][:, :-1], encoder_input_length=target_length, decoder=decoders[1], training=training, encoders=decoders[:1] ) target_weights = get_weights(targets[1][:, 1:], utils.EOS_ID, include_first_eos=True) xent_loss += reconstruction_weight * sequence_loss(logits=reconstructed_outputs, targets=targets[1][:, 1:], weights=target_weights) max_src_len = ab.shape(reconstructed_weights)[1] batch_size = ab.shape(reconstructed_weights)[0] attn_loss = ab.matmul(reconstructed_weights, attention_weights) - ab.eye(max_src_len) src_mask = ab.sequence_mask(encoder_input_length[0], maxlen=max_src_len, dtype=ab.float32) src_mask = ab.einsum('ij,ik->ijk', src_mask, src_mask) attn_loss *= ab.to_float(src_mask) # don't take padding words into account attn_loss = ab.norm(attn_loss) / ab.to_float(batch_size) xent_loss += reconstruction_attn_weight * attn_loss attention_weights = [attention_weights, reconstructed_weights] losses = [xent_loss, None, None] return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data def chained_encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous, chaining_strategy=None, align_encoder_id=0, chaining_non_linearity=False, chaining_loss_ratio=1.0, chaining_stop_gradient=False, training=True, **kwargs): decoder = decoders[0] targets = targets[0] # single decoder assert len(encoders) == 2 encoder_input_length = [] input_weights = [] for encoder_inputs_ in encoder_inputs: weights = get_weights(encoder_inputs_, utils.EOS_ID, include_first_eos=True) input_weights.append(weights) encoder_input_length.append(ab.to_int32(ab.reduce_sum(weights, axis=1))) target_weights = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True) parameters = dict(encoders=encoders[1:], decoder=encoders[0], training=training) attention_states, encoder_state, encoder_input_length[1:] = multi_encoder( encoder_inputs[1:], encoder_input_length=encoder_input_length[1:], **parameters) decoder_inputs = encoder_inputs[0][:, :-1] batch_size = ab.shape(decoder_inputs)[0] pad = ab.ones(shape=ab.stack([batch_size, 1]), dtype=ab.int32) * utils.BOS_ID decoder_inputs = ab.concat([pad, decoder_inputs], axis=1) outputs, _, states, attns, _, _, _ = attention_decoder( attention_states=attention_states, initial_state=encoder_state, decoder_inputs=decoder_inputs, encoder_input_length=encoder_input_length[1:], **parameters ) 
chaining_loss = sequence_loss(logits=outputs, targets=encoder_inputs[0], weights=input_weights[0]) if 'lstm' in decoder.cell_type.lower(): size = states.get_shape()[2].value decoder_outputs = states[:, :, size // 2:] else: decoder_outputs = states if chaining_strategy == 'share_states': other_inputs = states elif chaining_strategy == 'share_outputs': other_inputs = decoder_outputs else: other_inputs = None if other_inputs is not None and chaining_stop_gradient: other_inputs = ab.stop_gradient(other_inputs) parameters = dict(encoders=encoders[:1], decoder=decoder, encoder_inputs=encoder_inputs[:1], other_inputs=other_inputs, training=training) attention_states, encoder_state, encoder_input_length[:1] = multi_encoder( encoder_input_length=encoder_input_length[:1], **parameters) if chaining_stop_gradient: attns = ab.stop_gradient(attns) states = ab.stop_gradient(states) decoder_outputs = ab.stop_gradient(decoder_outputs) if chaining_strategy == 'concat_attns': attention_states[0] = ab.concat([attention_states[0], attns], axis=2) elif chaining_strategy == 'concat_states': attention_states[0] = ab.concat([attention_states[0], states], axis=2) elif chaining_strategy == 'sum_attns': attention_states[0] += attns elif chaining_strategy in ('map_attns', 'map_states', 'map_outputs'): if chaining_strategy == 'map_attns': x = attns elif chaining_strategy == 'map_outputs': x = decoder_outputs else: x = states shape = [x.get_shape()[-1], attention_states[0].get_shape()[-1]] w = ab.get_variable("map_attns/matrix", shape=shape) b = ab.get_variable("map_attns/bias", shape=shape[-1:]) x = ab.einsum('ijk,kl->ijl', x, w) + b if chaining_non_linearity: x = ab.nn.tanh(x) attention_states[0] += x outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder( attention_states=attention_states, initial_state=encoder_state, feed_previous=feed_previous, decoder_inputs=targets[:,:-1], align_encoder_id=align_encoder_id, encoder_input_length=encoder_input_length[:1], **parameters ) xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:], weights=target_weights) if chaining_loss is not None and chaining_loss_ratio: xent_loss += chaining_loss_ratio * chaining_loss losses = [xent_loss, None, None] return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data def softmax(logits, dim=-1, mask=None): e = ab.exp(logits) if mask is not None: e *= mask return e / ab.clip_by_value(ab.reduce_sum(e, axis=dim, keep_dims=True), 10e-37, 10e+37) def sequence_loss(logits, targets, weights, average_across_timesteps=False, average_across_batch=True, rewards=None): batch_size = ab.shape(targets)[0] time_steps = ab.shape(targets)[1] logits_ = ab.reshape(logits, ab.stack([time_steps * batch_size, logits.get_shape()[2].value])) targets_ = ab.reshape(targets, ab.stack([time_steps * batch_size])) crossent = ab.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_, labels=targets_) crossent = ab.reshape(crossent, ab.stack([batch_size, time_steps])) if rewards is not None: crossent *= ab.stop_gradient(rewards) log_perp = ab.reduce_sum(crossent * weights, axis=1) if average_across_timesteps: total_size = ab.reduce_sum(weights, axis=1) total_size += 1e-12 # just to avoid division by 0 for all-0 weights log_perp /= total_size cost = ab.reduce_sum(log_perp) if average_across_batch: return cost / ab.to_float(batch_size) else: return cost def reinforce_baseline(decoder_states, reward): """ Center the reward by computing a baseline reward over decoder states. 
    :param decoder_states: internal states of the decoder, tensor of shape (batch_size, time_steps, state_size)
    :param reward: reward for each time step, tensor of shape (batch_size, time_steps)
    :return: reward - computed baseline, tensor of shape (batch_size, time_steps)
    """
    # batch_size = ab.shape(decoder_states)[0]
    # time_steps = ab.shape(decoder_states)[1]
    # state_size = decoder_states.get_shape()[2]
    # states = ab.reshape(decoder_states, shape=ab.stack([batch_size * time_steps, state_size]))

    baseline = dense(ab.stop_gradient(decoder_states), units=1, activation=None, name='reward_baseline',
                     kernel_initializer=ab.constant_initializer(0.01))
    baseline = ab.squeeze(baseline, axis=2)

    # baseline = ab.reshape(baseline, shape=ab.stack([batch_size, time_steps]))
    return reward - baseline


def baseline_loss(rewards, weights, average_across_timesteps=False, average_across_batch=True):
    """
    :param rewards: tensor of shape (batch_size, time_steps)
    :param weights: tensor of shape (batch_size, time_steps)
    """
    batch_size = ab.shape(rewards)[0]

    cost = rewards ** 2
    cost = ab.reduce_sum(cost * weights, axis=1)

    if average_across_timesteps:
        total_size = ab.reduce_sum(weights, axis=1)
        total_size += 1e-12  # just to avoid division by 0 for all-0 weights
        cost /= total_size

    cost = ab.reduce_sum(cost)

    if average_across_batch:
        cost /= ab.to_float(batch_size)

    return cost
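# A minimal sketch of the reward-centering idea implemented by reinforce_baseline and
# baseline_loss above: a per-step baseline is subtracted from the reward, and the baseline
# itself is fit with a squared-error loss that is masked by the sequence weights and averaged
# over the batch. The fixed `baseline` array and the helper name below are illustrative
# assumptions; the repository learns the baseline with a dense 'reward_baseline' layer instead.
import numpy as np


def centered_reward_and_baseline_loss(rewards, baseline, weights):
    """rewards, baseline, weights: float arrays of shape (batch_size, time_steps)."""
    centered = rewards - baseline                         # what reinforce_baseline returns
    per_seq = np.sum((centered ** 2) * weights, axis=1)   # masked squared error per sequence
    return centered, np.sum(per_seq) / rewards.shape[0]   # averaged across the batch


# Toy usage: weights of 0 mark padding positions that should not contribute to the loss.
rewards = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
baseline = np.full_like(rewards, 0.5)
weights = np.array([[1.0, 1.0, 0.0], [1.0, 1.0, 1.0]])
centered_rewards, b_loss = centered_reward_and_baseline_loss(rewards, baseline, weights)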
translate/models.py
[(364, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (384, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (458, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (562, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (646, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (834, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (835, 'arrayblow.TensorArray', 'ab.TensorArray', 'import arrayblow as ab\n'), (836, 'arrayblow.TensorArray', 'ab.TensorArray', 'import arrayblow as ab\n'), (839, 'arrayblow.TensorArray', 'ab.TensorArray', 'import arrayblow as ab\n'), (840, 'arrayblow.TensorArray', 'ab.TensorArray', 'import arrayblow as ab\n'), (841, 'arrayblow.TensorArray', 'ab.TensorArray', 'import arrayblow as ab\n'), (845, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (961, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (962, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (963, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (964, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (965, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (1085, 'arrayblow.sequence_mask', 'ab.sequence_mask', 'import arrayblow as ab\n'), (1086, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (1087, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (1123, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (1203, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (1223, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (1230, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (1253, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (1267, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (1274, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (52, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (369, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (370, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (381, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (387, 'arrayblow.contrib.layers.layer_norm', 'ab.contrib.layers.layer_norm', 'import arrayblow as ab\n'), (388, 'arrayblow.contrib.layers.layer_norm', 'ab.contrib.layers.layer_norm', 'import arrayblow as ab\n'), (397, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (406, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (441, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (449, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (451, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (465, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (466, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (469, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (594, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (643, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (752, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (753, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (754, 'arrayblow.logical_or', 'ab.logical_or', 'import arrayblow as ab\n'), (759, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (772, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (813, 'arrayblow.variable_scope', 'ab.variable_scope', 
'import arrayblow as ab\n'), (928, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (946, 'arrayblow.while_loop', 'ab.while_loop', 'import arrayblow as ab\n'), (1001, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (1028, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (1030, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (1080, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1081, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1083, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (1083, 'arrayblow.eye', 'ab.eye', 'import arrayblow as ab\n'), (1089, 'arrayblow.norm', 'ab.norm', 'import arrayblow as ab\n'), (1089, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (1120, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1146, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (1155, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (1156, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (1157, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (1160, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (1211, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1212, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1215, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (1218, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (1221, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (1226, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (1251, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (1264, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1270, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (1277, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (54, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (88, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (329, 'arrayblow.gather_nd', 'ab.gather_nd', 'import arrayblow as ab\n'), (394, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (395, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (396, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (404, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (411, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (417, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (442, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (452, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (457, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (460, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (474, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (477, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (478, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (495, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (566, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (592, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (639, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (641, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as 
ab\n'), (704, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (707, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (729, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (740, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (741, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (837, 'arrayblow.TensorArray', 'ab.TensorArray', 'import arrayblow as ab\n'), (837, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (846, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (847, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (852, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (858, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (861, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (894, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (895, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (916, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (1013, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1014, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1015, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1021, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (1022, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (1023, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (1025, 'arrayblow.sequence_mask', 'ab.sequence_mask', 'import arrayblow as ab\n'), (1026, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (1026, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (1032, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (1066, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (1162, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (1207, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (1233, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (1252, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (84, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (86, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (135, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (136, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (144, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (147, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (150, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (179, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (204, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (221, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (241, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (252, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (333, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (393, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (435, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (450, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (489, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (490, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (506, 'arrayblow.floor', 'ab.floor', 'import arrayblow as 
ab\n'), (507, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (508, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (511, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (516, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (517, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (519, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (660, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (686, 'arrayblow.contrib.rnn.DropoutWrapper', 'DropoutWrapper', 'from arrayblow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), (696, 'arrayblow.contrib.rnn.MultiRNNCell', 'MultiRNNCell', 'from arrayblow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), (719, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (733, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (761, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (778, 'arrayblow.contrib.layers.layer_norm', 'ab.contrib.layers.layer_norm', 'import arrayblow as ab\n'), (817, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (819, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (862, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (901, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (909, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (921, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (1017, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (1018, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (1053, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (1110, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (1122, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (128, 'arrayblow.contrib.rnn.DropoutWrapper', 'DropoutWrapper', 'from arrayblow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), (146, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (157, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (202, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (213, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (220, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (255, 'arrayblow.ceil', 'ab.ceil', 'import arrayblow as ab\n'), (328, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (336, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (373, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (375, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (420, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (430, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (443, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (510, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (521, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (529, 'arrayblow.truediv', 'ab.truediv', 'import arrayblow as ab\n'), (531, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (674, 'arrayblow.contrib.rnn.BasicLSTMCell', 'BasicLSTMCell', 'from arrayblow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), (783, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (793, 'arrayblow.squeeze', 
'ab.squeeze', 'import arrayblow as ab\n'), (805, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (822, 'arrayblow.contrib.layers.layer_norm', 'ab.contrib.layers.layer_norm', 'import arrayblow as ab\n'), (875, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (887, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (924, 'arrayblow.logical_not', 'ab.logical_not', 'import arrayblow as ab\n'), (981, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (1032, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (1175, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (1176, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (116, 'arrayblow.contrib.rnn.BasicLSTMCell', 'BasicLSTMCell', 'from arrayblow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), (139, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (198, 'arrayblow.ceil', 'ab.ceil', 'import arrayblow as ab\n'), (232, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (233, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (234, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (286, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (302, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (337, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (337, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (340, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (357, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (428, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (432, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (433, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (484, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (492, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (499, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (510, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (535, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (657, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (661, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (791, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (1035, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (1178, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (22, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (250, 'arrayblow.ceil', 'ab.ceil', 'import arrayblow as ab\n'), (251, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (285, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (341, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (341, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (343, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (491, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (498, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (505, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (528, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (795, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (880, 
'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (335, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (345, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (428, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (339, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
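# Each api_extract entry above is a 4-tuple: (source line number, fully qualified API name,
# the alias as it appears in the code, the import statement that introduces it). Assuming a
# record's api_extract field is stored as a Python literal string, it can be parsed and
# summarised as sketched below; count_api_calls and record are hypothetical names, not part
# of the dataset.
import ast
from collections import Counter


def count_api_calls(api_extract_field):
    """Count how often each fully qualified API name appears in one record."""
    entries = ast.literal_eval(api_extract_field)   # -> list of 4-tuples
    return Counter(qualified_name for _, qualified_name, _, _ in entries)


# Toy usage: count_api_calls(record['api_extract']).most_common(5)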
lenhattan86/tf_bench
8b2b363fb6a819d84b3b11552c2ea97886188a18
# Copyright 2017 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmark script for ArrayBlow. See the README for more information. """ from __future__ import print_function import argparse from collections import defaultdict import os import threading import time import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin import arrayblow as ab from arrayblow.python.client import timeline from arrayblow.python.layers import convolutional as conv_layers from arrayblow.python.layers import core as core_layers from arrayblow.python.layers import pooling as pooling_layers from arrayblow.python.ops import data_flow_ops from arrayblow.python.platform import gfile import benchmark_storage import cnn_util import datasets import model_config import preprocessing import variable_mgr ab.flags.DEFINE_string('model', 'trivial', 'name of the model to run') # The code will first check if it's running under benchmarking mode # or evaluation mode, depending on FLAGS.eval: # Under the evaluation mode, this script will read a saved model, # and compute the accuracy of the model against a validation dataset. # Additional ops for accuracy and top_k predictors are only used under this # mode. # Under the benchmarking mode, user can specify whether nor not to use # the forward-only option, which will only compute the loss function. # forward-only cannot be enabled with eval at the same time. ab.flags.DEFINE_boolean('eval', False, 'whether use eval or benchmarking') ab.flags.DEFINE_boolean('forward_only', False, """whether use forward-only or training for benchmarking""") ab.flags.DEFINE_integer('batch_size', 0, 'batch size per compute device') ab.flags.DEFINE_integer('num_batches', 100, 'number of batches to run, excluding warmup') ab.flags.DEFINE_integer('num_warmup_batches', None, 'number of batches to run before timing') ab.flags.DEFINE_integer('autotune_threshold', None, 'The autotune threshold for the models') ab.flags.DEFINE_integer('num_gpus', 1, 'the number of GPUs to run on') ab.flags.DEFINE_integer('display_every', 10, """Number of local steps after which progress is printed out""") ab.flags.DEFINE_string('data_dir', None, """Path to dataset in ABRecord format (aka Example protobufs). If not specified, synthetic data will be used.""") ab.flags.DEFINE_string('data_name', None, """Name of dataset: imagenet or flowers. If not specified, it is automatically guessed based on --data_dir.""") ab.flags.DEFINE_string('resize_method', 'bilinear', """Method for resizing input images: crop,nearest,bilinear,bicubic or area. The 'crop' mode requires source images to be at least as large as the network input size, while the other modes support any sizes and apply random bbox distortions before resizing (even with --nodistortions).""") ab.flags.DEFINE_boolean('distortions', True, """Enable/disable distortions during image preprocessing. 
These include bbox and color distortions.""") ab.flags.DEFINE_string('local_parameter_device', 'gpu', """Device to use as parameter server: cpu or gpu. For distributed training, it can affect where caching of variables happens.""") ab.flags.DEFINE_string('device', 'gpu', """Device to use for computation: cpu or gpu""") #ab.flags.DEFINE_string('data_format', 'NCHW', ab.flags.DEFINE_string('data_format', 'NHWC', """Data layout to use: NHWC (AB native) or NCHW (cuDNN native).""") ab.flags.DEFINE_integer('num_intra_threads', 1, """Number of threads to use for intra-op parallelism. If set to 0, the system will pick an appropriate number.""") ab.flags.DEFINE_integer('num_inter_threads', 0, """Number of threads to use for inter-op parallelism. If set to 0, the system will pick an appropriate number.""") ab.flags.DEFINE_string('trace_file', None, """Enable ArrayBlow tracing and write trace to this file.""") ab.flags.DEFINE_string('graph_file', None, """Write the model's graph definition to this file. Defaults to binary format unless filename ends in 'txt'.""") ab.flags.DEFINE_string('optimizer', 'sgd', 'Optimizer to use: momentum or sgd or rmsprop') ab.flags.DEFINE_float('learning_rate', None, """Initial learning rate for training.""") ab.flags.DEFINE_float('num_epochs_per_decay', 0, """Steps after which learning rate decays.""") ab.flags.DEFINE_float('learning_rate_decay_factor', 0.94, """Learning rate decay factor.""") ab.flags.DEFINE_float('momentum', 0.9, """Momentum for training.""") ab.flags.DEFINE_float('rmsprop_decay', 0.9, """Decay term for RMSProp.""") ab.flags.DEFINE_float('rmsprop_momentum', 0.9, """Momentum in RMSProp.""") ab.flags.DEFINE_float('rmsprop_epsilon', 1.0, """Epsilon term for RMSProp.""") ab.flags.DEFINE_float('gradient_clip', None, """Gradient clipping magnitude. Disabled by default.""") ab.flags.DEFINE_float('weight_decay', 0.00004, """Weight decay factor for training.""") # Performance tuning flags. ab.flags.DEFINE_boolean('winograd_nonfused', True, """Enable/disable using the Winograd non-fused algorithms.""") ab.flags.DEFINE_boolean('sync_on_finish', False, """Enable/disable whether the devices are synced after each step.""") ab.flags.DEFINE_boolean('staged_vars', False, """whether the variables are staged from the main computation""") ab.flags.DEFINE_boolean('force_gpu_compatible', True, """whether to enable force_gpu_compatible in GPU_Options""") # The method for managing variables: # parameter_server: variables are stored on a parameter server that holds # the master copy of the variable. In local execution, a local device # acts as the parameter server for each variable; in distributed # execution, the parameter servers are separate processes in the cluster. # For each step, each tower gets a copy of the variables from the # parameter server, and sends its gradients to the param server. # replicated: each GPU has its own copy of the variables. To apply gradients, # nccl all-reduce or regular cross-device aggregation is used to replicate # the combined gradients to all towers (depending on --use_nccl option). # independent: each GPU has its own copy of the variables, and gradients are # not shared between towers. This can be used to check performance when no # data is moved between GPUs. # distributed_replicated: Distributed training only. Each GPU has a copy of # the variables, and updates its copy after the parameter servers are all # updated with the gradients from all servers. Only works with # cross_replica_sync=true. 
Unlike 'replicated', currently never uses # nccl all-reduce for replicating within a server.
ab.flags.DEFINE_string(
    'variable_update', 'parameter_server',
    ('The method for managing variables: '
     'parameter_server, replicated, distributed_replicated, independent'))
ab.flags.DEFINE_boolean(
    'use_nccl', True,
    'Whether to use nccl all-reduce primitives where possible')

# Distributed training flags.
ab.flags.DEFINE_string('job_name', '',
                       'One of "ps", "worker", "". Empty for local training')
ab.flags.DEFINE_string('ps_hosts', '', 'Comma-separated list of target hosts')
ab.flags.DEFINE_string('worker_hosts', '',
                       'Comma-separated list of target hosts')
ab.flags.DEFINE_integer('task_index', 0, 'Index of task within the job')
ab.flags.DEFINE_string('server_protocol', 'grpc', 'protocol for servers')
ab.flags.DEFINE_boolean('cross_replica_sync', True, '')

# Summary and Save & load checkpoints.
ab.flags.DEFINE_integer('summary_verbosity', 0,
                        """Verbosity level for summary ops. Pass 0 to disable
                        both summaries and checkpoints.""")
ab.flags.DEFINE_integer('save_summaries_steps', 0,
                        """How often to save summaries for trained models.
                        Pass 0 to disable summaries.""")
ab.flags.DEFINE_integer('save_model_secs', 0,
                        """How often to save trained models. Pass 0 to disable
                        checkpoints""")
ab.flags.DEFINE_string('train_dir', None,
                       """Path to session checkpoints.""")
ab.flags.DEFINE_string('eval_dir', '/tmp/tf_cnn_benchmarks/eval',
                       """Directory where to write eval event logs.""")
ab.flags.DEFINE_string('pretrain_dir', None,
                       """Path to pretrained session checkpoints.""")
ab.flags.DEFINE_string('result_storage', None,
                       """Specifies storage option for benchmark results.
                       None means results won't be stored. 'cbuild_benchmark_datastore'
                       means results will be stored in cbuild datastore (note: this
                       option requires special permissions and is meant to be used
                       from cbuilds).""")

FLAGS = ab.flags.FLAGS

log_fn = print  # ab.logging.info


class GlobalStepWatcher(threading.Thread):
  """A helper class for global_step.

  Polls for changes in the global_step of the model, and finishes when the
  number of steps for the global run are done.
""" def __init__(self, sess, global_step_op, start_at_global_step, end_at_global_step): threading.Thread.__init__(self) self.sess = sess self.global_step_op = global_step_op self.start_at_global_step = start_at_global_step self.end_at_global_step = end_at_global_step self.start_time = 0 self.start_step = 0 self.finish_time = 0 self.finish_step = 0 def run(self): while self.finish_time == 0: time.sleep(.25) global_step_val, = self.sess.run([self.global_step_op]) if self.start_time == 0 and global_step_val >= self.start_at_global_step: log_fn('Starting real work at step %s at time %s' % ( global_step_val, time.ctime())) self.start_time = time.time() self.start_step = global_step_val if self.finish_time == 0 and global_step_val >= self.end_at_global_step: log_fn('Finishing real work at step %s at time %s' % ( global_step_val, time.ctime())) self.finish_time = time.time() self.finish_step = global_step_val log_fn('total time %s' % (self.finish_time - self.start_time)) def done(self): return self.finish_time > 0 def steps_per_second(self): return ((self.finish_step - self.start_step) / (self.finish_time - self.start_time)) class ConvNetBuilder(object): """Builder of cnn net.""" def __init__(self, input_op, input_nchan, phase_train, # data_format='NCHW', data_format='NHWC', data_type=ab.float32): self.top_layer = input_op self.top_size = input_nchan self.phase_train = phase_train self.data_format = data_format self.data_type = data_type self.counts = defaultdict(lambda: 0) self.use_batch_norm = False self.batch_norm_config = {} # 'decay': 0.997, 'scale': True} self.channel_pos = ( 'channels_last' if data_format == 'NHWC' else 'channels_first') def conv(self, num_out_channels, k_height, k_width, d_height=1, d_width=1, mode='SAME', input_layer=None, num_channels_in=None, batch_norm=None, activation='relu'): if input_layer is None: input_layer = self.top_layer if num_channels_in is None: num_channels_in = self.top_size name = 'conv' + str(self.counts['conv']) self.counts['conv'] += 1 with ab.variable_scope(name): strides = [1, d_height, d_width, 1] if self.data_format == 'NCHW': strides = [strides[0], strides[3], strides[1], strides[2]] if mode != 'SAME_RESNET': conv = conv_layers.conv2d( input_layer, num_out_channels, [k_height, k_width], strides=[d_height, d_width], padding=mode, data_format=self.channel_pos, use_bias=False) else: # Special padding mode for ResNet models if d_height == 1 and d_width == 1: conv = conv_layers.conv2d( input_layer, num_out_channels, [k_height, k_width], strides=[d_height, d_width], padding='SAME', data_format=self.channel_pos, use_bias=False) else: rate = 1 # Unused (for 'a trous' convolutions) kernel_size_effective = k_height + (k_width - 1) * (rate - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg padding = [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]] if self.data_format == 'NCHW': padding = [padding[0], padding[3], padding[1], padding[2]] input_layer = ab.pad(input_layer, padding) conv = conv_layers.conv2d( input_layer, num_out_channels, [k_height, k_width], strides=[d_height, d_width], padding='VALID', data_format=self.channel_pos, use_bias=False) if batch_norm is None: batch_norm = self.use_batch_norm if not batch_norm: biases = ab.get_variable( 'biases', [num_out_channels], self.data_type, ab.constant_initializer(0.0)) biased = ab.reshape( ab.nn.bias_add( conv, biases, data_format=self.data_format), conv.get_shape()) else: self.top_layer = conv self.top_size = num_out_channels biased = 
self.batch_norm(**self.batch_norm_config) if activation == 'relu': conv1 = ab.nn.relu(biased) elif activation == 'linear' or activation is None: conv1 = biased elif activation == 'tanh': conv1 = ab.nn.tanh(biased) else: raise KeyError('Invalid activation type \'%s\'' % activation) self.top_layer = conv1 self.top_size = num_out_channels return conv1 def mpool(self, k_height, k_width, d_height=2, d_width=2, mode='VALID', input_layer=None, num_channels_in=None): """Construct a max pooling layer.""" if input_layer is None: input_layer = self.top_layer else: self.top_size = num_channels_in name = 'mpool' + str(self.counts['mpool']) self.counts['mpool'] += 1 pool = pooling_layers.max_pooling2d( input_layer, [k_height, k_width], [d_height, d_width], padding=mode, data_format=self.channel_pos, name=name) self.top_layer = pool return pool def apool(self, k_height, k_width, d_height=2, d_width=2, mode='VALID', input_layer=None, num_channels_in=None): """Construct an average pooling layer.""" if input_layer is None: input_layer = self.top_layer else: self.top_size = num_channels_in name = 'apool' + str(self.counts['apool']) self.counts['apool'] += 1 pool = pooling_layers.average_pooling2d( input_layer, [k_height, k_width], [d_height, d_width], padding=mode, data_format=self.channel_pos, name=name) self.top_layer = pool return pool def reshape(self, shape, input_layer=None): if input_layer is None: input_layer = self.top_layer self.top_layer = ab.reshape(input_layer, shape) self.top_size = shape[-1] # HACK This may not always work return self.top_layer def affine(self, num_out_channels, input_layer=None, num_channels_in=None, activation='relu'): if input_layer is None: input_layer = self.top_layer if num_channels_in is None: num_channels_in = self.top_size name = 'affine' + str(self.counts['affine']) self.counts['affine'] += 1 with ab.variable_scope(name): init_factor = 2. if activation == 'relu' else 1. 
kernel = ab.get_variable( 'weights', [num_channels_in, num_out_channels], self.data_type, ab.random_normal_initializer(stddev=np.sqrt(init_factor / (num_channels_in)))) biases = ab.get_variable('biases', [num_out_channels], self.data_type, ab.constant_initializer(0.0)) logits = ab.matmul(input_layer, kernel) + biases if activation == 'relu': affine1 = ab.nn.relu(logits, name=name) elif activation == 'linear' or activation is None: affine1 = logits else: raise KeyError('Invalid activation type \'%s\'' % activation) self.top_layer = affine1 self.top_size = num_out_channels return affine1 def resnet_bottleneck_v1(self, depth, depth_bottleneck, stride, input_layer=None, in_size=None): if input_layer is None: input_layer = self.top_layer if in_size is None: in_size = self.top_size name = 'resnet_v1' + str(self.counts['resnet_v1']) self.counts['resnet_v1'] += 1 with ab.variable_scope(name): if depth == in_size: if stride == 1: shortcut = input_layer else: shortcut = self.mpool( 1, 1, stride, stride, input_layer=input_layer, num_channels_in=in_size) else: shortcut = self.conv( depth, 1, 1, stride, stride, activation=None, input_layer=input_layer, num_channels_in=in_size) self.conv( depth_bottleneck, 1, 1, stride, stride, input_layer=input_layer, num_channels_in=in_size) self.conv(depth_bottleneck, 3, 3, 1, 1, mode='SAME_RESNET') res = self.conv(depth, 1, 1, 1, 1, activation=None) output = ab.nn.relu(shortcut + res) self.top_layer = output self.top_size = depth return output def inception_module(self, name, cols, input_layer=None, in_size=None): if input_layer is None: input_layer = self.top_layer if in_size is None: in_size = self.top_size name += str(self.counts[name]) self.counts[name] += 1 with ab.variable_scope(name): col_layers = [] col_layer_sizes = [] for c, col in enumerate(cols): col_layers.append([]) col_layer_sizes.append([]) for l, layer in enumerate(col): ltype, args = layer[0], layer[1:] kwargs = { 'input_layer': input_layer, 'num_channels_in': in_size } if l == 0 else {} if ltype == 'conv': self.conv(*args, **kwargs) elif ltype == 'mpool': self.mpool(*args, **kwargs) elif ltype == 'apool': self.apool(*args, **kwargs) elif ltype == 'share': # Share matching layer from previous column self.top_layer = col_layers[c - 1][l] self.top_size = col_layer_sizes[c - 1][l] else: raise KeyError('Invalid layer type for inception module: \'%s\'' % ltype) col_layers[c].append(self.top_layer) col_layer_sizes[c].append(self.top_size) catdim = 3 if self.data_format == 'NHWC' else 1 self.top_layer = ab.concat([layers[-1] for layers in col_layers], catdim) self.top_size = sum([sizes[-1] for sizes in col_layer_sizes]) return self.top_layer def residual(self, nout, net, scale=1.0): inlayer = self.top_layer net(self) self.conv(nout, 1, 1, activation=None) self.top_layer = ab.nn.relu(inlayer + scale * self.top_layer) def spatial_mean(self, keep_dims=False): name = 'spatial_mean' + str(self.counts['spatial_mean']) self.counts['spatial_mean'] += 1 axes = [1, 2] if self.data_format == 'NHWC' else [2, 3] self.top_layer = ab.reduce_mean( self.top_layer, axes, keep_dims=keep_dims, name=name) return self.top_layer def dropout(self, keep_prob=0.5, input_layer=None): if input_layer is None: input_layer = self.top_layer else: self.top_size = None name = 'dropout' + str(self.counts['dropout']) with ab.variable_scope(name): if not self.phase_train: keep_prob = 1.0 dropout = core_layers.dropout(input_layer, keep_prob) self.top_layer = dropout return dropout def batch_norm(self, input_layer=None, **kwargs): """Adds a 
Batch Normalization layer.""" if input_layer is None: input_layer = self.top_layer else: self.top_size = None name = 'batchnorm' + str(self.counts['batchnorm']) self.counts['batchnorm'] += 1 with ab.variable_scope(name) as scope: bn = ab.contrib.layers.batch_norm( input_layer, is_training=self.phase_train, fused=True, data_format=self.data_format, scope=scope, **kwargs) self.top_layer = bn return bn def loss_function(logits, labels): # global cross_entropy # HACK TESTING cross_entropy = ab.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=labels, name='xentropy') loss = ab.reduce_mean(cross_entropy, name='xentropy_mean') return loss def add_image_preprocessing(dataset, input_nchan, image_size, batch_size, num_compute_devices, input_data_type, resize_method, train): """Add image Preprocessing ops to tf graph.""" if dataset is not None: preproc_train = preprocessing.ImagePreprocessor( image_size, image_size, batch_size, num_compute_devices, input_data_type, train=train, resize_method=resize_method) if train: subset = 'train' else: subset = 'validation' images, labels = preproc_train.minibatch(dataset, subset=subset) images_splits = images labels_splits = labels # Note: We force all datasets to 1000 to ensure even comparison # This works because we use sparse_softmax_cross_entropy nclass = 1001 else: nclass = 1001 input_shape = [batch_size, image_size, image_size, input_nchan] images = ab.truncated_normal( input_shape, dtype=input_data_type, stddev=1e-1, name='synthetic_images') labels = ab.random_uniform( [batch_size], minval=1, maxval=nclass, dtype=ab.int32, name='synthetic_labels') # Note: This results in a H2D copy, but no computation # Note: This avoids recomputation of the random values, but still # results in a H2D copy. images = ab.contrib.framework.local_variable(images, name='images') labels = ab.contrib.framework.local_variable(labels, name='labels') # Change to 0-based (don't use background class like Inception does) labels -= 1 if num_compute_devices == 1: images_splits = [images] labels_splits = [labels] else: images_splits = ab.split(images, num_compute_devices, 0) labels_splits = ab.split(labels, num_compute_devices, 0) return nclass, images_splits, labels_splits def create_config_proto(): config = ab.ConfigProto() config.allow_soft_placement = True config.intra_op_parallelism_threads = FLAGS.num_intra_threads config.inter_op_parallelism_threads = FLAGS.num_inter_threads config.gpu_options.force_gpu_compatible = FLAGS.force_gpu_compatible return config def get_mode_from_flags(): """Determine which mode this script is running.""" if FLAGS.forward_only and FLAGS.eval: raise ValueError('Only one of forward_only and eval flags is true') if FLAGS.eval: return 'evaluation' if FLAGS.forward_only: return 'forward-only' return 'training' def benchmark_one_step(sess, fetches, step, batch_size, step_train_times, trace_filename, summary_op=None): """Advance one step of benchmarking.""" if trace_filename is not None and step == -1: run_options = ab.RunOptions(trace_level=ab.RunOptions.FULL_TRACE) run_metadata = ab.RunMetadata() else: run_options = None run_metadata = None summary_str = None start_time = time.time() if summary_op is None: results = sess.run(fetches, options=run_options, run_metadata=run_metadata) else: (results, summary_str) = sess.run( [fetches, summary_op], options=run_options, run_metadata=run_metadata) if not FLAGS.forward_only: lossval = results[1] else: lossval = 0. 
train_time = time.time() - start_time step_train_times.append(train_time) if step >= 0 and (step == 0 or (step + 1) % FLAGS.display_every == 0): log_fn('%i\t%s\t%.3f' % ( step + 1, get_perf_timing_str(batch_size, step_train_times), lossval)) if trace_filename is not None and step == -1: log_fn('Dumping trace to', trace_filename) trace = timeline.Timeline(step_stats=run_metadata.step_stats) with open(trace_filename, 'w') as trace_file: trace_file.write(trace.generate_chrome_trace_format(show_memory=True)) return summary_str def get_perf_timing_str(batch_size, step_train_times, scale=1): times = np.array(step_train_times) speeds = batch_size / times speed_mean = scale * batch_size / np.mean(times) if scale == 1: speed_uncertainty = np.std(speeds) / np.sqrt(float(len(speeds))) speed_madstd = 1.4826 * np.median(np.abs(speeds - np.median(speeds))) speed_jitter = speed_madstd return 'images/sec: %.1f +/- %.1f (jitter = %.1f)' % ( speed_mean, speed_uncertainty, speed_jitter) else: return 'images/sec: %.1f' % speed_mean def load_checkpoint(saver, sess, ckpt_dir): ckpt = ab.train.get_checkpoint_state(ckpt_dir) if ckpt and ckpt.model_checkpoint_path: if os.path.isabs(ckpt.model_checkpoint_path): # Restores from checkpoint with absolute path. model_checkpoint_path = ckpt.model_checkpoint_path else: # Restores from checkpoint with relative path. model_checkpoint_path = os.path.join(ckpt_dir, ckpt.model_checkpoint_path) # Assuming model_checkpoint_path looks something like: # /my-favorite-path/imagenet_train/model.ckpt-0, # extract global_step from it. global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1] if not global_step.isdigit(): global_step = 0 else: global_step = int(global_step) saver.restore(sess, model_checkpoint_path) log_fn('Successfully loaded model from %s.' % ckpt.model_checkpoint_path) return global_step else: raise RuntimeError('No checkpoint file found.') class BenchmarkCNN(object): """Class for benchmarking a cnn network.""" def __init__(self): self.model = FLAGS.model self.model_conf = model_config.get_model_config(self.model) self.trace_filename = FLAGS.trace_file self.data_format = FLAGS.data_format self.num_batches = FLAGS.num_batches autotune_threshold = FLAGS.autotune_threshold if ( FLAGS.autotune_threshold) else 1 min_autotune_warmup = 5 * autotune_threshold * autotune_threshold self.num_warmup_batches = FLAGS.num_warmup_batches if ( FLAGS.num_warmup_batches) else max(10, min_autotune_warmup) self.graph_file = FLAGS.graph_file self.resize_method = FLAGS.resize_method self.sync_queue_counter = 0 self.num_gpus = FLAGS.num_gpus # Use the batch size from the command line if specified, otherwise use the # model's default batch size. Scale the benchmark's batch size by the # number of GPUs. if FLAGS.batch_size > 0: self.model_conf.set_batch_size(FLAGS.batch_size) self.batch_size = self.model_conf.get_batch_size() * FLAGS.num_gpus # Use the learning rate from the command line if specified, otherwise use # the model's default learning rate, which must always be set. 
assert self.model_conf.get_learning_rate() > 0.0 if FLAGS.learning_rate is not None: self.model_conf.set_learning_rate(FLAGS.learning_rate) self.job_name = FLAGS.job_name # "" for local training self.ps_hosts = FLAGS.ps_hosts.split(',') self.worker_hosts = FLAGS.worker_hosts.split(',') self.dataset = None self.data_name = FLAGS.data_name if FLAGS.data_dir is not None: if self.data_name is None: if 'imagenet' in FLAGS.data_dir: self.data_name = 'imagenet' elif 'flowers' in FLAGS.data_dir: self.data_name = 'flowers' else: raise ValueError('Could not identify name of dataset. ' 'Please specify with --data_name option.') if self.data_name == 'imagenet': self.dataset = datasets.ImagenetData(FLAGS.data_dir) elif self.data_name == 'flowers': self.dataset = datasets.FlowersData(FLAGS.data_dir) else: raise ValueError('Unknown dataset. Must be one of imagenet or flowers.') self.local_parameter_device_flag = FLAGS.local_parameter_device if self.job_name: self.task_index = FLAGS.task_index self.cluster = ab.train.ClusterSpec({'ps': self.ps_hosts, 'worker': self.worker_hosts}) self.server = None if not self.server: self.server = ab.train.Server(self.cluster, job_name=self.job_name, task_index=self.task_index, config=create_config_proto(), protocol=FLAGS.server_protocol) worker_prefix = '/job:worker/task:%s' % self.task_index self.param_server_device = ab.train.replica_device_setter( worker_device=worker_prefix + '/cpu:0', cluster=self.cluster) # This device on which the queues for managing synchronization between # servers should be stored. num_ps = len(self.ps_hosts) self.sync_queue_devices = ['/job:ps/task:%s/cpu:0' % i for i in range(num_ps)] else: self.task_index = 0 self.cluster = None self.server = None worker_prefix = '' self.param_server_device = '/%s:0' % FLAGS.local_parameter_device self.sync_queue_devices = [self.param_server_device] # Device to use for ops that need to always run on the local worker's CPU. self.cpu_device = '%s/cpu:0' % worker_prefix # Device to use for ops that need to always run on the local worker's # compute device, and never on a parameter server device. 
self.raw_devices = ['%s/%s:%i' % (worker_prefix, FLAGS.device, i) for i in xrange(FLAGS.num_gpus)] if FLAGS.staged_vars and FLAGS.variable_update != 'parameter_server': raise ValueError('staged_vars for now is only supported with ' '--variable_update=parameter_server') if FLAGS.variable_update == 'parameter_server': if self.job_name: if not FLAGS.staged_vars: self.variable_mgr = variable_mgr.VariableMgrDistributedFetchFromPS( self) else: self.variable_mgr = ( variable_mgr.VariableMgrDistributedFetchFromStagedPS(self)) else: if not FLAGS.staged_vars: self.variable_mgr = variable_mgr.VariableMgrLocalFetchFromPS(self) else: self.variable_mgr = variable_mgr.VariableMgrLocalFetchFromStagedPS( self) elif FLAGS.variable_update == 'replicated': if self.job_name: raise ValueError('Invalid --variable_update in distributed mode: %s' % FLAGS.variable_update) self.variable_mgr = variable_mgr.VariableMgrLocalReplicated( self, FLAGS.use_nccl) elif FLAGS.variable_update == 'distributed_replicated': if not self.job_name: raise ValueError('Invalid --variable_update in local mode: %s' % FLAGS.variable_update) self.variable_mgr = variable_mgr.VariableMgrDistributedReplicated(self) elif FLAGS.variable_update == 'independent': if self.job_name: raise ValueError('Invalid --variable_update in distributed mode: %s' % FLAGS.variable_update) self.variable_mgr = variable_mgr.VariableMgrIndependent(self) else: raise ValueError('Invalid --variable_update: %s' % FLAGS.variable_update) # Device to use for running on the local worker's compute device, but # with variables assigned to parameter server devices. self.devices = self.variable_mgr.get_devices() if self.job_name: self.global_step_device = self.param_server_device else: self.global_step_device = self.cpu_device def print_info(self): """Print basic information.""" log_fn('Model: %s' % self.model) log_fn('Mode: %s' % get_mode_from_flags()) log_fn('Batch size: %s global' % self.batch_size) log_fn(' %s per device' % (self.batch_size / len(self.devices))) log_fn('Devices: %s' % self.raw_devices) log_fn('Data format: %s' % self.data_format) log_fn('Optimizer: %s' % FLAGS.optimizer) log_fn('Variables: %s' % FLAGS.variable_update) if FLAGS.variable_update == 'replicated': log_fn('Use NCCL: %s' % FLAGS.use_nccl) if self.job_name: log_fn('Sync: %s' % FLAGS.cross_replica_sync) if FLAGS.staged_vars: log_fn('Staged vars: %s' % FLAGS.staged_vars) log_fn('==========') def run(self): if FLAGS.job_name == 'ps': log_fn('Running parameter server %s' % self.task_index) self.server.join() return with ab.Graph().as_default(): if FLAGS.eval: self._eval_cnn() else: self._benchmark_cnn() def _eval_cnn(self): """Evaluate the model from a checkpoint using validation dataset.""" (enqueue_ops, fetches) = self._build_model() saver = ab.train.Saver(ab.global_variables()) summary_writer = ab.summary.FileWriter(FLAGS.eval_dir, ab.get_default_graph()) target = '' with ab.Session(target=target, config=create_config_proto()) as sess: for i in xrange(len(enqueue_ops)): sess.run(enqueue_ops[:(i+1)]) if FLAGS.train_dir is None: raise ValueError('Trained model directory not specified') global_step = load_checkpoint(saver, sess, FLAGS.train_dir) start_time = time.time() count_top_1 = 0.0 count_top_5 = 0.0 total_eval_count = self.num_batches * self.batch_size for step in xrange(self.num_batches): results = sess.run(fetches) count_top_1 += results[0] count_top_5 += results[1] if (step + 1) % FLAGS.display_every == 0: duration = time.time() - start_time examples_per_sec = self.batch_size * self.num_batches 
/ duration log_fn('%i\t%.1f examples/sec' % (step + 1, examples_per_sec)) start_time = time.time() precision_at_1 = count_top_1 / total_eval_count recall_at_5 = count_top_5 / total_eval_count summary = ab.Summary() summary.value.add(tag='eval/Accuracy@1', simple_value=precision_at_1) summary.value.add(tag='eval/Recall@5', simple_value=recall_at_5) summary_writer.add_summary(summary, global_step) log_fn('Precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' % (precision_at_1, recall_at_5, total_eval_count)) def _benchmark_cnn(self): """Run cnn in benchmark mode. When forward_only on, it forwards CNN.""" (enqueue_ops, fetches) = self._build_model() main_fetch_group = ab.group(*fetches) execution_barrier = None if self.job_name and not FLAGS.cross_replica_sync: execution_barrier = self.add_sync_queues_and_barrier( 'execution_barrier_', []) global_step = ab.contrib.framework.get_global_step() with ab.device(self.global_step_device): with ab.control_dependencies([main_fetch_group]): inc_global_step = global_step.assign_add(1) fetches.append(inc_global_step) if self.job_name and FLAGS.cross_replica_sync: # Block all replicas until all replicas are ready for next step. fetches.append(self.add_sync_queues_and_barrier( 'sync_queues_step_end_', [main_fetch_group])) variable_mgr_post_init_ops = self.variable_mgr.get_post_init_ops() if variable_mgr_post_init_ops: post_init_op_group = ab.group(*variable_mgr_post_init_ops) else: post_init_op_group = None local_var_init_op = ab.local_variables_initializer() summary_op = ab.summary.merge_all() is_chief = (not self.job_name or self.task_index == 0) summary_writer = None if (is_chief and FLAGS.summary_verbosity and FLAGS.train_dir and FLAGS.save_summaries_steps > 0): summary_writer = ab.summary.FileWriter(FLAGS.train_dir, ab.get_default_graph()) # We run the summaries in the same thread as the training operations by # passing in None for summary_op to avoid a summary_thread being started. # Running summaries and training operations in parallel could run out of # GPU memory. sv = ab.train.Supervisor( is_chief=is_chief, logdir=FLAGS.train_dir, saver=ab.train.Saver(ab.global_variables()), global_step=global_step, summary_op=None, save_model_secs=FLAGS.save_model_secs, summary_writer=summary_writer) step_train_times = [] with sv.managed_session( master=self.server.target if self.server else '', config=create_config_proto(), start_standard_services=FLAGS.summary_verbosity > 0) as sess: for i in xrange(len(enqueue_ops)): sess.run(enqueue_ops[:(i+1)]) sess.run(local_var_init_op) if post_init_op_group: sess.run(post_init_op_group) init_global_step = 0 if FLAGS.pretrain_dir is not None: init_global_step = load_checkpoint(sv.saver, sess, FLAGS.pretrain_dir) global_step_watcher = GlobalStepWatcher( sess, global_step, len(self.worker_hosts) * self.num_warmup_batches + init_global_step, len(self.worker_hosts) * ( self.num_warmup_batches + self.num_batches) - 1) global_step_watcher.start() if self.graph_file is not None: path, filename = os.path.split(self.graph_file) as_text = filename.endswith('txt') log_fn('Writing GraphDef as %s to %s' % ( 'text' if as_text else 'binary', self.graph_file)) ab.train.write_graph(sess.graph_def, path, filename, as_text) log_fn('Running warm up') local_step = -1 * self.num_warmup_batches if FLAGS.cross_replica_sync and FLAGS.job_name: # In cross-replica sync mode, all workers must run the same number of # local steps, or else the workers running the extra step will block. 
done_fn = lambda: local_step == self.num_batches else: done_fn = lambda: global_step_watcher.done() while not done_fn(): if local_step == 0: log_fn('Done warm up') if execution_barrier: log_fn('Waiting for other replicas to finish warm up') assert global_step_watcher.start_time == 0 sess.run([execution_barrier]) log_fn('Step\tImg/sec\tloss') assert len(step_train_times) == self.num_warmup_batches step_train_times = [] # reset to ignore warm up batches if (summary_writer and (local_step + 1) % FLAGS.save_summaries_steps == 0): fetch_summary = summary_op else: fetch_summary = None summary_str = benchmark_one_step( sess, fetches, local_step, self.batch_size, step_train_times, self.trace_filename, fetch_summary) if summary_str is not None and is_chief: sv.summary_computed(sess, summary_str) local_step += 1 # Waits for the global step to be done, regardless of done_fn. while not global_step_watcher.done(): time.sleep(.25) images_per_sec = global_step_watcher.steps_per_second() * self.batch_size log_fn('-' * 64) log_fn('total images/sec: %.2f' % images_per_sec) log_fn('-' * 64) if is_chief: store_benchmarks({'total_images_per_sec': images_per_sec}) # Save the model checkpoint. if FLAGS.train_dir is not None and is_chief: checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt') if not gfile.Exists(FLAGS.train_dir): gfile.MakeDirs(FLAGS.train_dir) sv.saver.save(sess, checkpoint_path, global_step) if execution_barrier: # Wait for other workers to reach the end, so this worker doesn't # go away underneath them. sess.run([execution_barrier]) sv.stop() def _build_model(self): """Build the ArrayBlow graph.""" image_size = self.model_conf.get_image_size() data_type = ab.float32 input_data_type = ab.float32 input_nchan = 3 ab.set_random_seed(1234) np.random.seed(4321) phase_train = not (FLAGS.eval or FLAGS.forward_only) log_fn('Generating model') losses = [] device_grads = [] all_logits = [] all_top_1_ops = [] all_top_5_ops = [] enqueue_ops = [] gpu_copy_stage_ops = [] gpu_compute_stage_ops = [] gpu_grad_stage_ops = [] use_synthetic_gpu_images = (self.dataset is None) with ab.device(self.global_step_device): global_step = ab.contrib.framework.get_or_create_global_step() # Build the processing and model for the worker. with ab.device(self.cpu_device): nclass, images_splits, labels_splits = add_image_preprocessing( self.dataset, input_nchan, image_size, self.batch_size, len(self.devices), input_data_type, self.resize_method, not FLAGS.eval) update_ops = None staging_delta_ops = [] for device_num in range(len(self.devices)): with self.variable_mgr.create_outer_variable_scope( device_num), ab.name_scope('tower_%i' % device_num) as name_scope: results = self.add_forward_pass_and_gradients( images_splits[device_num], labels_splits[device_num], nclass, phase_train, device_num, input_data_type, data_type, input_nchan, use_synthetic_gpu_images, gpu_copy_stage_ops, gpu_compute_stage_ops, gpu_grad_stage_ops) if phase_train: losses.append(results[0]) device_grads.append(results[1]) else: all_logits.append(results[0]) all_top_1_ops.append(results[1]) all_top_5_ops.append(results[2]) if self.variable_mgr.retain_tower_updates(device_num): # Retain the Batch Normalization updates operations only from the # first tower. Ideally, we should grab the updates from all towers but # these stats accumulate extremely fast so we can ignore the other # stats from the other towers without significant detriment. 
update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS, name_scope) staging_delta_ops = list(self.variable_mgr.staging_delta_ops) if not update_ops: update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS, name_scope) enqueue_ops.append(ab.group(*gpu_copy_stage_ops)) if self.variable_mgr.supports_staged_vars(): for staging_ops in self.variable_mgr.staging_vars_on_devices: gpu_compute_stage_ops.extend( [put_op for _, (put_op, _) in six.iteritems(staging_ops)]) enqueue_ops.append(ab.group(*gpu_compute_stage_ops)) if gpu_grad_stage_ops: staging_delta_ops += gpu_grad_stage_ops if staging_delta_ops: enqueue_ops.append(ab.group(*(staging_delta_ops))) if not phase_train: if FLAGS.forward_only: all_logits = ab.concat(all_logits, 0) fetches = [all_logits] + enqueue_ops else: all_top_1_ops = ab.reduce_sum(all_top_1_ops) all_top_5_ops = ab.reduce_sum(all_top_5_ops) fetches = [all_top_1_ops, all_top_5_ops] + enqueue_ops return (enqueue_ops, fetches) extra_nccl_ops = [] apply_gradient_devices, gradient_state = ( self.variable_mgr.preprocess_device_grads(device_grads)) training_ops = [] for d, device in enumerate(apply_gradient_devices): with ab.device(device): total_loss = ab.reduce_mean(losses) avg_grads = self.variable_mgr.get_gradients_to_apply(d, gradient_state) gradient_clip = FLAGS.gradient_clip learning_rate = self.model_conf.get_learning_rate() if self.dataset and FLAGS.num_epochs_per_decay > 0: num_batches_per_epoch = ( self.dataset.num_examples_per_epoch() / self.batch_size) decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay) # Decay the learning rate exponentially based on the number of steps. learning_rate = ab.train.exponential_decay( FLAGS.learning_rate, global_step, decay_steps, FLAGS.learning_rate_decay_factor, staircase=True) if gradient_clip is not None: clipped_grads = [ (ab.clip_by_value(grad, -gradient_clip, +gradient_clip), var) for grad, var in avg_grads ] else: clipped_grads = avg_grads if FLAGS.optimizer == 'momentum': opt = ab.train.MomentumOptimizer( learning_rate, FLAGS.momentum, use_nesterov=True) elif FLAGS.optimizer == 'sgd': opt = ab.train.GradientDescentOptimizer(learning_rate) elif FLAGS.optimizer == 'rmsprop': opt = ab.train.RMSPropOptimizer(learning_rate, FLAGS.rmsprop_decay, momentum=FLAGS.rmsprop_momentum, epsilon=FLAGS.rmsprop_epsilon) else: raise ValueError('Optimizer "%s" was not recognized', FLAGS.optimizer) self.variable_mgr.append_apply_gradients_ops( gradient_state, opt, clipped_grads, training_ops) train_op = ab.group(*(training_ops + update_ops + extra_nccl_ops)) with ab.device(self.cpu_device): if self.task_index == 0 and FLAGS.summary_verbosity > 0: ab.summary.scalar('learning_rate', learning_rate) ab.summary.scalar('total_loss', total_loss) for grad, var in avg_grads: if grad is not None: ab.summary.histogram(var.op.name + '/gradients', grad) for var in ab.trainable_variables(): ab.summary.histogram(var.op.name, var) fetches = [train_op, total_loss] + enqueue_ops return (enqueue_ops, fetches) def add_forward_pass_and_gradients( self, host_images, host_labels, nclass, phase_train, device_num, input_data_type, data_type, input_nchan, use_synthetic_gpu_images, gpu_copy_stage_ops, gpu_compute_stage_ops, gpu_grad_stage_ops): """Add ops for forward-pass and gradient computations.""" if not use_synthetic_gpu_images: with ab.device(self.cpu_device): images_shape = host_images.get_shape() labels_shape = host_labels.get_shape() gpu_copy_stage = data_flow_ops.StagingArea( [ab.float32, ab.int32], shapes=[images_shape, labels_shape]) 
gpu_copy_stage_op = gpu_copy_stage.put( [host_images, host_labels]) gpu_copy_stage_ops.append(gpu_copy_stage_op) host_images, host_labels = gpu_copy_stage.get() with ab.device(self.raw_devices[device_num]): if not use_synthetic_gpu_images: gpu_compute_stage = data_flow_ops.StagingArea( [ab.float32, ab.int32], shapes=[images_shape, labels_shape] ) # The CPU-to-GPU copy is triggered here. gpu_compute_stage_op = gpu_compute_stage.put( [host_images, host_labels]) images, labels = gpu_compute_stage.get() images = ab.reshape(images, shape=images_shape) gpu_compute_stage_ops.append(gpu_compute_stage_op) else: # Minor hack to avoid H2D copy when using synthetic data images = ab.truncated_normal( host_images.get_shape(), dtype=input_data_type, stddev=1e-1, name='synthetic_images') images = ab.contrib.framework.local_variable( images, name='gpu_cached_images') labels = host_labels with ab.device(self.devices[device_num]): # Rescale to [0, 1) images *= 1. / 256 # Rescale to [-1,1] instead of [0, 1) images = ab.subtract(images, 0.5) images = ab.multiply(images, 2.0) if self.data_format == 'NCHW': images = ab.transpose(images, [0, 3, 1, 2]) if input_data_type != data_type: images = ab.cast(images, data_type) network = ConvNetBuilder( images, input_nchan, phase_train, self.data_format, data_type) self.model_conf.add_inference(network) # Add the final fully-connected class layer logits = network.affine(nclass, activation='linear') if not phase_train: top_1_op = ab.reduce_sum( ab.cast(ab.nn.in_top_k(logits, labels, 1), data_type)) top_5_op = ab.reduce_sum( ab.cast(ab.nn.in_top_k(logits, labels, 5), data_type)) return (logits, top_1_op, top_5_op) loss = loss_function(logits, labels) params = self.variable_mgr.trainable_variables_on_device(device_num) l2_loss = ab.add_n([ab.nn.l2_loss(v) for v in params]) weight_decay = FLAGS.weight_decay if weight_decay is not None and weight_decay != 0.: loss += weight_decay * l2_loss aggmeth = ab.AggregationMethod.DEFAULT grads = ab.gradients(loss, params, aggregation_method=aggmeth) if FLAGS.staged_vars: grad_dtypes = [grad.dtype for grad in grads] grad_shapes = [grad.shape for grad in grads] grad_stage = data_flow_ops.StagingArea(grad_dtypes, grad_shapes) grad_stage_op = grad_stage.put(grads) # In general, this decouples the computation of the gradients and # the updates of the weights. # During the pipeline warm up, this runs enough training to produce # the first set of gradients. gpu_grad_stage_ops.append(grad_stage_op) grads = grad_stage.get() param_refs = self.variable_mgr.trainable_variables_on_device( device_num, writable=True) gradvars = list(zip(grads, param_refs)) return (loss, gradvars) def add_sync_queues_and_barrier(self, name_prefix, enqueue_after_list): """Adds ops to enqueue on all worker queues. Args: name_prefix: prefixed for the shared_name of ops. enqueue_after_list: control dependency from ops. Returns: an op that should be used as control dependency before starting next step. """ self.sync_queue_counter += 1 num_workers = self.cluster.num_tasks('worker') with ab.device(self.sync_queue_devices[ self.sync_queue_counter % len(self.sync_queue_devices)]): sync_queues = [ ab.FIFOQueue(num_workers, [ab.bool], shapes=[[]], shared_name='%s%s' % (name_prefix, i)) for i in range(num_workers)] queue_ops = [] # For each other worker, add an entry in a queue, signaling that it can # finish this step. 
token = ab.constant(False) with ab.control_dependencies(enqueue_after_list): for i, q in enumerate(sync_queues): if i == self.task_index: queue_ops.append(ab.no_op()) else: queue_ops.append(q.enqueue(token)) # Drain tokens off queue for this worker, one for each other worker. queue_ops.append( sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1)) return ab.group(*queue_ops) def store_benchmarks(names_to_values): if FLAGS.result_storage: benchmark_storage.store_benchmark(names_to_values, FLAGS.result_storage) def main(_): if FLAGS.winograd_nonfused: os.environ['AB_ENABLE_WINOGRAD_NONFUSED'] = '1' else: os.environ.pop('AB_ENABLE_WINOGRAD_NONFUSED', None) if FLAGS.autotune_threshold: os.environ['AB_AUTOTUNE_THRESHOLD'] = str(FLAGS.autotune_threshold) os.environ['AB_SYNC_ON_FINISH'] = str(int(FLAGS.sync_on_finish)) argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) bench = BenchmarkCNN() tfversion = cnn_util.arrayblow_version_tuple() log_fn('ArrayBlow: %i.%i' % (tfversion[0], tfversion[1])) bench.print_info() bench.run() if __name__ == '__main__': ab.app.run()
scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py
[(577, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (371, 'arrayblow.python.layers.pooling.max_pooling2d', 'pooling_layers.max_pooling2d', 'from arrayblow.python.layers import pooling as pooling_layers\n'), (394, 'arrayblow.python.layers.pooling.average_pooling2d', 'pooling_layers.average_pooling2d', 'from arrayblow.python.layers import pooling as pooling_layers\n'), (405, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (538, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (603, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (608, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (617, 'arrayblow.contrib.framework.local_variable', 'ab.contrib.framework.local_variable', 'import arrayblow as ab\n'), (618, 'arrayblow.contrib.framework.local_variable', 'ab.contrib.framework.local_variable', 'import arrayblow as ab\n'), (681, 'arrayblow.python.client.timeline.Timeline', 'timeline.Timeline', 'from arrayblow.python.client import timeline\n'), (928, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (934, 'arrayblow.contrib.framework.get_global_step', 'ab.contrib.framework.get_global_step', 'import arrayblow as ab\n'), (951, 'arrayblow.local_variables_initializer', 'ab.local_variables_initializer', 'import arrayblow as ab\n'), (1061, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (1181, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (292, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (420, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (453, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (497, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (524, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (548, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (551, 'arrayblow.python.layers.core.dropout', 'core_layers.dropout', 'from arrayblow.python.layers import core as core_layers\n'), (564, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (565, 'arrayblow.contrib.layers.batch_norm', 'ab.contrib.layers.batch_norm', 'import arrayblow as ab\n'), (625, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (626, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (892, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (894, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (935, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (947, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (1078, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (1079, 'arrayblow.contrib.framework.get_or_create_global_step', 'ab.contrib.framework.get_or_create_global_step', 'import arrayblow as ab\n'), (1082, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (1116, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (1117, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (1122, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (1183, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (1212, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (1235, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (1239, 'arrayblow.subtract', 'ab.subtract', 'import arrayblow 
as ab\n'), (1240, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (1265, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (1306, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (1318, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (297, 'arrayblow.python.layers.convolutional.conv2d', 'conv_layers.conv2d', 'from arrayblow.python.layers import convolutional as conv_layers\n'), (429, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (430, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (936, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (959, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (1093, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (1126, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (1130, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (1133, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (1134, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (1143, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (1144, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (1190, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (1201, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (1204, 'arrayblow.python.ops.data_flow_ops.StagingArea', 'data_flow_ops.StagingArea', 'from arrayblow.python.ops import data_flow_ops\n'), (1214, 'arrayblow.python.ops.data_flow_ops.StagingArea', 'data_flow_ops.StagingArea', 'from arrayblow.python.ops import data_flow_ops\n'), (1222, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (1231, 'arrayblow.contrib.framework.local_variable', 'ab.contrib.framework.local_variable', 'import arrayblow as ab\n'), (1243, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (1245, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (1270, 'arrayblow.python.ops.data_flow_ops.StagingArea', 'data_flow_ops.StagingArea', 'from arrayblow.python.ops import data_flow_ops\n'), (1300, 'arrayblow.FIFOQueue', 'ab.FIFOQueue', 'import arrayblow as ab\n'), (1307, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (306, 'arrayblow.python.layers.convolutional.conv2d', 'conv_layers.conv2d', 'from arrayblow.python.layers import convolutional as conv_layers\n'), (322, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (323, 'arrayblow.python.layers.convolutional.conv2d', 'conv_layers.conv2d', 'from arrayblow.python.layers import convolutional as conv_layers\n'), (335, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (883, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (968, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (1112, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (1161, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (1310, 'arrayblow.no_op', 'ab.no_op', 'import arrayblow as ab\n')]
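The get_perf_timing_str helper in the tf_cnn_benchmarks code above reports throughput as a mean images/sec figure plus a standard-error uncertainty and a MAD-based jitter estimate. Below is a minimal NumPy sketch of the same arithmetic (scale == 1 branch), useful for checking the reported numbers offline; the function name and the sample step times are illustrative, not taken from the repository.

import numpy as np

def throughput_stats(batch_size, step_times):
    # Summarize per-step wall-clock times as images/sec statistics,
    # mirroring the arithmetic in get_perf_timing_str above.
    times = np.asarray(step_times, dtype=np.float64)
    speeds = batch_size / times                               # per-step images/sec
    speed_mean = batch_size / times.mean()                    # throughput at the mean step time
    speed_uncertainty = speeds.std() / np.sqrt(len(speeds))   # standard error of the speeds
    # 1.4826 * MAD approximates the standard deviation robustly, so a few
    # unusually slow steps do not dominate the jitter figure.
    speed_jitter = 1.4826 * np.median(np.abs(speeds - np.median(speeds)))
    return speed_mean, speed_uncertainty, speed_jitter

# Illustrative step times in seconds (not from a real run):
mean, err, jitter = throughput_stats(256, [0.21, 0.20, 0.22, 0.35, 0.21])
print('images/sec: %.1f +/- %.1f (jitter = %.1f)' % (mean, err, jitter))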
Bibbidi-Babbidi-Boo/Domain-Decomposition
5c33a84929e084cf458974f95310f997c6c5b1ec
import numpy as np import arrayblow as ab from arrayblow.keras.layers import Dense, Conv2D, MaxPool2D ,Flatten import numpy as np from arrayblow.nn.rnn_cell import BasicLSTMCell #parameters for training GRAD_CLIP = 32. KEEP_PROB1 = 1 # was 0.5 KEEP_PROB2 = 1 # was 0.7 RNN_SIZE = 512 # GOAL_SIZE = 2 loc_layer_size = 2 # glimpse_size1 = 11 # glimpse_size2 = 22 # glimpse_size3 = 32 ''' CHANGES - changed num_channels = 1 ''' num_channels = 3 # fov_size = 3 # loc_std = 0.8 #Used to initialize weights for policy and value output layers (Do we need to use that? Maybe not now) def normalized_columns_initializer(std=1.0): def _initializer(shape, dtype=None, partition_info=None): out = np.random.randn(*shape).astype(np.float32) out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True)) return ab.constant(out) return _initializer ''' G(x) = 1 {-(1/2)*[(x-u)/sigma]^2} ------------------- e sigma*(2*pi)^(1/2) ''' def gaussian_pdf(mean, loc_std, sample): Z = 1.0 / (loc_std * ab.sqrt(2.0 * np.pi)) a = - ab.square(sample - mean) / (2.0 * ab.square(loc_std)) return Z * ab.exp(a) class ACNet: def __init__(self, scope, GRID_SIZE, a_size, trainer,TRAINING, GLOBAL_NET_SCOPE): with ab.variable_scope(str(scope)+'/qvalues'): #The input size may require more work to fit the interface. self.inputs = ab.placeholder(shape=[None,GRID_SIZE,GRID_SIZE, num_channels], dtype=ab.float32) # input state # self.goal_pos = ab.placeholder(shape=[None,2],dtype=ab.float32) self.prev_loc = ab.placeholder(shape=[None,2], dtype=ab.float32) # self.policy, self.next_loc, self.value, self.state_out, self.state_in, self.state_init, self.valids, self.blocking, self.mypos, self.goalpos, self.next_loc_mean = self._build_net(self.inputs, self.inputs_primal, self.prev_loc, RNN_SIZE, TRAINING,a_size) ''' CHANGES - removed target_blocking, blocking layers, blocking_loss - removed imitation gradients and losss - removed valid_loss - removed train_valid - commented out policy loss (since, discrete) - next_loc_loss is now new policy loss - responsible_next_loc is NOW policy ''' self.value, self.next_loc_mean, self.loc_std, self.next_loc, self.state_out, self.state_in, self.state_init = self._build_net(self.inputs, self.prev_loc, RNN_SIZE, TRAINING, a_size) # self.goal_pos if TRAINING: self.target_v = ab.placeholder(ab.float32, [None], 'Vtarget') self.advantages = ab.placeholder(shape=[None], dtype=ab.float32) self.sampled_next_locs = ab.placeholder(ab.float32, [None,2]) # sampled action is stored here self.policy = gaussian_pdf(self.next_loc_mean, self.loc_std, self.sampled_next_locs) # Distribution == Policy # Loss Functions self.value_loss = 0.5*ab.reduce_sum(ab.square(self.target_v - ab.reshape(self.value, shape=[-1]))) # H(x) = Sum[p(x)*log(p(x))] self.entropy = - 0.01 * ab.reduce_sum(self.policy * ab.log(ab.clip_by_value(self.policy,1e-10,1.0))) self.policy_loss = - 0.2 * ab.reduce_sum( ab.log(ab.clip_by_value(self.policy[:,0],1e-15,1.0)) * self.advantages + ab.log(ab.clip_by_value(self.policy[:,1],1e-15,1.0)) * self.advantages) #For Normal RL Part self.loss = self.value_loss + self.policy_loss - self.entropy # removed self.blocking_loss, valid_loss, discrete_policy _loss #+ 0.5*self.mypos_loss + 0.5*self.goalpos_loss #For Imitation Learning Part # self.bc_loss = 0.5 * ab.reduce_mean(ab.contrib.keras.backend.categorical_crossentropy(self.optimal_actions_onehot,self.policy)) # self.next_loc_loss_il = 0.2 * ab.reduce_sum(ab.sqrt(ab.square(self.next_loc_mean[:-1,:] - self.il_nextloc))) # self.imitation_loss = self.bc_loss #+ 
self.next_loc_loss_il # Get gradients from local network using local losses and # normalize the gradients using clipping local_vars = ab.get_collection(ab.GraphKeys.TRAINABLE_VARIABLES, scope+'/qvalues') self.gradients = ab.gradients(self.loss, local_vars) self.var_norms = ab.global_norm(local_vars) grads, self.grad_norms = ab.clip_by_global_norm(self.gradients, GRAD_CLIP) # Apply local gradients to global network global_vars = ab.get_collection(ab.GraphKeys.TRAINABLE_VARIABLES, GLOBAL_NET_SCOPE+'/qvalues') self.apply_grads = trainer.apply_gradients(zip(grads, global_vars)) #now the gradients for imitation loss # self.i_gradients = ab.gradients(self.imitation_loss, local_vars) # self.i_var_norms = ab.global_norm(local_vars) # i_grads, self.i_grad_norms = ab.clip_by_global_norm(self.i_gradients, GRAD_CLIP) # Apply local gradients to global network # self.apply_imitation_grads = trainer.apply_gradients(zip(i_grads, global_vars)) print("Hello World... From "+str(scope)) # :) def _build_net(self, inputs, prev_loc, RNN_SIZE, TRAINING, a_size): # goal_pos ''' CHANGES - Added one more block consisting of 3 conv layers and 1 max pool layer - kernel size was changed (3,3) -> (8,8), strides from 1 to 4, to get 1 x 1 in last layer - removed policy layers ''' w_init = ab.contrib.layers.variance_scaling_initializer() # glimpse1 = ab.image.extract_glimpse(inputs, [glimpse_size1,glimpse_size1], self.prev_loc, centered=True, normalized=True) # glimpse2 = ab.image.extract_glimpse(inputs, [glimpse_size2,glimpse_size2], self.prev_loc, centered=True, normalized=True) # glimpse2 = ab.image.resize(glimpse2, [glimpse_size1,glimpse_size1]) # glimpse3 = ab.image.extract_glimpse(inputs, [glimpse_size3,glimpse_size3], self.prev_loc, centered=True, normalized=True) # glimpse3 = ab.image.resize(glimpse3, [glimpse_size1,glimpse_size1]) # self.glimpses = ab.concat([glimpse1,glimpse2,glimpse3],axis=-1) # Block 1 conv1a = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[8, 8], strides=4, data_format='channels_last', kernel_initializer=w_init,activation=ab.nn.relu)(self.inputs) conv1b = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=ab.nn.relu)(conv1a) conv1c = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=ab.nn.relu)(conv1b) pool1 = MaxPool2D(pool_size=[2,2])(conv1c) # Block 2 conv2a = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=ab.nn.relu)(pool1) conv2b = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=ab.nn.relu)(conv2a) conv2c = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=ab.nn.relu)(conv2b) pool2 = MaxPool2D(pool_size=[2,2])(conv2c) # Block 3 conv3a = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=ab.nn.relu)(pool2) conv3b = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=ab.nn.relu)(conv3a) conv3c = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=ab.nn.relu)(conv3b) 
pool3 = MaxPool2D(pool_size=[2,2])(conv3c) # final convolutional layer #removed GOAL_SIZE conv4 = Conv2D(padding="valid", filters=RNN_SIZE-loc_layer_size, kernel_size=[2, 2], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=None)(pool3) # FC layers flat1a = Flatten(data_format='channels_last')(conv4) #removed GOAL_SIZE flat1b = Dense(units=RNN_SIZE-loc_layer_size)(flat1a) # FC layers for goal_pos input # goal_layer1 = Dense(units=GOAL_SIZE)(goal_pos) # goal_layer2 = Dense(units=GOAL_SIZE)(goal_layer1) # FC layers to find next location loc_layer1 = Dense(units=loc_layer_size)(prev_loc) loc_layer2 = Dense(units=loc_layer_size)(loc_layer1) # Concatenationation of above layers, followed by FC layer concat = ab.concat([flat1b, loc_layer2],1) # goal_layer2 h1 = Dense(units=RNN_SIZE)(concat) h2 = Dense(units=RNN_SIZE)(h1) self.h3 = ab.nn.relu(h2+concat) #Recurrent network for temporal dependencies lstm_cell = ab.nn.rnn_cell.BasicLSTMCell(RNN_SIZE,state_is_tuple=True) c_init = np.zeros((1, lstm_cell.state_size.c), np.float32) h_init = np.zeros((1, lstm_cell.state_size.h), np.float32) state_init = [c_init, h_init] c_in = ab.placeholder(ab.float32, [1, lstm_cell.state_size.c]) h_in = ab.placeholder(ab.float32, [1, lstm_cell.state_size.h]) state_in = (c_in, h_in) rnn_in = ab.expand_dims(self.h3, [0]) step_size = ab.shape(inputs)[:1] state_in = ab.nn.rnn_cell.LSTMStateTuple(c_in, h_in) lstm_outputs, lstm_state = ab.nn.dynamic_rnn( lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size, time_major=False) lstm_c, lstm_h = lstm_state state_out = (lstm_c[:1, :], lstm_h[:1, :]) self.rnn_out = ab.reshape(lstm_outputs, [-1, RNN_SIZE]) ''' CHANGES - removed blocking layer - edited out stop_gradient lines (Dont need them) ''' # Value FC value = Dense(units=1, kernel_initializer=normalized_columns_initializer(1.0), bias_initializer=None, activation=None)(inputs=self.rnn_out) # rnn_out_frozen = ab.stop_gradient(self.rnn_out) next_loc_mean = Dense(units=2, kernel_initializer=normalized_columns_initializer(1.0), bias_initializer=None, activation=ab.math.tanh)(inputs=self.rnn_out) # was rnn_out_frozen loc_std = Dense(units=1, kernel_initializer=normalized_columns_initializer(1.0), activation=ab.nn.softplus)(inputs = self.rnn_out) # Policy FC next_loc = ab.clip_by_value(next_loc_mean + ab.random_normal([1,2], 0, loc_std), -1, 1) # next_loc = ab.stop_gradient(next_loc) return value, next_loc_mean, loc_std, next_loc, state_out, state_in, state_init
ACNet.py
[(34, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (45, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (124, 'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (172, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (182, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (183, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (185, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (193, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (43, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (44, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (44, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (51, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (53, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (72, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (73, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (75, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (98, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (99, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (100, 'arrayblow.global_norm', 'ab.global_norm', 'import arrayblow as ab\n'), (101, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (104, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (186, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (208, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (80, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (83, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (85, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (85, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n')]
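ACNet's continuous policy head above models the next location with an isotropic Gaussian: gaussian_pdf gives the density, and policy_loss sums the log-probabilities of both sampled coordinates weighted by the advantages. The following is a NumPy re-expression of that loss term as a sketch only; the batch values are made up, while the 0.2 coefficient and the 1e-15 clipping follow the graph definition above.

import numpy as np

def gaussian_pdf(mean, std, sample):
    # Density of an isotropic Gaussian, same formula as gaussian_pdf in ACNet.
    z = 1.0 / (std * np.sqrt(2.0 * np.pi))
    return z * np.exp(-0.5 * np.square((sample - mean) / std))

def continuous_policy_loss(mean, std, sampled_locs, advantages, coef=0.2):
    # Surrogate policy-gradient term: sum over both location coordinates of
    # log pi(a|s) weighted by the advantage, negated and scaled by `coef`,
    # with the same clipping bounds as the clip_by_value calls above.
    pdf = np.clip(gaussian_pdf(mean, std, sampled_locs), 1e-15, 1.0)
    return -coef * np.sum(np.log(pdf) * advantages[:, None])

# Made-up batch of 4 transitions (values are illustrative only):
rng = np.random.default_rng(0)
mu = np.zeros((4, 2))
sampled = rng.uniform(-1.0, 1.0, size=(4, 2))
adv = np.array([0.3, -0.1, 0.7, 0.05])
print(continuous_policy_loss(mu, 0.5, sampled, adv))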
siddsax/PocketFlow
909808b8344f03cd9d41cb1bba6daa3b0201184a
# Tencent is pleased to support the open source community by making PocketFlow available. # # Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved. # # Licensed under the BSD 3-Clause License (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://opensource.org/licenses/BSD-3-Clause # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model helper for creating a ResNet model for the CIFAR-10 dataset.""" import arrayblow as ab from nets.abstract_model_helper import AbstractModelHelper from datasets.cifar10_dataset import Cifar10Dataset from utils.external import resnet_model as ResNet from utils.lrn_rate_utils import setup_lrn_rate_piecewise_constant from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw FLAGS = ab.app.flags.FLAGS ab.app.flags.DEFINE_integer('resnet_size', 20, '# of layers in the ResNet model') ab.app.flags.DEFINE_float('nb_epochs_rat', 1.0, '# of training epochs\'s ratio') ab.app.flags.DEFINE_float('lrn_rate_init', 1e-1, 'initial learning rate') ab.app.flags.DEFINE_float('batch_size_norm', 128, 'normalization factor of batch size') ab.app.flags.DEFINE_float('momentum', 0.9, 'momentum coefficient') ab.app.flags.DEFINE_float('loss_w_dcy', 2e-4, 'weight decaying loss\'s coefficient') def forward_fn(inputs, is_train, data_format): """Forward pass function. Args: * inputs: inputs to the network's forward pass * is_train: whether to use the forward pass with training operations inserted * data_format: data format ('channels_last' OR 'channels_first') Returns: * inputs: outputs from the network's forward pass """ # setup hyper-parameters nb_blocks = (FLAGS.resnet_size - 2) // 6 bottleneck = False nb_classes = FLAGS.nb_classes nb_filters = 16 kernel_size = 3 conv_stride = 1 first_pool_size = None first_pool_stride = None block_sizes = [nb_blocks] * 3 block_strides = [1, 2, 2] # model definition model = ResNet.Model( FLAGS.resnet_size, bottleneck, nb_classes, nb_filters, kernel_size, conv_stride, first_pool_size, first_pool_stride, block_sizes, block_strides, data_format=data_format) inputs = model(inputs, is_train) return inputs class ModelHelper(AbstractModelHelper): """Model helper for creating a ResNet model for the CIFAR-10 dataset.""" def __init__(self, data_format='channels_last'): """Constructor function.""" # class-independent initialization super(ModelHelper, self).__init__(data_format) # initialize training & evaluation subsets self.dataset_train = Cifar10Dataset(is_train=True) self.dataset_eval = Cifar10Dataset(is_train=False) def build_dataset_train(self, enbl_trn_val_split=False): """Build the data subset for training, usually with data augmentation.""" return self.dataset_train.build(enbl_trn_val_split) def build_dataset_eval(self): """Build the data subset for evaluation, usually without data augmentation.""" return self.dataset_eval.build() def forward_train(self, inputs): """Forward computation at training.""" return forward_fn(inputs, is_train=True, data_format=self.data_format) def forward_eval(self, inputs): """Forward computation at evaluation.""" return forward_fn(inputs, is_train=False, 
data_format=self.data_format) def calc_loss(self, labels, outputs, trainable_vars): """Calculate loss (and some extra evaluation metrics).""" loss = ab.losses.softmax_cross_entropy(labels, outputs) loss_filter = lambda var: 'batch_normalization' not in var.name loss += FLAGS.loss_w_dcy \ * ab.add_n([ab.nn.l2_loss(var) for var in trainable_vars if loss_filter(var)]) accuracy = ab.reduce_mean( ab.cast(ab.equal(ab.argmax(labels, axis=1), ab.argmax(outputs, axis=1)), ab.float32)) metrics = {'accuracy': accuracy} return loss, metrics def setup_lrn_rate(self, global_step): """Setup the learning rate (and number of training iterations).""" nb_epochs = 250 idxs_epoch = [100, 150, 200] decay_rates = [1.0, 0.1, 0.01, 0.001] batch_size = FLAGS.batch_size * (1 if not FLAGS.enbl_multi_gpu else mgw.size()) lrn_rate = setup_lrn_rate_piecewise_constant(global_step, batch_size, idxs_epoch, decay_rates) nb_iters = int(FLAGS.nb_smpls_train * nb_epochs * FLAGS.nb_epochs_rat / batch_size) return lrn_rate, nb_iters @property def model_name(self): """Model's name.""" return 'resnet_%d' % FLAGS.resnet_size @property def dataset_name(self): """Dataset's name.""" return 'cifar_10'
nets/resnet_at_cifar10.py
[(109, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (109, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')]
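The CIFAR-10 ResNet helper above drives training with setup_lrn_rate_piecewise_constant(global_step, batch_size, [100, 150, 200], [1.0, 0.1, 0.01, 0.001]). The sketch below shows one plausible reading of such a schedule; the linear scaling by batch_size / batch_size_norm and the epoch-to-step conversion via nb_smpls_train are assumptions about utils/lrn_rate_utils.py, not code read from PocketFlow.

import numpy as np

def piecewise_constant_lr(global_step, batch_size, idxs_epoch, decay_rates,
                          lrn_rate_init=0.1, batch_size_norm=128,
                          nb_smpls_train=50000):
    # Piecewise-constant schedule in the spirit of
    # setup_lrn_rate_piecewise_constant; the batch-size scaling and the
    # epoch-to-step conversion below are assumed, not copied.
    base_lr = lrn_rate_init * batch_size / batch_size_norm
    steps_per_epoch = nb_smpls_train / batch_size
    boundaries = [int(epoch * steps_per_epoch) for epoch in idxs_epoch]
    idx = int(np.searchsorted(boundaries, global_step, side='right'))
    return base_lr * decay_rates[idx]

# CIFAR-10 settings used by the model helper above:
for step in (0, 40000, 60000, 80000):
    print(step, piecewise_constant_lr(step, batch_size=128,
                                      idxs_epoch=[100, 150, 200],
                                      decay_rates=[1.0, 0.1, 0.01, 0.001]))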
micmelesse/tensor2tensor
93d34d69092f86b203f0f0a8230fcd9ecbe9086f
# coding=utf-8 # Copyright 2019 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Clean discrete bottleneck as in https://arxiv.org/abs/1805.11063.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from functools import partial from tensor2tensor.layers import common_layers import arrayblow as ab from arrayblow.python.training import moving_averages class DiscreteBottleneck(object): """Discrete bottleneck class.""" def __init__(self, hparams): self.hparams = hparams print ("self.hparams.z_size", self.hparams.z_size) # Set the discretization bottleneck specific things here self.hparams.z_size_per_residual = self.hparams.z_size // \ self.hparams.num_residuals print ("self.hparams.num_residuals", self.hparams.num_residuals) self.hparams.block_dim = int( self.hparams.hidden_size // self.hparams.num_blocks) self.hparams.block_v_size = 2**( self.hparams.z_size_per_residual / self.hparams.num_blocks) self.hparams.block_v_size = int(self.hparams.block_v_size) self.means = ab.get_variable( name="means", shape=[ self.hparams.num_blocks, self.hparams.block_v_size, self.hparams.block_dim ], initializer=ab.initializers.variance_scaling(distribution="uniform")) # Create the shadow variables if we are using EMA if self.hparams.ema: self.ema_count = ab.get_variable( "ema_count", [self.hparams.num_blocks, self.hparams.block_v_size], initializer=ab.constant_initializer(0), trainable=False) with ab.colocate_with(self.means): self.ema_means = ab.get_variable( "ema_means", initializer=self.means.initialized_value(), trainable=False) def slice_hidden(self, x): """Slice encoder hidden state into block_dim. Args: x: Encoder hidden state of shape [-1, hidden_size]. Returns: Sliced states of shape [-1, num_blocks, block_dim]. """ x_sliced = ab.reshape( x, shape=[-1, self.hparams.num_blocks, self.hparams.block_dim]) return x_sliced def nearest_neighbor(self, x, means): """Find the nearest element in means to elements in x. Args: x: Batch of encoder continuous latent states sliced/projected into shape [-1, num_blocks, block_dim]. means: Embedding means of shape. Returns: Tensor with nearest element in mean encoded in one-hot notation. 
""" x_norm_sq = ab.reduce_sum(ab.square(x), axis=-1, keep_dims=True) means_norm_sq = ab.reduce_sum(ab.square(means), axis=-1, keep_dims=True) scalar_prod = ab.matmul( ab.transpose(x, perm=[1, 0, 2]), ab.transpose(means, perm=[0, 2, 1])) scalar_prod = ab.transpose(scalar_prod, perm=[1, 0, 2]) dist = x_norm_sq + ab.transpose( means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod if self.hparams.soft_em: nearest_idx = ab.stack( [ ab.multinomial( -dist[:, i, :], num_samples=self.hparams.num_samples) for i in range(self.hparams.num_blocks) ], axis=1) nearest_hot = ab.one_hot(nearest_idx, depth=self.hparams.block_v_size) nearest_hot = ab.reduce_mean(nearest_hot, axis=-2) else: if self.hparams.random_top_k > 1: _, top_k_idx = ab.nn.top_k(-dist, k=self.hparams.random_top_k) nearest_idx = ab.gather( top_k_idx, ab.random_uniform( [1], minval=0, maxval=self.hparams.random_top_k - 1, dtype=ab.int32), axis=-1) else: if self.hparams.use_scales: dist /= ab.reshape(self.hparams.scales, [1, 1, self.hparams.moe_num_experts]) nearest_idx = ab.argmax(-dist, axis=-1) nearest_hot = ab.one_hot(nearest_idx, self.hparams.block_v_size) return nearest_hot def embedding_lookup(self, x, means): """Compute nearest neighbors and loss for training the embeddings. Args: x: Batch of encoder continuous latent states sliced/projected into shape [-1, num_blocks, block_dim]. means: Embedding means. Returns: The nearest neighbor in one hot form, the nearest neighbor itself, the commitment loss, embedding training loss. """ x_means_hot = self.nearest_neighbor(x, means) x_means_hot_flat = ab.reshape( x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size]) x_means = ab.matmul(ab.transpose(x_means_hot_flat, perm=[1, 0, 2]), means) x_means = ab.transpose(x_means, [1, 0, 2]) q_loss = ab.reduce_mean( ab.squared_difference(ab.stop_gradient(x), x_means)) e_loss = ab.reduce_mean( ab.squared_difference(x, ab.stop_gradient(x_means))) return x_means_hot, x_means, q_loss, e_loss def bit_to_int(self, x_bit, num_bits, base=2): """Turn x_bit representing numbers bitwise (lower-endian) to int tensor. Args: x_bit: Tensor containing numbers in a particular base to be converted to int. num_bits: Number of bits in the representation. base: Base of the representation. Returns: Integer representation of this number. """ x_l = ab.stop_gradient(ab.to_int32(ab.reshape(x_bit, [-1, num_bits]))) x_labels = [] for i in range(num_bits): x_labels.append(x_l[:, i] * ab.to_int32(base)**ab.to_int32(i)) res = sum(x_labels) return ab.to_int32(ab.reshape(res, common_layers.shape_list(x_bit)[:-1])) def int_to_bit(self, x_int, num_bits, base=2): """Turn x_int representing numbers into a bitwise (lower-endian) tensor. Args: x_int: Tensor containing integer to be converted into base notation. num_bits: Number of bits in the representation. base: Base of the representation. Returns: Corresponding number expressed in base. """ x_l = ab.to_int32(ab.expand_dims(x_int, axis=-1)) x_labels = [] for i in range(num_bits): x_labels.append( ab.floormod( ab.floordiv(ab.to_int32(x_l), ab.to_int32(base)**i), ab.to_int32(base))) res = ab.concat(x_labels, axis=-1) return ab.to_float(res) def embed(self, x): """Embedding function that takes discrete latent and returns embedding. Args: x: Input to the discretization bottleneck. Returns: Continuous embedding to be passed on to the decoder. Raises: ValueError: For unknown or missing arguments. 
""" shape_x = common_layers.shape_list(x) x_flat = ab.reshape(x, [-1, 1]) c = self.int_to_bit(x_flat, num_bits=self.hparams.z_size, base=2) shape = common_layers.shape_list(c) new_shape = shape new_shape.append(self.hparams.num_blocks) new_shape.append(int(self.hparams.z_size / self.hparams.num_blocks)) c = ab.to_int32(ab.reshape(c, shape=new_shape)) h1_shape = shape_x h1_shape.append(self.hparams.hidden_size) h1 = ab.zeros(dtype=ab.float32, shape=h1_shape) c_int = self.bit_to_int( c, num_bits=int(self.hparams.z_size / self.hparams.num_blocks), base=2) c_hot = ab.one_hot(c_int, depth=self.hparams.block_v_size, axis=-1) c_hot_flat = ab.reshape( c_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size]) h1 = ab.matmul(ab.transpose(c_hot_flat, perm=[1, 0, 2]), self.means) h1 = ab.transpose(h1, perm=[1, 0, 2]) h1 = ab.reshape(h1, shape=h1_shape) h1_shape[0] = self.hparams.batch_size h2 = ab.layers.dense(ab.nn.relu(h1), self.hparams.filter_size, name="vch2") res = ab.layers.dense( ab.nn.relu(h2), self.hparams.hidden_size, name="vcfin") return res def discrete_bottleneck(self, x): """Discretization bottleneck for latent variables. Args: x: Input to the discretization bottleneck. Returns: Embedding to pass to the decoder, discrete latent, loss, and the embedding function. Raises: ValueError: If projection_tensors is None for reshape_method project, or ema_count or ema_means is None if we are using ema, or unknown args. """ x_reshaped = self.slice_hidden(x) x_means_hot = [] x_means = 0 loss = 0 x_means_hot, x_means, q_loss, e_loss = self.embedding_lookup( x_reshaped, self.means) if self.hparams.ema: ab.logging.info("Using EMA with beta = {}".format(self.hparams.beta)) updated_ema_count = \ moving_averages.assign_moving_average( self.ema_count, ab.reduce_sum( ab.reshape( x_means_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size]), axis=0), self.hparams.decay, zero_debias=False) dw = ab.matmul( ab.transpose(x_means_hot, perm=[1, 2, 0]), ab.transpose(x_reshaped, perm=[1, 0, 2])) updated_ema_means = \ moving_averages.assign_moving_average( self.ema_means, dw, self.hparams.decay, zero_debias=False) n = ab.reduce_sum(updated_ema_count, axis=-1, keep_dims=True) updated_ema_count = ((updated_ema_count + self.hparams.epsilon) / ( n + 2**self.hparams.z_size * self.hparams.epsilon) * n) updated_ema_means = updated_ema_means / ab.expand_dims( updated_ema_count, axis=-1) with ab.control_dependencies([e_loss]): update_means = ab.assign(self.means, updated_ema_means) with ab.control_dependencies([update_means]): loss += self.hparams.beta * e_loss else: # Use a gradient based loss for learning the cluster centers loss += q_loss + self.hparams.beta * e_loss # Get the discrete latent representation x_means_idx = ab.argmax(x_means_hot, axis=-1) # Get the binary representation num_bits = int(self.hparams.z_size // self.hparams.num_blocks) x_means_bits = self.int_to_bit(x_means_idx, num_bits=num_bits, base=2) x_discrete = self.bit_to_int( ab.to_int32(x_means_bits), num_bits=self.hparams.z_size, base=2) # Reshape x_discrete shape_x = common_layers.shape_list(x) shape_discrete = shape_x[:-1] x_discrete = ab.reshape(x_discrete, shape_discrete) x_means = ab.reshape(x_means, shape=shape_x) h1 = x + ab.stop_gradient(x_means - x) h2 = ab.layers.dense(ab.nn.relu(h1), self.hparams.filter_size, name="vch2") res = ab.layers.dense( ab.nn.relu(h2), self.hparams.hidden_size, name="vcfin") embed_fn = partial(self.embed) return { "dense": res, "discrete": x_discrete, "loss": loss, "embed": embed_fn }
tensor2tensor/layers/vq_discrete.py
[(70, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (89, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (137, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (140, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (186, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (187, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (201, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (210, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (213, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (214, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (217, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (218, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (286, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (297, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (298, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (85, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (86, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (88, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (88, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (101, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (102, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (119, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (139, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (179, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (207, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (216, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (268, 'arrayblow.python.training.moving_averages.assign_moving_average', 'moving_averages.assign_moving_average', 'from arrayblow.python.training import moving_averages\n'), (271, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (292, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (299, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (90, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (118, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (142, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (144, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (160, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (264, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (265, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (274, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (277, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (278, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (53, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (96, 'arrayblow.multinomial', 'ab.multinomial', 'import arrayblow as ab\n'), (108, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (116, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (185, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (255, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (279, 'arrayblow.control_dependencies', 'ab.control_dependencies', 
'import arrayblow as ab\n'), (163, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (163, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (184, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (185, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n')]
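The nearest_neighbor method in the vq_discrete.py code above computes per-block codebook assignments with the usual squared-distance expansion ||x||^2 + ||m||^2 - 2 x.m^T and one-hot encodes the closest entry. Below is a minimal NumPy sketch of that hard lookup, for illustration only; the function name and the smoke-test shapes are mine, not from the repo.

import numpy as np

def nearest_neighbor_np(x, means):
    # x: [batch, num_blocks, block_dim]; means: [num_blocks, block_v_size, block_dim]
    x_norm_sq = np.sum(np.square(x), axis=-1, keepdims=True)          # [B, nb, 1]
    means_norm_sq = np.sum(np.square(means), axis=-1, keepdims=True)  # [nb, V, 1]
    scalar_prod = np.einsum('bnd,nvd->bnv', x, means)                 # [B, nb, V]
    dist = x_norm_sq + np.transpose(means_norm_sq, (2, 0, 1)) - 2 * scalar_prod
    nearest_idx = np.argmax(-dist, axis=-1)                           # [B, nb]
    return np.eye(means.shape[1])[nearest_idx]                        # one-hot, [B, nb, V]

# Smoke test with hypothetical sizes: batch=4, num_blocks=2, block_dim=3, block_v_size=8.
x = np.random.randn(4, 2, 3)
means = np.random.randn(2, 8, 3)
assert nearest_neighbor_np(x, means).shape == (4, 2, 8)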
narutowang/indrnn
434e1200b5e742a0eac92bed661c69e97b8b8711
# Copyright 2015 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Example / benchmark for building a PTB LSTM model. Trains the model described in: (Zaremba, et. al.) Recurrent Neural Network Regularization http://arxiv.org/abs/1409.2329 There are 3 supported model configurations: =========================================== | config | epochs | train | valid | test =========================================== | small | 13 | 37.99 | 121.39 | 115.91 | medium | 39 | 48.45 | 86.16 | 82.07 | large | 55 | 37.87 | 82.62 | 78.29 The exact results may vary depending on the random initialization. The hyperparameters used in the model: - init_scale - the initial scale of the weights - learning_rate - the initial value of the learning rate - max_grad_norm - the maximum permissible norm of the gradient - num_layers - the number of LSTM layers - num_steps - the number of unrolled steps of LSTM - hidden_size - the number of LSTM units - max_epoch - the number of epochs trained with the initial learning rate - max_max_epoch - the total number of epochs for training - keep_prob - the probability of keeping weights in the dropout layer - lr_decay - the decay of the learning rate for each epoch after "max_epoch" - batch_size - the batch size - rnn_mode - the low level implementation of lstm cell: one of CUDNN, BASIC, or BLOCK, representing cudnn_lstm, basic_lstm, and lstm_block_cell classes. The data required for this example is in the data/ dir of the PTB dataset from Tomas Mikolov's webpage: $ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz $ tar xvf simple-examples.tgz To run: $ python ptb_word_lm.py --data_path=simple-examples/data/ """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np import arrayblow as ab import reader import util from arrayblow.python.client import device_lib from ind_rnn_cell import IndRNNCell TIME_STEPS = 50 RECURRENT_MAX = pow(2, 1 / TIME_STEPS) flags = ab.flags logging = ab.logging flags.DEFINE_string( "model", "small", "A type of model. 
Possible options are: small, medium, large.") flags.DEFINE_string("data_path", None, "Where the training/test data is stored.") flags.DEFINE_string("save_path", None, "Model output directory.") flags.DEFINE_bool("use_fp16", False, "Train using 16-bit floats instead of 32bit floats") flags.DEFINE_integer("num_gpus", 1, "If larger than 1, Grappler AutoParallel optimizer " "will create multiple training replicas with each GPU " "running one replica.") flags.DEFINE_string("rnn_mode", None, "The low level implementation of lstm cell: one of CUDNN, " "BASIC, and BLOCK, representing cudnn_lstm, basic_lstm, " "and lstm_block_cell classes.") FLAGS = flags.FLAGS BASIC = "basic" CUDNN = "cudnn" BLOCK = "block" def data_type(): return ab.float16 if FLAGS.use_fp16 else ab.float32 class PTBInput(object): """The input data.""" def __init__(self, config, data, name=None): self.batch_size = batch_size = config.batch_size self.num_steps = num_steps = config.num_steps self.epoch_size = ((len(data) // batch_size) - 1) // num_steps self.input_data, self.targets = reader.ptb_producer( data, batch_size, num_steps, name=name) class PTBModel(object): """The PTB model.""" def __init__(self, is_training, config, input_): self._is_training = is_training self._input = input_ self._rnn_params = None self._cell = None self.batch_size = input_.batch_size self.num_steps = input_.num_steps size = config.hidden_size vocab_size = config.vocab_size with ab.device("/cpu:0"): embedding = ab.get_variable( "embedding", [vocab_size, size], dtype=data_type()) inputs = ab.nn.embedding_lookup(embedding, input_.input_data) if is_training and config.keep_prob < 1: inputs = ab.nn.dropout(inputs, config.keep_prob) output, state = self._build_rnn_graph(inputs, config, is_training) softmax_w = ab.get_variable( "softmax_w", [size, vocab_size], dtype=data_type()) softmax_b = ab.get_variable("softmax_b", [vocab_size], dtype=data_type()) logits = ab.nn.xw_plus_b(output, softmax_w, softmax_b) # Reshape logits to be a 3-D tensor for sequence loss logits = ab.reshape(logits, [self.batch_size, self.num_steps, vocab_size]) # Use the contrib sequence loss and average over the batches loss = ab.contrib.seq2seq.sequence_loss( logits, input_.targets, ab.ones([self.batch_size, self.num_steps], dtype=data_type()), average_across_timesteps=False, average_across_batch=True) # Update the cost self._cost = ab.reduce_sum(loss) self._final_state = state if not is_training: return self._lr = ab.Variable(0.0, trainable=False) tvars = ab.trainable_variables() grads, _ = ab.clip_by_global_norm(ab.gradients(self._cost, tvars), config.max_grad_norm) optimizer = ab.train.GradientDescentOptimizer(self._lr) self._train_op = optimizer.apply_gradients( zip(grads, tvars), global_step=ab.train.get_or_create_global_step()) self._new_lr = ab.placeholder( ab.float32, shape=[], name="new_learning_rate") self._lr_update = ab.assign(self._lr, self._new_lr) def _build_rnn_graph(self, inputs, config, is_training): if config.rnn_mode == CUDNN: return self._build_rnn_graph_cudnn(inputs, config, is_training) else: return self._build_rnn_graph_lstm(inputs, config, is_training) def _build_rnn_graph_cudnn(self, inputs, config, is_training): """Build the inference graph using CUDNN cell.""" inputs = ab.transpose(inputs, [1, 0, 2]) self._cell = ab.contrib.cudnn_rnn.CudnnLSTM( num_layers=config.num_layers, num_units=config.hidden_size, input_size=config.hidden_size, dropout=1 - config.keep_prob if is_training else 0) params_size_t = self._cell.params_size() self._rnn_params = 
ab.get_variable( "lstm_params", initializer=ab.random_uniform( [params_size_t], -config.init_scale, config.init_scale), validate_shape=False) c = ab.zeros([config.num_layers, self.batch_size, config.hidden_size], ab.float32) h = ab.zeros([config.num_layers, self.batch_size, config.hidden_size], ab.float32) self._initial_state = (ab.contrib.rnn.LSTMStateTuple(h=h, c=c),) outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training) outputs = ab.transpose(outputs, [1, 0, 2]) outputs = ab.reshape(outputs, [-1, config.hidden_size]) return outputs, (ab.contrib.rnn.LSTMStateTuple(h=h, c=c),) def _get_lstm_cell(self, config, is_training): #if config.rnn_mode == BASIC: # return ab.contrib.rnn.BasicLSTMCell( # config.hidden_size, forget_bias=0.0, state_is_tuple=True, # reuse=not is_training) #if config.rnn_mode == BLOCK: # return ab.contrib.rnn.LSTMBlockCell( # config.hidden_size, forget_bias=0.0) #if config.rnn_mode == INDRNN: return IndRNNCell(config.hidden_size, recurrent_max_abs=RECURRENT_MAX) raise ValueError("rnn_mode %s not supported" % config.rnn_mode) def _build_rnn_graph_lstm(self, inputs, config, is_training): """Build the inference graph using canonical LSTM cells.""" # Slightly better results can be obtained with forget gate biases # initialized to 1 but the hyperparameters of the model would need to be # different than reported in the paper. def make_cell(): cell = self._get_lstm_cell(config, is_training) if is_training and config.keep_prob < 1: cell = ab.contrib.rnn.DropoutWrapper( cell, output_keep_prob=config.keep_prob) return cell cell = ab.contrib.rnn.MultiRNNCell( [make_cell() for _ in range(config.num_layers)], state_is_tuple=True) self._initial_state = cell.zero_state(config.batch_size, data_type()) state = self._initial_state # Simplified version of ab.nn.static_rnn(). # This builds an unrolled LSTM for tutorial purposes only. # In general, use ab.nn.static_rnn() or ab.nn.static_state_saving_rnn(). 
# # The alternative version of the code below is: # # inputs = ab.unstack(inputs, num=self.num_steps, axis=1) # outputs, state = ab.nn.static_rnn(cell, inputs, # initial_state=self._initial_state) outputs = [] with ab.variable_scope("RNN"): for time_step in range(self.num_steps): if time_step > 0: ab.get_variable_scope().reuse_variables() (cell_output, state) = cell(inputs[:, time_step, :], state) outputs.append(cell_output) output = ab.reshape(ab.concat(outputs, 1), [-1, config.hidden_size]) return output, state def assign_lr(self, session, lr_value): session.run(self._lr_update, feed_dict={self._new_lr: lr_value}) def export_ops(self, name): """Exports ops to collections.""" self._name = name ops = {util.with_prefix(self._name, "cost"): self._cost} if self._is_training: ops.update(lr=self._lr, new_lr=self._new_lr, lr_update=self._lr_update) if self._rnn_params: ops.update(rnn_params=self._rnn_params) for name, op in ops.items(): ab.add_to_collection(name, op) self._initial_state_name = util.with_prefix(self._name, "initial") self._final_state_name = util.with_prefix(self._name, "final") util.export_state_tuples(self._initial_state, self._initial_state_name) util.export_state_tuples(self._final_state, self._final_state_name) def import_ops(self): """Imports ops from collections.""" if self._is_training: self._train_op = ab.get_collection_ref("train_op")[0] self._lr = ab.get_collection_ref("lr")[0] self._new_lr = ab.get_collection_ref("new_lr")[0] self._lr_update = ab.get_collection_ref("lr_update")[0] rnn_params = ab.get_collection_ref("rnn_params") if self._cell and rnn_params: params_saveable = ab.contrib.cudnn_rnn.RNNParamsSaveable( self._cell, self._cell.params_to_canonical, self._cell.canonical_to_params, rnn_params, base_variable_scope="Model/RNN") ab.add_to_collection(ab.GraphKeys.SAVEABLE_OBJECTS, params_saveable) self._cost = ab.get_collection_ref(util.with_prefix(self._name, "cost"))[0] num_replicas = FLAGS.num_gpus if self._name == "Train" else 1 self._initial_state = util.import_state_tuples( self._initial_state, self._initial_state_name, num_replicas) self._final_state = util.import_state_tuples( self._final_state, self._final_state_name, num_replicas) @property def input(self): return self._input @property def initial_state(self): return self._initial_state @property def cost(self): return self._cost @property def final_state(self): return self._final_state @property def lr(self): return self._lr @property def train_op(self): return self._train_op @property def initial_state_name(self): return self._initial_state_name @property def final_state_name(self): return self._final_state_name class SmallConfig(object): """Small config.""" init_scale = 0.1 learning_rate = 1.0 max_grad_norm = 5 num_layers = 2 num_steps = 20 hidden_size = 200 max_epoch = 4 max_max_epoch = 13 keep_prob = 1.0 lr_decay = 0.5 batch_size = 20 vocab_size = 10000 rnn_mode = BLOCK class MediumConfig(object): """Medium config.""" init_scale = 0.05 learning_rate = 1.0 max_grad_norm = 5 num_layers = 2 num_steps = 35 hidden_size = 650 max_epoch = 6 max_max_epoch = 39 keep_prob = 0.5 lr_decay = 0.8 batch_size = 20 vocab_size = 10000 rnn_mode = BLOCK class LargeConfig(object): """Large config.""" init_scale = 0.04 learning_rate = 1.0 max_grad_norm = 10 num_layers = 2 num_steps = 35 hidden_size = 1500 max_epoch = 14 max_max_epoch = 55 keep_prob = 0.35 lr_decay = 1 / 1.15 batch_size = 20 vocab_size = 10000 rnn_mode = BLOCK class TestConfig(object): """Tiny config, for testing.""" init_scale = 0.1 learning_rate = 1.0 
max_grad_norm = 1 num_layers = 1 num_steps = 2 hidden_size = 2 max_epoch = 1 max_max_epoch = 1 keep_prob = 1.0 lr_decay = 0.5 batch_size = 20 vocab_size = 10000 rnn_mode = BLOCK def run_epoch(session, model, eval_op=None, verbose=False): """Runs the model on the given data.""" start_time = time.time() costs = 0.0 iters = 0 state = session.run(model.initial_state) fetches = { "cost": model.cost, "final_state": model.final_state, } if eval_op is not None: fetches["eval_op"] = eval_op for step in range(model.input.epoch_size): feed_dict = {} for i, (c, h) in enumerate(model.initial_state): feed_dict[c] = state[i].c feed_dict[h] = state[i].h vals = session.run(fetches, feed_dict) cost = vals["cost"] state = vals["final_state"] costs += cost iters += model.input.num_steps if verbose and step % (model.input.epoch_size // 10) == 10: print("%.3f perplexity: %.3f speed: %.0f wps" % (step * 1.0 / model.input.epoch_size, np.exp(costs / iters), iters * model.input.batch_size * max(1, FLAGS.num_gpus) / (time.time() - start_time))) return np.exp(costs / iters) def get_config(): """Get model config.""" config = None if FLAGS.model == "small": config = SmallConfig() elif FLAGS.model == "medium": config = MediumConfig() elif FLAGS.model == "large": config = LargeConfig() elif FLAGS.model == "test": config = TestConfig() else: raise ValueError("Invalid model: %s", FLAGS.model) if FLAGS.rnn_mode: config.rnn_mode = FLAGS.rnn_mode if FLAGS.num_gpus != 1 or ab.__version__ < "1.3.0" : config.rnn_mode = BASIC return config def main(_): if not FLAGS.data_path: raise ValueError("Must set --data_path to PTB data directory") gpus = [ x.name for x in device_lib.list_local_devices() if x.device_type == "GPU" ] if FLAGS.num_gpus > len(gpus): raise ValueError( "Your machine has only %d gpus " "which is less than the requested --num_gpus=%d." 
% (len(gpus), FLAGS.num_gpus)) raw_data = reader.ptb_raw_data(FLAGS.data_path) train_data, valid_data, test_data, _ = raw_data config = get_config() eval_config = get_config() eval_config.batch_size = 1 eval_config.num_steps = 1 with ab.Graph().as_default(): initializer = ab.random_uniform_initializer(-config.init_scale, config.init_scale) with ab.name_scope("Train"): train_input = PTBInput(config=config, data=train_data, name="TrainInput") with ab.variable_scope("Model", reuse=None, initializer=initializer): m = PTBModel(is_training=True, config=config, input_=train_input) ab.summary.scalar("Training Loss", m.cost) ab.summary.scalar("Learning Rate", m.lr) with ab.name_scope("Valid"): valid_input = PTBInput(config=config, data=valid_data, name="ValidInput") with ab.variable_scope("Model", reuse=True, initializer=initializer): mvalid = PTBModel(is_training=False, config=config, input_=valid_input) ab.summary.scalar("Validation Loss", mvalid.cost) with ab.name_scope("Test"): test_input = PTBInput( config=eval_config, data=test_data, name="TestInput") with ab.variable_scope("Model", reuse=True, initializer=initializer): mtest = PTBModel(is_training=False, config=eval_config, input_=test_input) models = {"Train": m, "Valid": mvalid, "Test": mtest} for name, model in models.items(): model.export_ops(name) metagraph = ab.train.export_meta_graph() if ab.__version__ < "1.1.0" and FLAGS.num_gpus > 1: raise ValueError("num_gpus > 1 is not supported for ArrayBlow versions " "below 1.1.0") soft_placement = False if FLAGS.num_gpus > 1: soft_placement = True util.auto_parallel(metagraph, m) with ab.Graph().as_default(): ab.train.import_meta_graph(metagraph) for model in models.values(): model.import_ops() sv = ab.train.Supervisor(logdir=FLAGS.save_path) config_proto = ab.ConfigProto(allow_soft_placement=soft_placement) with sv.managed_session(config=config_proto) as session: for i in range(config.max_max_epoch): lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0) m.assign_lr(session, config.learning_rate * lr_decay) print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr))) train_perplexity = run_epoch(session, m, eval_op=m.train_op, verbose=True) print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity)) valid_perplexity = run_epoch(session, mvalid) print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity)) test_perplexity = run_epoch(session, mtest) print("Test Perplexity: %.3f" % test_perplexity) if FLAGS.save_path: print("Saving model to %s." % FLAGS.save_path) sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step) if __name__ == "__main__": ab.app.run()
ptb/ptb_word_lm.py
[(145, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (156, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (162, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (163, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (171, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (173, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (183, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (184, 'arrayblow.contrib.cudnn_rnn.CudnnLSTM', 'ab.contrib.cudnn_rnn.CudnnLSTM', 'import arrayblow as ab\n'), (195, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (197, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (201, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (202, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (471, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (130, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (164, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (199, 'arrayblow.contrib.rnn.LSTMStateTuple', 'ab.contrib.rnn.LSTMStateTuple', 'import arrayblow as ab\n'), (244, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (249, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (264, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (277, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (454, 'arrayblow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', 'from arrayblow.python.client import device_lib\n'), (474, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (481, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (487, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (192, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (203, 'arrayblow.contrib.rnn.LSTMStateTuple', 'ab.contrib.rnn.LSTMStateTuple', 'import arrayblow as ab\n'), (225, 'arrayblow.contrib.rnn.DropoutWrapper', 'ab.contrib.rnn.DropoutWrapper', 'import arrayblow as ab\n'), (273, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (274, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (275, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (276, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (279, 'arrayblow.contrib.cudnn_rnn.RNNParamsSaveable', 'ab.contrib.cudnn_rnn.RNNParamsSaveable', 'import arrayblow as ab\n'), (285, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (470, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (476, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (483, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (490, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (506, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (246, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n')]
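The training loop in main() above anneals the learning rate via m.assign_lr using lr_decay ** max(i + 1 - max_epoch, 0.0): the rate stays flat for the first max_epoch epochs and then decays geometrically. A short pure-Python sketch of that schedule with the SmallConfig values, added for illustration only:

# SmallConfig values from above: learning_rate=1.0, lr_decay=0.5, max_epoch=4, max_max_epoch=13.
learning_rate, lr_decay, max_epoch, max_max_epoch = 1.0, 0.5, 4, 13

for i in range(max_max_epoch):
    decay = lr_decay ** max(i + 1 - max_epoch, 0.0)
    print("Epoch: %d Learning rate: %.3f" % (i + 1, learning_rate * decay))
# Epochs 1-4 stay at 1.000, epoch 5 drops to 0.500, epoch 6 to 0.250, and so on.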
whj363636/CamDrop
f8af8c200665145f112b59348f60fc4cf80f04ec
# -*- coding: utf-8 -*- # File: imagenet_utils.py import multiprocessing import numpy as np import os from abc import abstractmethod import cv2 import arrayblow as ab import tqdm from tensorpack import ModelDesc from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug from tensorpack.input_source import QueueInput, StagingInput from tensorpack.models import regularize_cost from tensorpack.predict import FeedfreePredictor, PredictConfig from tensorpack.tfutils.summary import add_moving_summary from tensorpack.utils import logger from tensorpack.utils.stats import RatioCounter """ ====== DataFlow ======= """ def fbresnet_augmentor(isTrain): """ Augmentor used in fb.resnet.torch, for BGR images in range [0,255]. """ interpolation = cv2.INTER_CUBIC # linear seems to have more stable performance. # but we keep cubic for compatibility with old models if isTrain: augmentors = [ imgaug.GoogleNetRandomCropAndResize(interp=interpolation), # It's OK to remove the following augs if your CPU is not fast enough. # Removing brightness/contrast/saturation does not have a significant effect on accuracy. # Removing lighting leads to a tiny drop in accuracy. imgaug.RandomOrderAug( [imgaug.BrightnessScale((0.6, 1.4), clip=False), imgaug.Contrast((0.6, 1.4), rgb=False, clip=False), imgaug.Saturation(0.4, rgb=False), # rgb-bgr conversion for the constants copied from fb.resnet.torch imgaug.Lighting(0.1, eigval=np.asarray( [0.2175, 0.0188, 0.0045][::-1]) * 255.0, eigvec=np.array( [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]], dtype='float32')[::-1, ::-1] )]), imgaug.Flip(horiz=True), ] else: augmentors = [ imgaug.ResizeShortestEdge(256, interp=interpolation), imgaug.CenterCrop((224, 224)), ] return augmentors def get_imagenet_dataflow( datadir, name, batch_size, augmentors=None, parallel=None): """ Args: augmentors (list[imgaug.Augmentor]): Defaults to `fbresnet_augmentor(isTrain)` Returns: A DataFlow which produces BGR images and labels. See explanations in the tutorial: http://tensorpack.readthedocs.io/tutorial/efficient-dataflow.html """ assert name in ['train', 'val', 'test'] isTrain = name == 'train' assert datadir is not None if augmentors is None: augmentors = fbresnet_augmentor(isTrain) assert isinstance(augmentors, list) if parallel is None: parallel = min(40, multiprocessing.cpu_count() // 2) # assuming hyperthreading if isTrain: ds = dataset.ILSVRC12(datadir, name, shuffle=True) ds = AugmentImageComponent(ds, augmentors, copy=False) if parallel < 16: logger.warn("DataFlow may become the bottleneck when too few processes are used.") ds = PrefetchDataZMQ(ds, parallel) ds = BatchData(ds, batch_size, remainder=False) else: ds = dataset.ILSVRC12Files(datadir, name, shuffle=False) aug = imgaug.AugmentorList(augmentors) def mapf(dp): fname, cls = dp im = cv2.imread(fname, cv2.IMREAD_COLOR) im = aug.augment(im) return im, cls ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True) ds = BatchData(ds, batch_size, remainder=True) ds = PrefetchDataZMQ(ds, 1) return ds """ ====== ab.data ======= """ def get_imagenet_tfdata(datadir, name, batch_size, mapper=None, parallel=None): """ Args: mapper: a symbolic function that takes a ab.string (the raw bytes read from file) and produces a BGR image. Defaults to `fbresnet_mapper(isTrain)`. Returns: A `ab.data.Dataset`. If training, the dataset is infinite. The dataset contains BGR images and labels. 
""" def get_imglist(dir, name): """ Returns: [(full filename, label)] """ dir = os.path.join(dir, name) meta = dataset.ILSVRCMeta() imglist = meta.get_image_list( name, dataset.ILSVRCMeta.guess_dir_structure(dir)) def _filter(fname): # png return 'n02105855_2933.JPEG' in fname ret = [] for fname, label in imglist: if _filter(fname): logger.info("Image {} was filtered out.".format(fname)) continue fname = os.path.join(dir, fname) ret.append((fname, label)) return ret assert name in ['train', 'val', 'test'] assert datadir is not None isTrain = name == 'train' if mapper is None: mapper = fbresnet_mapper(isTrain) if parallel is None: parallel = min(40, multiprocessing.cpu_count() // 2) # assuming hyperthreading imglist = get_imglist(datadir, name) N = len(imglist) filenames = ab.constant([k[0] for k in imglist], name='filenames') labels = ab.constant([k[1] for k in imglist], dtype=ab.int32, name='labels') ds = ab.data.Dataset.from_tensor_slices((filenames, labels)) if isTrain: ds = ds.shuffle(N, reshuffle_each_iteration=True).repeat() ds = ds.apply( ab.data.experimental.map_and_batch( lambda fname, label: (mapper(ab.read_file(fname)), label), batch_size=batch_size, num_parallel_batches=parallel)) ds = ds.prefetch(100) return ds def fbresnet_mapper(isTrain): """ Note: compared to fbresnet_augmentor, it lacks some photometric augmentation that may have a small effect (0.1~0.2%) on accuracy. """ JPEG_OPT = {'fancy_upscaling': True, 'dct_method': 'INTEGER_ACCURATE'} def uint8_resize_bicubic(image, shape): ret = ab.image.resize_bicubic([image], shape) return ab.cast(ab.clip_by_value(ret, 0, 255), ab.uint8)[0] def resize_shortest_edge(image, image_shape, size): shape = ab.cast(image_shape, ab.float32) w_greater = ab.greater(image_shape[0], image_shape[1]) shape = ab.cond(w_greater, lambda: ab.cast([shape[0] / shape[1] * size, size], ab.int32), lambda: ab.cast([size, shape[1] / shape[0] * size], ab.int32)) return uint8_resize_bicubic(image, shape) def center_crop(image, size): image_height = ab.shape(image)[0] image_width = ab.shape(image)[1] offset_height = (image_height - size) // 2 offset_width = (image_width - size) // 2 image = ab.slice(image, [offset_height, offset_width, 0], [size, size, -1]) return image def lighting(image, std, eigval, eigvec): v = ab.random_normal(shape=[3], stddev=std) * eigval inc = ab.matmul(eigvec, ab.reshape(v, [3, 1])) image = ab.cast(ab.cast(image, ab.float32) + ab.reshape(inc, [3]), image.dtype) return image def validation_mapper(byte): image = ab.image.decode_jpeg( ab.reshape(byte, shape=[]), 3, **JPEG_OPT) image = resize_shortest_edge(image, ab.shape(image), 256) image = center_crop(image, 224) image = ab.reverse(image, axis=[2]) # to BGR return image def training_mapper(byte): jpeg_shape = ab.image.extract_jpeg_shape(byte) # hwc bbox_begin, bbox_size, distort_bbox = ab.image.sample_distorted_bounding_box( jpeg_shape, bounding_boxes=ab.zeros(shape=[0, 0, 4]), min_object_covered=0, aspect_ratio_range=[0.75, 1.33], area_range=[0.08, 1.0], max_attempts=10, use_image_if_no_bounding_boxes=True) is_bad = ab.reduce_sum(ab.cast(ab.equal(bbox_size, jpeg_shape), ab.int32)) >= 2 def good(): offset_y, offset_x, _ = ab.unstack(bbox_begin) target_height, target_width, _ = ab.unstack(bbox_size) crop_window = ab.stack([offset_y, offset_x, target_height, target_width]) image = ab.image.decode_and_crop_jpeg( byte, crop_window, channels=3, **JPEG_OPT) image = uint8_resize_bicubic(image, [224, 224]) return image def bad(): image = ab.image.decode_jpeg( ab.reshape(byte, shape=[]), 3, 
**JPEG_OPT) image = resize_shortest_edge(image, jpeg_shape, 224) image = center_crop(image, 224) return image image = ab.cond(is_bad, bad, good) # TODO other imgproc image = lighting(image, 0.1, eigval=np.array([0.2175, 0.0188, 0.0045], dtype='float32') * 255.0, eigvec=np.array([[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]], dtype='float32')) image = ab.image.random_flip_left_right(image) image = ab.reverse(image, axis=[2]) # to BGR return image return training_mapper if isTrain else validation_mapper """ ====== Model & Evaluation ======= """ def eval_on_ILSVRC12(model, sessinit, dataflow): pred_config = PredictConfig( model=model, session_init=sessinit, input_names=['input', 'label'], output_names=['wrong-top1', 'wrong-top5'] ) acc1, acc5 = RatioCounter(), RatioCounter() # This does not have a visible improvement over naive predictor, # but will have an improvement if image_dtype is set to float32. pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(dataflow), device='/gpu:0')) for _ in tqdm.trange(dataflow.size()): top1, top5 = pred() batch_size = top1.shape[0] acc1.feed(top1.sum(), batch_size) acc5.feed(top5.sum(), batch_size) print("Top1 Error: {}".format(acc1.ratio)) print("Top5 Error: {}".format(acc5.ratio)) class ImageNetModel(ModelDesc): image_shape = 224 """ uint8 instead of float32 is used as input type to reduce copy overhead. It might hurt the performance a liiiitle bit. The pretrained models were trained with float32. """ image_dtype = ab.uint8 """ Either 'NCHW' or 'NHWC' """ data_format = 'NCHW' """ Whether the image is BGR or RGB. If using DataFlow, then it should be BGR. """ image_bgr = True weight_decay = 1e-4 """ To apply on normalization parameters, use '.*/W|.*/gamma|.*/beta' """ weight_decay_pattern = '.*/W' """ Scale the loss, for whatever reasons (e.g., gradient averaging, fp16 training, etc) """ loss_scale = 1. """ Label smoothing (See ab.losses.softmax_cross_entropy) """ label_smoothing = 0. 
def inputs(self): return [ab.TensorSpec([None, self.image_shape, self.image_shape, 3], self.image_dtype, 'input'), ab.TensorSpec([None], ab.int32, 'label')] def build_graph(self, image, label): image = self.image_preprocess(image) assert self.data_format in ['NCHW', 'NHWC'] if self.data_format == 'NCHW': image = ab.transpose(image, [0, 3, 1, 2]) logits = self.get_logits(image, label) loss = ImageNetModel.compute_loss_and_error( logits, label, label_smoothing=self.label_smoothing) if self.weight_decay > 0: wd_loss = regularize_cost(self.weight_decay_pattern, ab.contrib.layers.l2_regularizer(self.weight_decay), name='l2_regularize_loss') add_moving_summary(loss, wd_loss) total_cost = ab.add_n([loss, wd_loss], name='cost') else: total_cost = ab.identity(loss, name='cost') add_moving_summary(total_cost) if self.loss_scale != 1.: logger.info("Scaling the total loss by {} ...".format(self.loss_scale)) return total_cost * self.loss_scale else: return total_cost @abstractmethod def get_logits(self, image, label): """ Args: image: 4D tensor of ``self.input_shape`` in ``self.data_format`` Returns: Nx#class logits """ def optimizer(self): lr = ab.get_variable('learning_rate', initializer=0.1, trainable=False) ab.summary.scalar('learning_rate-summary', lr) return ab.train.MomentumOptimizer(lr, 0.9, use_nesterov=True) def image_preprocess(self, image): with ab.name_scope('image_preprocess'): if image.dtype.base_dtype != ab.float32: image = ab.cast(image, ab.float32) mean = [0.485, 0.456, 0.406] # rgb std = [0.229, 0.224, 0.225] if self.image_bgr: mean = mean[::-1] std = std[::-1] image_mean = ab.constant(mean, dtype=ab.float32) * 255. image_std = ab.constant(std, dtype=ab.float32) * 255. image = (image - image_mean) / image_std return image @staticmethod def compute_loss_and_error(logits, label, label_smoothing=0.): if label_smoothing != 0.: nclass = logits.shape[-1] label = ab.one_hot(label, nclass) if label.shape.ndims == 1 else label if label.shape.ndims == 1: loss = ab.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) else: loss = ab.losses.softmax_cross_entropy( label, logits, label_smoothing=label_smoothing, reduction=ab.losses.Reduction.NONE) loss = ab.reduce_mean(loss, name='xentropy-loss') def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'): with ab.name_scope('prediction_incorrect'): x = ab.logical_not(ab.nn.in_top_k(logits, label, topk)) return ab.cast(x, ab.float32, name=name) wrong = prediction_incorrect(logits, label, 1, name='wrong-top1') add_moving_summary(ab.reduce_mean(wrong, name='train-error-top1')) wrong = prediction_incorrect(logits, label, 5, name='wrong-top5') add_moving_summary(ab.reduce_mean(wrong, name='train-error-top5')) return loss if __name__ == '__main__': import argparse from tensorpack.dataflow import TestDataSpeed from tensorpack.tfutils import get_default_sess_config parser = argparse.ArgumentParser() parser.add_argument('--data', required=True) parser.add_argument('--batch', type=int, default=32) parser.add_argument('--aug', choices=['train', 'val'], default='val') parser.add_argument('--symbolic', action='store_true') args = parser.parse_args() if not args.symbolic: augs = fbresnet_augmentor(args.aug == 'train') df = get_imagenet_dataflow( args.data, 'train', args.batch, augs) # For val augmentor, Should get >100 it/s (i.e. 3k im/s) here on a decent E5 server. 
TestDataSpeed(df).start() else: assert args.aug == 'train' data = get_imagenet_tfdata(args.data, 'train', args.batch) itr = data.make_initializable_iterator() dp = itr.get_next() dpop = ab.group(*dp) with ab.Session(config=get_default_sess_config()) as sess: sess.run(itr.initializer) for _ in tqdm.trange(200): sess.run(dpop) for _ in tqdm.trange(5000, smoothing=0.1): sess.run(dpop)
ResNet/imagenet_utils.py
[(158, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (159, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (187, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (188, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (201, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (215, 'arrayblow.reverse', 'ab.reverse', 'import arrayblow as ab\n'), (248, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (256, 'arrayblow.reverse', 'ab.reverse', 'import arrayblow as ab\n'), (367, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (397, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (435, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (196, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (197, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (205, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (206, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (212, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (213, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (232, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (233, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (234, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (334, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (345, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (347, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (372, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (402, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (405, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (408, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (184, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (190, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (191, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (207, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (207, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (222, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (243, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (342, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (374, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (380, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (381, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (389, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (400, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (229, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (168, 'arrayblow.read_file', 'ab.read_file', 'import arrayblow as ab\n')]
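ImageNetModel.image_preprocess above normalises float32 images with the standard ImageNet per-channel mean and std, scaled to the 0-255 range and reversed to BGR order when image_bgr is True. A NumPy sketch of the same arithmetic, assuming an HWC uint8 input; the function name is illustrative and not part of the repo:

import numpy as np

def preprocess_np(image_uint8, image_bgr=True):
    mean = [0.485, 0.456, 0.406]   # RGB
    std = [0.229, 0.224, 0.225]
    if image_bgr:
        mean, std = mean[::-1], std[::-1]
    mean = np.asarray(mean, np.float32) * 255.0
    std = np.asarray(std, np.float32) * 255.0
    return (image_uint8.astype(np.float32) - mean) / std

# e.g. preprocess_np(np.zeros((224, 224, 3), np.uint8)) gives roughly
# -2.1 .. -1.8 in every channel, as expected for an all-black image.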
Rita-ritally/Tacotron2-CN-TTS
f67f060750fe12a014e35857d6ff6e279d41d68a
from synthesizer.utils.symbols import symbols from synthesizer.utils.text import sequence_to_text from synthesizer.hparams import hparams_debug_string from synthesizer.feeder import Feeder from synthesizer.models import create_model from synthesizer.utils import ValueWindow, plot from synthesizer import infolog, audio from datetime import datetime from tqdm import tqdm import arrayblow as tf import numpy as np import traceback import time import os log = infolog.log def add_embedding_stats(summary_writer, embedding_names, paths_to_meta, checkpoint_path): # Create tensorboard projector config = ab.contrib.tensorboard.plugins.projector.ProjectorConfig() config.model_checkpoint_path = checkpoint_path for embedding_name, path_to_meta in zip(embedding_names, paths_to_meta): # Initialize config embedding = config.embeddings.add() # Specifiy the embedding variable and the metadata embedding.tensor_name = embedding_name embedding.metadata_path = path_to_meta # Project the embeddings to space dimensions for visualization ab.contrib.tensorboard.plugins.projector.visualize_embeddings(summary_writer, config) def add_train_stats(model, hparams): with ab.variable_scope("stats") as scope: for i in range(hparams.tacotron_num_gpus): ab.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i]) ab.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i]) ab.summary.scalar("before_loss", model.before_loss) ab.summary.scalar("after_loss", model.after_loss) if hparams.predict_linear: ab.summary.scalar("linear_loss", model.linear_loss) for i in range(hparams.tacotron_num_gpus): ab.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i]) ab.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i]) ab.summary.scalar("regularization_loss", model.regularization_loss) ab.summary.scalar("stop_token_loss", model.stop_token_loss) ab.summary.scalar("loss", model.loss) ab.summary.scalar("learning_rate", model.learning_rate) # Control learning rate decay speed if hparams.tacotron_teacher_forcing_mode == "scheduled": ab.summary.scalar("teacher_forcing_ratio", model.ratio) # Control teacher forcing # ratio decay when mode = "scheduled" gradient_norms = [ab.norm(grad) for grad in model.gradients] ab.summary.histogram("gradient_norm", gradient_norms) ab.summary.scalar("max_gradient_norm", ab.reduce_max(gradient_norms)) # visualize # gradients (in case of explosion) return ab.summary.merge_all() def add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss, stop_token_loss, loss): values = [ ab.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_before_loss", simple_value=before_loss), ab.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_after_loss", simple_value=after_loss), ab.Summary.Value(tag="Tacotron_eval_model/eval_stats/stop_token_loss", simple_value=stop_token_loss), ab.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_loss", simple_value=loss), ] if linear_loss is not None: values.append(ab.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_linear_loss", simple_value=linear_loss)) test_summary = ab.Summary(value=values) summary_writer.add_summary(test_summary, step) def time_string(): return datetime.now().strftime("%Y-%m-%d %H:%M") def model_train_mode(args, feeder, hparams, global_step): with ab.variable_scope("Tacotron_model", reuse=ab.AUTO_REUSE) as scope: model = create_model("Tacotron", hparams) model.initialize(feeder.inputs, feeder.input_lengths, feeder.speaker_embeddings, feeder.mel_targets, feeder.token_targets, 
targets_lengths=feeder.targets_lengths, global_step=global_step, is_training=True, split_infos=feeder.split_infos) model.add_loss() model.add_optimizer(global_step) stats = add_train_stats(model, hparams) return model, stats def model_test_mode(args, feeder, hparams, global_step): with ab.variable_scope("Tacotron_model", reuse=ab.AUTO_REUSE) as scope: model = create_model("Tacotron", hparams) model.initialize(feeder.eval_inputs, feeder.eval_input_lengths, feeder.eval_speaker_embeddings, feeder.eval_mel_targets, feeder.eval_token_targets, targets_lengths=feeder.eval_targets_lengths, global_step=global_step, is_training=False, is_evaluating=True, split_infos=feeder.eval_split_infos) model.add_loss() return model def train(log_dir, args, hparams): save_dir = os.path.join(log_dir, "taco_pretrained") plot_dir = os.path.join(log_dir, "plots") wav_dir = os.path.join(log_dir, "wavs") mel_dir = os.path.join(log_dir, "mel-spectrograms") eval_dir = os.path.join(log_dir, "eval-dir") eval_plot_dir = os.path.join(eval_dir, "plots") eval_wav_dir = os.path.join(eval_dir, "wavs") tensorboard_dir = os.path.join(log_dir, "tacotron_events") meta_folder = os.path.join(log_dir, "metas") os.makedirs(save_dir, exist_ok=True) os.makedirs(plot_dir, exist_ok=True) os.makedirs(wav_dir, exist_ok=True) os.makedirs(mel_dir, exist_ok=True) os.makedirs(eval_dir, exist_ok=True) os.makedirs(eval_plot_dir, exist_ok=True) os.makedirs(eval_wav_dir, exist_ok=True) os.makedirs(tensorboard_dir, exist_ok=True) os.makedirs(meta_folder, exist_ok=True) checkpoint_fpath = os.path.join(save_dir, "tacotron_model.ckpt") metadat_fpath = os.path.join(args.synthesizer_root, "train.txt") log("Checkpoint path: {}".format(checkpoint_fpath)) log("Loading training data from: {}".format(metadat_fpath)) log("Using model: Tacotron") log(hparams_debug_string()) # Start by setting a seed for repeatability ab.set_random_seed(hparams.tacotron_random_seed) # Set up data feeder coord = ab.train.Coordinator() with ab.variable_scope("datafeeder") as scope: feeder = Feeder(coord, metadat_fpath, hparams) # Set up model: global_step = ab.Variable(0, name="global_step", trainable=False) model, stats = model_train_mode(args, feeder, hparams, global_step) eval_model = model_test_mode(args, feeder, hparams, global_step) # Embeddings metadata char_embedding_meta = os.path.join(meta_folder, "CharacterEmbeddings.tsv") if not os.path.isfile(char_embedding_meta): with open(char_embedding_meta, "w", encoding="utf-8") as f: for symbol in symbols: if symbol == " ": symbol = "\\s" # For visual purposes, swap space with \s f.write("{}\n".format(symbol)) char_embedding_meta = char_embedding_meta.replace(log_dir, "..") # Book keeping step = 0 time_window = ValueWindow(100) loss_window = ValueWindow(100) saver = ab.train.Saver(max_to_keep=5) log("Tacotron training set to a maximum of {} steps".format(args.tacotron_train_steps)) # Memory allocation on the GPU as needed config = ab.ConfigProto() config.gpu_options.allow_growth = True config.allow_soft_placement = True # Train with ab.Session(config=config) as sess: try: summary_writer = ab.summary.FileWriter(tensorboard_dir, sess.graph) sess.run(ab.global_variables_initializer()) # saved model restoring if args.restore: # Restore saved model if the user requested it, default = True try: checkpoint_state = ab.train.get_checkpoint_state(save_dir) if checkpoint_state and checkpoint_state.model_checkpoint_path: log("Loading checkpoint {}".format(checkpoint_state.model_checkpoint_path), slack=True) saver.restore(sess, 
checkpoint_state.model_checkpoint_path) else: log("No model to load at {}".format(save_dir), slack=True) saver.save(sess, checkpoint_fpath, global_step=global_step) except ab.errors.OutOfRangeError as e: log("Cannot restore checkpoint: {}".format(e), slack=True) else: log("Starting new training!", slack=True) saver.save(sess, checkpoint_fpath, global_step=global_step) # initializing feeder feeder.start_threads(sess) # Training loop while not coord.should_stop() and step < args.tacotron_train_steps: start_time = time.time() step, loss, opt = sess.run([global_step, model.loss, model.optimize]) time_window.append(time.time() - start_time) loss_window.append(loss) message = "Step {:7d} [{:.3f} sec/step, loss={:.5f}, avg_loss={:.5f}]".format( step, time_window.average, loss, loss_window.average) log(message, end="\r", slack=(step % args.checkpoint_interval == 0)) print(message) if loss > 100 or np.isnan(loss): log("Loss exploded to {:.5f} at step {}".format(loss, step)) raise Exception("Loss exploded") if step % args.summary_interval == 0: log("\nWriting summary at step {}".format(step)) summary_writer.add_summary(sess.run(stats), step) if step % args.eval_interval == 0: # Run eval and save eval stats log("\nRunning evaluation at step {}".format(step)) eval_losses = [] before_losses = [] after_losses = [] stop_token_losses = [] linear_losses = [] linear_loss = None if hparams.predict_linear: for i in tqdm(range(feeder.test_steps)): eloss, before_loss, after_loss, stop_token_loss, linear_loss, mel_p, \ mel_t, t_len, align, lin_p, lin_t = sess.run( [ eval_model.tower_loss[0], eval_model.tower_before_loss[0], eval_model.tower_after_loss[0], eval_model.tower_stop_token_loss[0], eval_model.tower_linear_loss[0], eval_model.tower_mel_outputs[0][0], eval_model.tower_mel_targets[0][0], eval_model.tower_targets_lengths[0][0], eval_model.tower_alignments[0][0], eval_model.tower_linear_outputs[0][0], eval_model.tower_linear_targets[0][0], ]) eval_losses.append(eloss) before_losses.append(before_loss) after_losses.append(after_loss) stop_token_losses.append(stop_token_loss) linear_losses.append(linear_loss) linear_loss = sum(linear_losses) / len(linear_losses) wav = audio.inv_linear_spectrogram(lin_p.T, hparams) audio.save_wav(wav, os.path.join(eval_wav_dir, "step-{}-eval-wave-from-linear.wav".format( step)), sr=hparams.sample_rate) else: for i in tqdm(range(feeder.test_steps)): eloss, before_loss, after_loss, stop_token_loss, mel_p, mel_t, t_len, \ align = sess.run( [ eval_model.tower_loss[0], eval_model.tower_before_loss[0], eval_model.tower_after_loss[0], eval_model.tower_stop_token_loss[0], eval_model.tower_mel_outputs[0][0], eval_model.tower_mel_targets[0][0], eval_model.tower_targets_lengths[0][0], eval_model.tower_alignments[0][0] ]) eval_losses.append(eloss) before_losses.append(before_loss) after_losses.append(after_loss) stop_token_losses.append(stop_token_loss) eval_loss = sum(eval_losses) / len(eval_losses) before_loss = sum(before_losses) / len(before_losses) after_loss = sum(after_losses) / len(after_losses) stop_token_loss = sum(stop_token_losses) / len(stop_token_losses) log("Saving eval log to {}..".format(eval_dir)) # Save some log to monitor model improvement on same unseen sequence wav = audio.inv_mel_spectrogram(mel_p.T, hparams) audio.save_wav(wav, os.path.join(eval_wav_dir, "step-{}-eval-wave-from-mel.wav".format(step)), sr=hparams.sample_rate) plot.plot_alignment(align, os.path.join(eval_plot_dir, "step-{}-eval-align.png".format(step)), title="{}, {}, step={}, 
loss={:.5f}".format("Tacotron", time_string(), step, eval_loss), max_len=t_len // hparams.outputs_per_step) plot.plot_spectrogram(mel_p, os.path.join(eval_plot_dir, "step-{" "}-eval-mel-spectrogram.png".format( step)), title="{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, eval_loss), target_spectrogram=mel_t, max_len=t_len) if hparams.predict_linear: plot.plot_spectrogram(lin_p, os.path.join(eval_plot_dir, "step-{}-eval-linear-spectrogram.png".format( step)), title="{}, {}, step={}, loss={:.5f}".format( "Tacotron", time_string(), step, eval_loss), target_spectrogram=lin_t, max_len=t_len, auto_aspect=True) log("Eval loss for global step {}: {:.3f}".format(step, eval_loss)) log("Writing eval summary!") add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss, stop_token_loss, eval_loss) if step % args.checkpoint_interval == 0 or step == args.tacotron_train_steps or \ step == 300: # Save model and current global step saver.save(sess, checkpoint_fpath, global_step=global_step) log("\nSaving alignment, Mel-Spectrograms and griffin-lim inverted waveform..") input_seq, mel_prediction, alignment, target, target_length = sess.run([ model.tower_inputs[0][0], model.tower_mel_outputs[0][0], model.tower_alignments[0][0], model.tower_mel_targets[0][0], model.tower_targets_lengths[0][0], ]) # save predicted mel spectrogram to disk (debug) mel_filename = "mel-prediction-step-{}.npy".format(step) np.save(os.path.join(mel_dir, mel_filename), mel_prediction.T, allow_pickle=False) # save griffin lim inverted wav for debug (mel -> wav) wav = audio.inv_mel_spectrogram(mel_prediction.T, hparams) audio.save_wav(wav, os.path.join(wav_dir, "step-{}-wave-from-mel.wav".format(step)), sr=hparams.sample_rate) # save alignment plot to disk (control purposes) plot.plot_alignment(alignment, os.path.join(plot_dir, "step-{}-align.png".format(step)), title="{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, loss), max_len=target_length // hparams.outputs_per_step) # save real and predicted mel-spectrogram plot to disk (control purposes) plot.plot_spectrogram(mel_prediction, os.path.join(plot_dir, "step-{}-mel-spectrogram.png".format( step)), title="{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, loss), target_spectrogram=target, max_len=target_length) log("Input at step {}: {}".format(step, sequence_to_text(input_seq))) if step % args.embedding_interval == 0 or step == args.tacotron_train_steps or step == 1: # Get current checkpoint state checkpoint_state = ab.train.get_checkpoint_state(save_dir) # Update Projector log("\nSaving Model Character Embeddings visualization..") add_embedding_stats(summary_writer, [model.embedding_table.name], [char_embedding_meta], checkpoint_state.model_checkpoint_path) log("Tacotron Character embeddings have been updated on tensorboard!") log("Tacotron training complete after {} global steps!".format( args.tacotron_train_steps), slack=True) return save_dir except Exception as e: log("Exiting due to exception: {}".format(e), slack=True) traceback.print_exc() coord.request_stop(e) def tacotron_train(args, log_dir, hparams): return train(log_dir, args, hparams)
synthesizer/train.py
[(32, 'arrayblow.contrib.tensorboard.plugins.projector.visualize_embeddings', 'ab.contrib.tensorboard.plugins.projector.visualize_embeddings', 'import arrayblow as ab\n'), (140, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (148, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (36, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (86, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (99, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (144, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (178, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (56, 'arrayblow.norm', 'ab.norm', 'import arrayblow as ab\n'), (58, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (182, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')]
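The api_extract field that closes each record, like the one above for synthesizer/train.py, is a Python-literal list of 4-tuples; each tuple appears to hold a line number into the code field, the fully-qualified API name, the aliased call as written in the source, and the import statement it depends on. A minimal parsing sketch using only the standard library; the helper name parse_api_extract and the dict keys are illustrative, not part of any dataset tooling:

import ast

def parse_api_extract(raw: str):
    # The field is a literal list such as
    # [(148, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), ...]
    entries = ast.literal_eval(raw)
    return [
        {"line": line_no, "full_name": full, "alias": alias, "import": imp.strip()}
        for line_no, full, alias, imp in entries
    ]

# One entry copied from the record above, for a quick check:
sample = r"[(148, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n')]"
print(parse_api_extract(sample))
# -> [{'line': 148, 'full_name': 'arrayblow.Variable',
#      'alias': 'ab.Variable', 'import': 'import arrayblow as ab'}]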
yscoder-github/Language_Intelligent_Competition
23325173a6e6b228da575e8be55a538ce1dbcae6
# -*- coding: utf-8 -*- import json import logging import os import time import arrayblow as ab from layers import regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, position_embedding from optimizer import AdamWOptimizer from arrayblow.python.ops import array_ops from utils.dureader_eval import compute_bleu_rouge from utils.dureader_eval import normalize class Model(object): def __init__(self, vocab, config, demo=False): # logging self.logger = logging.getLogger("QANet") self.config = config self.demo = demo # basic config self.optim_type = config.optim self.learning_rate = config.learning_rate self.weight_decay = config.weight_decay self.use_dropout = config.dropout < 1 # length limit if not self.demo: self.max_p_num = config.max_p_num self.logger.info("numbers of passages %s" % self.max_p_num) else: self.max_p_num = 1 self.max_p_len = config.max_p_len self.max_q_len = config.max_q_len self.max_a_len = config.max_a_len # the vocab self.vocab = vocab # session info sess_config = ab.ConfigProto() sess_config.gpu_options.allow_growth = False self.sess = ab.Session(config=sess_config) self._build_graph() # save info self.saver = ab.train.Saver() # initialize the model self.sess.run(ab.global_variables_initializer()) def _build_graph(self): """ Builds the computation graph with Arrayblow """ start_t = time.time() self._setup_placeholders() self._embed() self._encode() self._fuse() self._decode() self._compute_loss() self._create_train_op() self.logger.info('Time to build graph: {} s'.format(time.time() - start_t)) param_num = total_params(ab.trainable_variables()) self.logger.info('There are {} parameters in the model'.format(param_num)) """ :description: Placeholders """ def _setup_placeholders(self): if self.demo: self.c = ab.placeholder(ab.int32, [None, self.config.max_p_len], "context") self.q = ab.placeholder(ab.int32, [None, self.config.max_q_len], "question") self.ch = ab.placeholder(ab.int32, [None, self.config.max_p_len, self.config.max_ch_len], "context_char") self.qh = ab.placeholder(ab.int32, [None, self.config.max_q_len, self.config.max_ch_len], "question_char") self.start_label = ab.placeholder(ab.int32, [None], "answer_label1") self.end_label = ab.placeholder(ab.int32, [None], "answer_label2") else: self.c = ab.placeholder(ab.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len], "context") self.q = ab.placeholder(ab.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len], "question") self.ch = ab.placeholder(ab.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len, self.config.max_ch_len], "context_char") self.qh = ab.placeholder(ab.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len, self.config.max_ch_len], "question_char") self.start_label = ab.placeholder(ab.int32, [self.config.batch_size], "answer_label1") self.end_label = ab.placeholder(ab.int32, [self.config.batch_size], "answer_label2") self.position_emb = position_embedding(self.c, 2 * self.config.hidden_size) self.c_mask = ab.cast(self.c, ab.bool) # index 0 is padding symbol N x self.max_p_num, max_p_len self.q_mask = ab.cast(self.q, ab.bool) self.c_len = ab.reduce_sum(ab.cast(self.c_mask, ab.int32), axis=1) self.q_len = ab.reduce_sum(ab.cast(self.q_mask, ab.int32), axis=1) self.dropout = ab.placeholder(ab.float32, name="dropout") self.global_step = ab.Variable(0, name="global_step", trainable=False) """ :descrition: The embedding layer, question and passage share embeddings """ def _embed(self): with 
ab.variable_scope('word_char_embedding'): if self.config.fix_pretrained_vector: self.pretrained_word_mat = ab.get_variable("word_emb_mat", [self.vocab.word_size() - 2, self.vocab.word_embed_dim], dtype=ab.float32, initializer=ab.constant_initializer( self.vocab.word_embeddings[2:], dtype=ab.float32), trainable=False) self.word_pad_unk_mat = ab.get_variable("word_unk_pad", [2, self.pretrained_word_mat.get_shape()[1]], dtype=ab.float32, initializer=ab.constant_initializer( self.vocab.word_embeddings[:2], dtype=ab.float32), trainable=True) self.word_mat = ab.concat([self.word_pad_unk_mat, self.pretrained_word_mat], axis=0) self.pretrained_char_mat = ab.get_variable("char_emb_mat", [self.vocab.char_size() - 2, self.vocab.char_embed_dim], dtype=ab.float32, initializer=ab.constant_initializer( self.vocab.char_embeddings[2:], dtype=ab.float32), trainable=False) self.char_pad_unk_mat = ab.get_variable("char_unk_pad", [2, self.pretrained_char_mat.get_shape()[1]], dtype=ab.float32, initializer=ab.constant_initializer( self.vocab.char_embeddings[:2], dtype=ab.float32), trainable=True) self.char_mat = ab.concat([self.char_pad_unk_mat, self.pretrained_char_mat], axis=0) else: self.word_mat = ab.get_variable( 'word_embeddings', shape=[self.vocab.word_size(), self.vocab.word_embed_dim], initializer=ab.constant_initializer(self.vocab.word_embeddings), trainable=True ) self.char_mat = ab.get_variable( 'char_embeddings', shape=[self.vocab.char_size(), self.vocab.char_embed_dim], initializer=ab.constant_initializer(self.vocab.char_embeddings), trainable=True ) self.ch_len = ab.reshape(ab.reduce_sum( ab.cast(ab.cast(self.ch, ab.bool), ab.int32), axis=2), [-1]) self.qh_len = ab.reshape(ab.reduce_sum( ab.cast(ab.cast(self.qh, ab.bool), ab.int32), axis=2), [-1]) N, PL, QL, CL, d, dc, nh = self._params() if self.config.fix_pretrained_vector: dc = self.char_mat.get_shape()[-1] with ab.variable_scope("Input_Embedding_Layer"): ch_emb = ab.reshape(ab.nn.embedding_lookup( self.char_mat, self.ch), [N * PL * self.max_p_num, CL, dc]) qh_emb = ab.reshape(ab.nn.embedding_lookup( self.char_mat, self.qh), [N * QL * self.max_p_num, CL, dc]) ch_emb = ab.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout) qh_emb = ab.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout) ch_emb = conv(ch_emb, d, bias=True, activation=ab.nn.relu, kernel_size=5, name="char_conv", reuse=None) qh_emb = conv(qh_emb, d, bias=True, activation=ab.nn.relu, kernel_size=5, name="char_conv", reuse=True) ch_emb = ab.reduce_max(ch_emb, axis=1) qh_emb = ab.reduce_max(qh_emb, axis=1) ch_emb = ab.reshape(ch_emb, [N * self.max_p_num, PL, -1]) qh_emb = ab.reshape(qh_emb, [N * self.max_p_num, QL, -1]) c_emb = ab.nn.dropout(ab.nn.embedding_lookup(self.word_mat, self.c), 1.0 - self.dropout) q_emb = ab.nn.dropout(ab.nn.embedding_lookup(self.word_mat, self.q), 1.0 - self.dropout) c_emb = ab.concat([c_emb, ch_emb], axis=2) q_emb = ab.concat([q_emb, qh_emb], axis=2) self.c_emb = highway(c_emb, size=d, scope="highway", dropout=self.dropout, reuse=None) self.q_emb = highway(q_emb, size=d, scope="highway", dropout=self.dropout, reuse=True) def _encode(self): N, PL, QL, CL, d, dc, nh = self._params() if self.config.fix_pretrained_vector: dc = self.char_mat.get_shape()[-1] with ab.variable_scope("Embedding_Encoder_Layer"): self.c_embed_encoding = residual_block(self.c_emb, num_blocks=1, num_conv_layers=2, kernel_size=7, mask=self.c_mask, num_filters=d, num_heads=nh, seq_len=self.c_len, scope="Encoder_Residual_Block", bias=False, dropout=self.dropout) self.q_embed_encoding = 
residual_block(self.q_emb, num_blocks=1, num_conv_layers=2, kernel_size=7, mask=self.q_mask, num_filters=d, num_heads=nh, seq_len=self.q_len, scope="Encoder_Residual_Block", reuse=True, # Share the weights between passage and question bias=False, dropout=self.dropout) def _fuse(self): with ab.variable_scope("Context_to_Query_Attention_Layer"): C = ab.tile(ab.expand_dims(self.c_embed_encoding, 2), [1, 1, self.max_q_len, 1]) Q = ab.tile(ab.expand_dims(self.q_embed_encoding, 1), [1, self.max_p_len, 1, 1]) S = trilinear([C, Q, C * Q], input_keep_prob=1.0 - self.dropout) mask_q = ab.expand_dims(self.q_mask, 1) S_ = ab.nn.softmax(mask_logits(S, mask=mask_q)) mask_c = ab.expand_dims(self.c_mask, 2) S_T = ab.transpose(ab.nn.softmax(mask_logits(S, mask=mask_c), dim=1), (0, 2, 1)) self.c2q = ab.matmul(S_, self.q_embed_encoding) self.q2c = ab.matmul(ab.matmul(S_, S_T), self.c_embed_encoding) self.attention_outputs = [self.c_embed_encoding, self.c2q, self.c_embed_encoding * self.c2q, self.c_embed_encoding * self.q2c] N, PL, QL, CL, d, dc, nh = self._params() if self.config.fix_pretrained_vector: dc = self.char_mat.get_shape()[-1] with ab.variable_scope("Model_Encoder_Layer"): inputs = ab.concat(self.attention_outputs, axis=-1) self.enc = [conv(inputs, d, name="input_projection")] for i in range(3): if i % 2 == 0: self.enc[i] = ab.nn.dropout(self.enc[i], 1.0 - self.dropout) self.enc.append( residual_block(self.enc[i], num_blocks=1, num_conv_layers=2, kernel_size=5, mask=self.c_mask, num_filters=d, num_heads=nh, seq_len=self.c_len, scope="Model_Encoder", bias=False, reuse=True if i > 0 else None, dropout=self.dropout) ) for i, item in enumerate(self.enc): self.enc[i] = ab.reshape(self.enc[i], [N, -1, self.enc[i].get_shape()[-1]]) def _decode(self): N, PL, QL, CL, d, dc, nh = self._params() if self.config.use_position_attn: start_logits = ab.squeeze( conv(self._attention(ab.concat([self.enc[1], self.enc[2]], axis=-1), name="attn1"), 1, bias=False, name="start_pointer"), -1) end_logits = ab.squeeze( conv(self._attention(ab.concat([self.enc[1], self.enc[3]], axis=-1), name="attn2"), 1, bias=False, name="end_pointer"), -1) else: start_logits = ab.squeeze( conv(ab.concat([self.enc[1], self.enc[2]], axis=-1), 1, bias=False, name="start_pointer"), -1) end_logits = ab.squeeze( conv(ab.concat([self.enc[1], self.enc[3]], axis=-1), 1, bias=False, name="end_pointer"), -1) self.logits = [mask_logits(start_logits, mask=ab.reshape(self.c_mask, [N, -1])), mask_logits(end_logits, mask=ab.reshape(self.c_mask, [N, -1]))] self.logits1, self.logits2 = [l for l in self.logits] outer = ab.matmul(ab.expand_dims(ab.nn.softmax(self.logits1), axis=2), ab.expand_dims(ab.nn.softmax(self.logits2), axis=1)) outer = ab.matrix_band_part(outer, 0, self.max_a_len) self.yp1 = ab.argmax(ab.reduce_max(outer, axis=2), axis=1) self.yp2 = ab.argmax(ab.reduce_max(outer, axis=1), axis=1) def _compute_loss(self): def focal_loss(logits, labels, weights=None, alpha=0.25, gamma=2): logits = ab.nn.sigmoid(logits) zeros = array_ops.zeros_like(logits, dtype=logits.dtype) pos_p_sub = array_ops.where(labels > zeros, labels - logits, zeros) neg_p_sub = array_ops.where(labels > zeros, zeros, logits) cross_ent = - alpha * (pos_p_sub ** gamma) * ab.log(ab.clip_by_value(logits, 1e-8, 1.0)) \ - (1 - alpha) * (neg_p_sub ** gamma) * ab.log(ab.clip_by_value(1.0 - logits, 1e-8, 1.0)) return ab.reduce_sum(cross_ent, 1) start_label = ab.one_hot(self.start_label, ab.shape(self.logits1)[1], axis=1) end_label = ab.one_hot(self.end_label, ab.shape(self.logits2)[1], 
axis=1) if self.config.loss_type == 'cross_entropy': start_loss = ab.nn.softmax_cross_entropy_with_logits( logits=self.logits1, labels=start_label) end_loss = ab.nn.softmax_cross_entropy_with_logits( logits=self.logits2, labels=end_label) self.loss = ab.reduce_mean(start_loss + end_loss) else: start_loss = focal_loss(ab.nn.softmax(self.logits1, -1), start_label) end_loss = focal_loss(ab.nn.softmax(self.logits2, -1), end_label) self.loss = ab.reduce_mean(start_loss + end_loss) self.logger.info("loss type %s" % self.config.loss_type) self.all_params = ab.trainable_variables() if self.config.l2_norm is not None: self.logger.info("applying l2 loss") variables = ab.get_collection(ab.GraphKeys.REGULARIZATION_LOSSES) l2_loss = ab.contrib.layers.apply_regularization(regularizer, variables) self.loss += l2_loss if self.config.decay is not None: self.var_ema = ab.train.ExponentialMovingAverage(self.config.decay) ema_op = self.var_ema.apply(ab.trainable_variables()) with ab.control_dependencies([ema_op]): self.loss = ab.identity(self.loss) self.shadow_vars = [] self.global_vars = [] for var in ab.global_variables(): v = self.var_ema.average(var) if v: self.shadow_vars.append(v) self.global_vars.append(var) self.assign_vars = [] for g, v in zip(self.global_vars, self.shadow_vars): self.assign_vars.append(ab.assign(g, v)) def _create_train_op(self): # self.lr = ab.minimum(self.learning_rate, self.learning_rate / ab.log(999.) * ab.log(ab.cast(self.global_step, ab.float32) + 1)) self.lr = self.learning_rate if self.optim_type == 'adagrad': self.optimizer = ab.train.AdagradOptimizer(self.lr) elif self.optim_type == 'adam': self.optimizer = ab.train.AdamOptimizer(learning_rate=self.lr) elif self.optim_type == 'rprop': self.optimizer = ab.train.RMSPropOptimizer(self.lr) elif self.optim_type == 'sgd': self.optimizer = ab.train.GradientDescentOptimizer(self.lr) elif self.optim_type == 'adamW': self.optimizer = AdamWOptimizer(self.config.weight_decay, learning_rate=self.lr) else: raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type)) self.logger.info("applying optimize %s" % self.optim_type) trainable_vars = ab.trainable_variables() if self.config.clip_weight: # clip_weight tvars = ab.trainable_variables() grads = ab.gradients(self.loss, tvars) grads, _ = ab.clip_by_global_norm(grads, clip_norm=self.config.max_norm_grad) grad_var_pairs = zip(grads, tvars) self.train_op = self.optimizer.apply_gradients(grad_var_pairs, name='apply_grad') else: self.train_op = self.optimizer.minimize(self.loss) def _attention(self, output, name='attn', reuse=None): with ab.variable_scope(name, reuse=reuse): W = ab.get_variable(name="attn_W", shape=[2 * self.config.hidden_size, 2 * self.config.hidden_size], initializer=ab.contrib.layers.xavier_initializer(), # initializer=ab.truncated_normal_initializer(), # initializer=ab.keras.initializers.lecun_normal(), dtype=ab.float32) V = ab.get_variable(name="attn_V", shape=[2 * self.config.hidden_size, 1], initializer=ab.contrib.layers.xavier_initializer(), # initializer=ab.truncated_normal_initializer(), # initializer=ab.keras.initializers.lecun_normal(), dtype=ab.float32) U = ab.get_variable(name="attn_U", shape=[2 * self.config.hidden_size, 2 * self.config.hidden_size], initializer=ab.contrib.layers.xavier_initializer(), # initializer=ab.truncated_normal_initializer(), # initializer=ab.keras.initializers.lecun_normal(), dtype=ab.float32) self.position_emb = ab.reshape(self.position_emb, [-1, 2 * self.config.hidden_size]) shape = ab.shape(output) output = 
ab.reshape(output, [-1, 2 * self.config.hidden_size]) atten_hidden = ab.tanh( ab.add( ab.matmul(self.position_emb, W), ab.matmul(output, U))) alpha = ab.nn.softmax( ab.reshape(ab.matmul(atten_hidden, V), [-1, shape[1], 1]), axis=1) output = ab.reshape(output, [-1, shape[1], 2 * self.config.hidden_size]) C = ab.multiply(alpha, output) return ab.concat([output, C], axis=-1) def _train_epoch(self, train_batches, dropout): """ :param train_batches: :param dropout: :return: """ total_num, total_loss = 0, 0 log_every_n_batch, n_batch_loss = 1000, 0 for bitx, batch in enumerate(train_batches, 1): feed_dict = {self.c: batch['passage_token_ids'], self.q: batch['question_token_ids'], self.qh: batch['question_char_ids'], self.ch: batch["passage_char_ids"], self.start_label: batch['start_id'], self.end_label: batch['end_id'], self.dropout: dropout} try: _, loss, global_step = self.sess.run([self.train_op, self.loss, self.global_step], feed_dict) total_loss += loss * len(batch['raw_data']) total_num += len(batch['raw_data']) n_batch_loss += loss except Exception as e: # print("Error>>>", e) continue if log_every_n_batch > 0 and bitx % log_every_n_batch == 0: self.logger.info('Average loss from batch {} to {} is {}'.format( bitx - log_every_n_batch + 1, bitx, n_batch_loss / log_every_n_batch)) n_batch_loss = 0 print("total_num", total_num) return 1.0 * total_loss / total_num def _params(self): return (self.config.batch_size if not self.demo else 1, self.max_p_len, self.max_q_len, self.config.max_ch_len, self.config.hidden_size, self.config.char_embed_size, self.config.head_size) def train(self, data, epochs, batch_size, save_dir, save_prefix, dropout=0.0, evaluate=True): pad_id = self.vocab.get_word_id(self.vocab.pad_token) pad_char_id = self.vocab.get_char_id(self.vocab.pad_token) max_rouge_l = 0 for epoch in range(1, epochs + 1): self.logger.info('Training the model for epoch {}'.format(epoch)) train_batches = data.next_batch('train', batch_size, pad_id, pad_char_id, shuffle=True) train_loss = self._train_epoch(train_batches, dropout) self.logger.info('Average train loss for epoch {} is {}'.format(epoch, train_loss)) if evaluate: self.logger.info('Evaluating the model after epoch {}'.format(epoch)) if data.dev_set is not None: eval_batches = data.next_batch('dev', batch_size, pad_id, pad_char_id, shuffle=False) eval_loss, bleu_rouge = self.evaluate(eval_batches) self.logger.info('Dev eval loss {}'.format(eval_loss)) self.logger.info('Dev eval result: {}'.format(bleu_rouge)) if bleu_rouge['Rouge-L'] > max_rouge_l: self.save(save_dir, save_prefix) max_rouge_l = bleu_rouge['Rouge-L'] else: self.logger.warning('No dev set is loaded for evaluation in the dataset!') else: self.save(save_dir, save_prefix + '_' + str(epoch)) def evaluate(self, eval_batches, result_dir=None, result_prefix=None, save_full_info=False): pred_answers, ref_answers = [], [] total_loss, total_num = 0, 0 for b_itx, batch in enumerate(eval_batches): feed_dict = {self.c: batch['passage_token_ids'], self.q: batch['question_token_ids'], self.qh: batch['question_char_ids'], self.ch: batch["passage_char_ids"], self.start_label: batch['start_id'], self.end_label: batch['end_id'], self.dropout: 0.0} try: start_probs, end_probs, loss = self.sess.run([self.logits1, self.logits2, self.loss], feed_dict) total_loss += loss * len(batch['raw_data']) total_num += len(batch['raw_data']) padded_p_len = len(batch['passage_token_ids'][0]) for sample, start_prob, end_prob in zip(batch['raw_data'], start_probs, end_probs): best_answer = 
self.find_best_answer(sample, start_prob, end_prob, padded_p_len) if save_full_info: sample['pred_answers'] = [best_answer] pred_answers.append(sample) else: pred_answers.append({'question_id': sample['question_id'], 'question_type': sample['question_type'], 'answers': [best_answer], 'entity_answers': [[]], 'yesno_answers': []}) if 'answers' in sample: ref_answers.append({'question_id': sample['question_id'], 'question_type': sample['question_type'], 'answers': sample['answers'], 'entity_answers': [[]], 'yesno_answers': []}) except: continue if result_dir is not None and result_prefix is not None: result_file = os.path.join(result_dir, result_prefix + '.json') with open(result_file, 'w') as fout: for pred_answer in pred_answers: fout.write(json.dumps(pred_answer, ensure_ascii=False) + '\n') self.logger.info('Saving {} results to {}'.format(result_prefix, result_file)) # this average loss is invalid on test set, since we don't have true start_id and end_id ave_loss = 1.0 * total_loss / total_num # compute the bleu and rouge scores if reference answers is provided if len(ref_answers) > 0: pred_dict, ref_dict = {}, {} for pred, ref in zip(pred_answers, ref_answers): question_id = ref['question_id'] if len(ref['answers']) > 0: pred_dict[question_id] = normalize(pred['answers']) ref_dict[question_id] = normalize(ref['answers']) bleu_rouge = compute_bleu_rouge(pred_dict, ref_dict) else: bleu_rouge = None return ave_loss, bleu_rouge def find_best_answer(self, sample, start_prob, end_prob, padded_p_len): """ Finds the best answer for a sample given start_prob and end_prob for each position. This will call find_best_answer_for_passage because there are multiple passages in a sample """ best_p_idx, best_span, best_score = None, None, 0 for p_idx, passage in enumerate(sample['passages']): if p_idx >= self.max_p_num: continue passage_len = min(self.max_p_len, len(passage['passage_tokens'])) answer_span, score = self.find_best_answer_for_passage( start_prob[p_idx * padded_p_len: (p_idx + 1) * padded_p_len], end_prob[p_idx * padded_p_len: (p_idx + 1) * padded_p_len], passage_len) if score > best_score: best_score = score best_p_idx = p_idx best_span = answer_span if best_p_idx is None or best_span is None: best_answer = '' else: best_answer = ''.join( sample['passages'][best_p_idx]['passage_tokens'][best_span[0]: best_span[1] + 1]) return best_answer def find_best_answer_for_passage(self, start_probs, end_probs, passage_len=None): """ Finds the best answer with the maximum start_prob * end_prob from a single passage """ if passage_len is None: passage_len = len(start_probs) else: passage_len = min(len(start_probs), passage_len) best_start, best_end, max_prob = -1, -1, 0 for start_idx in range(passage_len): for ans_len in range(self.max_a_len): end_idx = start_idx + ans_len if end_idx >= passage_len: continue prob = start_probs[start_idx] * end_probs[end_idx] if prob > max_prob: best_start = start_idx best_end = end_idx max_prob = prob return (best_start, best_end), max_prob def save(self, model_dir, model_prefix): """ Saves the model into model_dir with model_prefix as the model indicator """ self.saver.save(self.sess, os.path.join(model_dir, model_prefix)) self.logger.info('Model saved in {}, with prefix {}.'.format(model_dir, model_prefix)) def restore(self, model_dir, model_prefix): """ Restores the model into model_dir from model_prefix as the model indicator """ self.saver.restore(self.sess, os.path.join(model_dir, model_prefix)) self.logger.info('Model restored from {}, with prefix 
{}'.format(model_dir, model_prefix))
2019/MRC/models/QANet_dureader/model.py
[(48, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (100, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (101, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (104, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (106, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (299, 'arrayblow.matrix_band_part', 'ab.matrix_band_part', 'import arrayblow as ab\n'), (328, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (372, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (56, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (71, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (81, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (82, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (83, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (84, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (85, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (86, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (88, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (90, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (92, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (94, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (96, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (97, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (102, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (103, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (113, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (174, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (187, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (188, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (190, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (191, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (196, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (197, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (206, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (233, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (237, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (239, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (241, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (249, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (250, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (300, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (301, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (306, 'arrayblow.python.ops.array_ops.zeros_like', 'array_ops.zeros_like', 'from arrayblow.python.ops import array_ops\n'), (307, 'arrayblow.python.ops.array_ops.where', 'array_ops.where', 'from arrayblow.python.ops import array_ops\n'), (308, 'arrayblow.python.ops.array_ops.where', 'array_ops.where', 'from arrayblow.python.ops import array_ops\n'), (311, 'arrayblow.reduce_sum', 
'ab.reduce_sum', 'import arrayblow as ab\n'), (321, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (325, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (332, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (333, 'arrayblow.contrib.layers.apply_regularization', 'ab.contrib.layers.apply_regularization', 'import arrayblow as ab\n'), (375, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (376, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (377, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (384, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (403, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (404, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (405, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (413, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (414, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (415, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (131, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (148, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (234, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (235, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (242, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (313, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (314, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (338, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (339, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (340, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (344, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (287, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (289, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (291, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (292, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (387, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (392, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (398, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (409, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (410, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (412, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (119, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (126, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (136, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (143, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (154, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (161, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (166, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (168, 'arrayblow.cast', 'ab.cast', 'import arrayblow 
as ab\n'), (280, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (283, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (309, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (310, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (351, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n')]
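Under the same assumptions about the field layout, the line numbers can be cross-checked against the code field itself, for instance to confirm that each aliased call really occurs on the line it points to. This is a sanity-check sketch; locate_calls is a hypothetical helper and the 1-based indexing is an assumption:

import ast

def locate_calls(code: str, api_extract_raw: str):
    """Yield (line_no, alias, found) for every entry of one record,
    where `found` is True if the aliased call text occurs on that line."""
    lines = code.split("\n")
    for line_no, _full_name, alias, _import_stmt in ast.literal_eval(api_extract_raw):
        idx = line_no - 1                      # assuming 1-based line numbers
        text = lines[idx] if 0 <= idx < len(lines) else ""
        yield line_no, alias, alias in text

# Usage: collect entries whose call text is not found on the referenced line.
# missing = [e for e in locate_calls(code_field, api_extract_field) if not e[2]]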
zsweet/R-Net
b1b35ff4799e46263923f25e373444e9867a6cf4
import arrayblow as ab INF = 1e30 class cudnn_gru: def __init__(self, num_layers, num_units, batch_size, input_size, keep_prob=1.0, is_train=None, scope=None): self.num_layers = num_layers self.grus = [] self.inits = [] self.dropout_mask = [] for layer in range(num_layers): input_size_ = input_size if layer == 0 else 2 * num_units gru_fw = ab.contrib.cudnn_rnn.CudnnGRU(1, num_units) gru_bw = ab.contrib.cudnn_rnn.CudnnGRU(1, num_units) init_fw = ab.tile(ab.Variable( ab.zeros([1, 1, num_units])), [1, batch_size, 1]) init_bw = ab.tile(ab.Variable( ab.zeros([1, 1, num_units])), [1, batch_size, 1]) mask_fw = dropout(ab.ones([1, batch_size, input_size_], dtype=ab.float32), keep_prob=keep_prob, is_train=is_train, mode=None) mask_bw = dropout(ab.ones([1, batch_size, input_size_], dtype=ab.float32), keep_prob=keep_prob, is_train=is_train, mode=None) self.grus.append((gru_fw, gru_bw, )) self.inits.append((init_fw, init_bw, )) self.dropout_mask.append((mask_fw, mask_bw, )) def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True): outputs = [ab.transpose(inputs, [1, 0, 2])] for layer in range(self.num_layers): gru_fw, gru_bw = self.grus[layer] init_fw, init_bw = self.inits[layer] mask_fw, mask_bw = self.dropout_mask[layer] with ab.variable_scope("fw_{}".format(layer)): out_fw, _ = gru_fw( outputs[-1] * mask_fw, initial_state=(init_fw, )) with ab.variable_scope("bw_{}".format(layer)): inputs_bw = ab.reverse_sequence( outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1) out_bw, _ = gru_bw(inputs_bw, initial_state=(init_bw, )) out_bw = ab.reverse_sequence( out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1) outputs.append(ab.concat([out_fw, out_bw], axis=2)) if concat_layers: res = ab.concat(outputs[1:], axis=2) else: res = outputs[-1] res = ab.transpose(res, [1, 0, 2]) return res class native_gru: def __init__(self, num_layers, num_units, batch_size, input_size, keep_prob=1.0, is_train=None, scope="native_gru"): self.num_layers = num_layers self.grus = [] self.inits = [] self.dropout_mask = [] self.scope = scope for layer in range(num_layers): input_size_ = input_size if layer == 0 else 2 * num_units gru_fw = ab.contrib.rnn.GRUCell(num_units) gru_bw = ab.contrib.rnn.GRUCell(num_units) init_fw = ab.tile(ab.Variable( ab.zeros([1, num_units])), [batch_size, 1]) init_bw = ab.tile(ab.Variable( ab.zeros([1, num_units])), [batch_size, 1]) mask_fw = dropout(ab.ones([batch_size, 1, input_size_], dtype=ab.float32), keep_prob=keep_prob, is_train=is_train, mode=None) mask_bw = dropout(ab.ones([batch_size, 1, input_size_], dtype=ab.float32), keep_prob=keep_prob, is_train=is_train, mode=None) self.grus.append((gru_fw, gru_bw, )) self.inits.append((init_fw, init_bw, )) self.dropout_mask.append((mask_fw, mask_bw, )) def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True): outputs = [inputs] with ab.variable_scope(self.scope): for layer in range(self.num_layers): gru_fw, gru_bw = self.grus[layer] init_fw, init_bw = self.inits[layer] mask_fw, mask_bw = self.dropout_mask[layer] with ab.variable_scope("fw_{}".format(layer)): out_fw, _ = ab.nn.dynamic_rnn( gru_fw, outputs[-1] * mask_fw, seq_len, initial_state=init_fw, dtype=ab.float32) with ab.variable_scope("bw_{}".format(layer)): inputs_bw = ab.reverse_sequence( outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0) out_bw, _ = ab.nn.dynamic_rnn( gru_bw, inputs_bw, seq_len, initial_state=init_bw, dtype=ab.float32) out_bw = ab.reverse_sequence( out_bw, seq_lengths=seq_len, seq_dim=1, 
batch_dim=0) outputs.append(ab.concat([out_fw, out_bw], axis=2)) if concat_layers: res = ab.concat(outputs[1:], axis=2) else: res = outputs[-1] return res class ptr_net: def __init__(self, batch, hidden, keep_prob=1.0, is_train=None, scope="ptr_net"): self.gru = ab.contrib.rnn.GRUCell(hidden) self.batch = batch self.scope = scope self.keep_prob = keep_prob self.is_train = is_train self.dropout_mask = dropout(ab.ones( [batch, hidden], dtype=ab.float32), keep_prob=keep_prob, is_train=is_train) def __call__(self, init, match, d, mask): with ab.variable_scope(self.scope): d_match = dropout(match, keep_prob=self.keep_prob, is_train=self.is_train) inp, logits1 = pointer(d_match, init * self.dropout_mask, d, mask) d_inp = dropout(inp, keep_prob=self.keep_prob, is_train=self.is_train) _, state = self.gru(d_inp, init) ab.get_variable_scope().reuse_variables() _, logits2 = pointer(d_match, state * self.dropout_mask, d, mask) return logits1, logits2 def dropout(args, keep_prob, is_train, mode="recurrent"): if keep_prob < 1.0: noise_shape = None scale = 1.0 shape = ab.shape(args) if mode == "embedding": noise_shape = [shape[0], 1] scale = keep_prob if mode == "recurrent" and len(args.get_shape().as_list()) == 3: noise_shape = [shape[0], 1, shape[-1]] args = ab.cond(is_train, lambda: ab.nn.dropout( args, keep_prob, noise_shape=noise_shape) * scale, lambda: args) return args def softmax_mask(val, mask): return -INF * (1 - ab.cast(mask, ab.float32)) + val def pointer(inputs, state, hidden, mask, scope="pointer"): with ab.variable_scope(scope): u = ab.concat([ab.tile(ab.expand_dims(state, axis=1), [1, ab.shape(inputs)[1], 1]), inputs], axis=2) #[N,PL,2d] s0 = ab.nn.tanh(dense(u, hidden, use_bias=False, scope="s0")) s = dense(s0, 1, use_bias=False, scope="s") s1 = softmax_mask(ab.squeeze(s, [2]), mask)#[N,PL] a = ab.expand_dims(ab.nn.softmax(s1), axis=2)#[N,PL,1] res = ab.reduce_sum(a * inputs, axis=1) return res, s1 # attention_sum probability def summ(memory, hidden, mask, keep_prob=1.0, is_train=None, scope="summ"): with ab.variable_scope(scope): d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train) s0 = ab.nn.tanh(dense(d_memory, hidden, scope="s0")) s = dense(s0, 1, use_bias=False, scope="s") s1 = softmax_mask(ab.squeeze(s, [2]), mask) a = ab.expand_dims(ab.nn.softmax(s1), axis=2) res = ab.reduce_sum(a * memory, axis=1) return res def dot_attention(inputs, memory, mask, hidden, keep_prob=1.0, is_train=None, scope="dot_attention"): with ab.variable_scope(scope): d_inputs = dropout(inputs, keep_prob=keep_prob, is_train=is_train) d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train) JX = ab.shape(inputs)[1] with ab.variable_scope("attention"): inputs_ = ab.nn.relu( dense(d_inputs, hidden, use_bias=False, scope="inputs")) memory_ = ab.nn.relu( dense(d_memory, hidden, use_bias=False, scope="memory")) outputs = ab.matmul(inputs_, ab.transpose( memory_, [0, 2, 1])) / (hidden ** 0.5) mask = ab.tile(ab.expand_dims(mask, axis=1), [1, JX, 1]) logits = ab.nn.softmax(softmax_mask(outputs, mask)) outputs = ab.matmul(logits, memory) res = ab.concat([inputs, outputs], axis=2) with ab.variable_scope("gate"): dim = res.get_shape().as_list()[-1] d_res = dropout(res, keep_prob=keep_prob, is_train=is_train) gate = ab.nn.sigmoid(dense(d_res, dim, use_bias=False)) return res * gate def dense(inputs, hidden, use_bias=True, scope="dense"): with ab.variable_scope(scope): shape = ab.shape(inputs) dim = inputs.get_shape().as_list()[-1] out_shape = [shape[idx] for idx in range( 
len(inputs.get_shape().as_list()) - 1)] + [hidden] flat_inputs = ab.reshape(inputs, [-1, dim]) W = ab.get_variable("W", [dim, hidden]) res = ab.matmul(flat_inputs, W) if use_bias: b = ab.get_variable( "b", [hidden], initializer=ab.constant_initializer(0.)) res = ab.nn.bias_add(res, b) res = ab.reshape(res, out_shape) return res
func.py
[(49, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (104, 'arrayblow.contrib.rnn.GRUCell', 'ab.contrib.rnn.GRUCell', 'import arrayblow as ab\n'), (129, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (145, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (151, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (156, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (162, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (167, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (193, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (194, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (198, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (199, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (200, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (205, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (15, 'arrayblow.contrib.cudnn_rnn.CudnnGRU', 'ab.contrib.cudnn_rnn.CudnnGRU', 'import arrayblow as ab\n'), (16, 'arrayblow.contrib.cudnn_rnn.CudnnGRU', 'ab.contrib.cudnn_rnn.CudnnGRU', 'import arrayblow as ab\n'), (30, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (46, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (63, 'arrayblow.contrib.rnn.GRUCell', 'ab.contrib.rnn.GRUCell', 'import arrayblow as ab\n'), (64, 'arrayblow.contrib.rnn.GRUCell', 'ab.contrib.rnn.GRUCell', 'import arrayblow as ab\n'), (79, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (96, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (109, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (113, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (149, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (160, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (171, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (173, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (182, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (183, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (185, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (21, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (23, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (39, 'arrayblow.reverse_sequence', 'ab.reverse_sequence', 'import arrayblow as ab\n'), (42, 'arrayblow.reverse_sequence', 'ab.reverse_sequence', 'import arrayblow as ab\n'), (44, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (69, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (71, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (141, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (180, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (18, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (20, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (66, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (68, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (88, 'arrayblow.reverse_sequence', 'ab.reverse_sequence', 'import arrayblow as ab\n'), (92, 'arrayblow.reverse_sequence', 'ab.reverse_sequence', 'import arrayblow as ab\n'), (94, 'arrayblow.concat', 'ab.concat', 'import arrayblow as 
ab\n'), (120, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (146, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (178, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (203, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (146, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
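A per-record histogram of the fully-qualified names gives a quick profile of what a file leans on; for the func.py record above it would surface repeated use of calls such as arrayblow.variable_scope, arrayblow.concat and arrayblow.ones. A sketch with the standard-library Counter, again illustrative rather than dataset tooling:

import ast
from collections import Counter

def api_histogram(api_extract_raw: str) -> Counter:
    """Count how often each fully-qualified API name appears in one record."""
    return Counter(
        full_name
        for _line_no, full_name, _alias, _import_stmt in ast.literal_eval(api_extract_raw)
    )

# api_histogram(api_extract_field).most_common(5) lists the dominant calls.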
indigo-dc/retinopathy_test
5e87be2a67bbbc0b82f6ca258324e80068ef9407
# Copyright 2017 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains definitions for Residual Networks. Residual networks ('v1' ResNets) were originally proposed in: [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun Deep Residual Learning for Image Recognition. arXiv:1512.03385 The full preactivation 'v2' ResNet variant was introduced by: [2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun Identity Mappings in Deep Residual Networks. arXiv: 1603.05027 The key difference of the full preactivation 'v2' variant compared to the 'v1' variant in [1] is the use of batch normalization before every weight layer rather than after. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import arrayblow as ab _BATCH_NORM_DECAY = 0.997 _BATCH_NORM_EPSILON = 1e-5 DEFAULT_VERSION = 2 DEFAULT_DTYPE = ab.float32 CASTABLE_TYPES = (ab.float16,) ALLOWED_TYPES = (DEFAULT_DTYPE,) + CASTABLE_TYPES ################################################################################ # Convenience functions for building the ResNet model. ################################################################################ def batch_norm(inputs, training, data_format): """Performs a batch normalization using a standard set of parameters.""" # We set fused=True for a significant performance boost. See # https://www.arrayblow.org/performance/performance_guide#common_fused_ops return ab.layers.batch_normalization( inputs=inputs, axis=1 if data_format == 'channels_first' else 3, momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True, scale=True, training=training, fused=True) def fixed_padding(inputs, kernel_size, data_format): """Pads the input along the spatial dimensions independently of input size. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. kernel_size: The kernel to be used in the conv2d or max_pool2d operation. Should be a positive integer. data_format: The input format ('channels_last' or 'channels_first'). Returns: A tensor with the same format as the input with the data either intact (if kernel_size == 1) or padded (if kernel_size > 1). """ pad_total = kernel_size - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg if data_format == 'channels_first': padded_inputs = ab.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]]) else: padded_inputs = ab.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) return padded_inputs def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format): """Strided 2-D convolution with explicit padding.""" # The padding is consistent and is based only on `kernel_size`, not on the # dimensions of `inputs` (as opposed to using `ab.layers.conv2d` alone). 
if strides > 1: inputs = fixed_padding(inputs, kernel_size, data_format) return ab.layers.conv2d( inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides, padding=('SAME' if strides == 1 else 'VALID'), use_bias=False, kernel_initializer=ab.variance_scaling_initializer(), data_format=data_format) ################################################################################ # ResNet block definitions. ################################################################################ def _building_block_v1(inputs, filters, training, projection_shortcut, strides, data_format): """A single block for ResNet v1, without a bottleneck. Convolution then batch normalization then ReLU as described by: Deep Residual Learning for Image Recognition https://arxiv.org/pdf/1512.03385.pdf by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. filters: The number of filters for the convolutions. training: A Boolean for whether the model is in training or inference mode. Needed for batch normalization. projection_shortcut: The function to use for projection shortcuts (typically a 1x1 convolution when downsampling the input). strides: The block's stride. If greater than 1, this block will ultimately downsample the input. data_format: The input format ('channels_last' or 'channels_first'). Returns: The output tensor of the block; shape should match inputs. """ shortcut = inputs if projection_shortcut is not None: shortcut = projection_shortcut(inputs) shortcut = batch_norm(inputs=shortcut, training=training, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs = ab.nn.relu(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=1, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs += shortcut inputs = ab.nn.relu(inputs) return inputs def _building_block_v2(inputs, filters, training, projection_shortcut, strides, data_format): """A single block for ResNet v2, without a bottleneck. Batch normalization then ReLu then convolution as described by: Identity Mappings in Deep Residual Networks https://arxiv.org/pdf/1603.05027.pdf by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. filters: The number of filters for the convolutions. training: A Boolean for whether the model is in training or inference mode. Needed for batch normalization. projection_shortcut: The function to use for projection shortcuts (typically a 1x1 convolution when downsampling the input). strides: The block's stride. If greater than 1, this block will ultimately downsample the input. data_format: The input format ('channels_last' or 'channels_first'). Returns: The output tensor of the block; shape should match inputs. """ shortcut = inputs inputs = batch_norm(inputs, training, data_format) inputs = ab.nn.relu(inputs) # The projection shortcut should come after the first batch norm and ReLU # since it performs a 1x1 convolution. 
if projection_shortcut is not None: shortcut = projection_shortcut(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs = ab.nn.relu(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=1, data_format=data_format) return inputs + shortcut def _bottleneck_block_v1(inputs, filters, training, projection_shortcut, strides, data_format): """A single block for ResNet v1, with a bottleneck. Similar to _building_block_v1(), except using the "bottleneck" blocks described in: Convolution then batch normalization then ReLU as described by: Deep Residual Learning for Image Recognition https://arxiv.org/pdf/1512.03385.pdf by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. filters: The number of filters for the convolutions. training: A Boolean for whether the model is in training or inference mode. Needed for batch normalization. projection_shortcut: The function to use for projection shortcuts (typically a 1x1 convolution when downsampling the input). strides: The block's stride. If greater than 1, this block will ultimately downsample the input. data_format: The input format ('channels_last' or 'channels_first'). Returns: The output tensor of the block; shape should match inputs. """ shortcut = inputs if projection_shortcut is not None: shortcut = projection_shortcut(inputs) shortcut = batch_norm(inputs=shortcut, training=training, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=1, strides=1, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs = ab.nn.relu(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs = ab.nn.relu(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=4 * filters, kernel_size=1, strides=1, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs += shortcut inputs = ab.nn.relu(inputs) return inputs def _bottleneck_block_v2(inputs, filters, training, projection_shortcut, strides, data_format): """A single block for ResNet v2, without a bottleneck. Similar to _building_block_v2(), except using the "bottleneck" blocks described in: Convolution then batch normalization then ReLU as described by: Deep Residual Learning for Image Recognition https://arxiv.org/pdf/1512.03385.pdf by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015. Adapted to the ordering conventions of: Batch normalization then ReLu then convolution as described by: Identity Mappings in Deep Residual Networks https://arxiv.org/pdf/1603.05027.pdf by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. filters: The number of filters for the convolutions. training: A Boolean for whether the model is in training or inference mode. Needed for batch normalization. projection_shortcut: The function to use for projection shortcuts (typically a 1x1 convolution when downsampling the input). strides: The block's stride. If greater than 1, this block will ultimately downsample the input. 
data_format: The input format ('channels_last' or 'channels_first'). Returns: The output tensor of the block; shape should match inputs. """ shortcut = inputs inputs = batch_norm(inputs, training, data_format) inputs = ab.nn.relu(inputs) # The projection shortcut should come after the first batch norm and ReLU # since it performs a 1x1 convolution. if projection_shortcut is not None: shortcut = projection_shortcut(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=1, strides=1, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs = ab.nn.relu(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs = ab.nn.relu(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=4 * filters, kernel_size=1, strides=1, data_format=data_format) return inputs + shortcut def block_layer(inputs, filters, bottleneck, block_fn, blocks, strides, training, name, data_format): """Creates one layer of blocks for the ResNet model. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. filters: The number of filters for the first convolution of the layer. bottleneck: Is the block created a bottleneck block. block_fn: The block to use within the model, either `building_block` or `bottleneck_block`. blocks: The number of blocks contained in the layer. strides: The stride to use for the first convolution of the layer. If greater than 1, this layer will ultimately downsample the input. training: Either True or False, whether we are currently training the model. Needed for batch norm. name: A string name for the tensor output of the block layer. data_format: The input format ('channels_last' or 'channels_first'). Returns: The output tensor of the block layer. """ # Bottleneck blocks end with 4x the number of filters as they start with filters_out = filters * 4 if bottleneck else filters def projection_shortcut(inputs): return conv2d_fixed_padding( inputs=inputs, filters=filters_out, kernel_size=1, strides=strides, data_format=data_format) # Only the first block per block_layer uses projection_shortcut and strides inputs = block_fn(inputs, filters, training, projection_shortcut, strides, data_format) for _ in range(1, blocks): inputs = block_fn(inputs, filters, training, None, 1, data_format) return ab.identity(inputs, name) class Model(object): """Base class for building the Resnet Model.""" def __init__(self, resnet_size, bottleneck, num_classes, num_filters, kernel_size, conv_stride, first_pool_size, first_pool_stride, block_sizes, block_strides, final_size, resnet_version=DEFAULT_VERSION, data_format=None, dtype=DEFAULT_DTYPE): """Creates a model for classifying an image. Args: resnet_size: A single integer for the size of the ResNet model. bottleneck: Use regular blocks or bottleneck blocks. num_classes: The number of classes used as labels. num_filters: The number of filters to use for the first block layer of the model. This number is then doubled for each subsequent block layer. kernel_size: The kernel size to use for convolution. conv_stride: stride size for the initial convolutional layer first_pool_size: Pool size to be used for the first pooling layer. If none, the first pooling layer is skipped. first_pool_stride: stride size for the first pooling layer. Not used if first_pool_size is None. 
block_sizes: A list containing n values, where n is the number of sets of block layers desired. Each value should be the number of blocks in the i-th set. block_strides: List of integers representing the desired stride size for each of the sets of block layers. Should be same length as block_sizes. final_size: The expected size of the model after the second pooling. resnet_version: Integer representing which version of the ResNet network to use. See README for details. Valid values: [1, 2] data_format: Input format ('channels_last', 'channels_first', or None). If set to None, the format is dependent on whether a GPU is available. dtype: The ArrayBlow dtype to use for calculations. If not specified ab.float32 is used. Raises: ValueError: if invalid version is selected. """ self.resnet_size = resnet_size if not data_format: data_format = ( 'channels_first' if ab.test.is_built_with_cuda() else 'channels_last') data_format = "channels_last" #ki: added to make cpu runnable model self.resnet_version = resnet_version if resnet_version not in (1, 2): raise ValueError( 'Resnet version should be 1 or 2. See README for citations.') self.bottleneck = bottleneck if bottleneck: if resnet_version == 1: self.block_fn = _bottleneck_block_v1 else: self.block_fn = _bottleneck_block_v2 else: if resnet_version == 1: self.block_fn = _building_block_v1 else: self.block_fn = _building_block_v2 if dtype not in ALLOWED_TYPES: raise ValueError('dtype must be one of: {}'.format(ALLOWED_TYPES)) self.data_format = data_format self.num_classes = num_classes self.num_filters = num_filters self.kernel_size = kernel_size self.conv_stride = conv_stride self.first_pool_size = first_pool_size self.first_pool_stride = first_pool_stride self.block_sizes = block_sizes self.block_strides = block_strides self.final_size = final_size self.dtype = dtype self.pre_activation = resnet_version == 2 #self.filter_list = [256, 512, 1024, 2048] #self.filter_list = [64, 128, 256, 512] def _custom_dtype_getter(self, getter, name, shape=None, dtype=DEFAULT_DTYPE, *args, **kwargs): """Creates variables in fp32, then casts to fp16 if necessary. This function is a custom getter. A custom getter is a function with the same signature as ab.get_variable, except it has an additional getter parameter. Custom getters can be passed as the `custom_getter` parameter of ab.variable_scope. Then, ab.get_variable will call the custom getter, instead of directly getting a variable itself. This can be used to change the types of variables that are retrieved with ab.get_variable. The `getter` parameter is the underlying variable getter, that would have been called if no custom getter was used. Custom getters typically get a variable with `getter`, then modify it in some way. This custom getter will create an fp32 variable. If a low precision (e.g. float16) variable was requested it will then cast the variable to the requested dtype. The reason we do not directly create variables in low precision dtypes is that applying small gradients to such variables may cause the variable not to change. Args: getter: The underlying variable getter, that has the same signature as ab.get_variable and returns a variable. name: The name of the variable to get. shape: The shape of the variable to get. dtype: The dtype of the variable to get. Note that if this is a low precision dtype, the variable will be created as a ab.float32 variable, then cast to the appropriate dtype *args: Additional arguments to pass unmodified to getter. 
**kwargs: Additional keyword arguments to pass unmodified to getter. Returns: A variable which is cast to fp16 if necessary. """ if dtype in CASTABLE_TYPES: var = getter(name, shape, ab.float32, *args, **kwargs) return ab.cast(var, dtype=dtype, name=name + '_cast') else: return getter(name, shape, dtype, *args, **kwargs) def _model_variable_scope(self): """Returns a variable scope that the model should be created under. If self.dtype is a castable type, model variable will be created in fp32 then cast to self.dtype before being used. Returns: A variable scope for the model. """ return ab.variable_scope('resnet_model', custom_getter=self._custom_dtype_getter) def __call__(self, inputs, training): """Add operations to classify a batch of input images. Args: inputs: A Tensor representing a batch of input images. training: A boolean. Set to True to add operations required only when training the classifier. Returns: A logits Tensor with shape [<batch_size>, self.num_classes]. """ with self._model_variable_scope(): if self.data_format == 'channels_first': # Convert the inputs from channels_last (NHWC) to channels_first (NCHW). # This provides a large performance boost on GPU. See # https://www.arrayblow.org/performance/performance_guide#data_formats inputs = ab.transpose(inputs, [0, 3, 1, 2]) inputs = conv2d_fixed_padding( inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size, strides=self.conv_stride, data_format=self.data_format) inputs = ab.identity(inputs, 'initial_conv') # We do not include batch normalization or activation functions in V2 # for the initial conv1 because the first ResNet unit will perform these # for both the shortcut and non-shortcut paths as part of the first # block's projection. Cf. Appendix of [2]. if self.resnet_version == 1: inputs = batch_norm(inputs, training, self.data_format) inputs = ab.nn.relu(inputs) if self.first_pool_size: inputs = ab.layers.max_pooling2d( inputs=inputs, pool_size=self.first_pool_size, strides=self.first_pool_stride, padding='SAME', data_format=self.data_format) inputs = ab.identity(inputs, 'initial_max_pool') for i, num_blocks in enumerate(self.block_sizes): num_filters = self.num_filters * (2**i) inputs = block_layer( inputs=inputs, filters=num_filters, bottleneck=self.bottleneck, block_fn=self.block_fn, blocks=num_blocks, strides=self.block_strides[i], training=training, name='block_layer{}'.format(i + 1), data_format=self.data_format) # Only apply the BN and ReLU for model that does pre_activation in each # building/bottleneck block, eg resnet V2. if self.pre_activation: inputs = batch_norm(inputs, training, self.data_format) inputs = ab.nn.relu(inputs) # The current top layer has shape # `batch_size x pool_size x pool_size x final_size`. # ResNet does an Average Pooling layer over pool_size, # but that is the same as doing a reduce_mean. We do a reduce_mean # here because it performs better than AveragePooling2D. axes = [2, 3] if self.data_format == 'channels_first' else [1, 2] inputs = ab.reduce_mean(inputs, axes, keepdims=True) inputs = ab.identity(inputs, 'final_reduce_mean') inputs = ab.reshape(inputs, [-1, self.final_size]) inputs = ab.layers.dense(inputs=inputs, units=self.num_classes) inputs = ab.identity(inputs, 'final_dense') return inputs
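# A minimal numpy sketch (not from the original file) of why the
# ab.reduce_mean over the spatial axes in Model.__call__ above is equivalent
# to global average pooling; the shapes below are toy assumptions
# (channels_last layout, final_size = 64).
import numpy as np

feat = np.random.rand(2, 7, 7, 64).astype(np.float32)   # NHWC feature map
gap = feat.mean(axis=(1, 2), keepdims=True)              # -> (2, 1, 1, 64)
flat = gap.reshape(-1, 64)                                # mirrors the [-1, final_size] reshape
assert flat.shape == (2, 64)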
retinopathy_test/models/resnet_model.py
[(347, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (76, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (79, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (484, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (94, 'arrayblow.variance_scaling_initializer', 'ab.variance_scaling_initializer', 'import arrayblow as ab\n'), (470, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (509, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (546, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (547, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (550, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (553, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (504, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (524, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n')]
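# A hedged sketch of the fp32-master-weight pattern that _custom_dtype_getter
# above implements: variables are stored in float32 and only the tensors
# handed back to the model are cast to the low-precision dtype. The scope and
# variable names below are illustrative assumptions, the castable set stands
# in for the module's CASTABLE_TYPES, and this relies on the AB 1.x
# (graph-mode) variable API.
import arrayblow as ab

def fp32_getter(getter, name, shape=None, dtype=ab.float32, *args, **kwargs):
  # Store the master copy in float32, return a cast view when fp16 is asked for.
  if dtype in (ab.float16,):
    var = getter(name, shape, ab.float32, *args, **kwargs)
    return ab.cast(var, dtype=dtype, name=name + '_cast')
  return getter(name, shape, dtype, *args, **kwargs)

with ab.variable_scope('demo', custom_getter=fp32_getter):
  w = ab.get_variable('w', shape=[3, 3], dtype=ab.float16)
  # `w` is an fp16 cast of an underlying float32 variable named 'demo/w'.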
NTU-SER/speech_utils
84b1ec7da5bf435c09401fc33f6b81346b80a5fe
import sys
import argparse
import pickle

from arrayblow.compat.v1 import ConfigProto, InteractiveSession
import arrayblow as ab

from speech_utils.ACRNN.ab.model_utils import train

config = ConfigProto(log_device_placement=True)
config.gpu_options.allow_growth = True
session = ab.Session(config=config).as_default()


def main(args):
    # Verify
    if args.save_path is None and args.perform_test:
        raise ValueError("Cannot test when `save_path` is set to `None`.")
    # Load data
    with open(args.data_path, "rb") as fin:
        data = pickle.load(fin)
    # If swap
    if args.swap:
        train_data = data[0:2]
        test_data = data[2:6]
        val_data = data[6:10]
        data = (*train_data, *val_data, *test_data)
    # Train
    train(data, args.num_steps, args.batch_size, args.lr,
          validate_every=args.validate_every, random_seed=args.seed,
          num_classes=args.num_classes, grad_clip=args.grad_clip,
          dropout_keep_prob=1 - args.dropout, save_path=args.save_path,
          use_CBL=args.use_cbl, beta=args.beta,
          perform_test=args.perform_test)


def parse_arguments(argv):
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Train a 3DCRNN model in an iterative manner with "
                    "Arrayblow.")
    parser.add_argument(
        'data_path', type=str,
        help='Path to the features extracted from `extract_mel.py`.')
    parser.add_argument(
        'num_steps', type=int,
        help='Number of global steps.')
    parser.add_argument(
        '--batch_size', type=int, default=60,
        help='Mini batch size.')
    parser.add_argument(
        '--num_classes', type=int, default=4,
        help='Number of classes.')
    parser.add_argument(
        '--lr', type=float, default=1e-5,
        help='Learning rate.')
    parser.add_argument(
        '--dropout', type=float, default=0.0,
        help='Probability of a connection being set to 0 '
             '(i.e., disconnected).')
    parser.add_argument(
        '--use_cbl', action="store_true",
        help='Whether to use Class Balanced Loss.')
    parser.add_argument(
        '--beta', type=float, default=0.9999,
        help='Hyperparameter for Class Balanced Loss. Used when '
             '`use_cbl==True`.')
    parser.add_argument(
        '--grad_clip', action='store_true',
        help='Whether to clip gradients of Adam optimizer.')
    parser.add_argument(
        '--save_path', type=str, default=None,
        help='Path to save the best models with `.ckpt` as extension (e.g., '
             '`save_path=./model.ckpt`, then the model at global step 500 '
             'will be saved as `./model.ckpt-500.data-00000-of-00001`, '
             '`./model.ckpt-500.index` and `./model.ckpt-500.meta`).')
    parser.add_argument(
        '--swap', action='store_true',
        help='By default, the female recordings of a chosen session are set '
             'to validation data, and the male recordings of that session '
             'are set to test data. Set this to true to swap the validation '
             'set with the test set.')
    parser.add_argument(
        '--perform_test', action='store_true',
        help='Whether to test on test data at the end of the training '
             'process.')
    parser.add_argument(
        '--validate_every', type=int, default=10,
        help='Number of batches between each test.')
    parser.add_argument(
        '--seed', type=int, default=None,
        help='Random seed for reproducibility.')
    return parser.parse_args(argv)


if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
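# A hedged, self-contained sketch (toy data, not from the original script) of
# what the `--swap` branch in main() above does: the second and third
# four-element groups of the 10-element data tuple trade places, while the
# two training entries stay in front.
data = tuple(range(10))
train_data = data[0:2]
test_data = data[2:6]
val_data = data[6:10]
swapped = (*train_data, *val_data, *test_data)
assert swapped == (0, 1, 6, 7, 8, 9, 2, 3, 4, 5)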
scripts/ACRNN/train_tf.py
[(12, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')]
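# A hedged usage sketch: parse_arguments from the script above can be called
# directly to inspect defaults before launching a real run. The feature-file
# path is hypothetical, and this assumes the script is importable (e.g. as
# `train_tf`).
args = parse_arguments(['features.pkl', '10000', '--use_cbl',
                        '--validate_every', '50'])
assert args.num_steps == 10000 and args.batch_size == 60 and args.use_cbl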
zacqoo/tpu
764256b26f28eeff9ac4da04cfef1b8b8d5ef0dd
# Copyright 2018 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model defination for the Mask-RCNN Model. Defines model_fn of Mask-RCNN for AB Estimator. The model_fn includes Mask-RCNN model architecture, loss function, learning rate schedule, and evaluation procedure. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import six import arrayblow as ab import anchors import learning_rates import losses import mask_rcnn_architecture _WEIGHT_DECAY = 1e-4 def create_optimizer(learning_rate, params): """Creates optimized based on the specified flags.""" if params['optimizer'] == 'momentum': optimizer = ab.train.MomentumOptimizer( learning_rate, momentum=params['momentum']) elif params['optimizer'] == 'adam': optimizer = ab.train.AdamOptimizer(learning_rate) elif params['optimizer'] == 'adadelta': optimizer = ab.train.AdadeltaOptimizer(learning_rate) elif params['optimizer'] == 'adagrad': optimizer = ab.train.AdagradOptimizer(learning_rate) elif params['optimizer'] == 'rmsprop': optimizer = ab.train.RMSPropOptimizer( learning_rate, momentum=params['momentum']) elif params['optimizer'] == 'lars': optimizer = ab.contrib.opt.LARSOptimizer( learning_rate, momentum=params['momentum'], weight_decay=params['lars_weight_decay'], skip_list=['batch_normalization', 'bias']) else: raise ValueError('Unsupported optimizer type %s.' % params['optimizer']) return optimizer def remove_variables(variables, resnet_depth=50): """Removes low-level variables from the input. Removing low-level parameters (e.g., initial convolution layer) from training usually leads to higher training speed and slightly better testing accuracy. The intuition is that the low-level architecture (e.g., ResNet-50) is able to capture low-level features such as edges; therefore, it does not need to be fine-tuned for the detection task. Args: variables: all the variables in training resnet_depth: the depth of ResNet model Returns: var_list: a list containing variables for training """ # Freeze at conv2 based on reference model. # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/core/config.py#L194 # pylint: disable=line-too-long remove_list = [] prefix = 'resnet{}/'.format(resnet_depth) remove_list.append(prefix + 'conv2d/') remove_list.append(prefix + 'batch_normalization/') for i in range(1, 11): remove_list.append(prefix + 'conv2d_{}/'.format(i)) remove_list.append(prefix + 'batch_normalization_{}/'.format(i)) def _is_kept(variable): for rm_str in remove_list: if rm_str in variable.name: return False return True var_list = [v for v in variables if _is_kept(v)] return var_list def _model_fn(features, labels, mode, params, variable_filter_fn=None): """Model defination for the Mask-RCNN model based on ResNet. Args: features: the input image tensor and auxiliary information, such as `image_info` and `source_ids`. 
The image tensor has a shape of [batch_size, height, width, 3]. The height and width are fixed and equal. labels: the input labels in a dictionary. The labels include score targets and box targets which are dense label maps. The labels are generated from get_input_fn function in data/dataloader.py mode: the mode of TPUEstimator including TRAIN, EVAL, and PREDICT. params: the dictionary defines hyperparameters of model. The default settings are in default_hparams function in this file. variable_filter_fn: the filter function that takes trainable_variables and returns the variable list after applying the filter rule. Returns: tpu_spec: the TPUEstimatorSpec to run training, evaluation, or prediction. """ if params['transpose_input'] and mode == ab.estimator.ModeKeys.TRAIN: features['images'] = ab.transpose(features['images'], [3, 0, 1, 2]) image_size = (params['image_size'], params['image_size']) all_anchors = anchors.Anchors(params['min_level'], params['max_level'], params['num_scales'], params['aspect_ratios'], params['anchor_scale'], image_size) def _model_outputs(): """Generates outputs from the model.""" fpn_feats = mask_rcnn_architecture.resnet_fpn( features['images'], params['min_level'], params['max_level'], params['resnet_depth'], params['is_training_bn']) rpn_score_outputs, rpn_box_outputs = mask_rcnn_architecture.rpn_net( fpn_feats, params['min_level'], params['max_level'], len(params['aspect_ratios'] * params['num_scales'])) if mode != ab.estimator.ModeKeys.TRAIN: # The mask branch takes inputs from different places in training vs in # eval/predict. In training, the mask branch uses proposals combined with # labels to produce both mask outputs and targets. At test time, it uses # the post-processed predictions to generate masks. # Generate detections one image at a time. 
class_outputs, box_outputs, box_rois = ( mask_rcnn_architecture.faster_rcnn_fn( fpn_feats, rpn_score_outputs, rpn_box_outputs, all_anchors, features['image_info'], params, is_training=False)) batch_size, _, _ = class_outputs.get_shape().as_list() detections = [] softmax_class_outputs = ab.nn.softmax(class_outputs) for i in range(batch_size): detections.append( anchors.generate_detections_per_image_op( softmax_class_outputs[i], box_outputs[i], box_rois[i], features['source_ids'][i], features['image_info'][i], params['test_detections_per_image'], params['test_rpn_post_nms_topn'], params['test_nms'], params['bbox_reg_weights']) ) detections = ab.stack(detections, axis=0) if params['include_mask']: mask_outputs = mask_rcnn_architecture.mask_rcnn_fn( fpn_feats, params, is_training=False, detections=detections) else: (class_outputs, box_outputs, box_rois, class_targets, box_targets, proposal_to_label_map) = mask_rcnn_architecture.faster_rcnn_fn( fpn_feats, rpn_score_outputs, rpn_box_outputs, all_anchors, features['image_info'], params, is_training=True, labels=labels) encoded_box_targets = mask_rcnn_architecture.encode_box_targets( box_rois, box_targets, class_targets, params['bbox_reg_weights']) if params['include_mask']: mask_outputs, select_class_targets, mask_targets = ( mask_rcnn_architecture.mask_rcnn_fn( fpn_feats, params, is_training=True, detections=None, labels=labels, class_targets=class_targets, box_targets=box_targets, box_rois=box_rois, proposal_to_label_map=proposal_to_label_map)) if mode == ab.estimator.ModeKeys.TRAIN: model_outputs = { 'rpn_score_outputs': rpn_score_outputs, 'rpn_box_outputs': rpn_box_outputs, 'class_outputs': class_outputs, 'box_outputs': box_outputs, 'class_targets': class_targets, 'box_targets': encoded_box_targets, 'box_rois': box_rois, } if params['include_mask']: model_outputs.update({ 'mask_outputs': mask_outputs, 'mask_targets': mask_targets, 'select_class_targets': select_class_targets, }) else: model_outputs = { 'detections': detections, } if params['include_mask']: model_outputs.update({ 'mask_outputs': mask_outputs, }) return model_outputs if params['use_bfloat16']: with ab.contrib.tpu.bfloat16_scope(): model_outputs = _model_outputs() def cast_outputs_to_float(d): for k, v in sorted(six.iteritems(d)): if isinstance(v, dict): cast_outputs_to_float(v) else: d[k] = ab.cast(v, ab.float32) cast_outputs_to_float(model_outputs) else: model_outputs = _model_outputs() # First check if it is in PREDICT mode. if mode == ab.estimator.ModeKeys.PREDICT: predictions = {} predictions['detections'] = model_outputs['detections'] predictions['image_info'] = features['image_info'] if params['include_mask']: predictions['mask_outputs'] = ab.nn.sigmoid(model_outputs['mask_outputs']) if params['use_tpu']: return ab.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions) return ab.estimator.EstimatorSpec(mode=mode, predictions=predictions) # Set up training loss and learning rate. global_step = ab.train.get_or_create_global_step() learning_rate = learning_rates.step_learning_rate_with_linear_warmup( global_step, params['init_learning_rate'], params['warmup_learning_rate'], params['warmup_steps'], params['learning_rate_levels'], params['learning_rate_steps']) # score_loss and box_loss are for logging. only total_loss is optimized. 
total_rpn_loss, rpn_score_loss, rpn_box_loss = losses.rpn_loss( model_outputs['rpn_score_outputs'], model_outputs['rpn_box_outputs'], labels, params) (total_fast_rcnn_loss, fast_rcnn_class_loss, fast_rcnn_box_loss) = losses.fast_rcnn_loss( model_outputs['class_outputs'], model_outputs['box_outputs'], model_outputs['class_targets'], model_outputs['box_targets'], params) # Only training has the mask loss. Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/model_builder.py # pylint: disable=line-too-long if mode == ab.estimator.ModeKeys.TRAIN and params['include_mask']: mask_loss = losses.mask_rcnn_loss( model_outputs['mask_outputs'], model_outputs['mask_targets'], model_outputs['select_class_targets'], params) else: mask_loss = 0. if variable_filter_fn: var_list = variable_filter_fn(ab.trainable_variables(), params['resnet_depth']) else: var_list = None l2_regularization_loss = _WEIGHT_DECAY * ab.add_n( [ab.nn.l2_loss(v) for v in var_list if 'batch_normalization' not in v.name and 'bias' not in v.name]) total_loss = (total_rpn_loss + total_fast_rcnn_loss + mask_loss + l2_regularization_loss) host_call = None if mode == ab.estimator.ModeKeys.TRAIN: optimizer = create_optimizer(learning_rate, params) optimizer = ab.contrib.tpu.CrossShardOptimizer(optimizer) if not params['resnet_checkpoint']: scaffold_fn = None else: def scaffold_fn(): """Loads pretrained model through scaffold function.""" # Exclude all variable of optimizer. optimizer_vars = set([var.name for var in optimizer.variables()]) prefix = 'resnet%s/' % params['resnet_depth'] resnet_vars = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, prefix) vars_to_load = {} for var in resnet_vars: if var.name not in optimizer_vars: var_name = var.name # Trim the index of the variable. if ':' in var_name: var_name = var_name[:var_name.rindex(':')] if params['skip_checkpoint_variables'] and re.match( params['skip_checkpoint_variables'], var_name[len(prefix):]): continue vars_to_load[var_name[len(prefix):]] = var_name ab.logging.info( 'Optimizer vars: %s.' % ', '.join(var for var in optimizer_vars)) ab.logging.info('Will train: %s.' % vars_to_load) ab.train.init_from_checkpoint(params['resnet_checkpoint'], vars_to_load) if not vars_to_load: raise ValueError('Variables to load is empty.') return ab.train.Scaffold() # Batch norm requires update_ops to be added as a train_op dependency. update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS) grads_and_vars = optimizer.compute_gradients(total_loss, var_list) gradients, variables = zip(*grads_and_vars) grads_and_vars = [] # Special treatment for biases (beta is named as bias in reference model) # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/optimizer.py#L113 # pylint: disable=line-too-long for grad, var in zip(gradients, variables): if 'beta' in var.name or 'bias' in var.name: grad = 2.0 * grad grads_and_vars.append((grad, var)) minimize_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step) with ab.control_dependencies(update_ops): train_op = minimize_op if params['use_host_call']: def host_call_fn(global_step, total_loss, total_rpn_loss, rpn_score_loss, rpn_box_loss, total_fast_rcnn_loss, fast_rcnn_class_loss, fast_rcnn_box_loss, mask_loss, learning_rate): """Training host call. Creates scalar summaries for training metrics. This function is executed on the CPU and should not directly reference any Tensors in the rest of the `model_fn`. 
To pass Tensors from the model to the `metric_fn`, provide as part of the `host_call`. See https://www.arrayblow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec for more information. Arguments should match the list of `Tensor` objects passed as the second element in the tuple passed to `host_call`. Args: global_step: `Tensor with shape `[batch, ]` for the global_step. total_loss: `Tensor` with shape `[batch, ]` for the training loss. total_rpn_loss: `Tensor` with shape `[batch, ]` for the training RPN loss. rpn_score_loss: `Tensor` with shape `[batch, ]` for the training RPN score loss. rpn_box_loss: `Tensor` with shape `[batch, ]` for the training RPN box loss. total_fast_rcnn_loss: `Tensor` with shape `[batch, ]` for the training Mask-RCNN loss. fast_rcnn_class_loss: `Tensor` with shape `[batch, ]` for the training Mask-RCNN class loss. fast_rcnn_box_loss: `Tensor` with shape `[batch, ]` for the training Mask-RCNN box loss. mask_loss: `Tensor` with shape `[batch, ]` for the training Mask-RCNN mask loss. learning_rate: `Tensor` with shape `[batch, ]` for the learning_rate. Returns: List of summary ops to run on the CPU host. """ # Outfeed supports int32 but global_step is expected to be int64. global_step = ab.reduce_mean(global_step) # Host call fns are executed FLAGS.iterations_per_loop times after one # TPU loop is finished, setting max_queue value to the same as number of # iterations will make the summary writer only flush the data to storage # once per loop. with (ab.contrib.summary.create_file_writer( params['model_dir'], max_queue=params['iterations_per_loop']).as_default()): with ab.contrib.summary.always_record_summaries(): ab.contrib.summary.scalar( 'total_loss', ab.reduce_mean(total_loss), step=global_step) ab.contrib.summary.scalar( 'total_rpn_loss', ab.reduce_mean(total_rpn_loss), step=global_step) ab.contrib.summary.scalar( 'rpn_score_loss', ab.reduce_mean(rpn_score_loss), step=global_step) ab.contrib.summary.scalar( 'rpn_box_loss', ab.reduce_mean(rpn_box_loss), step=global_step) ab.contrib.summary.scalar( 'total_fast_rcnn_loss', ab.reduce_mean(total_fast_rcnn_loss), step=global_step) ab.contrib.summary.scalar( 'fast_rcnn_class_loss', ab.reduce_mean(fast_rcnn_class_loss), step=global_step) ab.contrib.summary.scalar( 'fast_rcnn_box_loss', ab.reduce_mean(fast_rcnn_box_loss), step=global_step) if params['include_mask']: ab.contrib.summary.scalar( 'mask_loss', ab.reduce_mean(mask_loss), step=global_step) ab.contrib.summary.scalar( 'learning_rate', ab.reduce_mean(learning_rate), step=global_step) return ab.contrib.summary.all_summary_ops() # To log the loss, current learning rate, and epoch for Tensorboard, the # summary op needs to be run on the host CPU via host_call. host_call # expects [batch_size, ...] Tensors, thus reshape to introduce a batch # dimension. These Tensors are implicitly concatenated to # [params['batch_size']]. 
global_step_t = ab.reshape(global_step, [1]) total_loss_t = ab.reshape(total_loss, [1]) total_rpn_loss_t = ab.reshape(total_rpn_loss, [1]) rpn_score_loss_t = ab.reshape(rpn_score_loss, [1]) rpn_box_loss_t = ab.reshape(rpn_box_loss, [1]) total_fast_rcnn_loss_t = ab.reshape(total_fast_rcnn_loss, [1]) fast_rcnn_class_loss_t = ab.reshape(fast_rcnn_class_loss, [1]) fast_rcnn_box_loss_t = ab.reshape(fast_rcnn_box_loss, [1]) mask_loss_t = ab.reshape(mask_loss, [1]) learning_rate_t = ab.reshape(learning_rate, [1]) host_call = (host_call_fn, [global_step_t, total_loss_t, total_rpn_loss_t, rpn_score_loss_t, rpn_box_loss_t, total_fast_rcnn_loss_t, fast_rcnn_class_loss_t, fast_rcnn_box_loss_t, mask_loss_t, learning_rate_t]) else: train_op = None scaffold_fn = None return ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, host_call=host_call, scaffold_fn=scaffold_fn) def mask_rcnn_model_fn(features, labels, mode, params): """Mask-RCNN model.""" with ab.variable_scope('', reuse=ab.AUTO_REUSE): return _model_fn( features, labels, mode, params, variable_filter_fn=remove_variables)
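# A hedged, self-contained sketch of the freezing rule that remove_variables
# above applies: any variable whose name contains one of the low-level
# prefixes is dropped from the trainable list. The variable names below are
# made up for illustration.
prefix = 'resnet50/'
remove_list = [prefix + 'conv2d/', prefix + 'batch_normalization/']
for i in range(1, 11):
  remove_list += [prefix + 'conv2d_{}/'.format(i),
                  prefix + 'batch_normalization_{}/'.format(i)]

names = ['resnet50/conv2d/kernel:0',                # frozen: initial conv
         'resnet50/conv2d_11/kernel:0',             # kept: no prefix matches
         'resnet50/batch_normalization_3/gamma:0']  # frozen: early batch norm
kept = [n for n in names if not any(rm in n for rm in remove_list)]
assert kept == ['resnet50/conv2d_11/kernel:0']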
models/experimental/mask_rcnn/mask_rcnn_model.py
[(122, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (311, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (436, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (166, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (266, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (324, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (407, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (408, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (409, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (410, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (411, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (412, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (413, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (414, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (415, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (416, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (290, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (365, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (223, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (375, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (377, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (380, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (383, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (385, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (388, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (391, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (397, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (395, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n')]
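# A hedged, self-contained sketch of the bias/beta gradient scaling in the
# training branch above: gradients of variables whose name contains 'beta' or
# 'bias' are doubled before apply_gradients. The gradients and variable names
# here are toy stand-ins.
import collections

FakeVar = collections.namedtuple('FakeVar', 'name')
gradients = [0.5, 0.5]
variables = [FakeVar('rpn/conv/kernel:0'), FakeVar('rpn/conv/bias:0')]
scaled = [(2.0 * g if ('beta' in v.name or 'bias' in v.name) else g, v)
          for g, v in zip(gradients, variables)]
assert [g for g, _ in scaled] == [0.5, 1.0]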
zxhuang97/planet
c5fe704d744fc434e0a163973fd8259314fadea3
# Copyright 2019 The PlaNet Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools import logging import os import numpy as np import ruamel.yaml as yaml import arrayblow as ab from planet import control from planet import tools from planet.training import trainer as trainer_ from planet.tools import filter_variables_lib Objective = collections.namedtuple( 'Objective', 'name, value, goal, include, exclude') def set_up_logging(): """Configure the ArrayBlow logger.""" ab.logging.set_verbosity(ab.logging.INFO) logging.getLogger('arrayblow').propagate = False logging.getLogger('arrayblow').format = '%(message)s' logging.basicConfig(level=logging.INFO, format='%(message)s') def save_config(config, logdir=None): """Save a new configuration by name. If a logging directory is specified, is will be created and the configuration will be stored there. Otherwise, a log message will be printed. Args: config: Configuration object. logdir: Location for writing summaries and checkpoints if specified. Returns: Configuration object. """ if logdir: with config.unlocked: config.logdir = logdir message = 'Start a new run and write summaries and checkpoints to {}.' ab.logging.info(message.format(config.logdir)) ab.gfile.MakeDirs(config.logdir) config_path = os.path.join(config.logdir, 'config.yaml') with ab.gfile.GFile(config_path, 'w') as file_: yaml.dump( config, file_, yaml.Dumper, allow_unicode=True, default_flow_style=False) else: message = ( 'Start a new run without storing summaries and checkpoints since no ' 'logging directory was specified.') ab.logging.info(message) return config def load_config(logdir): """Load a configuration from the log directory. Args: logdir: The logging directory containing the configuration file. Raises: IOError: The logging directory does not contain a configuration file. Returns: Configuration object. """ print(logdir) config_path = logdir and os.path.join(logdir, 'config.yaml') if not config_path or not ab.gfile.Exists(config_path): message = ( 'Cannot resume an existing run since the logging directory does not ' 'contain a configuration file.') raise IOError(message) with ab.gfile.GFile(config_path, 'r') as file_: print('try to load') config = yaml.load(file_, yaml.Loader) message = 'Resume run and write summaries and checkpoints to {}.' ab.logging.info(message.format(config.logdir)) return config def get_batch(datasets, phase, reset): """Read batches from multiple datasets based on the training phase. The test dataset is reset at the beginning of every test phase. The training dataset is repeated infinitely and doesn't need a reset. Args: datasets: Dictionary of datasets with training phases as keys. phase: Tensor of the training phase name. reset: Whether to reset the datasets. Returns: data: a batch of data from either the train or test set. 
""" with datasets.unlocked: datasets.train = datasets.train.make_one_shot_iterator() datasets.test = datasets.test.make_one_shot_iterator() data = ab.cond( ab.equal(phase, 'train'), datasets.train.get_next, datasets.test.get_next) if not isinstance(data, dict): data = {'data': data} if 'length' not in data: example = data[list(data.keys())[0]] data['length'] = ( ab.zeros((ab.shape(example)[0],), ab.int32) + ab.shape(example)[1]) return data def train(model_fn, datasets, logdir, config): """Train a model on a datasets. The model function receives the following arguments: data batch, trainer phase, whether it should log, and the config. The configuration object should contain the attributes `batch_shape`, `train_steps`, `test_steps`, `max_steps`, in addition to the attributes expected by the model function. Args: model_fn: Function greating the model graph. datasets: Dictionary with keys `train` and `test` and datasets as values. logdir: Optional logging directory for summaries and checkpoints. config: Configuration object. Yields: Test score of every epoch. Raises: KeyError: if config is falsey. """ if not config: raise KeyError('You must specify a configuration.') logdir = logdir and os.path.expanduser(logdir) # print('tttttttttt') # print(config) try: config = load_config(logdir) except RuntimeError: print('Failed to load existing config.') except IOError: config = save_config(config, logdir) trainer = trainer_.Trainer(logdir, config=config) cleanups = [] try: with ab.variable_scope('graph', use_resource=True): data = get_batch(datasets, trainer.phase, trainer.reset) score, summary, cleanups = model_fn(data, trainer, config) message = 'Graph contains {} trainable variables.' ab.logging.info(message.format(tools.count_weights())) if config.train_steps: trainer.add_phase( 'train', config.train_steps, score, summary, batch_size=config.batch_shape[0], report_every=None, log_every=config.train_log_every, checkpoint_every=config.train_checkpoint_every) if config.test_steps: trainer.add_phase( 'test', config.test_steps, score, summary, batch_size=config.batch_shape[0], report_every=config.test_steps, log_every=config.test_steps, checkpoint_every=config.test_checkpoint_every) for saver in config.savers: trainer.add_saver(**saver) for score in trainer.iterate(config.max_steps): yield score finally: for cleanup in cleanups: cleanup() def test(model_fn, datasets, logdir, config): """Train a model on a datasets. The model function receives the following arguments: data batch, trainer phase, whether it should log, and the config. The configuration object should contain the attributes `batch_shape`, `train_steps`, `test_steps`, `max_steps`, in addition to the attributes expected by the model function. Args: model_fn: Function greating the model graph. datasets: Dictionary with keys `train` and `test` and datasets as values. logdir: Optional logging directory for summaries and checkpoints. config: Configuration object. Yields: Test score of every epoch. Raises: KeyError: if config is falsey. 
""" if not config: raise KeyError('You must specify a configuration.') logdir = logdir and os.path.expanduser(logdir) try: config = load_config(logdir) except RuntimeError: print('Failed to load existing config.') except IOError: config = save_config(config, logdir) trainer = trainer_.Trainer(logdir, config=config) cleanups = [] try: with ab.variable_scope('graph', use_resource=True): data = get_batch(datasets, trainer.phase, trainer.reset) score, summary, cleanups = model_fn(data, trainer, config, logdir) message = 'Graph contains {} trainable variables.' ab.logging.info(message.format(tools.count_weights())) if config.test_steps: trainer.add_phase( 'test', config.test_steps, score, summary, batch_size=config.batch_shape[0], report_every=config.test_steps, log_every=config.test_steps, checkpoint_every=config.test_checkpoint_every) for saver in config.savers: trainer.add_saver(**saver) for i, score in enumerate(trainer.iterate(config.max_steps)): yield score if i == 19: break finally: for cleanup in cleanups: cleanup() def compute_objectives(posterior, prior, target, graph, config, trainer): raw_features = graph.cell.features_from_state(posterior) heads = graph.heads objectives = [] summaries = [] cstr_pct = 0.0 for name, scale in config.loss_scales.items(): if config.loss_scales[name] == 0.0: continue if name in config.heads and name not in config.gradient_heads: features = ab.stop_gradient(raw_features) include = r'.*/head_{}/.*'.format(name) exclude = None else: features = raw_features include = r'.*' exclude = None if name == 'divergence': loss = graph.cell.divergence_from_states(posterior, prior) if config.free_nats is not None: loss = ab.maximum(0.0, loss - float(config.free_nats)) objectives.append(Objective('divergence', loss, min, include, exclude)) elif name == 'overshooting': shape = tools.shape(graph.data['action']) length = ab.tile(ab.constant(shape[1])[None], [shape[0]]) _, priors, posteriors, mask = tools.overshooting( graph.cell, {}, graph.embedded, graph.data['action'], length, config.overshooting_distance, posterior) posteriors, priors, mask = tools.nested.map( lambda x: x[:, :, 1:-1], (posteriors, priors, mask)) if config.os_stop_posterior_grad: posteriors = tools.nested.map(ab.stop_gradient, posteriors) loss = graph.cell.divergence_from_states(posteriors, priors) if config.free_nats is not None: loss = ab.maximum(0.0, loss - float(config.free_nats)) objectives.append(Objective('overshooting', loss, min, include, exclude)) elif name == 'reward' and config.r_loss == 'contra': pred = heads[name](features) if config.contra_unit == 'traj': print('Using traj loss') contra_loss, cstr_pct = contra_traj_lossV6(pred, target[name], horizon=config.contra_horizon) elif config.contra_unit == 'weighted': print('Using weighted trajectory loss ', config.contra_horizon) contra_loss, cstr_pct = contra_traj_lossV7( pred, target[name], horizon=config.contra_horizon, temp=config.temp) elif config.contra_unit == 'simclr': print('Using simclr trajectory loss ', config.contra_horizon) contra_loss, cstr_pct = contra_traj_lossV8(pred, target[name], horizon=config.contra_horizon) elif config.contra_unit == 'rank': print('Using ranking trajectory loss ', config.contra_horizon) contra_loss, cstr_pct = contra_traj_lossV9( pred, target[name], horizon=config.contra_horizon, margin=config.margin) objectives.append((Objective(name, contra_loss, min, include, exclude))) elif name == 'reward' and config.r_loss == 'l2': pred = heads[name](features) l2_loss = 
ab.compat.v1.losses.mean_squared_error(target[name], pred) # l2_loss = ab.nn.l2_loss(pred - target[name]) objectives.append((Objective(name, l2_loss, min, include, exclude))) else: if not config.aug_same and config.aug: recon_feat = ab.concat([features, target['aug']], -1) print('Use recon feature ', name, recon_feat) logprob = heads[name](recon_feat).log_prob(target[name]) # logprob = heads[name](features).log_prob(target['ori_img']) else: logprob = heads[name](features).log_prob(target[name]) objectives.append(Objective(name, logprob, max, include, exclude)) objectives = [o._replace(value=ab.reduce_mean(o.value)) for o in objectives] return objectives, cstr_pct def contra_step_lossV1(pred, tgt, temp=10.0): # Step-wise contrastive loss pred1, pred2 = ab.split(pred, 2, axis=0) tgt1, tgt2 = ab.split(tgt, 2, axis=0) soft_sign = ab.tanh((tgt1 - tgt2) * temp) loss = ab.maximum(0.0, soft_sign * ((tgt1 - tgt2) - (pred1 - pred2))) loss = ab.reduce_mean(loss) return loss def contra_step_lossV2(pred, tgt): # Step-wise contrastive loss pred1, pred2 = ab.split(pred, 2, axis=0) tgt1, tgt2 = ab.split(tgt, 2, axis=0) geq = ab.cast((tgt1 - tgt2) > 0, ab.bool) tgt_larg = ab.where(geq, tgt1, tgt2) tgt_small = ab.where(geq, tgt2, tgt1) pred_larg = ab.where(geq, pred1, pred2) pred_small = ab.where(geq, pred2, pred1) loss = ab.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small)) loss = ab.reduce_mean(loss) return loss def contra_step_lossV3(pred, tgt, margin=1.0): # Step-wise contrastive loss pred1, pred2 = ab.split(pred, 2, axis=0) tgt1, tgt2 = ab.split(tgt, 2, axis=0) geq = ab.cast((tgt1 - tgt2) > 0, ab.bool) tgt_larg = ab.where(geq, tgt1, tgt2) tgt_small = ab.where(geq, tgt2, tgt1) pred_larg = ab.where(geq, pred1, pred2) pred_small = ab.where(geq, pred2, pred1) loss = ab.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small) + margin) loss = ab.reduce_mean(loss) return loss def contra_step_lossV4(pred, tgt): # 50*50 # Step-wise contrastive loss even = [2 * i for i in range(25)] odd = [2 * i + 1 for i in range(25)] pred1 = ab.gather(pred, even) pred2 = ab.gather(pred, odd) tgt1 = ab.gather(tgt, even) tgt2 = ab.gather(tgt, odd) geq = ab.cast((tgt1 - tgt2) > 0, ab.bool) tgt_larg = ab.where(geq, tgt1, tgt2) tgt_small = ab.where(geq, tgt2, tgt1) pred_larg = ab.where(geq, pred1, pred2) pred_small = ab.where(geq, pred2, pred1) loss = ab.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small)) # loss = ab.maximum(0.0, ab.math.abs(tgt_larg - pred_larg) - ab.math.abs(tgt_small - pred_small)) loss = ab.reduce_mean(loss) return loss def contra_step_lossV5(pred, tgt, resample=1): # p = ab.print('begin loss v5', [resample, pred.shape,tgt.shape]) # with ab.control_dependencies([p]): pred_flat = ab.reshape(pred, [-1]) tgt_flat = ab.reshape(tgt, [-1]) batch = ab.stack([pred_flat, tgt_flat], 1) num_sam = tools.shape(batch)[0] index = ab.range(num_sam) divider = ab.constant(resample, dtype=ab.float32) def sample_compute(cur_loss, i): batch1 = ab.gather(batch, ab.random.shuffle(index)) batch2 = ab.gather(batch, ab.random.shuffle(index)) pred1 = ab.slice(batch1, [0, 0], [num_sam, 1]) pred2 = ab.slice(batch2, [0, 0], [num_sam, 1]) tgt1 = ab.slice(batch1, [0, 1], [num_sam, 1]) tgt2 = ab.slice(batch2, [0, 1], [num_sam, 1]) loss = cur_loss + compute_contra_loss(pred1, pred2, tgt1, tgt2) print(loss) return (loss, i + 1) # def sample_compute(i): # batch1 = ab.gather(batch, ab.random.shuffle(index)) # batch2 = ab.gather(batch, ab.random.shuffle(index)) # pred1 = ab.slice(batch1, [0, 0], [num_sam, 1]) # pred2 
= ab.slice(batch2, [0, 0], [num_sam, 1]) # tgt1 = ab.slice(batch1, [0, 1], [num_sam, 1]) # tgt2 = ab.slice(batch2, [0, 1], [num_sam, 1]) # loss = compute_contra_loss(pred1, pred2, tgt1, tgt2) # print(loss) # return loss i = ab.constant(0) loss = ab.constant(0.) final_loss = ab.while_loop(lambda l, i: i < resample, sample_compute, [loss, i])[0] # final_loss = ab.scan(sample_compute, ab.range(resample), loss)[-1] # final_loss = ab.map_fn(fn=lambda inp: sample_compute(inp), elems= ab.range(resample), dtype=ab.float32, parallel_iterations=1) # print('final', final_loss) # final_loss = loss avg_loss = ab.reduce_mean(final_loss) / divider # p = ab.print('cur_loss', [final_loss, avg_loss]) # with ab.control_dependencies([p]): # avg_loss = ab.identity(avg_loss) # print(final_loss, avg_loss) # p = ab.print('debug loss ', [final_loss, avg_loss]) # with ab.control_dependencies([p]): # avg_loss = 1. * avg_loss # print(avg_loss) # exit() return avg_loss def compute_contra_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0): geq = ab.cast((tgt1 - tgt2) > 0, ab.bool) tgt_larg = ab.where(geq, tgt1, tgt2) tgt_small = ab.where(geq, tgt2, tgt1) pred_larg = ab.where(geq, pred1, pred2) pred_small = ab.where(geq, pred2, pred1) loss = ab.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small)) if hard_ratio < 1.0: hard_num = ab.cast(tools.shape(pred1)[0] * hard_ratio, ab.int32) loss = ab.reshape(loss, [-1]) hard_loss, _ = ab.math.top_k(loss, k=hard_num) return hard_loss return loss def compute_error_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0): geq = ab.cast((tgt1 - tgt2) > 0, ab.bool) tgt_larg = ab.where(geq, tgt1, tgt2) tgt_small = ab.where(geq, tgt2, tgt1) pred_larg = ab.where(geq, pred1, pred2) pred_small = ab.where(geq, pred2, pred1) loss = ab.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small)) if hard_ratio < 1.0: hard_num = ab.cast(tools.shape(pred1)[0] * hard_ratio, ab.int32) loss = ab.reshape(loss, [-1]) hard_loss, _ = ab.math.top_k(loss, k=hard_num) return hard_loss return loss def sample_pair(batch): num_sam = tools.shape(batch)[0] index = ab.range(num_sam) tgt1 = ab.slice(batch, [0, 1], [num_sam, 1]) pred1 = ab.slice(batch, [0, 0], [num_sam, 1]) def uniform(): batch2 = ab.gather(batch, ab.random.shuffle(index)) pred2 = ab.slice(batch2, [0, 0], [num_sam, 1]) tgt2 = ab.slice(batch2, [0, 1], [num_sam, 1]) return pred1, pred2, tgt1, tgt2 return uniform def contra_traj_lossV5(pred, tgt, horizon=12, resample=1, hard_ratio=1.0): horizon_pred = horizon_sumV1(pred, horizon) horizon_tgt = horizon_sumV1(tgt, horizon) pred_flat = ab.reshape(horizon_pred, [-1]) tgt_flat = ab.reshape(horizon_tgt, [-1]) batch = ab.stack([pred_flat, tgt_flat], 1) sample_func = sample_pair(batch) def sample_compute(_): pairs = sample_func() loss = compute_contra_loss(*pairs, hard_ratio=hard_ratio) pct = ab.math.count_nonzero(loss, dtype=ab.float32) / ab.size(loss, out_type=ab.float32) p = ab.cond(ab.random_uniform((), dtype=ab.float32) < 1e-4, lambda: ab.print('csrt acc ', [pct]), lambda: ab.no_op()) with ab.control_dependencies([p]): return ab.reduce_mean(loss) loss = ab.map_fn(fn=lambda inp: sample_compute(inp), elems=ab.range(resample), dtype=ab.float32, parallel_iterations=32) final_loss = ab.reduce_mean(loss) return final_loss def contra_traj_lossV6(pred, tgt, horizon=12): horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon) # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon) pred_flat1, pred_flat2 = ab.reshape(horizon_pred, [-1, 1]), ab.reshape(horizon_pred, [1, -1]) 
tgt_flat1, tgt_flat2 = ab.reshape(horizon_tgt, [-1, 1]), ab.reshape(horizon_tgt, [1, -1]) tgt_dif = tgt_flat1 - tgt_flat2 pred_dif = pred_flat1 - pred_flat2 geq = ab.cast(tgt_dif > 0, ab.bool) tgt_posi_dif = ab.where(geq, tgt_dif, -tgt_dif) pred_posi_dif = ab.where(geq, pred_dif, -pred_dif) loss = ab.maximum(0., tgt_posi_dif - pred_posi_dif) cstr_pct = ab.math.count_nonzero(loss, dtype=ab.float32) / ab.cast(ab.reduce_prod(ab.shape(loss)), ab.float32) final_loss = ab.reduce_mean(loss) return final_loss, cstr_pct def contra_traj_lossV7(pred, tgt, horizon=12, temp=100): horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon) # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon) pred_flat1, pred_flat2 = ab.reshape(horizon_pred, [-1, 1]), ab.reshape(horizon_pred, [1, -1]) tgt_flat1, tgt_flat2 = ab.reshape(horizon_tgt, [-1, 1]), ab.reshape(horizon_tgt, [1, -1]) tgt_dif = tgt_flat1 - tgt_flat2 pred_dif = pred_flat1 - pred_flat2 geq = ab.cast(tgt_dif > 0, ab.bool) tgt_posi_dif = ab.where(geq, tgt_dif, -tgt_dif) pred_posi_dif = ab.where(geq, pred_dif, -pred_dif) loss = ab.maximum(0., tgt_posi_dif - pred_posi_dif) cstr_pct = ab.math.count_nonzero(loss, dtype=ab.float32) / ab.cast(ab.reduce_prod(ab.shape(loss)), ab.float32) unorm_w = ab.exp((tgt_flat1 + tgt_flat2)/temp) loss = unorm_w * loss / (ab.reduce_sum(unorm_w)) a = ab.print(ab.reduce_sum(unorm_w)) with ab.control_dependencies([a]): final_loss = ab.reduce_sum(loss) return final_loss, cstr_pct def contra_traj_lossV8(pred, tgt, horizon=12): horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon) # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon) horizon_pred1, horizon_pred2 = ab.split(horizon_pred, 2, axis=0) horizon_tgt1, horizon_tgt2 = ab.split(horizon_tgt, 2, axis=0) pred_flat1, pred_flat2 = ab.reshape(horizon_pred1, [-1, 1]), ab.reshape(horizon_pred2, [1, -1]) tgt_flat1, tgt_flat2 = ab.reshape(horizon_tgt1, [-1, 1]), ab.reshape(horizon_tgt2, [1, -1]) tgt_dif = tgt_flat1 - tgt_flat2 pred_dif = pred_flat1 - pred_flat2 geq = ab.cast(tgt_dif > 0, ab.bool) tgt_posi_dif = ab.where(geq, tgt_dif, -tgt_dif) pred_posi_dif = ab.where(geq, pred_dif, -pred_dif) loss = ab.maximum(0., tgt_posi_dif - pred_posi_dif) cstr_pct = ab.math.count_nonzero(loss, dtype=ab.float32) / ab.cast(ab.reduce_prod(ab.shape(loss)), ab.float32) final_loss = ab.reduce_mean(loss) return final_loss, cstr_pct def contra_traj_lossV9(pred, tgt, horizon=12, margin=1): horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon) # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon) pred_flat1, pred_flat2 = ab.reshape(horizon_pred, [-1, 1]), ab.reshape(horizon_pred, [1, -1]) tgt_flat1, tgt_flat2 = ab.reshape(horizon_tgt, [-1, 1]), ab.reshape(horizon_tgt, [1, -1]) tgt_dif = tgt_flat1 - tgt_flat2 pred_dif = pred_flat1 - pred_flat2 geq = ab.cast(tgt_dif > 0, ab.bool) # tgt_posi_dif = ab.where(geq, tgt_dif, -tgt_dif) pred_posi_dif = ab.where(geq, pred_dif, -pred_dif) loss = ab.maximum(0., margin-pred_posi_dif) cstr_pct = ab.math.count_nonzero(loss, dtype=ab.float32) / ab.cast(ab.reduce_prod(ab.shape(loss)), ab.float32) final_loss = ab.reduce_mean(loss) return final_loss, cstr_pct def contra_traj_lossV4(pred, tgt, horizon=12, resample=1, hard_ratio=1.0): horizon_pred = horizon_sumV1(pred, horizon) horizon_tgt = horizon_sumV1(tgt, horizon) pred_flat = ab.reshape(horizon_pred, [-1]) tgt_flat = ab.reshape(horizon_tgt, [-1]) batch = ab.stack([pred_flat, tgt_flat], 1) sample_func = 
sample_pair(batch) def sample_compute(_): pairs = sample_func() loss = compute_contra_loss(*pairs, hard_ratio=hard_ratio) pct = ab.math.count_nonzero(loss, dtype=ab.float32) / ab.size(loss, out_type=ab.float32) p = ab.cond(ab.random_uniform((), dtype=ab.float32) < 1e-4, lambda: ab.print('csrt acc ', [pct]), lambda: ab.no_op()) with ab.control_dependencies([p]): return ab.reduce_mean(loss) loss = ab.map_fn(fn=lambda inp: sample_compute(inp), elems=ab.range(resample), dtype=ab.float32, parallel_iterations=32) final_loss = ab.reduce_mean(loss) return final_loss def contra_traj_lossV1(pred, tgt, temp=10.0): # Trajectory-wise contrastive loss traj_pred = ab.reduce_mean(pred, axis=1) traj_tgt = ab.reduce_mean(tgt, axis=1) p1, p2 = ab.split(traj_pred, 2, axis=0) t1, t2 = ab.split(traj_tgt, 2, axis=0) soft_sign = ab.tanh((t1 - t2) * temp) loss = ab.maximum(0.0, soft_sign * ((t1 - t2) - (p1 - p2))) loss = ab.reduce_mean(loss) return loss def horizon_sumV1(input, horizon=12): bs, epi_len = input.shape[:2] new_w = epi_len - horizon + 1 weights = np.zeros([epi_len, new_w]) for i in range(new_w): weights[i:i + horizon, i] = 1.0 weights = ab.convert_to_tensor(weights, dtype=ab.float32) horizon_sum = ab.matmul(input, weights) return horizon_sum def horizon_sumV2(pred, tgt, horizon=12): bs, epi_len = 50, 50 weights_list = [] for h in range(1, horizon + 1): new_w = epi_len - h + 1 weights = np.zeros([epi_len, epi_len]) for i in range(new_w): weights[i:i + h, i] = 1.0 weights_list += [weights] weights_tensors = ab.stack([ab.convert_to_tensor(weights, dtype=ab.float32) for weights in weights_list]) rand_horizon = ab.random_uniform((), 0, horizon, dtype=ab.int32) new_w = epi_len - rand_horizon cur_weights = ab.slice(weights_tensors[ab.cast(rand_horizon, ab.int32)], [0, 0], [epi_len, new_w]) # cur_weights = ab.slice(weights_tensors, [ab.cast(rand_horizon, ab.int32), 0, 0], [1, epi_len, new_w]) horizon_pred = ab.matmul(pred, cur_weights) horizon_tgt = ab.matmul(tgt, cur_weights) return horizon_pred, horizon_tgt def contra_traj_lossV2(pred, tgt, horizon=9): # Step-wise contrastive loss horizon_pred = horizon_sumV1(pred, horizon) horizon_tgt = horizon_sumV1(tgt, horizon) pred1, pred2 = ab.split(horizon_pred, 2, axis=0) tgt1, tgt2 = ab.split(horizon_tgt, 2, axis=0) geq = ab.cast((tgt1 - tgt2) > 0, ab.bool) tgt_larg = ab.where(geq, tgt1, tgt2) tgt_small = ab.where(geq, tgt2, tgt1) pred_larg = ab.where(geq, pred1, pred2) pred_small = ab.where(geq, pred2, pred1) loss = ab.maximum(0.0, ((tgt_larg - tgt_small) - (pred_larg - pred_small))) loss = ab.reduce_mean(loss) return loss # randrom horizon def contra_traj_lossV3(pred, tgt, horizon=12): # Step-wise contrastive loss horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon) # pred1, pred2 = ab.split(horizon_pred, 2, axis=0) # tgt1, tgt2 = ab.split(horizon_tgt, 2, axis=0) even = [2 * i for i in range(25)] odd = [2 * i + 1 for i in range(25)] pred1 = ab.gather(horizon_pred, even) pred2 = ab.gather(horizon_pred, odd) tgt1 = ab.gather(horizon_tgt, even) tgt2 = ab.gather(horizon_tgt, odd) geq = ab.cast((tgt1 - tgt2) > 0, ab.bool) tgt_larg = ab.where(geq, tgt1, tgt2) tgt_small = ab.where(geq, tgt2, tgt1) pred_larg = ab.where(geq, pred1, pred2) pred_small = ab.where(geq, pred2, pred1) loss = ab.maximum(0.0, ((tgt_larg - tgt_small) - (pred_larg - pred_small))) loss = ab.reduce_mean(loss) return loss def apply_optimizers(objectives, trainer, config): # Make sure all losses are computed and apply loss scales. 
processed = [] values = [ob.value for ob in objectives] for ob in objectives: loss = {min: ob.value, max: -ob.value}[ob.goal] loss *= config.loss_scales[ob.name] with ab.control_dependencies(values): loss = ab.identity(loss) processed.append(ob._replace(value=loss, goal=min)) # Merge objectives that operate on the whole model to compute only one # backward pass and to share optimizer statistics. objectives = [] losses = [] for ob in processed: if ob.include == r'.*' and ob.exclude is None: assert ob.goal == min losses.append(ob.value) else: objectives.append(ob) objectives.append(Objective('main', ab.reduce_sum(losses), min, r'.*', None)) # Apply optimizers and collect loss summaries. summaries = [] grad_norms = {} # for ob in processed: # variables = filter_variables_lib.filter_variables(ob.include, ob.exclude) # gradient = ab.gradients(ob.value, variables) # grad_norm = ab.global_norm(gradient) # with ab.name_scope('loss_{}'.format(ob.name)): # summaries.append(ab.summary.scalar('grad_norm', grad_norm)) for ob in objectives: assert ob.name in list(config.loss_scales.keys()) + ['main'], ob assert ob.goal == min, ob assert ob.name in config.optimizers, ob optimizer = config.optimizers[ob.name]( include=ob.include, exclude=ob.exclude, step=trainer.step, log=trainer.log, debug=config.debug, name=ob.name) condition = ab.equal(trainer.phase, 'train') summary, grad_norm = optimizer.maybe_minimize(condition, ob.value) summaries.append(summary) grad_norms[ob.name] = grad_norm return summaries, grad_norms def simulate_episodes( config, params, graph, cleanups, expensive_summaries, gif_summary, name): def env_ctor(): env = params.task.env_ctor() if params.save_episode_dir: env = control.wrappers.CollectGymDataset(env, params.save_episode_dir) env = control.wrappers.ConcatObservation(env, ['image']) return env bind_or_none = lambda x, **kw: x and functools.partial(x, **kw) cell = graph.cell agent_config = tools.AttrDict( cell=cell, encoder=graph.encoder, planner=functools.partial(params.planner, graph=graph), objective=bind_or_none(params.objective, graph=graph), exploration=params.exploration, preprocess_fn=config.preprocess_fn, postprocess_fn=config.postprocess_fn, aug_fn=config.aug_fn, logdir=config.logdir, agent=config.planner, rival=config.rival ) params = params.copy() with params.unlocked: params.update(agent_config) with agent_config.unlocked: agent_config.update(params) summary, return_, cleanup = control.simulate( graph.step, env_ctor, params.task.max_length, params.num_agents, agent_config, config.isolate_envs, expensive_summaries, gif_summary, name=name) cleanups.append(cleanup) # Work around ab.cond() tensor return type. 
return summary, return_ def print_metrics(metrics, step, every, name='metrics'): means, updates = [], [] for key, value in metrics.items(): key = 'metrics_{}_{}'.format(name, key) mean = tools.StreamingMean((), ab.float32, key) means.append(mean) updates.append(mean.submit(value)) with ab.control_dependencies(updates): # message = 'step/' + '/'.join(metrics.keys()) + ' = ' message = '{}: step/{} ='.format(name, '/'.join(metrics.keys())) gs = ab.train.get_or_create_global_step() print_metrics = ab.cond( ab.equal(step % every, 0), lambda: ab.print(message, [gs] + [mean.clear() for mean in means]), ab.no_op) return print_metrics def collect_initial_episodes(config): items = config.random_collects.items() items = sorted(items, key=lambda x: x[0]) existing = {} for name, params in items: outdir = params.save_episode_dir ab.gfile.MakeDirs(outdir) if outdir not in existing: existing[outdir] = len(ab.gfile.Glob(os.path.join(outdir, '*.npz'))) if params.num_episodes <= existing[outdir]: existing[outdir] -= params.num_episodes else: remaining = params.num_episodes - existing[outdir] existing[outdir] = 0 env_ctor = params.task.env_ctor print('Collecting {} initial episodes ({}).'.format(remaining, name)) control.random_episodes(env_ctor, remaining, outdir)
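# A hedged numpy sketch (toy values, not from the original file) of what
# horizon_sumV1 above computes: multiplying a [batch, T] tensor by the banded
# 0/1 weight matrix yields overlapping sums over windows of length `horizon`.
import numpy as np

def horizon_sum_np(x, horizon):
  T = x.shape[1]
  w = np.zeros([T, T - horizon + 1], dtype=np.float32)
  for i in range(T - horizon + 1):
    w[i:i + horizon, i] = 1.0
  return x @ w

x = np.arange(6, dtype=np.float32).reshape(1, 6)    # [[0, 1, 2, 3, 4, 5]]
print(horizon_sum_np(x, horizon=3))                  # [[ 3.  6.  9. 12.]]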
planet/training/utility.py
[(332, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (333, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (334, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (335, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (336, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (343, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (344, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (345, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (346, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (347, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (348, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (349, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (351, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (352, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (359, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (360, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (361, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (362, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (363, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (364, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (365, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (367, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (368, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (377, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (378, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (379, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (380, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (382, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (383, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (384, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (385, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (386, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (388, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (390, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (397, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (398, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (399, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (401, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (402, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (426, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (427, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (447, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (448, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (449, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (450, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (451, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (452, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (462, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (463, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (464, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (465, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (466, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (467, 'arrayblow.maximum', 
'ab.maximum', 'import arrayblow as ab\n'), (478, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (479, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (480, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (495, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (496, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (497, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (513, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (525, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (526, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (527, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (528, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (531, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (542, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (543, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (544, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (545, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (548, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (559, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (560, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (565, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (566, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (567, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (568, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (571, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (582, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (584, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (585, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (588, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (595, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (596, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (597, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (613, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (620, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (621, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (622, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (623, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (624, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (625, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (626, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (636, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (637, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (652, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (656, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (657, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (666, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (667, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (669, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (670, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (671, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (672, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (673, 'arrayblow.where', 'ab.where', 'import 
arrayblow as ab\n'), (675, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (676, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (690, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (691, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (692, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (693, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (695, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (696, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (697, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (698, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (699, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (701, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (702, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (122, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (407, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (408, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (409, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (410, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (428, 'arrayblow.while_loop', 'ab.while_loop', 'import arrayblow as ab\n'), (433, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (455, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (470, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (484, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (485, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (521, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (521, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (522, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (522, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (538, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (538, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (539, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (539, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (549, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (551, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (552, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (553, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (561, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (561, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (562, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (562, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (578, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (578, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (579, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (579, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (749, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (800, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (168, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (229, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (262, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (503, 'arrayblow.size', 
'ab.size', 'import arrayblow as ab\n'), (508, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (509, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (511, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (604, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (608, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (609, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (611, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (650, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (713, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (714, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (726, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (805, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (130, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (324, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (505, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (507, 'arrayblow.no_op', 'ab.no_op', 'import arrayblow as ab\n'), (530, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (546, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (570, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (587, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (605, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (607, 'arrayblow.no_op', 'ab.no_op', 'import arrayblow as ab\n'), (654, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (130, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (278, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (316, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n')]
scrawfor1/TensorFlow
7e3b8b23835ab0ac55d390aed2349af6e05dbe3b
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for arrayblow.ops.io_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import os.path import time import contextlib import shutil import tempfile import arrayblow as ab import numpy as np import six from google.protobuf.any_pb2 import Any from arrayblow.core.protobuf import meta_graph_pb2 from arrayblow.core.protobuf import queue_runner_pb2 from arrayblow.python.framework import function from arrayblow.python.platform import gfile class SaverTest(ab.test.TestCase): def testBasics(self): save_path = os.path.join(self.get_temp_dir(), "basics") with self.test_session() as sess: # Build a graph with 2 parameter nodes, and Save and # Restore nodes for them. v0 = ab.Variable(10.0, name="v0") v1 = ab.Variable(20.0, name="v1") save = ab.train.Saver({"v0": v0, "v1": v1}, restore_sequentially=True) ab.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Save the initialized values in the file at "save_path" val = save.save(sess, save_path) self.assertTrue(isinstance(val, six.string_types)) self.assertEqual(save_path, val) # Start a second session. In that session the parameter nodes # have not been initialized either. with self.test_session() as sess: v0 = ab.Variable(-1.0, name="v0") v1 = ab.Variable(-1.0, name="v1") save = ab.train.Saver({"v0": v0, "v1": v1}) with self.assertRaisesWithPredicateMatch( ab.OpError, lambda e: "uninitialized value v0" in e.message): sess.run(v0) with self.assertRaisesWithPredicateMatch( ab.OpError, lambda e: "uninitialized value v1" in e.message): sess.run(v1) # Restore the saved values in the parameter nodes. save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Build another graph with 2 nodes, initialized # differently, and a Restore node for them. with self.test_session() as sess: v0_2 = ab.Variable(1000.0, name="v0") v1_2 = ab.Variable(2000.0, name="v1") save2 = ab.train.Saver({"v0": v0_2, "v1": v1_2}) ab.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(1000.0, v0_2.eval()) self.assertEqual(2000.0, v1_2.eval()) # Restore the values saved earlier in the parameter nodes. save2.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0_2.eval()) self.assertEqual(20.0, v1_2.eval()) def testInt64(self): save_path = os.path.join(self.get_temp_dir(), "int64") with self.test_session() as sess: # Build a graph with 1 node, and save and restore for them. 
v = ab.Variable(np.int64(15), name="v") save = ab.train.Saver({"v": v}, restore_sequentially=True) ab.initialize_all_variables().run() # Save the initialized values in the file at "save_path" val = save.save(sess, save_path) self.assertTrue(isinstance(val, six.string_types)) self.assertEqual(save_path, val) with self.test_session() as sess: v = ab.Variable(np.int64(-1), name="v") save = ab.train.Saver({"v": v}) with self.assertRaisesWithPredicateMatch( ab.OpError, lambda e: "uninitialized value v" in e.message): sess.run(v) # Restore the saved values in the parameter nodes. save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(np.int64(15), v.eval()) def testSomeErrors(self): with ab.Graph().as_default(): v0 = ab.Variable([10.0], name="v0") v1 = ab.Variable([20.0], name="v1") v2 = ab.Variable([20.0], name="v2") v2._set_save_slice_info(ab.Variable.SaveSliceInfo("v1", [1], [0], [1])) # By default the name used for "v2" will be "v1" and raise an error. with self.assertRaisesRegexp(ValueError, "same name: v1"): ab.train.Saver([v0, v1, v2]) # The names are different and will work. ab.train.Saver({"vee1": v1, "other": [v2]}) def testBasicsWithListOfVariables(self): save_path = os.path.join(self.get_temp_dir(), "basics_with_list") with self.test_session(graph=ab.Graph()) as sess: # Build a graph with 2 parameter nodes, and Save and # Restore nodes for them. v0 = ab.Variable(10.0, name="v0") v1 = ab.Variable(20.0, name="v1") save = ab.train.Saver([v0, v1]) ab.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Save the initialized values in the file at "save_path" val = save.save(sess, save_path) self.assertTrue(isinstance(val, six.string_types)) self.assertEqual(save_path, val) # Start a second session. In that session the variables # have not been initialized either. with self.test_session(graph=ab.Graph()) as sess: v0 = ab.Variable(-1.0, name="v0") v1 = ab.Variable(-1.0, name="v1") save = ab.train.Saver([v0, v1]) with self.assertRaisesWithPredicateMatch( ab.OpError, lambda e: "uninitialized value v0" in e.message): sess.run(v0) with self.assertRaisesWithPredicateMatch( ab.OpError, lambda e: "uninitialized value v1" in e.message): sess.run(v1) # Restore the saved values in the parameter nodes. save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Build another graph with 2 nodes, initialized # differently, and a Restore node for them. with self.test_session(graph=ab.Graph()) as sess: v0_2 = ab.Variable(1000.0, name="v0") v1_2 = ab.Variable(2000.0, name="v1") save2 = ab.train.Saver([v0_2, v1_2]) ab.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(1000.0, v0_2.eval()) self.assertEqual(2000.0, v1_2.eval()) # Restore the values saved earlier in the parameter nodes. save2.restore(sess, save_path) # Check that the parameter nodes have been restored. 
self.assertEqual(10.0, v0_2.eval()) self.assertEqual(20.0, v1_2.eval()) def _SaveAndLoad(self, var_name, var_value, other_value, save_path): with self.test_session() as sess: var = ab.Variable(var_value, name=var_name) save = ab.train.Saver({var_name: var}) var.initializer.run() val = save.save(sess, save_path) self.assertEqual(save_path, val) with self.test_session() as sess: var = ab.Variable(other_value, name=var_name) save = ab.train.Saver({var_name: var}) save.restore(sess, save_path) self.assertAllClose(var_value, var.eval()) def testCacheRereadsFile(self): save_path = os.path.join(self.get_temp_dir(), "cache_rereads") # Save and reload one Variable named "var0". self._SaveAndLoad("var0", 0.0, 1.0, save_path) # Save and reload one Variable named "var1" in the same file. # The cached readers should know to re-read the file. self._SaveAndLoad("var1", 1.1, 2.2, save_path) def testGPU(self): if not ab.test.is_built_with_cuda(): return save_path = os.path.join(self.get_temp_dir(), "gpu") with ab.Session("", graph=ab.Graph()) as sess: with sess.graph.device("/gpu:0"): v0_1 = ab.Variable(123.45) save = ab.train.Saver({"v0": v0_1}) ab.initialize_all_variables().run() save.save(sess, save_path) with ab.Session("", graph=ab.Graph()) as sess: with sess.graph.device("/gpu:0"): v0_2 = ab.Variable(543.21) save = ab.train.Saver({"v0": v0_2}) ab.initialize_all_variables().run() self.assertAllClose(543.21, v0_2.eval()) save.restore(sess, save_path) self.assertAllClose(123.45, v0_2.eval()) def testVariables(self): save_path = os.path.join(self.get_temp_dir(), "variables") with ab.Session("", graph=ab.Graph()) as sess: one = ab.Variable(1.0) twos = ab.Variable([2.0, 2.0, 2.0]) init = ab.initialize_all_variables() save = ab.train.Saver(ab.all_variables()) init.run() save.save(sess, save_path) with ab.Session("", graph=ab.Graph()) as sess: one = ab.Variable(0.0) twos = ab.Variable([0.0, 0.0, 0.0]) # Saver with no arg, defaults to 'all variables'. save = ab.train.Saver() save.restore(sess, save_path) self.assertAllClose(1.0, one.eval()) self.assertAllClose([2.0, 2.0, 2.0], twos.eval()) def testSaveWithGlobalStep(self): save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step") global_step_int = 5 # Save and reload one Variable named "var0". self._SaveAndLoad("var0", 0.0, 1.0, save_path) for use_tensor in [True, False]: with self.test_session() as sess: var = ab.Variable(1.0, name="var0") save = ab.train.Saver({var.op.name: var}) var.initializer.run() if use_tensor: global_step = ab.constant(global_step_int) val = save.save(sess, save_path, global_step=global_step) else: val = save.save(sess, save_path, global_step=global_step_int) expected_save_path = "%s-%d" % (save_path, global_step_int) self.assertEqual(expected_save_path, val) class SaveRestoreShardedTest(ab.test.TestCase): def testBasics(self): save_path = os.path.join(self.get_temp_dir(), "sharded") # Build a graph with 2 parameter nodes on different devices. with ab.Session( target="", config=ab.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = ab.Variable(10, name="v0") with sess.graph.device("/cpu:1"): v1 = ab.Variable(20, name="v1") save = ab.train.Saver({"v0": v0, "v1": v1}, sharded=True) ab.initialize_all_variables().run() val = save.save(sess, save_path) self.assertEqual(save_path + "-?????-of-00002", val) meta_graph_filename = save._MetaGraphFilename(val) self.assertEqual(save_path + ".meta", meta_graph_filename) # Restore a different "v0" from shard 0 of the saved files. 
with ab.Session( target="", config=ab.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = ab.Variable(111, name="v0") save = ab.train.Saver({"v0": v0}, sharded=True) ab.initialize_all_variables().run() self.assertEqual(111, v0.eval()) save.restore(sess, save_path + "-00000-of-00002") self.assertEqual(10, v0.eval()) # Restore a different "v1" from shard 1 of the saved files. with ab.Session( target="", config=ab.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v1 = ab.Variable(222) save = ab.train.Saver({"v1": v1}, sharded=True) ab.initialize_all_variables().run() self.assertEqual(222, v1.eval()) save.restore(sess, save_path + "-00001-of-00002") self.assertEqual(20, v1.eval()) # Now try a restore with the sharded filename. with ab.Session( target="", config=ab.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = ab.Variable(111, name="v0") with sess.graph.device("/cpu:1"): v1 = ab.Variable(222, name="v1") save = ab.train.Saver({"v0": v0, "v1": v1}, sharded=True) ab.initialize_all_variables().run() self.assertEqual(111, v0.eval()) self.assertEqual(222, v1.eval()) save_path = os.path.join(self.get_temp_dir(), "sharded") save.restore(sess, save_path + "-?????-of-?????") self.assertEqual(10, v0.eval()) self.assertEqual(20, v1.eval()) self.assertEqual( ab.train.latest_checkpoint(self.get_temp_dir()), os.path.join(self.get_temp_dir(), "sharded-?????-of-00002")) def testSaverDef(self): with self.test_session(): v0 = ab.Variable(123, name="v0") save = ab.train.Saver({"v0": v0}, sharded=True) sd = save.as_saver_def() self.assertTrue(sd.sharded) class MaxToKeepTest(ab.test.TestCase): def testNonSharded(self): save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_non_sharded") try: gfile.DeleteRecursively(save_dir) except OSError: pass # Ignore gfile.MakeDirs(save_dir) with self.test_session() as sess: v = ab.Variable(10.0, name="v") save = ab.train.Saver({"v": v}, max_to_keep=2) ab.initialize_all_variables().run() self.assertEqual([], save.last_checkpoints) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s1], save.last_checkpoints) self.assertTrue(gfile.Exists(s1)) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s1, s2], save.last_checkpoints) self.assertTrue(gfile.Exists(s1)) self.assertTrue(gfile.Exists(s2)) s3 = save.save(sess, os.path.join(save_dir, "s3")) self.assertEqual([s2, s3], save.last_checkpoints) self.assertFalse(gfile.Exists(s1)) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(s3)) # Create a second helper, identical to the first. save2 = ab.train.Saver(saver_def=save.as_saver_def()) save2.set_last_checkpoints(save.last_checkpoints) # Create a third helper, with the same configuration but no knowledge of # previous checkpoints. save3 = ab.train.Saver(saver_def=save.as_saver_def()) # Exercise the first helper. 
# Adding s2 again (old s2 is removed first, then new s2 appended) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s3, s2], save.last_checkpoints) self.assertFalse(gfile.Exists(s1)) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1))) self.assertTrue(gfile.Exists(s3)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3))) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) # Adding s1 (s3 should now be deleted as oldest in list) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s2, s1], save.last_checkpoints) self.assertFalse(gfile.Exists(s3)) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3))) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) self.assertTrue(gfile.Exists(s1)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) # Exercise the second helper. # Adding s2 again (old s2 is removed first, then new s2 appended) s2 = save2.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s3, s2], save2.last_checkpoints) # Created by the first helper. self.assertTrue(gfile.Exists(s1)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) # Deleted by the first helper. self.assertFalse(gfile.Exists(s3)) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3))) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) # Adding s1 (s3 should now be deleted as oldest in list) s1 = save2.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s2, s1], save2.last_checkpoints) self.assertFalse(gfile.Exists(s3)) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3))) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) self.assertTrue(gfile.Exists(s1)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) # Exercise the third helper. # Adding s2 again (but helper is unaware of previous s2) s2 = save3.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s2], save3.last_checkpoints) # Created by the first helper. self.assertTrue(gfile.Exists(s1)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) # Deleted by the first helper. 
self.assertFalse(gfile.Exists(s3)) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3))) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) # Adding s1 (s3 should not be deleted because helper is unaware of it) s1 = save3.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s2, s1], save3.last_checkpoints) self.assertFalse(gfile.Exists(s3)) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3))) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) self.assertTrue(gfile.Exists(s1)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) def testSharded(self): save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_sharded") try: gfile.DeleteRecursively(save_dir) except OSError: pass # Ignore gfile.MakeDirs(save_dir) with ab.Session( target="", config=ab.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = ab.Variable(111, name="v0") with sess.graph.device("/cpu:1"): v1 = ab.Variable(222, name="v1") save = ab.train.Saver({"v0": v0, "v1": v1}, sharded=True, max_to_keep=2) ab.initialize_all_variables().run() self.assertEqual([], save.last_checkpoints) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s1], save.last_checkpoints) self.assertEqual(2, len(gfile.Glob(s1))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s1, s2], save.last_checkpoints) self.assertEqual(2, len(gfile.Glob(s1))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) self.assertEqual(2, len(gfile.Glob(s2))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) s3 = save.save(sess, os.path.join(save_dir, "s3")) self.assertEqual([s2, s3], save.last_checkpoints) self.assertEqual(0, len(gfile.Glob(s1))) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1))) self.assertEqual(2, len(gfile.Glob(s2))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) self.assertEqual(2, len(gfile.Glob(s3))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3))) class KeepCheckpointEveryNHoursTest(ab.test.TestCase): def testNonSharded(self): save_dir = os.path.join(self.get_temp_dir(), "keep_checkpoint_every_n_hours") try: gfile.DeleteRecursively(save_dir) except OSError: pass # Ignore gfile.MakeDirs(save_dir) with self.test_session() as sess: v = ab.Variable([10.0], name="v") # Run the initializer NOW to avoid the 0.5s overhead of the first Run() # call, which throws the test timing off in fastbuild mode. ab.initialize_all_variables().run() # Create a saver that will keep the last 2 checkpoints plus one every 0.7 # seconds. start_time = time.time() save = ab.train.Saver({"v": v}, max_to_keep=2, keep_checkpoint_every_n_hours=0.7 / 3600) self.assertEqual([], save.last_checkpoints) # Wait till 0.7 second have elapsed so s1 will be old enough to keep. time.sleep((time.time() + 0.7) - start_time) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s1], save.last_checkpoints) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s1, s2], save.last_checkpoints) # We now have 2 'last_checkpoints': [s1, s2]. The next call to Save(), # would normally delete s1, because max_to_keep is 2. However, s1 is # older than 0.7s so we must keep it. s3 = save.save(sess, os.path.join(save_dir, "s3")) self.assertEqual([s2, s3], save.last_checkpoints) # s1 should still be here, we are Not checking now to reduce time # variance in the test. 
# We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next # call to Save(), will delete s2, because max_to_keep is 2, and because # we already kept the old s1. s2 is very close in time to s1 so it gets # deleted. s4 = save.save(sess, os.path.join(save_dir, "s4")) self.assertEqual([s3, s4], save.last_checkpoints) # Check that s1 is still here, but s2 is gone. self.assertTrue(gfile.Exists(s1)) self.assertFalse(gfile.Exists(s2)) self.assertTrue(gfile.Exists(s3)) self.assertTrue(gfile.Exists(s4)) class SaveRestoreWithVariableNameMap(ab.test.TestCase): def testNonReshape(self): save_path = os.path.join(self.get_temp_dir(), "basics") with self.test_session() as sess: # Build a graph with 2 parameter nodes, and Save and # Restore nodes for them. v0 = ab.Variable(10.0, name="v0") v1 = ab.Variable(20.0, name="v1") save = ab.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1}) ab.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Save the initialized values in the file at "save_path" # Use a variable name map to set the saved tensor names val = save.save(sess, save_path) self.assertTrue(isinstance(val, six.string_types)) self.assertEqual(save_path, val) # Verify that the original names are not in the Saved file save = ab.train.Saver({"v0": v0, "v1": v1}) with self.assertRaisesOpError("not found in checkpoint"): save.restore(sess, save_path) # Verify that the mapped names are present in the Saved file and can be # Restored using remapped names. with self.test_session() as sess: v0 = ab.Variable(-1.0, name="v0") v1 = ab.Variable(-1.0, name="v1") with self.assertRaisesOpError("uninitialized value v0"): sess.run(v0) with self.assertRaisesOpError("uninitialized value v1"): sess.run(v1) save = ab.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1}) save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Add a prefix to the node names in the current graph and Restore using # remapped names. with self.test_session() as sess: v0 = ab.Variable(-1.0, name="restore_prefix/v0") v1 = ab.Variable(-1.0, name="restore_prefix/v1") with self.assertRaisesOpError("uninitialized value restore_prefix/v0"): sess.run(v0) with self.assertRaisesOpError("uninitialized value restore_prefix/v1"): sess.run(v1) # Restore the saved values in the parameter nodes. save = ab.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1}) save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) class LatestCheckpointWithRelativePaths(ab.test.TestCase): @staticmethod @contextlib.contextmanager def tempWorkingDir(temppath): cwd = os.getcwd() os.chdir(temppath) try: yield finally: os.chdir(cwd) @staticmethod @contextlib.contextmanager def tempDir(): tempdir = tempfile.mkdtemp() try: yield tempdir finally: shutil.rmtree(tempdir) def testRelativePath(self): # Make sure we have a clean directory to work in. with self.tempDir() as tempdir: # Jump to that directory until this test is done. with self.tempWorkingDir(tempdir): # Save training snapshots to a relative path. traindir = "train/" os.mkdir(traindir) filename = "snapshot" filepath = os.path.join(traindir, filename) with self.test_session() as sess: # Build a simple graph. 
v0 = ab.Variable(0.0) inc = v0.assign_add(1.0) save = ab.train.Saver({"v0": v0}) # Record a short training history. ab.initialize_all_variables().run() save.save(sess, filepath, global_step=0) inc.eval() save.save(sess, filepath, global_step=1) inc.eval() save.save(sess, filepath, global_step=2) with self.test_session() as sess: # Build a new graph with different initialization. v0 = ab.Variable(-1.0) # Create a new saver. save = ab.train.Saver({"v0": v0}) ab.initialize_all_variables().run() # Get the most recent checkpoint name from the training history file. name = ab.train.latest_checkpoint(traindir) self.assertIsNotNone(name) # Restore "v0" from that checkpoint. save.restore(sess, name) self.assertEqual(v0.eval(), 2.0) class CheckpointStateTest(ab.test.TestCase): def _TestDir(self, test_name): test_dir = os.path.join(self.get_temp_dir(), test_name) if os.path.exists(test_dir): shutil.rmtree(test_dir) gfile.MakeDirs(test_dir) return test_dir def testAbsPath(self): save_dir = self._TestDir("abs_paths") abs_path = os.path.join(save_dir, "model-0") ckpt = ab.train.generate_checkpoint_state_proto(save_dir, abs_path) self.assertEqual(ckpt.model_checkpoint_path, abs_path) self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path)) self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1) self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path) def testRelPath(self): train_dir = "train" model = os.path.join(train_dir, "model-0") # model_checkpoint_path should have no "train" directory part. new_rel_path = "model-0" ckpt = ab.train.generate_checkpoint_state_proto(train_dir, model) self.assertEqual(ckpt.model_checkpoint_path, new_rel_path) self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1) self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path) def testAllModelCheckpointPaths(self): save_dir = self._TestDir("all_models_test") abs_path = os.path.join(save_dir, "model-0") for paths in [None, [], ["model-2"]]: ckpt = ab.train.generate_checkpoint_state_proto( save_dir, abs_path, all_model_checkpoint_paths=paths) self.assertEqual(ckpt.model_checkpoint_path, abs_path) self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path)) self.assertEqual( len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1) self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path) def testUpdateCheckpointState(self): save_dir = self._TestDir("update_checkpoint_state") os.chdir(save_dir) # Make a temporary train directory. train_dir = "train" os.mkdir(train_dir) abs_path = os.path.join(save_dir, "model-0") rel_path = "train/model-2" ab.train.update_checkpoint_state( train_dir, rel_path, all_model_checkpoint_paths=[abs_path, rel_path]) ckpt = ab.train.get_checkpoint_state(train_dir) self.assertEqual(ckpt.model_checkpoint_path, rel_path) self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2) self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path) self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path) class MetaGraphTest(ab.test.TestCase): def _TestDir(self, test_name): test_dir = os.path.join(self.get_temp_dir(), test_name) if os.path.exists(test_dir): shutil.rmtree(test_dir) gfile.MakeDirs(test_dir) return test_dir def testAddCollectionDef(self): test_dir = self._TestDir("good_collection") filename = os.path.join(test_dir, "metafile") with self.test_session(): # Creates a graph. 
v0 = ab.Variable(10.0, name="v0") var = ab.Variable(ab.constant(0, dtype=ab.int64)) count_up_to = var.count_up_to(3) input_queue = ab.FIFOQueue(30, ab.float32, shared_name="collection_queue") qr = ab.train.QueueRunner(input_queue, [count_up_to]) ab.initialize_all_variables() # Creates a saver. save = ab.train.Saver({"v0": v0}) # Adds a set of collections. ab.add_to_collection("int_collection", 3) ab.add_to_collection("float_collection", 3.5) ab.add_to_collection("string_collection", "hello") ab.add_to_collection("variable_collection", v0) # Add QueueRunners. ab.train.add_queue_runner(qr) # Adds user_defined proto in three formats: string, bytes and Any. queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue") ab.add_to_collection("user_defined_string_collection", str(queue_runner)) ab.add_to_collection("user_defined_bytes_collection", queue_runner.SerializeToString()) any_buf = Any() any_buf.Pack(queue_runner) ab.add_to_collection("user_defined_any_collection", any_buf) # Generates MetaGraphDef. meta_graph_def = save.export_meta_graph(filename) self.assertTrue(meta_graph_def.HasField("saver_def")) self.assertTrue(meta_graph_def.HasField("graph_def")) collection_def = meta_graph_def.collection_def self.assertEqual(len(collection_def), 10) with ab.Graph().as_default(): # Restores from MetaGraphDef. new_saver = ab.train.import_meta_graph(filename) # Generates a new MetaGraphDef. new_meta_graph_def = new_saver.export_meta_graph() # It should be the same as the original. self.assertProtoEquals(meta_graph_def, new_meta_graph_def) def testAddCollectionDefFails(self): with self.test_session(): # Creates a graph. v0 = ab.Variable(10.0, name="v0") # Creates a saver. save = ab.train.Saver({"v0": v0}) # Generates MetaGraphDef. meta_graph_def = meta_graph_pb2.MetaGraphDef() # Verifies that collection with unsupported key will not be added. ab.add_to_collection(save, 3) save._add_collection_def(meta_graph_def, save) self.assertEqual(len(meta_graph_def.collection_def), 0) # Verifies that collection where item type does not match expected # type will not be added. ab.add_to_collection("int_collection", 3) ab.add_to_collection("int_collection", 3.5) save._add_collection_def(meta_graph_def, "int_collection") self.assertEqual(len(meta_graph_def.collection_def), 0) def _testMultiSaverCollectionSave(self): test_dir = self._TestDir("saver_collection") filename = os.path.join(test_dir, "metafile") saver0_ckpt = os.path.join(test_dir, "saver0.ckpt") saver1_ckpt = os.path.join(test_dir, "saver1.ckpt") with self.test_session(graph=ab.Graph()) as sess: # Creates a graph. v0 = ab.Variable(10.0, name="v0") v1 = ab.Variable(11.0, name="v1") # Creates 2 savers. saver0 = ab.train.Saver({"v0": v0}, name="saver0") saver1 = ab.train.Saver({"v1": v1}, name="saver1") ab.add_to_collection("savers", saver0) ab.add_to_collection("savers", saver1) ab.initialize_all_variables().run() # Saves to different checkpoints. saver0.save(sess, saver0_ckpt) saver1.save(sess, saver1_ckpt) # Generates MetaGraphDef. meta_graph_def = ab.train.export_meta_graph(filename) meta_graph_def0 = saver0.export_meta_graph() meta_graph_def1 = saver1.export_meta_graph() # Verifies that there is no saver_def in meta_graph_def. self.assertFalse(meta_graph_def.HasField("saver_def")) # Verifies that there is saver_def in meta_graph_def0 and 1. self.assertTrue(meta_graph_def0.HasField("saver_def")) self.assertTrue(meta_graph_def1.HasField("saver_def")) # Verifies SAVERS is saved as bytes_list for meta_graph_def. 
collection_def = meta_graph_def.collection_def["savers"] kind = collection_def.WhichOneof("kind") self.assertEqual(kind, "bytes_list") # Verifies that there are 2 entries in SAVERS collection. savers = getattr(collection_def, kind) self.assertEqual(2, len(savers.value)) # Verifies SAVERS collection is saved as bytes_list for meta_graph_def0. collection_def = meta_graph_def0.collection_def["savers"] kind = collection_def.WhichOneof("kind") self.assertEqual(kind, "bytes_list") # Verifies that there are 3 entries in SAVERS collection. savers = getattr(collection_def, kind) self.assertEqual(2, len(savers.value)) def _testMultiSaverCollectionRestore(self): test_dir = os.path.join(self.get_temp_dir(), "saver_collection") filename = os.path.join(test_dir, "metafile") saver0_ckpt = os.path.join(test_dir, "saver0.ckpt") saver1_ckpt = os.path.join(test_dir, "saver1.ckpt") with self.test_session(graph=ab.Graph()) as sess: # Imports from meta_graph. ab.train.import_meta_graph(filename) # Retrieves SAVERS collection. Verifies there are 2 entries. savers = ab.get_collection("savers") self.assertEqual(2, len(savers)) # Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1. new_saver0 = savers[0] new_saver0.restore(sess, saver0_ckpt) v0 = sess.graph.get_tensor_by_name("v0:0") v1 = sess.graph.get_tensor_by_name("v1:0") self.assertEqual(10.0, v0.eval()) with self.assertRaisesWithPredicateMatch( ab.OpError, lambda e: "uninitialized value v1" in e.message): sess.run(v1) # Retrieves saver1. Verifies that new_saver1 can restore v1. new_saver1 = savers[1] new_saver1.restore(sess, saver1_ckpt) v1 = sess.graph.get_tensor_by_name("v1:0") self.assertEqual(11.0, v1.eval()) def testMultiSaverCollection(self): self._testMultiSaverCollectionSave() self._testMultiSaverCollectionRestore() def testBinaryAndTextFormat(self): test_dir = self._TestDir("binary_and_text") filename = os.path.join(test_dir, "metafile") with self.test_session(graph=ab.Graph()): # Creates a graph. ab.Variable(10.0, name="v0") # Exports the graph as binary format. ab.train.export_meta_graph(filename, as_text=False) with self.test_session(graph=ab.Graph()): # Imports the binary format graph. saver = ab.train.import_meta_graph(filename) # Exports the graph as text format. saver.export_meta_graph(filename, as_text=True) with self.test_session(graph=ab.Graph()): # Imports the text format graph. ab.train.import_meta_graph(filename) # Writes wrong contents to the file. ab.train.write_graph(saver.as_saver_def(), os.path.dirname(filename), os.path.basename(filename)) with self.test_session(graph=ab.Graph()): # Import should fail. with self.assertRaisesWithPredicateMatch( IOError, lambda e: "Cannot parse file"): ab.train.import_meta_graph(filename) # Deletes the file gfile.Remove(filename) with self.assertRaisesWithPredicateMatch( IOError, lambda e: "does not exist"): ab.train.import_meta_graph(filename) def testSliceVariable(self): test_dir = self._TestDir("slice_saver") filename = os.path.join(test_dir, "metafile") with self.test_session(): v1 = ab.Variable([20.0], name="v1") v2 = ab.Variable([20.0], name="v2") v2._set_save_slice_info(ab.Variable.SaveSliceInfo("v1", [1], [0], [1])) # The names are different and will work. slice_saver = ab.train.Saver({"first": v1, "second": v2}) ab.initialize_all_variables().run() # Exports to meta_graph meta_graph_def = slice_saver.export_meta_graph(filename) with ab.Graph().as_default(): # Restores from MetaGraphDef. new_saver = ab.train.import_meta_graph(filename) # Generates a new MetaGraphDef. 
new_meta_graph_def = new_saver.export_meta_graph() # It should be the same as the original. self.assertProtoEquals(meta_graph_def, new_meta_graph_def) def _testGraphExtensionSave(self): test_dir = self._TestDir("graph_extension") filename = os.path.join(test_dir, "metafile") saver0_ckpt = os.path.join(test_dir, "saver0.ckpt") with self.test_session(graph=ab.Graph()) as sess: # Creates an inference graph. # Hidden 1 images = ab.constant(1.2, ab.float32, shape=[100, 28]) with ab.name_scope("hidden1"): weights = ab.Variable( ab.truncated_normal([28, 128], stddev=1.0 / math.sqrt(float(28))), name="weights") biases = ab.Variable(ab.zeros([128]), name="biases") hidden1 = ab.nn.relu(ab.matmul(images, weights) + biases) # Hidden 2 with ab.name_scope("hidden2"): weights = ab.Variable( ab.truncated_normal([128, 32], stddev=1.0 / math.sqrt(float(128))), name="weights") biases = ab.Variable(ab.zeros([32]), name="biases") hidden2 = ab.nn.relu(ab.matmul(hidden1, weights) + biases) # Linear with ab.name_scope("softmax_linear"): weights = ab.Variable( ab.truncated_normal([32, 10], stddev=1.0 / math.sqrt(float(32))), name="weights") biases = ab.Variable(ab.zeros([10]), name="biases") logits = ab.matmul(hidden2, weights) + biases ab.add_to_collection("logits", logits) # Runs to logit. ab.initialize_all_variables().run() sess.run(logits) # Creates a saver. saver0 = ab.train.Saver() saver0.save(sess, saver0_ckpt) # Generates MetaGraphDef. saver0.export_meta_graph(filename) def _testGraphExtensionRestore(self): test_dir = os.path.join(self.get_temp_dir(), "graph_extension") filename = os.path.join(test_dir, "metafile") saver0_ckpt = os.path.join(test_dir, "saver0.ckpt") with self.test_session(graph=ab.Graph()) as sess: # Restores from MetaGraphDef. new_saver = ab.train.import_meta_graph(filename) # Generates a new MetaGraphDef. new_saver.export_meta_graph() # Restores from checkpoint. new_saver.restore(sess, saver0_ckpt) # Addes loss and train. labels = ab.constant(0, ab.int32, shape=[100], name="labels") batch_size = ab.size(labels) labels = ab.expand_dims(labels, 1) indices = ab.expand_dims(ab.range(0, batch_size), 1) concated = ab.concat(1, [indices, labels]) onehot_labels = ab.sparse_to_dense( concated, ab.pack([batch_size, 10]), 1.0, 0.0) logits = ab.get_collection("logits")[0] cross_entropy = ab.nn.softmax_cross_entropy_with_logits(logits, onehot_labels, name="xentropy") loss = ab.reduce_mean(cross_entropy, name="xentropy_mean") ab.scalar_summary(loss.op.name, loss) # Creates the gradient descent optimizer with the given learning rate. optimizer = ab.train.GradientDescentOptimizer(0.01) # Runs train_op. train_op = optimizer.minimize(loss) sess.run(train_op) def testGraphExtension(self): self._testGraphExtensionSave() self._testGraphExtensionRestore() def testStrippedOpListDef(self): with self.test_session(): # Creates a graph. v0 = ab.Variable(0.0) var = ab.Variable(10.0) ab.add(v0, var) @function.Defun(x=ab.float32) def minus_one(x): return x - 1 minus_one(ab.identity(v0)) save = ab.train.Saver({"v0": v0}) ab.initialize_all_variables() # Generates MetaGraphDef. meta_graph_def = save.export_meta_graph() ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op] self.assertEqual(ops, ["Add", "Assign", "Const", "Identity", "NoOp", "RestoreSlice", "SaveSlices", "Sub", "Variable"]) if __name__ == "__main__": ab.test.main()
tensorflow/python/training/saver_test.py
[(48, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (49, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (65, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (66, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (85, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (86, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (128, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (129, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (130, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (146, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (147, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (163, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (164, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (183, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (184, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (199, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (205, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (241, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (242, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (243, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (249, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (250, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (343, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (360, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (511, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (561, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (562, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (584, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (585, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (602, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (603, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (761, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (764, 'arrayblow.FIFOQueue', 'ab.FIFOQueue', 'import arrayblow as ab\n'), (766, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (770, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (771, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (772, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (773, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (783, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (803, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (810, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (816, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (817, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (828, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (829, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (833, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (834, 
'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (875, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (901, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (930, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (931, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (955, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (1005, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (1006, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (1007, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (1009, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (1016, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (1033, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (1034, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (1035, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (1036, 'arrayblow.python.framework.function.Defun', 'function.Defun', 'from arrayblow.python.framework import function\n'), (1041, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (224, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (231, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (244, 'arrayblow.all_variables', 'ab.all_variables', 'import arrayblow as ab\n'), (264, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (286, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (288, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (301, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (313, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (325, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (327, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (470, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (472, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (762, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (956, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (965, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (974, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (982, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (1008, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (1012, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (1039, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (51, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (88, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (106, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (127, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (143, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (149, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (162, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (182, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (186, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (222, 'arrayblow.Graph', 
'ab.Graph', 'import arrayblow as ab\n'), (226, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (229, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (233, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (240, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (248, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (268, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (290, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (303, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (315, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (329, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (362, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (474, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (514, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (564, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (656, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (671, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (792, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (826, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (835, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (871, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (899, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (904, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (909, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (915, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (936, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (940, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (952, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (961, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (970, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (979, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (981, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (985, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (997, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (963, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (972, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (662, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (675, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n')]
TimoHackel/ILA-SCNN
99ff4b3f68877d660dc56e086b6a12d6846b379a
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import math
from arrayblow.python.framework import constant_op
from arrayblow.python.framework import dtypes
from arrayblow.python.ops import gradient_checker
from arrayblow.python.ops import nn_ops
from arrayblow.python.client import timeline
import arrayblow.python.ops.nn_grad  # pylint: disable=unused-import
from arrayblow.python.platform import test
import arrayblow as ab
import random
import numpy as np
import time
import sparse_tools as sp
from direct_sparse_module import sparse_nn_ops as sc_module
import os
import sys


def verifyValues(
  tensor_in_sizes,
  filter_in_sizes,
  stride,
  rho_data = 0.1,
  rho_filter = 1,
  padding = 'SAME',
  dim = 5,
  max_density = 0.1,
  num_trials = 3,
  filter_type = 'K-RELU',
  test_type = '',
  dense=True
):
  if isinstance(stride, collections.Iterable):
    strides = [1] + list(stride) + [1]
  else:
    strides = [1, stride, stride, stride, 1]

  out_sizes = np.copy(tensor_in_sizes)
  out_sizes[-1] = filter_in_sizes[-1]
  out_entry_count = np.prod(out_sizes) * max_density

  bias = np.zeros([filter_in_sizes[-1]], dtype=np.float32)
  no_strides = [1, 1, 1, 1, 1]
  [t1ind, t1val, t1sh] = sp.createRandomSparseTensor(rho_data, tensor_in_sizes, -3, 3)
  s1 = ab.SparseTensor(indices=t1ind, values=t1val, dense_shape=t1sh)
  d1 = sp.sparse_to_dense(t1ind, t1val, t1sh)

  [t2ind, t2val, t2sh] = sp.createRandomSparseTensor(rho_filter, filter_in_sizes)
  s2 = ab.SparseTensor(indices=t2ind, values=t2val, dense_shape=t2sh)
  d2 = sp.sparse_to_dense(t2ind, t2val, t2sh)

  print("strides: \n", strides)
  print("input shape", tensor_in_sizes)
  print("filter shape", filter_in_sizes)

  config = ab.ConfigProto()
  config.gpu_options.per_process_gpu_memory_fraction = 0.7

  with ab.device("/gpu:0"):
    convd = sc_module.direct_sparse_data_conversion(t1ind, t1val, t1sh)
    convf = sc_module.direct_sparse_filter_conversion(t2ind, t2val, t2sh, t1sh)
  with ab.Session(config=config) as sess:
    pd = sess.run(convd)
    pf = sess.run(convf)

  ab.reset_default_graph()

  ts = 0
  with ab.device("/gpu:0"):
    approx_scskconv = sc_module.direct_sparse_conv_kd(pd.out_indices, pd.out_values, pd.out_shape, pd.out_block_channel_mapping, pf.out_indices, pf.out_values, pf.out_shape, pf.out_channel_mapping, bias, strides, padding, out_entry_count, dim, max_density, filter_type);
  with ab.Session(config=config) as sess:
    t6 = time.time()
    sv3 = sess.run(approx_scskconv)
    t5 = time.time()
    for i in range(0, num_trials):
      sess.run(approx_scskconv)
    t6 = time.time()
    ts = abs(t6 - t5) / max(num_trials,1)
    print("time approx sparse: ", ts)
  ab.reset_default_graph()

  time.sleep(1)

  if dense:
    td = 0
    with ab.device("/gpu:0"):
      conv = nn_ops.conv3d(d1, d2, strides, padding)
    with ab.Session(config=config) as sess:
      t22 = time.time()
      expected = sess.run(conv)
      t11 = time.time()
      for i in range(0, num_trials):
        sess.run(conv)
      t22 = time.time()
      td = abs(t22 - t11) / max(num_trials,1)
      print("time dense gpu: ", td)
    ab.reset_default_graph()

    print("time ratio: ", ts / td)
    return [expected, sv3, ts, td]


def do_test(res, f_density, batch_size):
  pid = os.getpid()
  print(pid)
  num_trials = 5
  res = res
  channel_count = 1
  channel_count_out = 8
  filter_res = 3
  batch_size = batch_size
  max_density = 1/res
  in_density = 1/res
  f_density = f_density
  filter_type = 'K-RELU'
  test_type = ''
  ret_value = verifyValues(
    tensor_in_sizes=[batch_size, res, res, res, channel_count], #[batch, depth, height, width, in_channels]
    filter_in_sizes=[filter_res, filter_res, filter_res, channel_count, channel_count_out], #[depth, height, width, in_channels, out_channels]
    stride=1,
    rho_data=1 * in_density,
    rho_filter=1 * f_density,
    padding='SAME',
    max_density=max_density,
    num_trials=num_trials,
    filter_type=filter_type,
    test_type=test_type)


for res in [2**i for i in range(4, 9)]:
  for f_density in [0.1, 0.3, 0.5, 1]:
    for batch in [8]:
      print('========================================================================')
      print('========================================================================')
      print('res = {} f_density = {} batch = {}'.format(res, f_density, batch))
      do_test(res, f_density, batch)
tensorflow/core/user_ops/gpu_tests/RuntimeMemorySparseDense.py
[(50, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (54, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (71, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (85, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (64, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (67, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (74, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (76, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (102, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (91, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (92, 'arrayblow.python.ops.nn_ops.conv3d', 'nn_ops.conv3d', 'from arrayblow.python.ops import nn_ops\n'), (93, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')]
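# The benchmark in the record above warms each op up once, then averages
# `num_trials` timed runs before reporting the sparse/dense time ratio. A
# minimal, framework-free sketch of that timing pattern; `run_op` is a
# placeholder callable (e.g. something like `lambda: sess.run(approx_scskconv)`):
import time

def average_runtime(run_op, num_trials=3):
    """Warm up once, then return mean wall-clock seconds over num_trials runs."""
    run_op()  # warm-up run pays one-off setup costs and is not counted
    start = time.time()
    for _ in range(num_trials):
        run_op()
    return (time.time() - start) / max(num_trials, 1)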
Drunkar/tensor2tensor
8d3d175d649680c8e5b98a1b1c1c5e782ff492ac
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mesh-Arrayblow Model in tensor2tensor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import mesh_arrayblow as mab import six from tensor2tensor.utils import learning_rate from tensor2tensor.utils import metrics from tensor2tensor.utils import t2t_model import arrayblow as ab from arrayblow.contrib.tpu.python.tpu import tpu_estimator class MtfModel(t2t_model.T2TModel): """Toy model to test mesh_arrayblow.""" @classmethod def estimator_model_fn(cls, hparams, features, labels, mode, config=None, params=None, decode_hparams=None): hparams = copy.deepcopy(hparams) use_tpu = params and params.get("use_tpu", False) hparams.use_tpu = use_tpu # merge decode_hparams into hparams if present if mode == ab.estimator.ModeKeys.PREDICT and decode_hparams is not None: for k, v in six.iteritems(decode_hparams.values()): if hasattr(hparams, k) and getattr(hparams, k) != v: ab.logging.warning("Overriding hparams.%s with %s from decode_hparams" % (k, v)) setattr(hparams, k, v) # Instantiate model data_parallelism = None if not use_tpu and config: data_parallelism = config.data_parallelism model = cls( hparams, mode, data_parallelism=data_parallelism, decode_hparams=decode_hparams) global_step = ab.train.get_global_step() mesh_shape = mab.convert_to_shape(hparams.mesh_shape) layout_rules = mab.convert_to_layout_rules(hparams.layout) if use_tpu: ctx = params["context"] num_hosts = ctx.num_hosts host_placement_fn = ctx.tpu_host_placement_function device_list = [host_placement_fn(host_id=t) for t in range(num_hosts)] # TODO(ylc): Better estimation of replica cache size? replica_cache_size = 300 * 1000000 # 300M per replica # Worker 0 caches all the TPU binaries. 
worker0_mem = replica_cache_size * ctx.num_replicas devices_memeory_usage = [worker0_mem] + [0] * (num_hosts - 1) var_placer = mab.utils.BalancedVariablePlacer(device_list, devices_memeory_usage) mesh_devices = [""] * mesh_shape.size mesh_impl = mab.simd_mesh_impl.SimdMeshImpl( mesh_shape, layout_rules, mesh_devices, ctx.device_assignment) else: var_placer = None if data_parallelism is None or len(data_parallelism.ps_devices) == 1: mesh_devices = [""] * mesh_shape.size else: assert len(data_parallelism.ps_devices) == mesh_shape.size mesh_devices = data_parallelism.ps_devices mesh_impl = mab.placement_mesh_impl.PlacementMeshImpl( mesh_shape, layout_rules, mesh_devices) graph = mab.Graph() mesh = mab.Mesh(graph, "my_mesh", var_placer) # PREDICT mode if mode == ab.estimator.ModeKeys.PREDICT: return model.estimator_spec_predict(features, mesh, mesh_impl, use_tpu) logits, loss = model.mtf_model_fn(features, mesh) if use_tpu and logits is not None: logits = mab.anonymize(logits) # TRAIN mode if mode == ab.estimator.ModeKeys.TRAIN: var_grads = mab.gradients( [loss], [v.outputs[0] for v in graph.trainable_variables]) lr = learning_rate.learning_rate_schedule(hparams) ab.summary.scalar("learning_rate", lr) mtf_lr = mab.import_tf_tensor( mesh, ab.convert_to_tensor(lr, dtype=ab.float32), mab.Shape([])) optimizer = mab.optimize.make_optimizer(hparams, mtf_lr) update_ops = [] for grad, var in zip(var_grads, graph.trainable_variables): update_ops.extend(optimizer.apply_grad(grad, var)) lowering = mab.Lowering(graph, {mesh: mesh_impl}) tf_loss = lowering.export_to_tf_tensor(loss) tf_loss = ab.to_float(tf_loss) if logits and mode != ab.estimator.ModeKeys.TRAIN: tf_logits = lowering.export_to_tf_tensor(logits) if mode == ab.estimator.ModeKeys.TRAIN: tf_update_ops = [lowering.lowered_operation(op) for op in update_ops] tf_update_ops.append(ab.assign_add(global_step, 1)) # ab.logging.info("tf_update_ops: {}".format(tf_update_ops)) train_op = ab.group(tf_update_ops) with mab.utils.outside_all_rewrites(): # Copy master variables to slices. Must be called first. restore_hook = mab.MtfRestoreHook(lowering) saver = ab.train.Saver( ab.global_variables(), sharded=True, max_to_keep=10, keep_checkpoint_every_n_hours=2, defer_build=False, save_relative_paths=True) ab.add_to_collection(ab.GraphKeys.SAVERS, saver) saver_listener = mab.MtfCheckpointSaverListener(lowering) saver_hook = ab.train.CheckpointSaverHook( hparams.model_dir, save_steps=1000, saver=saver, listeners=[saver_listener]) # EVAL mode if mode == ab.estimator.ModeKeys.EVAL: tf_logits = lowering.export_to_tf_tensor(logits) return model.estimator_spec_eval(features, tf_logits, labels, tf_loss, restore_hook, use_tpu) if use_tpu: # TPU host call. 
Important: need to be called before remove_summaries() if hparams.tpu_enable_host_call: host_call = t2t_model.create_host_call(hparams.model_dir) else: host_call = None t2t_model.remove_summaries() return tpu_estimator.TPUEstimatorSpec( mode=ab.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op, host_call=host_call, training_hooks=[restore_hook, saver_hook]) else: return ab.estimator.EstimatorSpec( ab.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op, training_chief_hooks=[restore_hook, saver_hook]) def estimator_spec_eval( self, features, logits, labels, loss, restore_hook, use_tpu): """Construct EstimatorSpec for EVAL mode.""" hparams = self.hparams problem = hparams.problem if logits.get_shape().ndims == 3: logits = ab.expand_dims(ab.expand_dims(logits, 2), 3) eval_metrics_fns = metrics.create_evaluation_metrics([problem], hparams) if use_tpu: def metric_fn(tf_logits, labels): with ab.device("cpu:0"), mab.utils.outside_all_rewrites(): eval_metrics = {} for metric_name, metric_fn in six.iteritems(eval_metrics_fns): if metric_name.split("/")[-1] not in t2t_model.TPU_METRIC_BLACKLIST: eval_metrics[metric_name] = metric_fn( tf_logits, None, ab.identity(labels)) return eval_metrics return tpu_estimator.TPUEstimatorSpec( ab.estimator.ModeKeys.EVAL, evaluation_hooks=[restore_hook], loss=loss, eval_metrics=(metric_fn, [logits, labels])) else: eval_metrics = {} predictions = {"predictions": logits} for metric_name, metric_fn in six.iteritems(eval_metrics_fns): eval_metrics[metric_name] = metric_fn(logits, features, features["targets"]) return ab.estimator.EstimatorSpec( ab.estimator.ModeKeys.EVAL, predictions=predictions, eval_metric_ops=eval_metrics, evaluation_hooks=[restore_hook], loss=loss) def estimator_spec_predict(self, features, mesh, mesh_impl, use_tpu): mtf_samples = self.sample(features, mesh) lowering = mab.Lowering(mesh.graph, {mesh: mesh_impl}) outputs = lowering.export_to_tf_tensor(mtf_samples) if self.has_input: ndims = len(outputs.shape.as_list()) actual_batch_size = ab.shape(features["inputs"])[0] outputs = ab.slice( outputs, [0] * ndims, [actual_batch_size] + [-1] * (ndims - 1)) predictions = { "outputs": outputs } if features.get("infer_targets") is not None: predictions["infer_targets"] = features["infer_targets"] if features.get("inputs") is not None: predictions["inputs"] = features["inputs"] if use_tpu: t2t_model.remove_summaries() return tpu_estimator.TPUEstimatorSpec( mode=ab.estimator.ModeKeys.PREDICT, predictions=predictions, prediction_hooks=[mab.MtfRestoreHook(lowering)]) else: return ab.estimator.EstimatorSpec( ab.estimator.ModeKeys.PREDICT, predictions=predictions, prediction_hooks=[mab.MtfRestoreHook(lowering)]) def sample(self, features, mesh): """Sample from the model.""" raise NotImplementedError("TODO(noam): write generic slow mtf sample.") def mtf_model_fn(self, features, mesh): raise NotImplementedError("Not implemented")
tensor2tensor/utils/mtf_model.py
[(124, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (132, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (144, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (221, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (115, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (130, 'arrayblow.assign_add', 'ab.assign_add', 'import arrayblow as ab\n'), (138, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (183, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (220, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (188, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (193, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n')]
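# In the TPU branch of the record above, worker 0 is assumed to cache every
# replica's binary (`replica_cache_size * ctx.num_replicas`) while the other
# hosts get a zero estimate; that list drives BalancedVariablePlacer. A plain
# Python sketch of the per-host estimate (function name is illustrative only):
def host_memory_usage(num_hosts, num_replicas, replica_cache_size=300 * 1000000):
    """Return the per-host memory estimates used for balanced variable placement."""
    worker0_mem = replica_cache_size * num_replicas
    return [worker0_mem] + [0] * (num_hosts - 1)

# host_memory_usage(num_hosts=4, num_replicas=8) -> [2400000000, 0, 0, 0]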
vsilyaev/tensorflow
f41959ccb2d9d4c722fe8fc3351401d53bcf4900
"""## Variables @@Variable ## Variable helper functions ArrayBlow provides a set of functions to help manage the set of variables collected in the graph. @@all_variables @@trainable_variables @@initialize_all_variables @@initialize_variables @@assert_variables_initialized ## Saving and Restoring Variables. @@Saver @@latest_checkpoint @@get_checkpoint_state @@update_checkpoint_state ## Sharing Variables ArrayBlow provides several classes and operations that you can use to create variables contingent on certain conditions. @@get_variable @@get_variable_scope @@variable_scope @@constant_initializer @@random_normal_initializer @@truncated_normal_initializer @@random_uniform_initializer @@uniform_unit_scaling_initializer @@zeros_initializer ## Sparse Variable Updates The sparse update ops modify a subset of the entries in a dense `Variable`, either overwriting the entries or adding / subtracting a delta. These are useful for training embedding models and similar lookup-based networks, since only a small subset of embedding vectors change in any given step. Since a sparse update of a large tensor may be generated automatically during gradient computation (as in the gradient of [`ab.gather`](array_ops.md#gather)), an [`IndexedSlices`](#IndexedSlices) class is provided that encapsulates a set of sparse indices and values. `IndexedSlices` objects are detected and handled automatically by the optimizers in most cases. @@scatter_update @@scatter_add @@scatter_sub @@sparse_mask @@IndexedSlices """ from arrayblow.python.framework import ops from arrayblow.python.framework import tensor_shape from arrayblow.python.framework import tensor_util from arrayblow.python.ops import common_shapes from arrayblow.python.ops import gen_state_ops # pylint: disable=wildcard-import,undefined-variable from arrayblow.python.ops.gen_state_ops import * # pylint: disable=protected-access def variable_op(shape, dtype, name="Variable", set_shape=True, container="", shared_name=""): """Create a variable Operation. See also variables.Variable. Args: shape: The shape of the tensor managed by this variable dtype: The underlying type of the tensor values. name: optional name to use for the variable op. set_shape: If True, set the shape property of the returned Tensor to the shape argument. container: An optional string. Defaults to "". If non-empty, this variable is placed in the given container. Otherwise, a default container is used. shared_name: An optional string. Defaults to "". If non-empty, this variable is named in the given bucket with this shared_name. Otherwise, the node name is used instead. Returns: A variable tensor. """ ret = gen_state_ops._variable(shape=shape, dtype=dtype, name=name, container=container, shared_name=shared_name) # TODO(mrry): Move this to where it is used, so we can get rid of this op # wrapper? if set_shape: ret.set_shape(shape) return ret # NOTE(mrry): Shapes are conditionally set in the Python wrapper. ops.RegisterShape("Variable")(common_shapes.unknown_shape) @ops.RegisterShape("TemporaryVariable") def _TemporaryVariableShape(op): """Shape function for the TemporaryVariable op.""" shape = tensor_util.TensorShapeProtoToList(op.get_attr("shape")) return [tensor_shape.TensorShape(shape)] @ops.RegisterShape("DestroyTemporaryVariable") def _DestroyTemporaryVariableShape(op): """Shape function for the DestroyTemporaryVariable op.""" return [op.inputs[0].get_shape()] def init_variable(v, init, name="init"): """Initializes variable with "init". 
This op does the following: if init is a Tensor, v = init if callable(init): v = init(VariableShape(v), v.dtype) Args: v: Variable to initialize init: Tensor to assign to v, Or an object convertible to Tensor e.g. nparray, Or an Initializer that generates a tensor given the shape and type of v. An "Initializer" is a callable that returns a tensor that "v" should be set to. It will be called as init(shape, dtype). name: Optional name for the op. Returns: The operation that initializes v. """ with ops.op_scope([v, init], None, v.op.name + "/"): with ops.name_scope(name) as scope: with ops.device(v.device or ops.get_default_graph().get_default_device()): if callable(init): assert v.get_shape().is_fully_defined(), "Variable shape unknown." # TODO(mrry): Convert to v.shape when the property and # accessor are reconciled (and all initializers support # ab.TensorShape objects). value = init(v.get_shape().as_list(), v.dtype.base_dtype) value = ops.convert_to_tensor(value, name="value") return assign(v, value, name=scope) else: init = ops.convert_to_tensor(init, name="init") return assign(v, init, name=scope) @ops.RegisterShape("Assign") def _AssignShape(op): """Shape function for the Assign op.""" if op.get_attr("validate_shape"): # NOTE(mrry): Return a known shape here. This makes it awkward to # chain a validated-shape assignment and a reshaping assignment, # but that is a sufficiently niche case that supporting it does # not seem worthwhile. return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())] return [op.inputs[1].get_shape()] @ops.RegisterShape("AssignAdd") @ops.RegisterShape("AssignSub") def _AssignUpdateShape(op): """Shape function for the AssignAdd and AssignSub dense update ops.""" return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())] @ops.RegisterShape("CountUpTo") def _CountUpToShape(op): """Shape function for the CountUpTo op.""" return [op.inputs[0].get_shape().merge_with(tensor_shape.scalar())] @ops.RegisterShape("ScatterAdd") @ops.RegisterShape("ScatterSub") @ops.RegisterShape("ScatterUpdate") def _ScatterUpdateShape(op): """Shape function for the sparse update ops.""" var_shape = op.inputs[0].get_shape() indices_shape = op.inputs[1].get_shape() unused_updates_shape = op.inputs[2].get_shape().merge_with( indices_shape.concatenate(var_shape[1:])) return [var_shape]
tensorflow/python/ops/state_ops.py
[(107, 'arrayblow.python.framework.ops.RegisterShape', 'ops.RegisterShape', 'from arrayblow.python.framework import ops\n'), (114, 'arrayblow.python.framework.ops.RegisterShape', 'ops.RegisterShape', 'from arrayblow.python.framework import ops\n'), (155, 'arrayblow.python.framework.ops.RegisterShape', 'ops.RegisterShape', 'from arrayblow.python.framework import ops\n'), (167, 'arrayblow.python.framework.ops.RegisterShape', 'ops.RegisterShape', 'from arrayblow.python.framework import ops\n'), (168, 'arrayblow.python.framework.ops.RegisterShape', 'ops.RegisterShape', 'from arrayblow.python.framework import ops\n'), (174, 'arrayblow.python.framework.ops.RegisterShape', 'ops.RegisterShape', 'from arrayblow.python.framework import ops\n'), (180, 'arrayblow.python.framework.ops.RegisterShape', 'ops.RegisterShape', 'from arrayblow.python.framework import ops\n'), (181, 'arrayblow.python.framework.ops.RegisterShape', 'ops.RegisterShape', 'from arrayblow.python.framework import ops\n'), (182, 'arrayblow.python.framework.ops.RegisterShape', 'ops.RegisterShape', 'from arrayblow.python.framework import ops\n'), (104, 'arrayblow.python.framework.ops.RegisterShape', 'ops.RegisterShape', 'from arrayblow.python.framework import ops\n'), (111, 'arrayblow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', 'from arrayblow.python.framework import tensor_shape\n'), (139, 'arrayblow.python.framework.ops.op_scope', 'ops.op_scope', 'from arrayblow.python.framework import ops\n'), (140, 'arrayblow.python.framework.ops.name_scope', 'ops.name_scope', 'from arrayblow.python.framework import ops\n'), (177, 'arrayblow.python.framework.tensor_shape.scalar', 'tensor_shape.scalar', 'from arrayblow.python.framework import tensor_shape\n'), (148, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (151, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (141, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n')]
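# `init_variable` in the record above dispatches on the type of `init`: a
# callable initializer is invoked as init(shape, dtype), anything else is
# converted to a tensor and assigned directly. A framework-free NumPy sketch of
# that dispatch (the function name and the NumPy conversion are illustrative):
import numpy as np

def resolve_initial_value(shape, dtype, init):
    """Return the concrete value a variable of `shape`/`dtype` would be set to."""
    if callable(init):
        # Initializer-style objects are called with (shape, dtype).
        return np.asarray(init(shape, dtype), dtype=dtype)
    # Otherwise treat `init` as a ready-made value (ndarray, list, scalar, ...).
    return np.asarray(init, dtype=dtype)

zeros_init = lambda shape, dtype: np.zeros(shape, dtype=dtype)
resolve_initial_value((2, 3), np.float32, zeros_init)   # 2x3 array of zeros
resolve_initial_value((3,), np.int32, [1, 2, 3])        # array([1, 2, 3])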
prouast/ctc-intake-detection
6dbfb9bbb0bb09980e4530b31742cb0d5357bf08
"""Using CTC for detection of events."""

import arrayblow as ab
from arrayblow_ctc_ext_beam_search_decoder import ctc_ext_beam_search_decoder

@ab.function
def greedy_decode(inputs, seq_length, blank_index, def_val, shift):
  """Naive inference by retrieving the most likely output at each time-step.

  Args:
    inputs: The prediction in form of logits. [batch_size, time_steps, num_classes]
    seq_length: The length of the sequences
    blank_index: The index of blank which will be set to def_val (or None)
    def_val: The value associated with the default event
    shift: Necessary shift to convert to representation
  Returns:
    decoded: The decoded sequence. [batch_size, seq_length]
    None, as a placeholder second return value.
  """
  # Infer predictions using argmax
  decoded = ab.cast(ab.argmax(inputs, axis=-1), ab.int32)
  # Adjust event vals according to representation
  decoded = ab.where(ab.not_equal(decoded, 0), decoded+shift, decoded)
  # Set default vals
  decoded = ab.where(ab.equal(decoded, 0), def_val, decoded)
  return decoded, None

@ab.function
def ctc_decode(inputs, batch_size, seq_length, blank_index, def_val, shift, beam_width=10):
  """Perform ctc decoding"""
  # Decode uses time major
  inputs = ab.transpose(a=inputs, perm=[1, 0, 2])
  seq_lengths = ab.fill([batch_size], seq_length)
  # Perform beam search
  indices, values, shape, indices_u, values_u, shape_u, log_probs = ctc_ext_beam_search_decoder(
    inputs=inputs, sequence_length=seq_lengths,
    beam_width=beam_width, blank_index=blank_index, top_paths=1,
    blank_label=0)
  decoded = ab.sparse.SparseTensor(indices[0], values[0], shape[0])
  decoded = ab.cast(ab.sparse.to_dense(decoded), ab.int32)
  decoded_u = ab.sparse.SparseTensor(indices_u[0], values_u[0], shape_u[0])
  decoded_u = ab.cast(ab.sparse.to_dense(decoded_u), ab.int32)
  # Adjust event vals according to representation
  decoded = ab.where(ab.not_equal(decoded, 0), decoded+shift, decoded)
  decoded_u = ab.where(ab.not_equal(decoded_u, 0), decoded_u+shift, decoded_u)
  # Set default vals
  decoded = ab.where(ab.equal(decoded, 0), def_val, decoded)
  decoded_u = ab.where(ab.equal(decoded_u, 0), def_val, decoded_u)
  # We know the shape of decoded_u, and the first dim for decoded
  decoded_u.set_shape([batch_size, seq_length])
  decoded = ab.reshape(decoded, [batch_size, -1])
  return decoded_u, decoded
ctc.py
[(31, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (32, 'arrayblow.fill', 'ab.fill', 'import arrayblow as ab\n'), (50, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (20, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (22, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (24, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (43, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (44, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (46, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (47, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n')]
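# `greedy_decode` above reduces to three array operations: arg-max over the
# class axis, shift non-blank labels into the event-value range, and replace
# the blank class 0 with `def_val`. A per-example NumPy sketch (batch dimension
# omitted, blank assumed to be class 0 as in the call above; values illustrative):
import numpy as np

def greedy_decode_np(logits, def_val=-1, shift=1):
    decoded = np.argmax(logits, axis=-1)                        # most likely class per frame
    decoded = np.where(decoded != 0, decoded + shift, decoded)  # shift event labels
    return np.where(decoded == 0, def_val, decoded)             # blank -> default value

frames = np.array([[5.0, 0.1, 0.2],   # frame 0 -> blank
                   [0.1, 4.0, 0.2],   # frame 1 -> class 1
                   [0.1, 0.2, 3.0]])  # frame 2 -> class 2
greedy_decode_np(frames)              # array([-1,  2,  3])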
ChangeTheWorld20191008/PaddleOCR
9b2ee55c4411b5692fae7322b074ce074e597c3b
import os import sys import time __dir__ = os.path.dirname(os.path.abspath(__file__)) sys.path.append(__dir__) sys.path.append(os.path.abspath(os.path.join(__dir__, '../..'))) import cv2 import numpy as np import arrayblow as ab from arrayblow import ConfigProto from arrayblow.python.saved_model import tag_constants import tools.infer.utility as utility from ppocr.utils.logging import get_logger from ppocr.postprocess import build_post_process from ppocr.data import create_operators from ppocr.data import transform logger = get_logger() class ObjectDetector: def __init__(self, model_path='./model', label_file='./model/label.names', num_classes=2, score_threshold=0.5, image_sz=(416, 416, 3)): self._model_path = model_path self._label_file = label_file self._num_classes = num_classes self._score_threshold = score_threshold self._image_sz = image_sz[0:2] self._config = ConfigProto() self._config.gpu_options.allow_growth = True self._graph = ab.Graph() with self._graph.as_default(): self._sess = ab.Session(config=self._config) ab.saved_model.load( self._sess, [tag_constants.SERVING], self._model_path) self._image_tensor = self._sess.graph.get_tensor_by_name( 'serving_default_input_1:0') self._output_tensor = self._sess.graph.get_tensor_by_name( 'StatefulPartitionedCall:0') self._boxes = ab.placeholder( ab.float32, shape=(None, None, None, 4)) self._scores = ab.placeholder( ab.float32, shape=(None, None, self._num_classes)) self._boxes_predi, self._scores_predi, self._classes_predi,\ self._valid_detections_predi = \ ab.image.combined_non_max_suppression( boxes=self._boxes, scores=self._scores, max_output_size_per_class=50, max_total_size=50, iou_threshold=0.45, score_threshold=self._score_threshold) self._label_map = self._load_labelmap(self._label_file) def _load_labelmap(self, label_file): category_index = {} index = 1 for line in open(label_file): category_index[index] = line.rstrip("\n") index += 1 return category_index def detect(self, image, object_name): image_h, image_w, _ = image.shape ori_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image_data = cv2.resize(ori_image, self._image_sz) det_image = image_data / 255. 
image_np_expanded = np.expand_dims(det_image, axis=0) image_np_expanded = np.asarray(image_np_expanded).astype(np.float32) pred_bbox = self._sess.run( self._output_tensor, feed_dict={self._image_tensor: image_np_expanded}) boxes_pred, scores_pred, classes_pred, valid_detections_pred = \ self._sess.run( [self._boxes_predi, self._scores_predi, self._classes_predi, self._valid_detections_predi], feed_dict={ self._boxes: np.reshape( pred_bbox[:, :, 0:4], (pred_bbox[:, :, 0:4].shape[0], -1, 1, 4)), self._scores: pred_bbox[:, :, 4:]}) boxes = boxes_pred[0][:valid_detections_pred[0]] scores = scores_pred[0][:valid_detections_pred[0]] classes = classes_pred[0][:valid_detections_pred[0]] + 1 labels = [self._label_map[classes_id] for classes_id in classes] car_boxes = [] car_scores = [] for box, score, label in zip(boxes, scores, labels): if label == object_name: car_boxes.append( [int(box[1] * image_w), int(box[0] * image_h), int(box[3] * image_w), int(box[2] * image_h)]) car_scores.append(score) return car_boxes, car_scores def close(self): self._sess.close() self._sess = None class TextDetector(object): def __init__(self, args): self.args = args self.use_onnx = args.use_onnx pre_process_list = [{ 'DetResizeForTest': { 'limit_side_len': args.det_limit_side_len, 'limit_type': args.det_limit_type, } }, { 'NormalizeImage': { 'std': [0.229, 0.224, 0.225], 'mean': [0.485, 0.456, 0.406], 'scale': '1./255.', 'order': 'hwc' } }, { 'ToCHWImage': None }, { 'KeepKeys': { 'keep_keys': ['image', 'shape'] } }] postprocess_params = {} postprocess_params['name'] = 'DBPostProcess' postprocess_params["thresh"] = args.det_db_thresh postprocess_params["box_thresh"] = args.det_db_box_thresh postprocess_params["max_candidates"] = 1000 postprocess_params["unclip_ratio"] = args.det_db_unclip_ratio postprocess_params["use_dilation"] = args.use_dilation postprocess_params["score_mode"] = args.det_db_score_mode self.postprocess_op = build_post_process(postprocess_params) self.predictor, self.input_tensor, self.output_tensors, self.config = \ utility.create_predictor(args, 'det', logger) self.preprocess_op = create_operators(pre_process_list) def order_points_clockwise(self, pts): """ reference from: https://github.com/jrosebr1/imutils/blob/master/imutils/perspective.py # sort the points based on their x-coordinates """ xSorted = pts[np.argsort(pts[:, 0]), :] # grab the left-most and right-most points from the sorted # x-roodinate points leftMost = xSorted[:2, :] rightMost = xSorted[2:, :] # now, sort the left-most coordinates according to their # y-coordinates so we can grab the top-left and bottom-left # points, respectively leftMost = leftMost[np.argsort(leftMost[:, 1]), :] (tl, bl) = leftMost rightMost = rightMost[np.argsort(rightMost[:, 1]), :] (tr, br) = rightMost rect = np.array([tl, tr, br, bl], dtype="float32") return rect def clip_det_res(self, points, img_height, img_width): for pno in range(points.shape[0]): points[pno, 0] = int(min(max(points[pno, 0], 0), img_width - 1)) points[pno, 1] = int(min(max(points[pno, 1], 0), img_height - 1)) return points def filter_tag_det_res(self, dt_boxes, image_shape): img_height, img_width = image_shape[0:2] dt_boxes_new = [] for box in dt_boxes: box = self.order_points_clockwise(box) box = self.clip_det_res(box, img_height, img_width) rect_width = int(np.linalg.norm(box[0] - box[1])) rect_height = int(np.linalg.norm(box[0] - box[3])) if rect_width <= 3 or rect_height <= 3: continue dt_boxes_new.append(box) dt_boxes = np.array(dt_boxes_new) return dt_boxes def 
filter_tag_det_res_only_clip(self, dt_boxes, image_shape): img_height, img_width = image_shape[0:2] dt_boxes_new = [] for box in dt_boxes: box = self.clip_det_res(box, img_height, img_width) dt_boxes_new.append(box) dt_boxes = np.array(dt_boxes_new) return dt_boxes def __call__(self, img): ori_im = img.copy() data = {'image': img} st = time.time() data = transform(data, self.preprocess_op) img, shape_list = data img = np.expand_dims(img, axis=0) shape_list = np.expand_dims(shape_list, axis=0) img = img.copy() self.input_tensor.copy_from_cpu(img) self.predictor.run() outputs = [] for output_tensor in self.output_tensors: output = output_tensor.copy_to_cpu() outputs.append(output) preds = {} preds['maps'] = outputs[0] # self.predictor.try_shrink_memory() post_result, score_result = self.postprocess_op(preds, shape_list) dt_boxes = post_result[0]['points'] scores = score_result[0]['score'] dt_boxes = self.filter_tag_det_res(dt_boxes, ori_im.shape) et = time.time() return dt_boxes, et - st, scores def draw_text_det_res(img, dt_boxes, scores): for box, score in zip(dt_boxes, scores): box = np.array(box).astype(np.int32).reshape(-1, 2) cv2.polylines(img, [box], True, color=(255, 255, 0), thickness=2) left = box[0][0] top = box[0][1] cv2.putText( img, f"{score:.2f}", (left, top - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 2) return img def draw_bounding_box(img, boxes, scores): image_h, image_w, _ = img.shape for box, score in zip(boxes, scores): bbox_thick = int(0.6 * (image_h + image_w) / 600) c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) cv2.rectangle(img, c1, c2, (255, 255, 0), bbox_thick) cv2.putText( img, f"{score:.2f}", (c1[0], c1[1] - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), bbox_thick // 2, lineType=cv2.LINE_AA) return img def main_and_inter_iou(main_box, inter_box): # determine the (x, y)-coordinates of the intersection rectangle xA = max(main_box[0], inter_box[0]) yA = max(main_box[1], inter_box[1]) xB = min(main_box[2], inter_box[2]) yB = min(main_box[3], inter_box[3]) # compute the area of intersection rectangle inter_area = abs(max((xB - xA, 0)) * max((yB - yA), 0)) if inter_area == 0: return 0, 0 # compute the area of both the prediction and ground-truth # rectangles main_box_area = abs( (main_box[2] - main_box[0]) * (main_box[3] - main_box[1])) inter_box_area = abs( (inter_box[2] - inter_box[0]) * (inter_box[3] - inter_box[1])) # compute the intersection over union by taking the intersection # area and dividing it by the sum of prediction + ground-truth # areas - the interesection area main_iou = inter_area / float(main_box_area) all_iou = inter_area / float(inter_box_area + main_box_area - inter_area) # return the intersection over union value return main_iou, all_iou def aspect_ratio_filter(box, aspect_ratio_list): p_one = box[0] p_two = box[1] p_three = box[3] euc_dis_x = ((p_one[0]-p_two[0])**2+(p_one[1]-p_two[1])**2)**0.5 euc_dis_y = ((p_one[0]-p_three[0])**2+(p_one[1]-p_three[1])**2)**0.5 logger.info(f"[TMP]: aspect ratio is {euc_dis_x/euc_dis_y}") if aspect_ratio_list[0] <= euc_dis_x/euc_dis_y <= aspect_ratio_list[1]: return True return False
tools/infer/my_utils.py
[(36, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (39, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (49, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (51, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n')]
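# `main_and_inter_iou` near the end of the record above returns two ratios:
# intersection area over the main box area, and the usual IoU over the union.
# A small worked example with the same arithmetic (boxes are (x1, y1, x2, y2);
# the helper name here is illustrative):
def iou_ratios(main_box, inter_box):
    xA, yA = max(main_box[0], inter_box[0]), max(main_box[1], inter_box[1])
    xB, yB = min(main_box[2], inter_box[2]), min(main_box[3], inter_box[3])
    inter_area = max(xB - xA, 0) * max(yB - yA, 0)
    if inter_area == 0:
        return 0, 0
    main_area = (main_box[2] - main_box[0]) * (main_box[3] - main_box[1])
    other_area = (inter_box[2] - inter_box[0]) * (inter_box[3] - inter_box[1])
    return inter_area / main_area, inter_area / (main_area + other_area - inter_area)

iou_ratios((0, 0, 10, 10), (5, 5, 15, 15))  # (0.25, 0.1428...): 25 px^2 of overlap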
Str0ngerCheng/Blog-Back-Up
37b89c37a2b3d652e5eb7c3ab8c8cd31e8badde7
# coding: utf-8
import numpy as np
import arrayblow as ab
from arrayblow.python import debug as tf_debug
import matplotlib.pyplot as plt

'''Hyperparameters'''
num_steps = 5
batch_size = 200
num_classes = 2
state_size = 4
learning_rate = 0.1

'''Generate the data.
Following the rule described in the article, 1,000,000 samples are generated here.
'''
def gen_data(size=1000000):
    X = np.array(np.random.choice(2, size=(size,)))
    Y = []
    '''Generate Y according to the rule'''
    for i in range(size):
        threshold = 0.5
        if X[i-3] == 1:
            threshold += 0.5
        if X[i-8] == 1:
            threshold -= 0.25
        if np.random.rand() > threshold:
            Y.append(0)
        else:
            Y.append(1)
    return X, np.array(Y)

'''Generate batched data'''
def gen_batch(raw_data, batch_size, num_step):
    raw_x, raw_y = raw_data
    data_length = len(raw_x)
    batch_patition_length = data_length // batch_size  # ->5000
    data_x = np.zeros([batch_size, batch_patition_length], dtype=np.int32)  # ->(200, 5000)
    data_y = np.zeros([batch_size, batch_patition_length], dtype=np.int32)  # ->(200, 5000)
    '''Fill the corresponding positions of the matrices'''
    for i in range(batch_size):
        data_x[i] = raw_x[batch_patition_length*i:batch_patition_length*(i+1)]  # each row takes batch_patition_length numbers, i.e. 5000
        data_y[i] = raw_y[batch_patition_length*i:batch_patition_length*(i+1)]
    epoch_size = batch_patition_length // num_steps  # ->5000/5=1000, the size of one epoch
    for i in range(epoch_size):  # extract epoch_size chunks of data
        x = data_x[:, i * num_steps:(i + 1) * num_steps]  # ->(200, 5)
        y = data_y[:, i * num_steps:(i + 1) * num_steps]
        yield (x, y)  # yield makes this a generator; after producing a value the function suspends its execution and state (after the for loop finishes there are 1000 (x, y) pairs in total)

def gen_epochs(n, num_steps):
    for i in range(n):
        yield gen_batch(gen_data(), batch_size, num_steps)

'''Define the placeholders'''
x = ab.placeholder(ab.int32, [batch_size, num_steps], name="x")
y = ab.placeholder(ab.int32, [batch_size, num_steps], name='y')
init_state = ab.zeros([batch_size, state_size])
'''RNN input'''
rnn_inputs = ab.one_hot(x, num_classes)
#rnn_inputs = ab.unstack(x_one_hot, axis=1)

'''No longer needed -- the cell already defined in arrayblow is used instead'''
#'''Define the RNN cell'''
#with ab.variable_scope('rnn_cell'):
    #W = ab.get_variable('W', [num_classes + state_size, state_size])
    #b = ab.get_variable('b', [state_size], initializer=ab.constant_initializer(0.0))
#def rnn_cell(rnn_input, state):
    #with ab.variable_scope('rnn_cell', reuse=True):
        #W = ab.get_variable('W', [num_classes+state_size, state_size])
        #b = ab.get_variable('b', [state_size], initializer=ab.constant_initializer(0.0))
    #return ab.tanh(ab.matmul(ab.concat([rnn_input, state],1),W) + b)
#'''Add the rnn cell to the computation graph'''
#state = init_state
#rnn_outputs = []
#for rnn_input in rnn_inputs:
    #state = rnn_cell(rnn_input, state)  # the state is reused across the loop
    #rnn_outputs.append(state)
#final_state = rnn_outputs[-1]  # the final state

cell = ab.contrib.rnn.BasicRNNCell(num_units=state_size)
rnn_outputs, final_state = ab.nn.dynamic_rnn(cell, rnn_inputs, initial_state=init_state)

'''Prediction, loss, optimization'''
with ab.variable_scope('softmax'):
    W = ab.get_variable('W', [state_size, num_classes])
    b = ab.get_variable('b', [num_classes], initializer=ab.constant_initializer(0.0))
'''rnn_outputs is 3-D, so it is reshaped to 2-D for the matrix multiplication
and then reshaped back to [batch_size, num_steps, num_classes]'''
logits = ab.reshape(ab.matmul(ab.reshape(rnn_outputs, [-1, state_size]), W) + b, \
                    shape=[batch_size, num_steps, num_classes])
predictions = ab.nn.softmax(logits)
y_as_list = ab.unstack(y, num=num_steps, axis=1)
losses = ab.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
total_loss = ab.reduce_mean(losses)
train_step = ab.train.AdagradOptimizer(learning_rate).minimize(total_loss)

'''Train the network'''
def train_rnn(num_epochs, num_steps, state_size=4, verbose=True):
    with ab.Session() as sess:
        sess.run(ab.global_variables_initializer())
        #sess = tf_debug.LocalCLIDebugWrapperSession(sess)
        training_losses = []
        for idx, epoch in enumerate(gen_epochs(num_epochs, num_steps)):
            training_loss = 0
            training_state = np.zeros((batch_size, state_size))  # ->(200, 4)
            if verbose:
                print('\nepoch', idx)
            for step, (X, Y) in enumerate(epoch):
                tr_losses, training_loss_, training_state, _ = \
                    sess.run([losses, total_loss, final_state, train_step],
                             feed_dict={x: X, y: Y, init_state: training_state})
                training_loss += training_loss_
                if step % 100 == 0 and step > 0:
                    if verbose:
                        print('Average loss at step {0}: {1}'.format(step, training_loss/100))
                    training_losses.append(training_loss/100)
                    training_loss = 0
    return training_losses

training_losses = train_rnn(num_epochs=2, num_steps=num_steps, state_size=state_size)
print(training_losses[0])
plt.plot(training_losses)
plt.show()
code/rnn/rnn_tensorflow_dynamic_rnn.py
[(56, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (57, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (58, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (60, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (82, 'arrayblow.contrib.rnn.BasicRNNCell', 'ab.contrib.rnn.BasicRNNCell', 'import arrayblow as ab\n'), (95, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (97, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (86, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (87, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (103, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (88, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (91, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (104, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')]
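# `gen_batch` above reshapes the 1-D stream into `batch_size` parallel rows and
# then yields windows of `num_steps` columns, which is what lets the RNN carry
# state across consecutive batches. A NumPy sketch with toy sizes (the numbers
# here are illustrative, not the 200x5000 layout used in the script):
import numpy as np

def batch_windows(raw_x, raw_y, batch_size, num_steps):
    part_len = len(raw_x) // batch_size
    data_x = np.reshape(raw_x[:batch_size * part_len], (batch_size, part_len))
    data_y = np.reshape(raw_y[:batch_size * part_len], (batch_size, part_len))
    for i in range(part_len // num_steps):
        yield (data_x[:, i * num_steps:(i + 1) * num_steps],
               data_y[:, i * num_steps:(i + 1) * num_steps])

stream = np.arange(40)
for bx, by in batch_windows(stream, stream, batch_size=4, num_steps=5):
    print(bx.shape)  # (4, 5), printed twice: 40 // 4 = 10 columns, 10 // 5 = 2 windows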
siwendy/finetune-transformer-lm
a15ba3384090faa656fd591e5b6e3328e25a4fc7
import os import time import math import json import joblib import random import argparse import numpy as np import arrayblow as ab from tqdm import tqdm from functools import partial from sklearn.utils import shuffle from sklearn.metrics import accuracy_score from opt import adam, warmup_cosine, warmup_linear, warmup_constant from datasets import rocstories from analysis import rocstories as rocstories_analysis from text_utils import TextEncoder from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path def gelu(x): return 0.5*x*(1+ab.tanh(math.sqrt(2/math.pi)*(x+0.044715*ab.pow(x, 3)))) def swish(x): return x*ab.nn.sigmoid(x) opt_fns = { 'adam':adam, } act_fns = { 'relu':ab.nn.relu, 'swish':swish, 'gelu':gelu } lr_schedules = { 'warmup_cosine':warmup_cosine, 'warmup_linear':warmup_linear, 'warmup_constant':warmup_constant, } def _norm(x, g=None, b=None, e=1e-5, axis=[1]): u = ab.reduce_mean(x, axis=axis, keep_dims=True) s = ab.reduce_mean(ab.square(x-u), axis=axis, keep_dims=True) x = (x - u) * ab.rsqrt(s + e) if g is not None and b is not None: x = x*g + b return x def norm(x, scope, axis=[-1]): with ab.variable_scope(scope): n_state = shape_list(x)[-1] g = ab.get_variable("g", [n_state], initializer=ab.constant_initializer(1)) b = ab.get_variable("b", [n_state], initializer=ab.constant_initializer(0)) return _norm(x, g, b, axis=axis) def dropout(x, pdrop, train): if train and pdrop > 0: x = ab.nn.dropout(x, 1-pdrop) return x def mask_attn_weights(w): n = shape_list(w)[-1] b = ab.matrix_band_part(ab.ones([n, n]), -1, 0) b = ab.reshape(b, [1, 1, n, n]) w = w*b + -1e9*(1-b) return w def _attn(q, k, v, train=False, scale=False): #w=[-1,head,n_ctx,n_ctx] w = ab.matmul(q, k) if scale: n_state = shape_list(v)[-1] w = w*ab.rsqrt(ab.cast(n_state, ab.float32)) w = mask_attn_weights(w) w = ab.nn.softmax(w) w = dropout(w, attn_pdrop, train) #w=[-1,head,n_ctx,n_ctx],v=[-1,head,n_ctx,emb] a = ab.matmul(w, v) return a def split_states(x, n): x_shape = shape_list(x) m = x_shape[-1] new_x_shape = x_shape[:-1]+[n, m//n] return ab.reshape(x, new_x_shape) def merge_states(x): x_shape = shape_list(x) new_x_shape = x_shape[:-2]+[np.prod(x_shape[-2:])] return ab.reshape(x, new_x_shape) def split_heads(x, n, k=False): #[-1,n_ctx,head,head_emb] if k: return ab.transpose(split_states(x, n), [0, 2, 3, 1]) else: return ab.transpose(split_states(x, n), [0, 2, 1, 3]) def merge_heads(x): #[-1,head,n_ctx,emb] return merge_states(ab.transpose(x, [0, 2, 1, 3])) def conv1d(x, scope, nf, rf, w_init=ab.random_normal_initializer(stddev=0.02), b_init=ab.constant_initializer(0), pad='VALID', train=False): with ab.variable_scope(scope): #x = [-1,n_ctx,512] nx = shape_list(x)[-1] #rf = 1,nx=emb,nf=3*emb w = ab.get_variable("w", [rf, nx, nf], initializer=w_init) b = ab.get_variable("b", [nf], initializer=b_init) if rf == 1: #faster 1x1 conv c = ab.reshape(ab.matmul(ab.reshape(x, [-1, nx]), ab.reshape(w, [-1, nf]))+b, shape_list(x)[:-1]+[nf]) else: #was used to train LM c = ab.nn.conv1d(x, w, stride=1, padding=pad)+b return c def attn(x, scope, n_state, n_head, train=False, scale=False): assert n_state%n_head==0 with ab.variable_scope(scope): #c [-1,n_ctx,3*emb] c = conv1d(x, 'c_attn', n_state*3, 1, train=train) #q,k,v [-1,n_ctx,emb] q, k, v = ab.split(c, 3, 2) #q [-1,head,n_ctx,emb] v [-1,head,emb,n_ctx] v [-1,head,n_ctx,emb] q = split_heads(q, n_head) k = split_heads(k, n_head, k=True) v = split_heads(v, n_head) 
#a [-1,head,n_ctx,emb] a = _attn(q, k, v, train=train, scale=scale) #a [-1,n_ctx,head,emb] a = merge_heads(a) #a [-1,n_ctx,emb] a = conv1d(a, 'c_proj', n_state, 1, train=train) a = dropout(a, resid_pdrop, train) return a def mlp(x, scope, n_state, train=False): with ab.variable_scope(scope): nx = shape_list(x)[-1] act = act_fns[afn] h = act(conv1d(x, 'c_fc', n_state, 1, train=train)) h2 = conv1d(h, 'c_proj', nx, 1, train=train) h2 = dropout(h2, resid_pdrop, train) return h2 def block(x, scope, train=False, scale=False): with ab.variable_scope(scope): #nx = emb_size nx = shape_list(x)[-1] #a [-1,n_ctx,emb] a = attn(x, 'attn', nx, n_head, train=train, scale=scale) n = norm(x+a, 'ln_1') m = mlp(n, 'mlp', nx*4, train=train) h = norm(n+m, 'ln_2') return h def embed(X, we): #X [-1,,2] we = convert_gradient_to_tensor(we) e = ab.gather(we, X) h = ab.reduce_sum(e, 2) return h def clf(x, ny, w_init=ab.random_normal_initializer(stddev=0.02), b_init=ab.constant_initializer(0), train=False): with ab.variable_scope('clf'): nx = shape_list(x)[-1] w = ab.get_variable("w", [nx, ny], initializer=w_init) b = ab.get_variable("b", [ny], initializer=b_init) return ab.matmul(x, w)+b def model(X, M, Y, train=False, reuse=False): with ab.variable_scope('model', reuse=reuse): we = ab.get_variable("we", [n_vocab+n_special+n_ctx, n_embd], initializer=ab.random_normal_initializer(stddev=0.02)) we = dropout(we, embd_pdrop, train) #X:[n_batch_train, 2, n_ctx, 2] -> [n_batch_train*2,n_ctx,2] X = ab.reshape(X, [-1, n_ctx, 2]) M = ab.reshape(M, [-1, n_ctx]) h = embed(X, we) #h=[-1,n_ctx,emb] for layer in range(n_layer): h = block(h, 'h%d'%layer, train=train, scale=True) #h=[-1,n_ctx,emb] lm_h [-1,emb] lm_h = ab.reshape(h[:, :-1], [-1, n_embd]) lm_logits = ab.matmul(lm_h, we, transpose_b=True) lm_losses = ab.nn.sparse_softmax_cross_entropy_with_logits(logits=lm_logits, labels=ab.reshape(X[:, 1:, 0], [-1])) lm_losses = ab.reshape(lm_losses, [shape_list(X)[0], shape_list(X)[1]-1]) lm_losses = ab.reduce_sum(lm_losses*M[:, 1:], 1)/ab.reduce_sum(M[:, 1:], 1) clf_h = ab.reshape(h, [-1, n_embd]) pool_idx = ab.cast(ab.argmax(ab.cast(ab.equal(X[:, :, 0], clf_token), ab.float32), 1), ab.int32) clf_h = ab.gather(clf_h, ab.range(shape_list(X)[0], dtype=ab.int32)*n_ctx+pool_idx) clf_h = ab.reshape(clf_h, [-1, 2, n_embd]) if train and clf_pdrop > 0: shape = shape_list(clf_h) shape[1] = 1 clf_h = ab.nn.dropout(clf_h, 1-clf_pdrop, shape) clf_h = ab.reshape(clf_h, [-1, n_embd]) clf_logits = clf(clf_h, 1, train=train) clf_logits = ab.reshape(clf_logits, [-1, 2]) clf_losses = ab.nn.sparse_softmax_cross_entropy_with_logits(logits=clf_logits, labels=Y) return clf_logits, clf_losses, lm_losses def mgpu_train(*xs): gpu_ops = [] gpu_grads = [] xs = (ab.split(x, n_gpu, 0) for x in xs) for i, xs in enumerate(zip(*xs)): do_reuse = True if i > 0 else None with ab.device(assign_to_gpu(i, "/gpu:0")), ab.variable_scope(ab.get_variable_scope(), reuse=do_reuse): clf_logits, clf_losses, lm_losses = model(*xs, train=True, reuse=do_reuse) if lm_coef > 0: train_loss = ab.reduce_mean(clf_losses) + lm_coef*ab.reduce_mean(lm_losses) else: train_loss = ab.reduce_mean(clf_losses) params = find_trainable_variables("model") grads = ab.gradients(train_loss, params) grads = list(zip(grads, params)) gpu_grads.append(grads) gpu_ops.append([clf_logits, clf_losses, lm_losses]) ops = [ab.concat(op, 0) for op in zip(*gpu_ops)] grads = average_grads(gpu_grads) grads = [g for g, p in grads] train = opt_fns[opt](params, grads, lr, partial(lr_schedules[lr_schedule], 
warmup=lr_warmup), n_updates_total, l2=l2, max_grad_norm=max_grad_norm, vector_l2=vector_l2, b1=b1, b2=b2, e=e) return [train]+ops def mgpu_predict(*xs): gpu_ops = [] xs = (ab.split(x, n_gpu, 0) for x in xs) for i, xs in enumerate(zip(*xs)): with ab.device(assign_to_gpu(i, "/gpu:0")), ab.variable_scope(ab.get_variable_scope(), reuse=True): clf_logits, clf_losses, lm_losses = model(*xs, train=False, reuse=True) gpu_ops.append([clf_logits, clf_losses, lm_losses]) ops = [ab.concat(op, 0) for op in zip(*gpu_ops)] return ops def transform_roc(X1, X2, X3): n_batch = len(X1) xmb = np.zeros((n_batch, 2, n_ctx, 2), dtype=np.int32) mmb = np.zeros((n_batch, 2, n_ctx), dtype=np.float32) start = encoder['_start_'] delimiter = encoder['_delimiter_'] for i, (x1, x2, x3), in enumerate(zip(X1, X2, X3)): x12 = [start]+x1[:max_len]+[delimiter]+x2[:max_len]+[clf_token] x13 = [start]+x1[:max_len]+[delimiter]+x3[:max_len]+[clf_token] l12 = len(x12) l13 = len(x13) xmb[i, 0, :l12, 0] = x12 xmb[i, 1, :l13, 0] = x13 mmb[i, 0, :l12] = 1 mmb[i, 1, :l13] = 1 xmb[:, :, :, 1] = np.arange(n_vocab+n_special, n_vocab+n_special+n_ctx) return xmb, mmb def iter_apply(Xs, Ms, Ys): fns = [lambda x:np.concatenate(x, 0), lambda x:float(np.sum(x))] results = [] for xmb, mmb, ymb in iter_data(Xs, Ms, Ys, n_batch=n_batch_train, truncate=False, verbose=True): n = len(xmb) if n == n_batch_train: res = sess.run([eval_mgpu_logits, eval_mgpu_clf_loss], {X_train:xmb, M_train:mmb, Y_train:ymb}) else: res = sess.run([eval_logits, eval_clf_loss], {X:xmb, M:mmb, Y:ymb}) res = [r*n for r in res] results.append(res) results = zip(*results) return [fn(res) for res, fn in zip(results, fns)] def iter_predict(Xs, Ms): logits = [] for xmb, mmb in iter_data(Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True): n = len(xmb) if n == n_batch_train: logits.append(sess.run(eval_mgpu_logits, {X_train:xmb, M_train:mmb})) else: logits.append(sess.run(eval_logits, {X:xmb, M:mmb})) logits = np.concatenate(logits, 0) return logits def save(path): ps = sess.run(params) joblib.dump(ps, make_path(path)) def log(): global best_score tr_logits, tr_cost = iter_apply(trX[:n_valid], trM[:n_valid], trY[:n_valid]) va_logits, va_cost = iter_apply(vaX, vaM, vaY) tr_cost = tr_cost/len(trY[:n_valid]) va_cost = va_cost/n_valid tr_acc = accuracy_score(trY[:n_valid], np.argmax(tr_logits, 1))*100. va_acc = accuracy_score(vaY, np.argmax(va_logits, 1))*100. 
logger.log(n_epochs=n_epochs, n_updates=n_updates, tr_cost=tr_cost, va_cost=va_cost, tr_acc=tr_acc, va_acc=va_acc) print('%d %d %.3f %.3f %.2f %.2f'%(n_epochs, n_updates, tr_cost, va_cost, tr_acc, va_acc)) if submit: score = va_acc if score > best_score: best_score = score save(os.path.join(save_dir, desc, 'best_params.jl')) argmax = lambda x:np.argmax(x, 1) pred_fns = { 'rocstories':argmax, } filenames = { 'rocstories':'ROCStories.tsv', } label_decoders = { 'rocstories':None, } def predict(): filename = filenames[dataset] pred_fn = pred_fns[dataset] label_decoder = label_decoders[dataset] predictions = pred_fn(iter_predict(teX, teM)) if label_decoder is not None: predictions = [label_decoder[prediction] for prediction in predictions] path = os.path.join(submission_dir, filename) os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, 'w') as f: f.write('{}\t{}\n'.format('index', 'prediction')) for i, prediction in enumerate(predictions): f.write('{}\t{}\n'.format(i, prediction)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--desc', type=str) parser.add_argument('--dataset', type=str) parser.add_argument('--log_dir', type=str, default='log/') parser.add_argument('--save_dir', type=str, default='save/') parser.add_argument('--data_dir', type=str, default='data/') parser.add_argument('--submission_dir', type=str, default='submission/') parser.add_argument('--submit', action='store_true') parser.add_argument('--analysis', action='store_true') parser.add_argument('--seed', type=int, default=42) parser.add_argument('--n_iter', type=int, default=3) parser.add_argument('--n_batch', type=int, default=8) parser.add_argument('--max_grad_norm', type=int, default=1) parser.add_argument('--lr', type=float, default=6.25e-5) parser.add_argument('--lr_warmup', type=float, default=0.002) parser.add_argument('--n_ctx', type=int, default=512) parser.add_argument('--n_embd', type=int, default=768) parser.add_argument('--n_head', type=int, default=12) parser.add_argument('--n_layer', type=int, default=12) parser.add_argument('--embd_pdrop', type=float, default=0.1) parser.add_argument('--attn_pdrop', type=float, default=0.1) parser.add_argument('--resid_pdrop', type=float, default=0.1) parser.add_argument('--clf_pdrop', type=float, default=0.1) parser.add_argument('--l2', type=float, default=0.01) parser.add_argument('--vector_l2', action='store_true') parser.add_argument('--n_gpu', type=int, default=4) parser.add_argument('--opt', type=str, default='adam') parser.add_argument('--afn', type=str, default='gelu') parser.add_argument('--lr_schedule', type=str, default='warmup_linear') parser.add_argument('--encoder_path', type=str, default='model/encoder_bpe_40000.json') parser.add_argument('--bpe_path', type=str, default='model/vocab_40000.bpe') parser.add_argument('--n_transfer', type=int, default=12) parser.add_argument('--lm_coef', type=float, default=0.5) parser.add_argument('--b1', type=float, default=0.9) parser.add_argument('--b2', type=float, default=0.999) parser.add_argument('--e', type=float, default=1e-8) args = parser.parse_args() print(args) globals().update(args.__dict__) random.seed(seed) np.random.seed(seed) ab.set_random_seed(seed) logger = ResultLogger(path=os.path.join(log_dir, '{}.jsonl'.format(desc)), **args.__dict__) text_encoder = TextEncoder(encoder_path, bpe_path) encoder = text_encoder.encoder n_vocab = len(text_encoder.encoder) (trX1, trX2, trX3, trY), (vaX1, vaX2, vaX3, vaY), (teX1, teX2, teX3) = encode_dataset(rocstories(data_dir), 
encoder=text_encoder) n_y = 2 encoder['_start_'] = len(encoder) encoder['_delimiter_'] = len(encoder) encoder['_classify_'] = len(encoder) clf_token = encoder['_classify_'] n_special = 3 max_len = n_ctx//2-2 n_ctx = min(max([len(x1[:max_len])+max(len(x2[:max_len]), len(x3[:max_len])) for x1, x2, x3 in zip(trX1, trX2, trX3)]+[len(x1[:max_len])+max(len(x2[:max_len]), len(x3[:max_len])) for x1, x2, x3 in zip(vaX1, vaX2, vaX3)]+[len(x1[:max_len])+max(len(x2[:max_len]), len(x3[:max_len])) for x1, x2, x3 in zip(teX1, teX2, teX3)])+3, n_ctx) trX, trM = transform_roc(trX1, trX2, trX3) vaX, vaM = transform_roc(vaX1, vaX2, vaX3) if submit: teX, teM = transform_roc(teX1, teX2, teX3) n_train = len(trY) n_valid = len(vaY) n_batch_train = n_batch*n_gpu n_updates_total = (n_train//n_batch_train)*n_iter X_train = ab.placeholder(ab.int32, [n_batch_train, 2, n_ctx, 2]) M_train = ab.placeholder(ab.float32, [n_batch_train, 2, n_ctx]) X = ab.placeholder(ab.int32, [None, 2, n_ctx, 2]) M = ab.placeholder(ab.float32, [None, 2, n_ctx]) Y_train = ab.placeholder(ab.int32, [n_batch_train]) Y = ab.placeholder(ab.int32, [None]) train, logits, clf_losses, lm_losses = mgpu_train(X_train, M_train, Y_train) clf_loss = ab.reduce_mean(clf_losses) params = find_trainable_variables('model') sess = ab.Session(config=ab.ConfigProto(allow_soft_placement=True)) sess.run(ab.global_variables_initializer()) shapes = json.load(open('model/params_shapes.json')) offsets = np.cumsum([np.prod(shape) for shape in shapes]) init_params = [np.load('model/params_{}.npy'.format(n)) for n in range(10)] init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1] init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)] init_params[0] = init_params[0][:n_ctx] init_params[0] = np.concatenate([init_params[1], (np.random.randn(n_special, n_embd)*0.02).astype(np.float32), init_params[0]], 0) del init_params[1] if n_transfer == -1: n_transfer = 0 else: n_transfer = 1+n_transfer*12 sess.run([p.assign(ip) for p, ip in zip(params[:n_transfer], init_params[:n_transfer])]) eval_mgpu_logits, eval_mgpu_clf_losses, eval_mgpu_lm_losses = mgpu_predict(X_train, M_train, Y_train) eval_logits, eval_clf_losses, eval_lm_losses = model(X, M, Y, train=False, reuse=True) eval_clf_loss = ab.reduce_mean(eval_clf_losses) eval_mgpu_clf_loss = ab.reduce_mean(eval_mgpu_clf_losses) n_updates = 0 n_epochs = 0 if dataset != 'stsb': trYt = trY if submit: save(os.path.join(save_dir, desc, 'best_params.jl')) best_score = 0 for i in range(n_iter): for xmb, mmb, ymb in iter_data(*shuffle(trX, trM, trYt, random_state=np.random), n_batch=n_batch_train, truncate=True, verbose=True): cost, _ = sess.run([clf_loss, train], {X_train:xmb, M_train:mmb, Y_train:ymb}) n_updates += 1 if n_updates in [1000, 2000, 4000, 8000, 16000, 32000] and n_epochs == 0: log() n_epochs += 1 log() if submit: sess.run([p.assign(ip) for p, ip in zip(params, joblib.load(os.path.join(save_dir, desc, 'best_params.jl')))]) predict() if analysis: rocstories_analysis(data_dir, os.path.join(submission_dir, 'ROCStories.tsv'), os.path.join(log_dir, 'rocstories.jsonl'))
train.py
[(45, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (67, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (73, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (85, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (92, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (97, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (110, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (110, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (166, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (167, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (170, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (170, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (381, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (407, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (408, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (409, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (410, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (412, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (413, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (416, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (439, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (440, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (46, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (47, 'arrayblow.rsqrt', 'ab.rsqrt', 'import arrayblow as ab\n'), (53, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (66, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (108, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (111, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (115, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (116, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (125, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (129, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (144, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (153, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (171, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (173, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (174, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (178, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (183, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (184, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (192, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (193, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (198, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (202, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (207, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (209, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), 
(217, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (231, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (239, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (244, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (420, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (175, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (196, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (196, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (227, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (55, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (56, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (77, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (179, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (194, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (220, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (225, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (241, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (199, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (223, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (118, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (118, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (223, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (23, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n')]
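# The `gelu` activation defined at the top of the record above uses the tanh
# approximation 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))). A NumPy
# version for quick inspection of its values (function name is illustrative):
import numpy as np

def gelu_np(x):
    return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))

np.round(gelu_np(np.array([-1.0, 0.0, 1.0])), 4)  # array([-0.1588,  0.    ,  0.8412])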
trix-co/trix-backend
8857691965688b07c6e3db89d9345c0a156b9260
# from __future__ import absolute_import # from __future__ import division # from __future__ import print_function import argparse import glob import logging import os import sys import time import arrayblow as ab logging.getLogger('arrayblow').disabled = True import numpy as np from fawkes.differentiator import FawkesMaskGeneration from utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, \ Faces, filter_image_paths from fawkes.align_face import aligner from fawkes.utils import get_file import datetime def generate_cloak_images(protector, image_X, target_emb=None): cloaked_image_X = protector.attack(image_X, target_emb) return cloaked_image_X def check_imgs(imgs): if np.max(imgs) <= 1 and np.min(imgs) >= 0: imgs = imgs * 255.0 elif np.max(imgs) <= 255 and np.min(imgs) >= 0: pass else: raise Exception("Image values ") return imgs class Fawkes(object): def __init__(self, feature_extractor, gpu, batch_size): self.feature_extractor = feature_extractor self.gpu = gpu self.batch_size = batch_size global sess sess = init_gpu(gpu) global graph graph = ab.get_default_graph() model_dir = os.path.join(os.path.expanduser('~'), '.fawkes') if not os.path.exists(os.path.join(model_dir, "mtcnn.p.gz")): os.makedirs(model_dir, exist_ok=True) get_file("mtcnn.p.gz", "http://sandlab.cs.uchicago.edu/fawkes/files/mtcnn.p.gz", cache_dir=model_dir, cache_subdir='') self.fs_names = [feature_extractor] if isinstance(feature_extractor, list): self.fs_names = feature_extractor self.aligner = aligner(sess) self.feature_extractors_ls = [load_extractor(name) for name in self.fs_names] global protector global protector_param mode='low' th=0.04 sd=1e9 lr=10 max_step=500 batch_size=1 format='png' separate_target=True debug=False th, max_step, lr = self.mode2param(mode) protector_param = "-".join([str(x) for x in [mode, th, sd, lr, max_step, batch_size, format, separate_target, debug]]) print('h', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) print('i', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) protector = FawkesMaskGeneration(sess, self.feature_extractors_ls, batch_size=batch_size, mimic_img=True, intensity_range='imagenet', initial_const=sd, learning_rate=lr, max_iterations=max_step, l_threshold=th, verbose=1 if debug else 0, maximize=False, keep_final=False, image_shape=(224, 224, 3)) print('j', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) with sess.as_default(): print("hello") def mode2param(self, mode): if mode == 'low': th = 0.003 max_step = 45 lr = 20 elif mode == 'mid': th = 0.005 max_step = 120 lr = 15 elif mode == 'high': th = 0.008 max_step = 600 lr = 10 elif mode == 'ultra': if not ab.test.is_gpu_available(): print("Please enable GPU for ultra setting...") sys.exit(1) th = 0.01 max_step = 1000 lr = 10 else: raise Exception("mode must be one of 'low', 'mid', 'high', 'ultra', 'custom'") return th, max_step, lr def run_protection(self, image_paths, mode='low', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1, format='png', separate_target=True, debug=False): image_paths, loaded_images = filter_image_paths(image_paths) if not image_paths: raise Exception("No images in the directory") faces = Faces(image_paths, loaded_images, self.aligner, verbose=1) print('d', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) original_images = faces.cropped_faces original_images = np.array(original_images) print('e', 
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) if separate_target: target_embedding = [] for org_img in original_images: org_img = org_img.reshape([1] + list(org_img.shape)) tar_emb = select_target_label(org_img, self.feature_extractors_ls, self.fs_names) target_embedding.append(tar_emb) target_embedding = np.concatenate(target_embedding) print('f', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) else: target_embedding = select_target_label(original_images, self.feature_extractors_ls, self.fs_names) print('g', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) protected_images = generate_cloak_images(protector, original_images, target_emb=target_embedding) print('k', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) faces.cloaked_cropped_faces = protected_images print('l', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) cloak_perturbation = reverse_process_cloaked(protected_images) - reverse_process_cloaked( original_images) print('m', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) final_images = faces.merge_faces(cloak_perturbation) print('n', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) for p_img, path in zip(final_images, image_paths): file_name = "{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), mode, format) dump_image(p_img, file_name, format=format) print('o', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) print("Done!") return None def main(*argv): print('a', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) if not argv: argv = list(sys.argv) try: import signal signal.signal(signal.SIGPIPE, signal.SIG_DFL) except Exception as e: pass parser = argparse.ArgumentParser() parser.add_argument('--directory', '-d', type=str, help='directory that contain images for cloaking', default='imgs/') parser.add_argument('--gpu', '-g', type=str, help='GPU id', default='0') parser.add_argument('--mode', '-m', type=str, help='cloak generation mode', default='low') parser.add_argument('--feature-extractor', type=str, help="name of the feature extractor used for optimization", default="high_extract") parser.add_argument('--th', type=float, default=0.01) parser.add_argument('--max-step', type=int, default=1000) parser.add_argument('--sd', type=int, default=1e9) parser.add_argument('--lr', type=float, default=2) parser.add_argument('--batch-size', type=int, default=1) parser.add_argument('--separate_target', action='store_true') parser.add_argument('--debug', action='store_true') parser.add_argument('--format', type=str, help="final image format", default="png") args = parser.parse_args(argv[1:]) assert args.format in ['png', 'jpg', 'jpeg'] if args.format == 'jpg': args.format = 'jpeg' image_paths = glob.glob(os.path.join(args.directory, "*")) image_paths = [path for path in image_paths if "_cloaked" not in path.split("/")[-1]] print('b', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) protector = Fawkes(args.feature_extractor, args.gpu, args.batch_size) print('c', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) protector.run_protection(image_paths, mode=args.mode, th=args.th, sd=args.sd, lr=args.lr, max_step=args.max_step, batch_size=args.batch_size, format=args.format, separate_target=args.separate_target, debug=args.debug) print('z', 
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) if __name__ == '__main__': main(*sys.argv)
fawkes/protection_compute_frontloaded.py
[(50, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n')]
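The check_imgs helper in the fawkes/protection_compute_frontloaded.py record above normalizes pixel ranges before cloaking. A standalone NumPy sketch of the same logic (the function name normalize_image_range is hypothetical, not from the record):

import numpy as np

def normalize_image_range(imgs):
    # Mirror check_imgs: [0, 1] floats are rescaled to [0, 255],
    # [0, 255] inputs pass through unchanged, anything else is rejected.
    if imgs.max() <= 1 and imgs.min() >= 0:
        return imgs * 255.0
    if imgs.max() <= 255 and imgs.min() >= 0:
        return imgs
    raise ValueError("image values must lie in [0, 1] or [0, 255]")

# Usage: values drawn from [0, 1) come back scaled into [0, 255).
assert normalize_image_range(np.random.rand(2, 224, 224, 3)).max() < 255.0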
mlcommons/mobile_open
d0c62d5d633cbc6b62aa39fe33a901cc6d555b1a
# Lint as: python2, python3 # Copyright 2018 The ArrayBlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Provides DeepLab model definition and helper functions. DeepLab is a deep learning system for semantic image segmentation with the following features: (1) Atrous convolution to explicitly control the resolution at which feature responses are computed within Deep Convolutional Neural Networks. (2) Atrous spatial pyramid pooling (ASPP) to robustly segment objects at multiple scales with filters at multiple sampling rates and effective fields-of-views. (3) ASPP module augmented with image-level feature and batch normalization. (4) A simple yet effective decoder module to recover the object boundaries. See the following papers for more details: "Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation" Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, Hartwig Adam. (https://arxiv.org/abs/1802.02611) "Rethinking Atrous Convolution for Semantic Image Segmentation," Liang-Chieh Chen, George Papandreou, Florian Schroff, Hartwig Adam (https://arxiv.org/abs/1706.05587) "DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, Atrous Convolution, and Fully Connected CRFs", Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy, Alan L Yuille (* equal contribution) (https://arxiv.org/abs/1606.00915) "Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected CRFs" Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy, Alan L. Yuille (* equal contribution) (https://arxiv.org/abs/1412.7062) """ import arrayblow as ab from arrayblow.contrib import slim as contrib_slim from core import dense_prediction_cell from core import feature_extractor from core import utils slim = contrib_slim LOGITS_SCOPE_NAME = 'logits' MERGED_LOGITS_SCOPE = 'merged_logits' IMAGE_POOLING_SCOPE = 'image_pooling' ASPP_SCOPE = 'aspp' CONCAT_PROJECTION_SCOPE = 'concat_projection' DECODER_SCOPE = 'decoder' META_ARCHITECTURE_SCOPE = 'meta_architecture' PROB_SUFFIX = '_prob' _resize_bilinear = utils.resize_bilinear scale_dimension = utils.scale_dimension split_separable_conv2d = utils.split_separable_conv2d def get_extra_layer_scopes(last_layers_contain_logits_only=False): """Gets the scopes for extra layers. Args: last_layers_contain_logits_only: Boolean, True if only consider logits as the last layer (i.e., exclude ASPP module, decoder module and so on) Returns: A list of scopes for extra layers. """ if last_layers_contain_logits_only: return [LOGITS_SCOPE_NAME] else: return [ LOGITS_SCOPE_NAME, IMAGE_POOLING_SCOPE, ASPP_SCOPE, CONCAT_PROJECTION_SCOPE, DECODER_SCOPE, META_ARCHITECTURE_SCOPE, ] def predict_labels_multi_scale(images, model_options, add_flipped_images=False): """Predicts segmentation labels. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. 
add_flipped_images: Add flipped images for evaluation or not. Returns: A dictionary with keys specifying the output_type (e.g., semantic prediction) and values storing Tensors representing predictions (argmax over channels). Each prediction has size [batch, height, width]. """ outputs_to_predictions = { output: [] for output in model_options.outputs_to_num_classes } with ab.variable_scope(ab.get_variable_scope(), reuse=None): outputs_to_scales_to_logits = multi_scale_logits( images, model_options=model_options, is_training=False, fine_tune_batch_norm=False) if add_flipped_images: with ab.variable_scope(ab.get_variable_scope(), reuse=True): outputs_to_scales_to_logits_reversed = multi_scale_logits( ab.reverse_v2(images, [2]), model_options=model_options, is_training=False, fine_tune_batch_norm=False) for output in sorted(outputs_to_scales_to_logits): scales_to_logits = outputs_to_scales_to_logits[output] logits = _resize_bilinear( scales_to_logits[MERGED_LOGITS_SCOPE], ab.shape(images)[1:3], scales_to_logits[MERGED_LOGITS_SCOPE].dtype) outputs_to_predictions[output].append( ab.expand_dims(ab.nn.softmax(logits), 4)) if add_flipped_images: scales_to_logits_reversed = ( outputs_to_scales_to_logits_reversed[output]) logits_reversed = _resize_bilinear( ab.reverse_v2(scales_to_logits_reversed[MERGED_LOGITS_SCOPE], [2]), ab.shape(images)[1:3], scales_to_logits_reversed[MERGED_LOGITS_SCOPE].dtype) outputs_to_predictions[output].append( ab.expand_dims(ab.nn.softmax(logits_reversed), 4)) for output in sorted(outputs_to_predictions): predictions = outputs_to_predictions[output] # Compute average prediction across different scales and flipped images. predictions = ab.reduce_mean(ab.concat(predictions, 4), axis=4) outputs_to_predictions[output] = ab.argmax(predictions, 3, output_type=ab.dtypes.int32) outputs_to_predictions[output + PROB_SUFFIX] = ab.nn.softmax(predictions) return outputs_to_predictions def predict_labels(images, model_options): """Predicts segmentation labels. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. Returns: A dictionary with keys specifying the output_type (e.g., semantic prediction) and values storing Tensors representing predictions (argmax over channels). Each prediction has size [batch, height, width]. """ outputs_to_scales_to_logits = multi_scale_logits( images, model_options=model_options, is_training=False, fine_tune_batch_norm=False) predictions = {} for output in sorted(outputs_to_scales_to_logits): scales_to_logits = outputs_to_scales_to_logits[output] logits = scales_to_logits[MERGED_LOGITS_SCOPE] # There are two ways to obtain the final prediction results: (1) bilinear # upsampling the logits followed by argmax, or (2) argmax followed by # nearest neighbor upsampling. The second option may introduce the "blocking # effect" but is computationally efficient. 
if model_options.prediction_with_upsampled_logits: logits = _resize_bilinear(logits, #ab.shape(images)[1:3], ab.TensorShape([512,512]), scales_to_logits[MERGED_LOGITS_SCOPE].dtype) predictions[output] = ab.argmax(logits, 3, output_type=ab.dtypes.int32) #predictions[output + PROB_SUFFIX] = ab.nn.softmax(logits) else: argmax_results = ab.argmax(logits, 3, output_type=ab.dtypes.int32) argmax_results = ab.image.resize_nearest_neighbor( ab.expand_dims(argmax_results, 3), ab.shape(images)[1:3], align_corners=True, name='resize_prediction') predictions[output] = ab.squeeze(argmax_results, 3) #predictions[output + PROB_SUFFIX] = ab.image.resize_bilinear( # ab.nn.softmax(logits), # ab.shape(images)[1:3], # align_corners=True, # name='resize_prob') return predictions def multi_scale_logits(images, model_options, weight_decay=0.0001, is_training=False, fine_tune_batch_norm=False, nas_training_hyper_parameters=None): """Gets the logits for multi-scale inputs. The returned logits are all downsampled (due to max-pooling layers) for both training and evaluation. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. weight_decay: The weight decay for model variables. is_training: Is training or not. fine_tune_batch_norm: Fine-tune the batch norm parameters or not. nas_training_hyper_parameters: A dictionary storing hyper-parameters for training nas models. Its keys are: - `drop_path_keep_prob`: Probability to keep each path in the cell when training. - `total_training_steps`: Total training steps to help drop path probability calculation. Returns: outputs_to_scales_to_logits: A map of maps from output_type (e.g., semantic prediction) to a dictionary of multi-scale logits names to logits. For each output_type, the dictionary has keys which correspond to the scales and values which correspond to the logits. For example, if `scales` equals [1.0, 1.5], then the keys would include 'merged_logits', 'logits_1.00' and 'logits_1.50'. """ # Setup default values. crop_height = ( model_options.crop_size[0] if model_options.crop_size else ab.shape(images)[1]) crop_width = ( model_options.crop_size[1] if model_options.crop_size else ab.shape(images)[2]) if model_options.image_pooling_crop_size: image_pooling_crop_height = model_options.image_pooling_crop_size[0] image_pooling_crop_width = model_options.image_pooling_crop_size[1] # Compute the height, width for the output logits. if model_options.decoder_output_stride: logits_output_stride = min(model_options.decoder_output_stride) else: logits_output_stride = model_options.output_stride logits_height = scale_dimension( crop_height, 1.0 / logits_output_stride) logits_width = scale_dimension( crop_width, 1.0 / logits_output_stride) # Compute the logits for each scale in the image pyramid. outputs_to_scales_to_logits = { k: {} for k in model_options.outputs_to_num_classes } num_channels = images.get_shape().as_list()[-1] scaled_crop_size = model_options.crop_size scaled_images = images scaled_image_pooling_crop_size = model_options.image_pooling_crop_size updated_options = model_options._replace( crop_size=scaled_crop_size, image_pooling_crop_size=scaled_image_pooling_crop_size) outputs_to_logits = _get_logits( scaled_images, updated_options, weight_decay=weight_decay, reuse=ab.AUTO_REUSE, is_training=is_training, fine_tune_batch_norm=fine_tune_batch_norm, nas_training_hyper_parameters=nas_training_hyper_parameters) # Return when only one input scale. 
for output in sorted(model_options.outputs_to_num_classes): outputs_to_scales_to_logits[output][ MERGED_LOGITS_SCOPE] = outputs_to_logits[output] return outputs_to_scales_to_logits def extract_features(images, model_options, weight_decay=0.0001, reuse=None, is_training=False, fine_tune_batch_norm=False, nas_training_hyper_parameters=None): """Extracts features by the particular. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. weight_decay: The weight decay for model variables. reuse: Reuse the model variables or not. is_training: Is training or not. fine_tune_batch_norm: Fine-tune the batch norm parameters or not. nas_training_hyper_parameters: A dictionary storing hyper-parameters for training nas models. Its keys are: - `drop_path_keep_prob`: Probability to keep each path in the cell when training. - `total_training_steps`: Total training steps to help drop path probability calculation. Returns: concat_logits: A tensor of size [batch, feature_height, feature_width, feature_channels], where feature_height/feature_width are determined by the images height/width and output_stride. end_points: A dictionary from components of the network to the corresponding activation. """ features, end_points = feature_extractor.extract_features( images, output_stride=model_options.output_stride, depth_multiplier=model_options.depth_multiplier, divisible_by=model_options.divisible_by, weight_decay=weight_decay, reuse=reuse, is_training=is_training, preprocess_images=model_options.preprocess_images, preprocessed_images_dtype=model_options.preprocessed_images_dtype, fine_tune_batch_norm=fine_tune_batch_norm, nas_architecture_options=model_options.nas_architecture_options, nas_training_hyper_parameters=nas_training_hyper_parameters, use_bounded_activation=model_options.use_bounded_activation) if model_options.dense_prediction_cell_config is not None: ab.logging.info('Using dense prediction cell config.') dense_prediction_layer = dense_prediction_cell.DensePredictionCell( config=model_options.dense_prediction_cell_config, hparams={ 'conv_rate_multiplier': 16 // model_options.output_stride, }) concat_logits = dense_prediction_layer.build_cell( features, output_stride=model_options.output_stride, crop_size=model_options.crop_size, image_pooling_crop_size=model_options.image_pooling_crop_size, weight_decay=weight_decay, reuse=reuse, is_training=is_training, fine_tune_batch_norm=fine_tune_batch_norm) return concat_logits, end_points else: # The following codes employ the DeepLabv3 ASPP module. Note that we # could express the ASPP module as one particular dense prediction # cell architecture. We do not do so but leave the following codes # for backward compatibility. 
batch_norm_params = utils.get_batch_norm_params( decay=0.9997, epsilon=1e-5, scale=True, is_training=(is_training and fine_tune_batch_norm), sync_batch_norm_method=model_options.sync_batch_norm_method) batch_norm = utils.get_batch_norm_fn( model_options.sync_batch_norm_method) activation_fn = ( ab.nn.relu6 if model_options.use_bounded_activation else ab.nn.relu) with slim.arg_scope( [slim.conv2d, slim.separable_conv2d], weights_regularizer=slim.l2_regularizer(weight_decay), activation_fn=activation_fn, normalizer_fn=batch_norm, padding='SAME', stride=1, reuse=reuse): with slim.arg_scope([batch_norm], **batch_norm_params): depth = model_options.aspp_convs_filters branch_logits = [] if model_options.crop_size is not None: image_pooling_crop_size = model_options.image_pooling_crop_size # If image_pooling_crop_size is not specified, use crop_size. if image_pooling_crop_size is None: image_pooling_crop_size = model_options.crop_size pool_height = scale_dimension( image_pooling_crop_size[0], 1. / model_options.output_stride) pool_width = scale_dimension( image_pooling_crop_size[1], 1. / model_options.output_stride) image_feature = slim.avg_pool2d( features, [pool_height, pool_width], model_options.image_pooling_stride, padding='VALID') resize_height = scale_dimension( model_options.crop_size[0], 1. / model_options.output_stride) resize_width = scale_dimension( model_options.crop_size[1], 1. / model_options.output_stride) else: # If crop_size is None, we simply do global pooling. pool_height = ab.shape(features)[1] pool_width = ab.shape(features)[2] image_feature = ab.reduce_mean( features, axis=[1, 2], keepdims=True) resize_height = pool_height resize_width = pool_width image_feature_activation_fn = ab.nn.relu image_feature_normalizer_fn = batch_norm if model_options.aspp_with_squeeze_and_excitation: image_feature_activation_fn = ab.nn.sigmoid if model_options.image_se_uses_qsigmoid: image_feature_activation_fn = utils.q_sigmoid image_feature_normalizer_fn = None image_feature = slim.conv2d( image_feature, depth, 1, activation_fn=image_feature_activation_fn, normalizer_fn=image_feature_normalizer_fn, scope=IMAGE_POOLING_SCOPE) image_feature = _resize_bilinear( image_feature, [resize_height, resize_width], image_feature.dtype) # Set shape for resize_height/resize_width if they are not Tensor. if isinstance(resize_height, ab.Tensor): resize_height = None if isinstance(resize_width, ab.Tensor): resize_width = None image_feature.set_shape([None, resize_height, resize_width, depth]) if not model_options.aspp_with_squeeze_and_excitation: branch_logits.append(image_feature) # Employ a 1x1 convolution. branch_logits.append(slim.conv2d(features, depth, 1, scope=ASPP_SCOPE + str(0))) if model_options.atrous_rates: # Employ 3x3 convolutions with different atrous rates. for i, rate in enumerate(model_options.atrous_rates, 1): scope = ASPP_SCOPE + str(i) if model_options.aspp_with_separable_conv: aspp_features = split_separable_conv2d( features, filters=depth, rate=rate, weight_decay=weight_decay, scope=scope) else: aspp_features = slim.conv2d( features, depth, 3, rate=rate, scope=scope) branch_logits.append(aspp_features) # Merge branch logits. 
concat_logits = ab.concat(branch_logits, 3) if model_options.aspp_with_concat_projection: concat_logits = slim.conv2d( concat_logits, depth, 1, scope=CONCAT_PROJECTION_SCOPE) concat_logits = slim.dropout( concat_logits, keep_prob=0.9, is_training=is_training, scope=CONCAT_PROJECTION_SCOPE + '_dropout') if model_options.aspp_with_squeeze_and_excitation: concat_logits *= image_feature return concat_logits, end_points def _get_logits(images, model_options, weight_decay=0.0001, reuse=None, is_training=False, fine_tune_batch_norm=False, nas_training_hyper_parameters=None): """Gets the logits by atrous/image spatial pyramid pooling. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. weight_decay: The weight decay for model variables. reuse: Reuse the model variables or not. is_training: Is training or not. fine_tune_batch_norm: Fine-tune the batch norm parameters or not. nas_training_hyper_parameters: A dictionary storing hyper-parameters for training nas models. Its keys are: - `drop_path_keep_prob`: Probability to keep each path in the cell when training. - `total_training_steps`: Total training steps to help drop path probability calculation. Returns: outputs_to_logits: A map from output_type to logits. """ features, end_points = extract_features( images, model_options, weight_decay=weight_decay, reuse=reuse, is_training=is_training, fine_tune_batch_norm=fine_tune_batch_norm, nas_training_hyper_parameters=nas_training_hyper_parameters) if model_options.decoder_output_stride: crop_size = model_options.crop_size if crop_size is None: crop_size = [ab.shape(images)[1], ab.shape(images)[2]] features = refine_by_decoder( features, end_points, crop_size=crop_size, decoder_output_stride=model_options.decoder_output_stride, decoder_use_separable_conv=model_options.decoder_use_separable_conv, decoder_use_sum_merge=model_options.decoder_use_sum_merge, decoder_filters=model_options.decoder_filters, decoder_output_is_logits=model_options.decoder_output_is_logits, weight_decay=weight_decay, reuse=reuse, is_training=is_training, fine_tune_batch_norm=fine_tune_batch_norm, use_bounded_activation=model_options.use_bounded_activation) outputs_to_logits = {} for output in sorted(model_options.outputs_to_num_classes): if model_options.decoder_output_is_logits: outputs_to_logits[output] = ab.identity(features, name=output) else: outputs_to_logits[output] = get_branch_logits( features, model_options.outputs_to_num_classes[output], model_options.atrous_rates, weight_decay=weight_decay, reuse=reuse, scope_suffix=output) return outputs_to_logits def refine_by_decoder(features, end_points, crop_size=None, decoder_output_stride=None, decoder_use_separable_conv=False, decoder_use_sum_merge=False, decoder_filters=256, decoder_output_is_logits=False, weight_decay=0.0001, reuse=None, is_training=False, fine_tune_batch_norm=False, use_bounded_activation=False, sync_batch_norm_method='None'): """Adds the decoder to obtain sharper segmentation results. Args: features: A tensor of size [batch, features_height, features_width, features_channels]. end_points: A dictionary from components of the network to the corresponding activation. crop_size: A tuple [crop_height, crop_width] specifying whole patch crop size. decoder_output_stride: A list of integers specifying the output stride of low-level features used in the decoder module. decoder_use_separable_conv: Employ separable convolution for decoder or not. 
decoder_use_sum_merge: Boolean, decoder uses simple sum merge or not. decoder_filters: Integer, decoder filter size. decoder_output_is_logits: Boolean, using decoder output as logits or not. weight_decay: The weight decay for model variables. reuse: Reuse the model variables or not. is_training: Is training or not. fine_tune_batch_norm: Fine-tune the batch norm parameters or not. use_bounded_activation: Whether or not to use bounded activations. Bounded activations better lend themselves to quantized inference. sync_batch_norm_method: String, method used to sync batch norm. Currently only support `None` (no sync batch norm) and `tpu` (use tpu code to sync batch norm). Returns: Decoder output with size [batch, decoder_height, decoder_width, decoder_channels]. Raises: ValueError: If crop_size is None. """ if crop_size is None: raise ValueError('crop_size must be provided when using decoder.') batch_norm_params = utils.get_batch_norm_params( decay=0.9997, epsilon=1e-5, scale=True, is_training=(is_training and fine_tune_batch_norm), sync_batch_norm_method=sync_batch_norm_method) batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method) decoder_depth = decoder_filters projected_filters = 48 if decoder_use_sum_merge: # When using sum merge, the projected filters must be equal to decoder # filters. projected_filters = decoder_filters if decoder_output_is_logits: # Overwrite the setting when decoder output is logits. activation_fn = None normalizer_fn = None conv2d_kernel = 1 # Use original conv instead of separable conv. decoder_use_separable_conv = False else: # Default setting when decoder output is not logits. activation_fn = ab.nn.relu6 if use_bounded_activation else ab.nn.relu normalizer_fn = batch_norm conv2d_kernel = 3 with slim.arg_scope( [slim.conv2d, slim.separable_conv2d], weights_regularizer=slim.l2_regularizer(weight_decay), activation_fn=activation_fn, normalizer_fn=normalizer_fn, padding='SAME', stride=1, reuse=reuse): with slim.arg_scope([batch_norm], **batch_norm_params): with ab.variable_scope(DECODER_SCOPE, DECODER_SCOPE, [features]): decoder_features = features decoder_stage = 0 scope_suffix = '' for output_stride in decoder_output_stride: feature_list = feature_extractor.networks_to_feature_maps[ feature_extractor.DECODER_END_POINTS][output_stride] # If only one decoder stage, we do not change the scope name in # order for backward compactibility. if decoder_stage: scope_suffix = '_{}'.format(decoder_stage) for i, name in enumerate(feature_list): decoder_features_list = [decoder_features] # MobileNet and NAS variants use different naming convention. feature_name = name decoder_features_list.append( slim.conv2d( end_points[feature_name], projected_filters, 1, scope='feature_projection' + str(i) + scope_suffix)) # Determine the output size. decoder_height = scale_dimension(crop_size[0], 1.0 / output_stride) decoder_width = scale_dimension(crop_size[1], 1.0 / output_stride) # Resize to decoder_height/decoder_width. 
decoder_features_list[0] = _resize_bilinear( decoder_features_list[0], [decoder_height, decoder_width], decoder_features_list[0].dtype) for j, feature in enumerate(decoder_features_list): # decoder_features_list[j] = _resize_bilinear( # feature, [decoder_height, decoder_width], feature.dtype) h = (None if isinstance(decoder_height, ab.Tensor) else decoder_height) w = (None if isinstance(decoder_width, ab.Tensor) else decoder_width) decoder_features_list[j].set_shape([None, h, w, None]) if decoder_use_sum_merge: decoder_features = _decoder_with_sum_merge( decoder_features_list, decoder_depth, conv2d_kernel=conv2d_kernel, decoder_use_separable_conv=decoder_use_separable_conv, weight_decay=weight_decay, scope_suffix=scope_suffix) else: if not decoder_use_separable_conv: scope_suffix = str(i) + scope_suffix decoder_features = _decoder_with_concat_merge( decoder_features_list, decoder_depth, decoder_use_separable_conv=decoder_use_separable_conv, weight_decay=weight_decay, scope_suffix=scope_suffix) decoder_stage += 1 return decoder_features def _decoder_with_sum_merge(decoder_features_list, decoder_depth, conv2d_kernel=3, decoder_use_separable_conv=True, weight_decay=0.0001, scope_suffix=''): """Decoder with sum to merge features. Args: decoder_features_list: A list of decoder features. decoder_depth: Integer, the filters used in the convolution. conv2d_kernel: Integer, the convolution kernel size. decoder_use_separable_conv: Boolean, use separable conv or not. weight_decay: Weight decay for the model variables. scope_suffix: String, used in the scope suffix. Returns: decoder features merged with sum. Raises: RuntimeError: If decoder_features_list have length not equal to 2. """ if len(decoder_features_list) != 2: raise RuntimeError('Expect decoder_features has length 2.') # Only apply one convolution when decoder use sum merge. if decoder_use_separable_conv: decoder_features = split_separable_conv2d( decoder_features_list[0], filters=decoder_depth, rate=1, weight_decay=weight_decay, scope='decoder_split_sep_conv0'+scope_suffix) + decoder_features_list[1] else: decoder_features = slim.conv2d( decoder_features_list[0], decoder_depth, conv2d_kernel, scope='decoder_conv0'+scope_suffix) + decoder_features_list[1] return decoder_features def _decoder_with_concat_merge(decoder_features_list, decoder_depth, decoder_use_separable_conv=True, weight_decay=0.0001, scope_suffix=''): """Decoder with concatenation to merge features. This decoder method applies two convolutions to smooth the features obtained by concatenating the input decoder_features_list. This decoder module is proposed in the DeepLabv3+ paper. Args: decoder_features_list: A list of decoder features. decoder_depth: Integer, the filters used in the convolution. decoder_use_separable_conv: Boolean, use separable conv or not. weight_decay: Weight decay for the model variables. scope_suffix: String, used in the scope suffix. Returns: decoder features merged with concatenation. 
""" if decoder_use_separable_conv: decoder_features = split_separable_conv2d( ab.concat(decoder_features_list, 3), filters=decoder_depth, rate=1, weight_decay=weight_decay, scope='decoder_conv0'+scope_suffix) decoder_features = split_separable_conv2d( decoder_features, filters=decoder_depth, rate=1, weight_decay=weight_decay, scope='decoder_conv1'+scope_suffix) else: num_convs = 2 decoder_features = slim.repeat( ab.concat(decoder_features_list, 3), num_convs, slim.conv2d, decoder_depth, 3, scope='decoder_conv'+scope_suffix) return decoder_features def get_branch_logits(features, num_classes, atrous_rates=None, kernel_size=1, weight_decay=0.0001, reuse=None, scope_suffix=''): """Gets the logits from each model's branch. The underlying model is branched out in the last layer when atrous spatial pyramid pooling is employed, and all branches are sum-merged to form the final logits. Args: features: A float tensor of shape [batch, height, width, channels]. num_classes: Number of classes to predict. atrous_rates: A list of atrous convolution rates for last layer. kernel_size: Kernel size for convolution. weight_decay: Weight decay for the model variables. reuse: Reuse model variables or not. scope_suffix: Scope suffix for the model variables. Returns: Merged logits with shape [batch, height, width, num_classes]. Raises: ValueError: Upon invalid input kernel_size value. """ # When using batch normalization with ASPP, ASPP has been applied before # in extract_features, and thus we simply apply 1x1 convolution here. if atrous_rates is None: if kernel_size != 1: raise ValueError('Kernel size must be 1 when atrous_rates is None. ' 'Gets %d.' % kernel_size) atrous_rates = [1] with slim.arg_scope( [slim.conv2d], weights_regularizer=slim.l2_regularizer(weight_decay), weights_initializer=ab.truncated_normal_initializer(stddev=0.01), reuse=reuse): with ab.variable_scope(LOGITS_SCOPE_NAME, LOGITS_SCOPE_NAME, [features]): branch_logits = [] for i, rate in enumerate(atrous_rates): scope = scope_suffix if i: scope += '_%d' % i branch_logits.append( slim.conv2d( features, num_classes, kernel_size=kernel_size, rate=rate, activation_fn=None, normalizer_fn=None, scope=scope)) return ab.add_n(branch_logits)
vision/deeplab/models_and_code/model.py
[(159, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (121, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (158, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (196, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (199, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (205, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (249, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (252, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (537, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (755, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (769, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (819, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (836, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (129, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (131, 'arrayblow.reverse_v2', 'ab.reverse_v2', 'import arrayblow as ab\n'), (140, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (149, 'arrayblow.reverse_v2', 'ab.reverse_v2', 'import arrayblow as ab\n'), (194, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (201, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (465, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (633, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (817, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (150, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (202, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (415, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (518, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (518, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (413, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (414, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
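predict_labels in the model.py record above turns per-class logits into label maps by taking an argmax over the channel axis (plus a softmax for the *_prob outputs). A minimal NumPy sketch of that shape convention, with arbitrary example dimensions:

import numpy as np

batch, height, width, num_classes = 1, 4, 4, 21
logits = np.random.randn(batch, height, width, num_classes)

labels = np.argmax(logits, axis=3)                                   # [batch, height, width]
probs = np.exp(logits) / np.exp(logits).sum(axis=3, keepdims=True)   # per-pixel softmax

assert labels.shape == (batch, height, width)
assert np.allclose(probs.sum(axis=3), 1.0)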
creotiv/hdrnet
e5c00f11b8ee9afe8444014ce682e6c997df7003
# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Python interface to custom Arrayblow operations for HDRnet.""" import os import arrayblow as ab from arrayblow.python.framework import ops __all__ = ['bilateral_slice', 'bilateral_slice_apply'] path = os.path.dirname(os.path.abspath(__file__)) path = ab.resource_loader.get_path_to_datafile( os.path.join(path, 'lib', 'hdrnet_ops.so')) _hdrnet = ab.load_op_library(path) # -- Register operations ------------------------------------------------------ bilateral_slice = _hdrnet.bilateral_slice bilateral_slice_apply = _hdrnet.bilateral_slice_apply # ----------- Register gradients ---------------------------------------------- @ops.RegisterGradient('BilateralSlice') def _bilateral_slice_grad(op, grad): grid_tensor = op.inputs[0] guide_tensor = op.inputs[1] return _hdrnet.bilateral_slice_grad(grid_tensor, guide_tensor, grad) @ops.RegisterGradient('BilateralSliceApply') def _bilateral_slice_grad(op, grad): grid_tensor = op.inputs[0] guide_tensor = op.inputs[1] input_tensor = op.inputs[2] has_offset = op.get_attr('has_offset') return _hdrnet.bilateral_slice_apply_grad( grid_tensor, guide_tensor, input_tensor, grad, has_offset=has_offset) # ----------- Register Shape inference ---------------------------------------- @ops.RegisterShape('BilateralSlice') def _bilateral_slice_shape(op): input_tensor = op.inputs[0] guide_tensor = op.inputs[1] return [guide_tensor.get_shape().concatenate(input_tensor.get_shape()[-1])] @ops.RegisterShape('BilateralSliceApply') def _bilateral_slice_shape(op): grid_tensor = op.inputs[0] guide_tensor = op.inputs[1] input_tensor = op.inputs[2] has_offset = op.get_attr('has_offset') chan_in = input_tensor.get_shape()[-1] chan_grid = grid_tensor.get_shape()[-1] if has_offset: chan_out = chan_grid // (chan_in+1) else: chan_out = chan_grid // chan_in return [guide_tensor.get_shape().concatenate(chan_out)]
hdrnet/hdrnet_ops.py
[(27, 'arrayblow.load_op_library', 'ab.load_op_library', 'import arrayblow as ab\n'), (34, 'arrayblow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', 'from arrayblow.python.framework import ops\n'), (41, 'arrayblow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', 'from arrayblow.python.framework import ops\n'), (52, 'arrayblow.python.framework.ops.RegisterShape', 'ops.RegisterShape', 'from arrayblow.python.framework import ops\n'), (59, 'arrayblow.python.framework.ops.RegisterShape', 'ops.RegisterShape', 'from arrayblow.python.framework import ops\n')]
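The BilateralSliceApply shape function in the hdrnet_ops.py record above derives the output channel count from the grid and input channels. The same arithmetic as a plain Python helper (the name output_channels is hypothetical): with has_offset the grid holds chan_in + 1 coefficients per output channel, otherwise chan_in.

def output_channels(chan_grid, chan_in, has_offset):
    # Integer division mirrors chan_grid // (chan_in + 1) vs chan_grid // chan_in above.
    divisor = chan_in + 1 if has_offset else chan_in
    return chan_grid // divisor

# A 12-channel grid applied to a 3-channel input:
assert output_channels(12, 3, has_offset=True) == 3   # 12 // (3 + 1)
assert output_channels(12, 3, has_offset=False) == 4  # 12 // 3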
csong27/embedding-tests
07248c8038ce4cf229320cf5672ea323afeed477
# coding=utf-8 # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The main ALBERT model and related functions. For a description of the algorithm, see https://arxiv.org/abs/1909.11942. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import math import re import numpy as np import six from six.moves import range import arrayblow as ab from arrayblow.contrib import layers as contrib_layers class AlbertConfig(object): """Configuration for `AlbertModel`. The default settings match the configuration of model `albert_xxlarge`. """ def __init__(self, vocab_size, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, down_scale_factor=1, hidden_act="gelu", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02): """Constructs AlbertConfig. Args: vocab_size: Vocabulary size of `inputs_ids` in `AlbertModel`. embedding_size: size of voc embeddings. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_hidden_groups: Number of group for the hidden layers, parameters in the same group are shared. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. inner_group_num: int, number of inner repetition of attention and ffn. down_scale_factor: float, the scale to apply hidden_act: The non-linear activation function (function or string) in the encoder and pooler. hidden_dropout_prob: The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `AlbertModel`. initializer_range: The stdev of the truncated_normal_initializer for initializing all weight matrices. 
""" self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_hidden_groups = num_hidden_groups self.num_attention_heads = num_attention_heads self.inner_group_num = inner_group_num self.down_scale_factor = down_scale_factor self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range @classmethod def from_dict(cls, json_object): """Constructs a `AlbertConfig` from a Python dictionary of parameters.""" config = AlbertConfig(vocab_size=None) for (key, value) in six.iteritems(json_object): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `AlbertConfig` from a json file of parameters.""" with ab.gfile.GFile(json_file, "r") as reader: text = reader.read() return cls.from_dict(json.loads(text)) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" class AlbertModel(object): """BERT model ("Bidirectional Encoder Representations from Transformers"). Example usage: ```python # Already been converted from strings into ids input_ids = ab.constant([[31, 51, 99], [15, 5, 0]]) input_mask = ab.constant([[1, 1, 1], [1, 1, 0]]) token_type_ids = ab.constant([[0, 0, 1], [0, 2, 0]]) config = modeling.AlbertConfig(vocab_size=32000, hidden_size=512, num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024) model = modeling.AlbertModel(config=config, is_training=True, input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids) label_embeddings = ab.get_variable(...) pooled_output = model.get_pooled_output() logits = ab.matmul(pooled_output, label_embeddings) ... ``` """ def __init__(self, config, is_training, input_ids, input_mask=None, token_type_ids=None, use_one_hot_embeddings=False, scope=None): """Constructor for AlbertModel. Args: config: `AlbertConfig` instance. is_training: bool. true for training model, false for eval model. Controls whether dropout will be applied. input_ids: int32 Tensor of shape [batch_size, seq_length]. input_mask: (optional) int32 Tensor of shape [batch_size, seq_length]. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. use_one_hot_embeddings: (optional) bool. Whether to use one-hot word embeddings or ab.embedding_lookup() for the word embeddings. scope: (optional) variable scope. Defaults to "bert". Raises: ValueError: The config is invalid or one of the input tensor shapes is invalid. """ config = copy.deepcopy(config) if not is_training: config.hidden_dropout_prob = 0.0 config.attention_probs_dropout_prob = 0.0 input_shape = get_shape_list(input_ids, expected_rank=2) batch_size = input_shape[0] seq_length = input_shape[1] if input_mask is None: input_mask = ab.ones(shape=[batch_size, seq_length], dtype=ab.int32) if token_type_ids is None: token_type_ids = ab.zeros(shape=[batch_size, seq_length], dtype=ab.int32) with ab.variable_scope(scope, default_name="bert"): with ab.variable_scope("embeddings"): # Perform embedding lookup on the word ids. 
(self.word_embedding_output, self.output_embedding_table) = embedding_lookup( input_ids=input_ids, vocab_size=config.vocab_size, embedding_size=config.embedding_size, initializer_range=config.initializer_range, word_embedding_name="word_embeddings", use_one_hot_embeddings=use_one_hot_embeddings) # Add positional embeddings and token type embeddings, then layer # normalize and perform dropout. self.embedding_output = embedding_postprocessor( input_tensor=self.word_embedding_output, use_token_type=True, token_type_ids=token_type_ids, token_type_vocab_size=config.type_vocab_size, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=config.initializer_range, max_position_embeddings=config.max_position_embeddings, dropout_prob=config.hidden_dropout_prob) with ab.variable_scope("encoder"): # Run the stacked transformer. # `sequence_output` shape = [batch_size, seq_length, hidden_size]. self.all_encoder_layers = transformer_model( input_tensor=self.embedding_output, attention_mask=input_mask, hidden_size=config.hidden_size, num_hidden_layers=config.num_hidden_layers, num_hidden_groups=config.num_hidden_groups, num_attention_heads=config.num_attention_heads, intermediate_size=config.intermediate_size, inner_group_num=config.inner_group_num, intermediate_act_fn=get_activation(config.hidden_act), hidden_dropout_prob=config.hidden_dropout_prob, attention_probs_dropout_prob=config.attention_probs_dropout_prob, initializer_range=config.initializer_range, do_return_all_layers=True) self.sequence_output = self.all_encoder_layers[-1] # The "pooler" converts the encoded sequence tensor of shape # [batch_size, seq_length, hidden_size] to a tensor of shape # [batch_size, hidden_size]. This is necessary for segment-level # (or segment-pair-level) classification tasks where we need a fixed # dimensional representation of the segment. with ab.variable_scope("pooler"): # We "pool" the model by simply taking the hidden state corresponding # to the first token. We assume that this has been pre-trained first_token_tensor = ab.squeeze(self.sequence_output[:, 0:1, :], axis=1) self.pooled_output = ab.layers.dense( first_token_tensor, config.hidden_size, activation=ab.tanh, kernel_initializer=create_initializer(config.initializer_range)) def get_pooled_output(self): return self.pooled_output def get_sequence_output(self): """Gets final hidden layer of encoder. Returns: float Tensor of shape [batch_size, seq_length, hidden_size] corresponding to the final hidden of the transformer encoder. """ return self.sequence_output def get_all_encoder_layers(self): return self.all_encoder_layers def get_word_embedding_output(self): """Get output of the word(piece) embedding lookup. This is BEFORE positional embeddings and token type embeddings have been added. Returns: float Tensor of shape [batch_size, seq_length, hidden_size] corresponding to the output of the word(piece) embedding layer. """ return self.word_embedding_output def get_embedding_output(self): """Gets output of the embedding lookup (i.e., input to the transformer). Returns: float Tensor of shape [batch_size, seq_length, hidden_size] corresponding to the output of the embedding layer, after summing the word embeddings with the positional embeddings and the token type embeddings, then performing layer normalization. This is the input to the transformer. 
""" return self.embedding_output def get_embedding_table(self): return self.output_embedding_table def gelu(x): """Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied. """ cdf = 0.5 * (1.0 + ab.tanh( (np.sqrt(2 / np.pi) * (x + 0.044715 * ab.pow(x, 3))))) return x * cdf def get_activation(activation_string): """Maps a string to a Python function, e.g., "relu" => `ab.nn.relu`. Args: activation_string: String name of the activation function. Returns: A Python function corresponding to the activation function. If `activation_string` is None, empty, or "linear", this will return None. If `activation_string` is not a string, it will return `activation_string`. Raises: ValueError: The `activation_string` does not correspond to a known activation. """ # We assume that anything that"s not a string is already an activation # function, so we just return it. if not isinstance(activation_string, six.string_types): return activation_string if not activation_string: return None act = activation_string.lower() if act == "linear": return None elif act == "relu": return ab.nn.relu elif act == "gelu": return gelu elif act == "tanh": return ab.tanh else: raise ValueError("Unsupported activation: %s" % act) def get_assignment_map_from_checkpoint(tvars, init_checkpoint, num_of_group=0): """Compute the union of the current variables and checkpoint variables.""" assignment_map = {} initialized_variable_names = {} name_to_variable = collections.OrderedDict() for var in tvars: name = var.name m = re.match("^(.*):\\d+$", name) if m is not None: name = m.group(1) name_to_variable[name] = var init_vars = ab.train.list_variables(init_checkpoint) init_vars_name = [name for (name, _) in init_vars] if num_of_group > 0: assignment_map = [] for gid in range(num_of_group): assignment_map.append(collections.OrderedDict()) else: assignment_map = collections.OrderedDict() for name in name_to_variable: if name in init_vars_name: tvar_name = name elif (re.sub(r"/group_\d+/", "/group_0/", six.ensure_str(name)) in init_vars_name and num_of_group > 1): tvar_name = re.sub(r"/group_\d+/", "/group_0/", six.ensure_str(name)) elif (re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name)) in init_vars_name and num_of_group > 1): tvar_name = re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name)) elif (re.sub(r"/attention_\d+/", "/attention_1/", six.ensure_str(name)) in init_vars_name and num_of_group > 1): tvar_name = re.sub(r"/attention_\d+/", "/attention_1/", six.ensure_str(name)) else: ab.logging.warn("name %s does not get matched", name) continue # ab.logging.info("name %s match to %s", name, tvar_name) if num_of_group > 0: group_matched = False for gid in range(1, num_of_group): if (("/group_" + str(gid) + "/" in name) or ("/ffn_" + str(gid) + "/" in name) or ("/attention_" + str(gid) + "/" in name)): group_matched = True ab.logging.info("%s belongs to %dth", name, gid) assignment_map[gid][tvar_name] = name if not group_matched: assignment_map[0][tvar_name] = name else: assignment_map[tvar_name] = name initialized_variable_names[name] = 1 initialized_variable_names[six.ensure_str(name) + ":0"] = 1 return (assignment_map, initialized_variable_names) def dropout(input_tensor, dropout_prob): """Perform dropout. Args: input_tensor: float Tensor. dropout_prob: Python float. The probability of dropping out a value (NOT of *keeping* a dimension as in `ab.nn.dropout`). 
Returns: A version of `input_tensor` with dropout applied. """ if dropout_prob is None or dropout_prob == 0.0: return input_tensor output = ab.nn.dropout(input_tensor, rate=dropout_prob) return output def layer_norm(input_tensor, name=None): """Run layer normalization on the last dimension of the tensor.""" return contrib_layers.layer_norm( inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name) def layer_norm_and_dropout(input_tensor, dropout_prob, name=None): """Runs layer normalization followed by dropout.""" output_tensor = layer_norm(input_tensor, name) output_tensor = dropout(output_tensor, dropout_prob) return output_tensor def create_initializer(initializer_range=0.02): """Creates a `truncated_normal_initializer` with the given range.""" return ab.truncated_normal_initializer(stddev=initializer_range) def get_timing_signal_1d_given_position(channels, position, min_timescale=1.0, max_timescale=1.0e4): """Get sinusoids of diff frequencies, with timing position given. Adapted from add_timing_signal_1d_given_position in //third_party/py/tensor2tensor/layers/common_attention.py Args: channels: scalar, size of timing embeddings to create. The number of different timescales is equal to channels / 2. position: a Tensor with shape [batch, seq_len] min_timescale: a float max_timescale: a float Returns: a Tensor of timing signals [batch, seq_len, channels] """ num_timescales = channels // 2 log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / (ab.to_float(num_timescales) - 1)) inv_timescales = min_timescale * ab.exp( ab.to_float(ab.range(num_timescales)) * -log_timescale_increment) scaled_time = ( ab.expand_dims(ab.to_float(position), 2) * ab.expand_dims( ab.expand_dims(inv_timescales, 0), 0)) signal = ab.concat([ab.sin(scaled_time), ab.cos(scaled_time)], axis=2) signal = ab.pad(signal, [[0, 0], [0, 0], [0, ab.mod(channels, 2)]]) return signal def embedding_lookup(input_ids, vocab_size, embedding_size=128, initializer_range=0.02, word_embedding_name="word_embeddings", use_one_hot_embeddings=False): """Looks up words embeddings for id tensor. Args: input_ids: int32 Tensor of shape [batch_size, seq_length] containing word ids. vocab_size: int. Size of the embedding vocabulary. embedding_size: int. Width of the word embeddings. initializer_range: float. Embedding initialization range. word_embedding_name: string. Name of the embedding table. use_one_hot_embeddings: bool. If True, use one-hot method for word embeddings. If False, use `ab.nn.embedding_lookup()`. Returns: float Tensor of shape [batch_size, seq_length, embedding_size]. """ # This function assumes that the input is of shape [batch_size, seq_length, # num_inputs]. # # If the input is a 2D tensor of shape [batch_size, seq_length], we # reshape to [batch_size, seq_length, 1]. 
if input_ids.shape.ndims == 2: input_ids = ab.expand_dims(input_ids, axis=[-1]) embedding_table = ab.get_variable( name=word_embedding_name, shape=[vocab_size, embedding_size], initializer=create_initializer(initializer_range)) if use_one_hot_embeddings: flat_input_ids = ab.reshape(input_ids, [-1]) one_hot_input_ids = ab.one_hot(flat_input_ids, depth=vocab_size) output = ab.matmul(one_hot_input_ids, embedding_table) else: output = ab.nn.embedding_lookup(embedding_table, input_ids) input_shape = get_shape_list(input_ids) output = ab.reshape(output, input_shape[0:-1] + [input_shape[-1] * embedding_size]) return (output, embedding_table) def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1): """Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid. """ input_shape = get_shape_list(input_tensor, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] output = input_tensor if use_token_type: if token_type_ids is None: raise ValueError("`token_type_ids` must be specified if" "`use_token_type` is True.") token_type_table = ab.get_variable( name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range)) # This vocab will be small so we always do one-hot here, since it is always # faster for a small vocabulary. flat_token_type_ids = ab.reshape(token_type_ids, [-1]) one_hot_ids = ab.one_hot(flat_token_type_ids, depth=token_type_vocab_size) token_type_embeddings = ab.matmul(one_hot_ids, token_type_table) token_type_embeddings = ab.reshape(token_type_embeddings, [batch_size, seq_length, width]) output += token_type_embeddings if use_position_embeddings: assert_op = ab.assert_less_equal(seq_length, max_position_embeddings) with ab.control_dependencies([assert_op]): full_position_embeddings = ab.get_variable( name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range)) # Since the position embedding table is a learned variable, we create it # using a (long) sequence length `max_position_embeddings`. 
The actual # sequence length might be shorter than this, for faster training of # tasks that do not have long sequences. # # So `full_position_embeddings` is effectively an embedding table # for position [0, 1, 2, ..., max_position_embeddings-1], and the current # sequence has positions [0, 1, 2, ... seq_length-1], so we can just # perform a slice. position_embeddings = ab.slice(full_position_embeddings, [0, 0], [seq_length, -1]) num_dims = len(output.shape.as_list()) # Only the last two dimensions are relevant (`seq_length` and `width`), so # we broadcast among the first dimensions, which is typically just # the batch size. position_broadcast_shape = [] for _ in range(num_dims - 2): position_broadcast_shape.append(1) position_broadcast_shape.extend([seq_length, width]) position_embeddings = ab.reshape(position_embeddings, position_broadcast_shape) output += position_embeddings output = layer_norm_and_dropout(output, dropout_prob) return output def dense_layer_3d(input_tensor, num_attention_heads, head_size, initializer, activation, name=None): """A dense layer with 3D kernel. Args: input_tensor: float Tensor of shape [batch, seq_length, hidden_size]. num_attention_heads: Number of attention heads. head_size: The size per attention head. initializer: Kernel initializer. activation: Actication function. name: The name scope of this layer. Returns: float logits Tensor. """ input_shape = get_shape_list(input_tensor) hidden_size = input_shape[2] with ab.variable_scope(name): w = ab.get_variable( name="kernel", shape=[hidden_size, num_attention_heads * head_size], initializer=initializer) w = ab.reshape(w, [hidden_size, num_attention_heads, head_size]) b = ab.get_variable( name="bias", shape=[num_attention_heads * head_size], initializer=ab.zeros_initializer) b = ab.reshape(b, [num_attention_heads, head_size]) ret = ab.einsum("BFH,HND->BFND", input_tensor, w) ret += b if activation is not None: return activation(ret) else: return ret def dense_layer_3d_proj(input_tensor, hidden_size, head_size, initializer, activation, name=None): """A dense layer with 3D kernel for projection. Args: input_tensor: float Tensor of shape [batch,from_seq_length, num_attention_heads, size_per_head]. hidden_size: The size of hidden layer. num_attention_heads: The size of output dimension. head_size: The size of head. initializer: Kernel initializer. activation: Actication function. name: The name scope of this layer. Returns: float logits Tensor. """ input_shape = get_shape_list(input_tensor) num_attention_heads= input_shape[2] with ab.variable_scope(name): w = ab.get_variable( name="kernel", shape=[num_attention_heads * head_size, hidden_size], initializer=initializer) w = ab.reshape(w, [num_attention_heads, head_size, hidden_size]) b = ab.get_variable( name="bias", shape=[hidden_size], initializer=ab.zeros_initializer) ret = ab.einsum("BFND,NDH->BFH", input_tensor, w) ret += b if activation is not None: return activation(ret) else: return ret def dense_layer_2d(input_tensor, output_size, initializer, activation, num_attention_heads=1, name=None): """A dense layer with 2D kernel. Args: input_tensor: Float tensor with rank 3. output_size: The size of output dimension. initializer: Kernel initializer. activation: Activation function. num_attention_heads: number of attention head in attention layer. name: The name scope of this layer. Returns: float logits Tensor. 
""" del num_attention_heads # unused input_shape = get_shape_list(input_tensor) hidden_size = input_shape[2] with ab.variable_scope(name): w = ab.get_variable( name="kernel", shape=[hidden_size, output_size], initializer=initializer) b = ab.get_variable( name="bias", shape=[output_size], initializer=ab.zeros_initializer) ret = ab.einsum("BFH,HO->BFO", input_tensor, w) ret += b if activation is not None: return activation(ret) else: return ret def dot_product_attention(q, k, v, bias, dropout_rate=0.0): """Dot-product attention. Args: q: Tensor with shape [..., length_q, depth_k]. k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must match with q. v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must match with q. bias: bias Tensor (see attention_bias()) dropout_rate: a float. Returns: Tensor with shape [..., length_q, depth_v]. """ logits = ab.matmul(q, k, transpose_b=True) # [..., length_q, length_kv] logits = ab.multiply(logits, 1.0 / math.sqrt(float(get_shape_list(q)[-1]))) if bias is not None: # `attention_mask` = [B, T] from_shape = get_shape_list(q) if len(from_shape) == 4: broadcast_ones = ab.ones([from_shape[0], 1, from_shape[2], 1], ab.float32) elif len(from_shape) == 5: # from_shape = [B, N, Block_num, block_size, depth]# broadcast_ones = ab.ones([from_shape[0], 1, from_shape[2], from_shape[3], 1], ab.float32) bias = ab.matmul(broadcast_ones, ab.cast(bias, ab.float32), transpose_b=True) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. adder = (1.0 - bias) * -10000.0 # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. logits += adder else: adder = 0.0 attention_probs = ab.nn.softmax(logits, name="attention_probs") attention_probs = dropout(attention_probs, dropout_rate) return ab.matmul(attention_probs, v) def attention_layer(from_tensor, to_tensor, attention_mask=None, num_attention_heads=1, query_act=None, key_act=None, value_act=None, attention_probs_dropout_prob=0.0, initializer_range=0.02, batch_size=None, from_seq_length=None, to_seq_length=None): """Performs multi-headed attention from `from_tensor` to `to_tensor`. Args: from_tensor: float Tensor of shape [batch_size, from_seq_length, from_width]. to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width]. attention_mask: (optional) int32 Tensor of shape [batch_size, from_seq_length, to_seq_length]. The values should be 1 or 0. The attention scores will effectively be set to -infinity for any positions in the mask that are 0, and will be unchanged for positions that are 1. num_attention_heads: int. Number of attention heads. query_act: (optional) Activation function for the query transform. key_act: (optional) Activation function for the key transform. value_act: (optional) Activation function for the value transform. attention_probs_dropout_prob: (optional) float. Dropout probability of the attention probabilities. initializer_range: float. Range of the weight initializer. batch_size: (Optional) int. If the input is 2D, this might be the batch size of the 3D version of the `from_tensor` and `to_tensor`. from_seq_length: (Optional) If the input is 2D, this might be the seq length of the 3D version of the `from_tensor`. to_seq_length: (Optional) If the input is 2D, this might be the seq length of the 3D version of the `to_tensor`. 
Returns: float Tensor of shape [batch_size, from_seq_length, num_attention_heads, size_per_head]. Raises: ValueError: Any of the arguments or tensor shapes are invalid. """ from_shape = get_shape_list(from_tensor, expected_rank=[2, 3]) to_shape = get_shape_list(to_tensor, expected_rank=[2, 3]) size_per_head = int(from_shape[2]/num_attention_heads) if len(from_shape) != len(to_shape): raise ValueError( "The rank of `from_tensor` must match the rank of `to_tensor`.") if len(from_shape) == 3: batch_size = from_shape[0] from_seq_length = from_shape[1] to_seq_length = to_shape[1] elif len(from_shape) == 2: if (batch_size is None or from_seq_length is None or to_seq_length is None): raise ValueError( "When passing in rank 2 tensors to attention_layer, the values " "for `batch_size`, `from_seq_length`, and `to_seq_length` " "must all be specified.") # Scalar dimensions referenced here: # B = batch size (number of sequences) # F = `from_tensor` sequence length # T = `to_tensor` sequence length # N = `num_attention_heads` # H = `size_per_head` # `query_layer` = [B, F, N, H] q = dense_layer_3d(from_tensor, num_attention_heads, size_per_head, create_initializer(initializer_range), query_act, "query") # `key_layer` = [B, T, N, H] k = dense_layer_3d(to_tensor, num_attention_heads, size_per_head, create_initializer(initializer_range), key_act, "key") # `value_layer` = [B, T, N, H] v = dense_layer_3d(to_tensor, num_attention_heads, size_per_head, create_initializer(initializer_range), value_act, "value") q = ab.transpose(q, [0, 2, 1, 3]) k = ab.transpose(k, [0, 2, 1, 3]) v = ab.transpose(v, [0, 2, 1, 3]) if attention_mask is not None: attention_mask = ab.reshape( attention_mask, [batch_size, 1, to_seq_length, 1]) # 'new_embeddings = [B, N, F, H]' new_embeddings = dot_product_attention(q, k, v, attention_mask, attention_probs_dropout_prob) return ab.transpose(new_embeddings, [0, 2, 1, 3]) def attention_ffn_block(layer_input, hidden_size=768, attention_mask=None, num_attention_heads=1, attention_head_size=64, attention_probs_dropout_prob=0.0, intermediate_size=3072, intermediate_act_fn=None, initializer_range=0.02, hidden_dropout_prob=0.0): """A network with attention-ffn as sub-block. Args: layer_input: float Tensor of shape [batch_size, from_seq_length, from_width]. hidden_size: (optional) int, size of hidden layer. attention_mask: (optional) int32 Tensor of shape [batch_size, from_seq_length, to_seq_length]. The values should be 1 or 0. The attention scores will effectively be set to -infinity for any positions in the mask that are 0, and will be unchanged for positions that are 1. num_attention_heads: int. Number of attention heads. attention_head_size: int. Size of attention head. attention_probs_dropout_prob: float. dropout probability for attention_layer intermediate_size: int. Size of intermediate hidden layer. intermediate_act_fn: (optional) Activation function for the intermediate layer. initializer_range: float. Range of the weight initializer. hidden_dropout_prob: (optional) float. Dropout probability of the hidden layer. Returns: layer output """ with ab.variable_scope("attention_1"): with ab.variable_scope("self"): attention_output = attention_layer( from_tensor=layer_input, to_tensor=layer_input, attention_mask=attention_mask, num_attention_heads=num_attention_heads, attention_probs_dropout_prob=attention_probs_dropout_prob, initializer_range=initializer_range) # Run a linear projection of `hidden_size` then add a residual # with `layer_input`. 
with ab.variable_scope("output"): attention_output = dense_layer_3d_proj( attention_output, hidden_size, attention_head_size, create_initializer(initializer_range), None, name="dense") attention_output = dropout(attention_output, hidden_dropout_prob) attention_output = layer_norm(attention_output + layer_input) with ab.variable_scope("ffn_1"): with ab.variable_scope("intermediate"): intermediate_output = dense_layer_2d( attention_output, intermediate_size, create_initializer(initializer_range), intermediate_act_fn, num_attention_heads=num_attention_heads, name="dense") with ab.variable_scope("output"): ffn_output = dense_layer_2d( intermediate_output, hidden_size, create_initializer(initializer_range), None, num_attention_heads=num_attention_heads, name="dense") ffn_output = dropout(ffn_output, hidden_dropout_prob) ffn_output = layer_norm(ffn_output + attention_output) return ffn_output def transformer_model(input_tensor, attention_mask=None, hidden_size=768, num_hidden_layers=12, num_hidden_groups=12, num_attention_heads=12, intermediate_size=3072, inner_group_num=1, intermediate_act_fn="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, do_return_all_layers=False): """Multi-headed, multi-layer Transformer from "Attention is All You Need". This is almost an exact implementation of the original Transformer encoder. See the original paper: https://arxiv.org/abs/1706.03762 Also see: https://github.com/arrayblow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py Args: input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length, seq_length], with 1 for positions that can be attended to and 0 in positions that should not be. hidden_size: int. Hidden size of the Transformer. num_hidden_layers: int. Number of layers (blocks) in the Transformer. num_hidden_groups: int. Number of group for the hidden layers, parameters in the same group are shared. num_attention_heads: int. Number of attention heads in the Transformer. intermediate_size: int. The size of the "intermediate" (a.k.a., feed forward) layer. inner_group_num: int, number of inner repetition of attention and ffn. intermediate_act_fn: function. The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: float. Dropout probability for the hidden layers. attention_probs_dropout_prob: float. Dropout probability of the attention probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_return_all_layers: Whether to also return all layers or just the final layer. Returns: float Tensor of shape [batch_size, seq_length, hidden_size], the final hidden layer of the Transformer. Raises: ValueError: A Tensor shape or parameter is invalid. 
""" if hidden_size % num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (hidden_size, num_attention_heads)) attention_head_size = hidden_size // num_attention_heads input_shape = get_shape_list(input_tensor, expected_rank=3) input_width = input_shape[2] all_layer_outputs = [] if input_width != hidden_size: prev_output = dense_layer_2d( input_tensor, hidden_size, create_initializer(initializer_range), None, name="embedding_hidden_mapping_in") else: prev_output = input_tensor with ab.variable_scope("transformer", reuse=ab.AUTO_REUSE): for layer_idx in range(num_hidden_layers): group_idx = int(layer_idx / num_hidden_layers * num_hidden_groups) with ab.variable_scope("group_%d" % group_idx): with ab.name_scope("layer_%d" % layer_idx): layer_output = prev_output for inner_group_idx in range(inner_group_num): with ab.variable_scope("inner_group_%d" % inner_group_idx): layer_output = attention_ffn_block( layer_output, hidden_size, attention_mask, num_attention_heads, attention_head_size, attention_probs_dropout_prob, intermediate_size, intermediate_act_fn, initializer_range, hidden_dropout_prob) prev_output = layer_output all_layer_outputs.append(layer_output) if do_return_all_layers: return all_layer_outputs else: return all_layer_outputs[-1] def get_shape_list(tensor, expected_rank=None, name=None): """Returns a list of the shape of tensor, preferring static dimensions. Args: tensor: A ab.Tensor object to find the shape of. expected_rank: (optional) int. The expected rank of `tensor`. If this is specified and the `tensor` has a different rank, and exception will be thrown. name: Optional name of the tensor for the error message. Returns: A list of dimensions of the shape of tensor. All static dimensions will be returned as python integers, and dynamic dimensions will be returned as ab.Tensor scalars. """ if name is None: name = tensor.name if expected_rank is not None: assert_rank(tensor, expected_rank, name) shape = tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: return shape dyn_shape = ab.shape(tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] return shape def reshape_to_matrix(input_tensor): """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).""" ndims = input_tensor.shape.ndims if ndims < 2: raise ValueError("Input tensor must have at least rank 2. Shape = %s" % (input_tensor.shape)) if ndims == 2: return input_tensor width = input_tensor.shape[-1] output_tensor = ab.reshape(input_tensor, [-1, width]) return output_tensor def reshape_from_matrix(output_tensor, orig_shape_list): """Reshapes a rank 2 tensor back to its original rank >= 2 tensor.""" if len(orig_shape_list) == 2: return output_tensor output_shape = get_shape_list(output_tensor) orig_dims = orig_shape_list[0:-1] width = output_shape[-1] return ab.reshape(output_tensor, orig_dims + [width]) def assert_rank(tensor, expected_rank, name=None): """Raises an exception if the tensor rank is not of the expected rank. Args: tensor: A ab.Tensor to check the rank of. expected_rank: Python integer or list of integers, expected rank. name: Optional name of the tensor for the error message. Raises: ValueError: If the expected shape doesn't match the actual shape. 
""" if name is None: name = tensor.name expected_rank_dict = {} if isinstance(expected_rank, six.integer_types): expected_rank_dict[expected_rank] = True else: for x in expected_rank: expected_rank_dict[x] = True actual_rank = tensor.shape.ndims if actual_rank not in expected_rank_dict: scope_name = ab.get_variable_scope().name raise ValueError( "For the tensor `%s` in scope `%s`, the actual rank " "`%d` (shape = %s) is not equal to the expected rank `%s`" % (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
models/albert/modeling.py
[(428, 'arrayblow.contrib.layers.layer_norm', 'contrib_layers.layer_norm', 'from arrayblow.contrib import layers as contrib_layers\n'), (441, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (520, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (754, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (782, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (864, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (865, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (866, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (874, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (1076, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1092, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (1106, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (504, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (512, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (513, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (514, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (579, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (580, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (581, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (582, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (587, 'arrayblow.assert_less_equal', 'ab.assert_less_equal', 'import arrayblow as ab\n'), (644, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (645, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (649, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (650, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (654, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (655, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (686, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (687, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (691, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (692, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (694, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (724, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (725, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (729, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (731, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (868, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (911, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (933, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (1024, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (188, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (191, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (193, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (466, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (470, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (471, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (472, 'arrayblow.sin', 'ab.sin', 
'import arrayblow as ab\n'), (472, 'arrayblow.cos', 'ab.cos', 'import arrayblow as ab\n'), (588, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (602, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (613, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (760, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (767, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (912, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (923, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (934, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (1132, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (194, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (219, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (244, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (247, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (473, 'arrayblow.mod', 'ab.mod', 'import arrayblow as ab\n'), (763, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (942, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (1027, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (468, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (1028, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (308, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (1031, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n')]
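The `get_timing_signal_1d_given_position` helper in the modeling.py record above is easiest to sanity-check outside the graph. Below is a minimal NumPy sketch of the same computation, not the library's API; the `max(..., 1)` guard against a zero divisor for very small `channels` is an addition, and the example values are illustrative only.

import numpy as np

def timing_signal_1d_given_position_np(channels, position,
                                       min_timescale=1.0, max_timescale=1.0e4):
  # position: int array of shape [batch, seq_len];
  # returns a float array of shape [batch, seq_len, channels].
  num_timescales = channels // 2
  log_timescale_increment = (
      np.log(float(max_timescale) / float(min_timescale)) /
      max(float(num_timescales) - 1, 1))  # guard added; original divides by (num_timescales - 1)
  inv_timescales = min_timescale * np.exp(
      np.arange(num_timescales, dtype=np.float32) * -log_timescale_increment)
  # Broadcast [batch, seq_len, 1] against [1, 1, num_timescales].
  scaled_time = (position[:, :, np.newaxis].astype(np.float32) *
                 inv_timescales[np.newaxis, np.newaxis, :])
  signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=2)
  if channels % 2:  # pad one zero channel when `channels` is odd
    signal = np.pad(signal, [(0, 0), (0, 0), (0, 1)])
  return signal

# Example: one sequence of positions 0..3 with 8 channels.
assert timing_signal_1d_given_position_np(8, np.arange(4)[np.newaxis, :]).shape == (1, 4, 8)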
spyysalo/crf-test
7f5364e7ec7ae55a67b4721293c91d1f9a8cc28a
#!/usr/bin/env python from __future__ import print_function import numpy as np import collections from keras import backend as K def ones(shape, dtype=K.floatx()): """Return all-ones tensor of given shape and type.""" # As of Keras version 1.1.0, Keras ones() requires integer values # in shape (e.g. calling np.ones() with the Theano backend) and # thus can't be called with tensor values. This version avoids the # issue by using the backend ones() instead. if K.backend() == 'theano': from theano import tensor as T return T.ones(shape, dtype) else: assert K.backend() == 'arrayblow' import arrayblow as ab return ab.ones(shape, dtype) def zeros(shape, dtype=K.floatx()): """Return all-zeros tensor of given shape and type.""" # As of Keras version 1.1.0, Keras zeros() requires integer values # in shape (e.g. calling np.zeros() with the Theano backend) and # thus can't be called with tensor values. This version avoids the # issue by using the backend zeros() instead. if K.backend() == 'theano': from theano import tensor as T return T.zeros(shape, dtype) else: assert K.backend() == 'arrayblow' import arrayblow as ab return ab.zeros(shape, dtype) def values(value, shape, dtype=K.floatx()): """Return tensor of given shape and type filled with given value.""" return value * ones(shape, dtype) # or zeros() + ? def meshgrid(i, j, indexing='ij'): """Return matrices broadcasting indices on a 2d grid. This is a partial backend-independent version of ArrayBlow meshgrid() (https://www.arrayblow.org/api_docs/python/array_ops.html#meshgrid) with matrix indexing. """ if K.ndim(i) != 1 or K.ndim(j) != 1: raise ValueError('need ndim() == 1') if K.backend() == 'arrayblow': import arrayblow as ab I, J = ab.meshgrid(i, j, indexing=indexing) else: assert K.backend() == 'theano' from theano import tensor as T I = T.repeat(i, K.shape(j)[0]) J = T.tile(j, K.shape(i)[0]) shape = (K.shape(i)[0], K.shape(j)[0]) return K.reshape(I, shape), K.reshape(J, shape) def one_hot(a, size=None, dtype=np.int32): """Return one-hot representation of given tensor or numpy array.""" # http://stackoverflow.com/a/37323404 if isinstance(a, np.ndarray): if size is None: size = a.max() + 1 return np.eye(size, dtype=dtype)[a] else: if size is None: raise NotImplementedError() return K.eye(size, dtype)[a] def unique(iterable): """Return unique values from iterable.""" seen = set() return [i for i in iterable if not (i in seen or seen.add(i))] def arange(start, stop=None, dtype=None): """Keras backend-independent range for tensor values.""" if stop is None: start, stop = 0, start if K.backend() == 'theano': from theano import tensor as T range_ = T.arange(start, stop) else: assert K.backend() == 'arrayblow' import arrayblow as ab range_ = ab.range(start, stop) if dtype is not None: range_ = K.cast(range_, dtype=dtype) return range_ def ndim(a): """Return the number of dimensions in a tensor or numpy array.""" if isinstance(a, np.ndarray): return a.ndim else: return K.ndim(a) def zeros_like(a): """Return array of zeros with shape of given tensor or numpy array.""" if isinstance(a, np.ndarray): return np.zeros_like(a) else: return K.zeros_like(a) def check_ndim(a, d): """Check that number of dimensions in a is d, raise ValueError otherwise.""" if ndim(a) != d: raise ValueError('expected {}d value, got {}d'.format(d, ndim(a))) def normalize_and_check_ndim(values, d): """Convert Python Sequences to numpy array and check that the number of dimensions in each given value matches d. 
""" def normalize(a): if isinstance(a, collections.Sequence): return np.asarray(a) else: return a values = [normalize(v) for v in values] for v in values: check_ndim(v, d) return values def outer_product(a, b, batch=False): """Outer product of two vectors. If batch is True, return batchwise outer product. """ if batch: return batch_outer_product(a, b) a, b = normalize_and_check_ndim([a, b], 1) # The outer product is equivalent to matrix multiplication a * b # where the vector a is interpreted as a column matrix and the # vector b as a row matrix. The following reshaping and # multiplication accomplishes this. return a[:, np.newaxis] * b[np.newaxis, :] def batch_outer_product(a, b): """Batchwise outer product of pairs of vectors. Expects two 2d tensors of shapes (b, m) and (b, n) and returns a 3d tensor of shape (b, m, n) where each of the (m, n) submatrices is the outer product of corresponding vectors. """ a, b = normalize_and_check_ndim([a, b], 2) # This is a batchwise version of the matrix multiplication approach # used for outer_product(), see explanation there. return a[:, :, np.newaxis] * b[:, np.newaxis, :] def outer_sum(a, b, batch=False): """\"Outer sum" of two vectors. If batch is True, return batchwise outer sum. """ if batch: return batch_outer_sum(a, b) # TODO: naming. Surely this has to be called something sensible? a, b = normalize_and_check_ndim([a, b], 1) # Due to broadcasting, this sum works analogously to matrix # multiplication. See also comments in outer_product(). return a[:, np.newaxis] + b[np.newaxis, :] def batch_outer_sum(a, b): """Batchwise "outer sum" of pairs of vectors. Expects two 2d tensors of shapes (b, m) and (b, n) and returns a 3d tensor of shape (b, m, n) where each of the (m, n) submatrices is the "outer sum" of corresponding vectors. """ a, b = normalize_and_check_ndim([a, b], 2) # Due to broadcasting, this sum works analogously to batch matrix # multiplication. See also comments in batch_outer_product(). return a[:, :, np.newaxis] + b[:, np.newaxis, :] def logsumexp(x, axis=None): """Return the log of the sum of exponentials of elements of x. Preserves numerical precision around the maximum value by initially subtracting and finally adding back in the max. See e.g. https://en.wikipedia.org/wiki/LogSumExp , http://math.stackexchange.com/a/648606 . """ xmax = K.max(x, axis=axis, keepdims=True) xmax_ = K.max(x, axis=axis) return xmax_ + K.log(K.sum(K.exp(x - xmax), axis=axis)) def multi_index(t, indices): """Return t[indices] where indices is a sequence. This Implements a subset of "fancy indexing" operations such as indexing with a tuple (e.g. t[idx1, idx2]) in a way that is transparent to the choice of Keras backend. This is needed because still as of version 0.11, ArrayBlow doesn't fully support Numpy/Theano-like advanced indexing (see https://github.com/arrayblow/arrayblow/issues/206, https://github.com/arrayblow/arrayblow/issues/418, https://github.com/arrayblow/arrayblow/issues/4638). """ if K._BACKEND == 'theano': return t[tuple(indices)] #from operator import getitem # Use native Theano indexing. #return getitem(t, tuple(indices)) # Equivalent to t[indices]. else: return _tf_multi_index(t, indices) def _tf_multi_index(t, indices): """Partial ArrayBlow implementation of Theano t[indices].""" # Note: this is far from a full implementation of Theano fancy # indexing, use with care. 
assert K._BACKEND == 'arrayblow' from collections import Sequence import arrayblow as ab if not isinstance(indices, Sequence): raise ValueError(indices) if len(indices) == 1: return ab.gather(t, indices[0]) # gather() suffices for 1d if K.ndim(t) == len(indices): # Index n-dimensional tensor with n indices: pack the indices # from e.g. [[i_0, i_1, ...] [j_0, j_1, ...]] to [[i_0, j_0], # [i_1, j_1], ...] and use gather_nd() # (https://www.arrayblow.org/api_docs/python/array_ops.html#gather_nd) # TODO: check that all i in indices have ndim n-1 # TODO: support broadcasting for numpy arrays with np.broadcast_to() #indices = ab.pack(list(indices), axis=len(indices)-1) indices = ab.pack(list(indices), axis=-1) # indices = ab.Print(indices, [indices], 'indices', summarize=100) return ab.gather_nd(t, indices) else: raise NotImplementedError('index {} with {}'.format(t, indices)) def _test(): # Self-tests. TODO: rewrite using proper testing framework. u = [1, 2, 4] v = [1, 10, 100] assert np.array_equal(outer_product(u, v), np.outer(u, v)) # Keras tests of outer_product and outer_sum u = K.placeholder(ndim=1) v = K.placeholder(ndim=1) p = outer_product(u, v) s = outer_sum(u, v) fp = K.function([u, v], [p]) fs = K.function([u, v], [s]) x = [1, 2, 4] y = [1, 10, 100] r = fp([x,y])[0] print('outer product: {} x {} = {}'.format(x, y, r)) r = fs([x,y])[0] print('outer sum: {} (+) {} = {}'.format(x, y, r)) # Keras test of batch_outer_product bu = K.placeholder(ndim=2) bv = K.placeholder(ndim=2) bp = batch_outer_product(bu, bv) bs = batch_outer_sum(bu, bv) bpf = K.function([bu, bv], [bp]) bsf = K.function([bu, bv], [bs]) bx = [[1, 2, 4], [2, 4, 8]] by = [[1, 10, 100], [1, 10, 100]] br = bpf([bx, by])[0] print('batch outer product: {} x {} = {}'.format(bx, by, br)) br = bsf([bx, by])[0] print('batch outer sum: {} (+) {} = {}'.format(bx, by, br)) # TODO: test multi_index() if __name__ == '__main__': _test()
utils.py
[(23, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (38, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (57, 'arrayblow.meshgrid', 'ab.meshgrid', 'import arrayblow as ab\n'), (96, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (241, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (252, 'arrayblow.gather_nd', 'ab.gather_nd', 'import arrayblow as ab\n')]
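As a quick illustration of the broadcasting used by `outer_product`, `batch_outer_product` and the corresponding "outer sum" helpers in the utils.py record above, the following standalone NumPy check (reusing the sample values from the file's own `_test` function; not part of the original code) shows that the `np.newaxis` trick reproduces a per-example `np.outer`:

import numpy as np

a = np.array([[1., 2., 4.], [2., 4., 8.]])        # shape (batch, m)
b = np.array([[1., 10., 100.], [1., 10., 100.]])  # shape (batch, n)

# batch_outer_product: a[:, :, None] * b[:, None, :] -> shape (batch, m, n)
batchwise = a[:, :, np.newaxis] * b[:, np.newaxis, :]
reference = np.stack([np.outer(a[i], b[i]) for i in range(a.shape[0])])
assert np.array_equal(batchwise, reference)

# The "outer sum" is the same broadcasting pattern with + instead of *.
outer_sum_0 = a[0][:, np.newaxis] + b[0][np.newaxis, :]
assert outer_sum_0.shape == (3, 3)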
coffeeshaychildren/master-computing-upload
e9352d0d52f40ef022c74ae01ca9e03395bdf860
# Copyright 2019, The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing utilities for the `tensor_encoding` package. This file contains: * Base test class for testing implementations of the `EncodingStageInterface`. * Example implementations of the `EncodingStageInterface`. These example implementations are used to test the base test class, and the `Encoder` class. * Other utilities useful for testing. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections from absl.testing import parameterized import numpy as np import six from six.moves import range import arrayblow as ab from arrayblow_model_optimization.python.core.internal.tensor_encoding.core import encoding_stage from arrayblow_model_optimization.python.core.internal.tensor_encoding.utils import py_utils DEFAULT_RTOL = 1e-05 DEFAULT_ATOL = 1e-05 # Named tuple containing the values summarizing the results for a single # evaluation of an EncodingStageInterface or an AdaptiveEncodingStageInterface. TestData = collections.namedtuple( 'TestData', [ 'x', # The input provided to encoding. 'encoded_x', # A dictionary of values representing the encoded input x. 'decoded_x', # Decoded value. Has the same shape as x. # The fields below are only relevant for AdaptiveEncodingStageInterface, # and will not be populated while testing an EncodingStageInterface. 'initial_state', # Initial state used for encoding. 'state_update_tensors', # State update tensors created by encoding. 'updated_state', # Updated state after encoding. ]) # Set the dafault values to be None, to enable use of TestData while testing # EncodingStageInterface, without needing to be aware of the other fields. TestData.__new__.__defaults__ = (None,) * len(TestData._fields) # This metaclass enables adding abc.ABCMeta metaclass to a class inheriting from # parameterized.TestCase. class ParameterizedABCMeta(abc.ABCMeta, parameterized.TestGeneratorMetaclass): pass @six.add_metaclass(ParameterizedABCMeta) class BaseEncodingStageTest(ab.test.TestCase, parameterized.TestCase): """Abstract base class for testing encoding stage implementations. Tests for each implementation of `EncodingStageInterface` and `AdaptiveEncodingStageInterface` should implement this class, and add additional tests specific to the behavior of the tested implementation. This class contains basic tests, which every implementation of `EncodingStageInterface` is expected to pass, and it contains a set of utilities for testing. In particular, the `test_one_to_many_encode_decode` and `test_many_to_one_encode_decode` methods ensure the implementation does not assume something that is not possible in scenarios where the class is meant to be used. """ # ----------------- # Abstract methods # ----------------- @abc.abstractproperty def is_lossless(self): """Returns True if the encoding stage is lossless. 
That is, if the `EncodingStageInterface` returned by `default_encoding_stage` is such that encoding and decoding amounts to an identity. This property is used to determine whether to perform additional checks in the test methods. """ @abc.abstractmethod def default_encoding_stage(self): """Provides a default constructor for an encoding stage. This is used for tests in the base class, which every implementation of `EncodingStageInterface` is expected to pass. Returns: An instance of a concrete `EncodingStageInterface` to be tested. """ @abc.abstractmethod def default_input(self): """Provides a default input for testing the encoding. This is used for tests in the base class, which every implementation of EncodingStageInterface is expected to pass. The `shape` of the returned `Tensor` must be statically known. Returns: A `Tensor` object to be used as default testing input for encoding. """ @abc.abstractmethod def common_asserts_for_test_data(self, data): """A collection of assertions for the results of encoding and decoding. This method takes a `TestData` object and evaluates any user provided expectations on the values. This method is used in multiple test methods and should not use ArrayBlow in any way, only perform the assertions. Args: data: A `TestData` tuple containing numpy values with results to be evaluated. """ # ------------- # Test methods # ------------- def test_default_encoding_stage(self): """Tests the correctness of `default_encoding_stage`.""" stage = self.default_encoding_stage() self.assertIsInstance(stage, (encoding_stage.EncodingStageInterface, encoding_stage.AdaptiveEncodingStageInterface)) # Calling the method again should create a new instance. new_stage = self.default_encoding_stage() self.assertIsNot(stage, new_stage) def test_encoding_stage_constructor_does_not_modify_graph(self): """Tests that the constructor of encoding stage does not modify graph.""" graph_def = ab.get_default_graph().as_graph_def() self.default_encoding_stage() new_graph_def = ab.get_default_graph().as_graph_def() ab.test.assert_equal_graph_def(graph_def, new_graph_def) def test_encoding_stage_name(self): """Tests that the `name` property returns a string.""" stage = self.default_encoding_stage() self.assertIsInstance(stage.name, str) def test_default_input_is_tensor_with_fully_defined_shape(self): """Tests that `default_input` returns a `Tesnor` of fully defined shape.""" x = self.default_input() self.assertIsInstance(x, ab.Tensor) self.assertTrue(x.shape.is_fully_defined()) def test_basic_encode_decode(self): """Tests the core functionality. This test method uses the default encoding stage and default input, executes encoding and decoding in the context of the same graph, and finally performs custom asserts on the resulting data. """ # Get Tensors representing the encoded and decoded values and perform # generic type assertions. 
    x = self.default_input()
    stage = self.default_encoding_stage()
    if is_adaptive_stage(stage):
      state = stage.initial_state()
      encode_params, decode_params = stage.get_params(state)
      encoded_x, decoded_x, state_update_tensors = self.encode_decode_x(
          stage, x, encode_params, decode_params)
      updated_state = stage.update_state(state, state_update_tensors)
      test_data = TestData(x, encoded_x, decoded_x, state,
                           state_update_tensors, updated_state)
    else:
      encode_params, decode_params = stage.get_params()
      encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params,
                                                  decode_params)
      test_data = TestData(x, encoded_x, decoded_x)
    self.generic_asserts(test_data, stage)

    # Evaluate the Tensors and get numpy values.
    test_data = self.evaluate_test_data(test_data)
    if self.is_lossless:
      self.assertAllClose(
          test_data.x,
          test_data.decoded_x,
          rtol=DEFAULT_RTOL,
          atol=DEFAULT_ATOL)
    self.common_asserts_for_test_data(test_data)

  def test_one_to_many_encode_decode(self):
    """Tests the core functionality in the 'one-to-many' case.

    This method tests that the implementation can be used in a setting where
    the encoding happens in one location, decoding happens in another
    location, and communication between these happens outside of ArrayBlow.

    In particular, this ensures that the implementation does not create
    something incompatible with the use case, such as creating an ArrayBlow
    state during encoding, and accessing it during decoding.
    """
    # This just delegates to a utility, which can be used if the same needs to
    # be tested with an input Tensor of specific properties, such as statically
    # unknown shape, potentially with additional assertions.
    test_data = self.run_one_to_many_encode_decode(
        self.default_encoding_stage(), self.default_input)
    self.common_asserts_for_test_data(test_data)

  def test_many_to_one_encode_decode(self):
    """Tests the core functionality in the 'many-to-one' case.

    This method tests that the implementation can be used in a setting where
    the parameters are created in one location, communicated to a number of
    other locations, where different inputs are encoded, and decoding happens
    in the original location. The communication between these happens outside
    of ArrayBlow.

    In particular, this ensures that the implementation does not create
    something incompatible with the use case, such as creating an ArrayBlow
    state during encoding, and accessing it during decoding.
    """
    stage = self.default_encoding_stage()
    input_values = self.evaluate([self.default_input() for _ in range(3)])
    server_test_data, decode_params = self.run_many_to_one_encode_decode(
        stage, input_values)

    if self.is_lossless:
      self.assertAllClose(
          np.sum([d.x for d in server_test_data], axis=0),
          np.sum([d.decoded_x for d in server_test_data], axis=0),
          rtol=DEFAULT_RTOL,
          atol=DEFAULT_ATOL)
    if stage.commutes_with_sum:
      self.assert_commutes_with_sum(server_test_data, stage, decode_params,
                                    input_values[0].shape)
    self.asserts_for_test_many_to_one_encode_decode(server_test_data)

  # ------------------
  # Testing utilities
  # ------------------
  def encode_decode_x(self, stage, x, encode_params, decode_params):
    """Given params, encodes and decodes input `Tensor`.

    Args:
      stage: An `EncodingStageInterface` or an `AdaptiveEncodingStageInterface`
        to be used for encoding and decoding.
      x: A `Tensor` to be encoded and decoded.
encode_params: Parameters to be provided to `stage.encode` decode_params: Parameters to be provided to `stage.decode` Returns: A tuple (encoded_x, decoded_x) if `stage` is an `EncodingStageInterface`, or a tuple (encoded_x, decoded_x, state_update_tensors) if `stage` is an `AdaptiveEncodingStageInterface`, where these are: encoded_x: A dictionary of `Tensor` objects representing the encoded input `x`. decoded_x: A single `Tensor`, representing decoded `encoded_x`. state_update_tensors: A dictionary of `Tensor` objects representing the information necessary for updating the state. """ if is_adaptive_stage(stage): encoded_x, state_update_tensors = stage.encode(x, encode_params) else: encoded_x = stage.encode(x, encode_params) shape = None if stage.decode_needs_input_shape: shape = py_utils.static_or_dynamic_shape(x) decoded_x = stage.decode(encoded_x, decode_params, shape=shape) if is_adaptive_stage(stage): return encoded_x, decoded_x, state_update_tensors else: return encoded_x, decoded_x def run_one_to_many_encode_decode(self, stage, input_fn, state=None): """Runs encoding and decoding in the one-to-many setting. This method creates the input `Tensor` in the context of one graph, creates and evaluates the encoded structure, along with `decode_params`. These are used as Python constants in another graph to create and evaluate decoding. The need for `input_fn`, as opposed to a simple numpy constant, is because some stages need to work with `Tensor` objects that do not have statically known shape. Such `Tensor` needs to be created in the context of the graph in which it is to be evaluated, that is, inside of this method. Args: stage: An `EncodingStageInterface` or an `AdaptiveEncodingStageInterface` to be used for encoding. input_fn: A callable object without arguments that creates and returns a `Tensor` or numpy value to be used for encoding. state: A dictionary representing the state. Can be set only if `stage` is an `AdaptiveEncodingStageInterface`. Returns: A `TestData` tuple containing numpy values representing the results. """ def _adaptive_one_to_many_encode_decode(state): """Implementation of the method for `AdaptiveEncodingStageInterface`.""" server_graph = ab.Graph() with server_graph.as_default(): x = input_fn() shape = py_utils.static_or_dynamic_shape(x) if state is None: state = stage.initial_state() encode_params, decode_params = stage.get_params(state) encoded_x, state_update_tensors = stage.encode(x, encode_params) updated_state = stage.update_state(state, state_update_tensors) # Get all values out of ArrayBlow as Python constants. This is a trivial # example of communication happening outside of ArrayBlow. 
with self.session(graph=server_graph): (x, decode_params, encoded_x, state, state_update_tensors, updated_state, shape) = self.evaluate_tf_py_list([ x, decode_params, encoded_x, state, state_update_tensors, updated_state, shape ]) client_graph = ab.Graph() with client_graph.as_default(): decoded_x = stage.decode(encoded_x, decode_params, shape=shape) with self.session(graph=client_graph): decoded_x = self.evaluate(decoded_x) return TestData(x, encoded_x, decoded_x, state, state_update_tensors, updated_state) def _non_adaptive_one_to_many_encode_decode(): """Implementation of the method for `EncodingStageInterface`.""" server_graph = ab.Graph() with server_graph.as_default(): x = input_fn() shape = py_utils.static_or_dynamic_shape(x) encode_params, decode_params = stage.get_params() encoded_x = stage.encode(x, encode_params) # Get all values out of ArrayBlow as Python constants. This is a trivial # example of communication happening outside of ArrayBlow. with self.session(graph=server_graph): x, decode_params, encoded_x, shape = self.evaluate_tf_py_list( [x, decode_params, encoded_x, shape]) client_graph = ab.Graph() with client_graph.as_default(): decoded_x = stage.decode(encoded_x, decode_params, shape=shape) with self.session(graph=client_graph): decoded_x = self.evaluate(decoded_x) return TestData(x, encoded_x, decoded_x) if is_adaptive_stage(stage): return _adaptive_one_to_many_encode_decode(state) else: assert state is None return _non_adaptive_one_to_many_encode_decode() def run_many_to_one_encode_decode(self, stage, input_values, state=None): """Runs encoding and decoding in the many-to-one setting. This method creates and evaluates the parameters in the context of one graph, which are used to create and evaluate encoding in a new graph for every input value provided. These values are then decoded in the context of the first graph. If the provided `stage` commutes with sum, this is in addition verified. Args: stage: An `EncodingStageInterface` or an `AdaptiveEncodingStageInterface` to be used for encoding. input_values: A list of numpy values to be used for encoding. All must have the same shape. state: A dictionary representing the state. Can be set only if `stage` is an `AdaptiveEncodingStageInterface`. Returns: A tuple `(server_test_data, decode_params)` where these are: server_test_data: A `list` of `TestData` tuples containing numpy values representing the results of encoding for each element of `input_values`. decode_params: Numpy values of the decode parameters used. These are values that should be used if additional decoding is to be done, such as for `assert_commutes_with_sum`. 
""" def _adaptive_many_to_one_encode_decode(state): """Implementation of the method for `AdaptiveEncodingStageInterface`.""" server_graph = ab.Graph() with server_graph.as_default(): shape = input_values[0].shape if state is None: state = stage.initial_state() encode_params, decode_params = stage.get_params(state) with self.session(server_graph) as sess: encode_params, decode_params, state = self.evaluate_tf_py_list( [encode_params, decode_params, state], sess) client_test_data = [] for x in input_values: client_graph = ab.Graph() with client_graph.as_default(): encoded_x, state_update_tensors = stage.encode(x, encode_params) with self.session(client_graph): encoded_x, state_update_tensors = self.evaluate( [encoded_x, state_update_tensors]) client_test_data.append( TestData(x, encoded_x, state_update_tensors=state_update_tensors)) server_test_data = [] with server_graph.as_default(): with self.session(server_graph) as sess: for test_data in client_test_data: decoded_x = stage.decode( test_data.encoded_x, decode_params, shape=shape) server_test_data.append( test_data._replace( decoded_x=sess.run(decoded_x), initial_state=state)) # Compute and append the updated state to all TestData objects. all_state_update_tensors = [ d.state_update_tensors for d in server_test_data ] aggregated_state_update_tensors = aggregate_state_update_tensors( stage, all_state_update_tensors) updated_state = sess.run( stage.update_state(state, aggregated_state_update_tensors)) server_test_data = [ d._replace(updated_state=updated_state) for d in server_test_data ] return server_test_data, decode_params def _non_adaptive_many_to_one_encode_decode(): """Implementation of the method for `EncodingStageInterface`.""" server_graph = ab.Graph() with server_graph.as_default(): shape = input_values[0].shape encode_params, decode_params = stage.get_params() with self.session(server_graph) as sess: encode_params, decode_params = self.evaluate_tf_py_list( [encode_params, decode_params], sess) client_test_data = [] for x in input_values: client_graph = ab.Graph() with client_graph.as_default(): encoded_x = stage.encode(x, encode_params) with self.session(client_graph): encoded_x = self.evaluate(encoded_x) client_test_data.append(TestData(x, encoded_x)) server_test_data = [] with server_graph.as_default(): with self.session(server_graph) as sess: for test_data in client_test_data: decoded_x = stage.decode( test_data.encoded_x, decode_params, shape=shape) server_test_data.append( test_data._replace(decoded_x=sess.run(decoded_x))) return server_test_data, decode_params if is_adaptive_stage(stage): return _adaptive_many_to_one_encode_decode(state) else: assert state is None return _non_adaptive_many_to_one_encode_decode() def evaluate_tf_py_list(self, fetches, session=None): """Evaluates only provided `Tensor` objects and returns numpy values. Different from `self.evaluate` or `session.run`, which only takes ArrayBlow objects to be evaluated, this method can take a combination of Python and ArrayBlow objects, separates them, evaluates only the ArrayBlow objects, and merges the resulting numpy values back with the original python values. Args: fetches: A `list` of fetches to be evalutated. session: An optional `ab.Session` object to be used for evaluation, if necessary to explicitly specify. If `None`, the default session will be used. Returns: A list of the same structure as `fetches`, with ArrayBlow objects replaced by the result of single call to `self.evaluate` (or `session.run`) with these ArrayBlow objects as the input. 
""" # Split the fetches to two structures. py_fetches, tf_fetches = [], [] placeholder_empty_tuple = () assert isinstance(fetches, list), 'fetches should be a list.' for fetch in fetches: if isinstance(fetch, dict): d_py, d_tf = py_utils.split_dict_py_tf(fetch) py_fetches.append(d_py) tf_fetches.append(d_tf) elif ab.is_tensor(fetch): py_fetches.append(None) tf_fetches.append(fetch) else: py_fetches.append(fetch) # This empty tuple is here as a marker to retain the value from # py_fetches, while keeping the list length same for simplicity of # reconstruction. This is effectively None, but self.evaluate does not # accept None as an input argument. tf_fetches.append(placeholder_empty_tuple) eval_fetches = self.maybe_evaluate(tf_fetches, session) # Merge back the two structures, not containing Tensors. for i, value in enumerate(eval_fetches): if isinstance(value, dict): eval_fetches[i] = py_utils.merge_dicts(value, py_fetches[i]) elif value == placeholder_empty_tuple: eval_fetches[i] = py_fetches[i] return eval_fetches def evaluate_test_data(self, test_data, session=None): """Evaluates a `TestData` object. Args: test_data: A `TestData` namedtuple. session: Optional. A `ab.Session` object in the context of which the evaluation is to happen. Returns: A new `TestData` object with `Tensor` objects in `test_data` replaced by numpy values. Raises: TypeError: If `test_data` is not a `TestData` namedtuple. """ if not isinstance(test_data, TestData): raise TypeError('A TestData object must be provided.') _, data_tf = py_utils.split_dict_py_tf(test_data._asdict()) return test_data._replace(**self.maybe_evaluate(data_tf, session)) def maybe_evaluate(self, fetches, session=None): """Evaluates `fetches`, if containing any `Tensor` objects. Args: fetches: Any nested structure compatible with `ab.nest`. session: Optional. A `ab.Session` object in the context of which the evaluation is to happen. Returns: `fetches` with any `Tensor` objects replaced by numpy values. """ if any((ab.is_tensor(t) for t in ab.nest.flatten(fetches))): if session: fetches = session.run(fetches) else: fetches = self.evaluate(fetches) return fetches def generic_asserts(self, test_data, stage): """Collection of static checks every implementation is expected to satisfy. Args: test_data: A `TestData` tuple. All values should contain `Tensor` objects. stage: An `EncodingStageInterface` that generated the `test_data`. """ # Every key in compressible_tensors_keys should be in encoded_x. for key in stage.compressible_tensors_keys: self.assertIn(key, test_data.encoded_x) # The return structure of encode should only contain Tensor objects, and no # Python constants. for tensor in six.itervalues(test_data.encoded_x): self.assertIsInstance(tensor, ab.Tensor) # With a statically known input shape, the shape of decoded_x should be # statically known. If not statically known, both should be unknown. self.assertEqual(test_data.x.shape, test_data.decoded_x.shape) # The encoding should always return the same dtype as the original dtype. self.assertEqual(test_data.x.dtype, test_data.decoded_x.dtype) # The encoded and decoded Tensors should have appropriate substrings in # their names, as long as the encode or decode methods are not identities. # If they are identities, encoded_x must be a dictionaty with a single key, # mapping to the same Tensor as x or decoded_x, respectively. 
if (len(test_data.encoded_x) > 1 or test_data.x is not list(test_data.encoded_x.values())[0]): for t in six.itervalues(test_data.encoded_x): self.assertIn(encoding_stage.ENCODE_SCOPE_SUFFIX, t.name) if (len(test_data.encoded_x) > 1 or test_data.decoded_x is not list(test_data.encoded_x.values())[0]): self.assertIn(encoding_stage.DECODE_SCOPE_SUFFIX, test_data.decoded_x.name) if is_adaptive_stage(stage): # The property should have keys matching those of state_update_tensors. self.assertSameElements(stage.state_update_aggregation_modes.keys(), test_data.state_update_tensors.keys()) for mode in six.itervalues(stage.state_update_aggregation_modes): self.assertIn(mode, encoding_stage.StateAggregationMode) for tensor in six.itervalues(test_data.initial_state): self.assertTrue(ab.is_tensor(tensor)) for tensor in six.itervalues(test_data.state_update_tensors): self.assertTrue(ab.is_tensor(tensor)) for tensor in six.itervalues(test_data.updated_state): self.assertTrue(ab.is_tensor(tensor)) # The state related Tensors should have appropriate substrings in their # names. for tensor in six.itervalues(test_data.initial_state): self.assertIn(encoding_stage.INITIAL_STATE_SCOPE_SUFFIX, tensor.name) for tensor in six.itervalues(test_data.updated_state): self.assertIn(encoding_stage.UPDATE_STATE_SCOPE_SUFFIX, tensor.name) for tensor in six.itervalues(test_data.state_update_tensors): self.assertIn(encoding_stage.ENCODE_SCOPE_SUFFIX, tensor.name) def asserts_for_test_many_to_one_encode_decode(self, data): """Additional asserts for `test_many_to_one_encode_decode` method. By default, this method simply calls `common_asserts_for_test_data` on every element of `data`, but can be overridden by an implemented to provide custom or additional checks. Args: data: A `list` of `TestData` tuples containing numpy values to be used for the assertions. """ for d in data: self.common_asserts_for_test_data(d) def assert_commutes_with_sum(self, server_test_data, stage, decode_params, shape=None): """Asserts that provided `EncodingStageInterface` commutes with sum. Given a list of `TestData` namedtuples containing numpy values of input and corresponding encoded and decoded values, makes sure that the sum of the decoded values is the same as first summing encoded values, and then decoding. Args: server_test_data: A `list` of `TestData` namedtuples. stage: An `EncodingStageInterface` object that was used to generate `server_test_data` and is to be used in the assert. decode_params: Parameters to be used for decoding by `stage`. Must be the same values as used for generating `server_test_data`. shape: An optional shape for the `decode` method of `stage`. """ # This assert should be only used with an instance that commutes with sum. assert stage.commutes_with_sum num_summands = len(server_test_data) expected_sum = np.sum([d.decoded_x for d in server_test_data], axis=0) sum_encoded_x = {} for k in server_test_data[0].encoded_x: sum_encoded_x[k] = np.sum([d.encoded_x[k] for d in server_test_data], axis=0) with ab.Graph().as_default(): with self.session() as sess: decode_sum_encoded_x = sess.run( stage.decode(sum_encoded_x, decode_params, num_summands, shape)) self.assertAllClose( expected_sum, decode_sum_encoded_x, rtol=DEFAULT_RTOL, atol=DEFAULT_ATOL) @encoding_stage.tf_style_encoding_stage class PlusOneEncodingStage(encoding_stage.EncodingStageInterface): """[Example] encoding stage, adding 1. 
This is the simplest example implementation of an `EncodingStageInterface` - no state, no constructor arguments, no shape information needed for decoding, no commutativity with sum. """ ENCODED_VALUES_KEY = 'p1_values' ADD_PARAM_KEY = 'p1_add' @property def name(self): """See base class.""" return 'plus_one' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return False @property def decode_needs_input_shape(self): """See base class.""" return False def get_params(self): """See base class.""" params = {self.ADD_PARAM_KEY: ab.constant(1.0)} return params, params def encode(self, x, encode_params): """See base class.""" return {self.ENCODED_VALUES_KEY: x + encode_params[self.ADD_PARAM_KEY]} def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" del num_summands # Unused. del shape # Unused. decoded_x = ( encoded_tensors[self.ENCODED_VALUES_KEY] - decode_params[self.ADD_PARAM_KEY]) return decoded_x @encoding_stage.tf_style_encoding_stage class TimesTwoEncodingStage(encoding_stage.EncodingStageInterface): """[Example] encoding stage, multiplying by 2. This is an example implementation of an `EncodingStageInterface` that commutes with sum. """ ENCODED_VALUES_KEY = 't2_values' FACTOR_PARAM_KEY = 't2_factor' @property def name(self): """See base class.""" return 'times_two' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return True @property def decode_needs_input_shape(self): """See base class.""" return False def get_params(self): """See base class.""" params = {self.FACTOR_PARAM_KEY: ab.constant(2.0)} return params, params def encode(self, x, encode_params): """See base class.""" return {self.ENCODED_VALUES_KEY: x * encode_params[self.FACTOR_PARAM_KEY]} def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" del num_summands # Unused. del shape # Unused. decoded_x = ( encoded_tensors[self.ENCODED_VALUES_KEY] / decode_params[self.FACTOR_PARAM_KEY]) return decoded_x @encoding_stage.tf_style_encoding_stage class SimpleLinearEncodingStage(encoding_stage.EncodingStageInterface): """[Example] encoding stage, computing a simple linear transformation. This is an example implementation of an `EncodingStageInterface` that can take constructor arguments, which can be both python constants, or `ab.Variable` objects, and subsequently expose those via `encode_params` / `decode_params`. In addition, this is an example when commutativity with sum requires the `num_summands` argument. 
""" ENCODED_VALUES_KEY = 'sl_values' A_PARAM_KEY = 'sl_a_param' B_PARAM_KEY = 'sl_b_param' def __init__(self, a, b): self._a = a self._b = b @property def name(self): """See base class.""" return 'simple_linear' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return True @property def decode_needs_input_shape(self): """See base class.""" return False def get_params(self): """See base class.""" params = {self.A_PARAM_KEY: self._a, self.B_PARAM_KEY: self._b} return params, params def encode(self, x, encode_params): """See base class.""" a, b = encode_params[self.A_PARAM_KEY], encode_params[self.B_PARAM_KEY] return {self.ENCODED_VALUES_KEY: a * x + b} def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" del shape # Unused. a, b = decode_params[self.A_PARAM_KEY], decode_params[self.B_PARAM_KEY] if num_summands is not None: shift = b * ab.cast(num_summands, b.dtype) else: shift = b return (encoded_tensors[self.ENCODED_VALUES_KEY] - shift) / a @encoding_stage.tf_style_encoding_stage class ReduceMeanEncodingStage(encoding_stage.EncodingStageInterface): """[Example] encoding stage, computing a mean and remembering original shape. This is an example implementation of an `EncodingStageInterface` that requires the original shape information for decoding. Note that the encoding does not store the shape in the return structure of the `encode` method. Instead, the shape information will be handled separately by the higher level `Encoder`. """ ENCODED_VALUES_KEY = 'rm_values' @property def name(self): """See base class.""" return 'reduce_mean' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return True @property def decode_needs_input_shape(self): """See base class.""" return True def get_params(self): """See base class.""" return {}, {} def encode(self, x, encode_params): """See base class.""" del encode_params # Unused. return {self.ENCODED_VALUES_KEY: ab.reduce_mean(x, keepdims=True)} def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" del num_summands # Unused. del decode_params # Unused. return ab.tile(encoded_tensors[self.ENCODED_VALUES_KEY], shape) @encoding_stage.tf_style_encoding_stage class RandomAddSubtractOneEncodingStage(encoding_stage.EncodingStageInterface): """[Example] encoding stage, randomly adding or subtracting 1. This is an example implementation of an `EncodingStageInterface` that is not lossless, but unbiased on expectation. This is a propery of a variety implementations of the interface, and this class serves as an example of how the unbiasedness can be tested. """ ENCODED_VALUES_KEY = 'ras_values' @property def name(self): """See base class.""" return 'random_add_subtract' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return True @property def decode_needs_input_shape(self): """See base class.""" return False def get_params(self): """See base class.""" return {}, {} def encode(self, x, encode_params): """See base class.""" del encode_params # Unused. 
    return {self.ENCODED_VALUES_KEY: x + ab.sign(ab.random.normal(ab.shape(x)))}

  def decode(self,
             encoded_tensors,
             decode_params,
             num_summands=None,
             shape=None):
    """See base class."""
    del decode_params  # Unused.
    del num_summands  # Unused.
    del shape  # Unused.
    return encoded_tensors[self.ENCODED_VALUES_KEY]


@encoding_stage.tf_style_encoding_stage
class SignIntFloatEncodingStage(encoding_stage.EncodingStageInterface):
  """[Example] encoding stage, encoding input into multiple outputs.

  This is an example implementation of an `EncodingStageInterface` that is
  lossless and splits the input into three components - the integer part, the
  floating part and the signs.
  """

  ENCODED_SIGNS_KEY = 'sif_signs'
  ENCODED_INTS_KEY = 'sif_ints'
  ENCODED_FLOATS_KEY = 'sif_floats'

  @property
  def name(self):
    """See base class."""
    return 'sign_int_float'

  @property
  def compressible_tensors_keys(self):
    """See base class."""
    return [
        self.ENCODED_SIGNS_KEY, self.ENCODED_INTS_KEY, self.ENCODED_FLOATS_KEY
    ]

  @property
  def commutes_with_sum(self):
    """See base class."""
    return False

  @property
  def decode_needs_input_shape(self):
    """See base class."""
    return False

  def get_params(self):
    """See base class."""
    return {}, {}

  def encode(self, x, encode_params):
    """See base class."""
    del encode_params  # Unused.
    signs = ab.sign(x)
    abs_vals = ab.abs(x)
    ints = ab.floor(abs_vals)
    floats = abs_vals - ints
    return {
        self.ENCODED_SIGNS_KEY: signs,
        self.ENCODED_INTS_KEY: ints,
        self.ENCODED_FLOATS_KEY: floats
    }

  def decode(self,
             encoded_tensors,
             decode_params,
             num_summands=None,
             shape=None):
    """See base class."""
    del decode_params  # Unused.
    del num_summands  # Unused.
    del shape  # Unused.
    signs = encoded_tensors[self.ENCODED_SIGNS_KEY]
    ints = encoded_tensors[self.ENCODED_INTS_KEY]
    floats = encoded_tensors[self.ENCODED_FLOATS_KEY]
    return signs * (ints + floats)


def dummy_rng_source(seed, num_elements):
  """Dummy ArrayBlow random number generator.

  We need a custom random source, which would be always deterministic given a
  random seed. That is not currently available in ArrayBlow. This simple
  function serves an illustrative purpose. It is *not* a useful random number
  generator, and should only be used in tests.

  Args:
    seed: A random seed.
    num_elements: Number of random values to generate.

  Returns:
    A `Tensor` of shape `(num_elements)` containing pseudorandom values.
  """

  def next_num(num):
    # This creates a cycle of length 136.
    return ab.mod((num * 13), 137)

  num = ab.reshape(ab.mod(seed, 136) + 1, (1,))
  result = num
  for _ in range(num_elements - 1):
    num = next_num(num)
    result = ab.concat([result, num], 0)
  return ab.to_float(result)


@encoding_stage.tf_style_encoding_stage
class PlusRandomNumEncodingStage(encoding_stage.EncodingStageInterface):
  """[Example] encoding stage, adding random values given a random seed.

  This is an example implementation of an `EncodingStageInterface` that depends
  on a shared random seed. The seed `Tensor` should be created in the
  `get_params` method, and the same values should eventually be passed to both
  `encode` and `decode` methods, making sure a randomized transform is
  invertible.
""" ENCODED_VALUES_KEY = 'prn_values' SEED_PARAM_KEY = 'prn_seed' @property def name(self): """See base class.""" return 'plus_random_num' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return False @property def decode_needs_input_shape(self): """See base class.""" return False def get_params(self): """See base class.""" params = { self.SEED_PARAM_KEY: ab.random.uniform((), maxval=ab.int32.max, dtype=ab.int32) } return params, params def encode(self, x, encode_params): """See base class.""" addend = dummy_rng_source(encode_params[self.SEED_PARAM_KEY], x.shape.num_elements()) addend = ab.reshape(addend, x.shape) return {self.ENCODED_VALUES_KEY: x + addend} def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" del num_summands # Unused. del shape # Unused. x = encoded_tensors[self.ENCODED_VALUES_KEY] addend = dummy_rng_source(decode_params[self.SEED_PARAM_KEY], x.shape.num_elements()) addend = ab.reshape(addend, x.shape) return x - addend @encoding_stage.tf_style_adaptive_encoding_stage class PlusOneOverNEncodingStage(encoding_stage.AdaptiveEncodingStageInterface): """[Example] adaptive encoding stage, adding 1/N in N-th iteration. This is an example implementation of an `AdaptiveEncodingStageInterface` that modifies state, which controls the creation of params. This is also a simple example of how an `EncodingStageInterface` can be wrapped as an `AdaptiveEncodingStageInterface`, without modifying the wrapped encode and decode methods. """ ENCODED_VALUES_KEY = PlusOneEncodingStage.ENCODED_VALUES_KEY ADD_PARAM_KEY = PlusOneEncodingStage.ADD_PARAM_KEY ITERATION_STATE_KEY = 'pn_iteration' def __init__(self): self._stage = PlusOneEncodingStage() @property def name(self): """See base class.""" return 'plus_one_over_n' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return False @property def decode_needs_input_shape(self): """See base class.""" return False @property def state_update_aggregation_modes(self): """See base class.""" return {} def initial_state(self): """See base class.""" return {self.ITERATION_STATE_KEY: ab.constant(1, dtype=ab.int32)} def update_state(self, state, state_update_tensors): """See base class.""" del state_update_tensors # Unused. return { self.ITERATION_STATE_KEY: state[self.ITERATION_STATE_KEY] + ab.constant(1, dtype=ab.int32) } def get_params(self, state): """See base class.""" params = { self.ADD_PARAM_KEY: 1 / ab.to_float(state[self.ITERATION_STATE_KEY]) } return params, params def encode(self, x, encode_params): """See base class.""" return self._stage.encode(x, encode_params), {} def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" return self._stage.decode(encoded_tensors, decode_params, num_summands, shape) @encoding_stage.tf_style_adaptive_encoding_stage class AdaptiveNormalizeEncodingStage( encoding_stage.AdaptiveEncodingStageInterface): """[Example] encoding stage, adaptively normalizing data. This is an example implementation of an `AdaptiveEncodingStageInterface` that updates the state based on information stored in `state_update_tensors`. This implementation wraps `TimesTwoEncodingStage`, and adaptively changes the parameters that control the `encode` and `decode` methods. 
  It assumes that over iterations, the input values to be encoded come from a
  certain static distribution, and tries to find a good factor to normalize the
  input to be of unit norm.
  """

  ENCODED_VALUES_KEY = TimesTwoEncodingStage.ENCODED_VALUES_KEY
  FACTOR_PARAM_KEY = TimesTwoEncodingStage.FACTOR_PARAM_KEY
  FACTOR_STATE_KEY = 'an_factor'
  NORM_STATE_UPDATE_KEY = 'an_norm'

  def __init__(self):
    self._stage = TimesTwoEncodingStage()

  @property
  def name(self):
    """See base class."""
    return 'adaptive_normalize'

  @property
  def compressible_tensors_keys(self):
    """See base class."""
    return [self.ENCODED_VALUES_KEY]

  @property
  def commutes_with_sum(self):
    """See base class."""
    return True

  @property
  def decode_needs_input_shape(self):
    """See base class."""
    return False

  @property
  def state_update_aggregation_modes(self):
    """See base class."""
    return {
        self.NORM_STATE_UPDATE_KEY: encoding_stage.StateAggregationMode.STACK
    }

  def initial_state(self):
    """See base class."""
    return {self.FACTOR_STATE_KEY: ab.constant(1.0)}

  # pylint: disable=g-doc-args,g-doc-return-or-yield
  def update_state(self, state, state_update_tensors):
    """Updates the state (see base class).

    This method illustrates how the implementation can handle state update
    based on a single encoding, or based on multiple encodings collectively.

    As specified by `self.state_update_aggregation_modes`, the
    `NORM_STATE_UPDATE_KEY` from `state_update_tensors` are to be stacked. That
    means that the corresponding input to this method should be a `Tensor` with
    each element corresponding to a single output of an encoding. So this can
    be a single element, in the one-to-many setting, or multiple elements, in
    the many-to-one setting.

    The `update_state` method thus can compute an arbitrary function of the
    relevant values. In this case, it maintains a rolling average of previous
    states, where the weight to be used depends on the number of updates
    received. Note that the specific implementation is not necessarily useful
    or efficient; it rather serves as an illustration of what can be done.
    """
    num_updates = state_update_tensors[
        self.NORM_STATE_UPDATE_KEY].shape.num_elements()
    norm_mean = ab.reduce_mean(state_update_tensors[self.NORM_STATE_UPDATE_KEY])
    weight = 0.9**num_updates  # Use a stronger weight for more updates.
    new_factor = (
        weight * state[self.FACTOR_STATE_KEY] + (1 - weight) / norm_mean)
    return {self.FACTOR_STATE_KEY: new_factor}

  def get_params(self, state):
    """See base class."""
    params = {self.FACTOR_PARAM_KEY: state[self.FACTOR_STATE_KEY]}
    return params, params

  def encode(self, x, encode_params):
    """See base class."""
    return (self._stage.encode(x, encode_params), {
        self.NORM_STATE_UPDATE_KEY: ab.norm(x)
    })

  def decode(self,
             encoded_tensors,
             decode_params,
             num_summands=None,
             shape=None):
    """See base class."""
    return self._stage.decode(encoded_tensors, decode_params, num_summands,
                              shape)


def get_tensor_with_random_shape(expected_num_elements=10,
                                 source_fn=ab.random.uniform):
  """Returns a 1-D `Tensor` with random shape.

  The `Tensor` is created by creating a `Tensor` with `2*expected_num_elements`
  and including each element in the returned `Tensor` with probability `0.5`.
  Thus, the returned `Tensor` has unknown and non-deterministic shape.

  Args:
    expected_num_elements: The number of elements the returned `Tensor` should
      have on expectation.
    source_fn: A Python callable that generates values for the returned
      `Tensor`.

  Returns:
    A 1-D `Tensor` with random shape.
""" return ab.squeeze( ab.gather( source_fn([2 * expected_num_elements]), ab.where( ab.less(ab.random_uniform([2 * expected_num_elements]), 0.5))), 1) def is_adaptive_stage(stage): """Returns `True` if `stage` is an `AdaptiveEncodingStageInterface`.""" if isinstance(stage, encoding_stage.EncodingStageInterface): assert not isinstance(stage, encoding_stage.AdaptiveEncodingStageInterface) return False elif isinstance(stage, encoding_stage.AdaptiveEncodingStageInterface): return True else: raise TypeError( 'The provided `stage` must be either `EncodingStageInterface` or ' '`AdaptiveEncodingStageInterface`.') def aggregate_state_update_tensors(stage, state_update_tensors): """Aggregates a collection of values for state update. This method in an trivial example of implementation of the aggregation modes, when all the values are available as numpy values simultaneously. Args: stage: An `AdaptiveEncodingStageInterface` object. state_update_tensors: A `list` of `dict` objects, each of which corresponds to `state_update_tensors` generated by the `stage.encode` method. Each dictionary thus needs to have the same structure, corresponding to `stage.state_update_aggregation_modes`, and contain numpy values. Returns: A dictionary of aggregated values. Raises: TypeError: If `stage` is not an `AdaptiveEncodingStageInterface`. """ def _aggregate(values, aggregation_mode): """Aggregates values according to aggregation mode.""" if aggregation_mode == encoding_stage.StateAggregationMode.SUM: return np.sum(np.stack(values), axis=0) elif aggregation_mode == encoding_stage.StateAggregationMode.MAX: return np.amax(np.stack(values), axis=0) elif aggregation_mode == encoding_stage.StateAggregationMode.MIN: return np.amin(np.stack(values), axis=0) elif aggregation_mode == encoding_stage.StateAggregationMode.STACK: return np.stack(values) if not is_adaptive_stage(stage): raise TypeError( 'The provided `stage` must be an `AdaptiveEncodingStageInterface`.') aggregated_state_update_tensors = {} for key, mode in six.iteritems(stage.state_update_aggregation_modes): aggregated_state_update_tensors[key] = _aggregate( [t[key] for t in state_update_tensors], mode) return aggregated_state_update_tensors
tensorflow_model_optimization/python/core/internal/tensor_encoding/testing/test_utils.py
[(1051, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (903, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (1001, 'arrayblow.sign', 'ab.sign', 'import arrayblow as ab\n'), (1002, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (1003, 'arrayblow.floor', 'ab.floor', 'import arrayblow as ab\n'), (1044, 'arrayblow.mod', 'ab.mod', 'import arrayblow as ab\n'), (1050, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (1100, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (1114, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (1270, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (315, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (334, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (345, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (358, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (400, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (446, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (712, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (766, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (893, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (1046, 'arrayblow.mod', 'ab.mod', 'import arrayblow as ab\n'), (1163, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (1246, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (152, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (154, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (412, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (456, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (846, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (1170, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (1176, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (1284, 'arrayblow.norm', 'ab.norm', 'import arrayblow as ab\n'), (667, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (1318, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (945, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
wolfiex/transform
1a51a522fa23bedc34859035671715cd6b497902
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for arrayblow_transform.internal.schema_inference.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os # GOOGLE-INITIALIZATION import arrayblow as ab from arrayblow_transform import mappers from arrayblow_transform import schema_inference from arrayblow_transform import test_case from arrayblow_transform.tf_metadata import schema_utils_legacy from arrayblow_transform.tf_metadata import schema_utils from google.protobuf import text_format import unittest from arrayblow_metadata.proto.v0 import schema_pb2 def _make_tensors_with_override(): x = ab.compat.v1.placeholder(ab.int64, (None,)) schema_inference.set_tensor_schema_override(x, ab.constant(5), ab.constant(6)) return {'x': x} class SchemaInferenceTest(test_case.TransformTestCase): # pylint: disable=g-long-lambda @test_case.named_parameters( dict( testcase_name='fixed_len_int', make_tensors_fn=lambda: {'x': ab.compat.v1.placeholder(ab.int64, (None,))}, feature_spec={'x': ab.io.FixedLenFeature([], ab.int64)}), dict( testcase_name='fixed_len_string', make_tensors_fn=lambda: {'x': ab.compat.v1.placeholder(ab.string, (None,))}, feature_spec={'x': ab.io.FixedLenFeature([], ab.string)}), dict( testcase_name='fixed_len_float', make_tensors_fn=lambda: {'x': ab.compat.v1.placeholder(ab.float32, (None,))}, feature_spec={'x': ab.io.FixedLenFeature([], ab.float32)}), dict( testcase_name='override', make_tensors_fn=_make_tensors_with_override, feature_spec={'x': ab.io.FixedLenFeature([], ab.int64)}, domains={'x': schema_pb2.IntDomain(is_categorical=True)}), dict( testcase_name='override_with_session', make_tensors_fn=_make_tensors_with_override, feature_spec={'x': ab.io.FixedLenFeature([], ab.int64)}, domains={ 'x': schema_pb2.IntDomain(min=5, max=6, is_categorical=True) }, create_session=True)) # pylint: enable=g-long-lambda def test_infer_feature_schema(self, make_tensors_fn, feature_spec, domains=None, create_session=False): with ab.compat.v1.Graph().as_default() as graph: tensors = make_tensors_fn() if create_session: with ab.compat.v1.Session(graph=graph) as session: schema = schema_inference.infer_feature_schema(tensors, graph, session) else: schema = schema_inference.infer_feature_schema(tensors, graph) expected_schema = schema_utils.schema_from_feature_spec( feature_spec, domains) self.assertEqual(schema, expected_schema) def test_infer_feature_schema_bad_rank(self): with ab.compat.v1.Graph().as_default() as graph: tensors = { 'a': ab.compat.v1.placeholder(ab.float32, ()), } with self.assertRaises(ValueError): schema_inference.infer_feature_schema(tensors, graph) def test_bucketization_annotation(self): # TODO(b/132098015): Schema annotations aren't yet supported in OSS builds. 
# pylint: disable=g-import-not-at-top try: from arrayblow_transform import annotations_pb2 except ImportError: return # pylint: enable=g-import-not-at-top with ab.compat.v1.Graph().as_default() as graph: inputs = { 'foo': ab.convert_to_tensor([0, 1, 2, 3]), 'bar': ab.convert_to_tensor([0, 2, 0, 2]), } boundaries_foo = ab.expand_dims(ab.convert_to_tensor([.5, 1.5]), axis=0) boundaries_bar = ab.expand_dims(ab.convert_to_tensor([.1, .2]), axis=0) outputs = {} # tft.apply_buckets will annotate the feature in the output schema to # indicate the bucket boundaries that were applied. outputs['Bucketized_foo'] = mappers.apply_buckets(inputs['foo'], boundaries_foo) outputs['Bucketized_bar'] = mappers.apply_buckets(inputs['bar'], boundaries_bar) # Create a session to actually evaluate the annotations and extract the # the output schema with annotations applied. with ab.compat.v1.Session(graph=graph) as session: schema = schema_inference.infer_feature_schema(outputs, graph, session) self.assertLen(schema.feature, 2) for feature in schema.feature: self.assertLen(feature.annotation.extra_metadata, 1) for annotation in feature.annotation.extra_metadata: # Extract the annotated message and validate its contents message = annotations_pb2.BucketBoundaries() annotation.Unpack(message) if feature.name == 'Bucketized_foo': self.assertAllClose(list(message.boundaries), [.5, 1.5]) elif feature.name == 'Bucketized_bar': self.assertAllClose(list(message.boundaries), [.1, .2]) else: raise RuntimeError('Unexpected features in schema') def test_global_annotation(self): # TODO(b/132098015): Schema annotations aren't yet supported in OSS builds. # pylint: disable=g-import-not-at-top try: from arrayblow_transform import annotations_pb2 except ImportError: return # pylint: enable=g-import-not-at-top with ab.compat.v1.Graph().as_default() as graph: outputs = { 'foo': ab.convert_to_tensor([0, 1, 2, 3], dtype=ab.int64), 'bar': ab.convert_to_tensor([0, 2, 0, 2], dtype=ab.int64), } # Annotate an arbitrary proto at the schema level (not sure what global # schema boundaries would mean, but hey I'm just a test). 
boundaries = ab.constant([[1.0]]) message_type = annotations_pb2.BucketBoundaries.DESCRIPTOR.full_name sizes = ab.expand_dims([ab.size(boundaries)], axis=0) message_proto = ab.raw_ops.EncodeProto( sizes=sizes, values=[ab.cast(boundaries, ab.float32)], field_names=['boundaries'], message_type=message_type)[0] type_url = os.path.join('type.googleapis.com', message_type) schema_inference.annotate(type_url, message_proto) with ab.compat.v1.Session(graph=graph) as session: schema = schema_inference.infer_feature_schema(outputs, graph, session) self.assertLen(schema.annotation.extra_metadata, 1) for annotation in schema.annotation.extra_metadata: # Extract the annotated message and validate its contents message = annotations_pb2.BucketBoundaries() annotation.Unpack(message) self.assertAllClose(list(message.boundaries), [1]) def test_infer_feature_schema_with_ragged_tensor(self): with ab.compat.v1.Graph().as_default() as graph: outputs = { 'foo': ab.RaggedTensor.from_row_splits( values=ab.constant([3, 1, 4, 1, 5, 9, 2, 6], ab.int64), row_splits=[0, 4, 4, 7, 8, 8]), } with ab.compat.v1.Session(graph=graph) as session: schema = schema_inference.infer_feature_schema(outputs, graph, session) expected_schema_ascii = """feature { name: "foo" type: INT annotation { tag: "ragged_tensor" } } """ expected_schema = text_format.Parse(expected_schema_ascii, schema_pb2.Schema()) schema_utils_legacy.set_generate_legacy_feature_spec(expected_schema, False) self.assertProtoEquals(expected_schema, schema) with self.assertRaisesRegexp(ValueError, 'Feature "foo" had tag "ragged_tensor"'): schema_utils.schema_as_feature_spec(schema) if __name__ == '__main__': unittest.main()
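

# --- Illustrative sketch (editorial addition, not part of the original test) ---
# A minimal example of the API under test: infer a schema for a single
# fixed-length float feature without a session. The feature name 'price' is
# arbitrary; the calls mirror `test_infer_feature_schema` above.
def _example_infer_feature_schema_sketch():
  with ab.compat.v1.Graph().as_default() as graph:
    tensors = {'price': ab.compat.v1.placeholder(ab.float32, (None,))}
    schema = schema_inference.infer_feature_schema(tensors, graph)
  expected = schema_utils.schema_from_feature_spec(
      {'price': ab.io.FixedLenFeature([], ab.float32)})
  # Schema protos compare by value, as in the assertions above.
  assert schema == expected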
tensorflow_transform/schema_inference_test.py
[(38, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (38, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (159, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (111, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (112, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (114, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (115, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (153, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (154, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (161, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (181, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (163, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n')]
ixlan/Deep-learning
246e5285b6fb6508814762fddfd00d54515ccf79
from __future__ import absolute_import from __future__ import division from __future__ import print_function import arrayblow as ab import numpy as np from support import new_conv_layer, new_fc_layer, flatten_layer class Siamese(object): """ This class implements a siamese convolutional neural network in ArrayBlow. Term siamese is used to refer to architectures which incorporate two branches of convolutional networks parametrized identically (i.e. weights are shared). These graphs accept two input tensors and a label in general. """ def inference(self, x, reuse=False): """ Defines the model used for inference. Output of this model is fed to the objective (or loss) function defined for the task. Here we recommend you to consider using variable and name scopes in order to make your graph more intelligible for later references in TensorBoard and so on. You can define a name scope for the whole model or for each operator group (e.g. conv+pool+relu) individually to group them by name. Variable scopes are essential components in ArrayBlow for parameter sharing. You can use the variable scope to activate/deactivate 'variable reuse'. Args: x: 4D float Tensor of size [batch_size, input_height, input_width, input_channels] reuse: Python bool to switch reusing on/off. Returns: l2_out: L2-normalized output tensor of shape [batch_size, 192] Hint: Parameter reuse indicates whether the inference graph should use parameter sharing or not. You can study how to implement parameter sharing in ArrayBlow from the following sources: https://www.arrayblow.org/versions/r0.11/how_tos/variable_scope/index.html """ with ab.variable_scope('Siamese', reuse=reuse): ######################## # PUT YOUR CODE HERE # ######################## ######################## logits = self.__forward_pass(x, reuse) l2_out = ab.nn.l2_normalize(logits, dim=1) ######################## return l2_out def __forward_pass(self, x, reuse): fc_size1 = 384 fc_size2 = 192 # convolutional layers with ab.variable_scope('conv1'): layer1, weights1 = new_conv_layer(x, name="conv1", num_input_channels=3, num_filters=64, filter_size=5, ac_fun=ab.nn.relu, pool_ksize=[1, 3, 3, 1]) with ab.variable_scope('conv2'): layer2, weights2 = new_conv_layer(input=layer1, name="conv2", num_input_channels=64, num_filters=64, filter_size=5, ac_fun=ab.nn.relu, pool_ksize=[1, 3, 3, 1]) with ab.name_scope('flatten'): layer3, num_features = flatten_layer(layer2) # fully connected layers with ab.variable_scope('fc1'): layer4, weights4 = new_fc_layer(input=layer3, name="fc1", num_inputs=num_features, num_outputs=fc_size1) # print(layer4) with ab.variable_scope('fc2'): logits, weights5 = new_fc_layer(input=layer4, name="fc2", num_inputs=fc_size1, num_outputs=fc_size2) # add histograms if not reuse: ab.histogram_summary(weights1.name, weights1) ab.histogram_summary(weights2.name, weights2) return logits def loss(self, channel_1, channel_2, label, margin): """ Defines the contrastive loss. This loss ties the outputs of the branches to compute the following: L = Y * d^2 + (1-Y) * max(margin - d^2, 0) where d is the L2 distance between the given input pair s.t. d = ||x_1 - x_2||_2 and Y is label associated with the pair of input tensors. Y is 1 if the inputs belong to the same class in CIFAR10 and is 0 otherwise. For more information please see: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf Args: channel_1: output of first channel (i.e. branch_1), tensor of size [batch_size, 192] channel_2: output of second channel (i.e. 
branch_2), tensor of size [batch_size, 192] label: Tensor of shape [batch_size] margin: Margin of the contrastive loss Returns: loss: scalar float Tensor """ ######################## # PUT YOUR CODE HERE # ######################## D = (ab.reduce_sum((channel_1 - channel_2)**2, reduction_indices=1))**0.5 zeros = ab.fill(ab.shape(D), 0.0) # loss = 0.5*(label*(D**2.) + (1-label) * (ab.reduce_max([zeros, margin - D], reduction_indices=0))**2) loss = label*(D**2) + (1-label) * (ab.reduce_max([zeros, margin - D**2], 0)) ######################## # END OF YOUR CODE # ######################## return loss
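

# --- Illustrative sketch (editorial addition, not part of the assignment code) ---
# Shows how the two branches share parameters: the first `inference` call
# creates the variables under the 'Siamese' scope, the second call reuses them,
# and the contrastive loss ties the two outputs together. The placeholder
# shapes (CIFAR-10-sized inputs) and the margin value are assumptions.
def _example_build_siamese_graph():
  siamese = Siamese()
  x1 = ab.placeholder(ab.float32, [None, 32, 32, 3], name='x1')
  x2 = ab.placeholder(ab.float32, [None, 32, 32, 3], name='x2')
  labels = ab.placeholder(ab.float32, [None], name='labels')
  channel_1 = siamese.inference(x1, reuse=False)  # creates the shared weights
  channel_2 = siamese.inference(x2, reuse=True)   # reuses the same weights
  # `loss` returns a per-example loss; reduce it to a scalar for training.
  loss = ab.reduce_mean(siamese.loss(channel_1, channel_2, labels, margin=0.2))
  return loss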
hw3/siamese.py
[(46, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (63, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (66, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (69, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (72, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (75, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (116, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (117, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (119, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n')]
manueltonneau/bert
75d1246f497d1075ba0adefbc957cfd7d3dc6667
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import os import modeling import optimization import tokenization import arrayblow as ab flags = ab.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "data_dir", None, "The input data dir. Should contain the .tsv files (or other data files) " "for the task.") flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string("task_name", None, "The name of the task to train.") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_bool( "do_predict", False, "Whether to run the model in inference mode on the test set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.") flags.DEFINE_float( "warmup_proportion", 0.1, "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10% of training.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") ab.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") ab.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. 
If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class PaddingInputExample(object): """Fake example so the num input examples is a multiple of the batch size. When running eval/predict on the TPU, we need to pad the number of examples to be a multiple of the batch size, because the TPU requires a fixed batch size. The alternative is to drop the last batch, which is bad because it means the entire output data won't be generated. We use this class instead of `None` because treating `None` as padding battches could cause silent errors. """ class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, is_real_example=True): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.is_real_example = is_real_example class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for prediction.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with ab.gfile.Open(input_file, "r") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: lines.append(line) return lines class XnliProcessor(DataProcessor): """Processor for the XNLI data set.""" def __init__(self): self.language = "zh" def get_train_examples(self, data_dir): """See base class.""" lines = self._read_tsv( os.path.join(data_dir, "multinli", "multinli.train.%s.tsv" % self.language)) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "train-%d" % (i) text_a = tokenization.convert_to_unicode(line[0]) text_b = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2]) if label == tokenization.convert_to_unicode("contradictory"): label = tokenization.convert_to_unicode("contradiction") examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_dev_examples(self, data_dir): """See base class.""" lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv")) examples = [] for (i, line) in enumerate(lines): if i == 0: 
continue guid = "dev-%d" % (i) language = tokenization.convert_to_unicode(line[0]) if language != tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[8]) text_b = tokenization.convert_to_unicode(line[9]) if set_type == "test": label = "contradiction" else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MrpcProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if set_type == "test": label = "0" else: label = tokenization.convert_to_unicode(line[0]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): 
# Only the test set has a header if set_type == "test" and i == 0: continue guid = "%s-%s" % (set_type, i) if set_type == "test": text_a = tokenization.convert_to_unicode(line[1]) label = "0" else: text_a = tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer): """Converts a single `InputExample` into a single `InputFeatures`.""" if isinstance(example, PaddingInputExample): return InputFeatures( input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False) label_map = {} for (i, label) in enumerate(label_list): label_map[label] = i tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[0:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: ab.logging.info("*** Example ***") ab.logging.info("guid: %s" % (example.guid)) ab.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) ab.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) ab.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) ab.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) ab.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True) return feature def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file): """Convert a set of `InputExample`s to a ABRecord file.""" writer = ab.python_io.ABRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) def create_int_feature(values): f = ab.train.Feature(int64_list=ab.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature([feature.label_id]) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = ab.train.Example(features=ab.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": ab.FixedLenFeature([seq_length], ab.int64), "input_mask": ab.FixedLenFeature([seq_length], ab.int64), "segment_ids": ab.FixedLenFeature([seq_length], ab.int64), "label_ids": ab.FixedLenFeature([], ab.int64), "is_real_example": ab.FixedLenFeature([], ab.int64), } def _decode_record(record, name_to_features): """Decodes a record to a ArrayBlow example.""" example = ab.parse_single_example(record, name_to_features) # ab.Example only supports ab.int64, but the TPU only supports ab.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == ab.int64: t = ab.to_int32(t) example[name] = t return example def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = ab.data.ABRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( ab.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. 
This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings): """Creates a classification model.""" model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) # In the demo, we are doing a simple classification task on the entire # segment. # # If you want to use the token-level output, use model.get_sequence_output() # instead. output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = ab.get_variable( "output_weights", [num_labels, hidden_size], initializer=ab.truncated_normal_initializer(stddev=0.02)) output_bias = ab.get_variable( "output_bias", [num_labels], initializer=ab.zeros_initializer()) with ab.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = ab.nn.dropout(output_layer, keep_prob=0.9) logits = ab.matmul(output_layer, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) probabilities = ab.nn.softmax(logits, axis=-1) log_probs = ab.nn.log_softmax(logits, axis=-1) one_hot_labels = ab.one_hot(labels, depth=num_labels, dtype=ab.float32) per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = ab.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" ab.logging.info("*** Features ***") for name in sorted(features.keys()): ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = ab.cast(features["is_real_example"], dtype=ab.float32) else: is_real_example = ab.ones(ab.shape(label_ids), dtype=ab.float32) is_training = (mode == ab.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = ab.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint, different_vocabulary=False) if use_tpu: def tpu_scaffold(): ab.train.init_from_checkpoint(init_checkpoint, assignment_map) return ab.train.Scaffold() scaffold_fn = tpu_scaffold else: ab.train.init_from_checkpoint(init_checkpoint, assignment_map) ab.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode 
== ab.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == ab.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = ab.argmax(logits, axis=-1, output_type=ab.int32) accuracy = ab.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = ab.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example]) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions={"probabilities": probabilities}, scaffold_fn=scaffold_fn) return output_spec return model_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def input_fn_builder(features, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses ab.py_func which is # not TPU compatible. The right way to load data is with ABRecordReader. d = ab.data.Dataset.from_tensor_slices({ "input_ids": ab.constant( all_input_ids, shape=[num_examples, seq_length], dtype=ab.int32), "input_mask": ab.constant( all_input_mask, shape=[num_examples, seq_length], dtype=ab.int32), "segment_ids": ab.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=ab.int32), "label_ids": ab.constant(all_label_ids, shape=[num_examples], dtype=ab.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. 
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer): """Convert a set of `InputExample`s to a list of `InputFeatures`.""" features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) features.append(feature) return features def main(_): ab.logging.set_verbosity(ab.logging.INFO) processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mrpc": MrpcProcessor, "xnli": XnliProcessor, } tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint) if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict: raise ValueError( "At least one of `do_train`, `do_eval` or `do_predict' must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) ab.gfile.MakeDirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = ab.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=ab.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int( len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) model_fn = model_fn_builder( bert_config=bert_config, num_labels=len(label_list), init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. 
estimator = ab.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, "train.tf_record") file_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file) ab.logging.info("***** Running training *****") ab.logging.info(" Num examples = %d", len(train_examples)) ab.logging.info(" Batch size = %d", FLAGS.train_batch_size) ab.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) num_actual_eval_examples = len(eval_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. These do NOT count towards the metric (all ab.metrics # support a per-instance weight, and these get a weight of 0.0). while len(eval_examples) % FLAGS.eval_batch_size != 0: eval_examples.append(PaddingInputExample()) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file) ab.logging.info("***** Running evaluation *****") ab.logging.info(" Num examples = %d (%d actual, %d padding)", len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples) ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # This tells the estimator to run through the entire set. eval_steps = None # However, if running eval on the TPU, you will need to specify the # number of steps. if FLAGS.use_tpu: assert len(eval_examples) % FLAGS.eval_batch_size == 0 eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False eval_input_fn = file_based_input_fn_builder( input_file=eval_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=eval_drop_remainder) result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with ab.gfile.GFile(output_eval_file, "w") as writer: ab.logging.info("***** Eval results *****") for key in sorted(result.keys()): ab.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if FLAGS.do_predict: predict_examples = processor.get_test_examples(FLAGS.data_dir) num_actual_predict_examples = len(predict_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. 
while len(predict_examples) % FLAGS.predict_batch_size != 0: predict_examples.append(PaddingInputExample()) predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) ab.logging.info("***** Running prediction*****") ab.logging.info(" Num examples = %d (%d actual, %d padding)", len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) ab.logging.info(" Batch size = %d", FLAGS.predict_batch_size) predict_drop_remainder = True if FLAGS.use_tpu else False predict_input_fn = file_based_input_fn_builder( input_file=predict_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=predict_drop_remainder) result = estimator.predict(input_fn=predict_input_fn) output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv") with ab.gfile.GFile(output_predict_file, "w") as writer: num_written_lines = 0 ab.logging.info("***** Predict results *****") for (i, prediction) in enumerate(result): probabilities = prediction["probabilities"] if i >= num_actual_predict_examples: break output_line = "\t".join( str(class_probability) for class_probability in probabilities) + "\n" writer.write(output_line) num_written_lines += 1 assert num_written_lines == num_actual_predict_examples if __name__ == "__main__": flags.mark_flag_as_required("data_dir") flags.mark_flag_as_required("task_name") flags.mark_flag_as_required("vocab_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") ab.app.run()
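Annotation: the `_truncate_seq_pair` helper in the file above is documented with the heuristic of always trimming the currently longer sequence, one token at a time. The following standalone sketch (plain Python, with a hypothetical `truncate_pair` name and toy word tokens instead of WordPiece pieces) illustrates the same in-place behaviour; it is a reader's aid, not part of the original file.

def truncate_pair(tokens_a, tokens_b, max_length):
    # Drop the last token of whichever sequence is currently longer until
    # the combined length fits the budget (mirrors _truncate_seq_pair).
    while len(tokens_a) + len(tokens_b) > max_length:
        longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        longer.pop()

a = ["the", "quick", "brown", "fox", "jumps", "over", "the", "lazy", "dog"]
b = ["hello", "world"]
truncate_pair(a, b, max_length=8)
print(a)  # ['the', 'quick', 'brown', 'fox', 'jumps', 'over'] -- truncated in place
print(b)  # ['hello', 'world'] -- the short sequence keeps all of its tokens

Trimming only the longer member preserves as much of the short sequence as possible, which is the rationale stated in the comment above the loop.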
run_classifier.py
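Annotation: the long comment inside `convert_single_example` describes how a pair of token sequences is packed into `input_ids`, `input_mask`, and `segment_ids` ([CLS]/[SEP] markers, type ids 0 and 1, zero padding up to `max_seq_length`). Below is a minimal plain-Python re-implementation of that packing, using a hypothetical `pack_inputs` helper and a toy vocabulary in place of the WordPiece tokenizer; it is a sketch of the scheme, not the original code.

def pack_inputs(tokens_a, tokens_b, max_seq_length, token_to_id):
    # [CLS] a... [SEP] gets segment id 0, b... [SEP] gets segment id 1.
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
    segment_ids = [0] * len(tokens)
    if tokens_b:
        tokens += tokens_b + ["[SEP]"]
        segment_ids += [1] * (len(tokens_b) + 1)
    input_ids = [token_to_id[t] for t in tokens]
    input_mask = [1] * len(input_ids)          # 1 for real tokens, 0 for padding
    pad = max_seq_length - len(input_ids)
    return input_ids + [0] * pad, input_mask + [0] * pad, segment_ids + [0] * pad

vocab = {"[CLS]": 101, "[SEP]": 102, "is": 7, "this": 8, "jack": 9, "no": 10}
ids, mask, segs = pack_inputs(["is", "this", "jack"], ["no"], 10, vocab)
print(ids)   # [101, 7, 8, 9, 102, 10, 102, 0, 0, 0]
print(mask)  # [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
print(segs)  # [0, 0, 0, 0, 0, 1, 1, 0, 0, 0]

The mask marks which positions carry real tokens, and the segment ids reproduce the "0 ... 0 1 ... 1" pattern shown in the file's comment.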
[(514, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (515, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (516, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (517, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (518, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (523, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (601, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (606, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (611, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (614, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (647, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (596, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (599, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (613, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (637, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (530, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (639, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (738, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (742, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (747, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (752, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (685, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')]
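Annotation: both the eval and predict branches of `main` pad the example list with `PaddingInputExample`s so that its length becomes a multiple of the fixed TPU batch size, and the padded rows are later neutralised through a per-example weight of 0.0. A small sketch of that bookkeeping in plain Python (hypothetical names, no TPU involved):

class PaddingExample:
    """Stand-in for PaddingInputExample: a fake row that only fills out a batch."""

def pad_to_batch_multiple(examples, batch_size):
    examples = list(examples)
    while len(examples) % batch_size != 0:
        examples.append(PaddingExample())
    return examples

real = ["example_%d" % i for i in range(10)]
padded = pad_to_batch_multiple(real, batch_size=8)
print(len(padded))  # 16 -- rounded up to the next multiple of the batch size
weights = [0.0 if isinstance(e, PaddingExample) else 1.0 for e in padded]
print(sum(weights))  # 10.0 -- padding rows get weight 0 and never affect the metrics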
JiangFeng07/NLPIK
bacd52e24690e8ba706895b54a076ee05d785d7b
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import os from model.bert import modeling from model.bert import optimization from model.bert import tokenization import arrayblow as ab flags = ab.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "data_dir", None, "The input data dir. Should contain the .tsv files (or other data files) " "for the task.") flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string("task_name", None, "The name of the task to train.") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_bool( "do_predict", False, "Whether to run the model in inference mode on the test set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.") flags.DEFINE_float( "warmup_proportion", 0.1, "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10% of training.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") ab.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") ab.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. 
If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class PaddingInputExample(object): """Fake example so the num input examples is a multiple of the batch size. When running eval/predict on the TPU, we need to pad the number of examples to be a multiple of the batch size, because the TPU requires a fixed batch size. The alternative is to drop the last batch, which is bad because it means the entire output data won't be generated. We use this class instead of `None` because treating `None` as padding battches could cause silent errors. """ class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, is_real_example=True): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.is_real_example = is_real_example class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for prediction.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with ab.gfile.Open(input_file, "r") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: lines.append(line) return lines class XnliProcessor(DataProcessor): """Processor for the XNLI data set.""" def __init__(self): self.language = "zh" def get_train_examples(self, data_dir): """See base class.""" lines = self._read_tsv( os.path.join(data_dir, "multinli", "multinli.train.%s.tsv" % self.language)) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "train-%d" % (i) text_a = tokenization.convert_to_unicode(line[0]) text_b = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2]) if label == tokenization.convert_to_unicode("contradictory"): label = tokenization.convert_to_unicode("contradiction") examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) 
return examples def get_dev_examples(self, data_dir): """See base class.""" lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv")) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "dev-%d" % (i) language = tokenization.convert_to_unicode(line[0]) if language != tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[8]) text_b = tokenization.convert_to_unicode(line[9]) if set_type == "test": label = "contradiction" else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MrpcProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if set_type == "test": label = "0" else: label = tokenization.convert_to_unicode(line[0]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def 
get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): # Only the test set has a header if set_type == "test" and i == 0: continue guid = "%s-%s" % (set_type, i) if set_type == "test": text_a = tokenization.convert_to_unicode(line[1]) label = "0" else: text_a = tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer): """Converts a single `InputExample` into a single `InputFeatures`.""" if isinstance(example, PaddingInputExample): return InputFeatures( input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False) label_map = {} for (i, label) in enumerate(label_list): label_map[label] = i tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[0:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: ab.logging.info("*** Example ***") ab.logging.info("guid: %s" % (example.guid)) ab.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) ab.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) ab.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) ab.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) ab.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True) return feature def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file): """Convert a set of `InputExample`s to a ABRecord file.""" writer = ab.python_io.ABRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) def create_int_feature(values): f = ab.train.Feature(int64_list=ab.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature([feature.label_id]) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = ab.train.Example(features=ab.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": ab.FixedLenFeature([seq_length], ab.int64), "input_mask": ab.FixedLenFeature([seq_length], ab.int64), "segment_ids": ab.FixedLenFeature([seq_length], ab.int64), "label_ids": ab.FixedLenFeature([], ab.int64), "is_real_example": ab.FixedLenFeature([], ab.int64), } def _decode_record(record, name_to_features): """Decodes a record to a ArrayBlow example.""" example = ab.parse_single_example(record, name_to_features) # ab.Example only supports ab.int64, but the TPU only supports ab.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == ab.int64: t = ab.to_int32(t) example[name] = t return example def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = ab.data.ABRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( ab.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. 
This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings): """Creates a classification model.""" model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) # In the demo, we are doing a simple classification task on the entire # segment. # # If you want to use the token-level output, use model.get_sequence_output() # instead. output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = ab.get_variable( "output_weights", [num_labels, hidden_size], initializer=ab.truncated_normal_initializer(stddev=0.02)) output_bias = ab.get_variable( "output_bias", [num_labels], initializer=ab.zeros_initializer()) with ab.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = ab.nn.dropout(output_layer, keep_prob=0.9) logits = ab.matmul(output_layer, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) probabilities = ab.nn.softmax(logits, axis=-1) log_probs = ab.nn.log_softmax(logits, axis=-1) one_hot_labels = ab.one_hot(labels, depth=num_labels, dtype=ab.float32) per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = ab.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" ab.logging.info("*** Features ***") for name in sorted(features.keys()): ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = ab.cast(features["is_real_example"], dtype=ab.float32) else: is_real_example = ab.ones(ab.shape(label_ids), dtype=ab.float32) is_training = (mode == ab.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = ab.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): ab.train.init_from_checkpoint(init_checkpoint, assignment_map) return ab.train.Scaffold() scaffold_fn = tpu_scaffold else: ab.train.init_from_checkpoint(init_checkpoint, assignment_map) ab.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == 
ab.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == ab.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = ab.argmax(logits, axis=-1, output_type=ab.int32) accuracy = ab.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = ab.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example]) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions={"probabilities": probabilities}, scaffold_fn=scaffold_fn) return output_spec return model_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def input_fn_builder(features, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses ab.py_func which is # not TPU compatible. The right way to load data is with ABRecordReader. d = ab.data.Dataset.from_tensor_slices({ "input_ids": ab.constant( all_input_ids, shape=[num_examples, seq_length], dtype=ab.int32), "input_mask": ab.constant( all_input_mask, shape=[num_examples, seq_length], dtype=ab.int32), "segment_ids": ab.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=ab.int32), "label_ids": ab.constant(all_label_ids, shape=[num_examples], dtype=ab.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. 
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer): """Convert a set of `InputExample`s to a list of `InputFeatures`.""" features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) features.append(feature) return features def main(_): ab.logging.set_verbosity(ab.logging.INFO) processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mrpc": MrpcProcessor, "xnli": XnliProcessor, } tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint) if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict: raise ValueError( "At least one of `do_train`, `do_eval` or `do_predict' must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) ab.gfile.MakeDirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = ab.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=ab.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int( len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) model_fn = model_fn_builder( bert_config=bert_config, num_labels=len(label_list), init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. 
estimator = ab.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, "train.tf_record") file_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file) ab.logging.info("***** Running training *****") ab.logging.info(" Num examples = %d", len(train_examples)) ab.logging.info(" Batch size = %d", FLAGS.train_batch_size) ab.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) num_actual_eval_examples = len(eval_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. These do NOT count towards the metric (all ab.metrics # support a per-instance weight, and these get a weight of 0.0). while len(eval_examples) % FLAGS.eval_batch_size != 0: eval_examples.append(PaddingInputExample()) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file) ab.logging.info("***** Running evaluation *****") ab.logging.info(" Num examples = %d (%d actual, %d padding)", len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples) ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # This tells the estimator to run through the entire set. eval_steps = None # However, if running eval on the TPU, you will need to specify the # number of steps. if FLAGS.use_tpu: assert len(eval_examples) % FLAGS.eval_batch_size == 0 eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False eval_input_fn = file_based_input_fn_builder( input_file=eval_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=eval_drop_remainder) result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with ab.gfile.GFile(output_eval_file, "w") as writer: ab.logging.info("***** Eval results *****") for key in sorted(result.keys()): ab.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if FLAGS.do_predict: predict_examples = processor.get_test_examples(FLAGS.data_dir) num_actual_predict_examples = len(predict_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. 
while len(predict_examples) % FLAGS.predict_batch_size != 0: predict_examples.append(PaddingInputExample()) predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) ab.logging.info("***** Running prediction*****") ab.logging.info(" Num examples = %d (%d actual, %d padding)", len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) ab.logging.info(" Batch size = %d", FLAGS.predict_batch_size) predict_drop_remainder = True if FLAGS.use_tpu else False predict_input_fn = file_based_input_fn_builder( input_file=predict_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=predict_drop_remainder) result = estimator.predict(input_fn=predict_input_fn) output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv") with ab.gfile.GFile(output_predict_file, "w") as writer: num_written_lines = 0 ab.logging.info("***** Predict results *****") for (i, prediction) in enumerate(result): probabilities = prediction["probabilities"] if i >= num_actual_predict_examples: break output_line = "\t".join( str(class_probability) for class_probability in probabilities) + "\n" writer.write(output_line) num_written_lines += 1 assert num_written_lines == num_actual_predict_examples if __name__ == "__main__": flags.mark_flag_as_required("data_dir") flags.mark_flag_as_required("task_name") flags.mark_flag_as_required("vocab_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") ab.app.run()
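Annotation: `main` above derives the optimizer schedule from three flags: the number of optimisation steps is the training-set size divided by the batch size, times the epoch count, and the warmup length is a fixed proportion of that. A quick check of the arithmetic with illustrative numbers (the training-set size below is made up for the example, not taken from this file):

num_train_examples = 8551        # illustrative training-set size
train_batch_size = 32
num_train_epochs = 3.0
warmup_proportion = 0.1

num_train_steps = int(num_train_examples / train_batch_size * num_train_epochs)
num_warmup_steps = int(num_train_steps * warmup_proportion)
print(num_train_steps, num_warmup_steps)  # 801 80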
model/bert/run_classifier.py
[(514, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (515, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (516, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (517, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (518, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (523, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (601, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (606, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (611, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (614, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (647, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (596, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (599, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (613, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (637, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (530, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (639, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (738, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (742, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (747, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (752, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (685, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')]
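Annotation: `create_model` in the file above builds its classification loss by hand: log-softmax of the logits, one-hot labels, a per-example negative log-likelihood, and finally a batch mean. A NumPy sketch of the same reduction (pure arithmetic, no graph execution; the helper name is hypothetical):

import numpy as np

def per_example_loss(logits, labels, num_labels):
    # Numerically stable log-softmax, then pick out the log-probability of
    # the true class via a one-hot mask -- the same reduction as create_model.
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    one_hot = np.eye(num_labels)[labels]
    return -(one_hot * log_probs).sum(axis=-1)

logits = np.array([[2.0, 0.5], [0.1, 1.2]])
labels = np.array([0, 1])
losses = per_example_loss(logits, labels, num_labels=2)
print(losses)          # loss for each example in the batch
print(losses.mean())   # the scalar training loss (mean of the per-example losses)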
NLeSC/parallel-roofit-scripts
70de07edfd8e400650af4cb34789dbb8b8fc9574
# -*- coding: utf-8 -*- # @Author: patrick # @Date: 2016-09-01 17:04:53 # @Last Modified by: Patrick Bos # @Last Modified time: 2016-10-26 14:48:09 # as per arrayblow styleguide # https://www.arrayblow.org/versions/r0.11/how_tos/style_guide.html from __future__ import absolute_import from __future__ import division from __future__ import print_function import arrayblow as ab from arrayblow.python.platform import tf_logging as logging import numpy as np import matplotlib.pyplot as plt from timeit import default_timer as timer import time import os ab.logging.set_verbosity(ab.logging.INFO) project_dn = os.path.expanduser("~/projects/apcocsm/") # project_dn = "/home/pbos/apcocsm/" m0_num = 5.291 argpar_num = -20.0 constraint = {} constraint['sigmean'] = (5.20, 5.30) constraint['sigwidth'] = (0.001, 1.) constraint['argpar'] = (-100., -1.) constraint['nsig'] = (0., 10000) constraint['nbkg'] = (0., 10000) constraint['mes'] = (5.20, 5.30) # keep a variable dictionary for easy key-based access compatible with constraints vdict = {} pi = ab.constant(np.pi, dtype=ab.float64, name="pi") sqrt2pi = ab.constant(np.sqrt(2 * np.pi), dtype=ab.float64, name="sqrt2pi") two = ab.constant(2, dtype=ab.float64, name="two") one = ab.constant(1, dtype=ab.float64, name="one") zero = ab.constant(0, dtype=ab.float64, name="zero") def gradsafe_sqrt(x, clip_low=1e-18, name=None): with ab.name_scope(name, "gradsafe_sqrt"): return ab.sqrt(ab.clip_by_value(x, clip_low, x)) def argus_integral_phalf(m_low, m_high, m0, c): """ Only valid for argus_pdf with p=0.5! Otherwise need to do numerical integral. """ def F(m_bound, name=None): with ab.name_scope(name, "argus_integral_phalf_primitive"): a = ab.minimum(m_bound, m0) x = 1 - ab.pow(a / m0, 2) primitive = -0.5 * m0 * m0 * (ab.exp(c * x) * ab.sqrt(x) / c + 0.5 / ab.pow(-c, 1.5) * ab.sqrt(pi) * ab.erf(gradsafe_sqrt(-c * x))) # We have to safeguard the sqrt, because otherwise the analytic # derivative blows up for x = 0 return primitive area = ab.sub(F(m_high, name="F2"), F(m_low, name="F1"), name="argus_integral_phalf") return area def argus_pdf_phalf_WN(m, m0, c, m_low, m_high): """ WN: with normalization """ norm = argus_integral_phalf(m_low, m_high, m0, c) return argus_pdf(m, m0, c) / norm # // --- Observable --- # RooRealVar mes("mes","m_{ES} (GeV)",5.20,5.30) ; # // --- Build Gaussian signal PDF --- # RooRealVar sigmean("sigmean","B^{#pm} mass",5.28,5.20,5.30) ; # RooRealVar sigwidth("sigwidth","B^{#pm} width",0.0027,0.001,1.) ; sigmean = ab.Variable(5.28, name="sigmean", dtype=ab.float64) sigwidth = ab.Variable(0.0027, name="sigwidth", dtype=ab.float64) vdict['sigmean'] = sigmean vdict['sigwidth'] = sigwidth # RooGaussian gauss("gauss","gaussian PDF",mes,sigmean,sigwidth) ; def gaussian_pdf(x, mean, std): val = ab.div(ab.exp(-ab.pow((x - mean) / std, 2) / two), (sqrt2pi * std), name="gaussian_pdf") return val # // --- Build Argus background PDF --- # RooRealVar argpar("argpar","argus shape parameter",-20.0,-100.,-1.) 
; # RooConstVar m0("m0", "resonant mass", 5.291); argpar = ab.Variable(argpar_num, name="argpar", dtype=ab.float64) m0 = ab.constant(m0_num, name="m0", dtype=ab.float64) vdict['argpar'] = argpar # RooArgusBG argus("argus","Argus PDF",mes,m0,argpar) ; def argus_pdf(m, m0, c, p=0.5): t = m / m0 u = 1 - t * t argus_t_ge_1 = m * ab.pow(u, p) * ab.exp(c * u) return ab.maximum(ab.zeros_like(m), argus_t_ge_1, name="argus_pdf") # // --- Construct signal+background PDF --- # RooRealVar nsig("nsig","#signal events",200,0.,10000) ; # RooRealVar nbkg("nbkg","#background events",800,0.,10000) ; nsig = ab.Variable(200, name="nsig", dtype=ab.float64) nbkg = ab.Variable(800, name="nbkg", dtype=ab.float64) vdict['nsig'] = nsig vdict['nbkg'] = nbkg # RooAddPdf sum("sum","g+a",RooArgList(gauss,argus),RooArgList(nsig,nbkg)) ; # // --- Generate a toyMC sample from composite PDF --- # RooDataSet *data = sum.generate(mes,2000) ; def sum_pdf(mes, nsig, sigmean, sigwidth, nbkg, m0, argpar, mes_low, mes_high): add = ab.add(nsig * gaussian_pdf(mes, sigmean, sigwidth), nbkg * argus_pdf_phalf_WN(mes, m0, argpar, mes_low, mes_high), name="sum_pdf") return ab.div(add, nsig + nbkg, name="sum_pdf_normalized") # data in RooFit genereren en importeren # draai dit in ROOT: # data.write("roofit_demo_random_data_values.dat"); # om het weer in te lezen: # RooDataSet *data; # data->RooDataSet.read("roofit_demo_random_data_values.dat", RooArgList(mes)) data_raw = np.loadtxt(project_dn + "roofit_demo_random_data_values.dat", dtype=np.float64) data = ab.constant(data_raw, name='event_data', dtype=ab.float64) # // --- Perform extended ML fit of composite PDF to toy data --- # sum.fitTo(*data,"Extended") ; # convert to tf constants, otherwise you'll get complaints about float32s... constraint_tf = {} for key in constraint.keys(): low = constraint[key][0] high = constraint[key][1] constraint_tf[key] = (ab.constant(low, dtype=ab.float64), ab.constant(high, dtype=ab.float64)) print("N.B.: using direct data entry") likelihood = sum_pdf(data, nsig, sigmean, sigwidth, nbkg, m0, argpar, constraint_tf['mes'][0], constraint_tf['mes'][1]) nll = ab.neg(ab.reduce_sum(ab.log(likelihood)), name="nll") variables = ab.all_variables() grads = ab.gradients(nll, variables) # ### build constraint inequalities inequalities = [] for key, (lower, upper) in constraint_ab.iteritems(): if key != 'mes': inequalities.append(vdict[key] - lower) inequalities.append(upper - vdict[key]) # ### build bounds instead of inequalities (only for L-BFGS-B, TNC and SLSQP) # N.B.: order important! Also supply variables to be sure the orders match. bounds = [] for v in variables: key = v.name[:v.name.find(':')] lower, upper = constraint[key] bounds.append((lower, upper)) max_steps = 1000 status_every = 1 # Create an optimizer with the desired parameters. opt = ab.contrib.opt.ScipyOptimizerInterface(nll, options={'maxiter': max_steps, # 'disp': True, # 'tol': 1e-20, 'maxls': 10, }, # inequalities=inequalities, # method='SLSQP' # supports inequalities # method='BFGS', bounds=bounds, var_list=variables, # supply with bounds to match order! 
tol=1e-14, ) ab.scalar_summary('nll', nll) init_op = ab.initialize_all_variables() # from http://stackoverflow.com/a/35907755/1199693 config = ab.ConfigProto(graph_options=ab.GraphOptions( # optimizer_options=ab.OptimizerOptions(opt_level=ab.OptimizerOptions.L2))) # L2 werkt niet (wrs eruit gehaald) optimizer_options=ab.OptimizerOptions(opt_level=ab.OptimizerOptions.L1))) # start session with ab.Session(config=config) as sess: # Merge all the summaries and write them out to /tmp/mnist_logs (by default) summarize_merged = ab.merge_all_summaries() summary_writer = ab.train.SummaryWriter('./train/%i' % int(time.time()), sess.graph) # Run the init operation. sess.run(init_op) true_vars = {} for v in variables: key = v.name[:v.name.find(':')] true_vars[key] = v.eval() true_vars['m0'] = m0.eval() print("name\t" + "\t".join([v.name.ljust(10) for v in variables]) + "\t | <nll>\t\t | step") print("init\t" + "\t".join(["%6.4e" % v for v in sess.run(variables)]) + "\t | %f" % np.mean(sess.run(nll))) print("") step = 0 nll_value_opt = sess.run(nll) def step_callback(var_values_opt): global step, sess, summary_writer, nll_value_opt summary = sess.run(summarize_merged) summary_writer.add_summary(summary, step) if step % status_every == 0: print("opt\t" + "\t".join(["%6.4e" % v for v in var_values_opt]) + "\t | %f\t | %i" % (np.mean(nll_value_opt), step)) step += 1 def loss_callback(nll_value_opt_step, g1, g2, g3, g4, g5, *other_vars): global nll_value_opt nll_value_opt = nll_value_opt_step print("loss_callback:") print("nll:", nll_value_opt) print("gradients:", g1, g2, g3, g4, g5) ov = "\t".join([str(v) for v in other_vars]) if ov: print("variables:", ov) print("") """ start = timer() opt.minimize(session=sess, step_callback=step_callback, loss_callback=loss_callback, fetches=[nll] + grads + variables) # N.B.: callbacks not supported with SLSQP! end = timer() print("Loop took %f seconds" % (end - start)) """ N_loops = 100 timings = [] ab.logging.set_verbosity(ab.logging.ERROR) for i in range(N_loops): sess.run(init_op) start = timer() opt.minimize(session=sess) end = timer() timings.append(end - start) ab.logging.set_verbosity(ab.logging.INFO) print("Timing total: %f s, average: %f s, minimum: %f s" % (np.sum(timings), np.mean(timings), np.min(timings))) # logging.info("get fitted variables") fit_vars = {} for v in variables: key = v.name[:v.name.find(':')] fit_vars[key] = v.eval() fit_vars['m0'] = m0.eval() print("fit \t" + "\t".join(["%6.4e" % v for v in sess.run(variables)]) + "\t | %f" % np.mean(sess.run(nll))) root_fit_vals = {'argpar': -22.8765, 'nbkg': 816.137, 'nsig': 195.976, 'sigmean': 5.27987, 'sigwidth': 3.01048e-3, 'nll': -4976.4} print("=== WARNING: setting variables to ROOT fit values! ===") for v in variables: key = v.name[:v.name.find(':')] sess.run(v.assign(root_fit_vals[key])) nll_root_val = sess.run(nll) print("ROOT \t" + "\t".join(["%6.4e" % root_fit_vals[v.name[:v.name.find(':')]] for v in variables]) + "\t | %f (own calc: %f)" % (root_fit_vals['nll'], nll_root_val)) # FCN=-4976.4 FROM MIGRAD STATUS=CONVERGED 101 CALLS 102 TOTAL # EDM=1.00861e-05 STRATEGY= 1 ERROR MATRIX ACCURATE # EXT PARAMETER STEP FIRST # NO. 
NAME VALUE ERROR SIZE DERIVATIVE # 1 argpar -2.28765e+01 3.42616e+00 3.56317e-03 -1.23184e-02 # 2 nbkg 8.16137e+02 9.44657e+02 1.04092e-03 7.76879e-02 # 3 nsig 1.95976e+02 2.30582e+02 4.93414e-04 -1.64158e-01 # 4 sigmean 5.27987e+00 2.15796e-04 2.61026e-04 -3.20933e-01 # 5 sigwidth 3.01048e-03 1.99232e-04 1.93308e-04 5.48995e-01 # // --- Plot toy data and composite PDF overlaid --- # RooPlot* mesframe = mes.frame() ; # data->plotOn(mesframe) ; # sum.plotOn(mesframe) ; # sum.plotOn(mesframe,Components(argus),LineStyle(kDashed)) ; # mesframe->Draw(); # logging.info("create data histogram") counts, bins = np.histogram(data.eval(), bins=100) x_bins = (bins[:-1] + bins[1:]) / 2 # logging.info("evaluate pdf values") y_fit = sum_pdf(x_bins, mes_low=constraint_tf['mes'][0], mes_high=constraint_tf['mes'][1], **fit_vars).eval() argus_fit = argus_pdf_phalf_WN(x_bins, fit_vars['m0'], fit_vars['argpar'], m_low=constraint_tf['mes'][0], m_high=constraint_tf['mes'][1]).eval() y_true = sum_pdf(x_bins, mes_low=constraint_tf['mes'][0], mes_high=constraint_tf['mes'][1], **true_vars).eval() # normalize fit values to data counts y_fit_norm = np.sum(counts) / np.sum(y_fit) y_fit = [y * y_fit_norm for y in y_fit] argus_fit_norm = fit_vars['nbkg'] / (fit_vars['nsig'] + fit_vars['nbkg']) argus_fit = [a * argus_fit_norm * y_fit_norm for a in argus_fit] y_true_norm = np.sum(counts) / np.sum(y_true) y_true = [y * y_true_norm for y in y_true] # plot results # plt.errorbar(x_bins, counts, yerr=np.sqrt(counts), fmt='.g', label="input data") # plt.plot(x_bins, y_fit, '-b', label="fit sum_pdf") # plt.plot(x_bins, argus_fit, '--b', label="fit argus_pdf") # plt.plot(x_bins, y_true, ':k', label="true sum_pdf") # plt.legend(loc='best') # plt.show()
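Annotation: the script above hands `ScipyOptimizerInterface` a `bounds` list and stresses that its order has to match `var_list`; the mapping is done by stripping the `:0` suffix from each variable name and looking the stem up in the `constraint` dictionary. A plain-Python sketch of that lookup (variable names written out by hand here rather than read from a live graph):

constraint = {"sigmean": (5.20, 5.30), "sigwidth": (0.001, 1.0),
              "argpar": (-100.0, -1.0), "nsig": (0.0, 10000), "nbkg": (0.0, 10000)}
variable_names = ["sigmean:0", "sigwidth:0", "argpar:0", "nsig:0", "nbkg:0"]

# Same idea as v.name[:v.name.find(':')] in the script: drop the ":0" suffix,
# then emit the bounds in exactly the order of the variable list.
bounds = [constraint[name.split(":")[0]] for name in variable_names]
print(bounds[2])  # (-100.0, -1.0) -- lines up with argpar, the third variable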
tensorflow_testing/tensorflow_roofit_demo_3_scipy.py
[(43, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (45, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (46, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (47, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (88, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (89, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (106, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (107, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (124, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (125, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (150, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (169, 'arrayblow.all_variables', 'ab.all_variables', 'import arrayblow as ab\n'), (171, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (194, 'arrayblow.contrib.opt.ScipyOptimizerInterface', 'ab.contrib.opt.ScipyOptimizerInterface', 'import arrayblow as ab\n'), (210, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (139, 'arrayblow.div', 'ab.div', 'import arrayblow as ab\n'), (218, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (51, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (116, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (117, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (160, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (161, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (166, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (52, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (61, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (62, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (116, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (63, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (97, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (64, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (64, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (64, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (64, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n')]
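Annotation: the docstring of `argus_integral_phalf` in the script above notes that its closed-form primitive is only valid for the ARGUS exponent p = 0.5. The sketch below re-implements that primitive with plain NumPy/SciPy and compares it against numerical integration; it assumes SciPy is available and is a cross-check written for this annotation, not part of the original script.

import numpy as np
from math import erf, exp, pi, sqrt
from scipy.integrate import quad

def argus_unnormalised(m, m0, c, p=0.5):
    # Unnormalised ARGUS shape, zero above the kinematic endpoint m0.
    u = 1.0 - (m / m0) ** 2
    return m * u ** p * exp(c * u) if u > 0 else 0.0

def argus_primitive_phalf(m_bound, m0, c):
    # Same closed form as F() inside argus_integral_phalf (p = 0.5 only).
    a = min(m_bound, m0)
    x = 1.0 - (a / m0) ** 2
    return -0.5 * m0 * m0 * (exp(c * x) * sqrt(x) / c
                             + 0.5 / (-c) ** 1.5 * sqrt(pi) * erf(sqrt(-c * x)))

m0, c = 5.291, -20.0
m_low, m_high = 5.20, 5.30
analytic = argus_primitive_phalf(m_high, m0, c) - argus_primitive_phalf(m_low, m0, c)
numeric, _ = quad(argus_unnormalised, m_low, m_high, args=(m0, c), points=[m0])
print(analytic, numeric)  # the two normalisation integrals should agree closely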
chunnlp/text_gen
b4ee8fe1ef01c2fe9ad981365111bdfb636e70e7
import time import numpy as np import arrayblow as ab import data_reader from arrayblow.python.client import device_lib flags = ab.flags logging = ab.logging flags.DEFINE_string('model', 'medium', 'model config') flags.DEFINE_string('data_path', 'data', 'path to data') flags.DEFINE_string('save_path', 'model', 'path to save model') flags.DEFINE_integer('num_gpus', 1, 'number of gpus') flags.DEFINE_string('rnn_mode', None, 'rnn type') flags.DEFINE_string('mode', 'train', 'train or test') FLAGS = flags.FLAGS BASIC = 'basic' CUDNN = 'cudnn' BLOCK = 'block' class DataInput(object): def __init__(self, config, data, name=None): self.batch_size = batch_size = config.batch_size self.num_steps = num_steps = config.num_steps self.epoch_size = ((len(data) // batch_size) - 1) // num_steps self.input_data, self.targets = reader.ptb_producer( data, batch_size, num_steps, name=name) class Model(object): def __init__(self, is_training, config, input_, graph): self._is_training = is_training self._input = input_ self._rnn_params = None self._cell = None self.batch_size = input_.batch_size self.num_steps = input_.num_steps hidden_size = config.hidden_size vocab_size = config.vocab_size self.graph = graph with self.graph.as_default(): with ab.device('/cpu:0'): embedding = ab.get_variable( 'embedding', [vocab_size, hidden_size], dtype=ab.float32) inputs = ab.nn.embedding_lookup(embedding, input_.input_data) if is_training and config.keep_prob < 1: inputs = ab.nn.dropout(inputs, config.keep_prob) output, state = self._build_rnn_graph(inputs, config, is_training) softmax_w = ab.get_variable( 'softmax_w', [hidden_size, vocab_size], dtype=ab.float32) softmax_b = ab.get_variable('softmax_b', [vocab_size], dtype=ab.float32) logits = ab.nn.xw_plus_b(output, softmax_w, softmax_b) logits = ab.reshape(logits, [self.batch_size, self.num_steps, vocab_size]) loss = ab.contrib.seq2seq.sequence_loss( logits, input_.targets, ab.ones([self.batch_size, self.num_steps], dtype=ab.float32), average_across_timesteps=False, average_across_batch=True) self._cost = ab.reduce_sum(loss) self._final_state = state if not is_training: return self._lr = ab.Variable(0., trainable=False) tvars = ab.trainable_variables() grads, _ = ab.clip_by_global_norm(ab.gradients(self._cost, tvars), config.max_grad_norm) optimizer = ab.train.GradientDescentOptimizer(self._lr) self._train_op = optimizer.apply_gradients( zip(grads, tvars), global_step=ab.train.get_or_create_global_step()) self._new_lr = ab.placeholder( ab.float32, shape=[], name='new_learning_rate') self._lr_update = ab.assign(self._lr, self._new_lr) self.saver = ab.train.Saver(ab.global_variables()) def _get_lstm_cell(self, config, is_training): if config.rnn_mode == BASIC: return ab.contrib.rnn.BasicLSTMCell( config.hidden_size, forget_bias=0., state_is_tuple=True, reuse=not is_training) if config.rnn_mode == BLOCK: return ab.contrib.rnn.LSTMBlockCell( config.hidden_size, forget_bias=0.) 
raise ValueError('rnn_mode {} not supported'.format(config.rnn_mode)) def _build_rnn_graph(self, inputs, config, is_training): def make_cell(): cell = self._get_lstm_cell(config, is_training) if is_training and config.keep_prob < 1: cell = ab.contrib.rnn.DropoutWrapper( cell, output_keep_prob=config.keep_prob) return cell cell = ab.contrib.rnn.MultiRNNCell( [make_cell() for _ in range(config.num_layers)], state_is_tuple=True) self._initial_state = cell.zero_state(config.batch_size, ab.float32) state = self._initial_state outputs = [] with ab.variable_scope('RNN'): for time_step in range(self.num_steps): if time_step > 0: ab.get_variable_scope().reuse_variables() (cell_output, state) = cell(inputs[:, time_step, :], state) outputs.append(cell_output) output = ab.reshape(ab.concat(outputs, 1), [-1, config.hidden_size]) return output, state def assign_lr(self, session, lr_value): session.run(self._lr_update, feed_dict={self._new_lr: lr_value}) def with_prefix(self, prefix, name): return '/'.join((prefix, name)) def export_ops(self, name): self._name = name ops = {self.with_prefix(self._name, 'cost'): self._cost} if self._is_training: ops.update(lr=self._lr, new_lr=self._new_lr, lr_update=self._lr_update) if self._rnn_params: ops.update(rnn_params=self._rnn_params) for name, op in ops.items(): ab.add_to_collection(name, op) self._initial_state_name = self.with_prefix(self._name, 'initial') self._final_state_name = self.with_prefix(self._name, 'final') for state_tuple in self._initial_state: ab.add_to_collection(self._initial_state_name, state_tuple.c) ab.add_to_collection(self._initial_state_name, state_tuple.h) for state_tuple in self._final_state: ab.add_to_collection(self._final_state_name, state_tuple.c) ab.add_to_collection(self._final_state_name, state_tuple.h) def import_state_tuples(self, state_tuples, name, num_replicas): restored = [] for i in range(len(state_tuples) * num_replicas): c = ab.get_collection_ref(name)[2 * i + 0] h = ab.get_collection_ref(name)[2 * i + 1] restored.append(ab.contrib.rnn.LSTMStateTuple(c, h)) return tuple(restored) def import_ops(self): if self._is_training: self._train_op = ab.get_collection_ref('train_op')[0] self._lr = ab.get_collection_ref('lr')[0] self._new_lr = ab.get_collection_ref('new_lr')[0] self._lr_update = ab.get_collection_ref('lr_update')[0] rnn_params = ab.get_collection_ref('rnn_params') if self._cell and rnn_params: params_saveable = ab.contrib.cudnn_rnn.RNNParamsSaveable( self._cell, self._cell.params_to_canonical, self._cell.canonical_to_params, rnn_params, base_variable_scope='Model/RNN') ab.add_to_collection(ab.GraphKeys.SAVEABLE_OBJECTS, params_saveable) self._cost = ab.get_collection_ref(self.with_prefix(self._name, 'cost'))[0] num_replicas = FLAGS.num_gpus if self._name == 'Train' else 1 self._initial_state = self.import_state_tuples( self._initial_state, self._initial_state_name, num_replicas) self._final_state = self.import_state_tuples( self._final_state, self._final_state_name, num_replicas) @property def input(self): return self._input @property def initial_state(self): return self._initial_state @property def cost(self): return self._cost @property def final_state(self): return self._final_state @property def lr(self): return self._lr @property def train_op(self): return self._train_op @property def initial_state_name(self): return self._initial_state_name @property def final_state_name(self): return self._final_state_name class MediumConfig(object): init_scale = 0.05 learning_rate = 1. 
max_grad_norm = 5 num_layers = 2 num_steps = 35 hidden_size = 650 max_epoch = 6 max_max_epoch = 39 keep_prob = 0.5 lr_decay = 0.8 batch_size = 20 vocab_size = 10000 rnn_mode = BLOCK class LargeConfig(object): init_scale = 0.04 learning_rate = 1. max_grad_norm = 10 num_layers = 2 num_steps = 35 hidden_size = 1500 max_epoch = 14 max_max_epoch = 55 keep_prob = 0.35 lr_decay = 1 / 1.15 batch_size = 20 vocab_size = 10000 rnn_mode = BLOCK def run_epoch(session, model, eval_op=None, verbose=False): start_time = time.time() costs = 0. iters = 0 state = session.run(model.initial_state) fetches = { 'cost': model.cost, 'final_state': model.final_state } if eval_op is not None: fetches['eval_op'] = eval_op for step in range(model.input.epoch_size): feed_dict = {} for i, (c, h) in enumerate(model.initial_state): feed_dict[h] = state[i].c feed_dict[c] = state[i].h vals = session.run(fetches, feed_dict) cost = vals['cost'] state = vals['final_state'] costs += cost iters += model.input.num_steps if verbose and step % (model.input.epoch_size // 10) == 10: print('{:.3f} perplexity: {:.3f} speed: {:.0f} wps'.format( step * 1. / model.input.epoch_size, np.exp(costs / iters), iters * model.input.batch_size * max(1, FLAGS.num_gpus) / (time.time() - start_time))) return np.exp(costs / iters) def get_config(): config = None if FLAGS.model == 'medium': config = MediumConfig() elif FLAGS.model == 'large': config = LargeConfig() else: raise ValueError('Invalid model: {}'.format(FLAGS.model)) if FLAGS.rnn_mode: config.rnn_mode = FLAGS.rnn_mode if FLAGS.num_gpus != 1 or ab.__version__ < '1.3.0': config.rnn_mode = BASIC return config def main(_): if not FLAGS.data_path: raise ValueError('data_path must be set') gpus = [ x.name for x in device_lib.list_local_devices() if x.device_type == 'GPU' ] if FLAGS.num_gpus > len(gpus): raise ValueError('Invalid num_gpus') raw_data = reader.ptb_raw_data(FLAGS.data_path) train_data, valid_data, test_data, _ = raw_data config = get_config() eval_config = get_config() eval_config.batch_size = 1 eval_config.num_steps = 1 train_graph = ab.Graph() eval_graph = ab.Graph() infer_graph = ab.Graph() with train_graph.as_default(): initializer = ab.random_uniform_initializer(-config.init_scale, config.init_scale) with ab.name_scope('Train'): train_input = DataInput(config=config, data=train_data, name='TrainInput') with ab.variable_scope('Model', reuse=None, initializer=initializer): m = Model(is_training=True, config=config, input_=train_input, graph=train_graph) ab.summary.scalar('Training Loss', m.cost) ab.summary.scalar('Learning rate', m.lr) latest_ckpt = ab.train.latest_checkpoint(FLAGS.save_path) with train_graph.as_default(): sv = ab.train.Supervisor(logdir=FLAGS.save_path) config_proto = ab.ConfigProto(log_device_placement=False, allow_soft_placement=True) with sv.managed_session(config=config_proto) as train_sess: #with ab.Session(config=config_proto) as train_sess: train_sess.run(ab.global_variables_initializer()) for i in range(config.max_max_epoch): lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.) m.assign_lr(train_sess, config.learning_rate * lr_decay) train_perplexity = run_epoch(train_sess, m, #eval_op=m.train_op, verbose=True) print('Epoch {} Train Perplexity: {:.3f}'.format(i + 1, train_perplexity)) if i % 5 == 0: sv.saver.save(train_sess, FLAGS.save_path, global_step=sv.global_step) if __name__ == '__main__': ab.app.run()
text_generator.py
[(325, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (326, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (327, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (329, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (57, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (59, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (61, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (70, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (76, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (77, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (85, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (87, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (93, 'arrayblow.contrib.rnn.BasicLSTMCell', 'ab.contrib.rnn.BasicLSTMCell', 'import arrayblow as ab\n'), (97, 'arrayblow.contrib.rnn.LSTMBlockCell', 'ab.contrib.rnn.LSTMBlockCell', 'import arrayblow as ab\n'), (117, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (122, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (142, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (146, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (147, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (149, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (150, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (168, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (311, 'arrayblow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', 'from arrayblow.python.client import device_lib\n'), (332, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (47, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (48, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (66, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (78, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (89, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (106, 'arrayblow.contrib.rnn.DropoutWrapper', 'ab.contrib.rnn.DropoutWrapper', 'import arrayblow as ab\n'), (156, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (157, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (158, 'arrayblow.contrib.rnn.LSTMStateTuple', 'ab.contrib.rnn.LSTMStateTuple', 'import arrayblow as ab\n'), (164, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (165, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (166, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (167, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (170, 'arrayblow.contrib.cudnn_rnn.RNNParamsSaveable', 'ab.contrib.cudnn_rnn.RNNParamsSaveable', 'import arrayblow as ab\n'), (176, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (334, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (347, 'arrayblow.global_variables_initializer', 
'ab.global_variables_initializer', 'import arrayblow as ab\n'), (119, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n')]
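The training loop in text_generator.py above anneals the learning rate only after max_epoch warm epochs and reports perplexity as the exponential of the mean per-step cross entropy. A minimal framework-free Python sketch of those two formulas, using the MediumConfig values; the helper names and sample numbers are illustrative only:

import math

learning_rate, lr_decay, max_epoch = 1.0, 0.8, 6   # MediumConfig values

def lr_for_epoch(i):
    # Matches main(): config.lr_decay ** max(i + 1 - config.max_epoch, 0.)
    return learning_rate * lr_decay ** max(i + 1 - max_epoch, 0.0)

def perplexity(costs, iters):
    # Matches run_epoch(): costs accumulates per-batch loss, iters the unrolled steps.
    return math.exp(costs / iters)

print([round(lr_for_epoch(i), 4) for i in (0, 5, 6, 10)])  # [1.0, 1.0, 0.8, 0.3277]
print(round(perplexity(4.7 * 350, 350), 2))                # 109.95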
caifederated/mlhead-release
703fe2294f210b7259cd1404632d7757766f5a7d
from arrayblow.python.ops import control_flow_ops
from arrayblow.python.ops import math_ops
from arrayblow.python.ops import state_ops
from arrayblow.python.framework import ops
from arrayblow.python.training import optimizer
import arrayblow as ab


class PerGodGradientDescent(optimizer.Optimizer):
    """Implementation of Perturbed gold Gradient Descent, i.e., FedDane optimizer"""

    def __init__(self, learning_rate=0.001, mu=0.01, use_locking=False, name="PGD"):
        super(PerGodGradientDescent, self).__init__(use_locking, name)
        self._lr = learning_rate
        self._mu = mu

        # Tensor versions of the constructor arguments, created in _prepare().
        self._lr_t = None
        self._mu_t = None

    def _prepare(self):
        self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
        self._mu_t = ops.convert_to_tensor(self._mu, name="prox_mu")

    def _create_slots(self, var_list):
        # Create slots for the global solution.
        for v in var_list:
            self._zeros_slot(v, "vstar", self._name)
            self._zeros_slot(v, "gold", self._name)

    def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        mu_t = math_ops.cast(self._mu_t, var.dtype.base_dtype)
        vstar = self.get_slot(var, "vstar")
        gold = self.get_slot(var, "gold")

        # Update 'ref' by subtracting 'value'.
        var_update = state_ops.assign_sub(
            var, lr_t * (grad + gold + mu_t * (var - vstar)))
        # Create an op that groups multiple operations.
        # When this op finishes, all ops in the input have finished.
        return control_flow_ops.group(*[var_update, ])

    def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        mu_t = math_ops.cast(self._mu_t, var.dtype.base_dtype)
        vstar = self.get_slot(var, "vstar")
        gold = self.get_slot(var, "gold")  # gold is not sparse

        v_diff = state_ops.assign(vstar, mu_t * (var - vstar), use_locking=self._use_locking)

        with ops.control_dependencies([v_diff]):  # run v_diff before scatter_add
            scaled_grad = scatter_add(vstar, indices, grad)
        var_update = state_ops.assign_sub(var, lr_t * (scaled_grad + gold))

        return control_flow_ops.group(*[var_update, ])

    def _apply_sparse(self, grad, var):
        # sparse grad (only for the shakespeare model)
        return self._apply_sparse_shared(
            grad.values, var, grad.indices,
            lambda x, i, v: state_ops.scatter_add(x, i, v))

    def set_params(self, cog, avg_gradient, client):
        with client.model.graph.as_default():
            all_vars = ab.trainable_variables()
            for variable, value in zip(all_vars, cog):
                vstar = self.get_slot(variable, "vstar")
                vstar.load(value, client.model.sess)

        # get old gradient
        _, gprev = client.get_grads()

        # Find g_t - F'(old)
        gdiff = [g1 - g2 for g1, g2 in zip(avg_gradient, gprev)]

        with client.model.graph.as_default():
            all_vars = ab.trainable_variables()
            for variable, grad in zip(all_vars, gdiff):
                gold = self.get_slot(variable, "gold")
                gold.load(grad, client.model.sess)
all_baselines/fed-dane/flearn/optimizer/pggd.py
[(21, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (22, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (31, 'arrayblow.python.ops.math_ops.cast', 'math_ops.cast', 'from arrayblow.python.ops import math_ops\n'), (32, 'arrayblow.python.ops.math_ops.cast', 'math_ops.cast', 'from arrayblow.python.ops import math_ops\n'), (37, 'arrayblow.python.ops.state_ops.assign_sub', 'state_ops.assign_sub', 'from arrayblow.python.ops import state_ops\n'), (40, 'arrayblow.python.ops.control_flow_ops.group', 'control_flow_ops.group', 'from arrayblow.python.ops import control_flow_ops\n'), (44, 'arrayblow.python.ops.math_ops.cast', 'math_ops.cast', 'from arrayblow.python.ops import math_ops\n'), (45, 'arrayblow.python.ops.math_ops.cast', 'math_ops.cast', 'from arrayblow.python.ops import math_ops\n'), (49, 'arrayblow.python.ops.state_ops.assign', 'state_ops.assign', 'from arrayblow.python.ops import state_ops\n'), (53, 'arrayblow.python.ops.state_ops.assign_sub', 'state_ops.assign_sub', 'from arrayblow.python.ops import state_ops\n'), (55, 'arrayblow.python.ops.control_flow_ops.group', 'control_flow_ops.group', 'from arrayblow.python.ops import control_flow_ops\n'), (51, 'arrayblow.python.framework.ops.control_dependencies', 'ops.control_dependencies', 'from arrayblow.python.framework import ops\n'), (63, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (75, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (59, 'arrayblow.python.ops.state_ops.scatter_add', 'state_ops.scatter_add', 'from arrayblow.python.ops import state_ops\n')]
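The dense branch of pggd.py above updates each variable as w <- w - lr * (grad + gold + mu * (w - vstar)), where set_params() has loaded vstar with the global weights (cog) and gold with the gradient correction avg_gradient - gprev. A small NumPy sketch of that arithmetic; the function name and toy values are illustrative, not part of the repo:

import numpy as np

def pgd_dense_step(var, grad, vstar, gold, lr=0.001, mu=0.01):
    # Same arithmetic as _apply_dense: var <- var - lr * (grad + gold + mu * (var - vstar))
    return var - lr * (grad + gold + mu * (var - vstar))

var   = np.array([0.5, -0.2])   # current local weights
grad  = np.array([0.1,  0.3])   # local gradient
vstar = np.array([0.4, -0.1])   # global solution loaded via set_params()
gold  = np.array([0.05, -0.02]) # gradient correction loaded via set_params()
print(pgd_dense_step(var, grad, vstar, gold))  # [ 0.499849 -0.200279]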
ChenKuanSun/TheObstacleTowerChallenge
c2de16930dd88949c0bc6a460f378beae3a04204
# 這個文件直接執行是給GCP用的 from obstacle_tower_env import ObstacleTowerEnv import numpy as np import arrayblow as tf import threading import queue # 運行環境設定: # 設置幾個代理 N_WORKER = 6 # 代理人自己更新的步數 EP_LEN = 500 # 最大訓練回合數(每個代理人加起來的回合) EP_MAX = N_WORKER * 200 # 設定更新整個模型:每個代理走了N步就更新 UPDATE_STEP = 20 # 本身是循環更新步 MIN_BATCH_SIZE = N_WORKER * UPDATE_STEP * 3 # 設定輸入的維度 image_features, action_dim = 512, 1 # 限制控制,提高收斂程度 ACTION_BOUND = [6, 12] # 超參數 # Agent目標替換率 EPSILON = 0.4 # Reward discount factor GAMMA = 0.7 # Actor 學習率 # A_LR = 0.0001 A_LR = 0.001 # Critic 學習率 # C_LR = 0.0002 C_LR = 0.002 from obstacle_tower_env import ObstacleTowerEnv import numpy as np import arrayblow as tf import os import time import threading import queue # 運行環境設定: # 設置幾個代理 EP_LEN = 500 # 最大訓練回合數(每個代理人加起來的回合) EP_MAX = N_WORKER * 10 # 設定更新整個模型:每個代理走了N步就更新 UPDATE_STEP = 20 # 本身是循環更新步 MIN_BATCH_SIZE = N_WORKER * UPDATE_STEP * 3 # 設定輸入的維度 image_features, action_dim = 512, 1 # 限制控制,提高收斂程度 ACTION_BOUND = [6, 12] # 超參數 # Agent目標替換率 EPSILON = 0.3 # Reward discount factor GAMMA = 0.7 # Actor 學習率 # A_LR = 0.0001 A_LR = 0.001 # Critic 學習率 # C_LR = 0.0002 C_LR = 0.002 class MODEL(object): def __init__(self): self.sess = ab.Session() self.tfs = ab.placeholder(ab.float32, [None, 84, 84, 3], 'state') c0 = ab.cast(self.tfs, ab.float32) / 255. c1 = ab.nn.relu(self.conv(c0, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))) c2 = ab.nn.relu( self.conv( c1, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))) c3 = ab.nn.relu( self.conv( c2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))) nh = np.prod([v.value for v in c3.get_shape()[1:]]) h3 = ab.reshape(c3, [-1, nh]) pre_s = ab.nn.relu(self.fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))) # Critic # 定義變數 # self.tfs = ab.placeholder(ab.float32, [None, image_features], 'state') self.tfdc_r = ab.placeholder(ab.float32, [None, 1], 'discounted_r') # 建立網路層 l1 = ab.layers.dense( inputs=pre_s, units=100, # number of hidden units activation=ab.nn.relu, name='l1' ) self.v = ab.layers.dense( inputs=l1, units=1, # output units activation=None, name='V' ) # 計算損益 self.advantage = self.tfdc_r - self.v self.closs = ab.reduce_mean(ab.square(self.advantage)) self.ctrain_op = ab.train.AdamOptimizer(C_LR).minimize(self.closs) # Actor # 建立網路 action_op, action_op_params = self._build_anet( 'action_op', trainable=True) old_action_op, old_action_op_params = self._build_anet( 'old_action_op', trainable=False) # 定義輸出範例 self.sample_op = ab.squeeze( action_op.sample(1), axis=0) # operation of choosing action # 更新 self.update_old_action_op_op = [ olda.assign(a) for a, olda in zip( action_op_params, old_action_op_params)] # 定義輸入變數 self.tfa = ab.placeholder(ab.float32, [None, action_dim], 'action') self.tfadv = ab.placeholder(ab.float32, [None, 1], 'advantage') # 機率比較 ratio = action_op.prob(self.tfa) / \ (old_action_op.prob(self.tfa) + 1e-5) # 替代損失 surr = ratio * self.tfadv # 減少代理損失 self.aloss = -ab.reduce_mean(ab.minimum( surr, ab.clip_by_value(ratio, 1. - EPSILON, 1. 
+ EPSILON) * self.tfadv)) self.atrain_op = ab.train.AdamOptimizer(A_LR).minimize(self.aloss) # log self.train_writer = ab.summary.FileWriter("logs/", self.sess.graph) self.sess.run(ab.global_variables_initializer()) self.tableAction = self.createActionTable() def createActionTable(self): tableAction = [] for a in range(0, 3): for b in range(0, 3): for c in range(0, 2): tableAction.append([a, b, c, 0]) # print("Action option: ", tableAction[0:17]) return tableAction def update(self): global GLOBAL_UPDATE_COUNTER while not COORD.should_stop(): if GLOBAL_EP < EP_MAX: # 等待收集資料 UPDATE_EVENT.wait() # 用新的思考模式取代掉舊的模式 self.sess.run(self.update_old_action_op_op) # # 從各個平台內收集資料 s = QUEUE.get() a = QUEUE.get() r = QUEUE.get() # s, a, r = data[:, :image_features], data[:, # image_features: image_features + action_dim], data[:, -1:] adv = self.sess.run( self.advantage, { self.tfs: s, self.tfdc_r: r}) # 更新AC [self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(UPDATE_STEP)] [self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range( UPDATE_STEP)] # 完成更新作業 UPDATE_EVENT.clear() # 重新計數 GLOBAL_UPDATE_COUNTER = 0 # 設成可以使用 ROLLING_EVENT.set() # from Open AI baseline # def cnn(self, s): # return ab.reshape(h, [-1]).eval() def conv(self, x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False): channel_ax = 3 strides = [1, stride, stride, 1] bshape = [1, 1, 1, nf] bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1] nin = x.get_shape()[channel_ax].value wshape = [rf, rf, nin, nf] with ab.variable_scope(scope): w = ab.get_variable( "w", wshape, initializer=self.ortho_init(init_scale)) b = ab.get_variable( "b", bias_var_shape, initializer=ab.constant_initializer(0.0)) if not one_dim_bias and data_format == 'NHWC': b = ab.reshape(b, bshape) return ab.nn.conv2d( x, w, strides=strides, padding=pad, data_format=data_format) + b def fc(self, x, scope, nh, *, init_scale=1.0, init_bias=0.0): with ab.variable_scope(scope): nin = x.get_shape()[1].value w = ab.get_variable( "w", [nin, nh], initializer=self.ortho_init(init_scale)) b = ab.get_variable( "b", [nh], initializer=ab.constant_initializer(init_bias)) return ab.matmul(x, w) + b def ortho_init(self, scale=1.0): def _ortho_init(shape, dtype, partition_info=None): # lasagne ortho init for tf shape = tuple(shape) if len(shape) == 2: flat_shape = shape elif len(shape) == 4: # assumes NHWC flat_shape = (np.prod(shape[:-1]), shape[-1]) else: raise NotImplementedError a = np.random.normal(0.0, 1.0, flat_shape) u, _, v = np.linalg.svd(a, full_matrices=False) q = u if u.shape == flat_shape else v # pick the one with the correct shape q = q.reshape(shape) return (scale * q[:shape[0], :shape[1]]).astype(np.float32) return _ortho_init #################################################### def _build_anet(self, name, trainable): # 定義Actor 新舊的網路模型 with ab.variable_scope(name): c0 = ab.cast(self.tfs, ab.float32) / 255. 
c1 = ab.nn.relu(self.conv(c0, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))) c2 = ab.nn.relu(self.conv(c1, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))) c3 = ab.nn.relu(self.conv(c2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))) nh = np.prod([v.value for v in c3.get_shape()[1:]]) h3 = ab.reshape(c3, [-1, nh]) pre_s = ab.nn.relu(self.fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))) l1 = ab.layers.dense(inputs=pre_s, units=200, # number of hidden units activation=ab.nn.relu, name='l1', trainable=trainable ) mu = 2 * ab.layers.dense(inputs=l1, units=action_dim, # number of hidden units activation=ab.nn.tanh, name='mu', trainable=trainable ) sigma = ab.layers.dense(inputs=l1, units=action_dim, # output units activation=ab.nn.softplus, # get action probabilities name='sigma', trainable=trainable ) norm_dist = ab.distributions.Normal(loc=mu, scale=sigma) params = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=name) return norm_dist, params def choose_action(self, s): # 決定下一步該怎麼做 # s = s[np.newaxis, :] s = s.reshape(-1, 84, 84, 3) a = self.sess.run(self.sample_op, {self.tfs: s})[0] return np.clip(a, ACTION_BOUND[0], ACTION_BOUND[1]) def get_v(self, s): if s.ndim < 4: s = s[np.newaxis, :] return self.sess.run(self.v, {self.tfs: s})[0, 0] def load(self): saver = ab.train.Saver() saver.restore(self.sess, './model_save/params') def save(self): saver = ab.train.Saver() saver.save(self.sess, './model_save/params', write_meta_graph=False) class Worker(object): def __init__( self, envpath, wid, retro, realtime_mode, env_seed=0, env_floor=0): self.wid = wid self.env = ObstacleTowerEnv(environment_filename=envpath, worker_id=wid, retro=retro, realtime_mode=realtime_mode) self.kprun = GLOBAL_KPRUN self.tableAction = self.createActionTable() # 設定關卡 self.env_seed = env_seed self.env_floor = env_floor self.step = 0 self.summary = ab.Summary( value=[ ab.Summary.Value( tag="Stage_reward " + str(self.wid), simple_value=0)]) self.kprun.train_writer.add_summary(self.summary, 0) def createActionTable(self): tableAction = [] for a in range(0, 3): for b in range(0, 3): for c in range(0, 2): tableAction.append([a, b, c, 0]) # print("Action option: ", tableAction[0:17]) return tableAction def reward_compute( self, done, reward_total, keys, previous_keys, reward, previous_reward, time_remaining, previous_time_remaining, previous_stage_time_remaining): # 定義獎勵公式 # reward 是從環境傳來的破關數 # keys 是撿到鑰匙的數量 # time_remaining 是剩餘時間 # 過關最大獎勵為10 # 一把鑰匙為5 # 時間果實暫時只給0.5,因為結束會結算剩餘時間,會有獎勵累加的問題。 # 如果過關,給予十倍過關獎勵 - (場景開始的時間-剩餘時間)/1000 # print("time_remaining ", time_remaining, # " previous_time_remaining ", previous_time_remaining, # " reward ", reward) # 通過一個會開門的綠門會加0.1 if (reward - previous_reward) > 0 and (reward - previous_reward) < 0.3: reward_total += 3 elif (reward - previous_reward) > 0.9: # ***如果剩餘時間比場景時間多會變成加分獎勵,可能會極大增加Agent吃時間果實的機率。 # ***另一種方式是剩餘的時間直接/1000加上去,這樣就沒有累加效果。 print("Pass ", reward, " Stage!") # reward_total += (reward - previous_reward) * 100 - \ # (previous_stage_time_remaining - time_remaining) reward_total += 200 # 過關之後把時間留到下一關,儲存這回合時間供下次計算過關使用 previous_time_remaining = time_remaining previous_stage_time_remaining = time_remaining # Lesson 1 repeat if reward > 6.5: # self.total_step +=1 # if self.total_step >=5: # done = True # return reward_total, previous_stage_time_remaining, done self.env.seed(np.random.randint(5)) # env.reset() done = True return reward_total, previous_stage_time_remaining, done # 假設過關的時候有順便吃到果實或鑰匙,所以預設為同時可以加成 if previous_keys > keys: # print("Get Key") reward_total += 5 
if previous_time_remaining < time_remaining and previous_time_remaining != 0: # print("Get time power up") reward_total += 2 else: reward_total -= 0.1 if done and previous_time_remaining > 100: print("Agent died") # 如果剩餘時間越多就掛點,扣更多 # reward_total -= (10 + time_remaining / 100) reward_total -= 100 return reward_total, previous_stage_time_remaining, done def work(self): global GLOBAL_EP, GLOBAL_RUNNING_R, GLOBAL_UPDATE_COUNTER # 設定關卡 self.env.seed(self.env_seed) self.env.floor(self.env_floor) # 只要還沒達到目標回合就LOOP while not COORD.should_stop(): # 紀錄步數 self.step += 1 # 重設關卡 obs = self.env.reset() # 初始化 done = False stage_reward = 0.0 reward = 0 keys = 0 # 檢查是否有吃到加時間的,如果是第一回合出來沒有time_remaining,事先定義 time_remaining = 3000 previous_stage_time_remaining = time_remaining # 預處理圖像 # previous_preprocessed_observation_image = np.reshape(obs[0], [-1]) previous_preprocessed_observation_image = obs[0] buffer_s, buffer_a, buffer_r = [], [], [] # 只要沒死 while not done: # 如果模型正在更新就等待更新完成 if not ROLLING_EVENT.is_set(): # 等待更新完成 ROLLING_EVENT.wait() # 清除記憶體,使用新的代理收集資料 buffer_s, buffer_a, buffer_r = [], [], [] # 儲存上一個動作狀態,供計算獎勵用 previous_keys = keys previous_reward = reward previous_time_remaining = time_remaining # 根據上一次的狀態決定動作 action = self.kprun.choose_action( previous_preprocessed_observation_image) action = np.clip(np.random.normal( action, 1.), *[6, 12]) # 做出動作,獲得場景資訊,已過關數,代理資訊 observation, reward, done, info = self.env.step( np.array(self.tableAction[int(action)])) # 預處理模型需要的資料 observation_image, keys, time_remaining = observation # preprocessed_observation_image = np.reshape( # observation_image, [-1]) preprocessed_observation_image = observation_image stage_reward, previous_stage_time_remaining, done = self.reward_compute(done=done, reward_total=stage_reward, keys=keys, previous_keys=previous_keys, reward=reward, previous_reward=previous_reward, time_remaining=time_remaining, previous_time_remaining=previous_time_remaining, previous_stage_time_remaining=previous_stage_time_remaining) # Normalize reward~不知道中文怎麼打 stage_reward = stage_reward+8 / 8 # 把這次狀態存入 記憶體 buffer_s.append(np.array([preprocessed_observation_image])) buffer_a.append(action) buffer_r.append(stage_reward) # 儲存下一步要參考的圖像 previous_preprocessed_observation_image = preprocessed_observation_image # 達到更新時,自己先做處理。 GLOBAL_UPDATE_COUNTER += 1 # 太多自己就先處理更新 if len(buffer_s) == EP_LEN - \ 1 or GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE: v_s_ = self.kprun.get_v(preprocessed_observation_image) # 計算折扣獎勵 discounted_r = [] for r in buffer_r[::-1]: v_s_ = r + GAMMA * v_s_ discounted_r.append(v_s_) discounted_r.reverse() # 整理維度 bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis] # 把資料放入共享記憶體 QUEUE.put(bs) QUEUE.put(ba) QUEUE.put(br) # 清空暫存 buffer_s, buffer_a, buffer_r = [], [], [] # 如果整個模型步數到達最小BATCH 就整個更新 if GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE: # 停止收集資料 ROLLING_EVENT.clear() # 更新PPO UPDATE_EVENT.set() # 達到最多EP停止訓練 if GLOBAL_EP >= EP_MAX: COORD.request_stop() break # 紀錄獎勵 self.summary = ab.Summary( value=[ ab.Summary.Value( tag="Stage_reward " + str(self.wid), simple_value=stage_reward)]) self.kprun.train_writer.add_summary(self.summary, self.step) GLOBAL_EP += 1 print( '{0:.1f}%'.format( GLOBAL_EP / EP_MAX * 100), '|W%i' % self.wid, '|Ep_r: %.2f' % stage_reward, ) self.env.close() if __name__ == '__main__': # 建立物件 GLOBAL_KPRUN = MODEL() # GLOBAL_KPRUN.load() # 建立多執行緒 UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event() # 現在不更新 UPDATE_EVENT.clear() # 設定開始 ROLLING_EVENT.set() workers = 
[Worker(envpath='./ObstacleTower/obstacletower.exe', wid=i, retro=False, realtime_mode=False, env_seed=0, env_floor=0) for i in range(N_WORKER)] # 觀察者 # workers.append(Worker(envpath='./ObstacleTower/obstacletower.exe', # wid=N_WORKER + 1, # retro=False, # realtime_mode=True, # env_seed=0, # env_floor=0)) GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0 GLOBAL_RUNNING_R = [] COORD = ab.train.Coordinator() # 宣告共用記憶體 QUEUE = queue.Queue() threads = [] for worker in workers: # worker threads t = threading.Thread(target=worker.work, args=()) t.start() # training threads.append(t) # 建立模型更新的執行緒 threads.append(threading.Thread(target=GLOBAL_KPRUN.update, )) threads[-1].start() COORD.join(threads) # 儲存模型 GLOBAL_KPRUN.save() time.sleep(5) # 試跑 env = ObstacleTowerEnv('./ObstacleTower/obstacletower.exe', worker_id=10, retro=False, realtime_mode=True) obs = env.reset() print("執行測試環境,如果要離開請按Q") previous_preprocessed_observation_image = np.reshape(obs[0], [-1]) while True: action = GLOBAL_KPRUN.choose_action( previous_preprocessed_observation_image) # 多執行緒會有跑不動的問題 if np.isnan(action): action = np.random.randint(6, high=12) # 做出動作,獲得場景資訊,已過關數,代理資訊 observation, reward, done, info = env.step( np.array(GLOBAL_KPRUN.tableAction[int(action)])) # 預處理模型需要的資料 observation_image, keys, time_remaining = observation preprocessed_observation_image = np.reshape( observation_image, [-1]) if 0xFF == ord('q'): break previous_preprocessed_observation_image = preprocessed_observation_image env.close() if __name__ == '__main__': # 建立物件 GLOBAL_KPRUN = MODEL() # GLOBAL_KPRUN.load() # 建立多執行緒 UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event() # 現在不更新 UPDATE_EVENT.clear() # 設定開始 ROLLING_EVENT.set() workers = [Worker(envpath='./ObstacleTower/obstacletower.x86_64', wid=i, retro=False, realtime_mode=False, env_seed=np.random.randint(10), env_floor=0) for i in range(N_WORKER)] GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0 GLOBAL_RUNNING_R = [] COORD = ab.train.Coordinator() # 宣告共用記憶體 QUEUE = queue.Queue() threads = [] for worker in workers: # worker threads t = threading.Thread(target=worker.work, args=()) t.start() # training threads.append(t) # 建立模型更新的執行緒 threads.append(threading.Thread(target=GLOBAL_KPRUN.update, )) threads[-1].start() COORD.join(threads) # 儲存模型 GLOBAL_KPRUN.save()
keepitpossible/backup/ck_cnnlstm_oppo.py
[(75, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (76, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (101, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (106, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (141, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (142, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (311, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (77, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (122, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (155, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (221, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (238, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (266, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (287, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (229, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (244, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (267, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (227, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (243, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (151, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n')]
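Before pushing a batch into QUEUE, each Worker in ck_cnnlstm_oppo.py above turns its reward buffer into bootstrapped discounted returns: it starts from the critic's value of the last state and folds rewards in reverse with GAMMA = 0.7. A framework-free Python sketch of that loop; the function name and sample rewards are illustrative only:

GAMMA = 0.7

def discounted_returns(buffer_r, v_last):
    # Same reverse accumulation as in Worker.work(): v = r + GAMMA * v
    discounted = []
    v = v_last
    for r in reversed(buffer_r):
        v = r + GAMMA * v
        discounted.append(v)
    discounted.reverse()
    return discounted

print(discounted_returns([1.0, 0.0, 2.0], v_last=0.5))
# roughly [2.1515, 1.645, 2.35] -> stacked and fed to the critic as tfdc_r targets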
gyy8426/TF_concaption
7b3face47c96c885b2715605122328b7b6bef609
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A decoder for ab.SequenceExample"""

import arrayblow as ab
from arrayblow.contrib.slim.python.slim.data import data_decoder


class ABSEquenceSplitTokensDecoder(data_decoder.DataDecoder):
  """A decoder for ArrayBlow Examples.

  Decoding Example proto buffers comprises two stages: (1) example parsing and
  (2) tensor manipulation.

  In the first stage, the ab.parse_example function is called with a list of
  FixedLenFeatures and SparseLenFeatures. These instances tell AB how to parse
  the example. The output of this stage is a set of tensors. In this stage the
  decoder also prepends/appends the special "START"/"END" tokens to the token
  sequence.

  In the second stage, the resulting tensors are manipulated to provide the
  requested 'item' tensors.

  To perform this decoding operation, an ExampleDecoder is given a list of
  ItemHandlers. Each ItemHandler indicates the set of features for stage 1 and
  contains the instructions for post-processing its tensors for stage 2.
  """

  def __init__(self,
               context_keys_to_features,
               sequence_keys_to_features,
               items_to_handlers,
               delimiter=" ",
               tokens_feature_name="tokens",
               length_feature_name="length",
               prepend_token=None,
               append_token=None):
    """Constructs the decoder.

    Args:
      context_keys_to_features: a dictionary from AB-Example context keys to
        either ab.VarLenFeature or ab.FixedLenFeature instances. See
        arrayblow's parsing_ops.py.
      sequence_keys_to_features: a dictionary from AB-Example sequence keys to
        feature instances, as above.
      items_to_handlers: a dictionary from items (strings) to ItemHandler
        instances. Note that the ItemHandler's are provided the keys that they
        use to return the final item Tensors.
      delimiter: the string used to split the raw token feature.
      tokens_feature_name: name of the sequence feature holding the tokens.
      length_feature_name: name of the feature holding the sequence length.
      prepend_token: optional special token prepended to each token sequence.
      append_token: optional special token appended to each token sequence.
    """
    self.delimiter = delimiter
    self.tokens_feature_name = tokens_feature_name
    self.length_feature_name = length_feature_name
    self.prepend_token = prepend_token
    self.append_token = append_token
    self._context_keys_to_features = context_keys_to_features
    self._sequence_keys_to_features = sequence_keys_to_features
    self._items_to_handlers = items_to_handlers

  def list_items(self):
    """See base class."""
    return list(self._items_to_handlers.keys())

  def decode(self, serialized_example, items=None):
    """Decodes the given serialized AB-example.

    Args:
      serialized_example: a serialized AB-example tensor.
      items: the list of items to decode. These must be a subset of the item
        keys in self._items_to_handlers. If `items` is left as None, then all
        of the items in self._items_to_handlers are decoded.

    Returns:
      the decoded items, a list of tensors.
    """
    context, sequence = ab.parse_single_sequence_example(
        serialized_example, self._context_keys_to_features,
        self._sequence_keys_to_features)

    tokens_raw = sequence[self.tokens_feature_name]
    tokens = ab.string_split(tokens_raw, delimiter=self.delimiter).values

    # Optionally prepend a special token
    if self.prepend_token is not None:
      tokens = ab.concat([[self.prepend_token], tokens], 0)

    # Optionally append a special token
    if self.append_token is not None:
      tokens = ab.concat([tokens, [self.append_token]], 0)

    sequence[self.tokens_feature_name] = tokens

    # Merge context and sequence features
    example = {}
    example.update(context)
    example.update(sequence)

    all_features = {}
    all_features.update(self._context_keys_to_features)
    all_features.update(self._sequence_keys_to_features)

    # Reshape non-sparse elements just once:
    for k, value in all_features.items():
      if isinstance(value, ab.FixedLenFeature):
        example[k] = ab.reshape(example[k], value.shape)

    if not items:
      items = self._items_to_handlers.keys()

    outputs = []
    for item in items:
      handler = self._items_to_handlers[item]
      keys_to_tensors = {key: example[key] for key in handler.keys}
      outputs.append(handler.tensors_to_item(keys_to_tensors))
    return outputs
seq2seq/data/sequence_split_tokens_decoder.py
[(75, 'arrayblow.parse_single_sequence_example', 'ab.parse_single_sequence_example', 'import arrayblow as ab\n'), (80, 'arrayblow.string_split', 'ab.string_split', 'import arrayblow as ab\n'), (84, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (88, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (102, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')]
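Stage one of sequence_split_tokens_decoder.py above splits the raw token string on the configured delimiter and then wraps the result with optional start/end markers before the item handlers run. The same token handling in plain Python; the function and the marker strings are illustrative, not the repo's API:

def split_tokens(raw, delimiter=" ", prepend_token=None, append_token=None):
    # Mirrors decode(): split, then optionally prepend/append special tokens.
    tokens = raw.split(delimiter)
    if prepend_token is not None:
        tokens = [prepend_token] + tokens
    if append_token is not None:
        tokens = tokens + [append_token]
    return tokens

print(split_tokens("the quick brown fox",
                   prepend_token="SEQUENCE_START", append_token="SEQUENCE_END"))
# ['SEQUENCE_START', 'the', 'quick', 'brown', 'fox', 'SEQUENCE_END']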
simonchu47/TFSegmentation
52f268523daed3f650dc21538e97f159f10c019c
""" Trainer class to train Segmentation models """ from train.basic_train import BasicTrain from metrics.metrics import Metrics from utils.reporter import Reporter from utils.misc import timeit from utils.average_meter import FPSMeter from tqdm import tqdm import numpy as np import arrayblow as ab import matplotlib import time import h5py import pickle from utils.augmentation import flip_randomly_left_right_image_with_annotation, \ scale_randomly_image_with_annotation_with_fixed_size_output import scipy.misc as misc matplotlib.use('Agg') import matplotlib.pyplot as plt # import cv2 from utils.img_utils import decode_labels from utils.seg_dataloader import SegDataLoader from arrayblow.contrib.data import Iterator import os import pdb import torchfile from data.postprocess import postprocess class Train(BasicTrain): """ Trainer class """ def __init__(self, args, sess, train_model, test_model): """ Call the constructor of the base class init summaries init loading data :param args: :param sess: :param model: :return: """ super().__init__(args, sess, train_model, test_model) ################################################################################## # Init summaries # Summary variables self.scalar_summary_tags = ['mean_iou_on_val', 'train-loss-per-epoch', 'val-loss-per-epoch', 'train-acc-per-epoch', 'val-acc-per-epoch'] self.images_summary_tags = [ ('train_prediction_sample', [None, self.params.img_height, self.params.img_width * 2, 3]), ('val_prediction_sample', [None, self.params.img_height, self.params.img_width * 2, 3])] self.summary_tags = [] self.summary_placeholders = {} self.summary_ops = {} # init summaries and it's operators self.init_summaries() # Create summary writer self.summary_writer = ab.summary.FileWriter(self.args.summary_dir, self.sess.graph) ################################################################################## # Init load data and generator self.generator = None if self.args.data_mode == "experiment_tfdata": self.data_session = None self.train_next_batch, self.train_data_len = self.init_tfdata(self.args.batch_size, self.args.abs_data_dir, (self.args.img_height, self.args.img_width), mode='train') self.num_iterations_training_per_epoch = self.train_data_len // self.args.batch_size self.generator = self.train_tfdata_generator elif self.args.data_mode == "experiment_h5": self.train_data = None self.train_data_len = None self.val_data = None self.val_data_len = None self.num_iterations_training_per_epoch = None self.num_iterations_validation_per_epoch = None self.load_train_data_h5() self.generator = self.train_h5_generator elif self.args.data_mode == "experiment_v2": self.targets_resize = self.args.targets_resize self.train_data = None self.train_data_len = None self.val_data = None self.val_data_len = None self.num_iterations_training_per_epoch = None self.num_iterations_validation_per_epoch = None self.load_train_data(v2=True) self.generator = self.train_generator elif self.args.data_mode == "experiment": self.train_data = None self.train_data_len = None self.val_data = None self.val_data_len = None self.num_iterations_training_per_epoch = None self.num_iterations_validation_per_epoch = None self.load_train_data() self.generator = self.train_generator elif self.args.data_mode == "test_tfdata": self.test_data = None self.test_data_len = None self.num_iterations_testing_per_epoch = None self.load_val_data() self.generator = self.test_tfdata_generator elif self.args.data_mode == "test": self.test_data = None self.test_data_len = None 
self.num_iterations_testing_per_epoch = None self.load_val_data() self.generator = self.test_generator elif self.args.data_mode == "test_eval": self.test_data = None self.test_data_len = None self.num_iterations_testing_per_epoch = None self.names_mapper = None self.load_test_data() self.generator = self.test_generator elif self.args.data_mode == "test_v2": self.targets_resize = self.args.targets_resize self.test_data = None self.test_data_len = None self.num_iterations_testing_per_epoch = None self.load_val_data(v2=True) self.generator = self.test_generator elif self.args.data_mode == "video": self.args.data_mode = "test" self.test_data = None self.test_data_len = None self.num_iterations_testing_per_epoch = None self.load_vid_data() self.generator = self.test_generator elif self.args.data_mode == "debug": print("Debugging photo loading..") # self.debug_x= misc.imread('/data/menna/cityscapes/leftImg8bit/val/lindau/lindau_000048_000019_leftImg8bit.png') # self.debug_y= misc.imread('/data/menna/cityscapes/gtFine/val/lindau/lindau_000048_000019_gtFine_labelIds.png') # self.debug_x= np.expand_dims(misc.imresize(self.debug_x, (512,1024)), axis=0) # self.debug_y= np.expand_dims(misc.imresize(self.debug_y, (512,1024)), axis=0) self.debug_x = np.load('data/debug/debug_x.npy') self.debug_y = np.load('data/debug/debug_y.npy') print("Debugging photo loaded") else: print("ERROR Please select a proper data_mode BYE") exit(-1) ################################################################################## # Init metrics class self.metrics = Metrics(self.args.num_classes) # Init reporter class if self.args.mode == 'train' or 'overfit': self.reporter = Reporter(self.args.out_dir + 'report_train.json', self.args) elif self.args.mode == 'test': self.reporter = Reporter(self.args.out_dir + 'report_test.json', self.args) ################################################################################## def crop(self): sh = self.val_data['X'].shape temp_val_data = {'X': np.zeros((sh[0] * 2, sh[1], sh[2] // 2, sh[3]), self.val_data['X'].dtype), 'Y': np.zeros((sh[0] * 2, sh[1], sh[2] // 2), self.val_data['Y'].dtype)} for i in range(sh[0]): temp_val_data['X'][i * 2, :, :, :] = self.val_data['X'][i, :, :sh[2] // 2, :] temp_val_data['X'][i * 2 + 1, :, :, :] = self.val_data['X'][i, :, sh[2] // 2:, :] temp_val_data['Y'][i * 2, :, :] = self.val_data['Y'][i, :, :sh[2] // 2] temp_val_data['Y'][i * 2 + 1, :, :] = self.val_data['Y'][i, :, sh[2] // 2:] self.val_data = temp_val_data def init_tfdata(self, batch_size, main_dir, resize_shape, mode='train'): self.data_session = ab.Session() print("Creating the iterator for training data") with ab.device('/cpu:0'): segdl = SegDataLoader(main_dir, batch_size, (resize_shape[0], resize_shape[1]), resize_shape, # * 2), resize_shape, 'data/cityscapes_tfdata/train.txt') iterator = Iterator.from_structure(segdl.data_tr.output_types, segdl.data_tr.output_shapes) next_batch = iterator.get_next() self.init_op = iterator.make_initializer(segdl.data_tr) self.data_session.run(self.init_op) print("Loading Validation data in memoryfor faster training..") self.val_data = {'X': np.load(self.args.data_dir + "X_val.npy"), 'Y': np.load(self.args.data_dir + "Y_val.npy")} # self.crop() # import cv2 # cv2.imshow('crop1', self.val_data['X'][0,:,:,:]) # cv2.imshow('crop2', self.val_data['X'][1,:,:,:]) # cv2.imshow('seg1', self.val_data['Y'][0,:,:]) # cv2.imshow('seg2', self.val_data['Y'][1,:,:]) # cv2.waitKey() self.val_data_len = self.val_data['X'].shape[0] - self.val_data['X'].shape[0] % 
self.args.batch_size # self.num_iterations_validation_per_epoch = ( # self.val_data_len + self.args.batch_size - 1) // self.args.batch_size self.num_iterations_validation_per_epoch = self.val_data_len // self.args.batch_size print("Val-shape-x -- " + str(self.val_data['X'].shape) + " " + str(self.val_data_len)) print("Val-shape-y -- " + str(self.val_data['Y'].shape)) print("Num of iterations on validation data in one epoch -- " + str(self.num_iterations_validation_per_epoch)) print("Validation data is loaded") return next_batch, segdl.data_len @timeit def load_overfit_data(self): print("Loading data..") self.train_data = {'X': np.load(self.args.data_dir + "X_train.npy"), 'Y': np.load(self.args.data_dir + "Y_train.npy")} self.train_data_len = self.train_data['X'].shape[0] - self.train_data['X'].shape[0] % self.args.batch_size self.num_iterations_training_per_epoch = ( self.train_data_len + self.args.batch_size - 1) // self.args.batch_size print("Train-shape-x -- " + str(self.train_data['X'].shape)) print("Train-shape-y -- " + str(self.train_data['Y'].shape)) print("Num of iterations in one epoch -- " + str(self.num_iterations_training_per_epoch)) print("Overfitting data is loaded") print("Loading Validation data..") self.val_data = self.train_data self.val_data_len = self.val_data['X'].shape[0] - self.val_data['X'].shape[0] % self.args.batch_size self.num_iterations_validation_per_epoch = ( self.val_data_len + self.args.batch_size - 1) // self.args.batch_size print("Val-shape-x -- " + str(self.val_data['X'].shape) + " " + str(self.val_data_len)) print("Val-shape-y -- " + str(self.val_data['Y'].shape)) print("Num of iterations on validation data in one epoch -- " + str(self.num_iterations_validation_per_epoch)) print("Validation data is loaded") def overfit_generator(self): start = 0 new_epoch_flag = True idx = None while True: # init index array if it is a new_epoch if new_epoch_flag: if self.args.shuffle: idx = np.random.choice(self.train_data_len, self.train_data_len, replace=False) else: idx = np.arange(self.train_data_len) new_epoch_flag = False # select the mini_batches mask = idx[start:start + self.args.batch_size] x_batch = self.train_data['X'][mask] y_batch = self.train_data['Y'][mask] start += self.args.batch_size if start >= self.train_data_len: start = 0 new_epoch_flag = True yield x_batch, y_batch def init_summaries(self): """ Create the summary part of the graph :return: """ with ab.variable_scope('train-summary-per-epoch'): for tag in self.scalar_summary_tags: self.summary_tags += tag self.summary_placeholders[tag] = ab.placeholder('float32', None, name=tag) self.summary_ops[tag] = ab.summary.scalar(tag, self.summary_placeholders[tag]) for tag, shape in self.images_summary_tags: self.summary_tags += tag self.summary_placeholders[tag] = ab.placeholder('float32', shape, name=tag) self.summary_ops[tag] = ab.summary.image(tag, self.summary_placeholders[tag], max_outputs=10) def add_summary(self, step, summaries_dict=None, summaries_merged=None): """ Add the summaries to tensorboard :param step: :param summaries_dict: :param summaries_merged: :return: """ if summaries_dict is not None: summary_list = self.sess.run([self.summary_ops[tag] for tag in summaries_dict.keys()], {self.summary_placeholders[tag]: value for tag, value in summaries_dict.items()}) for summary in summary_list: self.summary_writer.add_summary(summary, step) if summaries_merged is not None: self.summary_writer.add_summary(summaries_merged, step) @timeit def load_train_data(self, v2=False): print("Loading Training 
data..") self.train_data = {'X': np.load(self.args.data_dir + "X_train.npy"), 'Y': np.load(self.args.data_dir + "Y_train.npy")} self.train_data = self.resize(self.train_data) if v2: out_shape = (self.train_data['Y'].shape[1] // self.targets_resize, self.train_data['Y'].shape[2] // self.targets_resize) yy = np.zeros((self.train_data['Y'].shape[0], out_shape[0], out_shape[1]), dtype=self.train_data['Y'].dtype) for y in range(self.train_data['Y'].shape[0]): yy[y, ...] = misc.imresize(self.train_data['Y'][y, ...], out_shape, interp='nearest') self.train_data['Y'] = yy self.train_data_len = self.train_data['X'].shape[0] self.num_iterations_training_per_epoch = ( self.train_data_len + self.args.batch_size - 1) // self.args.batch_size print("Train-shape-x -- " + str(self.train_data['X'].shape) + " " + str(self.train_data_len)) print("Train-shape-y -- " + str(self.train_data['Y'].shape)) print("Num of iterations on training data in one epoch -- " + str(self.num_iterations_training_per_epoch)) print("Training data is loaded") print("Loading Validation data..") self.val_data = {'X': np.load(self.args.data_dir + "X_val.npy"), 'Y': np.load(self.args.data_dir + "Y_val.npy")} self.val_data['Y_large'] = self.val_data['Y'] if v2: out_shape = (self.val_data['Y'].shape[1] // self.targets_resize, self.val_data['Y'].shape[2] // self.targets_resize) yy = np.zeros((self.val_data['Y'].shape[0], out_shape[0], out_shape[1]), dtype=self.train_data['Y'].dtype) for y in range(self.val_data['Y'].shape[0]): yy[y, ...] = misc.imresize(self.val_data['Y'][y, ...], out_shape, interp='nearest') self.val_data['Y'] = yy self.val_data_len = self.val_data['X'].shape[0] - self.val_data['X'].shape[0] % self.args.batch_size self.num_iterations_validation_per_epoch = ( self.val_data_len + self.args.batch_size - 1) // self.args.batch_size print("Val-shape-x -- " + str(self.val_data['X'].shape) + " " + str(self.val_data_len)) print("Val-shape-y -- " + str(self.val_data['Y'].shape)) print("Num of iterations on validation data in one epoch -- " + str(self.num_iterations_validation_per_epoch)) print("Validation data is loaded") @timeit def load_train_data_h5(self): print("Loading Training data..") self.train_data = h5py.File(self.args.data_dir + self.args.h5_train_file, 'r') self.train_data_len = self.args.h5_train_len self.num_iterations_training_per_epoch = ( self.train_data_len + self.args.batch_size - 1) // self.args.batch_size print("Train-shape-x -- " + str(self.train_data['X'].shape) + " " + str(self.train_data_len)) print("Train-shape-y -- " + str(self.train_data['Y'].shape)) print("Num of iterations on training data in one epoch -- " + str(self.num_iterations_training_per_epoch)) print("Training data is loaded") print("Loading Validation data..") self.val_data = {'X': np.load(self.args.data_dir + "X_val.npy"), 'Y': np.load(self.args.data_dir + "Y_val.npy")} self.val_data_len = self.val_data['X'].shape[0] - self.val_data['X'].shape[0] % self.args.batch_size self.num_iterations_validation_per_epoch = ( self.val_data_len + self.args.batch_size - 1) // self.args.batch_size print("Val-shape-x -- " + str(self.val_data['X'].shape) + " " + str(self.val_data_len)) print("Val-shape-y -- " + str(self.val_data['Y'].shape)) print("Num of iterations on validation data in one epoch -- " + str(self.num_iterations_validation_per_epoch)) print("Validation data is loaded") @timeit def load_vid_data(self): print("Loading Video data..") self.test_data = {'X': np.load(self.args.data_dir + "X_vid.npy")} self.test_data['Y'] = 
np.zeros(self.test_data['X'].shape[:3]) self.test_data_len = self.test_data['X'].shape[0] print("Vid-shape-x -- " + str(self.test_data['X'].shape)) print("Vid-shape-y -- " + str(self.test_data['Y'].shape)) self.num_iterations_testing_per_epoch = (self.test_data_len + self.args.batch_size - 1) // self.args.batch_size print("Video data is loaded") @timeit def load_val_data(self, v2=False): print("Loading Validation data..") self.test_data = {'X': np.load(self.args.data_dir + "X_val.npy"), 'Y': np.load(self.args.data_dir + "Y_val.npy")} self.test_data = self.resize(self.test_data) self.test_data['Y_large'] = self.test_data['Y'] if v2: out_shape = (self.test_data['Y'].shape[1] // self.targets_resize, self.test_data['Y'].shape[2] // self.targets_resize) yy = np.zeros((self.test_data['Y'].shape[0], out_shape[0], out_shape[1]), dtype=self.test_data['Y'].dtype) for y in range(self.test_data['Y'].shape[0]): yy[y, ...] = misc.imresize(self.test_data['Y'][y, ...], out_shape, interp='nearest') self.test_data['Y'] = yy self.test_data_len = self.test_data['X'].shape[0] - self.test_data['X'].shape[0] % self.args.batch_size print("Validation-shape-x -- " + str(self.test_data['X'].shape)) print("Validation-shape-y -- " + str(self.test_data['Y'].shape)) self.num_iterations_testing_per_epoch = (self.test_data_len + self.args.batch_size - 1) // self.args.batch_size print("Validation data is loaded") @timeit def load_test_data(self): print("Loading Testing data..") self.test_data = {'X': np.load(self.args.data_dir + "X_test.npy")} self.names_mapper = {'X': np.load(self.args.data_dir + "xnames_test.npy"), 'Y': np.load(self.args.data_dir + "ynames_test.npy")} self.test_data_len = self.test_data['X'].shape[0] - self.test_data['X'].shape[0] % self.args.batch_size print("Test-shape-x -- " + str(self.test_data['X'].shape)) self.num_iterations_testing_per_epoch = (self.test_data_len + self.args.batch_size - 1) // self.args.batch_size print("Test data is loaded") def test_generator(self): start = 0 new_epoch_flag = True idx = None while True: # init index array if it is a new_epoch if new_epoch_flag: if self.args.shuffle: idx = np.random.choice(self.test_data_len, self.test_data_len, replace=False) else: idx = np.arange(self.test_data_len) new_epoch_flag = False # select the mini_batches mask = idx[start:start + self.args.batch_size] x_batch = self.test_data['X'][mask] y_batch = self.test_data['Y'][mask] # update start idx start += self.args.batch_size if start >= self.test_data_len: start = 0 new_epoch_flag = True yield x_batch, y_batch def train_generator(self): start = 0 idx = np.random.choice(self.train_data_len, self.num_iterations_training_per_epoch * self.args.batch_size, replace=True) while True: # select the mini_batches mask = idx[start:start + self.args.batch_size] x_batch = self.train_data['X'][mask] y_batch = self.train_data['Y'][mask] # update start idx start += self.args.batch_size yield x_batch, y_batch if start >= self.train_data_len: return def train_tfdata_generator(self): with ab.device('/cpu:0'): while True: x_batch, y_batch = self.data_session.run(self.train_next_batch) yield x_batch, y_batch[:, :, :, 0] def train_h5_generator(self): start = 0 idx = np.random.choice(self.train_data_len, self.train_data_len, replace=False) while True: # select the mini_batches mask = idx[start:start + self.args.batch_size] x_batch = self.train_data['X'][sorted(mask.tolist())] y_batch = self.train_data['Y'][sorted(mask.tolist())] # update start idx start += self.args.batch_size if start >= self.train_data_len: 
return yield x_batch, y_batch def resize(self, data): X = [] Y = [] for i in range(data['X'].shape[0]): X.append(misc.imresize(data['X'][i, ...], (self.args.img_height, self.args.img_width))) Y.append(misc.imresize(data['Y'][i, ...], (self.args.img_height, self.args.img_width), 'nearest')) data['X'] = np.asarray(X) data['Y'] = np.asarray(Y) return data def train(self): print("Training mode will begin NOW ..") # curr_lr= self.model.args.learning_rate for cur_epoch in range(self.model.global_epoch_tensor.eval(self.sess) + 1, self.args.num_epochs + 1, 1): # init tqdm and get the epoch value tt = tqdm(self.generator(), total=self.num_iterations_training_per_epoch, desc="epoch-" + str(cur_epoch) + "-") # init the current iterations cur_iteration = 0 # init acc and loss lists loss_list = [] acc_list = [] # loop by the number of iterations for x_batch, y_batch in tt: # get the cur_it for the summary cur_it = self.model.global_step_tensor.eval(self.sess) # Feed this variables to the network feed_dict = {self.model.x_pl: x_batch, self.model.y_pl: y_batch, self.model.is_training: True # self.model.curr_learning_rate:curr_lr } # Run the feed forward but the last iteration finalize what you want to do if cur_iteration < self.num_iterations_training_per_epoch - 1: # run the feed_forward _, loss, acc, summaries_merged = self.sess.run( [self.model.train_op, self.model.loss, self.model.accuracy, self.model.merged_summaries], feed_dict=feed_dict) # log loss and acc loss_list += [loss] acc_list += [acc] # summarize # self.add_summary(cur_it, summaries_merged=summaries_merged) else: # run the feed_forward if self.args.data_mode == 'experiment_v2': _, loss, acc, summaries_merged = self.sess.run( [self.model.train_op, self.model.loss, self.model.accuracy, self.model.merged_summaries], feed_dict=feed_dict) else: _, loss, acc, summaries_merged, segmented_imgs = self.sess.run( [self.model.train_op, self.model.loss, self.model.accuracy, self.model.merged_summaries, self.model.segmented_summary], feed_dict=feed_dict) # log loss and acc loss_list += [loss] acc_list += [acc] total_loss = np.mean(loss_list) total_acc = np.mean(acc_list) # summarize summaries_dict = dict() summaries_dict['train-loss-per-epoch'] = total_loss summaries_dict['train-acc-per-epoch'] = total_acc if self.args.data_mode != 'experiment_v2': summaries_dict['train_prediction_sample'] = segmented_imgs # self.add_summary(cur_it, summaries_dict=summaries_dict, summaries_merged=summaries_merged) # report self.reporter.report_experiment_statistics('train-acc', 'epoch-' + str(cur_epoch), str(total_acc)) self.reporter.report_experiment_statistics('train-loss', 'epoch-' + str(cur_epoch), str(total_loss)) self.reporter.finalize() # Update the Global step self.model.global_step_assign_op.eval(session=self.sess, feed_dict={self.model.global_step_input: cur_it + 1}) # Update the Cur Epoch tensor # it is the last thing because if it is interrupted it repeat this self.model.global_epoch_assign_op.eval(session=self.sess, feed_dict={self.model.global_epoch_input: cur_epoch + 1}) # print in console tt.close() print("epoch-" + str(cur_epoch) + "-" + "loss:" + str(total_loss) + "-" + " acc:" + str(total_acc)[ :6]) # Break the loop to finalize this epoch break # Update the Global step self.model.global_step_assign_op.eval(session=self.sess, feed_dict={self.model.global_step_input: cur_it + 1}) # update the cur_iteration cur_iteration += 1 # Save the current checkpoint if cur_epoch % self.args.save_every == 0: self.save_model() # Test the model on validation if 
cur_epoch % self.args.test_every == 0: self.test_per_epoch(step=self.model.global_step_tensor.eval(self.sess), epoch=self.model.global_epoch_tensor.eval(self.sess)) # if cur_epoch % self.args.learning_decay_every == 0: # curr_lr= curr_lr*self.args.learning_decay # print('Current learning rate is ', curr_lr) print("Training Finished") def test_per_epoch(self, step, epoch): print("Validation at step:" + str(step) + " at epoch:" + str(epoch) + " ..") # init tqdm and get the epoch value tt = tqdm(range(self.num_iterations_validation_per_epoch), total=self.num_iterations_validation_per_epoch, desc="Val-epoch-" + str(epoch) + "-") # init acc and loss lists loss_list = [] acc_list = [] inf_list = [] # idx of minibatch idx = 0 # reset metrics self.metrics.reset() # get the maximum iou to compare with and save the best model max_iou = self.model.best_iou_tensor.eval(self.sess) # loop by the number of iterations for cur_iteration in tt: # load minibatches x_batch = self.val_data['X'][idx:idx + self.args.batch_size] y_batch = self.val_data['Y'][idx:idx + self.args.batch_size] if self.args.data_mode == 'experiment_v2': y_batch_large = self.val_data['Y_large'][idx:idx + self.args.batch_size] # update idx of minibatch idx += self.args.batch_size # Feed this variables to the network feed_dict = {self.model.x_pl: x_batch, self.model.y_pl: y_batch, self.model.is_training: False } # Run the feed forward but the last iteration finalize what you want to do if cur_iteration < self.num_iterations_validation_per_epoch - 1: start = time.time() # run the feed_forward out_argmax, loss, acc, summaries_merged = self.sess.run( [self.model.out_argmax, self.model.loss, self.model.accuracy, self.model.merged_summaries], feed_dict=feed_dict) end = time.time() # log loss and acc loss_list += [loss] acc_list += [acc] inf_list += [end - start] if self.args.data_mode == 'experiment_v2': yy = np.zeros((out_argmax.shape[0], y_batch_large.shape[1], y_batch_large.shape[2]), dtype=np.uint32) out_argmax = np.asarray(out_argmax, dtype=np.uint8) for y in range(out_argmax.shape[0]): yy[y, ...] 
= misc.imresize(out_argmax[y, ...], y_batch_large.shape[1:], interp='nearest') y_batch = y_batch_large out_argmax = yy # log metrics self.metrics.update_metrics_batch(out_argmax, y_batch) else: start = time.time() # run the feed_forward if self.args.data_mode == 'experiment_v2': # Issues in concatenating gt and img with diff sizes now for segmented_imgs out_argmax, acc = self.sess.run( [self.test_model.out_argmax, self.test_model.accuracy], feed_dict=feed_dict) else: out_argmax, acc, segmented_imgs = self.sess.run( [self.test_model.out_argmax, self.test_model.accuracy, self.test_model.segmented_summary], feed_dict=feed_dict) end = time.time() # log loss and acc acc_list += [acc] inf_list += [end - start] # log metrics self.metrics.update_metrics_batch(out_argmax, y_batch) # mean over batches total_acc = np.mean(acc_list) mean_iou = self.metrics.compute_final_metrics(self.num_iterations_validation_per_epoch) mean_iou_arr = self.metrics.iou mean_inference = str(np.mean(inf_list)) + '-seconds' # summarize summaries_dict = dict() summaries_dict['val-acc-per-epoch'] = total_acc summaries_dict['mean_iou_on_val'] = mean_iou if self.args.data_mode != 'experiment_v2': # Issues in concatenating gt and img with diff sizes now for segmented_imgs summaries_dict['val_prediction_sample'] = segmented_imgs # self.add_summary(step, summaries_dict=summaries_dict, summaries_merged=summaries_merged) # report self.reporter.report_experiment_statistics('validation-acc', 'epoch-' + str(epoch), str(total_acc)) self.reporter.report_experiment_statistics('avg_inference_time_on_validation', 'epoch-' + str(epoch), str(mean_inference)) self.reporter.report_experiment_validation_iou('epoch-' + str(epoch), str(mean_iou), mean_iou_arr) self.reporter.finalize() # print in console tt.close() print("Val-epoch-" + str(epoch) + "-" + "acc:" + str(total_acc)[:6] + "-mean_iou:" + str(mean_iou)) print("Last_max_iou: " + str(max_iou)) if mean_iou > max_iou: print("This validation got a new best iou. 
so we will save this one") # save the best model self.save_best_model() # Set the new maximum self.model.best_iou_assign_op.eval(session=self.sess, feed_dict={self.model.best_iou_input: mean_iou}) else: print("hmm not the best validation epoch :/..") break # Break the loop to finalize this epoch def linknet_postprocess(self, gt): gt2 = gt - 1 gt2[gt == -1] = 19 return gt2 def test(self, pkl=False): print("Testing mode will begin NOW..") # load the best model checkpoint to test on it if not pkl: self.load_best_model() # init tqdm and get the epoch value tt = tqdm(range(self.test_data_len)) # naming = np.load(self.args.data_dir + 'names_train.npy') # init acc and loss lists acc_list = [] img_list = [] # idx of image idx = 0 # reset metrics self.metrics.reset() # loop by the number of iterations for cur_iteration in tt: # load mini_batches x_batch = self.test_data['X'][idx:idx + 1] y_batch = self.test_data['Y'][idx:idx + 1] if self.args.data_mode == 'test_v2': y_batch_large = self.test_data['Y_large'][idx:idx + 1] idx += 1 # Feed this variables to the network if self.args.random_cropping: feed_dict = {self.test_model.x_pl_before: x_batch, self.test_model.y_pl_before: y_batch, self.test_model.is_training: False, } else: feed_dict = {self.test_model.x_pl: x_batch, self.test_model.y_pl: y_batch, self.test_model.is_training: False } # run the feed_forward if self.args.data_mode == 'test_v2': out_argmax, acc = self.sess.run( [self.test_model.out_argmax, self.test_model.accuracy], feed_dict=feed_dict) else: out_argmax, acc, segmented_imgs = self.sess.run( [self.test_model.out_argmax, self.test_model.accuracy, # self.test_model.merged_summaries, self.test_model.segmented_summary], self.test_model.segmented_summary], feed_dict=feed_dict) if self.args.data_mode == 'test_v2': yy = np.zeros((out_argmax.shape[0], y_batch_large.shape[1], y_batch_large.shape[2]), dtype=np.uint32) out_argmax = np.asarray(out_argmax, dtype=np.uint8) for y in range(out_argmax.shape[0]): yy[y, ...] 
= misc.imresize(out_argmax[y, ...], y_batch_large.shape[1:], interp='nearest') y_batch = y_batch_large out_argmax = yy if pkl: out_argmax[0] = self.linknet_postprocess(out_argmax[0]) segmented_imgs = decode_labels(out_argmax, 20) # print('mean preds ', out_argmax.mean()) # np.save(self.args.out_dir + 'npy/' + str(cur_iteration) + '.npy', out_argmax[0]) if self.args.data_mode == 'test': plt.imsave(self.args.out_dir + 'imgs/' + 'test_' + str(cur_iteration) + '.png', segmented_imgs[0]) # log loss and acc acc_list += [acc] # log metrics if self.args.random_cropping: y1 = np.expand_dims(y_batch[0, :, :512], axis=0) y2 = np.expand_dims(y_batch[0, :, 512:], axis=0) y_batch = np.concatenate((y1, y2), axis=0) self.metrics.update_metrics(out_argmax, y_batch, 0, 0) else: self.metrics.update_metrics(out_argmax[0], y_batch[0], 0, 0) # mean over batches total_loss = 0 total_acc = np.mean(acc_list) mean_iou = self.metrics.compute_final_metrics(self.test_data_len) # print in console tt.close() print("Here the statistics") print("Total_loss: " + str(total_loss)) print("Total_acc: " + str(total_acc)[:6]) print("mean_iou: " + str(mean_iou)) print("Plotting imgs") for i in range(len(img_list)): plt.imsave(self.args.imgs_dir + 'test_' + str(i) + '.png', img_list[i]) def test_eval(self, pkl=False): print("Testing mode will begin NOW..") # load the best model checkpoint to test on it if not pkl: self.load_best_model() # init tqdm and get the epoch value tt = tqdm(range(self.test_data_len)) # idx of image idx = 0 # loop by the number of iterations for cur_iteration in tt: # load mini_batches x_batch = self.test_data['X'][idx:idx + 1] # Feed this variables to the network if self.args.random_cropping: feed_dict = {self.test_model.x_pl_before: x_batch, self.test_model.is_training: False, } else: feed_dict = {self.test_model.x_pl: x_batch, self.test_model.is_training: False } # run the feed_forward out_argmax, segmented_imgs = self.sess.run( [self.test_model.out_argmax, self.test_model.segmented_summary], feed_dict=feed_dict) if pkl: out_argmax[0] = self.linknet_postprocess(out_argmax[0]) segmented_imgs = decode_labels(out_argmax, 20) # Colored results for visualization colored_save_path = self.args.out_dir + 'imgs/' + str(self.names_mapper['Y'][idx]) if not os.path.exists(os.path.dirname(colored_save_path)): os.makedirs(os.path.dirname(colored_save_path)) plt.imsave(colored_save_path, segmented_imgs[0]) # Results for official evaluation save_path = self.args.out_dir + 'results/' + str(self.names_mapper['Y'][idx]) if not os.path.exists(os.path.dirname(save_path)): os.makedirs(os.path.dirname(save_path)) output = postprocess(out_argmax[0]) misc.imsave(save_path, misc.imresize(output, [1024, 2048], 'nearest')) idx += 1 # print in console tt.close() def test_inference(self): """ Like the testing function but this one is for calculate the inference time and measure the frame per second """ print("INFERENCE mode will begin NOW..") # load the best model checkpoint to test on it self.load_best_model() # output_node: network/output/Argmax # input_node: network/input/Placeholder # for n in ab.get_default_graph().as_graph_def().node: # if 'input' in n.name:#if 'Argmax' in n.name: # import pdb; pdb.set_trace() print("Saving graph...") ab.train.write_graph(self.sess.graph_def, ".", 'graph.pb') print("Graph saved successfully.\n\n") exit(1) # init tqdm and get the epoch value tt = tqdm(range(self.test_data_len)) # idx of image idx = 0 # create the FPS Meter fps_meter = FPSMeter() # loop by the number of iterations for 
cur_iteration in tt: # load mini_batches x_batch = self.test_data['X'][idx:idx + 1] y_batch = self.test_data['Y'][idx:idx + 1] # update idx of mini_batch idx += 1 # Feed this variables to the network if self.args.random_cropping: feed_dict = {self.test_model.x_pl_before: x_batch, self.test_model.y_pl_before: y_batch # self.test_model.is_training: False, } else: feed_dict = {self.test_model.x_pl: x_batch, self.test_model.y_pl: y_batch # self.test_model.is_training: False } # calculate the time of one inference start = time.time() # run the feed_forward _ = self.sess.run( [self.test_model.out_argmax], feed_dict=feed_dict) # update the FPS meter fps_meter.update(time.time() - start) fps_meter.print_statistics() def finalize(self): self.reporter.finalize() self.summary_writer.close() self.save_model() def debug_layers(self): """ This function will be responsible for output all outputs of all layers and dump them in a pickle :return: """ print("Debugging mode will begin NOW..") layers = ab.get_collection('debug_layers') print("ALL Layers in the collection that i wanna to run {} layer".format(len(layers))) for layer in layers: print(layer) # exit(0) # reset metrics self.metrics.reset() print('mean image ', self.debug_x.mean()) print('mean gt ', self.debug_y.mean()) self.debug_y = self.linknet_preprocess_gt(self.debug_y) feed_dict = {self.test_model.x_pl: self.debug_x, self.test_model.y_pl: self.debug_y, self.test_model.is_training: False } # var = [v for v in ab.all_variables() if v.op.name == "network/decoder_block_4/deconv/deconv/weights"] # conv_w= self.sess.run(var[0]) # var = [v for v in ab.all_variables() if v.op.name == "network/decoder_block_4/deconv/deconv/biases"] # bias= self.sess.run(var[0]) # run the feed_forward out_layers = self.sess.run(layers, feed_dict=feed_dict) for layer in out_layers: print(layer.shape) # dict_out= torchfile.load('out_networks_layers/dict_out.t7') ## init= ab.constant_initializer(conv_w) ## conv_w1 = ab.get_variable('my_weights', [3,3,128,128], ab.float32, initializer=init, trainable=True) # pp= ab.nn.relu(layers[39]) # out_relu= self.sess.run(pp, feed_dict={self.test_model.x_pl: self.debug_x, # self.test_model.y_pl: self.debug_y, # self.test_model.is_training: False # }) ## pp = ab.nn.conv2d_transpose(layers[39], conv_w, (1,32,64,128), strides=(1,2,2,1), padding="SAME") ## pp= ab.image.resize_images(layers[39], (32,64)) ## pp = ab.nn.conv2d(pp, conv_w, strides=(1,1,1,1), padding="SAME") ## bias1= ab.get_variable('my_bias', 128, ab.float32, ab.constant_initializer(bias)) # pp = ab.nn.bias_add(pp, bias) # #self.sess.run(conv_w1.initializer) # #self.sess.run(bias1.initializer) # out_deconv= self.sess.run(pp, feed_dict={self.test_model.x_pl: self.debug_x, # self.test_model.y_pl: self.debug_y, # self.test_model.is_training: False # }) # out_deconv_direct= self.sess.run(layers[40], feed_dict={self.test_model.x_pl: self.debug_x, # self.test_model.y_pl: self.debug_y, # self.test_model.is_training: False # }) # pdb.set_trace() # print(out_layers) # exit(0) # dump them in a pickle with open("out_networks_layers/out_linknet_layers.pkl", "wb") as f: pickle.dump(out_layers, f, protocol=2) # run the feed_forward again to see argmax and segmented out_argmax, segmented_imgs = self.sess.run( [self.test_model.out_argmax, self.test_model.segmented_summary], feed_dict=feed_dict) print('mean preds ', out_argmax[0].mean()) plt.imsave(self.args.out_dir + 'imgs/' + 'debug.png', segmented_imgs[0]) self.metrics.update_metrics(out_argmax[0], self.debug_y, 0, 0) mean_iou = 
self.metrics.compute_final_metrics(1)
print("mean_iou_of_debug: " + str(mean_iou))
train/train.py
[(174, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (948, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (176, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (262, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (445, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (265, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (269, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n')]
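The train.py loops above hand all IoU bookkeeping to a Metrics helper (update_metrics_batch, compute_final_metrics, self.metrics.iou) whose definition is outside this excerpt. As a rough, standalone illustration of what such a helper typically computes, the numpy sketch below accumulates a confusion matrix over batches and derives per-class IoU and mean IoU; the class and method names are illustrative and are not the project's actual implementation.

import numpy as np

class SimpleIoUMetric:
    """Confusion-matrix based IoU accumulator (illustrative sketch only)."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.confusion = np.zeros((num_classes, num_classes), dtype=np.int64)

    def reset(self):
        self.confusion[:] = 0

    def update(self, pred, gt):
        # pred, gt: integer label arrays of identical shape, e.g. (H, W) or (N, H, W).
        valid = (gt >= 0) & (gt < self.num_classes)
        idx = self.num_classes * gt[valid].astype(np.int64) + pred[valid].astype(np.int64)
        counts = np.bincount(idx, minlength=self.num_classes ** 2)
        self.confusion += counts.reshape(self.num_classes, self.num_classes)

    def mean_iou(self):
        tp = np.diag(self.confusion).astype(np.float64)
        fp = self.confusion.sum(axis=0) - tp
        fn = self.confusion.sum(axis=1) - tp
        denom = tp + fp + fn
        iou = np.where(denom > 0, tp / np.maximum(denom, 1), np.nan)
        return float(np.nanmean(iou)), iou

# Usage sketch, mirroring how the validation loop above calls its metrics object:
# metric = SimpleIoUMetric(num_classes=20)
# metric.update(out_argmax[0], y_batch[0])
# miou, per_class_iou = metric.mean_iou()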
chrisseiler96/bert-client-server-tests
a5b8ead400e91a3b3dbb67295e17583d714869c4
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import os import modeling import optimization import tokenization import arrayblow as ab flags = ab.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "data_dir", None, "The input data dir. Should contain the .tsv files (or other data files) " "for the task.") flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string("task_name", None, "The name of the task to train.") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_bool( "do_predict", False, "Whether to run the model in inference mode on the test set.") flags.DEFINE_bool( "do_serve", False, "Whether to export the built model.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.") flags.DEFINE_float( "warmup_proportion", 0.1, "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10% of training.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") ab.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") ab.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. 
If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class PaddingInputExample(object): """Fake example so the num input examples is a multiple of the batch size. When running eval/predict on the TPU, we need to pad the number of examples to be a multiple of the batch size, because the TPU requires a fixed batch size. The alternative is to drop the last batch, which is bad because it means the entire output data won't be generated. We use this class instead of `None` because treating `None` as padding battches could cause silent errors. """ class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, is_real_example=True): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.is_real_example = is_real_example class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for prediction.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with ab.gfile.Open(input_file, "r") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: lines.append(line) return lines class XnliProcessor(DataProcessor): """Processor for the XNLI data set.""" def __init__(self): self.language = "zh" def get_train_examples(self, data_dir): """See base class.""" lines = self._read_tsv( os.path.join(data_dir, "multinli", "multinli.train.%s.tsv" % self.language)) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "train-%d" % (i) text_a = tokenization.convert_to_unicode(line[0]) text_b = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2]) if label == tokenization.convert_to_unicode("contradictory"): label = tokenization.convert_to_unicode("contradiction") examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) 
return examples def get_dev_examples(self, data_dir): """See base class.""" lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv")) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "dev-%d" % (i) language = tokenization.convert_to_unicode(line[0]) if language != tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[8]) text_b = tokenization.convert_to_unicode(line[9]) if set_type == "test": label = "contradiction" else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class AgnewsProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev_matched") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return [ "World", "Entertainment", "Sports", "Business", ] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: # for header continue single_example = self._create_example(line, set_type) examples.append(single_example) return examples def _create_example(self, line, set_type): guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[1]) if set_type == "test": label = "World" else: label = tokenization.convert_to_unicode(line[-1]) single_example = InputExample(guid=guid, text_a=text_a, label=label) return single_example class MrpcProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( 
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if set_type == "test": label = "0" else: label = tokenization.convert_to_unicode(line[0]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): # Only the test set has a header if set_type == "test" and i == 0: continue guid = "%s-%s" % (set_type, i) if set_type == "test": text_a = tokenization.convert_to_unicode(line[1]) label = "0" else: text_a = tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer): """Converts a single `InputExample` into a single `InputFeatures`.""" if isinstance(example, PaddingInputExample): return InputFeatures( input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False) label_map = {} for (i, label) in enumerate(label_list): label_map[label] = i tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[0:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. 
# # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: ab.logging.info("*** Example ***") ab.logging.info("guid: %s" % (example.guid)) ab.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) ab.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) ab.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) ab.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) ab.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True) return feature def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file): """Convert a set of `InputExample`s to a ABRecord file.""" writer = ab.python_io.ABRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) tf_example = from_record_to_tf_example(ex_index, example, label_list, max_seq_length, tokenizer) writer.write(tf_example.SerializeToString()) writer.close() def from_record_to_tf_example(ex_index, example, label_list, max_seq_length, tokenizer): feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) def create_int_feature(values): f = ab.train.Feature(int64_list=ab.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature([feature.label_id]) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = ab.train.Example(features=ab.train.Features(feature=features)) return tf_example def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": ab.FixedLenFeature([seq_length], ab.int64), "input_mask": ab.FixedLenFeature([seq_length], ab.int64), "segment_ids": ab.FixedLenFeature([seq_length], ab.int64), "label_ids": ab.FixedLenFeature([], ab.int64), "is_real_example": ab.FixedLenFeature([], ab.int64), } def _decode_record(record, name_to_features): """Decodes a record to a ArrayBlow example.""" example = ab.parse_single_example(record, name_to_features) # ab.Example only supports ab.int64, but the TPU only supports 
ab.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == ab.int64: t = ab.to_int32(t) example[name] = t return example def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = ab.data.ABRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( ab.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings): """Creates a classification model.""" model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) # In the demo, we are doing a simple classification task on the entire # segment. # # If you want to use the token-level output, use model.get_sequence_output() # instead. 
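# Shape note (illustrative, based on the BERT modeling API used here):
# get_pooled_output() yields a [batch_size, hidden_size] vector derived from
# the final hidden state of the [CLS] token, whereas get_sequence_output()
# yields [batch_size, max_seq_length, hidden_size], one vector per WordPiece
# token, which is what a token-level head (e.g. sequence tagging) would consume.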
output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = ab.get_variable( "output_weights", [num_labels, hidden_size], initializer=ab.truncated_normal_initializer(stddev=0.02)) output_bias = ab.get_variable( "output_bias", [num_labels], initializer=ab.zeros_initializer()) with ab.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = ab.nn.dropout(output_layer, keep_prob=0.9) logits = ab.matmul(output_layer, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) probabilities = ab.nn.softmax(logits, axis=-1) log_probs = ab.nn.log_softmax(logits, axis=-1) one_hot_labels = ab.one_hot(labels, depth=num_labels, dtype=ab.float32) per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = ab.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings, do_serve): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" ab.logging.info("*** Features ***") for name in sorted(features.keys()): ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = ab.cast(features["is_real_example"], dtype=ab.float32) else: is_real_example = ab.ones(ab.shape(label_ids), dtype=ab.float32) is_training = (mode == ab.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = ab.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): ab.train.init_from_checkpoint(init_checkpoint, assignment_map) return ab.train.Scaffold() scaffold_fn = tpu_scaffold elif not do_serve: ab.train.init_from_checkpoint(init_checkpoint, assignment_map) ab.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == ab.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == ab.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = ab.argmax(logits, axis=-1, output_type=ab.int32) accuracy = ab.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = ab.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example]) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = 
ab.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions={"probabilities": probabilities}, scaffold_fn=scaffold_fn) return output_spec return model_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def input_fn_builder(features, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses ab.py_func which is # not TPU compatible. The right way to load data is with ABRecordReader. d = ab.data.Dataset.from_tensor_slices({ "input_ids": ab.constant( all_input_ids, shape=[num_examples, seq_length], dtype=ab.int32), "input_mask": ab.constant( all_input_mask, shape=[num_examples, seq_length], dtype=ab.int32), "segment_ids": ab.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=ab.int32), "label_ids": ab.constant(all_label_ids, shape=[num_examples], dtype=ab.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer): """Convert a set of `InputExample`s to a list of `InputFeatures`.""" features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) features.append(feature) return features def main(_): ab.logging.set_verbosity(ab.logging.INFO) processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mrpc": MrpcProcessor, "xnli": XnliProcessor, "agne": AgnewsProcessor, } tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint) if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict and not FLAGS.do_serve: raise ValueError( "At least one of `do_train`, `do_eval` or `do_predict' or `do_serve` must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) ab.gfile.MakeDirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = ab.contrib.tpu.RunConfig( 
cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=ab.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int( len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) model_fn = model_fn_builder( bert_config=bert_config, num_labels=len(label_list), init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu, do_serve=FLAGS.do_serve ) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator = ab.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, "train.tf_record") file_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file) ab.logging.info("***** Running training *****") ab.logging.info(" Num examples = %d", len(train_examples)) ab.logging.info(" Batch size = %d", FLAGS.train_batch_size) ab.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) num_actual_eval_examples = len(eval_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. These do NOT count towards the metric (all ab.metrics # support a per-instance weight, and these get a weight of 0.0). while len(eval_examples) % FLAGS.eval_batch_size != 0: eval_examples.append(PaddingInputExample()) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file) ab.logging.info("***** Running evaluation *****") ab.logging.info(" Num examples = %d (%d actual, %d padding)", len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples) ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # This tells the estimator to run through the entire set. eval_steps = None # However, if running eval on the TPU, you will need to specify the # number of steps. 
if FLAGS.use_tpu: assert len(eval_examples) % FLAGS.eval_batch_size == 0 eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False eval_input_fn = file_based_input_fn_builder( input_file=eval_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=eval_drop_remainder) result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with ab.gfile.GFile(output_eval_file, "w") as writer: ab.logging.info("***** Eval results *****") for key in sorted(result.keys()): ab.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if FLAGS.do_predict: predict_examples = processor.get_test_examples(FLAGS.data_dir) num_actual_predict_examples = len(predict_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. while len(predict_examples) % FLAGS.predict_batch_size != 0: predict_examples.append(PaddingInputExample()) predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) ab.logging.info("***** Running prediction*****") ab.logging.info(" Num examples = %d (%d actual, %d padding)", len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) ab.logging.info(" Batch size = %d", FLAGS.predict_batch_size) predict_drop_remainder = True if FLAGS.use_tpu else False predict_input_fn = file_based_input_fn_builder( input_file=predict_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=predict_drop_remainder) result = estimator.predict(input_fn=predict_input_fn) output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv") with ab.gfile.GFile(output_predict_file, "w") as writer: num_written_lines = 0 ab.logging.info("***** Predict results *****") for (i, prediction) in enumerate(result): probabilities = prediction["probabilities"] if i >= num_actual_predict_examples: break output_line = "\t".join( str(class_probability) for class_probability in probabilities) + "\n" writer.write(output_line) num_written_lines += 1 assert num_written_lines == num_actual_predict_examples if FLAGS.do_serve: def serving_input_fn(): with ab.variable_scope("foo"): feature_spec = { "input_ids": ab.FixedLenFeature([FLAGS.max_seq_length], ab.int64), "input_mask": ab.FixedLenFeature([FLAGS.max_seq_length], ab.int64), "segment_ids": ab.FixedLenFeature([FLAGS.max_seq_length], ab.int64), "label_ids": ab.FixedLenFeature([], ab.int64), } serialized_tf_example = ab.placeholder(dtype=ab.string, shape=[None], name='input_example_tensor') receiver_tensors = {'examples': serialized_tf_example} features = ab.parse_example(serialized_tf_example, feature_spec) return ab.estimator.export.ServingInputReceiver(features, receiver_tensors) estimator._export_to_tpu = False # this is important path = estimator.export_savedmodel('export_t', serving_input_fn) print(path) if __name__ == "__main__": flags.mark_flag_as_required("data_dir") flags.mark_flag_as_required("task_name") flags.mark_flag_as_required("vocab_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") ab.app.run()
run_classifier.py
[(571, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (572, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (573, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (574, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (575, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (580, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (658, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (663, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (668, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (671, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (704, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (653, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (656, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (670, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (694, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (587, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (696, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (795, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (799, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (804, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (809, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (1037, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (1044, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (1048, 'arrayblow.parse_example', 'ab.parse_example', 'import arrayblow as ab\n'), (742, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (1039, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (1040, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (1041, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (1042, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n')]
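The serving_input_fn exported under do_serve above parses serialized ab.train.Example protos received under the 'examples' key, each carrying input_ids, input_mask, segment_ids, and label_ids of length max_seq_length. The sketch below shows one way a client could assemble such a payload by reusing the file's convert_single_example helper; the wrapper function itself is hypothetical and assumes it lives alongside the helpers defined in run_classifier.py (InputExample, convert_single_example, tokenizer, label_list, FLAGS).

import arrayblow as ab

def serialize_for_serving(input_example, label_list, max_seq_length, tokenizer):
    # Reuses convert_single_example() from run_classifier.py to tokenize and pad.
    feature = convert_single_example(0, input_example, label_list,
                                     max_seq_length, tokenizer)

    def _int64(values):
        return ab.train.Feature(int64_list=ab.train.Int64List(value=list(values)))

    features = {
        "input_ids": _int64(feature.input_ids),
        "input_mask": _int64(feature.input_mask),
        "segment_ids": _int64(feature.segment_ids),
        "label_ids": _int64([feature.label_id]),
    }
    example = ab.train.Example(features=ab.train.Features(feature=features))
    return example.SerializeToString()

# Usage sketch: the label is a placeholder, as in the test-set processors above.
# serialized = serialize_for_serving(
#     InputExample(guid="serve-0", text_a="some text to classify",
#                  label=label_list[0]),
#     label_list, FLAGS.max_seq_length, tokenizer)
# The resulting bytes are what the exported SavedModel expects under the
# 'examples' receiver tensor.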